repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
leptos-rs/leptos | https://github.com/leptos-rs/leptos/blob/dd507168fa47b5eead64339431b0bd654bd1c951/meta/src/meta_tags.rs | meta/src/meta_tags.rs | use crate::register;
use leptos::{
component,
prelude::{CustomAttribute, GlobalAttributes},
tachys::html::element::meta,
text_prop::TextProp,
IntoView,
};
/// Injects an [`HTMLMetaElement`](https://developer.mozilla.org/en-US/docs/Web/API/HTMLMetaElement) into the document
/// head to set metadata
///
/// ```
/// use leptos::prelude::*;
/// use leptos_meta::*;
///
/// #[component]
/// fn MyApp() -> impl IntoView {
/// provide_meta_context();
///
/// view! {
/// <main>
/// <Meta charset="utf-8"/>
/// <Meta name="description" content="A Leptos fan site."/>
/// <Meta http_equiv="refresh" content="3;url=https://github.com/leptos-rs/leptos"/>
/// </main>
/// }
/// }
/// ```
#[component]
pub fn Meta(
    /// The [`charset`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/meta#attr-charset) attribute.
    #[prop(optional, into)]
    charset: Option<TextProp>,
    /// The [`name`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/meta#attr-name) attribute.
    #[prop(optional, into)]
    name: Option<TextProp>,
    /// The [`property`](https://ogp.me/) attribute.
    #[prop(optional, into)]
    property: Option<TextProp>,
    /// The [`http-equiv`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/meta#attr-http-equiv) attribute.
    #[prop(optional, into)]
    http_equiv: Option<TextProp>,
    /// The [`itemprop`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/meta#attr-itemprop) attribute.
    #[prop(optional, into)]
    itemprop: Option<TextProp>,
    /// The [`content`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/meta#attr-content) attribute.
    #[prop(optional, into)]
    content: Option<TextProp>,
) -> impl IntoView {
    // Wrap a `TextProp` in a closure so the attribute value is re-read
    // reactively. The closure captures nothing, so it is `Copy` and can be
    // reused for every attribute below.
    let reactive = |v: TextProp| move || v.get();
    register(
        meta()
            .charset(charset.map(reactive))
            .name(name.map(reactive))
            .attr("property", property.map(reactive))
            .http_equiv(http_equiv.map(reactive))
            .itemprop(itemprop.map(reactive))
            .content(content.map(reactive)),
    )
}
| rust | MIT | dd507168fa47b5eead64339431b0bd654bd1c951 | 2026-01-04T15:41:20.302544Z | false |
leptos-rs/leptos | https://github.com/leptos-rs/leptos/blob/dd507168fa47b5eead64339431b0bd654bd1c951/meta/src/link.rs | meta/src/link.rs | use crate::register;
use leptos::{
component, oco::Oco, prelude::GlobalAttributes,
tachys::html::element::link, IntoView,
};
/// Injects an [`HTMLLinkElement`](https://developer.mozilla.org/en-US/docs/Web/API/HTMLLinkElement) into the document
/// head, accepting any of the valid attributes for that tag.
///
/// ```
/// use leptos::prelude::*;
/// use leptos_meta::*;
///
/// #[component]
/// fn MyApp() -> impl IntoView {
/// provide_meta_context();
///
/// view! {
/// <main>
/// <Link rel="preload"
/// href="myFont.woff2"
/// as_="font"
/// type_="font/woff2"
/// crossorigin="anonymous"
/// />
/// </main>
/// }
/// }
/// ```
#[component]
pub fn Link(
    /// The [`id`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link#attr-id) attribute.
    #[prop(optional, into)]
    id: Option<Oco<'static, str>>,
    /// The [`as`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link#attr-as) attribute.
    #[prop(optional, into)]
    as_: Option<Oco<'static, str>>,
    /// The [`crossorigin`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link#attr-crossorigin) attribute.
    #[prop(optional, into)]
    crossorigin: Option<Oco<'static, str>>,
    /// The [`fetchpriority`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link#attr-fetchpriority) attribute.
    #[prop(optional, into)]
    fetchpriority: Option<Oco<'static, str>>,
    /// The [`href`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link#attr-href) attribute.
    #[prop(optional, into)]
    href: Option<Oco<'static, str>>,
    /// The [`hreflang`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link#attr-hreflang) attribute.
    #[prop(optional, into)]
    hreflang: Option<Oco<'static, str>>,
    /// The [`imagesizes`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link#attr-imagesizes) attribute.
    #[prop(optional, into)]
    imagesizes: Option<Oco<'static, str>>,
    /// The [`imagesrcset`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link#attr-imagesrcset) attribute.
    #[prop(optional, into)]
    imagesrcset: Option<Oco<'static, str>>,
    /// The [`integrity`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link#attr-integrity) attribute.
    #[prop(optional, into)]
    integrity: Option<Oco<'static, str>>,
    /// The [`media`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link#attr-media) attribute.
    #[prop(optional, into)]
    media: Option<Oco<'static, str>>,
    /// The [`referrerpolicy`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link#attr-referrerpolicy) attribute.
    #[prop(optional, into)]
    referrerpolicy: Option<Oco<'static, str>>,
    /// The [`rel`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link#attr-rel) attribute.
    #[prop(optional, into)]
    rel: Option<Oco<'static, str>>,
    /// The [`sizes`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link#attr-sizes) attribute.
    #[prop(optional, into)]
    sizes: Option<Oco<'static, str>>,
    /// The [`title`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link#attr-title) attribute.
    #[prop(optional, into)]
    title: Option<Oco<'static, str>>,
    /// The [`type`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link#attr-type) attribute.
    #[prop(optional, into)]
    type_: Option<Oco<'static, str>>,
    /// The [`blocking`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link#attr-blocking) attribute.
    #[prop(optional, into)]
    blocking: Option<Oco<'static, str>>,
) -> impl IntoView {
    // TODO additional attributes
    // NOTE: unlike <Meta/>, these props are plain strings (`Oco`), not
    // reactive `TextProp`s; props whose names collide with Rust keywords
    // (`as`, `type`) use trailing-underscore prop names mapped to the raw
    // identifiers below.
    register(
        link()
            .id(id)
            .r#as(as_)
            .crossorigin(crossorigin)
            .fetchpriority(fetchpriority)
            .href(href)
            .hreflang(hreflang)
            .imagesizes(imagesizes)
            .imagesrcset(imagesrcset)
            .integrity(integrity)
            .media(media)
            .referrerpolicy(referrerpolicy)
            .rel(rel)
            .sizes(sizes)
            .title(title)
            .r#type(type_)
            .blocking(blocking),
    )
}
| rust | MIT | dd507168fa47b5eead64339431b0bd654bd1c951 | 2026-01-04T15:41:20.302544Z | false |
leptos-rs/leptos | https://github.com/leptos-rs/leptos/blob/dd507168fa47b5eead64339431b0bd654bd1c951/meta/src/html.rs | meta/src/html.rs | use crate::ServerMetaContext;
use leptos::{
attr::{any_attribute::AnyAttribute, NextAttribute},
component, html,
reactive::owner::use_context,
tachys::{
dom::document,
html::attribute::Attribute,
hydration::Cursor,
view::{
add_attr::AddAnyAttr, Mountable, Position, PositionState, Render,
RenderHtml,
},
},
IntoView,
};
/// A component to set metadata on the document’s `<html>` element from
/// within the application.
///
/// This component takes no props, but can take any number of spread attributes
/// following the `{..}` operator.
///
/// ```
/// use leptos::prelude::*;
/// use leptos_meta::*;
///
/// #[component]
/// fn MyApp() -> impl IntoView {
/// provide_meta_context();
///
/// view! {
/// <main>
/// <Html
/// {..}
/// lang="he"
/// dir="rtl"
/// data-theme="dark"
/// />
/// </main>
/// }
/// }
/// ```
#[component]
pub fn Html() -> impl IntoView {
    // The component has no props of its own; spread attributes are attached
    // later through the `AddAnyAttr` implementation on `HtmlView`.
    let attributes = ();
    HtmlView { attributes }
}
/// Retained view for [`Html`]: applies its attributes to the document's root
/// `<html>` element rather than rendering an element of its own.
struct HtmlView<At> {
    attributes: At,
}
/// Retained state for [`HtmlView`]: only the attributes' own state, since the
/// `<html>` element itself is never created or removed by this view.
struct HtmlViewState<At>
where
    At: Attribute,
{
    attributes: At::State,
}
impl<At> Render for HtmlView<At>
where
    At: Attribute,
{
    type State = HtmlViewState<At>;

    fn build(self) -> Self::State {
        // Apply the attributes directly to the existing document root element.
        let el = document()
            .document_element()
            .expect("there to be a <html> element");
        let attributes = self.attributes.build(&el);
        HtmlViewState { attributes }
    }

    fn rebuild(self, state: &mut Self::State) {
        // Only the attributes are diffed; the <html> element never changes.
        self.attributes.rebuild(&mut state.attributes);
    }
}
impl<At> AddAnyAttr for HtmlView<At>
where
    At: Attribute,
{
    type Output<SomeNewAttr: Attribute> =
        HtmlView<<At as NextAttribute>::Output<SomeNewAttr>>;

    fn add_any_attr<NewAttr: Attribute>(
        self,
        attr: NewAttr,
    ) -> Self::Output<NewAttr>
    where
        Self::Output<NewAttr>: RenderHtml,
    {
        // Spread attributes ({..} attr=...) accumulate onto the attribute tuple.
        HtmlView {
            attributes: self.attributes.add_any_attr(attr),
        }
    }
}
impl<At> RenderHtml for HtmlView<At>
where
    At: Attribute,
{
    type AsyncOutput = HtmlView<At::AsyncOutput>;
    type Owned = HtmlView<At::CloneableOwned>;

    const MIN_LENGTH: usize = At::MIN_LENGTH;

    fn dry_resolve(&mut self) {
        self.attributes.dry_resolve();
    }

    async fn resolve(self) -> Self::AsyncOutput {
        HtmlView {
            attributes: self.attributes.resolve().await,
        }
    }

    fn to_html_with_buf(
        self,
        _buf: &mut String,
        _position: &mut Position,
        _escape: bool,
        _mark_branches: bool,
        extra_attrs: Vec<AnyAttribute>,
    ) {
        // Nothing is written at this view's own position. Instead, the
        // attributes are serialized to HTML and sent over the channel on
        // `ServerMetaContext`; `inject_meta_context` later inserts that string
        // after `<html` in the first chunk of the response.
        if let Some(meta) = use_context::<ServerMetaContext>() {
            let mut buf = String::new();
            _ = html::attributes_to_html(
                (self.attributes, extra_attrs),
                &mut buf,
            );
            if !buf.is_empty() {
                _ = meta.html.send(buf);
            }
        }
    }

    fn hydrate<const FROM_SERVER: bool>(
        self,
        _cursor: &Cursor,
        _position: &PositionState,
    ) -> Self::State {
        // Hydration ignores the cursor and attaches the attributes to the
        // already-existing document root element.
        let el = document()
            .document_element()
            .expect("there to be a <html> element");
        let attributes = self.attributes.hydrate::<FROM_SERVER>(&el);
        HtmlViewState { attributes }
    }

    fn into_owned(self) -> Self::Owned {
        HtmlView {
            attributes: self.attributes.into_cloneable_owned(),
        }
    }
}
impl<At> Mountable for HtmlViewState<At>
where
    At: Attribute,
{
    // Attributes already set on the real <html> element are left in place.
    fn unmount(&mut self) {}

    fn mount(
        &mut self,
        _parent: &leptos::tachys::renderer::types::Element,
        _marker: Option<&leptos::tachys::renderer::types::Node>,
    ) {
        // <Html> only sets attributes
        // the <html> tag doesn't need to be mounted anywhere, of course
    }

    fn insert_before_this(&self, _child: &mut dyn Mountable) -> bool {
        // Never claim a sibling can be inserted relative to this view, since
        // it does not occupy a position in the normal DOM flow.
        false
    }

    fn elements(&self) -> Vec<leptos::tachys::renderer::types::Element> {
        vec![document()
            .document_element()
            .expect("there to be a <html> element")]
    }
}
| rust | MIT | dd507168fa47b5eead64339431b0bd654bd1c951 | 2026-01-04T15:41:20.302544Z | false |
leptos-rs/leptos | https://github.com/leptos-rs/leptos/blob/dd507168fa47b5eead64339431b0bd654bd1c951/meta/src/lib.rs | meta/src/lib.rs | #![deny(missing_docs)]
#![forbid(unsafe_code)]
//! # Leptos Meta
//!
//! Leptos Meta allows you to modify content in a document’s `<head>` from within components
//! using the [`Leptos`](https://github.com/leptos-rs/leptos) web framework.
//!
//! Document metadata is updated automatically when running in the browser. For server-side
//! rendering, after the component tree is rendered to HTML, [`ServerMetaContextOutput::inject_meta_context`] will inject meta tags into a stream of HTML inside the `<head>`.
//!
//! ```
//! use leptos::prelude::*;
//! use leptos_meta::*;
//!
//! #[component]
//! fn MyApp() -> impl IntoView {
//! // Provides a [`MetaContext`], if there is not already one provided.
//! provide_meta_context();
//!
//! let (name, set_name) = create_signal("Alice".to_string());
//!
//! view! {
//! <Title
//! // reactively sets document.title when `name` changes
//! text=move || name.get()
//! // applies the `formatter` function to the `text` value
//! formatter=|text| format!("“{text}” is your name")
//! />
//! <main>
//! <input
//! prop:value=move || name.get()
//! on:input=move |ev| set_name.set(event_target_value(&ev))
//! />
//! </main>
//! }
//! }
//! ```
//! # Feature Flags
//! - `ssr` Server-side rendering: Generate an HTML string (typically on the server)
//! - `tracing` Adds integration with the `tracing` crate.
//!
//! **Important Note:** If you’re using server-side rendering, you should enable `ssr`.
use futures::{Stream, StreamExt};
use leptos::{
attr::{any_attribute::AnyAttribute, NextAttribute},
component,
logging::debug_warn,
oco::Oco,
reactive::owner::{provide_context, use_context},
tachys::{
dom::document,
html::{
attribute::Attribute,
element::{ElementType, HtmlElement},
},
hydration::Cursor,
view::{
add_attr::AddAnyAttr, Mountable, Position, PositionState, Render,
RenderHtml,
},
},
IntoView,
};
use send_wrapper::SendWrapper;
use std::{
fmt::Debug,
sync::{
mpsc::{channel, Receiver, Sender},
Arc, LazyLock,
},
};
use wasm_bindgen::JsCast;
use web_sys::HtmlHeadElement;
mod body;
mod html;
mod link;
mod meta_tags;
mod script;
mod style;
mod stylesheet;
mod title;
pub use body::*;
pub use html::*;
pub use link::*;
pub use meta_tags::*;
pub use script::*;
pub use style::*;
pub use stylesheet::*;
pub use title::*;
/// Contains the current state of meta tags. To access it, you can use [`use_head`].
///
/// This should generally by provided somewhere in the root of your application using
/// [`provide_meta_context`].
#[derive(Clone, Debug)]
pub struct MetaContext {
    /// Metadata associated with the `<title>` element.
    pub(crate) title: TitleContext,
    /// The hydration cursor for the location in the `<head>` where arbitrary
    /// tags will be rendered (initialized lazily on first use; see the
    /// `Default` impl).
    pub(crate) cursor: Arc<LazyLock<SendWrapper<Cursor>>>,
}
impl MetaContext {
    /// Creates an empty [`MetaContext`].
    pub fn new() -> Self {
        Self::default()
    }
}
/// Text content of the head marker comment (rendered as `<!--HEAD-->` by
/// `<MetaTags/>`) that marks where head elements are injected/hydrated.
pub(crate) const HEAD_MARKER_COMMENT: &str = "HEAD";
/// Return value of [`Node::node_type`] for a comment.
/// <https://developer.mozilla.org/en-US/docs/Web/API/Node/nodeType#node.comment_node>
const COMMENT_NODE: u16 = 8;
impl Default for MetaContext {
    fn default() -> Self {
        // The cursor is built lazily, on first access in the browser: it walks
        // the direct children of <head> looking for the marker comment emitted
        // by the server-rendered <MetaTags/> component; hydration of head
        // elements begins at that node.
        let build_cursor: fn() -> SendWrapper<Cursor> = || {
            let head = document().head().expect("missing <head> element");
            let mut cursor = None;
            let mut child = head.first_child();
            while let Some(this_child) = child {
                // looking for a comment node whose text is exactly `HEAD`
                if this_child.node_type() == COMMENT_NODE
                    && this_child.text_content().as_deref()
                        == Some(HEAD_MARKER_COMMENT)
                {
                    cursor = Some(this_child);
                    break;
                }
                child = this_child.next_sibling();
            }
            SendWrapper::new(Cursor::new(
                cursor
                    .expect(
                        "no leptos_meta HEAD marker comment found. Did you \
                         include the <MetaTags/> component in the <head> of \
                         your server-rendered app?",
                    )
                    .unchecked_into(),
            ))
        };
        let cursor = Arc::new(LazyLock::new(build_cursor));
        Self {
            title: Default::default(),
            cursor,
        }
    }
}
/// Allows you to add `<head>` content from components located in the `<body>` of the application,
/// which can be accessed during server rendering via [`ServerMetaContextOutput`].
///
/// This should be provided as context during server rendering.
///
/// No content added after the first chunk of the stream has been sent will be included in the
/// initial `<head>`. Data that needs to be included in the `<head>` during SSR should be
/// synchronous or loaded as a blocking resource.
#[derive(Clone, Debug)]
pub struct ServerMetaContext {
    /// Metadata associated with the `<title>` element.
    pub(crate) title: TitleContext,
    /// Channel carrying serialized attribute HTML for the `<html>` element.
    pub(crate) html: Sender<String>,
    /// Channel carrying serialized attribute HTML for the `<body>` element.
    pub(crate) body: Sender<String>,
    /// Channel carrying arbitrary elements to be added to the `<head>` as HTML.
    #[allow(unused)] // used in SSR
    pub(crate) elements: Sender<String>,
}
/// Allows you to access `<head>` content that was inserted via [`ServerMetaContext`].
#[must_use = "If you do not use the output, adding meta tags will have no \
              effect."]
#[derive(Debug)]
pub struct ServerMetaContextOutput {
    // receiving halves of the channels on ServerMetaContext, drained by
    // `inject_meta_context` after the first chunk of HTML has rendered
    pub(crate) title: TitleContext,
    html: Receiver<String>,
    body: Receiver<String>,
    elements: Receiver<String>,
}
impl ServerMetaContext {
    /// Creates an empty [`ServerMetaContext`], returning the writer half
    /// (provided as context during rendering) and the reader half (consumed by
    /// [`ServerMetaContextOutput::inject_meta_context`]).
    pub fn new() -> (ServerMetaContext, ServerMetaContextOutput) {
        let title = TitleContext::default();
        let (html, html_out) = channel();
        let (body, body_out) = channel();
        let (elements, elements_out) = channel();
        (
            ServerMetaContext {
                title: title.clone(),
                html,
                body,
                elements,
            },
            ServerMetaContextOutput {
                title,
                html: html_out,
                body: body_out,
                elements: elements_out,
            },
        )
    }
}
impl ServerMetaContextOutput {
    /// Consumes the metadata, injecting it into the first chunk of an HTML
    /// stream in the appropriate place.
    ///
    /// This means that only meta tags rendered during the first chunk of the
    /// stream will be included.
    pub async fn inject_meta_context(
        self,
        mut stream: impl Stream<Item = String> + Send + Unpin,
    ) -> impl Stream<Item = String> + Send {
        // if the first chunk consists of a synchronously-available Suspend,
        // inject_meta_context can accidentally run a tick before it, even when
        // both are available. waiting a tick before awaiting the first chunk
        // in the Stream ensures that this always runs after that first chunk
        // see https://github.com/leptos-rs/leptos/issues/3976 for the original issue
        leptos::task::tick().await;

        // wait for the first chunk of the stream, to ensure our components have run
        let first_chunk = stream.next().await.unwrap_or_default();

        // length the <title> tag will occupy, if any
        let title = self.title.as_string();
        let title_len = title
            .as_ref()
            .map(|n| "<title>".len() + n.len() + "</title>".len())
            .unwrap_or(0);

        // collect all registered meta tags
        let meta_buf = self.elements.try_iter().collect::<String>();

        // get attribute-HTML strings for `<html>` and `<body>`
        let html_attrs = self.html.try_iter().collect::<String>();
        let body_attrs = self.body.try_iter().collect::<String>();

        let mut modified_chunk = if title_len == 0 && meta_buf.is_empty() {
            first_chunk
        } else {
            let mut buf = String::with_capacity(
                first_chunk.len() + title_len + meta_buf.len(),
            );
            let head_loc = first_chunk
                .find("</head>")
                .expect("you are using leptos_meta without a </head> tag");
            // insert immediately after the <!--HEAD--> marker when present,
            // otherwise just before </head>
            let marker_loc = first_chunk
                .find("<!--HEAD-->")
                .map(|pos| pos + "<!--HEAD-->".len())
                .unwrap_or(head_loc);
            let (before_marker, after_marker) =
                first_chunk.split_at(marker_loc);
            buf.push_str(before_marker);
            buf.push_str(&meta_buf);
            if let Some(title) = title {
                buf.push_str("<title>");
                buf.push_str(&title);
                buf.push_str("</title>");
            }
            buf.push_str(after_marker);
            buf
        };

        if !html_attrs.is_empty() {
            if let Some(index) = modified_chunk.find("<html") {
                // insert the serialized attributes right after the tag name
                let insert_pos = index + "<html".len();
                modified_chunk.insert_str(insert_pos, &html_attrs);
            }
        }
        if !body_attrs.is_empty() {
            if let Some(index) = modified_chunk.find("<body") {
                // insert the serialized attributes right after the tag name
                let insert_pos = index + "<body".len();
                modified_chunk.insert_str(insert_pos, &body_attrs);
            }
        }

        futures::stream::once(async move { modified_chunk }).chain(stream)
    }
}
/// Provides a [`MetaContext`], if there is not already one provided. This ensures that you can provide it
/// at the highest possible level, without overwriting a [`MetaContext`] that has already been provided
/// (for example, by a server-rendering integration.)
pub fn provide_meta_context() {
    // provide only when no ancestor scope has already done so
    let already_provided = use_context::<MetaContext>().is_some();
    if !already_provided {
        provide_context(MetaContext::new());
    }
}
/// Returns the current [`MetaContext`].
///
/// If there is no [`MetaContext`] in this or any parent scope, this will
/// create a new [`MetaContext`] and provide it to the current scope.
///
/// Note that this may cause confusing behavior, e.g., if multiple nested routes independently
/// call `use_head()` but a single [`MetaContext`] has not been provided at the application root.
/// The best practice is always to call [`provide_meta_context`] early in the application.
pub fn use_head() -> MetaContext {
    if let Some(ctx) = use_context::<MetaContext>() {
        ctx
    } else {
        // No context found: warn, then create and provide one so that later
        // callers in this scope at least share the same context.
        debug_warn!(
            "use_head() is being called without a MetaContext being \
             provided. We'll automatically create and provide one, but if \
             this is being called in a child route it may cause bugs. To \
             be safe, you should provide_meta_context() somewhere in the \
             root of the app."
        );
        let meta = MetaContext::new();
        provide_context(meta.clone());
        meta
    }
}
/// Wraps a head element (e.g., `<meta>`, `<link>`) in a [`RegisteredMetaTag`],
/// whose rendering impls mount it into the document `<head>` in the browser
/// and send it to the [`ServerMetaContext`] during SSR.
pub(crate) fn register<E, At, Ch>(
    el: HtmlElement<E, At, Ch>,
) -> RegisteredMetaTag<E, At, Ch>
where
    HtmlElement<E, At, Ch>: RenderHtml,
{
    RegisteredMetaTag { el }
}
struct RegisteredMetaTag<E, At, Ch> {
    // this element is never rendered in place: on the server, RenderHtml
    // serializes it into the ServerMetaContext; on the client, it is built or
    // hydrated against the <head> instead of its position in the tree
    el: HtmlElement<E, At, Ch>,
}
/// Retained state for a [`RegisteredMetaTag`]; unmounts its element on drop.
struct RegisteredMetaTagState<E, At, Ch>
where
    HtmlElement<E, At, Ch>: Render,
{
    state: <HtmlElement<E, At, Ch> as Render>::State,
}
impl<E, At, Ch> Drop for RegisteredMetaTagState<E, At, Ch>
where
    HtmlElement<E, At, Ch>: Render,
{
    fn drop(&mut self) {
        // remove the rendered tag from the document when the owning view is
        // dropped, since nothing else will unmount it from the <head>
        self.state.unmount();
    }
}
/// Returns the document's `<head>`, creating and attaching one if missing.
fn document_head() -> HtmlHeadElement {
    let document = document();
    match document.head() {
        Some(head) => head,
        None => {
            // no <head> yet: create one and append it to the root element
            let el = document.create_element("head").unwrap();
            let root = document.document_element().unwrap();
            _ = root.append_child(&el);
            el.unchecked_into()
        }
    }
}
impl<E, At, Ch> Render for RegisteredMetaTag<E, At, Ch>
where
    E: ElementType,
    At: Attribute,
    Ch: Render,
{
    type State = RegisteredMetaTagState<E, At, Ch>;

    fn build(self) -> Self::State {
        // build the element itself; it is moved into the <head> by
        // `Mountable::mount` on the state below
        let state = self.el.build();
        RegisteredMetaTagState { state }
    }

    fn rebuild(self, state: &mut Self::State) {
        self.el.rebuild(&mut state.state);
    }
}
impl<E, At, Ch> AddAnyAttr for RegisteredMetaTag<E, At, Ch>
where
    E: ElementType + Send,
    At: Attribute + Send,
    Ch: RenderHtml + Send,
{
    type Output<SomeNewAttr: Attribute> =
        RegisteredMetaTag<E, <At as NextAttribute>::Output<SomeNewAttr>, Ch>;

    fn add_any_attr<NewAttr: Attribute>(
        self,
        attr: NewAttr,
    ) -> Self::Output<NewAttr>
    where
        Self::Output<NewAttr>: RenderHtml,
    {
        // spread attributes are forwarded to the wrapped head element
        RegisteredMetaTag {
            el: self.el.add_any_attr(attr),
        }
    }
}
impl<E, At, Ch> RenderHtml for RegisteredMetaTag<E, At, Ch>
where
    E: ElementType,
    At: Attribute,
    Ch: RenderHtml + Send,
{
    type AsyncOutput = Self;
    type Owned = RegisteredMetaTag<E, At::CloneableOwned, Ch::Owned>;

    const MIN_LENGTH: usize = 0;
    // this view contributes no markup at its own position in the output
    const EXISTS: bool = false;

    fn dry_resolve(&mut self) {
        self.el.dry_resolve()
    }

    async fn resolve(self) -> Self::AsyncOutput {
        self // TODO?
    }

    fn to_html_with_buf(
        self,
        _buf: &mut String,
        _position: &mut Position,
        _escape: bool,
        _mark_branches: bool,
        _extra_attrs: Vec<AnyAttribute>,
    ) {
        // instead of writing into `_buf` (this element's position in the body),
        // render the tag into its own buffer and send it to the
        // ServerMetaContext, which splices it into the <head> of the first chunk
        #[cfg(feature = "ssr")]
        if let Some(cx) = use_context::<ServerMetaContext>() {
            let mut buf = String::new();
            self.el.to_html_with_buf(
                &mut buf,
                &mut Position::NextChild,
                false,
                false,
                vec![],
            );
            _ = cx.elements.send(buf); // fails only if the receiver is already dropped
        } else {
            let msg = "tried to use a leptos_meta component without \
                       `ServerMetaContext` provided";
            #[cfg(feature = "tracing")]
            tracing::warn!("{}", msg);
            #[cfg(not(feature = "tracing"))]
            eprintln!("{msg}");
        }
    }

    fn hydrate<const FROM_SERVER: bool>(
        self,
        _cursor: &Cursor,
        _position: &PositionState,
    ) -> Self::State {
        // hydrate against the shared cursor in the <head> (at the HEAD marker
        // comment), not the cursor for this component's position in the body
        let cursor = use_context::<MetaContext>()
            .expect(
                "attempting to hydrate `leptos_meta` components without a \
                 MetaContext provided",
            )
            .cursor;
        let state = self.el.hydrate::<FROM_SERVER>(
            &cursor,
            &PositionState::new(Position::NextChild),
        );
        RegisteredMetaTagState { state }
    }

    fn into_owned(self) -> Self::Owned {
        RegisteredMetaTag {
            el: self.el.into_owned(),
        }
    }
}
impl<E, At, Ch> Mountable for RegisteredMetaTagState<E, At, Ch>
where
    E: ElementType,
    At: Attribute,
    Ch: Render,
{
    fn unmount(&mut self) {
        self.state.unmount();
    }

    fn mount(
        &mut self,
        _parent: &leptos::tachys::renderer::types::Element,
        _marker: Option<&leptos::tachys::renderer::types::Node>,
    ) {
        // we always mount this to the <head>, which is the whole point
        // but this shouldn't warn about the parent being a regular element or being unused
        // because it will call "mount" with the parent where it is located in the component tree,
        // but actually be mounted to the <head>
        self.state.mount(&document_head(), None);
    }

    fn insert_before_this(&self, _child: &mut dyn Mountable) -> bool {
        // Registered meta tags will be mounted in the <head>, but *seem* to be mounted somewhere
        // else in the DOM. We should never tell the renderer that we have successfully mounted
        // something before this, because if e.g., a <Meta/> is the first item in an Either, then
        // the alternate view will end up being mounted in the <head> -- which is not at all what
        // we intended!
        false
    }

    fn elements(&self) -> Vec<leptos::tachys::renderer::types::Element> {
        self.state.elements()
    }
}
/// During server rendering, inserts the meta tags that have been generated by the other components
/// in this crate into the DOM. This should be placed somewhere inside the `<head>` element that is
/// being used during server rendering.
#[component]
pub fn MetaTags() -> impl IntoView {
    // renders only the HEAD marker comment during SSR; a no-op in the browser
    MetaTagsView
}
/// Unit view behind [`MetaTags`].
#[derive(Debug)]
struct MetaTagsView;
// this implementation doesn't do anything during client-side rendering, it's just for server-side
// rendering HTML for all the tags that will be injected into the `<head>`
//
// client-side rendering is handled by the individual components
impl Render for MetaTagsView {
    type State = ();

    // nothing to do on the client: individual meta components manage the <head>
    fn build(self) -> Self::State {}

    fn rebuild(self, _state: &mut Self::State) {}
}
impl AddAnyAttr for MetaTagsView {
    type Output<SomeNewAttr: Attribute> = MetaTagsView;

    fn add_any_attr<NewAttr: Attribute>(
        self,
        _attr: NewAttr,
    ) -> Self::Output<NewAttr>
    where
        Self::Output<NewAttr>: RenderHtml,
    {
        // spread attributes are ignored: this view renders no element
        self
    }
}
impl RenderHtml for MetaTagsView {
    type AsyncOutput = Self;
    type Owned = Self;

    const MIN_LENGTH: usize = 0;

    fn dry_resolve(&mut self) {}

    async fn resolve(self) -> Self::AsyncOutput {
        self
    }

    fn to_html_with_buf(
        self,
        buf: &mut String,
        _position: &mut Position,
        _escape: bool,
        _mark_branches: bool,
        _extra_attrs: Vec<AnyAttribute>,
    ) {
        // NOTE: this literal must stay in sync with HEAD_MARKER_COMMENT
        // ("HEAD"): inject_meta_context searches the first chunk for it, and
        // MetaContext::default looks for a comment node with that text
        buf.push_str("<!--HEAD-->");
    }

    fn hydrate<const FROM_SERVER: bool>(
        self,
        _cursor: &Cursor,
        _position: &PositionState,
    ) -> Self::State {
    }

    fn into_owned(self) -> Self::Owned {
        self
    }
}
/// Extension trait: falls back to the server-generated nonce (when the `nonce`
/// feature is enabled) if no explicit nonce was provided.
pub(crate) trait OrDefaultNonce {
    fn or_default_nonce(self) -> Option<Oco<'static, str>>;
}
impl OrDefaultNonce for Option<Oco<'static, str>> {
    fn or_default_nonce(self) -> Option<Oco<'static, str>> {
        #[cfg(feature = "nonce")]
        {
            use leptos::nonce::use_nonce;
            match self {
                // an explicitly provided nonce always wins
                Some(nonce) => Some(nonce),
                None => use_nonce().map(|n| Arc::clone(n.as_inner()).into()),
            }
        }
        #[cfg(not(feature = "nonce"))]
        {
            // without the feature there is no default to fall back to
            self
        }
    }
}
| rust | MIT | dd507168fa47b5eead64339431b0bd654bd1c951 | 2026-01-04T15:41:20.302544Z | false |
leptos-rs/leptos | https://github.com/leptos-rs/leptos/blob/dd507168fa47b5eead64339431b0bd654bd1c951/meta/src/title.rs | meta/src/title.rs | use crate::{use_head, MetaContext, ServerMetaContext};
use leptos::{
attr::{any_attribute::AnyAttribute, Attribute},
component,
oco::Oco,
prelude::{ArcTrigger, Notify, Track},
reactive::{effect::RenderEffect, owner::use_context},
tachys::{
dom::document,
hydration::Cursor,
view::{
add_attr::AddAnyAttr, Mountable, Position, PositionState, Render,
RenderHtml,
},
},
text_prop::TextProp,
IntoView,
};
use or_poisoned::OrPoisoned;
use std::sync::{
atomic::{AtomicU32, Ordering},
Arc, Mutex, RwLock,
};
/// Contains the current state of the document's `<title>`.
#[derive(Clone, Default)]
pub struct TitleContext {
    // source of unique IDs handed out to each <Title/> instance
    id: Arc<AtomicU32>,
    // active formatters keyed by owner ID; `as_string` uses the last entry
    formatter_stack: Arc<RwLock<Vec<(TitleId, Formatter)>>>,
    // active title texts keyed by owner ID; `as_string` uses the last entry
    text_stack: Arc<RwLock<Vec<(TitleId, TextProp)>>>,
    // notified whenever the stacks change, re-running the effect below
    revalidate: ArcTrigger,
    #[allow(clippy::type_complexity)]
    // the client-side effect that writes document.title (None until spawned)
    effect: Arc<Mutex<Option<RenderEffect<Option<Oco<'static, str>>>>>>,
}
impl core::fmt::Debug for TitleContext {
    // manual impl that prints only the type name, presumably because not all
    // field types (e.g., the render effect) can derive Debug — TODO confirm
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_tuple("TitleContext").finish()
    }
}
type TitleId = u32;
impl TitleContext {
    /// Returns the next unique ID for a `<Title/>` instance.
    fn next_id(&self) -> TitleId {
        self.id.fetch_add(1, Ordering::Relaxed)
    }

    /// Notifies the trigger so that the render effect recomputes the title.
    fn invalidate(&self) {
        self.revalidate.notify();
    }

    /// Starts the render effect that keeps `document.title` synchronized with
    /// the stacks. Idempotent: only the first call creates the effect.
    fn spawn_effect(&self) {
        let this = self.clone();
        let revalidate = self.revalidate.clone();
        let mut effect_lock = self.effect.lock().or_poisoned();
        if effect_lock.is_none() {
            *effect_lock = Some(RenderEffect::new({
                move |_| {
                    // re-run whenever invalidate() notifies the trigger
                    revalidate.track();
                    let text = this.as_string();
                    document().set_title(text.as_deref().unwrap_or_default());
                    text
                }
            }));
        }
    }

    /// Pushes new text and/or formatter entries for `id` and revalidates.
    fn push_text_and_formatter(
        &self,
        id: TitleId,
        text: Option<TextProp>,
        formatter: Option<Formatter>,
    ) {
        if let Some(text) = text {
            self.text_stack.write().or_poisoned().push((id, text));
        }
        if let Some(formatter) = formatter {
            self.formatter_stack
                .write()
                .or_poisoned()
                .push((id, formatter));
        }
        self.invalidate();
    }

    /// Replaces, inserts, or removes the text/formatter entries for `id`,
    /// revalidating the title if anything changed. Both stacks are locked up
    /// front, before the entries' positions are looked up.
    fn update_text_and_formatter(
        &self,
        id: TitleId,
        text: Option<TextProp>,
        formatter: Option<Formatter>,
    ) {
        let mut text_stack = self.text_stack.write().or_poisoned();
        let mut formatter_stack = self.formatter_stack.write().or_poisoned();
        let text_pos =
            text_stack.iter().position(|(item_id, _)| *item_id == id);
        let formatter_pos = formatter_stack
            .iter()
            .position(|(item_id, _)| *item_id == id);
        // (existing position, new value): replace, remove, insert, or no-op
        match (text_pos, text) {
            (None, None) => {}
            (Some(old), Some(new)) => {
                text_stack[old].1 = new;
                self.invalidate();
            }
            (Some(old), None) => {
                text_stack.remove(old);
                self.invalidate();
            }
            (None, Some(new)) => {
                text_stack.push((id, new));
                self.invalidate();
            }
        }
        match (formatter_pos, formatter) {
            (None, None) => {}
            (Some(old), Some(new)) => {
                formatter_stack[old].1 = new;
                self.invalidate();
            }
            (Some(old), None) => {
                formatter_stack.remove(old);
                self.invalidate();
            }
            (None, Some(new)) => {
                formatter_stack.push((id, new));
                self.invalidate();
            }
        }
    }

    /// Removes any entries for `id`, returning what was removed, and
    /// revalidates in case the removed entries were the active ones.
    fn remove_id(&self, id: TitleId) -> (Option<TextProp>, Option<Formatter>) {
        let mut text_stack = self.text_stack.write().or_poisoned();
        let text = text_stack
            .iter()
            .position(|(item_id, _)| *item_id == id)
            .map(|pos| text_stack.remove(pos).1);
        let mut formatter_stack = self.formatter_stack.write().or_poisoned();
        let formatter = formatter_stack
            .iter()
            .position(|(item_id, _)| *item_id == id)
            .map(|pos| formatter_stack.remove(pos).1);
        self.invalidate();
        (text, formatter)
    }

    /// Converts the title into a string that can be used as the text content of a `<title>` tag.
    ///
    /// Uses the most recently pushed text, passed through the most recently
    /// pushed formatter (if any); `None` when no text is set.
    pub fn as_string(&self) -> Option<Oco<'static, str>> {
        let title = self
            .text_stack
            .read()
            .or_poisoned()
            .last()
            .map(|n| n.1.get());
        title.map(|title| {
            if let Some(formatter) =
                self.formatter_stack.read().or_poisoned().last()
            {
                (formatter.1 .0)(title.into_owned()).into()
            } else {
                title
            }
        })
    }
}
/// A function that is applied to the text value before setting `document.title`.
// newtype over the boxed formatting function so it can be used as a prop
#[repr(transparent)]
pub struct Formatter(Box<dyn Fn(String) -> String + Send + Sync>);
impl<F> From<F> for Formatter
where
    F: Fn(String) -> String + Send + Sync + 'static,
{
    // lets any suitable closure be passed directly to the `formatter` prop
    #[inline(always)]
    fn from(f: F) -> Formatter {
        Formatter(Box::new(f))
    }
}
/// A component to set the document’s title by creating an [`HTMLTitleElement`](https://developer.mozilla.org/en-US/docs/Web/API/HTMLTitleElement).
///
/// The `title` and `formatter` can be set independently of one another. For example, you can create a root-level
/// `<Title formatter=.../>` that will wrap each of the text values of `<Title/>` components created lower in the tree.
///
/// ```
/// use leptos::prelude::*;
/// use leptos_meta::*;
///
/// #[component]
/// fn MyApp() -> impl IntoView {
/// provide_meta_context();
/// let formatter = |text| format!("{text} — Leptos Online");
///
/// view! {
/// <main>
/// <Title formatter/>
/// // ... routing logic here
/// </main>
/// }
/// }
///
/// #[component]
/// fn PageA() -> impl IntoView {
/// view! {
/// <main>
/// <Title text="Page A"/> // sets title to "Page A — Leptos Online"
/// </main>
/// }
/// }
///
/// #[component]
/// fn PageB() -> impl IntoView {
/// view! {
/// <main>
/// <Title text="Page B"/> // sets title to "Page B — Leptos Online"
/// </main>
/// }
/// }
/// ```
#[component]
pub fn Title(
    /// A function that will be applied to any text value before it’s set as the title.
    #[prop(optional, into)]
    mut formatter: Option<Formatter>,
    /// Sets the current `document.title`.
    #[prop(optional, into)]
    mut text: Option<TextProp>,
) -> impl IntoView {
    let meta = use_head();
    let server_ctx = use_context::<ServerMetaContext>();
    // unique ID tying this instance's entries to the shared title stacks
    let id = meta.title.next_id();
    if let Some(cx) = server_ctx {
        // if we are server rendering, we will not actually use these values via RenderHtml
        // instead, they'll be handled separately by the server integration
        // so it's safe to take them out of the props here
        cx.title
            .push_text_and_formatter(id, text.take(), formatter.take());
    };
    TitleView {
        id,
        meta,
        formatter,
        text,
    }
}
// The view type returned by `Title`. Renders no markup of its own; it only
// registers/unregisters title data with the shared `MetaContext`.
struct TitleView {
    // NOTE(review): presumably the same value flows into
    // `TitleViewState::id: TitleId`, so `TitleId` looks like a `u32` alias —
    // confirm against the MetaContext definitions.
    id: u32,
    meta: MetaContext,
    formatter: Option<Formatter>,
    text: Option<TextProp>,
}

// Retained state for a built/hydrated `TitleView`.
struct TitleViewState {
    id: TitleId,
    meta: MetaContext,
    // these are only Some(_) after being unmounted, and hold these values until dropped or remounted
    formatter: Option<Formatter>,
    text: Option<TextProp>,
}
impl Drop for TitleViewState {
    fn drop(&mut self) {
        // when TitleViewState is dropped, it should remove its ID from the text and formatter stacks
        // so that they no longer appear. it will also revalidate the whole title in case this one was active
        self.meta.title.remove_id(self.id);
    }
}
impl Render for TitleView {
    type State = TitleViewState;

    // Client-side initial build: starts the effect that keeps
    // `document.title` in sync; the text/formatter stay in the state until
    // `mount` pushes them onto the title stack.
    fn build(self) -> Self::State {
        let TitleView {
            id,
            meta,
            formatter,
            text,
        } = self;
        meta.title.spawn_effect();
        TitleViewState {
            id,
            meta,
            text,
            formatter,
        }
    }

    // Reactive update: replaces this id's entry on the title stack; the
    // context revalidates the active title itself, so no local state changes.
    fn rebuild(self, _state: &mut Self::State) {
        self.meta.title.update_text_and_formatter(
            self.id,
            self.text,
            self.formatter,
        );
    }
}
impl AddAnyAttr for TitleView {
    type Output<SomeNewAttr: Attribute> = TitleView;

    // Spread attributes are meaningless on a <title>, so they are discarded
    // and the view is returned unchanged.
    fn add_any_attr<NewAttr: Attribute>(
        self,
        _attr: NewAttr,
    ) -> Self::Output<NewAttr>
    where
        Self::Output<NewAttr>: RenderHtml,
    {
        self
    }
}
impl RenderHtml for TitleView {
    type AsyncOutput = Self;
    type Owned = Self;

    const MIN_LENGTH: usize = 0;
    // contributes no markup of its own to the HTML stream
    const EXISTS: bool = false;

    fn dry_resolve(&mut self) {}

    async fn resolve(self) -> Self::AsyncOutput {
        self
    }

    fn to_html_with_buf(
        self,
        _buf: &mut String,
        _position: &mut Position,
        _escape: bool,
        _mark_branches: bool,
        _extra_attrs: Vec<AnyAttribute>,
    ) {
        // meta tags are rendered into the buffer stored into the context
        // the value has already been taken out, when we're on the server
    }

    fn hydrate<const FROM_SERVER: bool>(
        self,
        _cursor: &Cursor,
        _position: &PositionState,
    ) -> Self::State {
        let TitleView {
            id,
            meta,
            formatter,
            text,
        } = self;
        meta.title.spawn_effect();
        // these need to be pushed here, rather than on mount, because mount() is not called when hydrating
        meta.title.push_text_and_formatter(id, text, formatter);
        // text/formatter are None in the state because they now live on the
        // title stack; they return to the state only on unmount
        TitleViewState {
            id,
            meta,
            text: None,
            formatter: None,
        }
    }

    fn into_owned(self) -> Self::Owned {
        self
    }
}
impl Mountable for TitleViewState {
    // Removing the id deactivates this title; the returned text/formatter are
    // parked in the state so a later remount can reinstate them.
    fn unmount(&mut self) {
        let (text, formatter) = self.meta.title.remove_id(self.id);
        if text.is_some() {
            self.text = text;
        }
        if formatter.is_some() {
            self.formatter = formatter;
        }
    }

    fn mount(
        &mut self,
        _parent: &leptos::tachys::renderer::types::Element,
        _marker: Option<&leptos::tachys::renderer::types::Node>,
    ) {
        // TitleView::el() guarantees that there is a <title> in the <head>
        // so there is no element to be mounted
        //
        // "mounting" in this case means that we actually want this title to be in active use
        // as a result, we will push it into the title stack and revalidate
        self.meta.title.push_text_and_formatter(
            self.id,
            self.text.take(),
            self.formatter.take(),
        );
    }

    // There is no DOM node owned by this view, so nothing can be inserted
    // relative to it and it reports no elements.
    fn insert_before_this(&self, _child: &mut dyn Mountable) -> bool {
        false
    }

    fn elements(&self) -> Vec<leptos::tachys::renderer::types::Element> {
        vec![]
    }
}
| rust | MIT | dd507168fa47b5eead64339431b0bd654bd1c951 | 2026-01-04T15:41:20.302544Z | false |
leptos-rs/leptos | https://github.com/leptos-rs/leptos/blob/dd507168fa47b5eead64339431b0bd654bd1c951/meta/src/stylesheet.rs | meta/src/stylesheet.rs | use crate::register;
use leptos::{
attr::global::GlobalAttributes, component, prelude::LeptosOptions,
tachys::html::element::link, IntoView,
};
/// Injects an [`HTMLLinkElement`](https://developer.mozilla.org/en-US/docs/Web/API/HTMLLinkElement) into the document
/// head that loads a stylesheet from the URL given by the `href` property.
///
/// Note that this does *not* work with the `cargo-leptos` `hash-files` feature: if you are using file
/// hashing, you should use [`HashedStylesheet`](crate::HashedStylesheet).
///
/// ```
/// use leptos::prelude::*;
/// use leptos_meta::*;
///
/// #[component]
/// fn MyApp() -> impl IntoView {
///     provide_meta_context();
///
///     view! {
///       <main>
///         <Stylesheet href="/style.css"/>
///       </main>
///     }
/// }
/// ```
#[component]
pub fn Stylesheet(
    /// The URL at which the stylesheet is located.
    #[prop(into)]
    href: String,
    /// An ID for the stylesheet.
    #[prop(optional, into)]
    id: Option<String>,
) -> impl IntoView {
    // TODO additional attributes
    // `register` hands the <link> to the meta context so it is rendered into
    // the document <head> rather than inline where the component is used
    register(link().id(id).rel("stylesheet").href(href))
}
/// Injects an [`HTMLLinkElement`](https://developer.mozilla.org/en-US/docs/Web/API/HTMLLinkElement) into the document head that loads a `cargo-leptos`-hashed stylesheet.
///
/// This should only be used in the application’s server-side `shell` function, as
/// [`LeptosOptions`] is not available in the browser. Unlike other `leptos_meta` components, it
/// will render the `<link>` it creates exactly where it is called.
#[component]
pub fn HashedStylesheet(
    /// Leptos options
    options: LeptosOptions,
    /// An ID for the stylesheet.
    #[prop(optional, into)]
    id: Option<String>,
    /// A base url, not including a trailing slash
    #[prop(optional, into)]
    root: Option<String>,
) -> impl IntoView {
    let mut css_file_name = options.output_name.to_string();
    if options.hash_files {
        // the hash file is written by cargo-leptos next to the server binary;
        // if the executable path can't be resolved, fall back to a relative path
        let exe_dir = std::env::current_exe()
            .map(|path| {
                path.parent().map(|p| p.to_path_buf()).unwrap_or_default()
            })
            .unwrap_or_default();
        let hash_path = exe_dir.join(options.hash_file.as_ref());
        if hash_path.exists() {
            let hashes = std::fs::read_to_string(&hash_path)
                .expect("failed to read hash file");
            // each non-empty line has the form `<kind>:<hash>`; the `css`
            // entry's hash is appended to the stylesheet's file stem
            for (kind, hash) in hashes
                .lines()
                .filter_map(|line| line.trim().split_once(':'))
            {
                if kind == "css" {
                    css_file_name.push_str(&format!(".{}", hash.trim()));
                }
            }
        }
    }
    css_file_name.push_str(".css");
    let pkg_path = &options.site_pkg_dir;
    let root = root.unwrap_or_default();
    link()
        .id(id)
        .rel("stylesheet")
        .href(format!("{root}/{pkg_path}/{css_file_name}"))
}
| rust | MIT | dd507168fa47b5eead64339431b0bd654bd1c951 | 2026-01-04T15:41:20.302544Z | false |
leptos-rs/leptos | https://github.com/leptos-rs/leptos/blob/dd507168fa47b5eead64339431b0bd654bd1c951/meta/src/style.rs | meta/src/style.rs | use crate::{register, OrDefaultNonce};
use leptos::{
component, oco::Oco, prelude::*, tachys::html::element::style, IntoView,
};
/// Injects an [`HTMLStyleElement`](https://developer.mozilla.org/en-US/docs/Web/API/HTMLStyleElement) into the document
/// head, accepting any of the valid attributes for that tag.
///
/// ```
/// use leptos::prelude::*;
/// use leptos_meta::*;
///
/// #[component]
/// fn MyApp() -> impl IntoView {
///     provide_meta_context();
///
///     view! {
///       <main>
///         <Style>
///           "body { font-weight: bold; }"
///         </Style>
///       </main>
///     }
/// }
/// ```
#[component]
pub fn Style(
    /// An ID for the `<style>` tag.
    #[prop(optional, into)]
    id: Option<Oco<'static, str>>,
    /// The [`media`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/style#attr-media) attribute.
    #[prop(optional, into)]
    media: Option<Oco<'static, str>>,
    /// The [`nonce`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/style#attr-nonce) attribute.
    #[prop(optional, into)]
    nonce: Option<Oco<'static, str>>,
    /// The [`title`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/style#attr-title) attribute.
    #[prop(optional, into)]
    title: Option<Oco<'static, str>>,
    /// The [`blocking`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/style#attr-blocking) attribute.
    #[prop(optional, into)]
    blocking: Option<Oco<'static, str>>,
    /// The content of the `<style>` tag.
    #[prop(optional)]
    children: Option<Children>,
) -> impl IntoView {
    // register() renders the element into <head>; when no explicit nonce is
    // given, fall back to the app-wide CSP nonce (or_default_nonce)
    register(
        style()
            .id(id)
            .media(media)
            .nonce(nonce.or_default_nonce())
            .title(title)
            .blocking(blocking)
            .child(children.map(|c| c())),
    )
}
| rust | MIT | dd507168fa47b5eead64339431b0bd654bd1c951 | 2026-01-04T15:41:20.302544Z | false |
leptos-rs/leptos | https://github.com/leptos-rs/leptos/blob/dd507168fa47b5eead64339431b0bd654bd1c951/meta/src/body.rs | meta/src/body.rs | use crate::ServerMetaContext;
use leptos::{
attr::{any_attribute::AnyAttribute, NextAttribute},
component, html,
reactive::owner::use_context,
tachys::{
dom::document,
html::attribute::Attribute,
hydration::Cursor,
view::{
add_attr::AddAnyAttr, Mountable, Position, PositionState, Render,
RenderHtml,
},
},
IntoView,
};
/// A component to set metadata on the document’s `<body>` element from
/// within the application.
///
/// This component takes no props, but can take any number of spread attributes
/// following the `{..}` operator.
///
/// ```
/// use leptos::prelude::*;
/// use leptos_meta::*;
///
/// #[component]
/// fn MyApp() -> impl IntoView {
///     provide_meta_context();
///     let (prefers_dark, set_prefers_dark) = signal(false);
///     let body_class = move || {
///         if prefers_dark.get() {
///             "dark".to_string()
///         } else {
///             "light".to_string()
///         }
///     };
///
///     view! {
///       <main>
///         <Body {..} class=body_class id="body"/>
///       </main>
///     }
/// }
/// ```
#[component]
pub fn Body() -> impl IntoView {
    // starts with no attributes; spread attrs are added via AddAnyAttr
    BodyView { attributes: () }
}
// View that applies a set of attributes to the existing document <body>
// element instead of rendering an element of its own.
struct BodyView<At> {
    attributes: At,
}

// Retained attribute state for a built/hydrated `BodyView`.
struct BodyViewState<At>
where
    At: Attribute,
{
    attributes: At::State,
}
impl<At> Render for BodyView<At>
where
    At: Attribute,
{
    type State = BodyViewState<At>;

    // Applies the attributes directly to the live document's <body>.
    fn build(self) -> Self::State {
        let el = document().body().expect("there to be a <body> element");
        let attributes = self.attributes.build(&el);
        BodyViewState { attributes }
    }

    fn rebuild(self, state: &mut Self::State) {
        self.attributes.rebuild(&mut state.attributes);
    }
}
impl<At> AddAnyAttr for BodyView<At>
where
    At: Attribute,
{
    type Output<SomeNewAttr: Attribute> =
        BodyView<<At as NextAttribute>::Output<SomeNewAttr>>;

    // Chains the new attribute onto the existing attribute tuple; this is how
    // `<Body {..} class=... />` spread attributes accumulate.
    fn add_any_attr<NewAttr: Attribute>(
        self,
        attr: NewAttr,
    ) -> Self::Output<NewAttr>
    where
        Self::Output<NewAttr>: RenderHtml,
    {
        BodyView {
            attributes: self.attributes.add_any_attr(attr),
        }
    }
}
impl<At> RenderHtml for BodyView<At>
where
    At: Attribute,
{
    type AsyncOutput = BodyView<At::AsyncOutput>;
    type Owned = BodyView<At::CloneableOwned>;

    const MIN_LENGTH: usize = At::MIN_LENGTH;

    fn dry_resolve(&mut self) {
        self.attributes.dry_resolve();
    }

    async fn resolve(self) -> Self::AsyncOutput {
        BodyView {
            attributes: self.attributes.resolve().await,
        }
    }

    // On the server, the attributes are not written into the normal HTML
    // stream (`_buf`); they are serialized and sent to the ServerMetaContext,
    // which splices them onto the <body> tag in the server integration.
    fn to_html_with_buf(
        self,
        _buf: &mut String,
        _position: &mut Position,
        _escape: bool,
        _mark_branches: bool,
        extra_attrs: Vec<AnyAttribute>,
    ) {
        if let Some(meta) = use_context::<ServerMetaContext>() {
            let mut buf = String::new();
            _ = html::attributes_to_html(
                (self.attributes, extra_attrs),
                &mut buf,
            );
            if !buf.is_empty() {
                _ = meta.body.send(buf);
            }
        }
    }

    fn hydrate<const FROM_SERVER: bool>(
        self,
        _cursor: &Cursor,
        _position: &PositionState,
    ) -> Self::State {
        let el = document().body().expect("there to be a <body> element");
        let attributes = self.attributes.hydrate::<FROM_SERVER>(&el);
        BodyViewState { attributes }
    }

    fn into_owned(self) -> Self::Owned {
        BodyView {
            attributes: self.attributes.into_cloneable_owned(),
        }
    }
}
impl<At> Mountable for BodyViewState<At>
where
    At: Attribute,
{
    // The <body> element always exists and is owned by the document, so
    // mount/unmount are no-ops: only the attributes are managed.
    fn unmount(&mut self) {}

    fn mount(
        &mut self,
        _parent: &leptos::tachys::renderer::types::Element,
        _marker: Option<&leptos::tachys::renderer::types::Node>,
    ) {
    }

    fn insert_before_this(&self, _child: &mut dyn Mountable) -> bool {
        false
    }

    fn elements(&self) -> Vec<leptos::tachys::renderer::types::Element> {
        vec![document()
            .body()
            .expect("there to be a <body> element")
            .into()]
    }
}
| rust | MIT | dd507168fa47b5eead64339431b0bd654bd1c951 | 2026-01-04T15:41:20.302544Z | false |
leptos-rs/leptos | https://github.com/leptos-rs/leptos/blob/dd507168fa47b5eead64339431b0bd654bd1c951/meta/src/script.rs | meta/src/script.rs | use crate::{register, OrDefaultNonce};
use leptos::{
component, oco::Oco, prelude::*, tachys::html::element::script, IntoView,
};
/// Injects an [`HTMLScriptElement`](https://developer.mozilla.org/en-US/docs/Web/API/HTMLScriptElement) into the document
/// head, accepting any of the valid attributes for that tag.
///
/// ```
/// use leptos::prelude::*;
/// use leptos_meta::*;
///
/// #[component]
/// fn MyApp() -> impl IntoView {
///     provide_meta_context();
///
///     view! {
///       <main>
///         <Script>
///           "console.log('Hello, world!');"
///         </Script>
///       </main>
///     }
/// }
/// ```
#[component]
pub fn Script(
    /// An ID for the `<script>` tag.
    #[prop(optional, into)]
    id: Option<Oco<'static, str>>,
    /// The [`async`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/script#attr-async) attribute.
    #[prop(optional, into)]
    async_: Option<Oco<'static, str>>,
    /// The [`crossorigin`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/script#attr-crossorigin) attribute.
    #[prop(optional, into)]
    crossorigin: Option<Oco<'static, str>>,
    /// The [`defer`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/script#attr-defer) attribute.
    #[prop(optional, into)]
    defer: Option<Oco<'static, str>>,
    /// The [`fetchpriority`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/script#attr-fetchpriority) attribute.
    #[prop(optional, into)]
    fetchpriority: Option<Oco<'static, str>>,
    /// The [`integrity`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/script#attr-integrity) attribute.
    #[prop(optional, into)]
    integrity: Option<Oco<'static, str>>,
    /// The [`nomodule`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/script#attr-nomodule) attribute.
    #[prop(optional, into)]
    nomodule: Option<Oco<'static, str>>,
    /// The [`nonce`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/script#attr-nonce) attribute.
    #[prop(optional, into)]
    nonce: Option<Oco<'static, str>>,
    /// The [`referrerpolicy`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/script#attr-referrerpolicy) attribute.
    #[prop(optional, into)]
    referrerpolicy: Option<Oco<'static, str>>,
    /// The [`src`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/script#attr-src) attribute.
    #[prop(optional, into)]
    src: Option<Oco<'static, str>>,
    /// The [`type`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/script#attr-type) attribute.
    #[prop(optional, into)]
    type_: Option<Oco<'static, str>>,
    /// The [`blocking`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/script#attr-blocking) attribute.
    #[prop(optional, into)]
    blocking: Option<Oco<'static, str>>,
    /// The content of the `<script>` tag.
    #[prop(optional)]
    children: Option<Children>,
) -> impl IntoView {
    // `async`/`type` are Rust keywords, hence the raw-identifier builder
    // methods; a missing nonce falls back to the app-wide CSP nonce
    register(
        script()
            .id(id)
            .r#async(async_)
            .crossorigin(crossorigin)
            .defer(defer)
            .fetchpriority(fetchpriority)
            .integrity(integrity)
            .nomodule(nomodule)
            .nonce(nonce.or_default_nonce())
            .referrerpolicy(referrerpolicy)
            .src(src)
            .r#type(type_)
            .blocking(blocking)
            .child(children.map(|c| c())),
    )
}
| rust | MIT | dd507168fa47b5eead64339431b0bd654bd1c951 | 2026-01-04T15:41:20.302544Z | false |
leptos-rs/leptos | https://github.com/leptos-rs/leptos/blob/dd507168fa47b5eead64339431b0bd654bd1c951/leptos_hot_reload/src/node.rs | leptos_hot_reload/src/node.rs | use crate::parsing::is_component_node;
use anyhow::Result;
use quote::ToTokens;
use rstml::node::{Node, NodeAttribute};
use serde::{Deserialize, Serialize};
// A lightweight virtual DOM structure we can use to hold
// the state of a Leptos view macro template. This is because
// `syn` types are `!Send` so we can't store them as we might like.
// This is only used to diff view macros for hot reloading so it's very minimal
// and ignores many of the data types.
#[allow(clippy::module_name_repetitions)]
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum LNode {
    /// A sequence of sibling nodes without a single root element.
    Fragment(Vec<LNode>),
    /// A plain text node.
    Text(String),
    /// An HTML element with its attributes and children.
    Element {
        name: String,
        attrs: Vec<(String, LAttributeValue)>,
        children: Vec<LNode>,
    },
    // don't need anything; skipped during patching because it should
    // contain its own view macros
    Component {
        name: String,
        props: Vec<(String, String)>,
        children: Vec<LNode>,
    },
    /// A dynamic block (`{...}`), stored as its source-token string so two
    /// templates can be compared for equality.
    DynChild(String),
}
/// The value of an attribute as written in a view template.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum LAttributeValue {
    /// A bare attribute with no value (e.g. `disabled`).
    Boolean,
    /// A static string-literal value.
    Static(String),
    // safely ignored
    Dynamic,
    Noop,
}
impl LNode {
    /// Converts a parsed `rstml` node list into a single `LNode` template:
    /// a lone root node is returned directly, anything else is wrapped in a
    /// [`LNode::Fragment`].
    ///
    /// # Errors
    ///
    /// Will return `Err` if parsing the view fails.
    pub fn parse_view(nodes: Vec<Node>) -> Result<LNode> {
        let mut out = Vec::new();
        for node in nodes {
            LNode::parse_node(node, &mut out)?;
        }
        if out.len() == 1 {
            // the `unreachable!` diverges, which satisfies `ok_or_else`'s
            // error type; len() == 1 guarantees pop() is Some
            out.pop().ok_or_else(|| {
                unreachable!("The last element should not be None.")
            })
        } else {
            Ok(LNode::Fragment(out))
        }
    }

    /// Converts one `rstml` node (recursively) into `LNode`s, appending the
    /// results to `views`. Fragments are flattened into their parent list.
    ///
    /// # Errors
    ///
    /// Will return `Err` if parsing the node fails.
    pub fn parse_node(node: Node, views: &mut Vec<LNode>) -> Result<()> {
        match node {
            Node::Fragment(frag) => {
                for child in frag.children {
                    LNode::parse_node(child, views)?;
                }
            }
            Node::RawText(text) => {
                views.push(LNode::Text(text.to_string_best()));
            }
            Node::Text(text) => {
                views.push(LNode::Text(text.value_string()));
            }
            Node::Block(block) => {
                // dynamic blocks are kept as their token text for comparison
                views.push(LNode::DynChild(
                    block.into_token_stream().to_string(),
                ));
            }
            Node::Element(el) => {
                if is_component_node(&el) {
                    let name = el.name().to_string();
                    let mut children = Vec::new();
                    for child in el.children {
                        LNode::parse_node(child, &mut children)?;
                    }
                    views.push(LNode::Component {
                        name,
                        props: el
                            .open_tag
                            .attributes
                            .into_iter()
                            .filter_map(|attr| match attr {
                                NodeAttribute::Attribute(attr) => Some((
                                    attr.key.to_string(),
                                    format!("{:#?}", attr.value()),
                                )),
                                NodeAttribute::Block(_) => None,
                            })
                            .collect(),
                        children,
                    });
                } else {
                    let name = el.name().to_string();
                    let mut attrs = Vec::new();
                    for attr in el.open_tag.attributes {
                        if let NodeAttribute::Attribute(attr) = attr {
                            let name = attr.key.to_string();
                            // literal string values are kept; anything else
                            // (expressions, signals) is treated as Dynamic
                            if let Some(value) = attr.value_literal_string() {
                                attrs.push((
                                    name,
                                    LAttributeValue::Static(value),
                                ));
                            } else {
                                attrs.push((name, LAttributeValue::Dynamic));
                            }
                        }
                    }
                    let mut children = Vec::new();
                    for child in el.children {
                        LNode::parse_node(child, &mut children)?;
                    }
                    views.push(LNode::Element {
                        name,
                        attrs,
                        children,
                    });
                }
            }
            _ => {}
        }
        Ok(())
    }

    /// Renders this template as dev-mode HTML; components and dynamic
    /// children become placeholder `<pre>` blocks bracketed by marker
    /// comments, since their content only exists after compilation.
    pub fn to_html(&self) -> String {
        match self {
            LNode::Fragment(frag) => frag.iter().map(LNode::to_html).collect(),
            LNode::Text(text) => text.to_owned(),
            LNode::Component { name, .. } => format!(
                "<!--<{name}>--><pre><{name}/> will load once Rust code \
                 has been compiled.</pre><!--</{name}>-->"
            ),
            LNode::DynChild(_) => "<!--<DynChild>--><pre>Dynamic content will \
                                   load once Rust code has been \
                                   compiled.</pre><!--</DynChild>-->"
                .to_string(),
            LNode::Element {
                name,
                attrs,
                children,
            } => {
                // this is naughty, but the browsers are tough and can handle it
                // I wouldn't do this for real code, but this is just for dev mode
                let is_self_closing = children.is_empty();
                let attrs = attrs
                    .iter()
                    .filter_map(|(name, value)| match value {
                        LAttributeValue::Boolean => Some(format!("{name} ")),
                        LAttributeValue::Static(value) => {
                            Some(format!("{name}=\"{value}\" "))
                        }
                        LAttributeValue::Dynamic | LAttributeValue::Noop => {
                            None
                        }
                    })
                    .collect::<String>();
                let children =
                    children.iter().map(LNode::to_html).collect::<String>();
                if is_self_closing {
                    format!("<{name} {attrs}/>")
                } else {
                    format!("<{name} {attrs}>{children}</{name}>")
                }
            }
        }
    }
}
| rust | MIT | dd507168fa47b5eead64339431b0bd654bd1c951 | 2026-01-04T15:41:20.302544Z | false |
leptos-rs/leptos | https://github.com/leptos-rs/leptos/blob/dd507168fa47b5eead64339431b0bd654bd1c951/leptos_hot_reload/src/lib.rs | leptos_hot_reload/src/lib.rs | extern crate proc_macro;
use anyhow::Result;
use camino::Utf8PathBuf;
use diff::Patches;
use node::LNode;
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
fs::File,
io::Read,
path::{Path, PathBuf},
sync::Arc,
};
use syn::{
spanned::Spanned,
visit::{self, Visit},
Macro,
};
use walkdir::WalkDir;
pub mod diff;
pub mod node;
pub mod parsing;
/// The client-side JavaScript that applies hot-reload patches in the browser.
pub const HOT_RELOAD_JS: &str = include_str!("patch.js");

/// Tracks every `view!` macro invocation per source file, so an edited file
/// can be diffed against its previously-seen templates for hot reloading.
#[derive(Debug, Clone, Default)]
pub struct ViewMacros {
    // keyed by original location identifier
    views: Arc<RwLock<HashMap<Utf8PathBuf, Vec<MacroInvocation>>>>,
}
impl ViewMacros {
    /// Creates an empty registry.
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Walks the given paths, parses every `.rs` file found, and replaces the
    /// entire registry with the result.
    ///
    /// # Errors
    ///
    /// Will return `Err` if the path is not UTF-8 path or the contents of the file cannot be parsed.
    pub fn update_from_paths<T: AsRef<Path>>(&self, paths: &[T]) -> Result<()> {
        let mut views = HashMap::new();
        for path in paths {
            for entry in WalkDir::new(path).into_iter().flatten() {
                if entry.file_type().is_file() {
                    let path: PathBuf = entry.path().into();
                    let path = Utf8PathBuf::try_from(path)?;
                    // NOTE(review): `Utf8Path::ends_with` compares whole path
                    // components, so the second clause only matches a file
                    // literally named ".rs"; the extension check covers the
                    // normal case — confirm whether the clause is intentional.
                    if path.extension() == Some("rs") || path.ends_with(".rs") {
                        let macros = Self::parse_file(&path)?;
                        let entry = views.entry(path.clone()).or_default();
                        *entry = macros;
                    }
                }
            }
        }
        *self.views.write() = views;
        Ok(())
    }

    /// Parses a single file and returns every `view!` macro in it, each with
    /// a stable id derived from the file path and the macro's line number.
    ///
    /// # Errors
    ///
    /// Will return `Err` if the contents of the file cannot be parsed.
    pub fn parse_file(path: &Utf8PathBuf) -> Result<Vec<MacroInvocation>> {
        let mut file = File::open(path)?;
        let mut content = String::new();
        file.read_to_string(&mut content)?;
        let ast = syn::parse_file(&content)?;
        let mut visitor = ViewMacroVisitor::default();
        visitor.visit_file(&ast);
        let mut views = Vec::new();
        for view in visitor.views {
            let span = view.span();
            let id = span_to_stable_id(path, span.start().line);
            if view.tokens.is_empty() {
                // an empty `view! {}` still gets an entry so positions line up
                views.push(MacroInvocation {
                    id,
                    template: LNode::Fragment(Vec::new()),
                });
            } else {
                let tokens = view.tokens.clone().into_iter();
                // TODO handle class = ...
                let rsx = rstml::parse2(
                    tokens.collect::<proc_macro2::TokenStream>(),
                )?;
                let template = LNode::parse_view(rsx)?;
                views.push(MacroInvocation { id, template });
            }
        }
        Ok(views)
    }

    /// Re-parses `path` and diffs it against the stored templates, returning
    /// a patch set for every view whose template changed. Returns `Ok(None)`
    /// when the file is unknown, or when the number of views changed (so the
    /// old and new views can't be matched positionally).
    ///
    /// # Errors
    ///
    /// Will return `Err` if the contents of the file cannot be parsed.
    pub fn patch(&self, path: &Utf8PathBuf) -> Result<Option<Patches>> {
        let new_views = Self::parse_file(path)?;
        let mut lock = self.views.write();
        let diffs = match lock.get(path) {
            None => return Ok(None),
            Some(current_views) => {
                if current_views.len() == new_views.len() {
                    let mut diffs = Vec::new();
                    for (current_view, new_view) in
                        current_views.iter().zip(&new_views)
                    {
                        if current_view.id == new_view.id
                            && current_view.template != new_view.template
                        {
                            diffs.push((
                                current_view.id.clone(),
                                current_view.template.diff(&new_view.template),
                            ));
                        }
                    }
                    diffs
                } else {
                    // TODO: instead of simply returning no patches, when number of views differs,
                    // we can compare views content to determine which views were shifted
                    // or come up with another idea that will allow to send patches when views were shifted/removed/added
                    lock.insert(path.clone(), new_views);
                    return Ok(None);
                }
            }
        };
        // update the status to the new views
        lock.insert(path.clone(), new_views);
        Ok(Some(Patches(diffs)))
    }
}
/// A single `view!` invocation: its stable location id plus the parsed template.
#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct MacroInvocation {
    id: String,
    template: LNode,
}

impl core::fmt::Debug for MacroInvocation {
    // The template tree is large and noisy, so Debug output shows only the id.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("MacroInvocation")
            .field("id", &self.id)
            .finish_non_exhaustive()
    }
}
/// Syntax-tree visitor that collects a reference to every `view!` macro
/// invocation found in a parsed file.
#[derive(Default, Debug)]
pub struct ViewMacroVisitor<'a> {
    views: Vec<&'a Macro>,
}
impl<'ast> Visit<'ast> for ViewMacroVisitor<'ast> {
    /// Records every `view! { ... }` invocation encountered in the AST.
    fn visit_macro(&mut self, node: &'ast Macro) {
        // `Path::is_ident("view")` is equivalent to
        // `path.get_ident().map(ToString::to_string) == Some("view".to_string())`
        // (single segment, no arguments, no leading colon) but avoids
        // allocating a `String` for every macro visited.
        if node.path.is_ident("view") {
            self.views.push(node);
        }
        // Delegate to the default impl to visit any nested functions.
        visit::visit_macro(self, node);
    }
}
/// Builds a stable identifier for a `view!` macro invocation: the file path
/// with both `/` and `\` separators replaced by `-`, followed by the 1-based
/// line number of the macro.
pub fn span_to_stable_id(path: impl AsRef<Path>, line: usize) -> String {
    let normalized: String = path
        .as_ref()
        .to_str()
        .unwrap_or_default()
        .chars()
        .map(|c| match c {
            '/' | '\\' => '-',
            other => other,
        })
        .collect();
    format!("{normalized}-{line}")
}
| rust | MIT | dd507168fa47b5eead64339431b0bd654bd1c951 | 2026-01-04T15:41:20.302544Z | false |
leptos-rs/leptos | https://github.com/leptos-rs/leptos/blob/dd507168fa47b5eead64339431b0bd654bd1c951/leptos_hot_reload/src/diff.rs | leptos_hot_reload/src/diff.rs | use crate::node::{LAttributeValue, LNode};
use indexmap::IndexMap;
use serde::{Deserialize, Serialize};
// Maps each preserved node of the old template (dynamic children and
// components) to its path of child indices from the root, so patches can
// reuse the corresponding live DOM nodes instead of re-rendering them.
#[derive(Debug, Default)]
struct OldChildren(IndexMap<LNode, Vec<usize>>);
impl LNode {
    /// Computes the list of patches required to transform `self` into `other`.
    #[must_use]
    pub fn diff(&self, other: &LNode) -> Vec<Patch> {
        let mut old_children = OldChildren::default();
        // first index every dynamic/component node of the old tree by path,
        // so replacements can move live DOM nodes rather than recreate them
        self.add_old_children(vec![], &mut old_children);
        self.diff_at(other, &[], &old_children)
    }

    // Converts a node of the new tree into a `ReplacementNode`: a path into
    // the existing DOM when the identical node already exists, otherwise
    // freshly generated content.
    fn to_replacement_node(
        &self,
        old_children: &OldChildren,
    ) -> ReplacementNode {
        match old_children.0.get(self) {
            // if the child already exists in the DOM, we can pluck it out
            // and move it around
            Some(path) => ReplacementNode::Path(path.to_owned()),
            // otherwise, we should generate some HTML
            // but we need to do this recursively in case we're replacing an element
            // with children who need to be plucked out
            None => match self {
                LNode::Fragment(fragment) => ReplacementNode::Fragment(
                    fragment
                        .iter()
                        .map(|node| node.to_replacement_node(old_children))
                        .collect(),
                ),
                LNode::Element {
                    name,
                    attrs,
                    children,
                } => ReplacementNode::Element {
                    name: name.to_owned(),
                    attrs: attrs
                        .iter()
                        .filter_map(|(name, value)| match value {
                            // boolean attrs serialize as name="name"
                            LAttributeValue::Boolean => {
                                Some((name.to_owned(), name.to_owned()))
                            }
                            LAttributeValue::Static(value) => {
                                Some((name.to_owned(), value.to_owned()))
                            }
                            _ => None,
                        })
                        .collect(),
                    children: children
                        .iter()
                        .map(|node| node.to_replacement_node(old_children))
                        .collect(),
                },
                LNode::Text(_)
                | LNode::Component { .. }
                | LNode::DynChild(_) => ReplacementNode::Html(self.to_html()),
            },
        }
    }

    // Records the path of every node that must survive patching (dynamic
    // children and components) into `positions`, recursing through elements,
    // fragments, and component children.
    fn add_old_children(&self, path: Vec<usize>, positions: &mut OldChildren) {
        match self {
            LNode::Fragment(frag) => {
                for (idx, child) in frag.iter().enumerate() {
                    let mut new_path = path.clone();
                    new_path.push(idx);
                    child.add_old_children(new_path, positions);
                }
            }
            LNode::Element { children, .. } => {
                for (idx, child) in children.iter().enumerate() {
                    let mut new_path = path.clone();
                    new_path.push(idx);
                    child.add_old_children(new_path, positions);
                }
            }
            // need to insert dynamic content and children, as these might change
            LNode::DynChild(_) => {
                positions.0.insert(self.clone(), path);
            }
            LNode::Component { children, .. } => {
                positions.0.insert(self.clone(), path.clone());
                for (idx, child) in children.iter().enumerate() {
                    let mut new_path = path.clone();
                    new_path.push(idx);
                    child.add_old_children(new_path, positions);
                }
            }
            // can just create text nodes, whatever
            LNode::Text(_) => {}
        }
    }

    // Diffs `self` against `other` at the node addressed by `path`,
    // producing the patches for this subtree.
    fn diff_at(
        &self,
        other: &LNode,
        path: &[usize],
        orig_children: &OldChildren,
    ) -> Vec<Patch> {
        // different node kinds: replace the whole subtree
        if std::mem::discriminant(self) != std::mem::discriminant(other) {
            return vec![Patch {
                path: path.to_owned(),
                action: PatchAction::ReplaceWith(
                    other.to_replacement_node(orig_children),
                ),
            }];
        }
        match (self, other) {
            // fragment: diff children
            (LNode::Fragment(old), LNode::Fragment(new)) => {
                LNode::diff_children(path, old, new, orig_children)
            }
            // text node: replace text
            (LNode::Text(_), LNode::Text(new)) => vec![Patch {
                path: path.to_owned(),
                action: PatchAction::SetText(new.to_owned()),
            }],
            // elements
            (
                LNode::Element {
                    name: old_name,
                    attrs: old_attrs,
                    children: old_children,
                },
                LNode::Element {
                    name: new_name,
                    attrs: new_attrs,
                    children: new_children,
                },
            ) => {
                let tag_patch = (old_name != new_name).then(|| Patch {
                    path: path.to_owned(),
                    action: PatchAction::ChangeTagName(new_name.to_owned()),
                });
                let attrs_patch = LNode::diff_attrs(path, old_attrs, new_attrs);
                let children_patch = LNode::diff_children(
                    path,
                    old_children,
                    new_children,
                    orig_children,
                );
                attrs_patch
                    .into_iter()
                    // tag patch comes second so we remove old attrs before copying them over
                    .chain(tag_patch)
                    .chain(children_patch)
                    .collect()
            }
            // components + dynamic context: no patches
            (
                LNode::Component {
                    name: old_name,
                    children: old_children,
                    ..
                },
                LNode::Component {
                    name: new_name,
                    children: new_children,
                    ..
                },
            ) if old_name == new_name => {
                // NOTE(review): the two extra zero indices presumably step
                // through the component's wrapper nodes to reach its children
                // — confirm against the client-side patcher (patch.js)
                let mut path = path.to_vec();
                path.push(0);
                path.push(0);
                LNode::diff_children(
                    &path,
                    old_children,
                    new_children,
                    orig_children,
                )
            }
            _ => vec![],
        }
    }

    // Emits SetAttribute patches for attributes that are new or whose value
    // changed, followed by RemoveAttribute patches for those that vanished.
    fn diff_attrs<'a>(
        path: &'a [usize],
        old: &'a [(String, LAttributeValue)],
        new: &'a [(String, LAttributeValue)],
    ) -> impl Iterator<Item = Patch> + 'a {
        let additions = new
            .iter()
            .filter_map(|(name, new_value)| {
                let old_attr = old.iter().find(|(o_name, _)| o_name == name);
                let replace = match old_attr {
                    None => true,
                    Some((_, old_value)) if old_value != new_value => true,
                    _ => false,
                };
                if replace {
                    match &new_value {
                        // a boolean attribute is set with an empty value
                        LAttributeValue::Boolean => {
                            Some((name.to_owned(), String::new()))
                        }
                        LAttributeValue::Static(s) => {
                            Some((name.to_owned(), s.to_owned()))
                        }
                        // dynamic values are managed by the running app
                        _ => None,
                    }
                } else {
                    None
                }
            })
            .map(|(name, value)| Patch {
                path: path.to_owned(),
                action: PatchAction::SetAttribute(name, value),
            });
        let removals = old.iter().filter_map(|(name, _)| {
            if new.iter().any(|(new_name, _)| new_name == name) {
                None
            } else {
                Some(Patch {
                    path: path.to_owned(),
                    action: PatchAction::RemoveAttribute(name.to_owned()),
                })
            }
        });
        additions.chain(removals)
    }

    // Diffs two child lists with a Levenshtein edit-distance matrix, then
    // backtracks through it to emit minimal insert/remove/substitute patches.
    fn diff_children(
        path: &[usize],
        old: &[LNode],
        new: &[LNode],
        old_children: &OldChildren,
    ) -> Vec<Patch> {
        if old.is_empty() && new.is_empty() {
            vec![]
        } else if old.is_empty() {
            // nothing existed before: append all new children as HTML
            vec![Patch {
                path: path.to_owned(),
                action: PatchAction::AppendChildren(
                    new.iter()
                        .map(LNode::to_html)
                        .map(ReplacementNode::Html)
                        .collect(),
                ),
            }]
        } else if new.is_empty() {
            vec![Patch {
                path: path.to_owned(),
                action: PatchAction::ClearChildren,
            }]
        } else {
            // mat[j * width + i] = edit distance between old[..i] and new[..j]
            let width = old.len() + 1;
            let height = new.len() + 1;
            let mut mat = vec![0; width * height];
            #[allow(clippy::needless_range_loop)]
            for i in 1..width {
                mat[i] = i;
            }
            for i in 1..height {
                mat[i * width] = i;
            }
            for j in 1..height {
                for i in 1..width {
                    if old[i - 1] == new[j - 1] {
                        mat[j * width + i] = mat[(j - 1) * width + (i - 1)];
                    } else {
                        mat[j * width + i] = (mat[(j - 1) * width + i] + 1)
                            .min(mat[j * width + (i - 1)] + 1)
                            .min(mat[(j - 1) * width + (i - 1)] + 1)
                    }
                }
            }
            // backtrack from the bottom-right corner, emitting one patch per
            // edit operation on the optimal path
            let (mut i, mut j) = (old.len(), new.len());
            let mut patches = vec![];
            while i > 0 || j > 0 {
                if i > 0 && j > 0 && old[i - 1] == new[j - 1] {
                    // equal nodes: no patch, step diagonally
                    i -= 1;
                    j -= 1;
                } else {
                    let current = mat[j * width + i];
                    if i > 0
                        && j > 0
                        && mat[(j - 1) * width + i - 1] + 1 == current
                    {
                        // substitution: recursively diff the two nodes
                        let mut new_path = path.to_owned();
                        new_path.push(i - 1);
                        let diffs = old[i - 1].diff_at(
                            &new[j - 1],
                            &new_path,
                            old_children,
                        );
                        patches.extend(&mut diffs.into_iter());
                        i -= 1;
                        j -= 1;
                    } else if i > 0 && mat[j * width + i - 1] + 1 == current {
                        // deletion from the old child list
                        patches.push(Patch {
                            path: path.to_owned(),
                            action: PatchAction::RemoveChild { at: i - 1 },
                        });
                        i -= 1;
                    } else if j > 0 && mat[(j - 1) * width + i] + 1 == current {
                        // insertion from the new child list
                        patches.push(Patch {
                            path: path.to_owned(),
                            action: PatchAction::InsertChild {
                                before: i,
                                child: new[j - 1]
                                    .to_replacement_node(old_children),
                            },
                        });
                        j -= 1;
                    } else {
                        // the DP recurrence guarantees one branch matches
                        unreachable!();
                    }
                }
            }
            patches
        }
    }
}
/// All patch lists produced for one file change, keyed by the stable id of
/// each changed `view!` macro.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Patches(pub Vec<(String, Vec<Patch>)>);

/// A single DOM mutation, addressed by a path of child indices from the root.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Patch {
    path: Vec<usize>,
    action: PatchAction,
}

/// The kind of mutation a [`Patch`] performs at its target path.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum PatchAction {
    ReplaceWith(ReplacementNode),
    ChangeTagName(String),
    RemoveAttribute(String),
    SetAttribute(String, String),
    SetText(String),
    ClearChildren,
    AppendChildren(Vec<ReplacementNode>),
    RemoveChild {
        at: usize,
    },
    InsertChild {
        before: usize,
        child: ReplacementNode,
    },
    // NOTE(review): not produced by `diff_children` in this file —
    // presumably consumed/constructed by the client patcher; confirm.
    InsertChildAfter {
        after: usize,
        child: ReplacementNode,
    },
}

/// Content used to fill a patch target: raw HTML, a path to an existing live
/// node to move, or a structured fragment/element to build.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum ReplacementNode {
    Html(String),
    Path(Vec<usize>),
    Fragment(Vec<ReplacementNode>),
    Element {
        name: String,
        attrs: Vec<(String, String)>,
        children: Vec<ReplacementNode>,
    },
}
#[cfg(test)]
mod tests {
    use crate::{
        diff::{Patch, PatchAction, ReplacementNode},
        node::LAttributeValue,
        LNode,
    };
    // A changed text node produces one SetText patch addressed at the root
    // (empty path).
    #[test]
    fn patches_text() {
        let a = LNode::Text("foo".into());
        let b = LNode::Text("bar".into());
        let delta = a.diff(&b);
        assert_eq!(
            delta,
            vec![Patch {
                path: vec![],
                action: PatchAction::SetText("bar".into())
            }]
        );
    }
    // Attribute diffs: changed values are re-set, new attributes added, and
    // attributes missing from the new node removed.
    #[test]
    fn patches_attrs() {
        let a = LNode::Element {
            name: "button".into(),
            attrs: vec![
                ("class".into(), LAttributeValue::Static("a".into())),
                ("type".into(), LAttributeValue::Static("button".into())),
            ],
            children: vec![],
        };
        let b = LNode::Element {
            name: "button".into(),
            attrs: vec![
                ("class".into(), LAttributeValue::Static("a b".into())),
                ("id".into(), LAttributeValue::Static("button".into())),
            ],
            children: vec![],
        };
        let delta = a.diff(&b);
        assert_eq!(
            delta,
            vec![
                Patch {
                    path: vec![],
                    action: PatchAction::SetAttribute(
                        "class".into(),
                        "a b".into()
                    )
                },
                Patch {
                    path: vec![],
                    action: PatchAction::SetAttribute(
                        "id".into(),
                        "button".into()
                    )
                },
                Patch {
                    path: vec![],
                    action: PatchAction::RemoveAttribute("type".into())
                },
            ]
        );
    }
    // Text changes inside children are addressed by child index
    // (path [1] targets the second child).
    #[test]
    fn patches_child_text() {
        let a = LNode::Element {
            name: "button".into(),
            attrs: vec![],
            children: vec![
                LNode::Text("foo".into()),
                LNode::Text("bar".into()),
            ],
        };
        let b = LNode::Element {
            name: "button".into(),
            attrs: vec![],
            children: vec![
                LNode::Text("foo".into()),
                LNode::Text("baz".into()),
            ],
        };
        let delta = a.diff(&b);
        assert_eq!(
            delta,
            vec![Patch {
                path: vec![1],
                action: PatchAction::SetText("baz".into())
            },]
        );
    }
    // A child prepended in the new tree becomes an InsertChild before index 0,
    // with its text child carried as ReplacementNode::Html.
    #[test]
    fn inserts_child() {
        let a = LNode::Element {
            name: "div".into(),
            attrs: vec![],
            children: vec![LNode::Element {
                name: "button".into(),
                attrs: vec![],
                children: vec![LNode::Text("bar".into())],
            }],
        };
        let b = LNode::Element {
            name: "div".into(),
            attrs: vec![],
            children: vec![
                LNode::Element {
                    name: "button".into(),
                    attrs: vec![],
                    children: vec![LNode::Text("foo".into())],
                },
                LNode::Element {
                    name: "button".into(),
                    attrs: vec![],
                    children: vec![LNode::Text("bar".into())],
                },
            ],
        };
        let delta = a.diff(&b);
        assert_eq!(
            delta,
            vec![Patch {
                path: vec![],
                action: PatchAction::InsertChild {
                    before: 0,
                    child: ReplacementNode::Element {
                        name: "button".into(),
                        attrs: vec![],
                        children: vec![ReplacementNode::Html("foo".into())]
                    }
                }
            }]
        );
    }
    // A child dropped from the new tree becomes a RemoveChild at its old index.
    #[test]
    fn removes_child() {
        let a = LNode::Element {
            name: "div".into(),
            attrs: vec![],
            children: vec![
                LNode::Element {
                    name: "button".into(),
                    attrs: vec![],
                    children: vec![LNode::Text("foo".into())],
                },
                LNode::Element {
                    name: "button".into(),
                    attrs: vec![],
                    children: vec![LNode::Text("bar".into())],
                },
            ],
        };
        let b = LNode::Element {
            name: "div".into(),
            attrs: vec![],
            children: vec![LNode::Element {
                name: "button".into(),
                attrs: vec![],
                children: vec![LNode::Text("foo".into())],
            }],
        };
        let delta = a.diff(&b);
        assert_eq!(
            delta,
            vec![Patch {
                path: vec![],
                action: PatchAction::RemoveChild { at: 1 }
            },]
        );
    }
}
| rust | MIT | dd507168fa47b5eead64339431b0bd654bd1c951 | 2026-01-04T15:41:20.302544Z | false |
leptos-rs/leptos | https://github.com/leptos-rs/leptos/blob/dd507168fa47b5eead64339431b0bd654bd1c951/leptos_hot_reload/src/parsing.rs | leptos_hot_reload/src/parsing.rs | use rstml::node::{CustomNode, NodeElement, NodeName};
/// Extracts the single expression from a `syn::Block`, if the block consists
/// of exactly one expression statement without a trailing semicolon.
///
/// Returns `None` for empty blocks, multi-statement blocks, and statements
/// that are not plain expressions. Examples of blocks that qualify:
/// ```no_build
/// // "string literal" in
/// {"string literal"}
/// // number literal
/// {0x12}
/// // boolean literal
/// {true}
/// // variable
/// {path::x}
/// ```
#[must_use]
pub fn block_to_primitive_expression(block: &syn::Block) -> Option<&syn::Expr> {
    // A slice pattern covers both requirements at once: exactly one
    // statement, and that statement is an expression with no semicolon.
    match block.stmts.as_slice() {
        [syn::Stmt::Expr(expr, None)] => Some(expr),
        _ => None,
    }
}
/// Converts simple literals to its string representation.
///
/// This function doesn't convert literal wrapped inside block
/// like: `{"string"}`.
#[must_use]
pub fn value_to_string(value: &syn::Expr) -> Option<String> {
    // Only literal expressions are convertible; bail out early otherwise.
    let syn::Expr::Lit(expr_lit) = value else {
        return None;
    };
    match &expr_lit.lit {
        syn::Lit::Str(s) => Some(s.value()),
        syn::Lit::Char(c) => Some(c.value().to_string()),
        // Numeric literals are rendered from their base-10 digits, ignoring
        // any type suffix (e.g. `1u32` -> "1").
        syn::Lit::Int(i) => Some(i.base10_digits().to_string()),
        syn::Lit::Float(f) => Some(f.base10_digits().to_string()),
        _ => None,
    }
}
/// Returns `true` when the tag name denotes a component rather than a plain
/// HTML element, i.e. when the final path segment starts with an ASCII
/// uppercase letter (e.g. `Show`, `some_mod::Suspense`).
///
/// Block and punctuated names are never components.
#[must_use]
pub fn is_component_tag_name(name: &NodeName) -> bool {
    match name {
        // `last()` is `None` for an empty path, so the empty case falls out
        // as `false` — no separate length check or `unwrap()` needed, and the
        // previous `# Panics` contract disappears.
        NodeName::Path(path) => path.path.segments.last().is_some_and(|segment| {
            segment
                .ident
                .to_string()
                .starts_with(|c: char| c.is_ascii_uppercase())
        }),
        NodeName::Block(_) | NodeName::Punctuated(_) => false,
    }
}
/// Returns `true` if the parsed element node is a component invocation,
/// judged by its tag name (see [`is_component_tag_name`]).
#[must_use]
pub fn is_component_node(node: &NodeElement<impl CustomNode>) -> bool {
    is_component_tag_name(node.name())
}
| rust | MIT | dd507168fa47b5eead64339431b0bd654bd1c951 | 2026-01-04T15:41:20.302544Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/build.rs | rustfs/build.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Build-script entry point: generates compile-time build metadata
/// (tag, commit, package version, …) via `shadow-rs`, consumed elsewhere
/// through `shadow!(build)`.
fn main() -> shadow_rs::SdResult<()> {
    shadow_rs::ShadowBuilder::builder().build()?;
    Ok(())
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/version.rs | rustfs/src/version.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use shadow_rs::shadow;
use std::process::Command;
shadow!(build);
/// Parsed semver components: `(major, minor, patch, optional pre-release tag)`.
type VersionParseResult = Result<(u32, u32, u32, Option<String>), Box<dyn std::error::Error>>;
/// Resolves the version string reported by this binary, preferring live git
/// information over compile-time constants.
///
/// Resolution order:
/// 1. latest git tag (queried at runtime via `git describe`), bumped when
///    the tag is an ancestor of HEAD — NOTE(review): `merge-base
///    --is-ancestor` also succeeds when HEAD *is* the tag commit, so this
///    may bump even at the tag itself; confirm intended;
/// 2. the tag baked in by shadow-rs at compile time;
/// 3. the short commit hash, prefixed with `@`;
/// 4. the Cargo package version.
#[allow(clippy::const_is_empty)]
pub fn get_version() -> String {
    // Get the latest tag
    if let Ok(latest_tag) = get_latest_tag() {
        // Check if current commit is newer than the latest tag
        if is_head_newer_than_tag(&latest_tag) {
            // If current commit is newer, increment the version number
            if let Ok(new_version) = increment_version(&latest_tag) {
                return format!("refs/tags/{new_version}");
            }
        }
        // If current commit is the latest tag, or version increment failed, return current tag
        return format!("refs/tags/{latest_tag}");
    }
    // If no tag exists, use original logic
    if !build::TAG.is_empty() {
        format!("refs/tags/{}", build::TAG)
    } else if !build::SHORT_COMMIT.is_empty() {
        format!("@{}", build::SHORT_COMMIT)
    } else {
        format!("refs/tags/{}", build::PKG_VERSION)
    }
}
/// Get the latest git tag
///
/// Runs `git describe --tags --abbrev=0`, which prints the most recent tag
/// reachable from HEAD without a commit-distance suffix.
fn get_latest_tag() -> Result<String, Box<dyn std::error::Error>> {
    let output = Command::new("git").args(["describe", "--tags", "--abbrev=0"]).output()?;
    // Guard clause: a non-zero exit means there is no tag to report.
    if !output.status.success() {
        return Err("Failed to get latest tag".into());
    }
    let raw = String::from_utf8(output.stdout)?;
    Ok(raw.trim().to_string())
}
/// Check if current HEAD is newer than specified tag
///
/// `git merge-base --is-ancestor` exits 0 when `tag` is an ancestor of HEAD;
/// a failure to spawn git is treated the same as a non-zero exit.
fn is_head_newer_than_tag(tag: &str) -> bool {
    Command::new("git")
        .args(["merge-base", "--is-ancestor", tag, "HEAD"])
        .output()
        .map(|result| result.status.success())
        .unwrap_or(false)
}
/// Increment version number (increase patch version)
fn increment_version(version: &str) -> Result<String, Box<dyn std::error::Error>> {
// Parse version number, e.g. "1.0.0-alpha.19" -> (1, 0, 0, Some("alpha.19"))
let (major, minor, patch, pre_release) = parse_version(version)?;
// If there's a pre-release identifier, increment the pre-release version number
if let Some(pre) = pre_release
&& let Some(new_pre) = increment_pre_release(&pre)
{
return Ok(format!("{major}.{minor}.{patch}-{new_pre}"));
}
// Otherwise increment patch version number
Ok(format!("{major}.{minor}.{}", patch + 1))
}
/// Parse version number
///
/// Splits `"1.0.0-alpha.19"` into `(1, 0, 0, Some("alpha.19"))`; everything
/// after the first `-` (re-joined on `-`) is the pre-release tag.
pub fn parse_version(version: &str) -> VersionParseResult {
    let pieces: Vec<&str> = version.split('-').collect();
    let base_version = pieces[0];
    let pre_release = (pieces.len() > 1).then(|| pieces[1..].join("-"));
    // The numeric core must have at least major.minor.patch; extra
    // dot-components beyond the third are ignored.
    let numbers: Vec<&str> = base_version.split('.').collect();
    if numbers.len() < 3 {
        return Err("Invalid version format".into());
    }
    Ok((numbers[0].parse()?, numbers[1].parse()?, numbers[2].parse()?, pre_release))
}
/// Increment pre-release version number
///
/// Supports the dotted form ("alpha.19" -> "alpha.20") and the compact form
/// ("alpha19" -> "alpha20"); returns `None` when neither applies.
fn increment_pre_release(pre_release: &str) -> Option<String> {
    // Dotted form: exactly two dot-separated segments with a numeric tail.
    let segments: Vec<&str> = pre_release.split('.').collect();
    if segments.len() == 2 {
        if let Ok(counter) = segments[1].parse::<u32>() {
            return Some(format!("{}.{}", segments[0], counter + 1));
        }
    }
    // Compact form: split just after the last alphabetic character and bump
    // the numeric suffix.
    let pos = pre_release.rfind(|c: char| c.is_alphabetic())?;
    let (prefix, suffix) = pre_release.split_at(pos + 1);
    match suffix.parse::<u32>() {
        Ok(counter) => Some(format!("{prefix}{}", counter + 1)),
        Err(_) => None,
    }
}
/// Clean version string - removes common prefixes
///
/// Strips whitespace and then, in order, the prefixes `refs/tags/`, `v`,
/// `RELEASE.`, and `@`; `trim_start_matches` also removes repeated
/// occurrences of each prefix.
pub fn clean_version(version: &str) -> String {
    ["refs/tags/", "v", "RELEASE.", "@"]
        .into_iter()
        .fold(version.trim(), |s, prefix| s.trim_start_matches(prefix))
        .to_string()
}
/// Compare two versions to determine if the latest is newer
///
/// Prefixes like `v`, `refs/tags/`, `RELEASE.`, and `@` are stripped first.
/// Falls back to plain lexicographic string comparison when either side does
/// not parse as semver.
pub fn is_newer_version(current: &str, latest: &str) -> Result<bool, Box<dyn std::error::Error>> {
    let current_clean = clean_version(current);
    let latest_clean = clean_version(latest);
    // Identical strings can never represent an upgrade.
    if current_clean == latest_clean {
        return Ok(false);
    }
    match (parse_version(&current_clean), parse_version(&latest_clean)) {
        (Ok(current_parts), Ok(latest_parts)) => Ok(compare_version_parts(&current_parts, &latest_parts)),
        // Non-semver input: compare the cleaned strings directly.
        _ => Ok(latest_clean > current_clean),
    }
}
/// Compare two version parts tuples (major, minor, patch, pre_release)
///
/// Returns `true` when `latest` is strictly newer than `current`.
fn compare_version_parts(current: &(u32, u32, u32, Option<String>), latest: &(u32, u32, u32, Option<String>)) -> bool {
    let (cur_major, cur_minor, cur_patch, cur_pre) = current;
    let (lat_major, lat_minor, lat_patch, lat_pre) = latest;
    // Lexicographic tuple ordering is exactly the major/minor/patch cascade.
    let cur_nums = (cur_major, cur_minor, cur_patch);
    let lat_nums = (lat_major, lat_minor, lat_patch);
    if lat_nums != cur_nums {
        return lat_nums > cur_nums;
    }
    // Numeric cores are equal: decide on the pre-release tags.
    match (cur_pre, lat_pre) {
        (None, None) => false,    // identical versions
        (Some(_), None) => true,  // a release beats any of its pre-releases
        (None, Some(_)) => false, // …and is never beaten by one
        (Some(cur_pre), Some(lat_pre)) => compare_pre_release(cur_pre, lat_pre),
    }
}
/// Compare pre-release versions
///
/// Walks the dot-separated segments pairwise; the first differing pair
/// decides (numerically when both parse as `u32`, lexically otherwise).
/// When all shared segments match, the tag with more segments is newer.
fn compare_pre_release(current: &str, latest: &str) -> bool {
    let current_parts: Vec<&str> = current.split('.').collect();
    let latest_parts: Vec<&str> = latest.split('.').collect();
    for (cur, lat) in current_parts.iter().zip(latest_parts.iter()) {
        if let (Ok(cur_num), Ok(lat_num)) = (cur.parse::<u32>(), lat.parse::<u32>()) {
            // Both numeric: "9" < "10" must hold, so compare as numbers.
            if cur_num != lat_num {
                return lat_num > cur_num;
            }
        } else if cur != lat {
            // At least one side is non-numeric: lexical order gives
            // "alpha" < "beta" < "rc".
            return lat > cur;
        }
    }
    latest_parts.len() > current_parts.len()
}
#[cfg(test)]
mod tests {
    use super::*;
    use tracing::debug;
    // parse_version splits the numeric core and the optional pre-release tag.
    #[test]
    fn test_parse_version() {
        // Test standard version parsing
        let (major, minor, patch, pre_release) = parse_version("1.0.0").unwrap();
        assert_eq!(major, 1);
        assert_eq!(minor, 0);
        assert_eq!(patch, 0);
        assert_eq!(pre_release, None);
        // Test pre-release version parsing
        let (major, minor, patch, pre_release) = parse_version("1.0.0-alpha.19").unwrap();
        assert_eq!(major, 1);
        assert_eq!(minor, 0);
        assert_eq!(patch, 0);
        assert_eq!(pre_release, Some("alpha.19".to_string()));
    }
    // The dotted pre-release form bumps its numeric tail; unparsable -> None.
    #[test]
    fn test_increment_pre_release() {
        // Test alpha.19 -> alpha.20
        assert_eq!(increment_pre_release("alpha.19"), Some("alpha.20".to_string()));
        // Test beta.5 -> beta.6
        assert_eq!(increment_pre_release("beta.5"), Some("beta.6".to_string()));
        // Test unparsable case
        assert_eq!(increment_pre_release("unknown"), None);
    }
    // Pre-releases bump their counter; plain releases bump the patch.
    #[test]
    fn test_increment_version() {
        // Test pre-release version increment
        assert_eq!(increment_version("1.0.0-alpha.19").unwrap(), "1.0.0-alpha.20");
        // Test standard version increment
        assert_eq!(increment_version("1.0.0").unwrap(), "1.0.1");
    }
    // get_version always yields either a refs/tags/ or an @<commit> form.
    #[test]
    fn test_version_format() {
        // Test if version format starts with refs/tags/
        let version = get_version();
        assert!(version.starts_with("refs/tags/") || version.starts_with("@"));
        // If it's refs/tags/ format, should contain version number
        if let Some(version_part) = version.strip_prefix("refs/tags/") {
            assert!(!version_part.is_empty());
        }
    }
    // Smoke test that also logs the resolved version for inspection.
    #[test]
    fn test_current_version_output() {
        // Display current version output
        let version = get_version();
        debug!("Current version: {version}");
        // Verify version format
        assert!(version.starts_with("refs/tags/") || version.starts_with("@"));
        // If it's refs/tags/ format, verify version number is not empty
        if let Some(version_part) = version.strip_prefix("refs/tags/") {
            assert!(!version_part.is_empty());
            debug!("Version part: {version_part}");
        }
    }
    // Each known prefix is stripped, individually and stacked.
    #[test]
    fn test_clean_version() {
        assert_eq!(clean_version("v1.0.0"), "1.0.0");
        assert_eq!(clean_version("RELEASE.1.0.0"), "1.0.0");
        assert_eq!(clean_version("@1.0.0"), "1.0.0");
        assert_eq!(clean_version("1.0.0"), "1.0.0");
        assert_eq!(clean_version("refs/tags/1.0.0-alpha.17"), "1.0.0-alpha.17");
        assert_eq!(clean_version("refs/tags/v1.0.0"), "1.0.0");
    }
    // End-to-end comparison semantics, including pre-release ordering.
    #[test]
    fn test_is_newer_version() {
        // Test semantic version comparison
        assert!(is_newer_version("1.0.0", "1.0.1").unwrap());
        assert!(is_newer_version("1.0.0", "1.1.0").unwrap());
        assert!(is_newer_version("1.0.0", "2.0.0").unwrap());
        assert!(!is_newer_version("1.0.1", "1.0.0").unwrap());
        assert!(!is_newer_version("1.0.0", "1.0.0").unwrap());
        // Test version comparison with pre-release identifiers
        assert!(is_newer_version("1.0.0-alpha.1", "1.0.0-alpha.2").unwrap());
        assert!(is_newer_version("1.0.0-alpha.17", "1.0.1").unwrap());
        assert!(is_newer_version("refs/tags/1.0.0-alpha.16", "refs/tags/1.0.0-alpha.17").unwrap());
        assert!(!is_newer_version("refs/tags/1.0.0-alpha.17", "refs/tags/1.0.0-alpha.16").unwrap());
        // Test pre-release vs release comparison
        assert!(is_newer_version("1.0.0-alpha.1", "1.0.0").unwrap());
        assert!(is_newer_version("1.0.0-beta.1", "1.0.0").unwrap());
        assert!(!is_newer_version("1.0.0", "1.0.0-alpha.1").unwrap());
        assert!(!is_newer_version("1.0.0", "1.0.0-beta.1").unwrap());
        // Test pre-release version ordering
        assert!(is_newer_version("1.0.0-alpha.1", "1.0.0-alpha.2").unwrap());
        assert!(is_newer_version("1.0.0-alpha.19", "1.0.0-alpha.20").unwrap());
        assert!(is_newer_version("1.0.0-alpha.1", "1.0.0-beta.1").unwrap());
        assert!(is_newer_version("1.0.0-beta.1", "1.0.0-rc.1").unwrap());
        // Test complex pre-release versions
        assert!(is_newer_version("1.0.0-alpha.1.2", "1.0.0-alpha.1.3").unwrap());
        assert!(is_newer_version("1.0.0-alpha.1", "1.0.0-alpha.1.1").unwrap());
        assert!(!is_newer_version("1.0.0-alpha.1.3", "1.0.0-alpha.1.2").unwrap());
    }
    // Tuple-level comparison used after both sides parse as semver.
    #[test]
    fn test_compare_version_parts() {
        // Test basic version comparison
        assert!(compare_version_parts(&(1, 0, 0, None), &(1, 0, 1, None)));
        assert!(compare_version_parts(&(1, 0, 0, None), &(1, 1, 0, None)));
        assert!(compare_version_parts(&(1, 0, 0, None), &(2, 0, 0, None)));
        assert!(!compare_version_parts(&(1, 0, 1, None), &(1, 0, 0, None)));
        // Test pre-release vs release
        assert!(compare_version_parts(&(1, 0, 0, Some("alpha.1".to_string())), &(1, 0, 0, None)));
        assert!(!compare_version_parts(&(1, 0, 0, None), &(1, 0, 0, Some("alpha.1".to_string()))));
        // Test pre-release comparison
        assert!(compare_version_parts(
            &(1, 0, 0, Some("alpha.1".to_string())),
            &(1, 0, 0, Some("alpha.2".to_string()))
        ));
        assert!(compare_version_parts(
            &(1, 0, 0, Some("alpha.19".to_string())),
            &(1, 0, 0, Some("alpha.20".to_string()))
        ));
        assert!(compare_version_parts(
            &(1, 0, 0, Some("alpha.1".to_string())),
            &(1, 0, 0, Some("beta.1".to_string()))
        ));
    }
    // Segment-wise pre-release ordering: numeric, lexical, and length rules.
    #[test]
    fn test_compare_pre_release() {
        // Test numeric pre-release comparison
        assert!(compare_pre_release("alpha.1", "alpha.2"));
        assert!(compare_pre_release("alpha.19", "alpha.20"));
        assert!(!compare_pre_release("alpha.2", "alpha.1"));
        // Test string pre-release comparison
        assert!(compare_pre_release("alpha.1", "beta.1"));
        assert!(compare_pre_release("beta.1", "rc.1"));
        assert!(!compare_pre_release("beta.1", "alpha.1"));
        // Test complex pre-release comparison
        assert!(compare_pre_release("alpha.1.2", "alpha.1.3"));
        assert!(compare_pre_release("alpha.1", "alpha.1.1"));
        assert!(!compare_pre_release("alpha.1.3", "alpha.1.2"));
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/update.rs | rustfs/src/update.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::version;
use serde::{Deserialize, Serialize};
use std::time::Duration;
use thiserror::Error;
use tracing::{debug, error, info};
/// Update check related errors
#[derive(Error, Debug)]
pub enum UpdateCheckError {
    /// Transport-level failure from `reqwest` (connect, timeout, body read).
    #[error("HTTP request failed: {0}")]
    HttpError(#[from] reqwest::Error),
    /// A fetched version string could not be parsed for comparison.
    #[error("Version parsing failed: {0}")]
    VersionParseError(String),
    /// Non-success HTTP status, or a body that failed JSON deserialization.
    #[error("Invalid version response: {0}")]
    InvalidResponse(String),
}
/// Version information structure
///
/// Mirrors the JSON document served by the version endpoint and is
/// deserialized in `VersionChecker::check_for_updates`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VersionInfo {
    /// Version number
    pub version: String,
    /// Release date
    pub release_date: Option<String>,
    /// Release notes
    pub release_notes: Option<String>,
    /// Download URL
    pub download_url: Option<String>,
}
/// Update check result
///
/// `latest_version` is `Some` whenever the server response parsed
/// successfully, even when no update is available.
#[allow(dead_code)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateCheckResult {
    /// Whether update is available
    pub update_available: bool,
    /// Current version
    pub current_version: String,
    /// Latest version information
    pub latest_version: Option<VersionInfo>,
    /// Check time
    pub check_time: chrono::DateTime<chrono::Utc>,
}
/// Version checker
///
/// Holds a pre-configured HTTP client plus the endpoint and timeout used by
/// `check_for_updates`; the timeout is applied both on the client builder
/// and again per request.
pub struct VersionChecker {
    /// HTTP client
    client: reqwest::Client,
    /// Version server URL
    version_url: String,
    /// Request timeout
    timeout: Duration,
}
impl Default for VersionChecker {
    // Delegates to `new()`, so `VersionChecker::default()` uses the standard
    // endpoint and 10-second timeout.
    fn default() -> Self {
        Self::new()
    }
}
impl VersionChecker {
    /// Create a new version checker
    ///
    /// Uses the default endpoint (`https://version.rustfs.com/latest.json`)
    /// and a 10-second timeout; falls back to a plain `reqwest::Client` if
    /// the configured builder fails.
    pub fn new() -> Self {
        let client = reqwest::Client::builder()
            .timeout(Duration::from_secs(10))
            .user_agent(format!("RustFS/{}", get_current_version()))
            .build()
            .unwrap_or_else(|_| reqwest::Client::new());
        Self {
            client,
            version_url: "https://version.rustfs.com/latest.json".to_string(),
            timeout: Duration::from_secs(10),
        }
    }
    /// Create version checker with custom configuration
    ///
    /// `url` is the endpoint serving a `VersionInfo` JSON document; `timeout`
    /// bounds both the client and each individual request.
    #[allow(dead_code)]
    pub fn with_config(url: String, timeout: Duration) -> Self {
        let client = reqwest::Client::builder()
            .timeout(timeout)
            .user_agent(format!("RustFS/{}", get_current_version()))
            .build()
            .unwrap_or_else(|_| reqwest::Client::new());
        Self {
            client,
            version_url: url,
            timeout,
        }
    }
    /// Check for updates
    ///
    /// Fetches the latest `VersionInfo` from the configured endpoint and
    /// compares it against the running version.
    ///
    /// # Errors
    ///
    /// Returns [`UpdateCheckError::HttpError`] on transport failures,
    /// [`UpdateCheckError::InvalidResponse`] on non-success status codes or
    /// malformed JSON, and [`UpdateCheckError::VersionParseError`] when the
    /// two versions cannot be compared.
    pub async fn check_for_updates(&self) -> Result<UpdateCheckResult, UpdateCheckError> {
        let current_version = get_current_version();
        debug!("Checking for updates, current version: {}", current_version);
        // Send HTTP GET request to get latest version information
        let response = self.client.get(&self.version_url).timeout(self.timeout).send().await?;
        if !response.status().is_success() {
            let status = response.status();
            let error_text = response.text().await.unwrap_or_default();
            error!("Version check request failed, status code: {}, response: {}", status, error_text);
            return Err(UpdateCheckError::InvalidResponse(format!(
                "HTTP status code: {status}, response: {error_text}"
            )));
        }
        // Parse the response; keep the raw bytes so the error path can quote
        // the offending body.
        let response_bytes = response.bytes().await?;
        let version_info: VersionInfo = match serde_json::from_slice(&response_bytes) {
            Ok(v) => v,
            Err(e) => {
                let error_text = String::from_utf8_lossy(&response_bytes);
                error!("Version check request failed, response: {}", e);
                return Err(UpdateCheckError::InvalidResponse(format!(
                    "JSON parsing failed: {e}, response: {error_text}"
                )));
            }
        };
        debug!("Retrieved latest version information: {:?}", version_info);
        // Compare versions using version.rs functions
        let update_available = version::is_newer_version(&current_version, &version_info.version)
            .map_err(|e| UpdateCheckError::VersionParseError(e.to_string()))?;
        // Log before moving `current_version`/`version_info` into the result;
        // this avoids the previous `latest_version.as_ref().unwrap()` on a
        // value that had just been wrapped in `Some`.
        if update_available {
            info!("New version available: {} -> {}", current_version, version_info.version);
        } else {
            info!("Current version is up to date: {}", current_version);
        }
        Ok(UpdateCheckResult {
            update_available,
            current_version,
            latest_version: Some(version_info),
            check_time: chrono::Utc::now(),
        })
    }
}
/// Get current version number
///
/// Thin wrapper over [`version::get_version`]; used both for the HTTP
/// user-agent string and as the comparison baseline in update checks.
pub fn get_current_version() -> String {
    version::get_version()
}
/// Convenience function for async update checking
///
/// Builds a default [`VersionChecker`] and performs one check against the
/// standard endpoint.
pub async fn check_updates() -> Result<UpdateCheckResult, UpdateCheckError> {
    VersionChecker::new().check_for_updates().await
}
/// Update check with custom URL
///
/// Same as [`check_updates`], but queries `url` (with the default 10-second
/// timeout) instead of the standard endpoint.
#[allow(dead_code)]
pub async fn check_updates_with_url(url: String) -> Result<UpdateCheckResult, UpdateCheckError> {
    VersionChecker::with_config(url, Duration::from_secs(10))
        .check_for_updates()
        .await
}
#[cfg(test)]
mod tests {
    use super::*;
    // get_version never yields an empty string.
    #[tokio::test]
    async fn test_get_current_version() {
        let version = get_current_version();
        assert!(!version.is_empty());
        debug!("Current version: {version}");
    }
    // Field access, Clone/Debug, and serde round-trips for UpdateCheckResult.
    #[test]
    fn test_update_check_result() {
        use chrono::Utc;
        // Test creating UpdateCheckResult with update available
        let version_info = VersionInfo {
            version: "1.2.0".to_string(),
            release_date: Some("2024-01-15T10:00:00Z".to_string()),
            release_notes: Some("Bug fixes and new features".to_string()),
            download_url: Some("https://github.com/rustfs/rustfs/releases/tag/v1.2.0".to_string()),
        };
        let check_time = Utc::now();
        let result = UpdateCheckResult {
            update_available: true,
            current_version: "1.1.0".to_string(),
            latest_version: Some(version_info.clone()),
            check_time,
        };
        debug!("Update check result: {:?}", serde_json::to_string(&result).unwrap());
        // Test fields
        assert!(result.update_available);
        assert_eq!(result.current_version, "1.1.0");
        assert!(result.latest_version.is_some());
        assert_eq!(result.check_time, check_time);
        // Test latest version info
        if let Some(latest) = &result.latest_version {
            assert_eq!(latest.version, "1.2.0");
            assert_eq!(latest.release_date, Some("2024-01-15T10:00:00Z".to_string()));
            assert_eq!(latest.release_notes, Some("Bug fixes and new features".to_string()));
            assert_eq!(
                latest.download_url,
                Some("https://github.com/rustfs/rustfs/releases/tag/v1.2.0".to_string())
            );
        }
        // Test Clone functionality
        let cloned_result = result.clone();
        assert_eq!(cloned_result.update_available, result.update_available);
        assert_eq!(cloned_result.current_version, result.current_version);
        assert_eq!(cloned_result.check_time, result.check_time);
        // Test Debug functionality (should not panic)
        let debug_output = format!("{result:?}");
        assert!(debug_output.contains("UpdateCheckResult"));
        assert!(debug_output.contains("1.1.0"));
        assert!(debug_output.contains("1.2.0"));
        // Test creating UpdateCheckResult with no update available
        let no_update_result = UpdateCheckResult {
            update_available: false,
            current_version: "1.2.0".to_string(),
            latest_version: Some(VersionInfo {
                version: "1.2.0".to_string(),
                release_date: Some("2024-01-15T10:00:00Z".to_string()),
                release_notes: None,
                download_url: None,
            }),
            check_time: Utc::now(),
        };
        assert!(!no_update_result.update_available);
        assert_eq!(no_update_result.current_version, "1.2.0");
        // Test creating UpdateCheckResult with None latest_version (error case)
        let error_result = UpdateCheckResult {
            update_available: false,
            current_version: "1.1.0".to_string(),
            latest_version: None,
            check_time: Utc::now(),
        };
        assert!(!error_result.update_available);
        assert!(error_result.latest_version.is_none());
        debug!("UpdateCheckResult tests passed successfully");
    }
    // Construction, Clone/Debug, and serde round-trips for VersionInfo.
    #[test]
    fn test_version_info() {
        // Test VersionInfo structure
        let version_info = VersionInfo {
            version: "2.0.0".to_string(),
            release_date: Some("2024-02-01T12:00:00Z".to_string()),
            release_notes: Some("Major release with breaking changes".to_string()),
            download_url: Some("https://github.com/rustfs/rustfs/releases/tag/v2.0.0".to_string()),
        };
        // Test fields
        assert_eq!(version_info.version, "2.0.0");
        assert_eq!(version_info.release_date, Some("2024-02-01T12:00:00Z".to_string()));
        assert_eq!(version_info.release_notes, Some("Major release with breaking changes".to_string()));
        assert_eq!(
            version_info.download_url,
            Some("https://github.com/rustfs/rustfs/releases/tag/v2.0.0".to_string())
        );
        // Test Clone functionality
        let cloned_info = version_info.clone();
        assert_eq!(cloned_info.version, version_info.version);
        assert_eq!(cloned_info.release_date, version_info.release_date);
        assert_eq!(cloned_info.release_notes, version_info.release_notes);
        assert_eq!(cloned_info.download_url, version_info.download_url);
        // Test Debug functionality
        let debug_output = format!("{version_info:?}");
        assert!(debug_output.contains("VersionInfo"));
        assert!(debug_output.contains("2.0.0"));
        // Test minimal VersionInfo with only version
        let minimal_info = VersionInfo {
            version: "1.0.0".to_string(),
            release_date: None,
            release_notes: None,
            download_url: None,
        };
        assert_eq!(minimal_info.version, "1.0.0");
        assert!(minimal_info.release_date.is_none());
        assert!(minimal_info.release_notes.is_none());
        assert!(minimal_info.download_url.is_none());
        // Test JSON serialization/deserialization
        let json_string = serde_json::to_string(&version_info).unwrap();
        debug!("Serialized version info: {json_string}");
        assert!(json_string.contains("2.0.0"));
        assert!(json_string.contains("Major release"));
        let deserialized: VersionInfo = serde_json::from_str(&json_string).unwrap();
        assert_eq!(deserialized.version, version_info.version);
        assert_eq!(deserialized.release_notes, version_info.release_notes);
        debug!("VersionInfo tests passed successfully");
    }
    // Sanity checks that the version.rs helpers behave as this module assumes.
    #[test]
    fn test_version_functions_integration() {
        // Test that version functions from version.rs work correctly
        assert_eq!(version::clean_version("refs/tags/1.0.0-alpha.17"), "1.0.0-alpha.17");
        assert_eq!(version::clean_version("v1.0.0"), "1.0.0");
        // Test version comparison
        assert!(version::is_newer_version("1.0.0", "1.0.1").unwrap());
        assert!(!version::is_newer_version("1.0.1", "1.0.0").unwrap());
        // Test version parsing using parse_version
        assert_eq!(version::parse_version("1.0.0").unwrap(), (1, 0, 0, None));
        assert_eq!(version::parse_version("2.1.3-alpha.1").unwrap(), (2, 1, 3, Some("alpha.1".to_string())));
        debug!("Version functions integration tests passed successfully");
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/profiling.rs | rustfs/src/profiling.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// No-op profiling initializer for platforms where the pprof/jemalloc
/// implementation is not compiled in (anything other than x86_64
/// Linux/gnu): it only logs that profiling is disabled, together with the
/// detected platform triple.
#[cfg(not(all(target_os = "linux", target_env = "gnu", target_arch = "x86_64")))]
pub async fn init_from_env() {
    let (target_os, target_env, target_arch) = get_platform_info();
    tracing::info!(
        target: "rustfs::main::run",
        target_os = %target_os,
        target_env = %target_env,
        target_arch = %target_arch,
        "profiling: disabled on this platform. target_os={}, target_env={}, target_arch={}",
        target_os, target_env, target_arch
    );
}
/// Best-effort `(os, env, arch)` triple used in log/error messages on
/// unsupported platforms.
#[cfg(not(all(target_os = "linux", target_env = "gnu", target_arch = "x86_64")))]
fn get_platform_info() -> (String, String, String) {
    (
        std::env::consts::OS.to_string(),
        // NOTE(review): CARGO_CFG_TARGET_ENV is normally only exported to
        // build scripts, so this `option_env!` likely evaluates to "unknown"
        // when compiling the crate itself — confirm whether the build
        // pipeline sets it explicitly.
        option_env!("CARGO_CFG_TARGET_ENV").unwrap_or("unknown").to_string(),
        std::env::consts::ARCH.to_string(),
    )
}
/// Stub for unsupported platforms: always fails with a message naming the
/// detected platform triple; the requested duration is ignored.
#[cfg(not(all(target_os = "linux", target_env = "gnu", target_arch = "x86_64")))]
pub async fn dump_cpu_pprof_for(_duration: std::time::Duration) -> Result<std::path::PathBuf, String> {
    let (target_os, target_env, target_arch) = get_platform_info();
    let msg = format!(
        "CPU profiling is not supported on this platform. target_os={target_os}, target_env={target_env}, target_arch={target_arch}"
    );
    Err(msg)
}
/// Stub for unsupported platforms: always fails with a message naming the
/// detected platform triple.
#[cfg(not(all(target_os = "linux", target_env = "gnu", target_arch = "x86_64")))]
pub async fn dump_memory_pprof_now() -> Result<std::path::PathBuf, String> {
    let (target_os, target_env, target_arch) = get_platform_info();
    let msg = format!(
        "Memory profiling is not supported on this platform. target_os={target_os}, target_env={target_env}, target_arch={target_arch}"
    );
    Err(msg)
}
#[cfg(all(target_os = "linux", target_env = "gnu", target_arch = "x86_64"))]
mod linux_impl {
use chrono::Utc;
use jemalloc_pprof::PROF_CTL;
use pprof::protos::Message;
use rustfs_config::{
DEFAULT_CPU_DURATION_SECS, DEFAULT_CPU_FREQ, DEFAULT_CPU_INTERVAL_SECS, DEFAULT_CPU_MODE, DEFAULT_ENABLE_PROFILING,
DEFAULT_MEM_INTERVAL_SECS, DEFAULT_MEM_PERIODIC, DEFAULT_OUTPUT_DIR, ENV_CPU_DURATION_SECS, ENV_CPU_FREQ,
ENV_CPU_INTERVAL_SECS, ENV_CPU_MODE, ENV_ENABLE_PROFILING, ENV_MEM_INTERVAL_SECS, ENV_MEM_PERIODIC, ENV_OUTPUT_DIR,
};
use rustfs_utils::{get_env_bool, get_env_str, get_env_u64, get_env_usize};
use std::fs::{File, create_dir_all};
use std::io::Write;
use std::path::{Path, PathBuf};
use std::sync::{Arc, OnceLock};
use std::time::Duration;
use tokio::sync::Mutex;
use tokio::time::sleep;
use tracing::{debug, error, info, warn};
static CPU_CONT_GUARD: OnceLock<Arc<Mutex<Option<pprof::ProfilerGuard<'static>>>>> = OnceLock::new();
/// CPU profiling mode
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum CpuMode {
Off,
Continuous,
Periodic,
}
/// Get or create output directory
fn output_dir() -> PathBuf {
let dir = get_env_str(ENV_OUTPUT_DIR, DEFAULT_OUTPUT_DIR);
let p = PathBuf::from(dir);
if let Err(e) = create_dir_all(&p) {
warn!("profiling: create output dir {} failed: {}, fallback to current dir", p.display(), e);
return PathBuf::from(".");
}
p
}
/// Read CPU profiling mode from env
fn read_cpu_mode() -> CpuMode {
match get_env_str(ENV_CPU_MODE, DEFAULT_CPU_MODE).to_lowercase().as_str() {
"continuous" => CpuMode::Continuous,
"periodic" => CpuMode::Periodic,
_ => CpuMode::Off,
}
}
/// Generate timestamp string for filenames
fn ts() -> String {
Utc::now().format("%Y%m%dT%H%M%S").to_string()
}
/// Encode a pprof report in protobuf format and write it to `path`.
///
/// Each failing step (encode, create, write) is reported as a human-readable
/// error string.
fn write_pprof_report_pb(report: &pprof::Report, path: &Path) -> Result<(), String> {
    let profile = report.pprof().map_err(|e| format!("pprof() failed: {e}"))?;
    // Preallocate to reduce reallocations while encoding.
    let mut encoded = Vec::with_capacity(512 * 1024);
    profile
        .write_to_vec(&mut encoded)
        .map_err(|e| format!("encode failed: {e}"))?;
    let mut out_file = File::create(path).map_err(|e| format!("create file failed: {e}"))?;
    out_file
        .write_all(&encoded)
        .map_err(|e| format!("write file failed: {e}"))?;
    Ok(())
}
/// Internal: dump CPU pprof from existing guard
///
/// Builds a report from `guard` and writes it as `cpu_profile_<ts>.pb` in the
/// output directory; returns the path of the written file.
/// NOTE(review): report building and the file write are synchronous work done
/// on an async task — confirm the duration is acceptable on the runtime worker.
async fn dump_cpu_with_guard(guard: &pprof::ProfilerGuard<'_>) -> Result<PathBuf, String> {
    let report = guard.report().build().map_err(|e| format!("build report failed: {e}"))?;
    let out = output_dir().join(format!("cpu_profile_{}.pb", ts()));
    write_pprof_report_pb(&report, &out)?;
    info!("CPU profile exported: {}", out.display());
    Ok(out)
}
// Public API: dump CPU for a duration; if continuous guard exists, snapshot immediately.
//
// When a continuous profiler is installed, its accumulated samples are dumped
// right away and `duration` is ignored. Otherwise a temporary profiler is
// created, left sampling for `duration`, and then dumped.
pub async fn dump_cpu_pprof_for(duration: Duration) -> Result<PathBuf, String> {
    if let Some(cell) = CPU_CONT_GUARD.get() {
        // The slot lock is held across the dump so the continuous guard cannot
        // be replaced mid-report (tokio::sync::Mutex, so holding it across
        // .await is allowed).
        let guard_slot = cell.lock().await;
        if let Some(ref guard) = *guard_slot {
            debug!("profiling: using continuous profiler guard for CPU dump");
            return dump_cpu_with_guard(guard).await;
        }
    }
    // No continuous profiler: sample on demand for `duration`.
    let freq = get_env_usize(ENV_CPU_FREQ, DEFAULT_CPU_FREQ) as i32;
    let guard = pprof::ProfilerGuard::new(freq).map_err(|e| format!("create profiler failed: {e}"))?;
    sleep(duration).await;
    dump_cpu_with_guard(&guard).await
}
// Public API: dump memory pprof now (jemalloc)
//
// Dumps the current jemalloc heap profile (pprof protobuf) into the output
// directory and returns the written path. Fails if the jemalloc profiling
// controller is unavailable or profiling is not active.
//
// Fix: the output file used to be created *before* the availability/activation
// checks and before the dump, so every error path left an empty stale .pb file
// on disk. The file is now only created once the dump bytes are in hand.
pub async fn dump_memory_pprof_now() -> Result<PathBuf, String> {
    let prof_ctl_cell = PROF_CTL
        .as_ref()
        .ok_or_else(|| "jemalloc profiling control not available".to_string())?;
    let mut prof_ctl = prof_ctl_cell.lock().await;
    if !prof_ctl.activated() {
        return Err("jemalloc profiling is not active".to_string());
    }
    let bytes = prof_ctl.dump_pprof().map_err(|e| format!("dump pprof failed: {e}"))?;
    // Create the file only after a successful dump.
    let out = output_dir().join(format!("mem_profile_{}.pb", ts()));
    let mut f = File::create(&out).map_err(|e| format!("create file failed: {e}"))?;
    f.write_all(&bytes).map_err(|e| format!("write file failed: {e}"))?;
    info!("Memory profile exported: {}", out.display());
    Ok(out)
}
// Jemalloc status check (No forced placement, only status observation)
//
// Logs the jemalloc configuration, the profiling activation state, and a set
// of allocator statistics. Purely observational: every read failure is only
// logged, nothing is returned and nothing is changed.
pub async fn check_jemalloc_profiling() {
    use tikv_jemalloc_ctl::{config, epoch, stats};
    // Advance the stats epoch so subsequent reads reflect current values.
    if let Err(e) = epoch::advance() {
        warn!("jemalloc epoch advance failed: {e}");
    }
    match config::malloc_conf::read() {
        Ok(conf) => debug!("jemalloc malloc_conf: {}", conf),
        Err(e) => debug!("jemalloc read malloc_conf failed: {e}"),
    }
    match std::env::var("MALLOC_CONF") {
        Ok(v) => debug!("MALLOC_CONF={}", v),
        Err(_) => debug!("MALLOC_CONF is not set"),
    }
    if let Some(lock) = PROF_CTL.as_ref() {
        let ctl = lock.lock().await;
        info!(activated = ctl.activated(), "jemalloc profiling status");
    } else {
        info!("jemalloc profiling controller is NOT available");
    }
    // Refresh the epoch once more before sampling the counters below.
    let _ = epoch::advance();
    // Helper: read one jemalloc stat and log the value or the read error.
    macro_rules! show {
        ($name:literal, $reader:expr) => {
            match $reader {
                Ok(v) => debug!(concat!($name, "={}"), v),
                Err(e) => debug!(concat!($name, " read failed: {}"), e),
            }
        };
    }
    show!("allocated", stats::allocated::read());
    show!("resident", stats::resident::read());
    show!("mapped", stats::mapped::read());
    show!("metadata", stats::metadata::read());
    show!("active", stats::active::read());
}
// Internal: start continuous CPU profiling.
//
// Installs a process-wide profiler guard into CPU_CONT_GUARD. If a guard is
// already installed this is a no-op apart from a warning.
async fn start_cpu_continuous(freq_hz: i32) {
    let cell = CPU_CONT_GUARD.get_or_init(|| Arc::new(Mutex::new(None))).clone();
    let mut slot = cell.lock().await;
    if slot.is_some() {
        warn!("profiling: continuous CPU guard already running");
        return;
    }
    // Exclude low-level runtime frames from the captured stacks.
    let build_result = pprof::ProfilerGuardBuilder::default()
        .frequency(freq_hz)
        .blocklist(&["libc", "libgcc", "pthread", "vdso"])
        .build();
    match build_result {
        Ok(new_guard) => {
            *slot = Some(new_guard);
            info!(freq = freq_hz, "start continuous CPU profiling");
        }
        Err(e) => warn!("start continuous CPU profiling failed: {e}"),
    }
}
// Internal: start periodic CPU sampling loop
//
// Spawns a detached background task that forever: waits `interval`, samples
// at `freq_hz` for `duration`, then writes the report to the output directory.
// Every failure is logged and the loop continues with the next cycle.
async fn start_cpu_periodic(freq_hz: i32, interval: Duration, duration: Duration) {
    info!(freq = freq_hz, ?interval, ?duration, "start periodic CPU profiling");
    tokio::spawn(async move {
        loop {
            sleep(interval).await;
            let guard = match pprof::ProfilerGuard::new(freq_hz) {
                Ok(g) => g,
                Err(e) => {
                    warn!("periodic CPU profiler create failed: {e}");
                    continue;
                }
            };
            // Keep the guard alive for the whole sampling window.
            sleep(duration).await;
            match guard.report().build() {
                Ok(report) => {
                    let out = output_dir().join(format!("cpu_profile_{}.pb", ts()));
                    if let Err(e) = write_pprof_report_pb(&report, &out) {
                        warn!("write periodic CPU pprof failed: {e}");
                    } else {
                        info!("periodic CPU profile exported: {}", out.display());
                    }
                }
                Err(e) => warn!("periodic CPU report build failed: {e}"),
            }
        }
    });
}
// Internal: start periodic memory dump when jemalloc profiling is active
async fn start_memory_periodic(interval: Duration) {
info!(?interval, "start periodic memory pprof dump");
tokio::spawn(async move {
loop {
sleep(interval).await;
let Some(lock) = PROF_CTL.as_ref() else {
debug!("skip memory dump: PROF_CTL not available");
continue;
};
let mut ctl = lock.lock().await;
if !ctl.activated() {
debug!("skip memory dump: jemalloc profiling not active");
continue;
}
let out = output_dir().join(format!("mem_profile_periodic_{}.pb", ts()));
match File::create(&out) {
Err(e) => {
error!("periodic mem dump create file failed: {}", e);
continue;
}
Ok(mut f) => match ctl.dump_pprof() {
Ok(bytes) => {
if let Err(e) = f.write_all(&bytes) {
error!("periodic mem dump write failed: {}", e);
} else {
info!("periodic memory profile dumped to {}", out.display());
}
}
Err(e) => error!("periodic mem dump failed: {}", e),
},
}
}
});
}
// Public: unified init entry, avoid duplication/conflict
//
// Reads all profiling configuration from the environment and starts the
// requested CPU and memory profiling tasks. No-op (besides a debug log) when
// profiling is disabled via ENV_ENABLE_PROFILING.
pub async fn init_from_env() {
    let enabled = get_env_bool(ENV_ENABLE_PROFILING, DEFAULT_ENABLE_PROFILING);
    if !enabled {
        debug!("profiling: disabled by env");
        return;
    }
    // Jemalloc state check once (no dump)
    check_jemalloc_profiling().await;
    // CPU
    let cpu_mode = read_cpu_mode();
    let cpu_freq = get_env_usize(ENV_CPU_FREQ, DEFAULT_CPU_FREQ) as i32;
    let cpu_interval = Duration::from_secs(get_env_u64(ENV_CPU_INTERVAL_SECS, DEFAULT_CPU_INTERVAL_SECS));
    let cpu_duration = Duration::from_secs(get_env_u64(ENV_CPU_DURATION_SECS, DEFAULT_CPU_DURATION_SECS));
    match cpu_mode {
        CpuMode::Off => debug!("profiling: CPU mode off"),
        CpuMode::Continuous => start_cpu_continuous(cpu_freq).await,
        CpuMode::Periodic => start_cpu_periodic(cpu_freq, cpu_interval, cpu_duration).await,
    }
    // Memory
    let mem_periodic = get_env_bool(ENV_MEM_PERIODIC, DEFAULT_MEM_PERIODIC);
    let mem_interval = Duration::from_secs(get_env_u64(ENV_MEM_INTERVAL_SECS, DEFAULT_MEM_INTERVAL_SECS));
    if mem_periodic {
        start_memory_periodic(mem_interval).await;
    }
}
}
#[cfg(all(target_os = "linux", target_env = "gnu", target_arch = "x86_64"))]
pub use linux_impl::{dump_cpu_pprof_for, dump_memory_pprof_now, init_from_env};
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/error.rs | rustfs/src/error.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_ecstore::error::StorageError;
use s3s::{S3Error, S3ErrorCode};
/// API-level error: pairs an S3 error code with a human-readable message and,
/// optionally, the underlying error that caused it.
#[derive(Debug)]
pub struct ApiError {
    /// S3 error code reported to the client.
    pub code: S3ErrorCode,
    /// Human-readable message; rendered by `Display` and forwarded into the
    /// resulting `S3Error`.
    pub message: String,
    /// Underlying cause, preserved for diagnostics / downcasting.
    pub source: Option<Box<dyn std::error::Error + Send + Sync>>,
}
/// Render just the stored message; code and source are not included.
impl std::fmt::Display for ApiError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(&self.message)
    }
}

impl std::error::Error for ApiError {}
impl ApiError {
    /// Wrap an arbitrary error as an internal server error, using its
    /// `Display` text as the message and keeping it as the source.
    pub fn other<E>(error: E) -> Self
    where
        E: std::fmt::Display + Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        ApiError {
            code: S3ErrorCode::InternalError,
            message: error.to_string(),
            source: Some(error.into()),
        }
    }
    /// Canonical human-readable message for an S3 error code.
    ///
    /// The texts mirror the S3 / S3 Select error tables; unknown codes fall
    /// back to the code's own name.
    pub fn error_code_to_message(code: &S3ErrorCode) -> String {
        match code {
            S3ErrorCode::InvalidRequest => "Invalid Request".to_string(),
            S3ErrorCode::InvalidArgument => "Invalid argument".to_string(),
            S3ErrorCode::InvalidStorageClass => "Invalid storage class.".to_string(),
            S3ErrorCode::AccessDenied => "Access Denied.".to_string(),
            S3ErrorCode::BadDigest => "The Content-Md5 you specified did not match what we received.".to_string(),
            S3ErrorCode::EntityTooSmall => "Your proposed upload is smaller than the minimum allowed object size.".to_string(),
            S3ErrorCode::EntityTooLarge => "Your proposed upload exceeds the maximum allowed object size.".to_string(),
            S3ErrorCode::InternalError => "We encountered an internal error, please try again.".to_string(),
            S3ErrorCode::InvalidAccessKeyId => "The Access Key Id you provided does not exist in our records.".to_string(),
            S3ErrorCode::InvalidBucketName => "The specified bucket is not valid.".to_string(),
            S3ErrorCode::InvalidDigest => "The Content-Md5 you specified is not valid.".to_string(),
            S3ErrorCode::InvalidRange => "The requested range is not satisfiable".to_string(),
            S3ErrorCode::MalformedXML => "The XML you provided was not well-formed or did not validate against our published schema.".to_string(),
            S3ErrorCode::MissingContentLength => "You must provide the Content-Length HTTP header.".to_string(),
            S3ErrorCode::MissingSecurityHeader => "Your request was missing a required header".to_string(),
            S3ErrorCode::MissingRequestBodyError => "Request body is empty.".to_string(),
            S3ErrorCode::NoSuchBucket => "The specified bucket does not exist".to_string(),
            S3ErrorCode::NoSuchBucketPolicy => "The bucket policy does not exist".to_string(),
            S3ErrorCode::NoSuchLifecycleConfiguration => "The lifecycle configuration does not exist".to_string(),
            S3ErrorCode::NoSuchKey => "The specified key does not exist.".to_string(),
            S3ErrorCode::NoSuchUpload => "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.".to_string(),
            S3ErrorCode::NoSuchVersion => "The specified version does not exist.".to_string(),
            S3ErrorCode::NotImplemented => "A header you provided implies functionality that is not implemented".to_string(),
            S3ErrorCode::PreconditionFailed => "At least one of the pre-conditions you specified did not hold".to_string(),
            S3ErrorCode::SignatureDoesNotMatch => "The request signature we calculated does not match the signature you provided. Check your key and signing method.".to_string(),
            S3ErrorCode::MethodNotAllowed => "The specified method is not allowed against this resource.".to_string(),
            S3ErrorCode::InvalidPart => "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.".to_string(),
            S3ErrorCode::InvalidPartOrder => "The list of parts was not in ascending order. The parts list must be specified in order by part number.".to_string(),
            S3ErrorCode::InvalidObjectState => "The operation is not valid for the current state of the object.".to_string(),
            S3ErrorCode::AuthorizationHeaderMalformed => "The authorization header is malformed; the region is wrong; expecting 'us-east-1'.".to_string(),
            S3ErrorCode::MalformedPOSTRequest => "The body of your POST request is not well-formed multipart/form-data.".to_string(),
            S3ErrorCode::BucketNotEmpty => "The bucket you tried to delete is not empty".to_string(),
            S3ErrorCode::BucketAlreadyExists => "The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.".to_string(),
            S3ErrorCode::BucketAlreadyOwnedByYou => "Your previous request to create the named bucket succeeded and you already own it.".to_string(),
            S3ErrorCode::AllAccessDisabled => "All access to this resource has been disabled.".to_string(),
            S3ErrorCode::InvalidPolicyDocument => "The content of the form does not meet the conditions specified in the policy document.".to_string(),
            S3ErrorCode::IncompleteBody => "You did not provide the number of bytes specified by the Content-Length HTTP header.".to_string(),
            S3ErrorCode::RequestTimeTooSkewed => "The difference between the request time and the server's time is too large.".to_string(),
            S3ErrorCode::InvalidRegion => "Region does not match.".to_string(),
            S3ErrorCode::SlowDown => "Resource requested is unreadable, please reduce your request rate".to_string(),
            S3ErrorCode::KeyTooLongError => "Your key is too long".to_string(),
            S3ErrorCode::NoSuchTagSet => "The TagSet does not exist".to_string(),
            S3ErrorCode::ObjectLockConfigurationNotFoundError => "Object Lock configuration does not exist for this bucket".to_string(),
            S3ErrorCode::InvalidBucketState => "Object Lock configuration cannot be enabled on existing buckets".to_string(),
            S3ErrorCode::NoSuchCORSConfiguration => "The CORS configuration does not exist".to_string(),
            S3ErrorCode::NoSuchWebsiteConfiguration => "The specified bucket does not have a website configuration".to_string(),
            S3ErrorCode::NoSuchObjectLockConfiguration => "The specified object does not have a ObjectLock configuration".to_string(),
            S3ErrorCode::MetadataTooLarge => "Your metadata headers exceed the maximum allowed metadata size.".to_string(),
            S3ErrorCode::ServiceUnavailable => "The service is unavailable. Please retry.".to_string(),
            S3ErrorCode::Busy => "The service is unavailable. Please retry.".to_string(),
            S3ErrorCode::EmptyRequestBody => "Request body cannot be empty.".to_string(),
            S3ErrorCode::UnauthorizedAccess => "You are not authorized to perform this operation".to_string(),
            S3ErrorCode::ExpressionTooLong => "The SQL expression is too long: The maximum byte-length for the SQL expression is 256 KB.".to_string(),
            S3ErrorCode::IllegalSqlFunctionArgument => "Illegal argument was used in the SQL function.".to_string(),
            S3ErrorCode::InvalidKeyPath => "Key path in the SQL expression is invalid.".to_string(),
            S3ErrorCode::InvalidCompressionFormat => "The file is not in a supported compression format. Only GZIP is supported at this time.".to_string(),
            S3ErrorCode::InvalidFileHeaderInfo => "The FileHeaderInfo is invalid. Only NONE, USE, and IGNORE are supported.".to_string(),
            S3ErrorCode::InvalidJsonType => "The JsonType is invalid. Only DOCUMENT and LINES are supported at this time.".to_string(),
            S3ErrorCode::InvalidQuoteFields => "The QuoteFields is invalid. Only ALWAYS and ASNEEDED are supported.".to_string(),
            S3ErrorCode::InvalidRequestParameter => "The value of a parameter in SelectRequest element is invalid. Check the service API documentation and try again.".to_string(),
            S3ErrorCode::InvalidDataSource => "Invalid data source type. Only CSV and JSON are supported at this time.".to_string(),
            S3ErrorCode::InvalidExpressionType => "The ExpressionType is invalid. Only SQL expressions are supported at this time.".to_string(),
            S3ErrorCode::InvalidDataType => "The SQL expression contains an invalid data type.".to_string(),
            S3ErrorCode::InvalidTextEncoding => "Invalid encoding type. Only UTF-8 encoding is supported at this time.".to_string(),
            S3ErrorCode::InvalidTableAlias => "The SQL expression contains an invalid table alias.".to_string(),
            S3ErrorCode::MissingRequiredParameter => "The SelectRequest entity is missing a required parameter. Check the service documentation and try again.".to_string(),
            S3ErrorCode::ObjectSerializationConflict => "The SelectRequest entity can only contain one of CSV or JSON. Check the service documentation and try again.".to_string(),
            S3ErrorCode::UnsupportedSqlOperation => "Encountered an unsupported SQL operation.".to_string(),
            S3ErrorCode::UnsupportedSqlStructure => "Encountered an unsupported SQL structure. Check the SQL Reference.".to_string(),
            S3ErrorCode::UnsupportedSyntax => "Encountered invalid syntax.".to_string(),
            S3ErrorCode::UnsupportedRangeHeader => "Range header is not supported for this operation.".to_string(),
            S3ErrorCode::LexerInvalidChar => "The SQL expression contains an invalid character.".to_string(),
            // NOTE(review): the literal/operator wording below looks swapped
            // relative to the variant names, and IONLiteral repeats "operator".
            // This appears to mirror the upstream S3 Select error table —
            // confirm against AWS docs before "fixing".
            S3ErrorCode::LexerInvalidOperator => "The SQL expression contains an invalid literal.".to_string(),
            S3ErrorCode::LexerInvalidLiteral => "The SQL expression contains an invalid operator.".to_string(),
            S3ErrorCode::LexerInvalidIONLiteral => "The SQL expression contains an invalid operator.".to_string(),
            S3ErrorCode::ParseExpectedDatePart => "Did not find the expected date part in the SQL expression.".to_string(),
            S3ErrorCode::ParseExpectedKeyword => "Did not find the expected keyword in the SQL expression.".to_string(),
            S3ErrorCode::ParseExpectedTokenType => "Did not find the expected token in the SQL expression.".to_string(),
            S3ErrorCode::ParseExpected2TokenTypes => "Did not find the expected token in the SQL expression.".to_string(),
            S3ErrorCode::ParseExpectedNumber => "Did not find the expected number in the SQL expression.".to_string(),
            S3ErrorCode::ParseExpectedRightParenBuiltinFunctionCall => "Did not find the expected right parenthesis character in the SQL expression.".to_string(),
            S3ErrorCode::ParseExpectedTypeName => "Did not find the expected type name in the SQL expression.".to_string(),
            S3ErrorCode::ParseExpectedWhenClause => "Did not find the expected WHEN clause in the SQL expression. CASE is not supported.".to_string(),
            S3ErrorCode::ParseUnsupportedToken => "The SQL expression contains an unsupported token.".to_string(),
            S3ErrorCode::ParseUnsupportedLiteralsGroupBy => "The SQL expression contains an unsupported use of GROUP BY.".to_string(),
            S3ErrorCode::ParseExpectedMember => "The SQL expression contains an unsupported use of MEMBER.".to_string(),
            S3ErrorCode::ParseUnsupportedSelect => "The SQL expression contains an unsupported use of SELECT.".to_string(),
            S3ErrorCode::ParseUnsupportedCase => "The SQL expression contains an unsupported use of CASE.".to_string(),
            S3ErrorCode::ParseUnsupportedCaseClause => "The SQL expression contains an unsupported use of CASE.".to_string(),
            S3ErrorCode::ParseUnsupportedAlias => "The SQL expression contains an unsupported use of ALIAS.".to_string(),
            S3ErrorCode::ParseUnsupportedSyntax => "The SQL expression contains unsupported syntax.".to_string(),
            S3ErrorCode::ParseUnknownOperator => "The SQL expression contains an invalid operator.".to_string(),
            S3ErrorCode::ParseMissingIdentAfterAt => "Did not find the expected identifier after the @ symbol in the SQL expression.".to_string(),
            S3ErrorCode::ParseUnexpectedOperator => "The SQL expression contains an unexpected operator.".to_string(),
            S3ErrorCode::ParseUnexpectedTerm => "The SQL expression contains an unexpected term.".to_string(),
            S3ErrorCode::ParseUnexpectedToken => "The SQL expression contains an unexpected token.".to_string(),
            S3ErrorCode::ParseExpectedExpression => "Did not find the expected SQL expression.".to_string(),
            S3ErrorCode::ParseExpectedLeftParenAfterCast => "Did not find expected the left parenthesis in the SQL expression.".to_string(),
            S3ErrorCode::ParseExpectedLeftParenValueConstructor => "Did not find expected the left parenthesis in the SQL expression.".to_string(),
            S3ErrorCode::ParseExpectedLeftParenBuiltinFunctionCall => "Did not find the expected left parenthesis in the SQL expression.".to_string(),
            S3ErrorCode::ParseExpectedArgumentDelimiter => "Did not find the expected argument delimiter in the SQL expression.".to_string(),
            S3ErrorCode::ParseCastArity => "The SQL expression CAST has incorrect arity.".to_string(),
            S3ErrorCode::ParseInvalidTypeParam => "The SQL expression contains an invalid parameter value.".to_string(),
            S3ErrorCode::ParseEmptySelect => "The SQL expression contains an empty SELECT.".to_string(),
            // NOTE(review): ParseSelectMissingFrom reusing the "GROUP is not
            // supported" text looks like a copy-paste from the GroupName arm —
            // confirm against the upstream error table.
            S3ErrorCode::ParseSelectMissingFrom => "GROUP is not supported in the SQL expression.".to_string(),
            S3ErrorCode::ParseExpectedIdentForGroupName => "GROUP is not supported in the SQL expression.".to_string(),
            S3ErrorCode::ParseExpectedIdentForAlias => "Did not find the expected identifier for the alias in the SQL expression.".to_string(),
            S3ErrorCode::ParseUnsupportedCallWithStar => "Only COUNT with (*) as a parameter is supported in the SQL expression.".to_string(),
            S3ErrorCode::ParseNonUnaryAgregateFunctionCall => "Only one argument is supported for aggregate functions in the SQL expression.".to_string(),
            S3ErrorCode::ParseMalformedJoin => "JOIN is not supported in the SQL expression.".to_string(),
            S3ErrorCode::ParseExpectedIdentForAt => "Did not find the expected identifier for AT name in the SQL expression.".to_string(),
            S3ErrorCode::ParseAsteriskIsNotAloneInSelectList => "Other expressions are not allowed in the SELECT list when '*' is used without dot notation in the SQL expression.".to_string(),
            S3ErrorCode::ParseCannotMixSqbAndWildcardInSelectList => "Cannot mix [] and * in the same expression in a SELECT list in SQL expression.".to_string(),
            S3ErrorCode::ParseInvalidContextForWildcardInSelectList => "Invalid use of * in SELECT list in the SQL expression.".to_string(),
            S3ErrorCode::IncorrectSqlFunctionArgumentType => "Incorrect type of arguments in function call in the SQL expression.".to_string(),
            S3ErrorCode::ValueParseFailure => "Time stamp parse failure in the SQL expression.".to_string(),
            S3ErrorCode::EvaluatorInvalidArguments => "Incorrect number of arguments in the function call in the SQL expression.".to_string(),
            S3ErrorCode::IntegerOverflow => "Int overflow or underflow in the SQL expression.".to_string(),
            S3ErrorCode::LikeInvalidInputs => "Invalid argument given to the LIKE clause in the SQL expression.".to_string(),
            S3ErrorCode::CastFailed => "Attempt to convert from one data type to another using CAST failed in the SQL expression.".to_string(),
            S3ErrorCode::InvalidCast => "Attempt to convert from one data type to another using CAST failed in the SQL expression.".to_string(),
            S3ErrorCode::EvaluatorInvalidTimestampFormatPattern => "Time stamp format pattern requires additional fields in the SQL expression.".to_string(),
            S3ErrorCode::EvaluatorInvalidTimestampFormatPatternSymbolForParsing => "Time stamp format pattern contains a valid format symbol that cannot be applied to time stamp parsing in the SQL expression.".to_string(),
            S3ErrorCode::EvaluatorTimestampFormatPatternDuplicateFields => "Time stamp format pattern contains multiple format specifiers representing the time stamp field in the SQL expression.".to_string(),
            // NOTE(review): the three descriptions below appear shifted by one
            // relative to their variant names (AmPmMismatch says "unterminated
            // token", Unterminated…Token says "invalid token") — verify against
            // the upstream error table before changing.
            S3ErrorCode::EvaluatorTimestampFormatPatternHourClockAmPmMismatch => "Time stamp format pattern contains unterminated token in the SQL expression.".to_string(),
            S3ErrorCode::EvaluatorUnterminatedTimestampFormatPatternToken => "Time stamp format pattern contains an invalid token in the SQL expression.".to_string(),
            S3ErrorCode::EvaluatorInvalidTimestampFormatPatternToken => "Time stamp format pattern contains an invalid token in the SQL expression.".to_string(),
            S3ErrorCode::EvaluatorInvalidTimestampFormatPatternSymbol => "Time stamp format pattern contains an invalid symbol in the SQL expression.".to_string(),
            S3ErrorCode::EvaluatorBindingDoesNotExist => "A column name or a path provided does not exist in the SQL expression".to_string(),
            S3ErrorCode::InvalidColumnIndex => "The column index is invalid. Please check the service documentation and try again.".to_string(),
            S3ErrorCode::UnsupportedFunction => "Encountered an unsupported SQL function.".to_string(),
            // Fallback: use the code's canonical name as the message.
            _ => code.as_str().to_string(),
        }
    }
}
impl From<ApiError> for S3Error {
fn from(err: ApiError) -> Self {
let mut s3e = S3Error::with_message(err.code, err.message);
if let Some(source) = err.source {
s3e.set_source(source);
}
s3e
}
}
impl From<StorageError> for ApiError {
    /// Map a storage-layer error to an API error.
    ///
    /// Checksum failures hidden inside `StorageError::Io` become `BadDigest`;
    /// other variants map to their closest S3 code, defaulting to
    /// `InternalError` (which keeps the original error text as the message).
    /// The original error is always preserved as the source.
    fn from(err: StorageError) -> Self {
        // Special handling for Io errors that may contain ChecksumMismatch
        if let StorageError::Io(ref io_err) = err
            && let Some(inner) = io_err.get_ref()
            && (inner.downcast_ref::<rustfs_rio::ChecksumMismatch>().is_some()
                || inner.downcast_ref::<rustfs_rio::BadDigest>().is_some())
        {
            return ApiError {
                code: S3ErrorCode::BadDigest,
                message: ApiError::error_code_to_message(&S3ErrorCode::BadDigest),
                source: Some(Box::new(err)),
            };
        }
        let code = match &err {
            StorageError::NotImplemented => S3ErrorCode::NotImplemented,
            StorageError::InvalidArgument(_, _, _) => S3ErrorCode::InvalidArgument,
            StorageError::MethodNotAllowed => S3ErrorCode::MethodNotAllowed,
            StorageError::BucketNotFound(_) => S3ErrorCode::NoSuchBucket,
            StorageError::BucketNotEmpty(_) => S3ErrorCode::BucketNotEmpty,
            StorageError::BucketNameInvalid(_) => S3ErrorCode::InvalidBucketName,
            StorageError::ObjectNameInvalid(_, _) => S3ErrorCode::InvalidArgument,
            StorageError::BucketExists(_) => S3ErrorCode::BucketAlreadyOwnedByYou,
            StorageError::StorageFull => S3ErrorCode::ServiceUnavailable,
            StorageError::SlowDown => S3ErrorCode::SlowDown,
            StorageError::PrefixAccessDenied(_, _) => S3ErrorCode::AccessDenied,
            StorageError::InvalidUploadIDKeyCombination(_, _) => S3ErrorCode::InvalidArgument,
            StorageError::ObjectNameTooLong(_, _) => S3ErrorCode::InvalidArgument,
            StorageError::ObjectNamePrefixAsSlash(_, _) => S3ErrorCode::InvalidArgument,
            StorageError::ObjectNotFound(_, _) => S3ErrorCode::NoSuchKey,
            StorageError::ConfigNotFound => S3ErrorCode::NoSuchKey,
            StorageError::VolumeNotFound => S3ErrorCode::NoSuchBucket,
            StorageError::FileNotFound => S3ErrorCode::NoSuchKey,
            StorageError::FileVersionNotFound => S3ErrorCode::NoSuchVersion,
            StorageError::VersionNotFound(_, _, _) => S3ErrorCode::NoSuchVersion,
            StorageError::InvalidUploadID(_, _, _) => S3ErrorCode::InvalidPart,
            StorageError::InvalidVersionID(_, _, _) => S3ErrorCode::InvalidArgument,
            StorageError::DataMovementOverwriteErr(_, _, _) => S3ErrorCode::InvalidArgument,
            StorageError::ObjectExistsAsDirectory(_, _) => S3ErrorCode::InvalidArgument,
            StorageError::InvalidPart(_, _, _) => S3ErrorCode::InvalidPart,
            StorageError::EntityTooSmall(_, _, _) => S3ErrorCode::EntityTooSmall,
            StorageError::PreconditionFailed => S3ErrorCode::PreconditionFailed,
            StorageError::InvalidRangeSpec(_) => S3ErrorCode::InvalidRange,
            _ => S3ErrorCode::InternalError,
        };
        // For internal errors, surface the original error text; for mapped
        // codes, use the canonical S3 message.
        let message = if code == S3ErrorCode::InternalError {
            err.to_string()
        } else {
            ApiError::error_code_to_message(&code)
        };
        ApiError {
            code,
            message,
            source: Some(Box::new(err)),
        }
    }
}
impl From<std::io::Error> for ApiError {
fn from(err: std::io::Error) -> Self {
// Check if the error is a ChecksumMismatch (BadDigest)
if let Some(inner) = err.get_ref() {
if inner.downcast_ref::<rustfs_rio::ChecksumMismatch>().is_some() {
return ApiError {
code: S3ErrorCode::BadDigest,
message: ApiError::error_code_to_message(&S3ErrorCode::BadDigest),
source: Some(Box::new(err)),
};
}
if inner.downcast_ref::<rustfs_rio::BadDigest>().is_some() {
return ApiError {
code: S3ErrorCode::BadDigest,
message: ApiError::error_code_to_message(&S3ErrorCode::BadDigest),
source: Some(Box::new(err)),
};
}
}
ApiError {
code: S3ErrorCode::InternalError,
message: err.to_string(),
source: Some(Box::new(err)),
}
}
}
impl From<rustfs_iam::error::Error> for ApiError {
    /// IAM errors reuse the storage mapping: convert to `StorageError` first,
    /// then apply the standard storage-to-API conversion.
    fn from(err: rustfs_iam::error::Error) -> Self {
        let storage_error: StorageError = err.into();
        storage_error.into()
    }
}
#[cfg(test)]
mod tests {
use super::*;
use s3s::{S3Error, S3ErrorCode};
use std::io::{Error as IoError, ErrorKind};
// A plain io::Error maps to InternalError, keeps its message text, and is
// retained as the source.
#[test]
fn test_api_error_from_io_error() {
    let io_error = IoError::new(ErrorKind::PermissionDenied, "permission denied");
    let api_error: ApiError = io_error.into();
    assert_eq!(api_error.code, S3ErrorCode::InternalError);
    assert!(api_error.message.contains("permission denied"));
    assert!(api_error.source.is_some());
}
// The io::Error conversion behaves identically across ErrorKinds, and the
// stored source can be downcast back to io::Error with its kind intact.
#[test]
fn test_api_error_from_io_error_different_kinds() {
    let test_cases = vec![
        (ErrorKind::NotFound, "not found"),
        (ErrorKind::InvalidInput, "invalid input"),
        (ErrorKind::TimedOut, "timed out"),
        (ErrorKind::WriteZero, "write zero"),
        (ErrorKind::Other, "other error"),
    ];
    for (kind, message) in test_cases {
        let io_error = IoError::new(kind, message);
        let api_error: ApiError = io_error.into();
        assert_eq!(api_error.code, S3ErrorCode::InternalError);
        assert!(api_error.message.contains(message));
        assert!(api_error.source.is_some());
        // Test that source can be downcast back to io::Error
        let source = api_error.source.as_ref().unwrap();
        let downcast_io_error = source.downcast_ref::<IoError>();
        assert!(downcast_io_error.is_some());
        assert_eq!(downcast_io_error.unwrap().kind(), kind);
    }
}
// ApiError::other wraps a displayable value as InternalError with the value's
// text as the message and a populated source.
#[test]
fn test_api_error_other_function() {
    let custom_error = "Custom API error";
    let api_error = ApiError::other(custom_error);
    assert_eq!(api_error.code, S3ErrorCode::InternalError);
    assert_eq!(api_error.message, custom_error);
    assert!(api_error.source.is_some());
}
// ApiError::other keeps a structured error downcastable from the source.
#[test]
fn test_api_error_other_function_with_complex_error() {
    let io_error = IoError::new(ErrorKind::InvalidData, "complex error");
    let api_error = ApiError::other(io_error);
    assert_eq!(api_error.code, S3ErrorCode::InternalError);
    assert!(api_error.message.contains("complex error"));
    assert!(api_error.source.is_some());
    // Test that source can be downcast back to io::Error
    let source = api_error.source.as_ref().unwrap();
    let downcast_io_error = source.downcast_ref::<IoError>();
    assert!(downcast_io_error.is_some());
    assert_eq!(downcast_io_error.unwrap().kind(), ErrorKind::InvalidData);
}
// A StorageError maps to the expected S3 code and stays downcastable.
#[test]
fn test_api_error_from_storage_error() {
    let storage_error = StorageError::BucketNotFound("test-bucket".to_string());
    let api_error: ApiError = storage_error.into();
    assert_eq!(api_error.code, S3ErrorCode::NoSuchBucket);
    assert!(api_error.source.is_some());
    // Test that source can be downcast back to StorageError
    let source = api_error.source.as_ref().unwrap();
    let downcast_storage_error = source.downcast_ref::<StorageError>();
    assert!(downcast_storage_error.is_some());
}
// Table-driven check of the StorageError -> S3ErrorCode mapping.
#[test]
fn test_api_error_from_storage_error_mappings() {
    let test_cases = vec![
        (StorageError::NotImplemented, S3ErrorCode::NotImplemented),
        (
            StorageError::InvalidArgument("test".into(), "test".into(), "test".into()),
            S3ErrorCode::InvalidArgument,
        ),
        (StorageError::MethodNotAllowed, S3ErrorCode::MethodNotAllowed),
        (StorageError::BucketNotFound("test".into()), S3ErrorCode::NoSuchBucket),
        (StorageError::BucketNotEmpty("test".into()), S3ErrorCode::BucketNotEmpty),
        (StorageError::BucketNameInvalid("test".into()), S3ErrorCode::InvalidBucketName),
        (
            StorageError::ObjectNameInvalid("test".into(), "test".into()),
            S3ErrorCode::InvalidArgument,
        ),
        (StorageError::BucketExists("test".into()), S3ErrorCode::BucketAlreadyOwnedByYou),
        (StorageError::StorageFull, S3ErrorCode::ServiceUnavailable),
        (StorageError::SlowDown, S3ErrorCode::SlowDown),
        (StorageError::PrefixAccessDenied("test".into(), "test".into()), S3ErrorCode::AccessDenied),
        (StorageError::ObjectNotFound("test".into(), "test".into()), S3ErrorCode::NoSuchKey),
        (StorageError::ConfigNotFound, S3ErrorCode::NoSuchKey),
        (StorageError::VolumeNotFound, S3ErrorCode::NoSuchBucket),
        (StorageError::FileNotFound, S3ErrorCode::NoSuchKey),
        (StorageError::FileVersionNotFound, S3ErrorCode::NoSuchVersion),
    ];
    for (storage_error, expected_code) in test_cases {
        let api_error: ApiError = storage_error.into();
        assert_eq!(api_error.code, expected_code);
        assert!(api_error.source.is_some());
    }
}
// An IAM error converts (via StorageError) and keeps its text and source.
#[test]
fn test_api_error_from_iam_error() {
    let iam_error = rustfs_iam::error::Error::other("IAM test error");
    let api_error: ApiError = iam_error.into();
    // IAM error is first converted to StorageError, then to ApiError
    assert!(api_error.source.is_some());
    assert!(api_error.message.contains("test error"));
}
// ApiError -> S3Error carries over code, message, and the source chain.
#[test]
fn test_api_error_to_s3_error() {
    let api_error = ApiError {
        code: S3ErrorCode::NoSuchBucket,
        message: "Bucket not found".to_string(),
        source: Some(Box::new(IoError::new(ErrorKind::NotFound, "not found"))),
    };
    let s3_error: S3Error = api_error.into();
    assert_eq!(*s3_error.code(), S3ErrorCode::NoSuchBucket);
    assert!(s3_error.message().unwrap_or("").contains("Bucket not found"));
    assert!(s3_error.source().is_some());
}
// ApiError -> S3Error works without a source error.
#[test]
fn test_api_error_to_s3_error_without_source() {
    let api_error = ApiError {
        code: S3ErrorCode::InvalidArgument,
        message: "Invalid argument".to_string(),
        source: None,
    };
    let s3_error: S3Error = api_error.into();
    assert_eq!(*s3_error.code(), S3ErrorCode::InvalidArgument);
    assert!(s3_error.message().unwrap_or("").contains("Invalid argument"));
}
#[test]
fn test_api_error_display() {
let api_error = ApiError {
code: S3ErrorCode::InternalError,
message: "Test error message".to_string(),
source: None,
};
assert_eq!(api_error.to_string(), "Test error message");
}
#[test]
fn test_api_error_debug() {
let api_error = ApiError {
code: S3ErrorCode::NoSuchKey,
message: "Object not found".to_string(),
source: Some(Box::new(IoError::new(ErrorKind::NotFound, "file not found"))),
};
let debug_str = format!("{api_error:?}");
assert!(debug_str.contains("NoSuchKey"));
assert!(debug_str.contains("Object not found"));
}
#[test]
fn test_api_error_roundtrip_through_io_error() {
let original_io_error = IoError::new(ErrorKind::PermissionDenied, "original permission error");
// Convert to ApiError
let api_error: ApiError = original_io_error.into();
// Verify the conversion preserved the information
assert_eq!(api_error.code, S3ErrorCode::InternalError);
assert!(api_error.message.contains("original permission error"));
assert!(api_error.source.is_some());
// Test that we can downcast back to the original io::Error
let source = api_error.source.as_ref().unwrap();
let downcast_io_error = source.downcast_ref::<IoError>();
assert!(downcast_io_error.is_some());
assert_eq!(downcast_io_error.unwrap().kind(), ErrorKind::PermissionDenied);
assert!(downcast_io_error.unwrap().to_string().contains("original permission error"));
}
#[test]
fn test_api_error_chain_conversion() {
// Start with an io::Error
let io_error = IoError::new(ErrorKind::InvalidData, "invalid data");
// Convert to StorageError (simulating what happens in the codebase)
let storage_error = StorageError::other(io_error);
// Convert to ApiError
let api_error: ApiError = storage_error.into();
// Verify the chain is preserved
assert!(api_error.source.is_some());
// Check that we can still access the original error information
let source = api_error.source.as_ref().unwrap();
let downcast_storage_error = source.downcast_ref::<StorageError>();
assert!(downcast_storage_error.is_some());
}
#[test]
fn test_api_error_error_trait_implementation() {
let api_error = ApiError {
code: S3ErrorCode::InternalError,
message: "Test error".to_string(),
source: Some(Box::new(IoError::other("source error"))),
};
// Test that it implements std::error::Error
let error: &dyn std::error::Error = &api_error;
assert_eq!(error.to_string(), "Test error");
// ApiError doesn't implement Error::source() properly, so this would be None
// This is expected because ApiError is not a typical Error implementation
assert!(error.source().is_none());
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/license.rs | rustfs/src/license.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_appauth::token::Token;
use std::io::{Error, Result};
use std::sync::OnceLock;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;
use tracing::error;
use tracing::info;
static LICENSE: OnceLock<Token> = OnceLock::new();
/// Parse the given license string and install it in the process-wide slot.
///
/// A `None` license and a failed install are both logged and otherwise
/// ignored; a license that fails to parse is stored as a default `Token`.
pub fn init_license(license: Option<String>) {
    let Some(license) = license else {
        error!("License is None");
        return;
    };
    let token = rustfs_appauth::token::parse_license(&license).unwrap_or_default();
    if LICENSE.set(token).is_err() {
        error!("Failed to set license");
    }
}
/// Return a clone of the installed license token, if one has been set.
pub fn get_license() -> Option<Token> {
    LICENSE.get().map(|token| token.clone())
}
/// Check the license
/// This function checks if the license is valid.
///
/// NOTE: license enforcement is currently DISABLED — the unconditional
/// `return Ok(())` below short-circuits the whole check, and the
/// `#[allow(unreachable_code)]` attribute marks this as deliberate.
/// The code after it documents the intended behavior once enforcement
/// is re-enabled.
#[allow(unreachable_code)]
pub fn license_check() -> Result<()> {
return Ok(());
// Intended logic: None means no license was installed at all;
// Some(Err) means the installed license is expired.
let invalid_license = LICENSE.get().map(|token| {
if token.expired < SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() {
error!("License expired");
return Err(Error::other("Incorrect license, please contact RustFS."));
}
info!("License is valid ! expired at {}", token.expired);
Ok(())
});
// let invalid_license = config::get_config().license.as_ref().map(|license| {
//     if license.is_empty() {
//         error!("License is empty");
//         return Err(Error::other("Incorrect license, please contact RustFS.".to_string()));
//     }
//     let token = appauth::token::parse_license(license)?;
//     if token.expired < SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs() {
//         error!("License expired");
//         return Err(Error::other("Incorrect license, please contact RustFS.".to_string()));
//     }
//     info!("License is valid ! expired at {}", token.expired);
//     Ok(())
// });
if invalid_license.is_none() || invalid_license.is_some_and(|v| v.is_err()) {
return Err(Error::other("Incorrect license, please contact RustFS."));
}
Ok(())
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/auth.rs | rustfs/src/auth.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use http::HeaderMap;
use http::Uri;
use rustfs_credentials::{Credentials, get_global_action_cred};
use rustfs_iam::error::Error as IamError;
use rustfs_iam::sys::SESSION_POLICY_NAME;
use rustfs_iam::sys::get_claims_from_token_with_secret;
use rustfs_utils::http::ip::get_source_ip_raw;
use s3s::S3Error;
use s3s::S3ErrorCode;
use s3s::S3Result;
use s3s::auth::S3Auth;
use s3s::auth::SecretKey;
use s3s::auth::SimpleAuth;
use s3s::s3_error;
use serde_json::Value;
use std::collections::HashMap;
use subtle::ConstantTimeEq;
use time::OffsetDateTime;
use time::format_description::well_known::Rfc3339;
/// Compares two strings in constant time to defend against timing attacks.
///
/// Use this whenever the comparison involves secrets — passwords, API keys,
/// session tokens: the time taken does not depend on where the inputs first
/// differ, and differing lengths are handled without leaking timing.
///
/// # Security Note
/// Built on the `subtle` crate's `ConstantTimeEq`, which provides
/// cryptographically sound constant-time guarantees and is suitable for
/// security-critical comparisons.
///
/// # Example
/// ```
/// use rustfs::auth::constant_time_eq;
///
/// assert!(constant_time_eq("my-secret-key", "my-secret-key"));
/// assert!(!constant_time_eq("my-secret-key", "wrong-secret"));
/// ```
pub fn constant_time_eq(a: &str, b: &str) -> bool {
    bool::from(a.as_bytes().ct_eq(b.as_bytes()))
}
// Authentication type constants
// Prefix of a JWT bearer Authorization header.
const JWT_ALGORITHM: &str = "Bearer ";
// Prefix of an AWS Signature V2 Authorization header.
const SIGN_V2_ALGORITHM: &str = "AWS ";
// Algorithm tag of an AWS Signature V4 Authorization header.
const SIGN_V4_ALGORITHM: &str = "AWS4-HMAC-SHA256";
// x-amz-content-sha256 values that mark streaming (chunked) uploads.
const STREAMING_CONTENT_SHA256: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";
const STREAMING_CONTENT_SHA256_TRAILER: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER";
pub(crate) const UNSIGNED_PAYLOAD_TRAILER: &str = "STREAMING-UNSIGNED-PAYLOAD-TRAILER";
// Markers used to recognize STS and presigned requests.
const ACTION_HEADER: &str = "Action";
const AMZ_CREDENTIAL: &str = "X-Amz-Credential";
const AMZ_ACCESS_KEY_ID: &str = "AWSAccessKeyId";
pub const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD";
// Authentication type enum
// How a request authenticated itself, as derived from its headers by
// get_request_auth_type().
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub enum AuthType {
#[default]
Unknown,
Anonymous,
Presigned,
PresignedV2,
PostPolicy,
StreamingSigned,
Signed,
SignedV2,
#[allow(clippy::upper_case_acronyms)]
JWT,
#[allow(clippy::upper_case_acronyms)]
STS,
StreamingSignedTrailer,
StreamingUnsignedTrailer,
}
// Authenticator bridging s3s signature verification to the IAM store.
#[derive(Debug)]
pub struct IAMAuth {
// Static credential consulted before falling back to the IAM store.
simple_auth: SimpleAuth,
}
impl Clone for IAMAuth {
fn clone(&self) -> Self {
// Since SimpleAuth doesn't implement Clone, we create a new one
// This is a simplified implementation - in a real scenario, you might need
// to store the credentials separately to properly clone
// NOTE(review): the clone starts with an EMPTY SimpleAuth, so it loses the
// static credential passed to `new` — a cloned instance resolves keys only
// through the IAM store. Confirm this is intended before relying on Clone.
Self {
simple_auth: SimpleAuth::new(),
}
}
}
impl IAMAuth {
    /// Create an authenticator seeded with a single static access/secret key pair.
    pub fn new(ak: impl Into<String>, sk: impl Into<SecretKey>) -> Self {
        Self {
            simple_auth: SimpleAuth::from_single(ak, sk),
        }
    }
}
#[async_trait::async_trait]
impl S3Auth for IAMAuth {
/// Resolve the secret key for `access_key`: first the static credential,
/// then the IAM store. Returns UnauthorizedAccess when neither knows it.
async fn get_secret_key(&self, access_key: &str) -> S3Result<SecretKey> {
if access_key.is_empty() {
return Err(s3_error!(UnauthorizedAccess, "Your account is not signed up"));
}
// Fast path: the static credential configured at startup.
if let Ok(key) = self.simple_auth.get_secret_key(access_key).await {
return Ok(key);
}
if let Ok(iam_store) = rustfs_iam::get() {
// Use check_key instead of get_user to ensure user is loaded from disk if not in cache
// This is important for newly created users that may not be in cache yet.
// check_key will automatically attempt to load the user from disk if not found in cache.
match iam_store.check_key(access_key).await {
Ok((Some(id), _valid)) => {
// Return secret key for signature verification regardless of user status.
// Authorization will be checked separately in the authorization phase.
return Ok(SecretKey::from(id.credentials.secret_key.clone()));
}
Ok((None, _)) => {
tracing::warn!("get_secret_key failed: no such user, access_key: {access_key}");
}
Err(e) => {
tracing::warn!("get_secret_key failed: check_key error, access_key: {access_key}, error: {e:?}");
}
}
} else {
tracing::warn!("get_secret_key failed: iam not initialized, access_key: {access_key}");
}
// NOTE(review): the "signed up2" suffix looks like leftover debugging text
// in a user-visible error message — confirm before cleaning up.
Err(s3_error!(UnauthorizedAccess, "Your account is not signed up2, access_key: {access_key}"))
}
}
// check_key_valid checks the key is valid or not. return the user's credentials and if the user is the owner.
pub async fn check_key_valid(session_token: &str, access_key: &str) -> S3Result<(Credentials, bool)> {
let Some(mut cred) = get_global_action_cred() else {
return Err(S3Error::with_message(
S3ErrorCode::InternalError,
format!("get_global_action_cred {:?}", IamError::IamSysNotInitialized),
));
};
let sys_cred = cred.clone();
// If the caller is not the root credential itself, resolve it via IAM.
if !constant_time_eq(&cred.access_key, access_key) {
let Ok(iam_store) = rustfs_iam::get() else {
return Err(S3Error::with_message(
S3ErrorCode::InternalError,
format!("check_key_valid {:?}", IamError::IamSysNotInitialized),
));
};
let (u, ok) = iam_store
.check_key(access_key)
.await
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("check claims failed1 {e}")))?;
if !ok {
// NOTE(review): both branches below return the identical error, so the
// disabled-account check is currently redundant; the non-disabled case
// was probably meant to report a different error — confirm intent.
if let Some(u) = u
&& u.credentials.status == "off"
{
return Err(s3_error!(InvalidRequest, "ErrAccessKeyDisabled"));
}
return Err(s3_error!(InvalidRequest, "ErrAccessKeyDisabled"));
}
let Some(u) = u else {
return Err(s3_error!(InvalidRequest, "check key failed"));
};
cred = u.credentials;
}
let claims = check_claims_from_token(session_token, &cred)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("check claims failed {e}")))?;
cred.claims = if !claims.is_empty() { Some(claims) } else { None };
// Owner: the root account itself, or a credential whose parent is root.
let mut owner =
constant_time_eq(&sys_cred.access_key, &cred.access_key) || constant_time_eq(&cred.parent_user, &sys_cred.access_key);
// permitRootAccess
// A session policy restricts the credential, so it never keeps owner rights.
if let Some(claims) = &cred.claims
&& claims.contains_key(SESSION_POLICY_NAME)
{
owner = false
}
Ok((cred, owner))
}
/// Validates the session token against the kind of credential presented
/// (temporary, service account, or long-lived) and returns the decoded
/// claims; an empty map when no token applies. The checks below are
/// order-sensitive.
pub fn check_claims_from_token(token: &str, cred: &Credentials) -> S3Result<HashMap<String, Value>> {
if !token.is_empty() && cred.access_key.is_empty() {
return Err(s3_error!(InvalidRequest, "no access key"));
}
// Temporary (non-service-account) credentials must carry a session token.
if token.is_empty() && cred.is_temp() && !cred.is_service_account() {
return Err(s3_error!(InvalidRequest, "invalid token1"));
}
// Long-lived credentials must not carry one.
if !token.is_empty() && !cred.is_temp() {
return Err(s3_error!(InvalidRequest, "invalid token2"));
}
// The presented token must match the one stored with the temp credential.
if !cred.is_service_account() && cred.is_temp() && token != cred.session_token {
return Err(s3_error!(InvalidRequest, "invalid token3"));
}
if cred.is_temp() && cred.is_expired() {
return Err(s3_error!(InvalidRequest, "invalid access key is temp and expired"));
}
let Some(sys_cred) = get_global_action_cred() else {
return Err(s3_error!(InternalError, "action cred not init"));
};
// TODO: REPLICATION
// Service-account tokens are verified with the account's own secret;
// all other tokens are verified with the system credential's secret.
let (token, secret) = if cred.is_service_account() {
(cred.session_token.as_str(), cred.secret_key.as_str())
} else {
(token, sys_cred.secret_key.as_str())
};
if !token.is_empty() {
let claims: HashMap<String, Value> =
get_claims_from_token_with_secret(token, secret).map_err(|_e| s3_error!(InvalidRequest, "invalid token"))?;
return Ok(claims);
}
Ok(HashMap::new())
}
/// Extract the STS session token, preferring the `x-amz-security-token`
/// header over the query-string parameter of the same name.
pub fn get_session_token<'a>(uri: &'a Uri, hds: &'a HeaderMap) -> Option<&'a str> {
    match hds.get("x-amz-security-token") {
        Some(value) => Some(value.to_str().unwrap_or_default()),
        None => get_query_param(uri.query().unwrap_or_default(), "x-amz-security-token"),
    }
}
/// Get condition values for policy evaluation
///
/// # Arguments
/// * `header` - HTTP headers of the request
/// * `cred` - User credentials
/// * `version_id` - Optional version ID of the object
/// * `region` - Optional region/location constraint
/// * `remote_addr` - Optional remote address of the connection
///
/// # Returns
/// * `HashMap<String, Vec<String>>` - Condition values for policy evaluation
///
pub fn get_condition_values(
header: &HeaderMap,
cred: &Credentials,
version_id: Option<&str>,
region: Option<&str>,
remote_addr: Option<std::net::SocketAddr>,
) -> HashMap<String, Vec<String>> {
// Temp and service-account credentials act on behalf of their parent user.
let username = if cred.is_temp() || cred.is_service_account() {
cred.parent_user.clone()
} else {
cred.access_key.clone()
};
let sys_cred = get_global_action_cred().unwrap_or_default();
let claims = &cred.claims;
// Claims present => AssumedRole; the root access key => Account;
// any other named key => User; no username at all => Anonymous.
let principal_type = if !username.is_empty() {
if claims.is_some() {
"AssumedRole"
} else if constant_time_eq(&sys_cred.access_key, &username) {
"Account"
} else {
"User"
}
} else {
"Anonymous"
};
// Get current time
let curr_time = OffsetDateTime::now_utc();
let epoch_time = curr_time.unix_timestamp();
// Use provided version ID or empty string
let vid = version_id.unwrap_or("");
// Determine auth type and signature version from headers
let (auth_type, signature_version) = determine_auth_type_and_version(header);
// Get TLS status from header
// NOTE(review): TLS is inferred from proxy headers only (x-forwarded-*),
// so a direct TLS connection without a proxy reports false — confirm.
let is_tls = header
.get("x-forwarded-proto")
.and_then(|v| v.to_str().ok())
.map(|s| s == "https")
.or_else(|| {
header
.get("x-forwarded-scheme")
.and_then(|v| v.to_str().ok())
.map(|s| s == "https")
})
.unwrap_or(false);
// Get remote address from header or use default
let remote_addr_s = remote_addr.map(|a| a.ip().to_string()).unwrap_or_default();
let mut args = HashMap::new();
// Add basic time and security info
args.insert("CurrentTime".to_owned(), vec![curr_time.format(&Rfc3339).unwrap_or_default()]);
args.insert("EpochTime".to_owned(), vec![epoch_time.to_string()]);
args.insert("SecureTransport".to_owned(), vec![is_tls.to_string()]);
args.insert("SourceIp".to_owned(), vec![get_source_ip_raw(header, &remote_addr_s)]);
// Add user agent and referer
if let Some(user_agent) = header.get("user-agent") {
args.insert("UserAgent".to_owned(), vec![user_agent.to_str().unwrap_or("").to_string()]);
}
if let Some(referer) = header.get("referer") {
args.insert("Referer".to_owned(), vec![referer.to_str().unwrap_or("").to_string()]);
}
// Add user and principal info
args.insert("userid".to_owned(), vec![username.clone()]);
args.insert("username".to_owned(), vec![username]);
args.insert("principaltype".to_owned(), vec![principal_type.to_string()]);
// Add version ID
if !vid.is_empty() {
args.insert("versionid".to_owned(), vec![vid.to_string()]);
}
// Add signature version and auth type
if !signature_version.is_empty() {
args.insert("signatureversion".to_owned(), vec![signature_version]);
}
if !auth_type.is_empty() {
args.insert("authType".to_owned(), vec![auth_type]);
}
if let Some(lc) = region
&& !lc.is_empty()
{
args.insert("LocationConstraint".to_owned(), vec![lc.to_string()]);
}
// Work on a copy of the headers so special keys can be consumed (removed)
// before the generic pass below copies the remaining headers into args.
let mut clone_header = header.clone();
if let Some(v) = clone_header.get("x-amz-signature-age") {
args.insert("signatureAge".to_string(), vec![v.to_str().unwrap_or("").to_string()]);
clone_header.remove("x-amz-signature-age");
}
// Object-lock headers are exposed under their name minus the "x-amz-" prefix.
for obj_lock in &[
"x-amz-object-lock-mode",
"x-amz-object-lock-legal-hold",
"x-amz-object-lock-retain-until-date",
] {
let values = clone_header
.get_all(*obj_lock)
.iter()
.map(|v| v.to_str().unwrap_or("").to_string())
.collect::<Vec<String>>();
if !values.is_empty() {
args.insert(obj_lock.trim_start_matches("x-amz-").to_string(), values);
}
clone_header.remove(*obj_lock);
}
// Copy every remaining header (except tagging) into args verbatim.
for (key, _values) in clone_header.iter() {
if key.as_str().eq_ignore_ascii_case("x-amz-tagging") {
continue;
}
if let Some(existing_values) = args.get_mut(key.as_str()) {
existing_values.extend(clone_header.get_all(key).iter().map(|v| v.to_str().unwrap_or("").to_string()));
} else {
args.insert(
key.as_str().to_string(),
header
.get_all(key)
.iter()
.map(|v| v.to_str().unwrap_or("").to_string())
.collect(),
);
}
}
// Flatten string-valued claims into condition keys; "ldap"-prefixed claim
// names are normalized by stripping the prefix and lowercasing.
if let Some(claims) = &cred.claims {
for (k, v) in claims {
if let Some(v_str) = v.as_str() {
args.insert(k.trim_start_matches("ldap").to_lowercase(), vec![v_str.to_string()]);
}
}
if let Some(grps_val) = claims.get("groups")
&& let Some(grps_is) = grps_val.as_array()
{
let grps = grps_is
.iter()
.filter_map(|g| g.as_str().map(|s| s.to_string()))
.collect::<Vec<String>>();
if !grps.is_empty() {
args.insert("groups".to_string(), grps);
}
}
}
// Credential groups apply only when the claims did not already set them.
if let Some(groups) = &cred.groups
&& !args.contains_key("groups")
{
args.insert("groups".to_string(), groups.clone());
}
args
}
/// Get request authentication type
///
/// Probes the request headers in a fixed priority order and returns the
/// first authentication scheme that matches; `Unknown` if none do.
///
/// # Arguments
/// * `header` - HTTP headers of the request
///
/// # Returns
/// * `AuthType` - The determined authentication type
pub fn get_request_auth_type(header: &HeaderMap) -> AuthType {
    if is_request_signature_v2(header) {
        return AuthType::SignedV2;
    }
    if is_request_presigned_signature_v2(header) {
        return AuthType::PresignedV2;
    }
    if is_request_sign_streaming_v4(header) {
        return AuthType::StreamingSigned;
    }
    if is_request_sign_streaming_trailer_v4(header) {
        return AuthType::StreamingSignedTrailer;
    }
    if is_request_unsigned_trailer_v4(header) {
        return AuthType::StreamingUnsignedTrailer;
    }
    if is_request_signature_v4(header) {
        return AuthType::Signed;
    }
    if is_request_presigned_signature_v4(header) {
        return AuthType::Presigned;
    }
    if is_request_jwt(header) {
        return AuthType::JWT;
    }
    if is_request_post_policy_signature_v4(header) {
        return AuthType::PostPolicy;
    }
    if is_request_sts(header) {
        return AuthType::STS;
    }
    if is_request_anonymous(header) {
        return AuthType::Anonymous;
    }
    AuthType::Unknown
}
/// Map the request's `AuthType` to the `(authType, signatureversion)` pair
/// used as policy condition values. Unknown requests map to two empty strings.
///
/// # Arguments
/// * `header` - HTTP headers of the request
///
/// # Returns
/// * `(String, String)` - Tuple of auth type and signature version
fn determine_auth_type_and_version(header: &HeaderMap) -> (String, String) {
    let (auth_type, signature_version): (&str, &str) = match get_request_auth_type(header) {
        AuthType::JWT => ("JWT", ""),
        AuthType::SignedV2 => ("REST-HEADER", "AWS2"),
        AuthType::PresignedV2 => ("REST-QUERY-STRING", "AWS2"),
        AuthType::Signed
        | AuthType::StreamingSigned
        | AuthType::StreamingSignedTrailer
        | AuthType::StreamingUnsignedTrailer => ("REST-HEADER", "AWS4-HMAC-SHA256"),
        AuthType::Presigned => ("REST-QUERY-STRING", "AWS4-HMAC-SHA256"),
        AuthType::PostPolicy => ("POST", ""),
        AuthType::STS => ("STS", ""),
        AuthType::Anonymous => ("Anonymous", ""),
        AuthType::Unknown => ("", ""),
    };
    (auth_type.to_owned(), signature_version.to_owned())
}
/// True when the Authorization header carries a JWT bearer token.
fn is_request_jwt(header: &HeaderMap) -> bool {
    header
        .get("authorization")
        .and_then(|auth| auth.to_str().ok())
        .is_some_and(|auth| auth.starts_with(JWT_ALGORITHM))
}
/// True when the Authorization header carries an AWS Signature V4.
fn is_request_signature_v4(header: &HeaderMap) -> bool {
    header
        .get("authorization")
        .and_then(|auth| auth.to_str().ok())
        .is_some_and(|auth| auth.starts_with(SIGN_V4_ALGORITHM))
}
/// True when the Authorization header carries an AWS Signature V2
/// (the V4 prefix also begins with "AWS", so V4 is excluded explicitly).
fn is_request_signature_v2(header: &HeaderMap) -> bool {
    header
        .get("authorization")
        .and_then(|auth| auth.to_str().ok())
        .is_some_and(|auth| !auth.starts_with(SIGN_V4_ALGORITHM) && auth.starts_with(SIGN_V2_ALGORITHM))
}
/// True when a non-empty `X-Amz-Credential` marker is present,
/// identifying an AWS presigned (query-string) V4 request.
pub(crate) fn is_request_presigned_signature_v4(header: &HeaderMap) -> bool {
    header
        .get(AMZ_CREDENTIAL)
        .is_some_and(|credential| !credential.to_str().unwrap_or("").is_empty())
}
/// True when a non-empty `AWSAccessKeyId` marker is present,
/// identifying an AWS presigned (query-string) V2 request.
fn is_request_presigned_signature_v2(header: &HeaderMap) -> bool {
    header
        .get(AMZ_ACCESS_KEY_ID)
        .is_some_and(|access_key| !access_key.to_str().unwrap_or("").is_empty())
}
/// Verify if request has AWS Post policy Signature Version '4'
///
/// # Arguments
/// * `header` - HTTP headers of the request
///
/// # Returns
/// * `bool` - True if request has AWS Post policy Signature Version '4', false otherwise
fn is_request_post_policy_signature_v4(header: &HeaderMap) -> bool {
if let Some(content_type) = header.get("content-type")
&& let Ok(ct) = content_type.to_str()
{
return ct.contains("multipart/form-data");
}
false
}
/// True when x-amz-content-sha256 marks a streaming signed V4 payload.
fn is_request_sign_streaming_v4(header: &HeaderMap) -> bool {
    header
        .get("x-amz-content-sha256")
        .and_then(|content_sha256| content_sha256.to_str().ok())
        .is_some_and(|sha| sha == STREAMING_CONTENT_SHA256)
}
// True when x-amz-content-sha256 marks a streaming signed V4 payload with trailer.
fn is_request_sign_streaming_trailer_v4(header: &HeaderMap) -> bool {
    header
        .get("x-amz-content-sha256")
        .and_then(|content_sha256| content_sha256.to_str().ok())
        .is_some_and(|sha| sha == STREAMING_CONTENT_SHA256_TRAILER)
}
// True when x-amz-content-sha256 marks a streaming unsigned payload with trailer.
fn is_request_unsigned_trailer_v4(header: &HeaderMap) -> bool {
    header
        .get("x-amz-content-sha256")
        .and_then(|content_sha256| content_sha256.to_str().ok())
        .is_some_and(|sha| sha == UNSIGNED_PAYLOAD_TRAILER)
}
// True when a non-empty Action marker identifies an STS request.
fn is_request_sts(header: &HeaderMap) -> bool {
    header
        .get(ACTION_HEADER)
        .is_some_and(|action| !action.to_str().unwrap_or("").is_empty())
}
// A request with no Authorization header at all is anonymous.
fn is_request_anonymous(header: &HeaderMap) -> bool {
    !header.contains_key("authorization")
}
/// Find the value of `param_name` in a raw query string, matching the key
/// case-insensitively (Unicode lowercase on both sides). Returns the value
/// of the first matching `key=value` pair; pairs without `=` never match.
pub fn get_query_param<'a>(query: &'a str, param_name: &str) -> Option<&'a str> {
    let wanted = param_name.to_lowercase();
    query.split('&').find_map(|pair| {
        let (key, value) = pair.split_once('=')?;
        (key.to_lowercase() == wanted).then_some(value)
    })
}
#[cfg(test)]
mod tests {
use super::*;
use http::{HeaderMap, HeaderValue, Uri};
use rustfs_credentials::Credentials;
use s3s::auth::SecretKey;
use serde_json::json;
use std::collections::HashMap;
use time::OffsetDateTime;
// Builds a plain long-lived credential: no session token, no expiration,
// no claims — is_temp() and is_service_account() should both be false.
fn create_test_credentials() -> Credentials {
Credentials {
access_key: "test-access-key".to_string(),
secret_key: "test-secret-key".to_string(),
session_token: "".to_string(),
expiration: None,
status: "on".to_string(),
parent_user: "".to_string(),
groups: None,
claims: None,
name: Some("test-user".to_string()),
description: Some("test user for auth tests".to_string()),
}
}
// Builds a temporary credential: session token + future expiration + parent
// user, so is_temp() should be true and identity resolves to the parent.
fn create_temp_credentials() -> Credentials {
Credentials {
access_key: "temp-access-key".to_string(),
secret_key: "temp-secret-key".to_string(),
session_token: "temp-session-token".to_string(),
expiration: Some(OffsetDateTime::now_utc() + time::Duration::hours(1)),
status: "on".to_string(),
parent_user: "parent-user".to_string(),
groups: Some(vec!["test-group".to_string()]),
claims: None,
name: Some("temp-user".to_string()),
description: Some("temporary user for auth tests".to_string()),
}
}
// Builds a service-account credential: the IAM_POLICY_CLAIM_NAME_SA claim
// is what marks it as a service account.
fn create_service_account_credentials() -> Credentials {
let mut claims = HashMap::new();
claims.insert(rustfs_credentials::IAM_POLICY_CLAIM_NAME_SA.to_string(), json!("test-policy"));
Credentials {
access_key: "service-access-key".to_string(),
secret_key: "service-secret-key".to_string(),
session_token: "service-session-token".to_string(),
expiration: None,
status: "on".to_string(),
parent_user: "service-parent".to_string(),
groups: None,
claims: Some(claims),
name: Some("service-account".to_string()),
description: Some("service account for auth tests".to_string()),
}
}
// Smoke test: construction must not panic.
// NOTE(review): the size assertion is tautological (size_of_val of a sized
// value always equals size_of its type) — it only proves the value exists.
#[test]
fn test_iam_auth_creation() {
let access_key = "test-access-key";
let secret_key = SecretKey::from("test-secret-key");
let iam_auth = IAMAuth::new(access_key, secret_key);
// The struct should be created successfully
// We can't easily test internal state without exposing it,
// but we can test it doesn't panic on creation
assert_eq!(size_of_val(&iam_auth), size_of::<IAMAuth>());
}
// An empty access key must be rejected before any lookup is attempted.
#[tokio::test]
async fn test_iam_auth_get_secret_key_empty_access_key() {
let iam_auth = IAMAuth::new("test-ak", SecretKey::from("test-sk"));
let result = iam_auth.get_secret_key("").await;
assert!(result.is_err());
let error = result.unwrap_err();
assert_eq!(error.code(), &S3ErrorCode::UnauthorizedAccess);
assert!(error.message().unwrap_or("").contains("Your account is not signed up"));
}
// A token without an access key is invalid.
#[test]
fn test_check_claims_from_token_empty_token_and_access_key() {
let mut cred = create_test_credentials();
cred.access_key = "".to_string();
let result = check_claims_from_token("test-token", &cred);
assert!(result.is_err());
let error = result.unwrap_err();
assert_eq!(error.code(), &S3ErrorCode::InvalidRequest);
assert!(error.message().unwrap_or("").contains("no access key"));
}
// Temp credentials (non-service-account) must present a session token.
#[test]
fn test_check_claims_from_token_temp_credentials_without_token() {
let mut cred = create_temp_credentials();
// Make it non-service account
cred.claims = None;
let result = check_claims_from_token("", &cred);
assert!(result.is_err());
let error = result.unwrap_err();
assert_eq!(error.code(), &S3ErrorCode::InvalidRequest);
assert!(error.message().unwrap_or("").contains("invalid token1"));
}
// Long-lived credentials must not present a session token.
#[test]
fn test_check_claims_from_token_non_temp_with_token() {
let mut cred = create_test_credentials();
cred.session_token = "".to_string(); // Make it non-temp
let result = check_claims_from_token("some-token", &cred);
assert!(result.is_err());
let error = result.unwrap_err();
assert_eq!(error.code(), &S3ErrorCode::InvalidRequest);
assert!(error.message().unwrap_or("").contains("invalid token2"));
}
// The presented token must match the one stored with the temp credential.
#[test]
fn test_check_claims_from_token_mismatched_session_token() {
let mut cred = create_temp_credentials();
// Make sure it's not a service account
cred.claims = None;
let result = check_claims_from_token("wrong-session-token", &cred);
assert!(result.is_err());
let error = result.unwrap_err();
assert_eq!(error.code(), &S3ErrorCode::InvalidRequest);
assert!(error.message().unwrap_or("").contains("invalid token3"));
}
// Expired temp credentials are rejected, though the exact message depends on
// which validation fires first and on global credential state.
#[test]
fn test_check_claims_from_token_expired_credentials() {
let mut cred = create_temp_credentials();
cred.expiration = Some(OffsetDateTime::now_utc() - time::Duration::hours(1)); // Expired
cred.claims = None; // Make sure it's not a service account
let result = check_claims_from_token(&cred.session_token, &cred);
assert!(result.is_err());
let error = result.unwrap_err();
assert_eq!(error.code(), &S3ErrorCode::InvalidRequest);
// The function checks various conditions in order. An expired temp credential
// might trigger other validation errors first (like token mismatch)
let msg = error.message().unwrap_or("");
let is_valid_error = msg.contains("invalid access key is temp and expired")
|| msg.contains("invalid token")
|| msg.contains("action cred not init");
assert!(is_valid_error, "Unexpected error message: '{msg}'");
}
// A valid long-lived credential with no token yields empty claims — unless
// the global action credential is uninitialized in this test process.
#[test]
fn test_check_claims_from_token_valid_non_temp_credentials() {
let mut cred = create_test_credentials();
cred.session_token = "".to_string(); // Make it non-temp
let result = check_claims_from_token("", &cred);
// This might fail due to global state dependencies, but should return error about global cred init
if let Ok(claims) = result {
assert!(claims.is_empty());
} else if let Err(error) = result {
assert_eq!(error.code(), &S3ErrorCode::InternalError);
assert!(error.message().unwrap_or("").contains("action cred not init"));
}
}
#[test]
fn test_get_session_token_from_header() {
let mut headers = HeaderMap::new();
headers.insert("x-amz-security-token", HeaderValue::from_static("test-session-token"));
let uri: Uri = "https://example.com/".parse().unwrap();
let token = get_session_token(&uri, &headers);
assert_eq!(token, Some("test-session-token"));
}
#[test]
fn test_get_session_token_from_query_param() {
let headers = HeaderMap::new();
let uri: Uri = "https://example.com/?x-amz-security-token=query-session-token"
.parse()
.unwrap();
let token = get_session_token(&uri, &headers);
assert_eq!(token, Some("query-session-token"));
}
#[test]
fn test_get_session_token_header_takes_precedence() {
let mut headers = HeaderMap::new();
headers.insert("x-amz-security-token", HeaderValue::from_static("header-token"));
let uri: Uri = "https://example.com/?x-amz-security-token=query-token".parse().unwrap();
let token = get_session_token(&uri, &headers);
assert_eq!(token, Some("header-token"));
}
#[test]
fn test_get_session_token_no_token() {
let headers = HeaderMap::new();
let uri: Uri = "https://example.com/".parse().unwrap();
let token = get_session_token(&uri, &headers);
assert_eq!(token, None);
}
#[test]
fn test_get_condition_values_regular_user() {
let cred = create_test_credentials();
let headers = HeaderMap::new();
let conditions = get_condition_values(&headers, &cred, None, None, None);
assert_eq!(conditions.get("userid"), Some(&vec!["test-access-key".to_string()]));
assert_eq!(conditions.get("username"), Some(&vec!["test-access-key".to_string()]));
assert_eq!(conditions.get("principaltype"), Some(&vec!["User".to_string()]));
}
#[test]
fn test_get_condition_values_temp_user() {
let cred = create_temp_credentials();
let headers = HeaderMap::new();
let conditions = get_condition_values(&headers, &cred, None, None, None);
assert_eq!(conditions.get("userid"), Some(&vec!["parent-user".to_string()]));
assert_eq!(conditions.get("username"), Some(&vec!["parent-user".to_string()]));
assert_eq!(conditions.get("principaltype"), Some(&vec!["User".to_string()]));
}
#[test]
fn test_get_condition_values_service_account() {
    // Service-account credentials resolve to the parent user; because they
    // carry claims they are classified as "AssumedRole" rather than "User".
    let credentials = create_service_account_credentials();
    let conditions = get_condition_values(&HeaderMap::new(), &credentials, None, None, None);
    for key in ["userid", "username"] {
        assert_eq!(conditions.get(key), Some(&vec!["service-parent".to_string()]));
    }
    assert_eq!(conditions.get("principaltype"), Some(&vec!["AssumedRole".to_string()]));
}
#[test]
fn test_get_condition_values_with_object_lock_headers() {
    // Object-lock request headers must surface as condition values under
    // their short (x-amz-prefix-stripped) names.
    let credentials = create_test_credentials();
    let mut header_map = HeaderMap::new();
    header_map.insert("x-amz-object-lock-mode", HeaderValue::from_static("GOVERNANCE"));
    header_map.insert(
        "x-amz-object-lock-retain-until-date",
        HeaderValue::from_static("2024-12-31T23:59:59Z"),
    );
    let conditions = get_condition_values(&header_map, &credentials, None, None, None);
    assert_eq!(conditions.get("object-lock-mode"), Some(&vec!["GOVERNANCE".to_string()]));
    assert_eq!(
        conditions.get("object-lock-retain-until-date"),
        Some(&vec!["2024-12-31T23:59:59Z".to_string()])
    );
}
#[test]
fn test_get_condition_values_with_signature_age() {
    // The x-amz-signature-age header must be exposed as the "signatureAge" condition.
    let credentials = create_test_credentials();
    let mut header_map = HeaderMap::new();
    header_map.insert("x-amz-signature-age", HeaderValue::from_static("300"));
    let conditions = get_condition_values(&header_map, &credentials, None, None, None);
    assert_eq!(conditions.get("signatureAge"), Some(&vec!["300".to_string()]));
    // The implementation also removes the header after processing, but that
    // is not observable here without changing the function signature.
}
#[test]
fn test_get_condition_values_with_claims() {
let mut cred = create_service_account_credentials();
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/init.rs | rustfs/src/init.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::storage::ecfs::{process_lambda_configurations, process_queue_configurations, process_topic_configurations};
use crate::{admin, config, version};
use chrono::Datelike;
use rustfs_config::{DEFAULT_UPDATE_CHECK, ENV_UPDATE_CHECK};
use rustfs_ecstore::bucket::metadata_sys;
use rustfs_notify::notifier_global;
use rustfs_targets::arn::{ARN, TargetIDError};
use s3s::s3_error;
use std::env;
use std::io::Error;
use tracing::{debug, error, info, instrument, warn};
#[instrument]
pub(crate) fn print_server_info() {
    // Emit the standard startup banner through the tracing subsystem; the
    // copyright range always ends at the current (UTC) year.
    info!("RustFS Object Storage Server");
    info!("Copyright: 2024-{} RustFS, Inc", chrono::Utc::now().year());
    info!("License: Apache-2.0 https://www.apache.org/licenses/LICENSE-2.0");
    info!("Version: {}", version::get_version());
    info!("Docs: https://rustfs.com/docs/");
}
/// Spawn the asynchronous update check, if enabled.
///
/// Whether the check runs is controlled by the `ENV_UPDATE_CHECK` environment
/// variable, falling back to `DEFAULT_UPDATE_CHECK` when unset or unparsable.
/// The check itself runs on a background task with a 30-second timeout so it
/// can never block or hang server startup.
pub(crate) fn init_update_check() {
    // Unset or malformed values fall back to the compiled-in default.
    let enabled = env::var(ENV_UPDATE_CHECK)
        .ok()
        .and_then(|v| v.parse::<bool>().ok())
        .unwrap_or(DEFAULT_UPDATE_CHECK);
    if !enabled {
        return;
    }
    tokio::spawn(async {
        use crate::update::{UpdateCheckError, check_updates};
        // Bound the network call so a stalled endpoint cannot pin the task.
        let outcome = tokio::time::timeout(std::time::Duration::from_secs(30), check_updates()).await;
        match outcome {
            Ok(Ok(result)) => {
                if !result.update_available {
                    debug!("✅ Version check: Current version is up to date: {}", result.current_version);
                } else if let Some(latest) = &result.latest_version {
                    info!(
                        "🚀 Version check: New version available: {} -> {} (current: {})",
                        result.current_version, latest.version, result.current_version
                    );
                    if let Some(notes) = &latest.release_notes {
                        info!("📝 Release notes: {}", notes);
                    }
                    if let Some(url) = &latest.download_url {
                        info!("🔗 Download URL: {}", url);
                    }
                }
            }
            Ok(Err(UpdateCheckError::HttpError(e))) => {
                debug!("Version check: network error (this is normal): {}", e);
            }
            Ok(Err(e)) => {
                debug!("Version check: failed (this is normal): {}", e);
            }
            Err(_) => {
                debug!("Version check: timeout after 30 seconds (this is normal)");
            }
        }
    });
}
/// Add existing bucket notification configurations to the global notifier system.
///
/// For every bucket this retrieves the stored notification configuration (if
/// any), converts its queue, topic and lambda entries into event rules, and
/// registers those rules with the global notifier. Buckets without a stored
/// configuration are logged and skipped; a per-bucket registration failure is
/// logged but does not abort processing of the remaining buckets.
///
/// # Arguments
/// * `buckets` - A vector of bucket names to process
#[instrument(skip_all)]
pub(crate) async fn add_bucket_notification_configuration(buckets: Vec<String>) {
    let region_opt = rustfs_ecstore::global::get_global_region();
    let region = match region_opt {
        Some(ref r) if !r.is_empty() => r,
        _ => {
            warn!("Global region is not set; attempting notification configuration for all buckets with an empty region.");
            ""
        }
    };
    // Single ARN-string -> TargetID parser shared by all three configuration
    // kinds (queue, topic, lambda). Previously this closure was duplicated
    // verbatim three times; it captures nothing, so it is `Copy` and can be
    // handed to each helper by value.
    let parse_target_id = |arn_str: &str| {
        ARN::parse(arn_str)
            .map(|arn| arn.target_id)
            .map_err(|e| TargetIDError::InvalidFormat(e.to_string()))
    };
    for bucket in buckets.iter() {
        // A missing or unreadable configuration is treated as "no configuration".
        let has_notification_config = metadata_sys::get_notification_config(bucket).await.unwrap_or_else(|err| {
            warn!("get_notification_config err {:?}", err);
            None
        });
        match has_notification_config {
            Some(cfg) => {
                info!(
                    target: "rustfs::main::add_bucket_notification_configuration",
                    bucket = %bucket,
                    "Bucket '{}' has existing notification configuration: {:?}", bucket, cfg);
                let mut event_rules = Vec::new();
                process_queue_configurations(&mut event_rules, cfg.queue_configurations.clone(), parse_target_id);
                process_topic_configurations(&mut event_rules, cfg.topic_configurations.clone(), parse_target_id);
                process_lambda_configurations(&mut event_rules, cfg.lambda_function_configurations.clone(), parse_target_id);
                if let Err(e) = notifier_global::add_event_specific_rules(bucket, region, &event_rules)
                    .await
                    .map_err(|e| s3_error!(InternalError, "Failed to add rules: {e}"))
                {
                    error!("Failed to add rules for bucket '{}': {:?}", bucket, e);
                }
            }
            None => {
                info!(
                    target: "rustfs::main::add_bucket_notification_configuration",
                    bucket = %bucket,
                    "Bucket '{}' has no existing notification configuration.", bucket);
            }
        }
    }
}
/// Initialize KMS system and configure if enabled
///
/// This function initializes the global KMS service manager. If KMS is enabled
/// via command line options, it configures and starts the service accordingly.
/// If not enabled, it attempts to load any persisted KMS configuration from
/// cluster storage and starts the service if found.
/// # Arguments
/// * `opt` - The application configuration options
///
/// Returns `std::io::Result<()>` indicating success or failure
#[instrument(skip(opt))]
pub(crate) async fn init_kms_system(opt: &config::Opt) -> std::io::Result<()> {
    // Initialize global KMS service manager (starts in NotConfigured state)
    let service_manager = rustfs_kms::init_global_kms_service_manager();
    // If KMS is enabled in configuration, configure and start the service
    if opt.kms_enable {
        info!("KMS is enabled via command line, configuring and starting service...");
        // Create KMS configuration from command line options.
        // Only the "local" and "vault" backends are supported here; anything
        // else is rejected in the catch-all arm below.
        let kms_config = match opt.kms_backend.as_str() {
            "local" => {
                // The local backend stores key material on disk, so a key
                // directory is mandatory.
                let key_dir = opt
                    .kms_key_dir
                    .as_ref()
                    .ok_or_else(|| Error::other("KMS key directory is required for local backend"))?;
                rustfs_kms::config::KmsConfig {
                    backend: rustfs_kms::config::KmsBackend::Local,
                    backend_config: rustfs_kms::config::BackendConfig::Local(rustfs_kms::config::LocalConfig {
                        key_dir: std::path::PathBuf::from(key_dir),
                        master_key: None,
                        // Owner read/write only for on-disk key files.
                        file_permissions: Some(0o600),
                    }),
                    default_key_id: opt.kms_default_key_id.clone(),
                    timeout: std::time::Duration::from_secs(30),
                    retry_attempts: 3,
                    enable_cache: true,
                    cache_config: rustfs_kms::config::CacheConfig::default(),
                }
            }
            "vault" => {
                // The vault backend needs both an address and a token to
                // reach and authenticate against the Vault server.
                let vault_address = opt
                    .kms_vault_address
                    .as_ref()
                    .ok_or_else(|| Error::other("Vault address is required for vault backend"))?;
                let vault_token = opt
                    .kms_vault_token
                    .as_ref()
                    .ok_or_else(|| Error::other("Vault token is required for vault backend"))?;
                rustfs_kms::config::KmsConfig {
                    backend: rustfs_kms::config::KmsBackend::Vault,
                    backend_config: rustfs_kms::config::BackendConfig::Vault(rustfs_kms::config::VaultConfig {
                        address: vault_address.clone(),
                        auth_method: rustfs_kms::config::VaultAuthMethod::Token {
                            token: vault_token.clone(),
                        },
                        namespace: None,
                        mount_path: "transit".to_string(),
                        kv_mount: "secret".to_string(),
                        key_path_prefix: "rustfs/kms/keys".to_string(),
                        tls: None,
                    }),
                    default_key_id: opt.kms_default_key_id.clone(),
                    timeout: std::time::Duration::from_secs(30),
                    retry_attempts: 3,
                    enable_cache: true,
                    cache_config: rustfs_kms::config::CacheConfig::default(),
                }
            }
            _ => return Err(Error::other(format!("Unsupported KMS backend: {}", opt.kms_backend))),
        };
        // Configure the KMS service. Unlike the persisted-config path below,
        // failures here are fatal: the operator explicitly asked for KMS.
        service_manager
            .configure(kms_config)
            .await
            .map_err(|e| Error::other(format!("Failed to configure KMS: {e}")))?;
        // Start the KMS service
        service_manager
            .start()
            .await
            .map_err(|e| Error::other(format!("Failed to start KMS: {e}")))?;
        info!("KMS service configured and started successfully from command line options");
    } else {
        // Try to load persisted KMS configuration from cluster storage
        info!("Attempting to load persisted KMS configuration from cluster storage...");
        if let Some(persisted_config) = admin::handlers::kms_dynamic::load_kms_config().await {
            info!("Found persisted KMS configuration, attempting to configure and start service...");
            // Configure the KMS service with persisted config. Failures on
            // this path are non-fatal: the server keeps running and KMS can
            // still be configured later via the API.
            match service_manager.configure(persisted_config).await {
                Ok(()) => {
                    // Start the KMS service
                    match service_manager.start().await {
                        Ok(()) => {
                            info!("KMS service configured and started successfully from persisted configuration");
                        }
                        Err(e) => {
                            warn!("Failed to start KMS with persisted configuration: {}", e);
                        }
                    }
                }
                Err(e) => {
                    warn!("Failed to configure KMS with persisted configuration: {}", e);
                }
            }
        } else {
            info!("No persisted KMS configuration found. KMS is ready for dynamic configuration via API.");
        }
    }
    Ok(())
}
/// Initialize the adaptive buffer sizing system with workload profile configuration.
///
/// Buffer sizing is workload-aware and enabled by default using the
/// GeneralPurpose profile, which matches the original fixed buffer sizes for
/// compatibility.
///
/// # Configuration
/// - Default: Enabled with GeneralPurpose profile
/// - Opt-out: Use `--buffer-profile-disable` flag
/// - Custom profile: Set via `--buffer-profile` or `RUSTFS_BUFFER_PROFILE` environment variable
///
/// # Arguments
/// * `opt` - The application configuration options
pub(crate) fn init_buffer_profile_system(opt: &config::Opt) {
    use crate::config::workload_profiles::{
        RustFSBufferConfig, WorkloadProfile, init_global_buffer_config, set_buffer_profile_enabled,
    };
    // Explicit opt-out: leave profiling off and return early.
    if opt.buffer_profile_disable {
        info!("Buffer profiling disabled via --buffer-profile-disable, using GeneralPurpose profile");
        set_buffer_profile_enabled(false);
        return;
    }
    // Default path: resolve the configured profile name, publish the global
    // buffer configuration, then switch profiling on.
    info!("Buffer profiling enabled with profile: {}", opt.buffer_profile);
    let profile = WorkloadProfile::from_name(&opt.buffer_profile);
    info!("Active buffer profile: {:?}", profile);
    init_global_buffer_config(RustFSBufferConfig::new(profile));
    set_buffer_profile_enabled(true);
    info!("Buffer profiling system initialized successfully");
}
/// Initialize the FTPS subsystem.
///
/// Reads the FTPS-related environment variables; when FTPS is enabled it
/// builds the server configuration, constructs the server, and runs it on a
/// background task that listens to the shared shutdown broadcast channel.
///
/// MINIO CONSTRAINT: FTPS server MUST follow the same lifecycle management
/// as other services and MUST integrate with the global shutdown system.
#[instrument(skip_all)]
pub async fn init_ftp_system(
    shutdown_tx: tokio::sync::broadcast::Sender<()>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    use crate::protocols::ftps::server::{FtpsConfig, FtpsServer};
    use std::net::SocketAddr;
    // Bail out early when the feature is switched off.
    if !rustfs_utils::get_env_bool(rustfs_config::ENV_FTPS_ENABLE, false) {
        debug!("FTPS system is disabled");
        return Ok(());
    }
    // Resolve the listen address from the environment (with a built-in default).
    let raw_addr = rustfs_utils::get_env_str(rustfs_config::ENV_FTPS_ADDRESS, rustfs_config::DEFAULT_FTPS_ADDRESS);
    let bind_addr: SocketAddr = raw_addr
        .parse()
        .map_err(|e| format!("Invalid FTPS address '{}': {}", raw_addr, e))?;
    // Assemble the configuration; TLS material and passive-mode settings are
    // all optional environment values.
    let config = FtpsConfig {
        bind_addr,
        passive_ports: rustfs_utils::get_env_opt_str(rustfs_config::ENV_FTPS_PASSIVE_PORTS),
        external_ip: rustfs_utils::get_env_opt_str(rustfs_config::ENV_FTPS_EXTERNAL_IP),
        ftps_required: true,
        cert_file: rustfs_utils::get_env_opt_str(rustfs_config::ENV_FTPS_CERTS_FILE),
        key_file: rustfs_utils::get_env_opt_str(rustfs_config::ENV_FTPS_KEY_FILE),
    };
    let server = FtpsServer::new(config).await?;
    info!(
        "FTPS server configured on {} with passive ports {:?}",
        server.config().bind_addr,
        server.config().passive_ports
    );
    // Run the server in the background until the global shutdown signal fires.
    let shutdown_rx = shutdown_tx.subscribe();
    tokio::spawn(async move {
        if let Err(e) = server.start(shutdown_rx).await {
            error!("FTPS server error: {}", e);
        }
    });
    info!("FTPS system initialized successfully");
    Ok(())
}
/// Initialize the SFTP subsystem.
///
/// Reads the SFTP-related environment variables; when SFTP is enabled it
/// builds the server configuration, constructs the server, and runs it on a
/// background task that listens to the shared shutdown broadcast channel.
///
/// MINIO CONSTRAINT: SFTP server MUST follow the same lifecycle management
/// as other services and MUST integrate with the global shutdown system.
#[instrument(skip_all)]
pub async fn init_sftp_system(
    shutdown_tx: tokio::sync::broadcast::Sender<()>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    use crate::protocols::sftp::server::{SftpConfig, SftpServer};
    use std::net::SocketAddr;
    // Bail out early when the feature is switched off.
    if !rustfs_utils::get_env_bool(rustfs_config::ENV_SFTP_ENABLE, false) {
        debug!("SFTP system is disabled");
        return Ok(());
    }
    // Resolve the listen address from the environment (with a built-in default).
    let raw_addr = rustfs_utils::get_env_str(rustfs_config::ENV_SFTP_ADDRESS, rustfs_config::DEFAULT_SFTP_ADDRESS);
    let bind_addr: SocketAddr = raw_addr
        .parse()
        .map_err(|e| format!("Invalid SFTP address '{}': {}", raw_addr, e))?;
    // Assemble the configuration; host key and authorized-keys file are optional.
    let config = SftpConfig {
        bind_addr,
        require_key_auth: false, // TODO: Add key auth configuration
        cert_file: None,         // CA certificates for client certificate authentication
        key_file: rustfs_utils::get_env_opt_str(rustfs_config::ENV_SFTP_HOST_KEY), // SFTP server host key
        authorized_keys_file: rustfs_utils::get_env_opt_str(rustfs_config::ENV_SFTP_AUTHORIZED_KEYS), // Pre-loaded authorized SSH public keys
    };
    let server = SftpServer::new(config)?;
    info!(
        "SFTP server configured on {} with key auth requirement: {}",
        server.config().bind_addr,
        server.config().require_key_auth
    );
    // Run the server in the background until the global shutdown signal fires.
    let shutdown_rx = shutdown_tx.subscribe();
    tokio::spawn(async move {
        if let Err(e) = server.start(shutdown_rx).await {
            error!("SFTP server error: {}", e);
        }
    });
    info!("SFTP system initialized successfully");
    Ok(())
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/main.rs | rustfs/src/main.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod admin;
mod auth;
mod config;
mod error;
mod init;
mod license;
mod profiling;
mod protocols;
mod server;
mod storage;
mod update;
mod version;
// Ensure the correct path for parse_license is imported
use crate::init::{
add_bucket_notification_configuration, init_buffer_profile_system, init_ftp_system, init_kms_system, init_sftp_system,
init_update_check, print_server_info,
};
use crate::server::{
SHUTDOWN_TIMEOUT, ServiceState, ServiceStateManager, ShutdownSignal, init_cert, init_event_notifier, shutdown_event_notifier,
start_audit_system, start_http_server, stop_audit_system, wait_for_shutdown,
};
use clap::Parser;
use license::init_license;
use rustfs_ahm::{
Scanner, create_ahm_services_cancel_token, heal::storage::ECStoreHealStorage, init_heal_manager,
scanner::data_scanner::ScannerConfig, shutdown_ahm_services,
};
use rustfs_common::{GlobalReadiness, SystemStage, set_global_addr};
use rustfs_credentials::init_global_action_credentials;
use rustfs_ecstore::{
StorageAPI,
bucket::metadata_sys::init_bucket_metadata_sys,
bucket::replication::{GLOBAL_REPLICATION_POOL, init_background_replication},
config as ecconfig,
config::GLOBAL_CONFIG_SYS,
endpoints::EndpointServerPools,
global::{set_global_rustfs_port, shutdown_background_services},
notification_sys::new_global_notification_sys,
set_global_endpoints,
store::ECStore,
store::init_local_disks,
store_api::BucketOptions,
update_erasure_type,
};
use rustfs_iam::init_iam_sys;
use rustfs_obs::{init_obs, set_global_guard};
use rustfs_utils::net::parse_and_resolve_address;
use std::io::{Error, Result};
use std::sync::Arc;
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, instrument, warn};
#[cfg(all(target_os = "linux", target_env = "gnu", target_arch = "x86_64"))]
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
#[cfg(not(all(target_os = "linux", target_env = "gnu", target_arch = "x86_64")))]
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
/// Process entry point: build the Tokio runtime from the shared builder and
/// drive the async entry point to completion on it.
fn main() -> Result<()> {
    server::get_tokio_runtime_builder()
        .build()
        .expect("Failed to build Tokio runtime")
        .block_on(async_main())
}
/// Async entry point: parses CLI options, initializes the license check,
/// observability and (optionally) TLS, then delegates to `run` for the
/// actual server lifecycle. Ordering matters: observability must come up
/// before anything that logs through `tracing`.
async fn async_main() -> Result<()> {
    // Parse the obtained parameters
    let opt = config::Opt::parse();
    // Initialize the configuration
    init_license(opt.license.clone());
    // Initialize Observability. Note: tracing is not available yet, hence
    // the plain println! on failure.
    let guard = match init_obs(Some(opt.clone().obs_endpoint)).await {
        Ok(g) => g,
        Err(e) => {
            println!("Failed to initialize observability: {e}");
            return Err(Error::other(e));
        }
    };
    // Store in global storage so the guard lives for the whole process.
    match set_global_guard(guard).map_err(Error::other) {
        Ok(_) => {
            info!(target: "rustfs::main", "Global observability guard set successfully.");
        }
        Err(e) => {
            error!("Failed to set global observability guard: {}", e);
            return Err(e);
        }
    }
    // print startup logo
    info!("{}", server::LOGO);
    // Initialize performance profiling if enabled
    profiling::init_from_env().await;
    // Initialize TLS if a certificate path is provided; a bad certificate
    // path is fatal rather than silently falling back to plaintext.
    if let Some(tls_path) = &opt.tls_path {
        match init_cert(tls_path).await {
            Ok(_) => {
                info!(target: "rustfs::main", "TLS initialized successfully with certs from {}", tls_path);
            }
            Err(e) => {
                error!("Failed to initialize TLS from {}: {}", tls_path, e);
                return Err(Error::other(e));
            }
        }
    }
    // Run the server; errors are logged here before propagating to main.
    match run(opt).await {
        Ok(_) => Ok(()),
        Err(e) => {
            error!("Server encountered an error and is shutting down: {}", e);
            Err(e)
        }
    }
}
/// Full server lifecycle: storage bring-up, subsystem initialization, HTTP
/// servers, readiness staging, and a blocking wait for the shutdown signal.
/// The statement order here is deliberate — readiness stages are marked only
/// after the corresponding subsystem is actually usable.
#[instrument(skip(opt))]
async fn run(opt: config::Opt) -> Result<()> {
    debug!("opt: {:?}", &opt);
    // 1. Initialize global readiness tracker
    let readiness = Arc::new(GlobalReadiness::new());
    if let Some(region) = &opt.region {
        rustfs_ecstore::global::set_global_region(region.clone());
    }
    let server_addr = parse_and_resolve_address(opt.address.as_str()).map_err(Error::other)?;
    let server_port = server_addr.port();
    let server_address = server_addr.to_string();
    info!(
        target: "rustfs::main::run",
        server_address = %server_address,
        ip = %server_addr.ip(),
        port = %server_port,
        version = %version::get_version(),
        "Starting RustFS server at {}",
        &server_address
    );
    // Set up AK and SK
    match init_global_action_credentials(Some(opt.access_key.clone()), Some(opt.secret_key.clone())) {
        Ok(_) => {
            info!(target: "rustfs::main::run", "Global action credentials initialized successfully.");
        }
        Err(e) => {
            let msg = format!("init_global_action_credentials failed: {e:?}");
            error!("{msg}");
            return Err(Error::other(msg));
        }
    };
    set_global_rustfs_port(server_port);
    set_global_addr(&opt.address).await;
    // For RPC
    let (endpoint_pools, setup_type) = EndpointServerPools::from_volumes(server_address.clone().as_str(), opt.volumes.clone())
        .await
        .map_err(Error::other)?;
    // Log the pool layout before formatting.
    for (i, eps) in endpoint_pools.as_ref().iter().enumerate() {
        info!(
            target: "rustfs::main::run",
            "Formatting {}st pool, {} set(s), {} drives per set.",
            i + 1,
            eps.set_count,
            eps.drives_per_set
        );
        if eps.drives_per_set > 1 {
            warn!(target: "rustfs::main::run","WARNING: Host local has more than 0 drives of set. A host failure will result in data becoming unavailable.");
        }
    }
    for (i, eps) in endpoint_pools.as_ref().iter().enumerate() {
        info!(
            target: "rustfs::main::run",
            id = i,
            set_count = eps.set_count,
            drives_per_set = eps.drives_per_set,
            cmd = ?eps.cmd_line,
            "created endpoints {}, set_count:{}, drives_per_set: {}, cmd: {:?}",
            i, eps.set_count, eps.drives_per_set, eps.cmd_line
        );
        for ep in eps.endpoints.as_ref().iter() {
            info!(
                target: "rustfs::main::run",
                " - endpoint: {}", ep
            );
        }
    }
    let state_manager = ServiceStateManager::new();
    // Update service status to Starting
    state_manager.update(ServiceState::Starting);
    // The S3 API server always runs (console disabled on this listener); the
    // console gets its own listener below when enabled.
    let s3_shutdown_tx = {
        let mut s3_opt = opt.clone();
        s3_opt.console_enable = false;
        let s3_shutdown_tx = start_http_server(&s3_opt, state_manager.clone(), readiness.clone()).await?;
        Some(s3_shutdown_tx)
    };
    let console_shutdown_tx = if opt.console_enable && !opt.console_address.is_empty() {
        let mut console_opt = opt.clone();
        console_opt.address = console_opt.console_address.clone();
        let console_shutdown_tx = start_http_server(&console_opt, state_manager.clone(), readiness.clone()).await?;
        Some(console_shutdown_tx)
    } else {
        None
    };
    set_global_endpoints(endpoint_pools.as_ref().clone());
    update_erasure_type(setup_type).await;
    // Initialize the local disk
    init_local_disks(endpoint_pools.clone()).await.map_err(Error::other)?;
    // Root cancellation token shared by the store and background services.
    let ctx = CancellationToken::new();
    // init store
    // 2. Start Storage Engine (ECStore)
    let store = ECStore::new(server_addr, endpoint_pools.clone(), ctx.clone())
        .await
        .inspect_err(|err| {
            error!("ECStore::new {:?}", err);
        })?;
    ecconfig::init();
    // // Initialize global configuration system
    // Retry once per second, giving up after 15 failed attempts.
    let mut retry_count = 0;
    while let Err(e) = GLOBAL_CONFIG_SYS.init(store.clone()).await {
        error!("GLOBAL_CONFIG_SYS.init failed {:?}", e);
        // TODO: check error type
        retry_count += 1;
        if retry_count > 15 {
            return Err(Error::other("GLOBAL_CONFIG_SYS.init failed"));
        }
        tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
    }
    readiness.mark_stage(SystemStage::StorageReady);
    // init replication_pool
    init_background_replication(store.clone()).await;
    // Initialize KMS system if enabled
    init_kms_system(&opt).await?;
    // Create a shutdown channel for FTP/SFTP services
    let (ftp_sftp_shutdown_tx, _) = tokio::sync::broadcast::channel(1);
    // Initialize FTP system if enabled
    init_ftp_system(ftp_sftp_shutdown_tx.clone()).await.map_err(Error::other)?;
    // Initialize SFTP system if enabled
    init_sftp_system(ftp_sftp_shutdown_tx.clone()).await.map_err(Error::other)?;
    // Initialize buffer profiling system
    init_buffer_profile_system(&opt);
    // Initialize event notifier
    init_event_notifier().await;
    // Start the audit system; failure here is logged but non-fatal.
    match start_audit_system().await {
        Ok(_) => info!(target: "rustfs::main::run","Audit system started successfully."),
        Err(e) => error!(target: "rustfs::main::run","Failed to start audit system: {}", e),
    }
    let buckets_list = store
        .list_bucket(&BucketOptions {
            no_metadata: true,
            ..Default::default()
        })
        .await
        .map_err(Error::other)?;
    // Collect bucket names into a vector
    let buckets: Vec<String> = buckets_list.into_iter().map(|v| v.name).collect();
    if let Some(pool) = GLOBAL_REPLICATION_POOL.get() {
        pool.clone().init_resync(ctx.clone(), buckets.clone()).await?;
    }
    init_bucket_metadata_sys(store.clone(), buckets.clone()).await;
    // 3. Initialize IAM System (Blocking load)
    // This ensures data is in memory before moving forward
    init_iam_sys(store.clone()).await.map_err(Error::other)?;
    readiness.mark_stage(SystemStage::IamReady);
    add_bucket_notification_configuration(buckets.clone()).await;
    // Initialize the global notification system
    new_global_notification_sys(endpoint_pools.clone()).await.map_err(|err| {
        error!("new_global_notification_sys failed {:?}", &err);
        Error::other(err)
    })?;
    // Create a cancellation token for AHM services
    let _ = create_ahm_services_cancel_token();
    // Check environment variables to determine if scanner and heal should be enabled
    let enable_scanner = rustfs_utils::get_env_bool("RUSTFS_ENABLE_SCANNER", true);
    let enable_heal = rustfs_utils::get_env_bool("RUSTFS_ENABLE_HEAL", true);
    info!(
        target: "rustfs::main::run",
        enable_scanner = enable_scanner,
        enable_heal = enable_heal,
        "Background services configuration: scanner={}, heal={}", enable_scanner, enable_heal
    );
    // Initialize heal manager and scanner based on environment variables.
    // The scanner takes an optional heal manager, so the combinations are
    // handled explicitly below.
    if enable_heal || enable_scanner {
        if enable_heal {
            // Initialize heal manager with channel processor
            let heal_storage = Arc::new(ECStoreHealStorage::new(store.clone()));
            let heal_manager = init_heal_manager(heal_storage, None).await?;
            if enable_scanner {
                info!(target: "rustfs::main::run","Starting scanner with heal manager...");
                let scanner = Scanner::new(Some(ScannerConfig::default()), Some(heal_manager));
                scanner.start().await?;
            } else {
                info!(target: "rustfs::main::run","Scanner disabled, but heal manager is initialized and available");
            }
        } else if enable_scanner {
            info!("Starting scanner without heal manager...");
            let scanner = Scanner::new(Some(ScannerConfig::default()), None);
            scanner.start().await?;
        }
    } else {
        info!(target: "rustfs::main::run","Both scanner and heal are disabled, skipping AHM service initialization");
    }
    // print server info
    print_server_info();
    init_update_check();
    println!(
        "RustFS server started successfully at {}, current time: {}",
        &server_address,
        chrono::offset::Utc::now().to_string()
    );
    info!(target: "rustfs::main::run","server started successfully at {}", &server_address);
    // 4. Mark as Full Ready now that critical components are warm
    readiness.mark_stage(SystemStage::FullReady);
    // Set the global RustFS initialization time to now
    rustfs_common::set_global_init_time_now().await;
    // Perform hibernation for 1 second
    tokio::time::sleep(SHUTDOWN_TIMEOUT).await;
    // Block until a shutdown signal arrives, then run the ordered shutdown.
    match wait_for_shutdown().await {
        #[cfg(unix)]
        ShutdownSignal::CtrlC | ShutdownSignal::Sigint | ShutdownSignal::Sigterm => {
            handle_shutdown(&state_manager, s3_shutdown_tx, console_shutdown_tx, ftp_sftp_shutdown_tx, ctx.clone()).await;
        }
        #[cfg(not(unix))]
        ShutdownSignal::CtrlC => {
            handle_shutdown(&state_manager, s3_shutdown_tx, console_shutdown_tx, ftp_sftp_shutdown_tx, ctx.clone()).await;
        }
    }
    info!(target: "rustfs::main::run","server is stopped state: {:?}", state_manager.current_state());
    Ok(())
}
/// Handles the shutdown process of the server
///
/// Ordering is deliberate: the cancellation token fires first, then the
/// background/AHM services, then the notifier and audit subsystems, then the
/// HTTP listeners, and — after a grace period — the FTP/SFTP services.
async fn handle_shutdown(
    state_manager: &ServiceStateManager,
    s3_shutdown_tx: Option<tokio::sync::broadcast::Sender<()>>,
    console_shutdown_tx: Option<tokio::sync::broadcast::Sender<()>>,
    ftp_sftp_shutdown_tx: tokio::sync::broadcast::Sender<()>,
    ctx: CancellationToken,
) {
    // Cancel the shared token first so token-aware tasks start winding down.
    ctx.cancel();
    info!(
        target: "rustfs::main::handle_shutdown",
        "Shutdown signal received in main thread"
    );
    // update the status to stopping first
    state_manager.update(ServiceState::Stopping);
    // Check environment variables to determine what services need to be stopped
    let enable_scanner = rustfs_utils::get_env_bool("RUSTFS_ENABLE_SCANNER", true);
    let enable_heal = rustfs_utils::get_env_bool("RUSTFS_ENABLE_HEAL", true);
    // Stop background services based on what was enabled
    if enable_scanner || enable_heal {
        info!(
            target: "rustfs::main::handle_shutdown",
            "Stopping background services (data scanner and auto heal)..."
        );
        shutdown_background_services();
        info!(
            target: "rustfs::main::handle_shutdown",
            "Stopping AHM services..."
        );
        shutdown_ahm_services();
    } else {
        info!(
            target: "rustfs::main::handle_shutdown",
            "Background services were disabled, skipping AHM shutdown"
        );
    }
    // Stop the notification system
    info!(
        target: "rustfs::main::handle_shutdown",
        "Shutting down event notifier system..."
    );
    shutdown_event_notifier().await;
    // Stop the audit system; failure is logged but does not block shutdown.
    info!(
        target: "rustfs::main::handle_shutdown",
        "Stopping audit system..."
    );
    match stop_audit_system().await {
        Ok(_) => info!("Audit system stopped successfully."),
        Err(e) => error!("Failed to stop audit system: {}", e),
    }
    info!(
        target: "rustfs::main::handle_shutdown",
        "Server is stopping..."
    );
    // Signal both HTTP listeners (S3 API and, if running, the console).
    if let Some(s3_shutdown_tx) = s3_shutdown_tx {
        let _ = s3_shutdown_tx.send(());
    }
    if let Some(console_shutdown_tx) = console_shutdown_tx {
        let _ = console_shutdown_tx.send(());
    }
    // Wait for the worker thread to complete the cleaning work
    tokio::time::sleep(SHUTDOWN_TIMEOUT).await;
    // Send shutdown signal to FTP/SFTP services
    let _ = ftp_sftp_shutdown_tx.send(());
    // the last updated status is stopped
    state_manager.update(ServiceState::Stopped);
    info!(
        target: "rustfs::main::handle_shutdown",
        "Server stopped current "
    );
    println!("Server stopped successfully.");
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/storage/entity.rs | rustfs/src/storage/entity.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
use s3s::dto::{
BucketKeyEnabled, BucketName, ChecksumCRC32, ChecksumCRC32C, ChecksumCRC64NVME, ChecksumSHA1, ChecksumSHA256, ChecksumType,
ETag, Expiration, Location, ObjectKey, ObjectVersionId, RequestCharged, SSEKMSKeyId, ServerSideEncryption,
};
/// Local mirror of [`s3s::dto::CompleteMultipartUploadOutput`].
///
/// Owning a copy of the output fields (all `Option`s, mirroring the s3s DTO
/// one-to-one) lets internal code construct/clone the result without
/// depending on the s3s struct directly. Convert from the DTO via `From`.
#[derive(Debug, Clone, Default)]
pub struct CompleteMultipartUploadOutput {
    pub bucket: Option<BucketName>,
    pub bucket_key_enabled: Option<BucketKeyEnabled>,
    pub checksum_crc32: Option<ChecksumCRC32>,
    pub checksum_crc32c: Option<ChecksumCRC32C>,
    pub checksum_crc64nvme: Option<ChecksumCRC64NVME>,
    pub checksum_sha1: Option<ChecksumSHA1>,
    pub checksum_sha256: Option<ChecksumSHA256>,
    pub checksum_type: Option<ChecksumType>,
    pub e_tag: Option<ETag>,
    pub expiration: Option<Expiration>,
    pub key: Option<ObjectKey>,
    pub location: Option<Location>,
    pub request_charged: Option<RequestCharged>,
    pub ssekms_key_id: Option<SSEKMSKeyId>,
    pub server_side_encryption: Option<ServerSideEncryption>,
    pub version_id: Option<ObjectVersionId>,
}
/// Field-for-field conversion from the s3s DTO; no values are transformed.
impl From<s3s::dto::CompleteMultipartUploadOutput> for CompleteMultipartUploadOutput {
    fn from(output: s3s::dto::CompleteMultipartUploadOutput) -> Self {
        Self {
            bucket: output.bucket,
            bucket_key_enabled: output.bucket_key_enabled,
            checksum_crc32: output.checksum_crc32,
            checksum_crc32c: output.checksum_crc32c,
            checksum_crc64nvme: output.checksum_crc64nvme,
            checksum_sha1: output.checksum_sha1,
            checksum_sha256: output.checksum_sha256,
            checksum_type: output.checksum_type,
            e_tag: output.e_tag,
            expiration: output.expiration,
            key: output.key,
            location: output.location,
            request_charged: output.request_charged,
            ssekms_key_id: output.ssekms_key_id,
            server_side_encryption: output.server_side_encryption,
            version_id: output.version_id,
        }
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/storage/ecfs.rs | rustfs/src/storage/ecfs.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::auth::get_condition_values;
use crate::config::workload_profiles::{
RustFSBufferConfig, WorkloadProfile, get_global_buffer_config, is_buffer_profile_enabled,
};
use crate::error::ApiError;
use crate::server::RemoteAddr;
use crate::storage::concurrency::{
CachedGetObject, ConcurrencyManager, GetObjectGuard, get_concurrency_aware_buffer_size, get_concurrency_manager,
};
use crate::storage::entity;
use crate::storage::helper::OperationHelper;
use crate::storage::options::{filter_object_metadata, get_content_sha256};
use crate::storage::{
access::{ReqInfo, authorize_request},
options::{
copy_dst_opts, copy_src_opts, del_opts, extract_metadata, extract_metadata_from_mime_with_object_name,
get_complete_multipart_upload_opts, get_opts, parse_copy_source_range, put_opts,
},
};
use base64::{Engine, engine::general_purpose::STANDARD as BASE64_STANDARD};
use bytes::Bytes;
use chrono::{DateTime, Utc};
use datafusion::arrow::{
csv::WriterBuilder as CsvWriterBuilder, json::WriterBuilder as JsonWriterBuilder, json::writer::JsonArray,
};
use futures::StreamExt;
use http::{HeaderMap, StatusCode};
use metrics::counter;
use rustfs_ecstore::{
bucket::{
lifecycle::{
bucket_lifecycle_ops::{RestoreRequestOps, post_restore_opts, validate_transition_tier},
lifecycle::{self, Lifecycle, TransitionOptions},
},
metadata::{
BUCKET_LIFECYCLE_CONFIG, BUCKET_NOTIFICATION_CONFIG, BUCKET_POLICY_CONFIG, BUCKET_REPLICATION_CONFIG,
BUCKET_SSECONFIG, BUCKET_TAGGING_CONFIG, BUCKET_VERSIONING_CONFIG, OBJECT_LOCK_CONFIG,
},
metadata_sys,
metadata_sys::get_replication_config,
object_lock::objectlock_sys::BucketObjectLockSys,
policy_sys::PolicySys,
replication::{
DeletedObjectReplicationInfo, ReplicationConfigurationExt, check_replicate_delete, get_must_replicate_options,
must_replicate, schedule_replication, schedule_replication_delete,
},
tagging::{decode_tags, encode_tags},
utils::serialize,
versioning::VersioningApi,
versioning_sys::BucketVersioningSys,
},
client::object_api_utils::to_s3s_etag,
compress::{MIN_COMPRESSIBLE_SIZE, is_compressible},
disk::{error::DiskError, error_reduce::is_all_buckets_not_found},
error::{StorageError, is_err_bucket_not_found, is_err_object_not_found, is_err_version_not_found},
new_object_layer_fn,
set_disk::{MAX_PARTS_COUNT, is_valid_storage_class},
store_api::{
BucketOptions,
CompletePart,
DeleteBucketOptions,
HTTPRangeSpec,
MakeBucketOptions,
MultipartUploadResult,
ObjectIO,
ObjectInfo,
ObjectOptions,
ObjectToDelete,
PutObjReader,
StorageAPI,
// RESERVED_METADATA_PREFIX,
},
};
use rustfs_filemeta::REPLICATE_INCOMING_DELETE;
use rustfs_filemeta::{ObjectPartInfo, RestoreStatusOps};
use rustfs_filemeta::{ReplicationStatusType, ReplicationType, VersionPurgeStatusType};
use rustfs_kms::{
DataKey,
service_manager::get_global_encryption_service,
types::{EncryptionMetadata, ObjectEncryptionContext},
};
use rustfs_notify::{EventArgsBuilder, notifier_global};
use rustfs_policy::policy::{
action::{Action, S3Action},
{BucketPolicy, BucketPolicyArgs, Validator},
};
use rustfs_rio::{CompressReader, DecryptReader, EncryptReader, EtagReader, HardLimitReader, HashReader, Reader, WarpReader};
use rustfs_s3select_api::{
object_store::bytes_stream,
query::{Context, Query},
};
use rustfs_s3select_query::get_global_db;
use rustfs_targets::{
EventName,
arn::{ARN, TargetID, TargetIDError},
};
use rustfs_utils::{
CompressionAlgorithm, extract_req_params_header, extract_resp_elements, get_request_host, get_request_user_agent,
http::{
AMZ_BUCKET_REPLICATION_STATUS, AMZ_CHECKSUM_MODE, AMZ_CHECKSUM_TYPE,
headers::{
AMZ_DECODED_CONTENT_LENGTH, AMZ_OBJECT_TAGGING, AMZ_RESTORE_EXPIRY_DAYS, AMZ_RESTORE_REQUEST_DATE,
RESERVED_METADATA_PREFIX_LOWER,
},
},
obj::extract_user_defined_metadata,
path::{is_dir_object, path_join_buf},
};
use rustfs_zip::CompressionFormat;
use s3s::header::{X_AMZ_RESTORE, X_AMZ_RESTORE_OUTPUT_PATH};
use s3s::{S3, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, dto::*, s3_error};
use serde_urlencoded::from_bytes;
use std::convert::Infallible;
use std::ops::Add;
use std::{
collections::HashMap,
fmt::Debug,
path::Path,
str::FromStr,
sync::{Arc, LazyLock},
};
use time::{OffsetDateTime, format_description::well_known::Rfc3339};
use tokio::{
io::{AsyncRead, AsyncSeek},
sync::mpsc,
};
use tokio_stream::wrappers::ReceiverStream;
use tokio_tar::Archive;
use tokio_util::io::{ReaderStream, StreamReader};
use tracing::{debug, error, info, instrument, warn};
use urlencoding::encode;
use uuid::Uuid;
/// Unwraps a `Result`, converting any `Err` into an S3 `InternalError`
/// response (with the error's `Display` text) and returning early from
/// the enclosing handler.
macro_rules! try_ {
    ($result:expr) => {
        match $result {
            Ok(val) => val,
            Err(err) => {
                return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("{}", err)));
            }
        }
    };
}
// Process-wide constant owner identity ("rustfs" + fixed hash-like id).
// Built lazily on first use; presumably reported as the canonical owner in
// S3 responses — usage sites are outside this view, confirm against callers.
static RUSTFS_OWNER: LazyLock<Owner> = LazyLock::new(|| Owner {
    display_name: Some("rustfs".to_owned()),
    id: Some("c19050dbcee97fda828689dda99097a6321af2248fa760517237346e5d9c8a66".to_owned()),
});
/// Calculate adaptive buffer size with workload profile support.
///
/// This enhanced version supports different workload profiles for optimal performance
/// across various use cases (AI/ML, web workloads, secure storage, etc.).
///
/// # Arguments
/// * `file_size` - The size of the file in bytes, or -1 if unknown
/// * `profile` - Optional workload profile. If None, uses auto-detection or GeneralPurpose
///
/// # Returns
/// Optimal buffer size in bytes based on the workload profile and file size
///
/// # Examples
/// ```ignore
/// // Use general purpose profile (default)
/// let buffer_size = get_adaptive_buffer_size_with_profile(1024 * 1024, None);
///
/// // Use AI training profile for large model files
/// let buffer_size = get_adaptive_buffer_size_with_profile(
/// 500 * 1024 * 1024,
/// Some(WorkloadProfile::AiTraining)
/// );
///
/// // Use secure storage profile for compliance scenarios
/// let buffer_size = get_adaptive_buffer_size_with_profile(
/// 10 * 1024 * 1024,
/// Some(WorkloadProfile::SecureStorage)
/// );
/// ```
///
#[allow(dead_code)]
fn get_adaptive_buffer_size_with_profile(file_size: i64, profile: Option<WorkloadProfile>) -> usize {
    // Build the buffer config from the explicit profile when one is given,
    // otherwise let the config auto-detect the environment; then size the
    // buffer for this file.
    profile
        .map(RustFSBufferConfig::new)
        .unwrap_or_else(RustFSBufferConfig::with_auto_detect)
        .get_buffer_size(file_size)
}
/// Get adaptive buffer size using global workload profile configuration.
///
/// This is the primary buffer sizing function that uses the workload profile
/// system configured at startup to provide optimal buffer sizes for different scenarios.
///
/// The function automatically selects buffer sizes based on:
/// - Configured workload profile (default: GeneralPurpose)
/// - File size characteristics
/// - Optional performance metrics collection
///
/// # Arguments
/// * `file_size` - The size of the file in bytes, or -1 if unknown
///
/// # Returns
/// Optimal buffer size in bytes based on the configured workload profile
///
/// # Performance Metrics
/// When compiled with the `metrics` feature flag, this function tracks:
/// - Buffer size distribution
/// - Selection frequency
/// - Buffer-to-file size ratios
///
/// # Examples
/// ```ignore
/// // Uses configured profile (default: GeneralPurpose)
/// let buffer_size = get_buffer_size_opt_in(file_size);
/// ```
fn get_buffer_size_opt_in(file_size: i64) -> usize {
    // Resolve the buffer size from either the globally configured workload
    // profile (the default) or, when profiling is opted out, a fixed
    // GeneralPurpose profile for consistent behavior.
    let buffer_size = match is_buffer_profile_enabled() {
        true => get_global_buffer_config().get_buffer_size(file_size),
        false => RustFSBufferConfig::new(WorkloadProfile::GeneralPurpose).get_buffer_size(file_size),
    };
    // When the `metrics` feature is on, record the chosen size, a selection
    // counter, and the buffer-to-file ratio (only when the size is known).
    #[cfg(feature = "metrics")]
    {
        use metrics::histogram;
        histogram!("rustfs.buffer.size.bytes").record(buffer_size as f64);
        counter!("rustfs.buffer.size.selections").increment(1);
        if file_size >= 0 {
            let ratio = buffer_size as f64 / file_size as f64;
            histogram!("rustfs.buffer.to.file.ratio").record(ratio);
        }
    }
    buffer_size
}
/// S3 frontend handle. Stateless: every operation resolves the global
/// erasure-coded store via `new_object_layer_fn()` at call time.
#[derive(Debug, Clone)]
pub struct FS {
    // pub store: ECStore,
}
/// Result of provisioning managed SSE material for one object:
/// the plaintext data key, the `x-rustfs-encryption-*` metadata headers
/// to persist alongside the object, and the KMS key id that wrapped the key.
struct ManagedEncryptionMaterial {
    data_key: DataKey,
    headers: HashMap<String, String>,
    kms_key_id: String,
}
/// Provisions SSE-S3 / SSE-KMS encryption material for a new object.
///
/// Resolves the KMS key to use (explicit `kms_key_id`, else the service's
/// default key), asks the KMS service for a fresh data key, and renders the
/// resulting `EncryptionMetadata` into internal headers (including the
/// plaintext original size) to be stored with the object.
///
/// Errors when the global encryption service is uninitialized, the
/// algorithm is not a managed SSE scheme, no KMS key can be resolved, or
/// data-key creation fails.
async fn create_managed_encryption_material(
    bucket: &str,
    key: &str,
    algorithm: &ServerSideEncryption,
    kms_key_id: Option<String>,
    original_size: i64,
) -> Result<ManagedEncryptionMaterial, ApiError> {
    let Some(service) = get_global_encryption_service().await else {
        return Err(ApiError::from(StorageError::other("KMS encryption service is not initialized")));
    };
    if !is_managed_sse(algorithm) {
        return Err(ApiError::from(StorageError::other(format!(
            "Unsupported server-side encryption algorithm: {}",
            algorithm.as_str()
        ))));
    }
    let algorithm_str = algorithm.as_str();
    let mut context = ObjectEncryptionContext::new(bucket.to_string(), key.to_string());
    // A negative size means "unknown"; only record known sizes in the context.
    if original_size >= 0 {
        context = context.with_size(original_size as u64);
    }
    let mut kms_key_candidate = kms_key_id;
    if kms_key_candidate.is_none() {
        kms_key_candidate = service.get_default_key_id().cloned();
    }
    let kms_key_to_use = kms_key_candidate
        .clone()
        .ok_or_else(|| ApiError::from(StorageError::other("No KMS key available for managed server-side encryption")))?;
    let (data_key, encrypted_data_key) = service
        .create_data_key(&kms_key_candidate, &context)
        .await
        .map_err(|e| ApiError::from(StorageError::other(format!("Failed to create data key: {e}"))))?;
    let metadata = EncryptionMetadata {
        algorithm: algorithm_str.to_string(),
        key_id: kms_key_to_use.clone(),
        key_version: 1,
        iv: data_key.nonce.to_vec(),
        tag: None,
        encryption_context: context.encryption_context.clone(),
        encrypted_at: Utc::now(),
        // Unknown sizes are stored as 0 in the persisted metadata.
        original_size: if original_size >= 0 { original_size as u64 } else { 0 },
        encrypted_data_key,
    };
    let mut headers = service.metadata_to_headers(&metadata);
    headers.insert("x-rustfs-encryption-original-size".to_string(), metadata.original_size.to_string());
    Ok(ManagedEncryptionMaterial {
        data_key,
        headers,
        kms_key_id: kms_key_to_use,
    })
}
/// Recovers the plaintext data key for an object encrypted with managed SSE.
///
/// Returns `Ok(None)` when the metadata carries no `x-rustfs-encryption-key`
/// entry (object is not managed-SSE encrypted). Otherwise parses the stored
/// encryption metadata, unwraps the data key via KMS, and returns the
/// 32-byte key, the 12-byte base nonce, and the recorded original plaintext
/// size (if the size header parses as `i64`).
async fn decrypt_managed_encryption_key(
    bucket: &str,
    key: &str,
    metadata: &HashMap<String, String>,
) -> Result<Option<([u8; 32], [u8; 12], Option<i64>)>, ApiError> {
    if !metadata.contains_key("x-rustfs-encryption-key") {
        return Ok(None);
    }
    let Some(service) = get_global_encryption_service().await else {
        return Err(ApiError::from(StorageError::other("KMS encryption service is not initialized")));
    };
    let parsed = service
        .headers_to_metadata(metadata)
        .map_err(|e| ApiError::from(StorageError::other(format!("Failed to parse encryption metadata: {e}"))))?;
    // The AES-GCM nonce must be exactly 12 bytes; reject anything else.
    if parsed.iv.len() != 12 {
        return Err(ApiError::from(StorageError::other("Invalid encryption nonce length; expected 12 bytes")));
    }
    let context = ObjectEncryptionContext::new(bucket.to_string(), key.to_string());
    let data_key = service
        .decrypt_data_key(&parsed.encrypted_data_key, &context)
        .await
        .map_err(|e| ApiError::from(StorageError::other(format!("Failed to decrypt data key: {e}"))))?;
    let key_bytes = data_key.plaintext_key;
    let mut nonce = [0u8; 12];
    nonce.copy_from_slice(&parsed.iv[..12]);
    let original_size = metadata
        .get("x-rustfs-encryption-original-size")
        .and_then(|s| s.parse::<i64>().ok());
    Ok(Some((key_bytes, nonce, original_size)))
}
/// Derives a per-part nonce from the object's base nonce by treating the
/// last four bytes as a big-endian counter and adding `part_number` to it
/// (with wrap-around); the first eight bytes are unchanged.
fn derive_part_nonce(base: [u8; 12], part_number: usize) -> [u8; 12] {
    let mut tail = [0u8; 4];
    tail.copy_from_slice(&base[8..12]);
    let counter = u32::from_be_bytes(tail).wrapping_add(part_number as u32);
    let mut derived = base;
    derived[8..12].copy_from_slice(&counter.to_be_bytes());
    derived
}
/// Deserialization target for the `allow-unordered` query-string parameter
/// on ListObjects requests (parsed with `serde_urlencoded`).
#[derive(Debug, Default, serde::Deserialize)]
struct ListObjectUnorderedQuery {
    #[serde(rename = "allow-unordered")]
    allow_unordered: Option<String>,
}
/// Async read/seek adapter over a fully in-memory byte buffer, backed by
/// `std::io::Cursor`; all operations complete immediately (never pending).
struct InMemoryAsyncReader {
    cursor: std::io::Cursor<Vec<u8>>,
}
impl InMemoryAsyncReader {
    /// Wraps the given buffer in a cursor positioned at offset 0.
    fn new(data: Vec<u8>) -> Self {
        Self {
            cursor: std::io::Cursor::new(data),
        }
    }
}
impl AsyncRead for InMemoryAsyncReader {
    /// Reads synchronously from the in-memory cursor; always returns
    /// `Ready` since no I/O can block.
    fn poll_read(
        mut self: std::pin::Pin<&mut Self>,
        _cx: &mut std::task::Context<'_>,
        buf: &mut tokio::io::ReadBuf<'_>,
    ) -> std::task::Poll<std::io::Result<()>> {
        let unfilled = buf.initialize_unfilled();
        let bytes_read = std::io::Read::read(&mut self.cursor, unfilled)?;
        // Zero bytes read signals EOF to the caller per AsyncRead's contract.
        buf.advance(bytes_read);
        std::task::Poll::Ready(Ok(()))
    }
}
impl AsyncSeek for InMemoryAsyncReader {
    /// Seeks eagerly within the cursor; `poll_complete` then just reports
    /// the resulting position.
    fn start_seek(mut self: std::pin::Pin<&mut Self>, position: std::io::SeekFrom) -> std::io::Result<()> {
        // std::io::Cursor natively supports negative SeekCurrent offsets
        // It will automatically handle validation and return an error if the final position would be negative
        std::io::Seek::seek(&mut self.cursor, position)?;
        Ok(())
    }
    fn poll_complete(self: std::pin::Pin<&mut Self>, _cx: &mut std::task::Context<'_>) -> std::task::Poll<std::io::Result<u64>> {
        std::task::Poll::Ready(Ok(self.cursor.position()))
    }
}
/// Decrypts a managed-SSE multipart object by decrypting each part with its
/// own derived nonce, and returns an in-memory reader over the concatenated
/// plaintext together with the total plaintext length.
///
/// NOTE(review): the entire plaintext is buffered in memory before a reader
/// is returned, so peak memory is proportional to object size.
async fn decrypt_multipart_managed_stream(
    mut encrypted_stream: Box<dyn AsyncRead + Unpin + Send + Sync>,
    parts: &[ObjectPartInfo],
    key_bytes: [u8; 32],
    base_nonce: [u8; 12],
) -> Result<(Box<dyn Reader>, i64), StorageError> {
    // Pre-size the output from the recorded per-part plaintext sizes
    // (negative/unknown sizes contribute 0 to the capacity estimate).
    let total_plain_capacity: usize = parts.iter().map(|part| part.actual_size.max(0) as usize).sum();
    let mut plaintext = Vec::with_capacity(total_plain_capacity);
    for part in parts {
        if part.size == 0 {
            continue;
        }
        // Read exactly the encrypted byte count of this part from the stream.
        let mut encrypted_part = vec![0u8; part.size];
        tokio::io::AsyncReadExt::read_exact(&mut encrypted_stream, &mut encrypted_part)
            .await
            .map_err(|e| StorageError::other(format!("failed to read encrypted multipart segment {}: {}", part.number, e)))?;
        // Each part was encrypted under a nonce derived from the base nonce
        // and its part number; re-derive it for decryption.
        let part_nonce = derive_part_nonce(base_nonce, part.number);
        let cursor = std::io::Cursor::new(encrypted_part);
        let mut decrypt_reader = DecryptReader::new(WarpReader::new(cursor), key_bytes, part_nonce);
        tokio::io::AsyncReadExt::read_to_end(&mut decrypt_reader, &mut plaintext)
            .await
            .map_err(|e| StorageError::other(format!("failed to decrypt multipart segment {}: {}", part.number, e)))?;
    }
    let total_plain_size = plaintext.len() as i64;
    let reader = Box::new(WarpReader::new(InMemoryAsyncReader::new(plaintext))) as Box<dyn Reader>;
    Ok((reader, total_plain_size))
}
/// Removes the server-managed SSE bookkeeping entries from an object's
/// metadata map; all other entries are left untouched.
fn strip_managed_encryption_metadata(metadata: &mut HashMap<String, String>) {
    // Keys written by the managed-SSE encryption path.
    for key in [
        "x-amz-server-side-encryption",
        "x-amz-server-side-encryption-aws-kms-key-id",
        "x-rustfs-encryption-iv",
        "x-rustfs-encryption-tag",
        "x-rustfs-encryption-key",
        "x-rustfs-encryption-context",
        "x-rustfs-encryption-original-size",
    ] {
        metadata.remove(key);
    }
}
/// True when the requested SSE scheme is server-managed:
/// SSE-S3 (`AES256`) or SSE-KMS (`aws:kms`).
fn is_managed_sse(algorithm: &ServerSideEncryption) -> bool {
    let alg = algorithm.as_str();
    alg == "AES256" || alg == "aws:kms"
}
/// Validate object key for control characters and log special characters
///
/// This function:
/// 1. Rejects keys containing control characters (null bytes, newlines, carriage returns)
/// 2. Logs debug information for keys containing spaces, plus signs, or percent signs
///
/// The s3s library handles URL decoding, so keys are already decoded when they reach this function.
/// This validation ensures that invalid characters that could cause issues are rejected early.
/// Rejects object keys containing control characters and logs keys that
/// contain characters commonly involved in URL-encoding confusion.
///
/// Keys reach this function already URL-decoded (s3s handles decoding),
/// so any remaining NUL/newline/carriage-return is genuinely invalid.
fn validate_object_key(key: &str, operation: &str) -> S3Result<()> {
    let has_control = key.chars().any(|c| matches!(c, '\0' | '\n' | '\r'));
    if has_control {
        return Err(S3Error::with_message(
            S3ErrorCode::InvalidArgument,
            format!("Object key contains invalid control characters: {key:?}"),
        ));
    }
    // Spaces, '+', and '%' are legal but often point at client encoding
    // issues; record them at debug level to aid diagnosis.
    let has_special = key.chars().any(|c| matches!(c, ' ' | '+' | '%'));
    if has_special {
        debug!("{} object with special characters in key: {:?}", operation, key);
    }
    Ok(())
}
/// Validate that 'allow-unordered' parameter is not used with a delimiter
///
/// This function:
/// 1. Checks if a delimiter is specified in the ListObjects request
/// 2. Parses the query string to check for the 'allow-unordered' parameter
/// 3. Rejects the request if both 'delimiter' and 'allow-unordered=true' are present
///
/// According to S3 compatibility requirements, unordered listing cannot be combined with
/// hierarchical directory traversal (delimited listing). This validation ensures
/// conflicting parameters are caught before processing the request.
/// Rejects ListObjects requests that combine a `delimiter` with
/// `allow-unordered=true` in the query string: unordered listing cannot be
/// combined with hierarchical (delimited) traversal.
///
/// Requests without a delimiter, or without a query string, always pass.
fn validate_list_object_unordered_with_delimiter(delimiter: Option<&Delimiter>, query_string: Option<&str>) -> S3Result<()> {
    match (delimiter, query_string) {
        (Some(_), Some(query)) => {
            // Unparseable query strings are treated as "not unordered".
            let unordered = from_bytes::<ListObjectUnorderedQuery>(query.as_bytes())
                .ok()
                .and_then(|params| params.allow_unordered)
                .is_some_and(|v| v == "true");
            if unordered {
                Err(S3Error::with_message(
                    S3ErrorCode::InvalidArgument,
                    "The allow-unordered parameter cannot be used when delimiter is specified.".to_string(),
                ))
            } else {
                Ok(())
            }
        }
        _ => Ok(()),
    }
}
impl FS {
    /// Creates a stateless frontend; the backing store is resolved globally
    /// per operation via `new_object_layer_fn()`.
    pub fn new() -> Self {
        // let store: ECStore = ECStore::new(address, endpoint_pools).await?;
        Self {}
    }
    /// "Snowball"-style bulk upload: treats the PutObject body as a
    /// (possibly compressed) tar archive and writes each contained file as
    /// its own object, optionally prefixed by the
    /// `X-Amz-Meta-Rustfs-Snowball-Prefix` header. Emits one
    /// ObjectCreatedPut notification per extracted object; the helper's own
    /// aggregate event is suppressed.
    async fn put_object_extract(&self, req: S3Request<PutObjectInput>) -> S3Result<S3Response<PutObjectOutput>> {
        let helper = OperationHelper::new(&req, EventName::ObjectCreatedPut, "s3:PutObject").suppress_event();
        let input = req.input;
        // Partial move: only the named fields are moved out; the checksum
        // fields are still read from `input` further below.
        let PutObjectInput {
            body,
            bucket,
            key,
            version_id,
            content_length,
            content_md5,
            ..
        } = input;
        let event_version_id = version_id;
        let Some(body) = body else { return Err(s3_error!(IncompleteBody)) };
        // Resolve the payload size: Content-Length, else the decoded-length
        // header used by aws-chunked uploads.
        let size = match content_length {
            Some(c) => c,
            None => {
                if let Some(val) = req.headers.get(AMZ_DECODED_CONTENT_LENGTH) {
                    match atoi::atoi::<i64>(val.as_bytes()) {
                        Some(x) => x,
                        None => return Err(s3_error!(UnexpectedContent)),
                    }
                } else {
                    return Err(s3_error!(UnexpectedContent));
                }
            }
        };
        // Apply adaptive buffer sizing based on file size for optimal streaming performance.
        // Uses workload profile configuration (enabled by default) to select appropriate buffer size.
        // Buffer sizes range from 32KB to 4MB depending on file size and configured workload profile.
        let buffer_size = get_buffer_size_opt_in(size);
        let body = tokio::io::BufReader::with_capacity(
            buffer_size,
            StreamReader::new(body.map(|f| f.map_err(|e| std::io::Error::other(e.to_string())))),
        );
        // The archive format is inferred from the object key's extension.
        let Some(ext) = Path::new(&key).extension().and_then(|s| s.to_str()) else {
            return Err(s3_error!(InvalidArgument, "key extension not found"));
        };
        let ext = ext.to_owned();
        // Content-MD5 arrives base64-encoded; the hash reader wants lowercase hex.
        let md5hex = if let Some(base64_md5) = content_md5 {
            let md5 = base64_simd::STANDARD
                .decode_to_vec(base64_md5.as_bytes())
                .map_err(|e| ApiError::from(StorageError::other(format!("Invalid content MD5: {e}"))))?;
            Some(hex_simd::encode_to_string(&md5, hex_simd::AsciiCase::Lower))
        } else {
            None
        };
        let sha256hex = get_content_sha256(&req.headers);
        let actual_size = size;
        let reader: Box<dyn Reader> = Box::new(WarpReader::new(body));
        let mut hreader = HashReader::new(reader, size, actual_size, md5hex, sha256hex, false).map_err(ApiError::from)?;
        if let Err(err) = hreader.add_checksum_from_s3s(&req.headers, req.trailing_headers.clone(), false) {
            return Err(ApiError::from(StorageError::other(format!("add_checksum error={err:?}"))).into());
        }
        // TODO: support zip
        let decoder = CompressionFormat::from_extension(&ext).get_decoder(hreader).map_err(|e| {
            error!("get_decoder err {:?}", e);
            s3_error!(InvalidArgument, "get_decoder err")
        })?;
        let mut ar = Archive::new(decoder);
        let mut entries = ar.entries().map_err(|e| {
            error!("get entries err {:?}", e);
            s3_error!(InvalidArgument, "get entries err")
        })?;
        let Some(store) = new_object_layer_fn() else {
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };
        // Optional key prefix applied to every extracted entry.
        let prefix = req
            .headers
            .get("X-Amz-Meta-Rustfs-Snowball-Prefix")
            .map(|v| v.to_str().unwrap_or_default())
            .unwrap_or_default();
        let version_id = match event_version_id {
            Some(v) => v.to_string(),
            None => String::new(),
        };
        // Stream the archive: each regular-file entry becomes one object.
        while let Some(entry) = entries.next().await {
            let f = match entry {
                Ok(f) => f,
                Err(e) => {
                    error!("Failed to read archive entry: {}", e);
                    return Err(s3_error!(InvalidArgument, "Failed to read archive entry: {:?}", e));
                }
            };
            if f.header().entry_type().is_dir() {
                continue;
            }
            if let Ok(fpath) = f.path() {
                let mut fpath = fpath.to_string_lossy().to_string();
                if !prefix.is_empty() {
                    fpath = format!("{prefix}/{fpath}");
                }
                let mut size = f.header().size().unwrap_or_default() as i64;
                debug!("Extracting file: {}, size: {} bytes", fpath, size);
                let mut reader: Box<dyn Reader> = Box::new(WarpReader::new(f));
                let mut metadata = HashMap::new();
                let actual_size = size;
                // Transparently compress eligible entries; size becomes -1
                // (unknown) because the compressed length is not known upfront.
                if is_compressible(&HeaderMap::new(), &fpath) && size > MIN_COMPRESSIBLE_SIZE as i64 {
                    metadata.insert(
                        format!("{RESERVED_METADATA_PREFIX_LOWER}compression"),
                        CompressionAlgorithm::default().to_string(),
                    );
                    metadata.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}actual-size",), size.to_string());
                    let hrd = HashReader::new(reader, size, actual_size, None, None, false).map_err(ApiError::from)?;
                    reader = Box::new(CompressReader::new(hrd, CompressionAlgorithm::default()));
                    size = -1;
                }
                let hrd = HashReader::new(reader, size, actual_size, None, None, false).map_err(ApiError::from)?;
                let mut reader = PutObjReader::new(hrd);
                let _obj_info = store
                    .put_object(&bucket, &fpath, &mut reader, &ObjectOptions::default())
                    .await
                    .map_err(ApiError::from)?;
                // Invalidate cache for the written object to prevent stale data
                let manager = get_concurrency_manager();
                let fpath_clone = fpath.clone();
                let bucket_clone = bucket.clone();
                tokio::spawn(async move {
                    manager.invalidate_cache_versioned(&bucket_clone, &fpath_clone, None).await;
                });
                let e_tag = _obj_info.etag.clone().map(|etag| to_s3s_etag(&etag));
                // // store.put_object(bucket, object, data, opts);
                let output = PutObjectOutput {
                    e_tag,
                    ..Default::default()
                };
                let event_args = rustfs_notify::EventArgs {
                    event_name: EventName::ObjectCreatedPut,
                    bucket_name: bucket.clone(),
                    object: _obj_info.clone(),
                    req_params: extract_req_params_header(&req.headers),
                    resp_elements: extract_resp_elements(&S3Response::new(output.clone())),
                    version_id: version_id.clone(),
                    host: get_request_host(&req.headers),
                    user_agent: get_request_user_agent(&req.headers),
                };
                // Asynchronous call will not block the response of the current request
                tokio::spawn(async move {
                    notifier_global::notify(event_args).await;
                });
            }
        }
        // match decompress(
        //     body,
        //     CompressionFormat::from_extension(&ext),
        //     |entry: tokio_tar::Entry<tokio_tar::Archive<Box<dyn AsyncRead + Send + Unpin + 'static>>>| async move {
        //         let path = entry.path().unwrap();
        //         debug!("Extracted: {}", path.display());
        //         Ok(())
        //     },
        // )
        // .await
        // {
        //     Ok(_) => info!("Decompression completed successfully"),
        //     Err(e) => error!("Decompression failed: {}", e),
        // }
        // Prefer trailer-supplied checksums (aws-chunked trailing headers)
        // over the ones sent in the initial request, when present.
        let mut checksum_crc32 = input.checksum_crc32;
        let mut checksum_crc32c = input.checksum_crc32c;
        let mut checksum_sha1 = input.checksum_sha1;
        let mut checksum_sha256 = input.checksum_sha256;
        let mut checksum_crc64nvme = input.checksum_crc64nvme;
        if let Some(alg) = &input.checksum_algorithm
            && let Some(Some(checksum_str)) = req.trailing_headers.as_ref().map(|trailer| {
                let key = match alg.as_str() {
                    ChecksumAlgorithm::CRC32 => rustfs_rio::ChecksumType::CRC32.key(),
                    ChecksumAlgorithm::CRC32C => rustfs_rio::ChecksumType::CRC32C.key(),
                    ChecksumAlgorithm::SHA1 => rustfs_rio::ChecksumType::SHA1.key(),
                    ChecksumAlgorithm::SHA256 => rustfs_rio::ChecksumType::SHA256.key(),
                    ChecksumAlgorithm::CRC64NVME => rustfs_rio::ChecksumType::CRC64_NVME.key(),
                    _ => return None,
                };
                trailer.read(|headers| {
                    headers
                        .get(key.unwrap_or_default())
                        .and_then(|value| value.to_str().ok().map(|s| s.to_string()))
                })
            })
        {
            match alg.as_str() {
                ChecksumAlgorithm::CRC32 => checksum_crc32 = checksum_str,
                ChecksumAlgorithm::CRC32C => checksum_crc32c = checksum_str,
                ChecksumAlgorithm::SHA1 => checksum_sha1 = checksum_str,
                ChecksumAlgorithm::SHA256 => checksum_sha256 = checksum_str,
                ChecksumAlgorithm::CRC64NVME => checksum_crc64nvme = checksum_str,
                _ => (),
            }
        }
        warn!(
            "put object extract checksum_crc32={checksum_crc32:?}, checksum_crc32c={checksum_crc32c:?}, checksum_sha1={checksum_sha1:?}, checksum_sha256={checksum_sha256:?}, checksum_crc64nvme={checksum_crc64nvme:?}",
        );
        // TODO: etag
        let output = PutObjectOutput {
            // e_tag: hreader.try_resolve_etag().map(|v| ETag::Strong(v)),
            checksum_crc32,
            checksum_crc32c,
            checksum_sha1,
            checksum_sha256,
            checksum_crc64nvme,
            ..Default::default()
        };
        let result = Ok(S3Response::new(output));
        let _ = helper.complete(&result);
        result
    }
}
/// Helper function to get store and validate bucket exists
/// Resolves the global object store and confirms that `bucket` exists.
///
/// Errors with `InternalError` when the store is not initialized, or with
/// the mapped storage error when the bucket lookup fails.
async fn get_validated_store(bucket: &str) -> S3Result<Arc<rustfs_ecstore::store::ECStore>> {
    let store = new_object_layer_fn()
        .ok_or_else(|| S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()))?;
    // A successful lookup is all we need; the info itself is discarded.
    store
        .get_bucket_info(bucket, &BucketOptions::default())
        .await
        .map_err(ApiError::from)?;
    Ok(store)
}
#[async_trait::async_trait]
impl S3 for FS {
    #[instrument(
        level = "debug",
        skip(self, req),
        fields(start_time=?time::OffsetDateTime::now_utc())
    )]
    /// Creates a bucket on the global store, honoring the object-lock flag,
    /// bumps the creation counter metric, and emits audit/event data via
    /// the operation helper on completion.
    async fn create_bucket(&self, req: S3Request<CreateBucketInput>) -> S3Result<S3Response<CreateBucketOutput>> {
        let helper = OperationHelper::new(&req, EventName::BucketCreated, "s3:CreateBucket");
        let CreateBucketInput {
            bucket,
            object_lock_enabled_for_bucket,
            ..
        } = req.input;
        let Some(store) = new_object_layer_fn() else {
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };
        counter!("rustfs_create_bucket_total").increment(1);
        store
            .make_bucket(
                &bucket,
                &MakeBucketOptions {
                    force_create: false, // TODO: force support
                    lock_enabled: object_lock_enabled_for_bucket.is_some_and(|v| v),
                    ..Default::default()
                },
            )
            .await
            .map_err(ApiError::from)?;
        let output = CreateBucketOutput::default();
        let result = Ok(S3Response::new(output));
        // Dispatch audit log + bucket-created notification; errors ignored.
        let _ = helper.complete(&result);
        result
    }
/// Copy an object from one location to another
#[instrument(level = "debug", skip(self, req))]
async fn copy_object(&self, req: S3Request<CopyObjectInput>) -> S3Result<S3Response<CopyObjectOutput>> {
let mut helper = OperationHelper::new(&req, EventName::ObjectCreatedCopy, "s3:CopyObject");
let CopyObjectInput {
copy_source,
bucket,
key,
server_side_encryption: requested_sse,
ssekms_key_id: requested_kms_key_id,
sse_customer_algorithm,
sse_customer_key,
sse_customer_key_md5,
metadata_directive,
metadata,
..
} = req.input.clone();
let (src_bucket, src_key, version_id) = match copy_source {
CopySource::AccessPoint { .. } => return Err(s3_error!(NotImplemented)),
CopySource::Bucket {
ref bucket,
ref key,
version_id,
} => (bucket.to_string(), key.to_string(), version_id.map(|v| v.to_string())),
};
// Validate both source and destination keys
validate_object_key(&src_key, "COPY (source)")?;
validate_object_key(&key, "COPY (dest)")?;
// warn!("copy_object {}/{}, to {}/{}", &src_bucket, &src_key, &bucket, &key);
let mut src_opts = copy_src_opts(&src_bucket, &src_key, &req.headers).map_err(ApiError::from)?;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/storage/helper.rs | rustfs/src/storage/helper.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use http::StatusCode;
use rustfs_audit::{
entity::{ApiDetails, ApiDetailsBuilder, AuditEntryBuilder},
global::AuditLogger,
};
use rustfs_ecstore::store_api::ObjectInfo;
use rustfs_notify::{EventArgsBuilder, notifier_global};
use rustfs_targets::EventName;
use rustfs_utils::{
extract_req_params, extract_req_params_header, extract_resp_elements, get_request_host, get_request_user_agent,
};
use s3s::{S3Request, S3Response, S3Result};
use std::future::Future;
use tokio::runtime::{Builder, Handle};
/// Schedules an asynchronous task on the current runtime;
/// if there is no runtime, creates a minimal runtime execution on a new thread.
/// Schedules `fut` on the current Tokio runtime when one is available;
/// otherwise spins up a throwaway current-thread runtime on a new OS thread
/// and blocks that thread on the future. Fire-and-forget: the task handle
/// is discarded, and a runtime build failure silently drops the future.
fn spawn_background<F>(fut: F)
where
    F: Future<Output = ()> + Send + 'static,
{
    match Handle::try_current() {
        Ok(handle) => {
            let _ = handle.spawn(fut);
        }
        Err(_) => {
            std::thread::spawn(move || {
                if let Ok(rt) = Builder::new_current_thread().enable_all().build() {
                    rt.block_on(fut);
                }
            });
        }
    }
}
/// A unified helper structure for building and distributing audit logs and event notifications via RAII mode at the end of an S3 operation scope.
/// A unified helper structure for building and distributing audit logs and event notifications via RAII mode at the end of an S3 operation scope.
pub struct OperationHelper {
    // Audit entry under construction; Option so completion can take it.
    audit_builder: Option<AuditEntryBuilder>,
    // API name / bucket / object details for the audit entry.
    api_builder: ApiDetailsBuilder,
    // Event-notification args under construction; the ObjectInfo starts as
    // a placeholder and is filled in later via `object()`.
    event_builder: Option<EventArgsBuilder>,
    // Captured at construction; presumably used for latency on completion —
    // the completion path is outside this view.
    start_time: std::time::Instant,
}
impl OperationHelper {
/// Create a new OperationHelper for S3 requests.
    /// Builds a helper from an S3 request: parses bucket/object out of the
    /// URI path, infers the client address from proxy headers, and seeds
    /// both the audit-entry builder and the event-args builder with request
    /// metadata (host, user agent, path, query, request id).
    pub fn new(req: &S3Request<impl Send + Sync>, event: EventName, trigger: &'static str) -> Self {
        // Parse path -> bucket/object
        let path = req.uri.path().trim_start_matches('/');
        let mut segs = path.splitn(2, '/');
        let bucket = segs.next().unwrap_or("").to_string();
        let object_key = segs.next().unwrap_or("").to_string();
        // Infer remote address
        // Prefer proxy-provided headers; falls back to an empty string.
        let remote_host = req
            .headers
            .get("x-forwarded-for")
            .and_then(|v| v.to_str().ok())
            .or_else(|| req.headers.get("x-real-ip").and_then(|v| v.to_str().ok()))
            .unwrap_or("")
            .to_string();
        // Initialize audit builder
        let mut api_builder = ApiDetailsBuilder::new().name(trigger);
        if !bucket.is_empty() {
            api_builder = api_builder.bucket(&bucket);
        }
        if !object_key.is_empty() {
            api_builder = api_builder.object(&object_key);
        }
        // Audit builder
        let mut audit_builder = AuditEntryBuilder::new("1.0", event, trigger, ApiDetails::default())
            .remote_host(remote_host)
            .user_agent(get_request_user_agent(&req.headers))
            .req_host(get_request_host(&req.headers))
            .req_path(req.uri.path().to_string())
            .req_query(extract_req_params(req));
        if let Some(req_id) = req.headers.get("x-amz-request-id")
            && let Ok(id_str) = req_id.to_str()
        {
            audit_builder = audit_builder.request_id(id_str);
        }
        // initialize event builder
        // object is a placeholder that must be set later using the `object()` method.
        let event_builder = EventArgsBuilder::new(event, bucket, ObjectInfo::default())
            .host(get_request_host(&req.headers))
            .user_agent(get_request_user_agent(&req.headers))
            .req_params(extract_req_params_header(&req.headers));
        Self {
            audit_builder: Some(audit_builder),
            api_builder,
            event_builder: Some(event_builder),
            start_time: std::time::Instant::now(),
        }
    }
/// Sets the ObjectInfo for event notification.
pub fn object(mut self, object_info: ObjectInfo) -> Self {
if let Some(builder) = self.event_builder.take() {
self.event_builder = Some(builder.object(object_info));
}
self
}
/// Set the version ID for event notifications.
pub fn version_id(mut self, version_id: impl Into<String>) -> Self {
if let Some(builder) = self.event_builder.take() {
self.event_builder = Some(builder.version_id(version_id));
}
self
}
/// Set the event name for event notifications.
pub fn event_name(mut self, event_name: EventName) -> Self {
if let Some(builder) = self.event_builder.take() {
self.event_builder = Some(builder.event_name(event_name));
}
if let Some(builder) = self.audit_builder.take() {
self.audit_builder = Some(builder.event(event_name));
}
self
}
/// Complete operational details from S3 results.
/// This method should be called immediately before the function returns.
/// It consumes and prepares auxiliary structures for use during `drop`.
pub fn complete(mut self, result: &S3Result<S3Response<impl Send + Sync>>) -> Self {
// Complete audit log
if let Some(builder) = self.audit_builder.take() {
let (status, status_code, error_msg) = match result {
Ok(res) => ("success".to_string(), res.status.unwrap_or(StatusCode::OK).as_u16() as i32, None),
Err(e) => (
"failure".to_string(),
e.status_code().unwrap_or(StatusCode::BAD_REQUEST).as_u16() as i32,
e.message().map(|s| s.to_string()),
),
};
let ttr = self.start_time.elapsed();
let api_details = self
.api_builder
.clone()
.status(status)
.status_code(status_code)
.time_to_response(format!("{ttr:.2?}"))
.time_to_response_in_ns(ttr.as_nanos().to_string())
.build();
let mut final_builder = builder.api(api_details.clone());
if let Some(err) = error_msg {
final_builder = final_builder.error(err);
}
self.audit_builder = Some(final_builder);
self.api_builder = ApiDetailsBuilder(api_details); // Store final details for Drop use
}
// Completion event notification (only on success)
if let (Some(builder), Ok(res)) = (self.event_builder.take(), result) {
self.event_builder = Some(builder.resp_elements(extract_resp_elements(res)));
}
self
}
/// Suppresses the automatic event notification on drop.
pub fn suppress_event(mut self) -> Self {
self.event_builder = None;
self
}
}
impl Drop for OperationHelper {
    /// Flushes any pending audit entry and — for successful operations —
    /// the pending event notification onto background tasks when the helper
    /// goes out of scope.
    fn drop(&mut self) {
        // Audit log: always dispatched if still pending.
        if let Some(audit) = self.audit_builder.take() {
            spawn_background(async move {
                AuditLogger::log(audit.build()).await;
            });
        }
        // Event notification: only dispatched when `complete()` recorded success.
        let succeeded = self.api_builder.0.status.as_deref() == Some("success");
        if !succeeded {
            return;
        }
        if let Some(event) = self.event_builder.take() {
            let event_args = event.build();
            // Replication-driven requests must not re-emit notifications.
            if !event_args.is_replication_request() {
                spawn_background(async move {
                    notifier_global::notify(event_args).await;
                });
            }
        }
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/storage/access.rs | rustfs/src/storage/access.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::ecfs::FS;
use crate::auth::{check_key_valid, get_condition_values, get_session_token};
use crate::license::license_check;
use crate::server::RemoteAddr;
use rustfs_ecstore::bucket::policy_sys::PolicySys;
use rustfs_iam::error::Error as IamError;
use rustfs_policy::policy::action::{Action, S3Action};
use rustfs_policy::policy::{Args, BucketPolicyArgs};
use s3s::access::{S3Access, S3AccessContext};
use s3s::{S3Error, S3ErrorCode, S3Request, S3Result, dto::*, s3_error};
use std::collections::HashMap;
#[allow(dead_code)]
#[derive(Default, Clone, Debug)]
// Per-request authorization context, inserted into the request extensions by
// `S3Access::check` and filled in by the per-operation hooks before
// `authorize_request` runs.
pub(crate) struct ReqInfo {
    // Authenticated credentials; `None` for anonymous requests.
    pub cred: Option<rustfs_credentials::Credentials>,
    // Whether the caller is the account owner (set during `check`).
    pub is_owner: bool,
    // Target bucket, set by each operation hook.
    pub bucket: Option<String>,
    // Target object key, set by object-level operation hooks.
    pub object: Option<String>,
    // Target object version, when the request addresses a specific version.
    pub version_id: Option<String>,
    // Deployment region, taken from global config during `check`.
    pub region: Option<String>,
}
/// Authorizes the request based on the action and credentials.
///
/// Expects a `ReqInfo` (inserted by `S3Access::check`) in the request
/// extensions and panics if it is missing. Authenticated requests are
/// evaluated against the IAM store; anonymous requests against the bucket
/// policy. Returns `AccessDenied` unless some check explicitly allows.
pub async fn authorize_request<T>(req: &mut S3Request<T>, action: Action) -> S3Result<()> {
    let remote_addr = req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0));
    let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
    if let Some(cred) = &req_info.cred {
        // Authenticated path: requires the IAM subsystem to be initialized.
        let Ok(iam_store) = rustfs_iam::get() else {
            return Err(S3Error::with_message(
                S3ErrorCode::InternalError,
                format!("authorize_request {:?}", IamError::IamSysNotInitialized),
            ));
        };
        let default_claims = HashMap::new();
        let claims = cred.claims.as_ref().unwrap_or(&default_claims);
        let conditions = get_condition_values(&req.headers, cred, req_info.version_id.as_deref(), None, remote_addr);
        // Deleting a specific object *version* additionally requires the
        // version-scoped delete permission; deny outright if it is missing.
        if action == Action::S3Action(S3Action::DeleteObjectAction)
            && req_info.version_id.is_some()
            && !iam_store
                .is_allowed(&Args {
                    account: &cred.access_key,
                    groups: &cred.groups,
                    action: Action::S3Action(S3Action::DeleteObjectVersionAction),
                    bucket: req_info.bucket.as_deref().unwrap_or(""),
                    conditions: &conditions,
                    is_owner: req_info.is_owner,
                    object: req_info.object.as_deref().unwrap_or(""),
                    claims,
                    deny_only: false,
                })
                .await
        {
            return Err(s3_error!(AccessDenied, "Access Denied"));
        }
        // Primary check for the requested action itself.
        if iam_store
            .is_allowed(&Args {
                account: &cred.access_key,
                groups: &cred.groups,
                action,
                bucket: req_info.bucket.as_deref().unwrap_or(""),
                conditions: &conditions,
                is_owner: req_info.is_owner,
                object: req_info.object.as_deref().unwrap_or(""),
                claims,
                deny_only: false,
            })
            .await
        {
            return Ok(());
        }
        // Fallback: a grant of ListBucket also satisfies ListBucketVersions.
        if action == Action::S3Action(S3Action::ListBucketVersionsAction)
            && iam_store
                .is_allowed(&Args {
                    account: &cred.access_key,
                    groups: &cred.groups,
                    action: Action::S3Action(S3Action::ListBucketAction),
                    bucket: req_info.bucket.as_deref().unwrap_or(""),
                    conditions: &conditions,
                    is_owner: req_info.is_owner,
                    object: req_info.object.as_deref().unwrap_or(""),
                    claims,
                    deny_only: false,
                })
                .await
        {
            return Ok(());
        }
    } else {
        // Anonymous path: only the bucket policy can grant access.
        let conditions = get_condition_values(
            &req.headers,
            &rustfs_credentials::Credentials::default(),
            req_info.version_id.as_deref(),
            req.region.as_deref(),
            remote_addr,
        );
        // ListAllMyBuckets is never granted anonymously.
        if action != Action::S3Action(S3Action::ListAllMyBucketsAction) {
            if PolicySys::is_allowed(&BucketPolicyArgs {
                bucket: req_info.bucket.as_deref().unwrap_or(""),
                action,
                is_owner: false,
                account: "",
                groups: &None,
                conditions: &conditions,
                object: req_info.object.as_deref().unwrap_or(""),
            })
            .await
            {
                return Ok(());
            }
            // Same ListBucket fallback as the authenticated path.
            if action == Action::S3Action(S3Action::ListBucketVersionsAction)
                && PolicySys::is_allowed(&BucketPolicyArgs {
                    bucket: req_info.bucket.as_deref().unwrap_or(""),
                    action: Action::S3Action(S3Action::ListBucketAction),
                    is_owner: false,
                    account: "",
                    groups: &None,
                    conditions: &conditions,
                    object: "",
                })
                .await
            {
                return Ok(());
            }
        }
    }
    // Default: nothing granted access.
    Err(s3_error!(AccessDenied, "Access Denied"))
}
#[async_trait::async_trait]
impl S3Access for FS {
// /// Checks whether the current request has accesses to the resources.
// ///
// /// This method is called before deserializing the operation input.
// ///
// /// By default, this method rejects all anonymous requests
// /// and returns [`AccessDenied`](crate::S3ErrorCode::AccessDenied) error.
// ///
// /// An access control provider can override this method to implement custom logic.
// ///
// /// Common fields in the context:
// /// + [`cx.credentials()`](S3AccessContext::credentials)
// /// + [`cx.s3_path()`](S3AccessContext::s3_path)
// /// + [`cx.s3_op().name()`](crate::S3Operation::name)
// /// + [`cx.extensions_mut()`](S3AccessContext::extensions_mut)
    // Entry point of the access layer: validates the signing key (if any) and
    // seeds a `ReqInfo` into the request extensions for the per-operation
    // hooks below. Per-action authorization happens in those hooks, not here.
    async fn check(&self, cx: &mut S3AccessContext<'_>) -> S3Result<()> {
        // Upper layer has verified ak/sk
        // info!(
        //     "s3 check uri: {:?}, method: {:?} path: {:?}, s3_op: {:?}, cred: {:?}, headers:{:?}",
        //     cx.uri(),
        //     cx.method(),
        //     cx.s3_path(),
        //     cx.s3_op().name(),
        //     cx.credentials(),
        //     cx.headers(),
        //     // cx.extensions_mut(),
        // );
        // Resolve the caller: authenticated requests get their full credential
        // record (and owner flag); anonymous requests get `(None, false)`.
        let (cred, is_owner) = if let Some(input_cred) = cx.credentials() {
            let (cred, is_owner) =
                check_key_valid(get_session_token(cx.uri(), cx.headers()).unwrap_or_default(), &input_cred.access_key).await?;
            (Some(cred), is_owner)
        } else {
            (None, false)
        };
        let req_info = ReqInfo {
            cred,
            is_owner,
            region: rustfs_ecstore::global::get_global_region(),
            ..Default::default()
        };
        let ext = cx.extensions_mut();
        ext.insert(req_info);
        // Verify uniformly here? Or verify separately below?
        Ok(())
    }
    /// Checks whether the CreateBucket request has accesses to the resources.
    ///
    /// Requires `s3:CreateBucket`; creating a bucket with object lock enabled
    /// additionally requires the object-lock and versioning put permissions.
    async fn create_bucket(&self, req: &mut S3Request<CreateBucketInput>) -> S3Result<()> {
        license_check().map_err(|er| s3_error!(AccessDenied, "{:?}", er.to_string()))?;
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::CreateBucketAction)).await?;
        // Object lock implies versioning, so both configurations are gated.
        if req.input.object_lock_enabled_for_bucket.is_some_and(|v| v) {
            authorize_request(req, Action::S3Action(S3Action::PutBucketObjectLockConfigurationAction)).await?;
            authorize_request(req, Action::S3Action(S3Action::PutBucketVersioningAction)).await?;
        }
        Ok(())
    }
    /// Checks whether the AbortMultipartUpload request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn abort_multipart_upload(&self, _req: &mut S3Request<AbortMultipartUploadInput>) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the CompleteMultipartUpload request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn complete_multipart_upload(&self, _req: &mut S3Request<CompleteMultipartUploadInput>) -> S3Result<()> {
        Ok(())
    }
/// Checks whether the CopyObject request has accesses to the resources.
///
/// This method returns `Ok(())` by default.
async fn copy_object(&self, req: &mut S3Request<CopyObjectInput>) -> S3Result<()> {
{
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let (src_bucket, src_key, version_id) = match &req.input.copy_source {
CopySource::AccessPoint { .. } => return Err(s3_error!(NotImplemented)),
CopySource::Bucket { bucket, key, version_id } => {
(bucket.to_string(), key.to_string(), version_id.as_ref().map(|v| v.to_string()))
}
};
req_info.bucket = Some(src_bucket);
req_info.object = Some(src_key);
req_info.version_id = version_id;
authorize_request(req, Action::S3Action(S3Action::GetObjectAction)).await?;
}
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone();
authorize_request(req, Action::S3Action(S3Action::PutObjectAction)).await
}
    /// Checks whether the CreateMultipartUpload request has accesses to the resources.
    ///
    /// Only performs the license check; no per-action authorization here.
    async fn create_multipart_upload(&self, _req: &mut S3Request<CreateMultipartUploadInput>) -> S3Result<()> {
        license_check().map_err(|er| s3_error!(AccessDenied, "{:?}", er.to_string()))?;
        Ok(())
    }
    /// Checks whether the DeleteBucket request has accesses to the resources.
    ///
    /// Requires `s3:DeleteBucket`; a force delete additionally requires the
    /// force-delete permission.
    async fn delete_bucket(&self, req: &mut S3Request<DeleteBucketInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::DeleteBucketAction)).await?;
        if req.input.force_delete.is_some_and(|v| v) {
            authorize_request(req, Action::S3Action(S3Action::ForceDeleteBucketAction)).await?;
        }
        Ok(())
    }
    /// Checks whether the DeleteBucketAnalyticsConfiguration request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn delete_bucket_analytics_configuration(
        &self,
        _req: &mut S3Request<DeleteBucketAnalyticsConfigurationInput>,
    ) -> S3Result<()> {
        // No policy check is performed for this operation.
        Ok(())
    }
    /// Checks whether the DeleteBucketCors request has accesses to the resources.
    ///
    /// Deleting the CORS config is gated by the same permission as setting it.
    async fn delete_bucket_cors(&self, req: &mut S3Request<DeleteBucketCorsInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::PutBucketCorsAction)).await
    }
    /// Checks whether the DeleteBucketEncryption request has accesses to the resources.
    ///
    /// Deleting the encryption config is gated by the put-encryption permission.
    async fn delete_bucket_encryption(&self, req: &mut S3Request<DeleteBucketEncryptionInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::PutBucketEncryptionAction)).await
    }
    /// Checks whether the DeleteBucketIntelligentTieringConfiguration request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn delete_bucket_intelligent_tiering_configuration(
        &self,
        _req: &mut S3Request<DeleteBucketIntelligentTieringConfigurationInput>,
    ) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the DeleteBucketInventoryConfiguration request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn delete_bucket_inventory_configuration(
        &self,
        _req: &mut S3Request<DeleteBucketInventoryConfigurationInput>,
    ) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the DeleteBucketLifecycle request has accesses to the resources.
    ///
    /// Deleting the lifecycle config is gated by the put-lifecycle permission.
    async fn delete_bucket_lifecycle(&self, req: &mut S3Request<DeleteBucketLifecycleInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found")\u{3b};
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::PutBucketLifecycleAction)).await
    }
    /// Checks whether the DeleteBucketMetricsConfiguration request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn delete_bucket_metrics_configuration(
        &self,
        _req: &mut S3Request<DeleteBucketMetricsConfigurationInput>,
    ) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the DeleteBucketOwnershipControls request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn delete_bucket_ownership_controls(&self, _req: &mut S3Request<DeleteBucketOwnershipControlsInput>) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the DeleteBucketPolicy request has accesses to the resources.
    ///
    /// Requires `s3:DeleteBucketPolicy`.
    async fn delete_bucket_policy(&self, req: &mut S3Request<DeleteBucketPolicyInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::DeleteBucketPolicyAction)).await
    }
    /// Checks whether the DeleteBucketReplication request has accesses to the resources.
    ///
    /// Deleting replication config is gated by the put-replication permission.
    async fn delete_bucket_replication(&self, req: &mut S3Request<DeleteBucketReplicationInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::PutReplicationConfigurationAction)).await
    }
    /// Checks whether the DeleteBucketTagging request has accesses to the resources.
    ///
    /// Deleting bucket tags is gated by the put-tagging permission.
    async fn delete_bucket_tagging(&self, req: &mut S3Request<DeleteBucketTaggingInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::PutBucketTaggingAction)).await
    }
    /// Checks whether the DeleteBucketWebsite request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn delete_bucket_website(&self, _req: &mut S3Request<DeleteBucketWebsiteInput>) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the DeleteObject request has accesses to the resources.
    ///
    /// Requires `s3:DeleteObject` on the target key (version-scoped delete is
    /// additionally enforced inside `authorize_request`).
    async fn delete_object(&self, req: &mut S3Request<DeleteObjectInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        req_info.object = Some(req.input.key.clone());
        req_info.version_id = req.input.version_id.clone();
        authorize_request(req, Action::S3Action(S3Action::DeleteObjectAction)).await
    }
    /// Checks whether the DeleteObjectTagging request has accesses to the resources.
    ///
    /// Requires `s3:DeleteObjectTagging` on the target key.
    async fn delete_object_tagging(&self, req: &mut S3Request<DeleteObjectTaggingInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        req_info.object = Some(req.input.key.clone());
        req_info.version_id = req.input.version_id.clone();
        authorize_request(req, Action::S3Action(S3Action::DeleteObjectTaggingAction)).await
    }
    /// Checks whether the DeleteObjects request has accesses to the resources.
    ///
    /// Batch delete: the check here is at bucket scope (object/version are
    /// cleared) — NOTE(review): per-key authorization presumably happens in
    /// the operation handler; verify against the caller.
    async fn delete_objects(&self, req: &mut S3Request<DeleteObjectsInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        req_info.object = None;
        req_info.version_id = None;
        authorize_request(req, Action::S3Action(S3Action::DeleteObjectAction)).await
    }
    /// Checks whether the DeletePublicAccessBlock request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn delete_public_access_block(&self, _req: &mut S3Request<DeletePublicAccessBlockInput>) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the GetBucketAccelerateConfiguration request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn get_bucket_accelerate_configuration(
        &self,
        _req: &mut S3Request<GetBucketAccelerateConfigurationInput>,
    ) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the GetBucketAcl request has accesses to the resources.
    ///
    /// NOTE(review): ACL read is mapped to the policy-read permission here —
    /// presumably ACLs are policy-backed; confirm against the ACL handler.
    async fn get_bucket_acl(&self, req: &mut S3Request<GetBucketAclInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::GetBucketPolicyAction)).await
    }
    /// Checks whether the GetBucketAnalyticsConfiguration request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn get_bucket_analytics_configuration(
        &self,
        _req: &mut S3Request<GetBucketAnalyticsConfigurationInput>,
    ) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the GetBucketCors request has accesses to the resources.
    ///
    /// Requires `s3:GetBucketCORS`.
    async fn get_bucket_cors(&self, req: &mut S3Request<GetBucketCorsInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::GetBucketCorsAction)).await
    }
    /// Checks whether the GetBucketEncryption request has accesses to the resources.
    ///
    /// Requires the get-encryption permission.
    async fn get_bucket_encryption(&self, req: &mut S3Request<GetBucketEncryptionInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::GetBucketEncryptionAction)).await
    }
    /// Checks whether the GetBucketIntelligentTieringConfiguration request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn get_bucket_intelligent_tiering_configuration(
        &self,
        _req: &mut S3Request<GetBucketIntelligentTieringConfigurationInput>,
    ) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the GetBucketInventoryConfiguration request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn get_bucket_inventory_configuration(
        &self,
        _req: &mut S3Request<GetBucketInventoryConfigurationInput>,
    ) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the GetBucketLifecycleConfiguration request has accesses to the resources.
    ///
    /// Requires the get-lifecycle permission.
    async fn get_bucket_lifecycle_configuration(
        &self,
        req: &mut S3Request<GetBucketLifecycleConfigurationInput>,
    ) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::GetBucketLifecycleAction)).await
    }
    /// Checks whether the GetBucketLocation request has accesses to the resources.
    ///
    /// Requires `s3:GetBucketLocation`.
    async fn get_bucket_location(&self, req: &mut S3Request<GetBucketLocationInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::GetBucketLocationAction)).await
    }
    /// Checks whether the GetBucketLogging request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn get_bucket_logging(&self, _req: &mut S3Request<GetBucketLoggingInput>) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the GetBucketMetricsConfiguration request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn get_bucket_metrics_configuration(&self, _req: &mut S3Request<GetBucketMetricsConfigurationInput>) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the GetBucketNotificationConfiguration request has accesses to the resources.
    ///
    /// Requires the get-notification permission.
    async fn get_bucket_notification_configuration(
        &self,
        req: &mut S3Request<GetBucketNotificationConfigurationInput>,
    ) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::GetBucketNotificationAction)).await
    }
    /// Checks whether the GetBucketOwnershipControls request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn get_bucket_ownership_controls(&self, _req: &mut S3Request<GetBucketOwnershipControlsInput>) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the GetBucketPolicy request has accesses to the resources.
    ///
    /// Requires `s3:GetBucketPolicy`.
    async fn get_bucket_policy(&self, req: &mut S3Request<GetBucketPolicyInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found")\u{3b};
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::GetBucketPolicyAction)).await
    }
    /// Checks whether the GetBucketPolicyStatus request has accesses to the resources.
    ///
    /// Requires the policy-status read permission.
    async fn get_bucket_policy_status(&self, req: &mut S3Request<GetBucketPolicyStatusInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::GetBucketPolicyStatusAction)).await
    }
    /// Checks whether the GetBucketReplication request has accesses to the resources.
    ///
    /// Requires the get-replication permission.
    async fn get_bucket_replication(&self, req: &mut S3Request<GetBucketReplicationInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::GetReplicationConfigurationAction)).await
    }
    /// Checks whether the GetBucketRequestPayment request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn get_bucket_request_payment(&self, _req: &mut S3Request<GetBucketRequestPaymentInput>) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the GetBucketTagging request has accesses to the resources.
    ///
    /// Requires the get-tagging permission.
    async fn get_bucket_tagging(&self, req: &mut S3Request<GetBucketTaggingInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::GetBucketTaggingAction)).await
    }
    /// Checks whether the GetBucketVersioning request has accesses to the resources.
    ///
    /// Requires the get-versioning permission.
    async fn get_bucket_versioning(&self, req: &mut S3Request<GetBucketVersioningInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::GetBucketVersioningAction)).await
    }
    /// Checks whether the GetBucketWebsite request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn get_bucket_website(&self, _req: &mut S3Request<GetBucketWebsiteInput>) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the GetObject request has accesses to the resources.
    ///
    /// Requires `s3:GetObject` on the target key.
    async fn get_object(&self, req: &mut S3Request<GetObjectInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        req_info.object = Some(req.input.key.clone());
        req_info.version_id = req.input.version_id.clone();
        authorize_request(req, Action::S3Action(S3Action::GetObjectAction)).await
    }
    /// Checks whether the GetObjectAcl request has accesses to the resources.
    ///
    /// NOTE(review): object-ACL read is mapped to the bucket-policy read
    /// permission — presumably ACLs are policy-backed; confirm.
    async fn get_object_acl(&self, req: &mut S3Request<GetObjectAclInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        req_info.object = Some(req.input.key.clone());
        req_info.version_id = req.input.version_id.clone();
        authorize_request(req, Action::S3Action(S3Action::GetBucketPolicyAction)).await
    }
/// Checks whether the GetObjectAttributes request has accesses to the resources.
///
/// This method returns `Ok(())` by default.
async fn get_object_attributes(&self, req: &mut S3Request<GetObjectAttributesInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone();
if req.input.version_id.is_some() {
authorize_request(req, Action::S3Action(S3Action::GetObjectVersionAttributesAction)).await?;
authorize_request(req, Action::S3Action(S3Action::GetObjectVersionAction)).await?;
} else {
authorize_request(req, Action::S3Action(S3Action::GetObjectAttributesAction)).await?;
authorize_request(req, Action::S3Action(S3Action::GetObjectAction)).await?;
}
Ok(())
}
    /// Checks whether the GetObjectLegalHold request has accesses to the resources.
    ///
    /// Requires the legal-hold read permission on the target key.
    async fn get_object_legal_hold(&self, req: &mut S3Request<GetObjectLegalHoldInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        req_info.object = Some(req.input.key.clone());
        req_info.version_id = req.input.version_id.clone();
        authorize_request(req, Action::S3Action(S3Action::GetObjectLegalHoldAction)).await
    }
    /// Checks whether the GetObjectLockConfiguration request has accesses to the resources.
    ///
    /// Object-lock configuration lives at bucket scope, hence the bucket-level
    /// permission.
    async fn get_object_lock_configuration(&self, req: &mut S3Request<GetObjectLockConfigurationInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::GetBucketObjectLockConfigurationAction)).await
    }
    /// Checks whether the GetObjectRetention request has accesses to the resources.
    ///
    /// Requires the retention read permission on the target key.
    async fn get_object_retention(&self, req: &mut S3Request<GetObjectRetentionInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        req_info.object = Some(req.input.key.clone());
        req_info.version_id = req.input.version_id.clone();
        authorize_request(req, Action::S3Action(S3Action::GetObjectRetentionAction)).await
    }
    /// Checks whether the GetObjectTagging request has accesses to the resources.
    ///
    /// Requires the object-tagging read permission on the target key.
    async fn get_object_tagging(&self, req: &mut S3Request<GetObjectTaggingInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        req_info.object = Some(req.input.key.clone());
        req_info.version_id = req.input.version_id.clone();
        authorize_request(req, Action::S3Action(S3Action::GetObjectTaggingAction)).await
    }
    /// Checks whether the GetObjectTorrent request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn get_object_torrent(&self, _req: &mut S3Request<GetObjectTorrentInput>) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the GetPublicAccessBlock request has accesses to the resources.
    ///
    /// This method returns `Ok(())` by default.
    async fn get_public_access_block(&self, _req: &mut S3Request<GetPublicAccessBlockInput>) -> S3Result<()> {
        Ok(())
    }
    /// Checks whether the HeadBucket request has accesses to the resources.
    ///
    /// HEAD on a bucket is gated by the list-bucket permission.
    async fn head_bucket(&self, req: &mut S3Request<HeadBucketInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        authorize_request(req, Action::S3Action(S3Action::ListBucketAction)).await
    }
    /// Checks whether the HeadObject request has accesses to the resources.
    ///
    /// HEAD on an object is gated by the same permission as GET.
    async fn head_object(&self, req: &mut S3Request<HeadObjectInput>) -> S3Result<()> {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());
        req_info.object = Some(req.input.key.clone());
        req_info.version_id = req.input.version_id.clone();
        authorize_request(req, Action::S3Action(S3Action::GetObjectAction)).await
    }
/// Checks whether the ListBucketAnalyticsConfigurations request has accesses to the resources.
///
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/storage/options.rs | rustfs/src/storage/options.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use http::{HeaderMap, HeaderValue};
use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys;
use rustfs_ecstore::error::Result;
use rustfs_ecstore::error::StorageError;
use rustfs_utils::http::AMZ_META_UNENCRYPTED_CONTENT_LENGTH;
use rustfs_utils::http::AMZ_META_UNENCRYPTED_CONTENT_MD5;
use s3s::header::X_AMZ_OBJECT_LOCK_MODE;
use s3s::header::X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE;
use crate::auth::UNSIGNED_PAYLOAD;
use crate::auth::UNSIGNED_PAYLOAD_TRAILER;
use rustfs_ecstore::store_api::{HTTPPreconditions, HTTPRangeSpec, ObjectOptions};
use rustfs_policy::service_type::ServiceType;
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;
use rustfs_utils::http::AMZ_CONTENT_SHA256;
use rustfs_utils::http::RESERVED_METADATA_PREFIX_LOWER;
use rustfs_utils::http::RUSTFS_BUCKET_REPLICATION_DELETE_MARKER;
use rustfs_utils::http::RUSTFS_BUCKET_REPLICATION_REQUEST;
use rustfs_utils::http::RUSTFS_BUCKET_REPLICATION_SSEC_CHECKSUM;
use rustfs_utils::http::RUSTFS_BUCKET_SOURCE_VERSION_ID;
use rustfs_utils::path::is_dir_object;
use s3s::{S3Result, s3_error};
use std::collections::HashMap;
use std::sync::LazyLock;
use tracing::error;
use uuid::Uuid;
use crate::auth::AuthType;
use crate::auth::get_request_auth_type;
use crate::auth::is_request_presigned_signature_v4;
/// Creates options for deleting an object in a bucket.
///
/// Resolves the prefix's versioning state and validates any caller-supplied
/// version id; when `vid` is absent, the replication source-version-id header
/// is used as a fallback.
///
/// # Errors
/// - `StorageError::InvalidVersionID` for a malformed, non-nil version id.
/// - `StorageError::InvalidArgument` when header-derived options are invalid.
pub async fn del_opts(
    bucket: &str,
    object: &str,
    vid: Option<String>,
    headers: &HeaderMap<HeaderValue>,
    metadata: HashMap<String, String>,
) -> Result<ObjectOptions> {
    let versioned = BucketVersioningSys::prefix_enabled(bucket, object).await;
    // NOTE(review): del_opts checks bucket-level suspension while get_opts and
    // put_opts use prefix_suspended(bucket, object) — confirm the asymmetry is
    // intentional.
    let version_suspended = BucketVersioningSys::suspended(bucket).await;

    // Fall back to the replication source-version-id header. Non-UTF-8 header
    // values are ignored instead of panicking: the header is client input.
    let vid = vid.or_else(|| {
        headers
            .get(RUSTFS_BUCKET_SOURCE_VERSION_ID)
            .and_then(|v| v.to_str().ok())
            .map(|v| v.to_owned())
    });
    let vid = vid.map(|v| v.trim().to_owned());
    if let Some(ref id) = vid
        && *id != Uuid::nil().to_string()
        && let Err(err) = Uuid::parse_str(id.as_str())
    {
        error!("del_opts: invalid version id: {} error: {}", id, err);
        return Err(StorageError::InvalidVersionID(bucket.to_owned(), object.to_owned(), id.clone()));
    }

    // `metadata` is moved (the original cloned it needlessly).
    let mut opts = put_opts_from_headers(headers, metadata).map_err(|err| {
        error!("del_opts: invalid argument: {} error: {}", object, err);
        StorageError::InvalidArgument(bucket.to_owned(), object.to_owned(), err.to_string())
    })?;

    // Directory objects are never versioned individually: pin them to the nil UUID.
    opts.version_id = if is_dir_object(object) && vid.is_none() {
        Some(Uuid::nil().to_string())
    } else {
        vid
    };
    opts.version_suspended = version_suspended;
    opts.versioned = versioned;
    opts.delete_marker = headers
        .get(RUSTFS_BUCKET_REPLICATION_DELETE_MARKER)
        .and_then(|v| v.to_str().ok())
        .map(|v| v == "true")
        .unwrap_or_default();
    fill_conditional_writes_opts_from_header(headers, &mut opts)?;
    Ok(opts)
}
/// Creates options for getting an object from a bucket.
///
/// Resolves the prefix's versioning state, validates any caller-supplied
/// version id, and records the requested part number.
///
/// # Errors
/// - `StorageError::InvalidVersionID` for a malformed, non-nil version id.
/// - `StorageError::InvalidArgument` when the default options are invalid.
pub async fn get_opts(
    bucket: &str,
    object: &str,
    vid: Option<String>,
    part_num: Option<usize>,
    headers: &HeaderMap<HeaderValue>,
) -> Result<ObjectOptions> {
    let versioned = BucketVersioningSys::prefix_enabled(bucket, object).await;
    let version_suspended = BucketVersioningSys::prefix_suspended(bucket, object).await;

    let trimmed_vid = vid.map(|v| v.as_str().trim().to_owned());
    if let Some(ref id) = trimmed_vid
        && *id != Uuid::nil().to_string()
        && let Err(_err) = Uuid::parse_str(id.as_str())
    {
        return Err(StorageError::InvalidVersionID(bucket.to_owned(), object.to_owned(), id.clone()));
    }

    let mut opts = get_default_opts(headers, HashMap::new(), false)
        .map_err(|err| StorageError::InvalidArgument(bucket.to_owned(), object.to_owned(), err.to_string()))?;

    // Directory objects without an explicit version are pinned to the nil UUID.
    opts.version_id = match trimmed_vid {
        None if is_dir_object(object) => Some(Uuid::nil().to_string()),
        other => other,
    };
    opts.part_number = part_num;
    opts.version_suspended = version_suspended;
    opts.versioned = versioned;
    fill_conditional_writes_opts_from_header(headers, &mut opts)?;
    Ok(opts)
}
/// Copies conditional-write headers (`If-Match` / `If-None-Match`) into
/// `opts.http_preconditions`. Leaves the options untouched when neither
/// header is present.
///
/// # Errors
/// Fails with an `std::io::Error` when a present header is not valid UTF-8.
fn fill_conditional_writes_opts_from_header(headers: &HeaderMap<HeaderValue>, opts: &mut ObjectOptions) -> std::io::Result<()> {
    let if_none_match = headers.get("If-None-Match");
    let if_match = headers.get("If-Match");
    if if_none_match.is_none() && if_match.is_none() {
        return Ok(());
    }

    let mut preconditions = HTTPPreconditions::default();
    if let Some(value) = if_none_match {
        let s = value
            .to_str()
            .map_err(|_| std::io::Error::other("Invalid If-None-Match header"))?;
        preconditions.if_none_match = Some(s.to_string());
    }
    if let Some(value) = if_match {
        let s = value
            .to_str()
            .map_err(|_| std::io::Error::other("Invalid If-Match header"))?;
        preconditions.if_match = Some(s.to_string());
    }
    opts.http_preconditions = Some(preconditions);
    Ok(())
}
/// Creates options for putting an object in a bucket.
///
/// Resolves the prefix's versioning state and validates any caller-supplied
/// version id; when `vid` is absent, the replication source-version-id header
/// is used as a fallback.
///
/// # Errors
/// - `StorageError::InvalidVersionID` for a malformed, non-nil version id.
/// - `StorageError::InvalidArgument` when header-derived options are invalid.
pub async fn put_opts(
    bucket: &str,
    object: &str,
    vid: Option<String>,
    headers: &HeaderMap<HeaderValue>,
    metadata: HashMap<String, String>,
) -> Result<ObjectOptions> {
    let versioned = BucketVersioningSys::prefix_enabled(bucket, object).await;
    let version_suspended = BucketVersioningSys::prefix_suspended(bucket, object).await;

    // Fall back to the replication source-version-id header. Non-UTF-8 header
    // values are ignored instead of panicking: the header is client input.
    let vid = vid.or_else(|| {
        headers
            .get(RUSTFS_BUCKET_SOURCE_VERSION_ID)
            .and_then(|v| v.to_str().ok())
            .map(|v| v.to_owned())
    });
    let vid = vid.map(|v| v.trim().to_owned());
    if let Some(ref id) = vid
        && *id != Uuid::nil().to_string()
        && Uuid::parse_str(id.as_str()).is_err()
    {
        return Err(StorageError::InvalidVersionID(bucket.to_owned(), object.to_owned(), id.clone()));
    }

    let mut opts = put_opts_from_headers(headers, metadata)
        .map_err(|err| StorageError::InvalidArgument(bucket.to_owned(), object.to_owned(), err.to_string()))?;

    // Directory objects are never versioned individually: pin them to the nil UUID.
    opts.version_id = if is_dir_object(object) && vid.is_none() {
        Some(Uuid::nil().to_string())
    } else {
        vid
    };
    opts.version_suspended = version_suspended;
    opts.versioned = versioned;
    fill_conditional_writes_opts_from_header(headers, &mut opts)?;
    Ok(opts)
}
/// Builds `ObjectOptions` for a CompleteMultipartUpload request from its
/// headers: replication markers, an optional SSE-C checksum, and the content
/// checksum expectation.
///
/// # Errors
/// Propagates `std::io::Error` from checksum-header parsing.
pub fn get_complete_multipart_upload_opts(headers: &HeaderMap<HeaderValue>) -> std::io::Result<ObjectOptions> {
    let mut user_defined = HashMap::new();
    let mut replication_request = false;
    if let Some(v) = headers.get(RUSTFS_BUCKET_REPLICATION_REQUEST) {
        // NOTE(review): the replication-request header's value is stored under
        // the reserved "Actual-Object-Size" key — confirm this key is the
        // intended destination for that value.
        user_defined.insert(
            format!("{RESERVED_METADATA_PREFIX_LOWER}Actual-Object-Size"),
            v.to_str().unwrap_or_default().to_owned(),
        );
        replication_request = true;
    }
    if let Some(v) = headers.get(RUSTFS_BUCKET_REPLICATION_SSEC_CHECKSUM) {
        // Preserve the SSE-C checksum verbatim under its own header name.
        user_defined.insert(
            RUSTFS_BUCKET_REPLICATION_SSEC_CHECKSUM.to_string(),
            v.to_str().unwrap_or_default().to_owned(),
        );
    }
    let mut opts = ObjectOptions {
        want_checksum: rustfs_rio::get_content_checksum(headers)?,
        user_defined,
        replication_request,
        ..Default::default()
    };
    fill_conditional_writes_opts_from_header(headers, &mut opts)?;
    Ok(opts)
}
/// Creates options for the destination side of a server-side copy.
///
/// Destination handling is identical to a regular put, so this delegates to
/// [`put_opts`].
pub async fn copy_dst_opts(
    bucket: &str,
    object: &str,
    vid: Option<String>,
    headers: &HeaderMap<HeaderValue>,
    metadata: HashMap<String, String>,
) -> Result<ObjectOptions> {
    let opts = put_opts(bucket, object, vid, headers, metadata).await?;
    Ok(opts)
}
/// Creates options for the source side of a server-side copy; the source read
/// carries no user metadata of its own.
pub fn copy_src_opts(_bucket: &str, _object: &str, headers: &HeaderMap<HeaderValue>) -> Result<ObjectOptions> {
    let opts = get_default_opts(headers, HashMap::new(), false)?;
    Ok(opts)
}
/// Builds put options from request headers plus caller-provided metadata.
pub fn put_opts_from_headers(headers: &HeaderMap<HeaderValue>, metadata: HashMap<String, String>) -> Result<ObjectOptions> {
    let opts = get_default_opts(headers, metadata, false)?;
    Ok(opts)
}
/// Creates default options for getting an object from a bucket.
pub fn get_default_opts(
_headers: &HeaderMap<HeaderValue>,
metadata: HashMap<String, String>,
_copy_source: bool,
) -> Result<ObjectOptions> {
Ok(ObjectOptions {
user_defined: metadata,
..Default::default()
})
}
/// Collects object metadata from the request headers into a fresh map.
pub fn extract_metadata(headers: &HeaderMap<HeaderValue>) -> HashMap<String, String> {
    let mut collected = HashMap::new();
    extract_metadata_from_mime(headers, &mut collected);
    collected
}
/// Extracts metadata from headers into `metadata`, keeping any content-type
/// header and applying no object-name based MIME detection.
pub fn extract_metadata_from_mime(headers: &HeaderMap<HeaderValue>, metadata: &mut HashMap<String, String>) {
    extract_metadata_from_mime_with_object_name(headers, metadata, /* skip_content_type */ false, /* object_name */ None);
}
/// Extracts metadata from headers into `metadata`, optionally skipping the
/// content-type header and using `object_name` for MIME type detection.
///
/// Recognized headers:
/// - `x-amz-meta-*` / `x-rustfs-meta-*` — stored under the stripped key
///   (a bare `x-amz-meta-` with an empty key is ignored);
/// - entries of `SUPPORTED_HEADERS` — stored under the header name itself.
///
/// A `content-type` entry is always guaranteed afterwards, derived from the
/// object name when available, otherwise `binary/octet-stream`.
pub fn extract_metadata_from_mime_with_object_name(
    headers: &HeaderMap<HeaderValue>,
    metadata: &mut HashMap<String, String>,
    skip_content_type: bool,
    object_name: Option<&str>,
) {
    for (k, v) in headers.iter() {
        let name = k.as_str();
        if skip_content_type && name == "content-type" {
            continue;
        }
        if let Some(key) = name.strip_prefix("x-amz-meta-") {
            if !key.is_empty() {
                metadata.insert(key.to_owned(), String::from_utf8_lossy(v.as_bytes()).to_string());
            }
            continue;
        }
        if let Some(key) = name.strip_prefix("x-rustfs-meta-") {
            metadata.insert(key.to_owned(), String::from_utf8_lossy(v.as_bytes()).to_string());
            continue;
        }
        // `contains` replaces the original inner loop, whose `continue` only
        // advanced the inner iteration and never short-circuited the scan.
        if SUPPORTED_HEADERS.contains(&name) {
            metadata.insert(name.to_owned(), String::from_utf8_lossy(v.as_bytes()).to_string());
        }
    }
    if !metadata.contains_key("content-type") {
        let default_content_type = match object_name {
            Some(obj_name) => detect_content_type_from_object_name(obj_name),
            None => "binary/octet-stream".to_owned(),
        };
        metadata.insert("content-type".to_owned(), default_content_type);
    }
}
/// Filters internal/reserved entries out of object metadata before it is
/// exposed to clients.
///
/// Drops reserved-prefix entries, empty object-lock attributes, and the
/// unencrypted-content bookkeeping keys; strips the `x-amz-meta-` /
/// `x-rustfs-meta-` prefixes (case-insensitively) from user metadata keys.
/// Returns `None` when nothing survives the filtering.
pub(crate) fn filter_object_metadata(metadata: &HashMap<String, String>) -> Option<HashMap<String, String>> {
    let mut filtered_metadata = HashMap::new();
    for (k, v) in metadata {
        // Reserved bookkeeping entries are never exposed.
        if k.starts_with(RESERVED_METADATA_PREFIX_LOWER) {
            continue;
        }
        // Empty object-lock attributes carry no information. Comparing via
        // `HeaderName::as_str` avoids allocating a String per entry (the
        // original compared against `to_string()` results).
        if v.is_empty()
            && (k.as_str() == X_AMZ_OBJECT_LOCK_MODE.as_str() || k.as_str() == X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE.as_str())
        {
            continue;
        }
        // Encryption bookkeeping values stay internal.
        if k == AMZ_META_UNENCRYPTED_CONTENT_MD5 || k == AMZ_META_UNENCRYPTED_CONTENT_LENGTH {
            continue;
        }
        // Strip known user-metadata prefixes; everything else passes through
        // under its original key.
        let lower_key = k.to_ascii_lowercase();
        let exposed_key = lower_key
            .strip_prefix("x-amz-meta-")
            .or_else(|| lower_key.strip_prefix("x-rustfs-meta-"))
            .map(str::to_string)
            .unwrap_or_else(|| k.clone());
        filtered_metadata.insert(exposed_key, v.clone());
    }
    (!filtered_metadata.is_empty()).then_some(filtered_metadata)
}
/// Detects a content type from the object name's file extension.
///
/// A small table handles data-engineering formats that `mime_guess` does not
/// know; everything else falls back to `mime_guess`, defaulting to
/// `application/octet-stream`.
pub(crate) fn detect_content_type_from_object_name(object_name: &str) -> String {
    // Extensions unknown to mime_guess, matched case-insensitively.
    const SPECIAL_TYPES: [(&str, &str); 5] = [
        (".parquet", "application/vnd.apache.parquet"),
        (".avro", "application/avro"),
        (".orc", "application/orc"),
        (".feather", "application/feather"),
        (".arrow", "application/arrow"),
    ];

    let lower_name = object_name.to_lowercase();
    for (ext, mime) in SPECIAL_TYPES {
        if lower_name.ends_with(ext) {
            return mime.to_owned();
        }
    }

    // Standard file types are resolved by mime_guess.
    mime_guess::from_path(object_name).first_or_octet_stream().to_string()
}
/// Standard headers copied verbatim into object metadata.
///
/// The list is fixed at compile time, so a plain static slice replaces the
/// original lazily-initialized heap `Vec` (no allocation, no first-use cost).
static SUPPORTED_HEADERS: &[&str] = &[
    "content-type",
    "cache-control",
    "content-language",
    "content-encoding",
    "content-disposition",
    "x-amz-storage-class",
    "x-amz-tagging",
    "expires",
    "x-amz-replication-status",
];
/// Parse a copy-source range string of the form `bytes=start-end`.
///
/// Supported forms (mirroring HTTP range semantics):
/// - `bytes=100-200` — explicit inclusive range
/// - `bytes=500-`    — open-ended range starting at 500
/// - `bytes=-500`    — suffix range (the last 500 bytes)
///
/// # Errors
/// Returns an `InvalidArgument` S3 error for any malformed input.
pub fn parse_copy_source_range(range_str: &str) -> S3Result<HTTPRangeSpec> {
    // strip_prefix avoids the magic `[6..]` slice and rejects short input cleanly.
    let Some(range_part) = range_str.strip_prefix("bytes=") else {
        return Err(s3_error!(InvalidArgument, "Invalid range format"));
    };
    let Some((start_str, end_str)) = range_part.split_once('-') else {
        return Err(s3_error!(InvalidArgument, "Invalid range format"));
    };
    if start_str.is_empty() && end_str.is_empty() {
        return Err(s3_error!(InvalidArgument, "Invalid range format"));
    }

    if start_str.is_empty() {
        // Suffix range: bytes=-500 (last 500 bytes)
        let length = end_str
            .parse::<i64>()
            .map_err(|_| s3_error!(InvalidArgument, "Invalid range format"))?;
        return Ok(HTTPRangeSpec {
            is_suffix_length: true,
            start: -length,
            end: -1,
        });
    }

    let start = start_str
        .parse::<i64>()
        .map_err(|_| s3_error!(InvalidArgument, "Invalid range format"))?;
    let end = if end_str.is_empty() {
        -1 // Open-ended range: bytes=500-
    } else {
        end_str
            .parse::<i64>()
            .map_err(|_| s3_error!(InvalidArgument, "Invalid range format"))?
    };
    if start < 0 || (end != -1 && end < start) {
        return Err(s3_error!(InvalidArgument, "Invalid range format"));
    }
    Ok(HTTPRangeSpec {
        is_suffix_length: false,
        start,
        end,
    })
}
/// Returns the content SHA256 to validate for this request, or `None` when
/// validation should be skipped (anonymous/streaming auth, or an explicitly
/// unsigned payload).
pub(crate) fn get_content_sha256(headers: &HeaderMap<HeaderValue>) -> Option<String> {
    // Only presigned and signed V4 requests carry a verifiable payload hash.
    if !matches!(get_request_auth_type(headers), AuthType::Presigned | AuthType::Signed) {
        return None;
    }
    if skip_content_sha256_cksum(headers) {
        None
    } else {
        Some(get_content_sha256_cksum(headers, ServiceType::S3))
    }
}
/// Returns `true` when payload checksum validation should be skipped for this
/// request, `false` when the sha256 must be validated.
fn skip_content_sha256_cksum(headers: &HeaderMap<HeaderValue>) -> bool {
    // For presigned requests the checksum may also arrive via query params;
    // that lookup is not implemented yet, so both auth styles currently read
    // the header only.
    let content_sha256 = if is_request_presigned_signature_v4(headers) {
        headers.get(AMZ_CONTENT_SHA256)
    } else {
        headers.get(AMZ_CONTENT_SHA256)
    };

    // No header (or an unreadable one): nothing to validate.
    let Some(header_value) = content_sha256 else {
        return true;
    };
    let Ok(value) = header_value.to_str() else {
        return true;
    };

    // Explicitly unsigned payloads are never validated.
    if value == UNSIGNED_PAYLOAD || value == UNSIGNED_PAYLOAD_TRAILER {
        return true;
    }

    if value == EMPTY_STRING_SHA256_HASH {
        // Some broken clients send the empty-string sha256 together with a
        // non-zero content-length. Skip validation for those bodies only; a
        // full implementation would additionally gate this on a strict-S3
        // compatibility setting.
        if let Some(content_length) = headers.get("content-length")
            && let Ok(length_str) = content_length.to_str()
            && let Ok(length) = length_str.parse::<i64>()
        {
            return length > 0; // && !global_server_ctxt.strict_s3_compat
        }
        return false;
    }

    false
}
/// Returns the SHA256 value used when building the canonical request.
fn get_content_sha256_cksum(headers: &HeaderMap<HeaderValue>, service_type: ServiceType) -> String {
    if service_type == ServiceType::STS {
        // STS requests would require hashing the request body; this simplified
        // implementation has no body access and returns a placeholder.
        return "sts-body-sha256-placeholder".to_string();
    }

    // Default when X-Amz-Content-Sha256 is absent: presigned requests fall
    // back to 'UNSIGNED-PAYLOAD', signed requests to sha256 of "".
    let default_sha256_cksum = if is_request_presigned_signature_v4(headers) {
        UNSIGNED_PAYLOAD.to_string()
    } else {
        EMPTY_STRING_SHA256_HASH.to_string()
    };

    headers
        .get(AMZ_CONTENT_SHA256)
        .and_then(|v| v.to_str().ok())
        .map(str::to_string)
        .unwrap_or(default_sha256_cksum)
}
#[cfg(test)]
mod tests {
use super::*;
use http::{HeaderMap, HeaderValue};
use std::collections::HashMap;
use uuid::Uuid;
// Builds a header map covering the main extraction paths: a standard header,
// an x-amz-meta-* key, an x-rustfs-meta-* key, and a supported cache header.
fn create_test_headers() -> HeaderMap<HeaderValue> {
    let mut headers = HeaderMap::new();
    headers.insert("content-type", HeaderValue::from_static("application/json"));
    headers.insert("x-amz-meta-custom", HeaderValue::from_static("custom-value"));
    headers.insert("x-rustfs-meta-internal", HeaderValue::from_static("internal-value"));
    headers.insert("cache-control", HeaderValue::from_static("no-cache"));
    headers
}

// Simple two-entry user metadata map shared by the *_opts tests.
fn create_test_metadata() -> HashMap<String, String> {
    let mut metadata = HashMap::new();
    metadata.insert("key1".to_string(), "value1".to_string());
    metadata.insert("key2".to_string(), "value2".to_string());
    metadata
}
// del_opts: a plain call with metadata succeeds and leaves version_id unset.
#[tokio::test]
async fn test_del_opts_basic() {
    let headers = create_test_headers();
    let metadata = create_test_metadata();
    let result = del_opts("test-bucket", "test-object", None, &headers, metadata).await;
    assert!(result.is_ok());
    let opts = result.unwrap();
    assert!(!opts.user_defined.is_empty());
    assert_eq!(opts.version_id, None);
}

// del_opts: directory objects (trailing '/') are pinned to the nil UUID version.
#[tokio::test]
async fn test_del_opts_with_directory_object() {
    let headers = create_test_headers();
    let result = del_opts("test-bucket", "test-dir/", None, &headers, HashMap::new()).await;
    assert!(result.is_ok());
    let opts = result.unwrap();
    assert_eq!(opts.version_id, Some(Uuid::nil().to_string()));
}

// del_opts: a well-formed UUID version id is accepted when versioning allows it.
#[tokio::test]
async fn test_del_opts_with_valid_version_id() {
    let headers = create_test_headers();
    let valid_uuid = Uuid::new_v4().to_string();
    let result = del_opts("test-bucket", "test-object", Some(valid_uuid.clone()), &headers, HashMap::new()).await;
    // This test may fail if versioning is not enabled for the bucket
    // In a real test environment, you would mock BucketVersioningSys
    match result {
        Ok(opts) => {
            assert_eq!(opts.version_id, Some(valid_uuid));
        }
        Err(_) => {
            // Expected if versioning is not enabled
        }
    }
}

// del_opts: a malformed version id yields InvalidVersionID with the original
// bucket/object/version echoed back.
#[tokio::test]
async fn test_del_opts_with_invalid_version_id() {
    let headers = create_test_headers();
    let invalid_uuid = "invalid-uuid".to_string();
    let result = del_opts("test-bucket", "test-object", Some(invalid_uuid), &headers, HashMap::new()).await;
    assert!(result.is_err());
    if let Err(err) = result {
        match err {
            StorageError::InvalidVersionID(bucket, object, version) => {
                assert_eq!(bucket, "test-bucket");
                assert_eq!(object, "test-object");
                assert_eq!(version, "invalid-uuid");
            }
            _ => panic!("Expected InvalidVersionID error"),
        }
    }
}
// get_opts: defaults leave both part_number and version_id unset.
#[tokio::test]
async fn test_get_opts_basic() {
    let headers = create_test_headers();
    let result = get_opts("test-bucket", "test-object", None, None, &headers).await;
    assert!(result.is_ok());
    let opts = result.unwrap();
    assert_eq!(opts.part_number, None);
    assert_eq!(opts.version_id, None);
}

// get_opts: an explicit part number is carried through to the options.
#[tokio::test]
async fn test_get_opts_with_part_number() {
    let headers = create_test_headers();
    let result = get_opts("test-bucket", "test-object", None, Some(5), &headers).await;
    assert!(result.is_ok());
    let opts = result.unwrap();
    assert_eq!(opts.part_number, Some(5));
}

// get_opts: directory objects (trailing '/') are pinned to the nil UUID version.
#[tokio::test]
async fn test_get_opts_with_directory_object() {
    let headers = create_test_headers();
    let result = get_opts("test-bucket", "test-dir/", None, None, &headers).await;
    assert!(result.is_ok());
    let opts = result.unwrap();
    assert_eq!(opts.version_id, Some(Uuid::nil().to_string()));
}

// get_opts: a malformed version id yields InvalidVersionID with the original
// bucket/object/version echoed back.
#[tokio::test]
async fn test_get_opts_with_invalid_version_id() {
    let headers = create_test_headers();
    let invalid_uuid = "invalid-uuid".to_string();
    let result = get_opts("test-bucket", "test-object", Some(invalid_uuid), None, &headers).await;
    assert!(result.is_err());
    if let Err(err) = result {
        match err {
            StorageError::InvalidVersionID(bucket, object, version) => {
                assert_eq!(bucket, "test-bucket");
                assert_eq!(object, "test-object");
                assert_eq!(version, "invalid-uuid");
            }
            _ => panic!("Expected InvalidVersionID error"),
        }
    }
}
// put_opts: a plain call with metadata succeeds and leaves version_id unset.
#[tokio::test]
async fn test_put_opts_basic() {
    let headers = create_test_headers();
    let metadata = create_test_metadata();
    let result = put_opts("test-bucket", "test-object", None, &headers, metadata).await;
    assert!(result.is_ok());
    let opts = result.unwrap();
    assert!(!opts.user_defined.is_empty());
    assert_eq!(opts.version_id, None);
}

// put_opts: directory objects (trailing '/') are pinned to the nil UUID version.
#[tokio::test]
async fn test_put_opts_with_directory_object() {
    let headers = create_test_headers();
    let result = put_opts("test-bucket", "test-dir/", None, &headers, HashMap::new()).await;
    assert!(result.is_ok());
    let opts = result.unwrap();
    assert_eq!(opts.version_id, Some(Uuid::nil().to_string()));
}

// put_opts: a malformed version id yields InvalidVersionID with the original
// bucket/object/version echoed back.
#[tokio::test]
async fn test_put_opts_with_invalid_version_id() {
    let headers = create_test_headers();
    let invalid_uuid = "invalid-uuid".to_string();
    let result = put_opts("test-bucket", "test-object", Some(invalid_uuid), &headers, HashMap::new()).await;
    assert!(result.is_err());
    if let Err(err) = result {
        match err {
            StorageError::InvalidVersionID(bucket, object, version) => {
                assert_eq!(bucket, "test-bucket");
                assert_eq!(object, "test-object");
                assert_eq!(version, "invalid-uuid");
            }
            _ => panic!("Expected InvalidVersionID error"),
        }
    }
}

// copy_dst_opts delegates to put_opts, so metadata must come through.
#[tokio::test]
async fn test_copy_dst_opts() {
    let headers = create_test_headers();
    let metadata = create_test_metadata();
    let result = copy_dst_opts("test-bucket", "test-object", None, &headers, metadata).await;
    assert!(result.is_ok());
    let opts = result.unwrap();
    assert!(!opts.user_defined.is_empty());
}
// copy_src_opts starts from defaults, so user metadata must be empty.
#[test]
fn test_copy_src_opts() {
    let headers = create_test_headers();
    let result = copy_src_opts("test-bucket", "test-object", &headers);
    assert!(result.is_ok());
    let opts = result.unwrap();
    assert!(opts.user_defined.is_empty());
}

// put_opts_from_headers carries the provided metadata into user_defined.
#[test]
fn test_put_opts_from_headers() {
    let headers = create_test_headers();
    let metadata = create_test_metadata();
    let result = put_opts_from_headers(&headers, metadata);
    assert!(result.is_ok());
    let opts = result.unwrap();
    assert!(!opts.user_defined.is_empty());
    let user_defined = opts.user_defined;
    assert_eq!(user_defined.get("key1"), Some(&"value1".to_string()));
    assert_eq!(user_defined.get("key2"), Some(&"value2".to_string()));
}

// get_default_opts copies the metadata map into user_defined verbatim.
#[test]
fn test_get_default_opts_with_metadata() {
    let headers = create_test_headers();
    let metadata = create_test_metadata();
    let result = get_default_opts(&headers, metadata, false);
    assert!(result.is_ok());
    let opts = result.unwrap();
    assert!(!opts.user_defined.is_empty());
    let user_defined = opts.user_defined;
    assert_eq!(user_defined.get("key1"), Some(&"value1".to_string()));
    assert_eq!(user_defined.get("key2"), Some(&"value2".to_string()));
}

// get_default_opts with an empty map leaves user_defined empty (headers are ignored).
#[test]
fn test_get_default_opts_without_metadata() {
    let headers = create_test_headers();
    let result = get_default_opts(&headers, HashMap::new(), false);
    assert!(result.is_ok());
    let opts = result.unwrap();
    assert!(opts.user_defined.is_empty());
}
// extract_metadata: supported headers keep their name; meta-prefixed headers
// are stored under the stripped key.
#[test]
fn test_extract_metadata_basic() {
    let headers = create_test_headers();
    let metadata = extract_metadata(&headers);
    assert!(metadata.contains_key("content-type"));
    assert_eq!(metadata.get("content-type"), Some(&"application/json".to_string()));
    assert!(metadata.contains_key("cache-control"));
    assert_eq!(metadata.get("cache-control"), Some(&"no-cache".to_string()));
    assert!(metadata.contains_key("custom"));
    assert_eq!(metadata.get("custom"), Some(&"custom-value".to_string()));
    assert!(metadata.contains_key("internal"));
    assert_eq!(metadata.get("internal"), Some(&"internal-value".to_string()));
}

// x-amz-meta-* keys are stripped; a bare "x-amz-meta-" (empty key) is dropped.
#[test]
fn test_extract_metadata_from_mime_amz_meta() {
    let mut headers = HeaderMap::new();
    headers.insert("x-amz-meta-user-id", HeaderValue::from_static("12345"));
    headers.insert("x-amz-meta-project", HeaderValue::from_static("test-project"));
    headers.insert("x-amz-meta-", HeaderValue::from_static("empty-key")); // Should be ignored
    let mut metadata = HashMap::new();
    extract_metadata_from_mime(&headers, &mut metadata);
    assert_eq!(metadata.get("user-id"), Some(&"12345".to_string()));
    assert_eq!(metadata.get("project"), Some(&"test-project".to_string()));
    assert!(!metadata.contains_key(""));
}

// x-rustfs-meta-* keys are stripped the same way as x-amz-meta-*.
#[test]
fn test_extract_metadata_from_mime_rustfs_meta() {
    let mut headers = HeaderMap::new();
    headers.insert("x-rustfs-meta-internal-id", HeaderValue::from_static("67890"));
    headers.insert("x-rustfs-meta-category", HeaderValue::from_static("documents"));
    let mut metadata = HashMap::new();
    extract_metadata_from_mime(&headers, &mut metadata);
    assert_eq!(metadata.get("internal-id"), Some(&"67890".to_string()));
    assert_eq!(metadata.get("category"), Some(&"documents".to_string()));
}

// Every entry of SUPPORTED_HEADERS is copied through under its own name.
#[test]
fn test_extract_metadata_from_mime_supported_headers() {
    let mut headers = HeaderMap::new();
    headers.insert("content-type", HeaderValue::from_static("text/plain"));
    headers.insert("cache-control", HeaderValue::from_static("max-age=3600"));
    headers.insert("content-language", HeaderValue::from_static("en-US"));
    headers.insert("content-encoding", HeaderValue::from_static("gzip"));
    headers.insert("content-disposition", HeaderValue::from_static("attachment"));
    headers.insert("x-amz-storage-class", HeaderValue::from_static("STANDARD"));
    headers.insert("x-amz-tagging", HeaderValue::from_static("key1=value1&key2=value2"));
    headers.insert("expires", HeaderValue::from_static("Wed, 21 Oct 2015 07:28:00 GMT"));
    headers.insert("x-amz-replication-status", HeaderValue::from_static("COMPLETED"));
    let mut metadata = HashMap::new();
    extract_metadata_from_mime(&headers, &mut metadata);
    assert_eq!(metadata.get("content-type"), Some(&"text/plain".to_string()));
    assert_eq!(metadata.get("cache-control"), Some(&"max-age=3600".to_string()));
    assert_eq!(metadata.get("content-language"), Some(&"en-US".to_string()));
    assert_eq!(metadata.get("content-encoding"), Some(&"gzip".to_string()));
    assert_eq!(metadata.get("content-disposition"), Some(&"attachment".to_string()));
    assert_eq!(metadata.get("x-amz-storage-class"), Some(&"STANDARD".to_string()));
    assert_eq!(metadata.get("x-amz-tagging"), Some(&"key1=value1&key2=value2".to_string()));
    assert_eq!(metadata.get("expires"), Some(&"Wed, 21 Oct 2015 07:28:00 GMT".to_string()));
    assert_eq!(metadata.get("x-amz-replication-status"), Some(&"COMPLETED".to_string()));
}

// With no headers at all, a binary/octet-stream content type is synthesized.
#[test]
fn test_extract_metadata_from_mime_default_content_type() {
    let headers = HeaderMap::new();
    let mut metadata = HashMap::new();
    extract_metadata_from_mime(&headers, &mut metadata);
    assert_eq!(metadata.get("content-type"), Some(&"binary/octet-stream".to_string()));
}

// An explicit content-type header wins over the synthesized default.
#[test]
fn test_extract_metadata_from_mime_existing_content_type() {
    let mut headers = HeaderMap::new();
    headers.insert("content-type", HeaderValue::from_static("application/json"));
    let mut metadata = HashMap::new();
    extract_metadata_from_mime(&headers, &mut metadata);
    assert_eq!(metadata.get("content-type"), Some(&"application/json".to_string()));
}

// Non-ASCII header bytes survive via from_utf8_lossy.
#[test]
fn test_extract_metadata_from_mime_unicode_values() {
    let mut headers = HeaderMap::new();
    headers.insert("x-amz-meta-chinese", HeaderValue::from_bytes("test-value".as_bytes()).unwrap());
    headers.insert("x-rustfs-meta-emoji", HeaderValue::from_bytes("🚀".as_bytes()).unwrap());
    let mut metadata = HashMap::new();
    extract_metadata_from_mime(&headers, &mut metadata);
    assert_eq!(metadata.get("chinese"), Some(&"test-value".to_string()));
    assert_eq!(metadata.get("emoji"), Some(&"🚀".to_string()));
}
#[test]
fn test_extract_metadata_from_mime_unsupported_headers() {
let mut headers = HeaderMap::new();
headers.insert("authorization", HeaderValue::from_static("Bearer token"));
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/storage/mod.rs | rustfs/src/storage/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Submodules of the storage layer.
pub mod access;
pub mod concurrency; // adaptive concurrency management for object retrieval
pub mod ecfs;
pub(crate) mod entity;
pub(crate) mod helper;
pub mod options; // building ObjectOptions (versioning, preconditions) from requests
pub mod tonic_service;
// Tests for concurrent GetObject behavior; compiled only in test builds.
#[cfg(test)]
mod concurrent_get_object_test;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/storage/concurrency.rs | rustfs/src/storage/concurrency.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Concurrency optimization module for high-performance object retrieval.
//!
//! This module provides intelligent concurrency management to prevent performance
//! degradation when multiple concurrent GetObject requests are processed. It addresses
//! the core issue where increasing concurrency from 1→2→4 requests caused latency to
//! degrade exponentially (59ms → 110ms → 200ms).
//!
//! # Key Features
//!
//! - **Adaptive Buffer Sizing**: Dynamically adjusts buffer sizes based on concurrent load
//! to prevent memory contention and thrashing under high concurrency.
//! - **Moka Cache Integration**: Lock-free hot object caching with automatic TTL/TTI expiration
//! for frequently accessed objects, providing sub-5ms response times on cache hits.
//! - **I/O Rate Limiting**: Semaphore-based disk read throttling prevents I/O queue saturation
//! and ensures fair resource allocation across concurrent requests.
//! - **Comprehensive Metrics**: Prometheus-compatible metrics for monitoring cache hit rates,
//! request latency, concurrency levels, and disk wait times.
//!
//! # Performance Characteristics
//!
//! - Low concurrency (1-2 requests): Optimizes for throughput with larger buffers (100%)
//! - Medium concurrency (3-4 requests): Balances throughput and fairness (75% buffers)
//! - High concurrency (5-8 requests): Optimizes for fairness (50% buffers)
//! - Very high concurrency (>8 requests): Ensures predictable latency (40% buffers)
//!
//! # Expected Performance Improvements
//!
//! | Concurrent Requests | Before | After | Improvement |
//! |---------------------|--------|-------|-------------|
//! | 2 requests | 110ms | 60-70ms | ~40% faster |
//! | 4 requests | 200ms | 75-90ms | ~55% faster |
//! | 8 requests | 400ms | 90-120ms | ~70% faster |
//!
//! # Usage Example
//!
//! ```ignore
//! use crate::storage::concurrency::ConcurrencyManager;
//!
//! async fn handle_get_object() {
//! // Automatic request tracking with RAII guard
//! let _guard = ConcurrencyManager::track_request();
//!
//! // Try cache first (sub-5ms if hit)
//! if let Some(data) = manager.get_cached(&key).await {
//! return serve_from_cache(data);
//! }
//!
//! // Rate-limited disk read
//! let _permit = manager.acquire_disk_read_permit().await;
//!
//! // Use adaptive buffer size
//! let buffer_size = get_concurrency_aware_buffer_size(file_size, base_buffer);
//! // ... read from disk ...
//! }
//! ```
use moka::future::Cache;
use rustfs_config::{KI_B, MI_B};
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::{Arc, LazyLock, Mutex};
use std::time::{Duration, Instant};
use tokio::sync::Semaphore;
// ============================================
// Adaptive I/O Strategy Types
// ============================================
/// Load level classification derived from disk permit wait times.
///
/// Represents the current I/O pressure on the system: the longer a request
/// had to wait for a disk read permit, the more contended the disk queue is.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum IoLoadLevel {
    /// Low load: wait time < 10ms. System has ample I/O capacity.
    Low,
    /// Medium load: wait time 10-50ms. System is moderately loaded.
    Medium,
    /// High load: wait time 50-200ms. System is under significant load.
    High,
    /// Critical load: wait time > 200ms. System is heavily congested.
    Critical,
}
impl IoLoadLevel {
    /// Classify a disk-permit wait duration into a load level.
    ///
    /// Thresholds reflect typical NVMe SSD behavior:
    /// - `< 10ms`  -> `Low` (normal operation)
    /// - `10-50ms` -> `Medium` (moderate contention)
    /// - `50-200ms` -> `High` (significant contention)
    /// - `>= 200ms` -> `Critical` (severe congestion)
    pub fn from_wait_duration(wait: Duration) -> Self {
        match wait.as_millis() {
            0..=9 => IoLoadLevel::Low,
            10..=49 => IoLoadLevel::Medium,
            50..=199 => IoLoadLevel::High,
            _ => IoLoadLevel::Critical,
        }
    }
}
/// Adaptive I/O strategy calculated from current system load.
///
/// This structure provides optimized I/O parameters based on the observed
/// disk permit wait times. It helps balance throughput vs. latency and
/// prevents I/O saturation under high load.
///
/// # Usage Example
///
/// ```ignore
/// let strategy = manager.calculate_io_strategy(permit_wait_duration);
///
/// // Apply strategy to I/O operations
/// let buffer_size = strategy.buffer_size;
/// let enable_readahead = strategy.enable_readahead;
/// let enable_cache_writeback = strategy.cache_writeback_enabled;
/// ```
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct IoStrategy {
    /// Recommended buffer size for I/O operations (in bytes).
    ///
    /// Computed as `base_buffer_size * buffer_multiplier`, then clamped to
    /// the range [32 KiB, 1 MiB]. Under high load this is reduced to improve
    /// fairness and reduce memory pressure; under low load it is maximized
    /// for throughput.
    pub buffer_size: usize,
    /// Buffer size multiplier (0.4 - 1.0) applied to base buffer size.
    ///
    /// - 1.0: Low load - use full buffer
    /// - 0.75: Medium load - slightly reduced
    /// - 0.5: High load - significantly reduced
    /// - 0.4: Critical load - minimal buffer
    pub buffer_multiplier: f64,
    /// Whether to enable aggressive read-ahead for sequential reads.
    ///
    /// Enabled for Low/Medium load; disabled for High/Critical load to
    /// reduce I/O amplification.
    pub enable_readahead: bool,
    /// Whether to enable cache writeback for this request.
    ///
    /// Disabled only under Critical load to reduce memory pressure.
    pub cache_writeback_enabled: bool,
    /// Whether to use tokio BufReader for improved async I/O.
    ///
    /// Always set to `true` by the constructor.
    pub use_buffered_io: bool,
    /// The detected I/O load level (see [`IoLoadLevel`]).
    pub load_level: IoLoadLevel,
    /// The raw permit wait duration that was used to calculate this strategy.
    pub permit_wait_duration: Duration,
}
impl IoStrategy {
    /// Build an [`IoStrategy`] from the observed disk-permit wait time.
    ///
    /// The wait duration is first classified into an [`IoLoadLevel`], which
    /// then selects the buffer multiplier and feature toggles in one place.
    ///
    /// # Arguments
    ///
    /// * `permit_wait_duration` - time spent waiting for a disk read permit
    /// * `base_buffer_size` - baseline buffer size from the workload profile
    ///
    /// # Returns
    ///
    /// An `IoStrategy` with parameters tuned for the current load level.
    pub fn from_wait_duration(permit_wait_duration: Duration, base_buffer_size: usize) -> Self {
        let load_level = IoLoadLevel::from_wait_duration(permit_wait_duration);
        // One tuple per load level: (multiplier, readahead, cache writeback).
        let (buffer_multiplier, enable_readahead, cache_writeback_enabled) = match load_level {
            IoLoadLevel::Low => (1.0, true, true),
            IoLoadLevel::Medium => (0.75, true, true),
            IoLoadLevel::High => (0.5, false, true),
            // Cache writeback is dropped only under extreme load.
            IoLoadLevel::Critical => (0.4, false, false),
        };
        // Scale the base buffer, then keep it inside [32 KiB, 1 MiB].
        let scaled = ((base_buffer_size as f64) * buffer_multiplier) as usize;
        Self {
            buffer_size: scaled.clamp(32 * KI_B, MI_B),
            buffer_multiplier,
            enable_readahead,
            cache_writeback_enabled,
            use_buffered_io: true, // buffered async I/O is always on
            load_level,
            permit_wait_duration,
        }
    }

    /// Human-readable one-line summary of this strategy (for logging).
    #[allow(dead_code)]
    pub fn description(&self) -> String {
        format!(
            "IoStrategy[{:?}]: buffer={}KB, multiplier={:.2}, readahead={}, cache_wb={}, wait={:?}",
            self.load_level,
            self.buffer_size / 1024,
            self.buffer_multiplier,
            self.enable_readahead,
            self.cache_writeback_enabled,
            self.permit_wait_duration
        )
    }
}
/// Rolling-window metrics for I/O load tracking.
///
/// Keeps a small sliding window of recent disk-permit wait durations so the
/// load level can be smoothed, preventing strategy oscillation caused by
/// transient spikes. Lifetime totals are accumulated separately for overall
/// statistics.
#[allow(dead_code)]
#[derive(Debug)]
struct IoLoadMetrics {
    /// Recent permit wait durations (acts as a ring buffer once full)
    recent_waits: Vec<Duration>,
    /// Capacity of the sliding window
    max_samples: usize,
    /// Index of the oldest sample once the window is full
    earliest_index: usize,
    /// Lifetime total wait time in nanoseconds (for averaging)
    total_wait_ns: AtomicU64,
    /// Lifetime number of observations
    observation_count: AtomicU64,
}
#[allow(dead_code)]
impl IoLoadMetrics {
    /// Create an empty metrics window holding at most `max_samples` entries.
    fn new(max_samples: usize) -> Self {
        Self {
            recent_waits: Vec::with_capacity(max_samples),
            max_samples,
            earliest_index: 0,
            total_wait_ns: AtomicU64::new(0),
            observation_count: AtomicU64::new(0),
        }
    }
    /// Record one permit-wait observation.
    fn record(&mut self, wait: Duration) {
        // Fill the window first; once full, overwrite the oldest slot.
        if self.recent_waits.len() == self.max_samples {
            self.recent_waits[self.earliest_index] = wait;
            self.earliest_index = (self.earliest_index + 1) % self.max_samples;
        } else {
            self.recent_waits.push(wait);
        }
        // Lifetime accumulators for overall statistics.
        self.total_wait_ns.fetch_add(wait.as_nanos() as u64, Ordering::Relaxed);
        self.observation_count.fetch_add(1, Ordering::Relaxed);
    }
    /// Average wait duration over the recent window (zero when empty).
    fn average_wait(&self) -> Duration {
        match self.recent_waits.len() {
            0 => Duration::ZERO,
            n => self.recent_waits.iter().sum::<Duration>() / n as u32,
        }
    }
    /// Maximum wait duration in the recent window (zero when empty).
    fn max_wait(&self) -> Duration {
        self.recent_waits.iter().max().copied().unwrap_or(Duration::ZERO)
    }
    /// P95 wait duration from the recent window (zero when empty).
    fn p95_wait(&self) -> Duration {
        let n = self.recent_waits.len();
        if n == 0 {
            return Duration::ZERO;
        }
        let mut sorted = self.recent_waits.clone();
        sorted.sort_unstable();
        // Index at the 95th percentile, clamped to the last element.
        let idx = (((n as f64) * 0.95) as usize).min(n - 1);
        sorted[idx]
    }
    /// Smoothed load level based on the windowed average wait.
    fn smoothed_load_level(&self) -> IoLoadLevel {
        IoLoadLevel::from_wait_duration(self.average_wait())
    }
    /// Overall average wait since startup (zero before any observation).
    fn lifetime_average_wait(&self) -> Duration {
        let count = self.observation_count.load(Ordering::Relaxed);
        if count == 0 {
            return Duration::ZERO;
        }
        let total = self.total_wait_ns.load(Ordering::Relaxed);
        Duration::from_nanos(total / count)
    }
    /// Total number of observations ever recorded.
    fn observation_count(&self) -> u64 {
        self.observation_count.load(Ordering::Relaxed)
    }
}
/// Global concurrent request counter for adaptive buffer sizing.
///
/// This atomic counter tracks the number of active GetObject requests in real-time.
/// It's used by the buffer sizing algorithm to dynamically adjust memory allocation
/// based on current system load, preventing memory contention under high concurrency.
///
/// Incremented by `GetObjectGuard::new()` and decremented in its `Drop` impl,
/// so the count stays accurate across error paths.
///
/// Access pattern: Lock-free atomic operations (Relaxed ordering for performance).
static ACTIVE_GET_REQUESTS: AtomicUsize = AtomicUsize::new(0);
/// Global concurrency manager instance, constructed lazily on first access.
///
/// `LazyLock` guarantees thread-safe one-time initialization without any
/// explicit synchronization at the call sites.
static CONCURRENCY_MANAGER: LazyLock<ConcurrencyManager> = LazyLock::new(ConcurrencyManager::new);
/// RAII guard for tracking active GetObject requests.
///
/// This guard automatically increments the concurrent request counter when created
/// and decrements it when dropped. This ensures accurate tracking even if requests
/// fail or panic, preventing counter leaks that could permanently degrade performance.
///
/// # Thread Safety
///
/// Safe to use across threads. The underlying atomic counter uses Relaxed ordering
/// for performance since exact synchronization isn't required for buffer sizing hints.
///
/// # Metrics
///
/// On drop, automatically records request completion and duration metrics (when the
/// "metrics" feature is enabled) for Prometheus monitoring and alerting.
///
/// # Example
///
/// ```ignore
/// async fn get_object() {
///     let _guard = GetObjectGuard::new();
///     // Request counter incremented automatically
///     // ... process request ...
///     // Counter decremented automatically when guard drops
/// }
/// ```
#[derive(Debug)]
pub struct GetObjectGuard {
    /// When this request started; read in `elapsed()` and by the `Drop` impl
    /// to compute end-to-end request latency.
    start_time: Instant,
    /// Reference to the global concurrency manager. Held but never read
    /// directly; the underscore prefix marks it as intentionally unused.
    _manager: &'static ConcurrencyManager,
}
impl GetObjectGuard {
    /// Create a guard and atomically bump the active-request counter.
    ///
    /// Invoked through `ConcurrencyManager::track_request()`. The increment is
    /// visible to concurrent readers immediately because the counter is atomic.
    fn new() -> Self {
        ACTIVE_GET_REQUESTS.fetch_add(1, Ordering::Relaxed);
        Self {
            start_time: Instant::now(),
            _manager: &CONCURRENCY_MANAGER,
        }
    }
    /// Elapsed time since this request began.
    ///
    /// Useful for logging or metrics during processing; the `Drop` impl also
    /// uses it to record the request-duration histogram.
    pub fn elapsed(&self) -> Duration {
        Instant::now().duration_since(self.start_time)
    }
    /// Instantaneous number of active GetObject requests across all threads.
    ///
    /// Buffer sizing algorithms read this value to adapt to current load.
    ///
    /// # Returns
    ///
    /// Current number of concurrent requests (including this one)
    pub fn concurrent_requests() -> usize {
        ACTIVE_GET_REQUESTS.load(Ordering::Relaxed)
    }
}
impl Drop for GetObjectGuard {
    /// Release the request slot and emit completion metrics.
    ///
    /// Runs even during stack unwinding, so the counter stays accurate when a
    /// request fails or panics (unless the guard is explicitly forgotten).
    /// With the "metrics" feature enabled, also records a completion counter
    /// and a duration histogram for Prometheus.
    fn drop(&mut self) {
        // Give the slot back to the global concurrency counter.
        ACTIVE_GET_REQUESTS.fetch_sub(1, Ordering::Relaxed);
        #[cfg(feature = "metrics")]
        {
            use metrics::{counter, histogram};
            // Latency histogram feeds percentile queries (P50, P95, P99).
            histogram!("rustfs.get.object.duration.seconds").record(self.elapsed().as_secs_f64());
            // Completed-request counter feeds throughput calculations.
            counter!("rustfs.get.object.requests.completed").increment(1);
        }
    }
}
/// Concurrency-aware buffer size calculator.
///
/// Scales the buffer used for a GetObject read inversely with the number of
/// in-flight requests, trading single-request throughput for fairness and
/// lower memory pressure as load rises.
///
/// # Strategy
///
/// - **1-2 requests**: full base buffer for maximum throughput
/// - **3..=medium threshold**: 75% of base, balanced performance
/// - **..=high threshold**: 50% of base, reduced memory pressure
/// - **above high threshold**: 40% of base, fairness and predictable latency
///
/// # Arguments
///
/// * `file_size` - size of the file being read, or -1 when unknown
/// * `base_buffer_size` - baseline buffer size from the workload profile
///
/// # Returns
///
/// Buffer size in bytes, clamped to bounds appropriate for the current load.
pub fn get_concurrency_aware_buffer_size(file_size: i64, base_buffer_size: usize) -> usize {
    let concurrent_requests = ACTIVE_GET_REQUESTS.load(Ordering::Relaxed);
    // Export the current concurrency level as a gauge.
    #[cfg(feature = "metrics")]
    {
        use metrics::gauge;
        gauge!("rustfs.concurrent.get.requests").set(concurrent_requests as f64);
    }
    // A lone request gets the full base buffer for maximum throughput.
    if concurrent_requests <= 1 {
        return base_buffer_size;
    }
    let medium_threshold = rustfs_utils::get_env_usize(
        rustfs_config::ENV_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD,
        rustfs_config::DEFAULT_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD,
    );
    let high_threshold = rustfs_utils::get_env_usize(
        rustfs_config::ENV_OBJECT_HIGH_CONCURRENCY_THRESHOLD,
        rustfs_config::DEFAULT_OBJECT_HIGH_CONCURRENCY_THRESHOLD,
    );
    // Buffers shrink as concurrency grows: 100% -> 75% -> 50% -> 40%.
    let adaptive_multiplier = match concurrent_requests {
        n if n <= 2 => 1.0,
        n if n <= medium_threshold => 0.75,
        n if n <= high_threshold => 0.5,
        _ => 0.4,
    };
    let adjusted_size = (base_buffer_size as f64 * adaptive_multiplier) as usize;
    // Very small files get a 32 KiB floor; everything else a 64 KiB floor.
    let min_buffer = if file_size > 0 && file_size < 100 * KI_B as i64 {
        32 * KI_B
    } else {
        64 * KI_B
    };
    // Under heavy load cap at 256 KiB so total memory stays predictable.
    let max_buffer = if concurrent_requests > high_threshold {
        256 * KI_B
    } else {
        MI_B
    };
    adjusted_size.clamp(min_buffer, max_buffer)
}
/// Advanced concurrency-aware buffer sizing with file-size optimization.
///
/// Extends [`get_concurrency_aware_buffer_size`] with file-size and access
/// pattern hints to improve behavior for small files, sequential reads, and
/// large files under heavy concurrency.
///
/// # Arguments
///
/// * `file_size` - size of the file being read, or -1 when unknown
/// * `base_buffer_size` - baseline buffer size from the workload profile
/// * `is_sequential` - hint that this is a sequential read
///
/// # Returns
///
/// Optimized buffer size in bytes.
///
/// # Examples
///
/// ```ignore
/// let buffer_size = get_advanced_buffer_size(
///     32 * 1024 * 1024,  // 32MB file
///     256 * 1024,        // 256KB base buffer
///     true               // sequential read
/// );
/// ```
#[allow(dead_code)]
pub fn get_advanced_buffer_size(file_size: i64, base_buffer_size: usize, is_sequential: bool) -> usize {
    let concurrent_requests = ACTIVE_GET_REQUESTS.load(Ordering::Relaxed);
    // Tiny files (< 256 KiB): a quarter of the file, bounded to
    // [16 KiB, 64 KiB], regardless of how many requests are in flight.
    if file_size > 0 && file_size < 256 * KI_B as i64 {
        return (file_size as usize / 4).clamp(16 * KI_B, 64 * KI_B);
    }
    // Start from the standard concurrency-aware size.
    let standard_size = get_concurrency_aware_buffer_size(file_size, base_buffer_size);
    let medium_threshold = rustfs_utils::get_env_usize(
        rustfs_config::ENV_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD,
        rustfs_config::DEFAULT_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD,
    );
    let high_threshold = rustfs_utils::get_env_usize(
        rustfs_config::ENV_OBJECT_HIGH_CONCURRENCY_THRESHOLD,
        rustfs_config::DEFAULT_OBJECT_HIGH_CONCURRENCY_THRESHOLD,
    );
    // Sequential reads at low/medium concurrency: grow 1.5x, capped at 2 MiB.
    if is_sequential && concurrent_requests <= medium_threshold {
        return ((standard_size as f64 * 1.5) as usize).min(2 * MI_B);
    }
    // Heavy concurrency on large files (> 10 MiB): shrink by 20% so more
    // requests can make progress in parallel.
    if concurrent_requests > high_threshold && file_size > 10 * MI_B as i64 {
        return (standard_size as f64 * 0.8) as usize;
    }
    standard_size
}
/// High-performance cache for hot objects using Moka
///
/// This cache uses Moka for superior concurrent performance with features like:
/// - Lock-free reads and writes
/// - Automatic TTL and TTI expiration
/// - Size-based eviction with weigher function
/// - Built-in metrics collection
///
/// # Dual Cache Architecture
///
/// The cache maintains two separate Moka cache instances:
/// 1. `cache` - Simple byte array cache for raw object data (legacy support)
/// 2. `response_cache` - Full GetObject response cache with metadata
///
/// The response cache is preferred for new code as it stores complete response
/// metadata, enabling cache hits to bypass metadata lookups entirely.
///
/// Cloning is cheap: Moka caches and the `Arc`-wrapped counters are shared
/// between clones, so all handles see the same entries and statistics.
#[derive(Clone)]
struct HotObjectCache {
    /// Moka cache instance for simple byte data (legacy)
    cache: Cache<String, Arc<CachedObject>>,
    /// Moka cache instance for full GetObject responses with metadata
    response_cache: Cache<String, Arc<CachedGetObjectInternal>>,
    /// Maximum size in bytes of an individual object allowed into the cache
    max_object_size: usize,
    /// Global cache hit counter (shared across clones via `Arc`)
    hit_count: Arc<AtomicU64>,
    /// Global cache miss counter (shared across clones via `Arc`)
    miss_count: Arc<AtomicU64>,
}
impl std::fmt::Debug for HotObjectCache {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use std::sync::atomic::Ordering;
f.debug_struct("HotObjectCache")
.field("max_object_size", &self.max_object_size)
.field("hit_count", &self.hit_count.load(Ordering::Relaxed))
.field("miss_count", &self.miss_count.load(Ordering::Relaxed))
.finish()
}
}
/// A cached object with metadata and metrics
#[derive(Clone)]
struct CachedObject {
    /// The object data, shared cheaply between clones via `Arc`
    data: Arc<Vec<u8>>,
    /// When this object was cached; used for soft-TTL expiration decisions
    cached_at: Instant,
    /// Object size in bytes; used by the Moka weigher for size-based eviction
    size: usize,
    /// Number of times this object has been accessed; hot objects (with
    /// enough hits) may have their soft TTL extended
    access_count: Arc<AtomicU64>,
}
/// Comprehensive cached object with full response metadata for GetObject operations.
///
/// This structure stores all necessary fields to reconstruct a complete GetObjectOutput
/// response from cache, avoiding repeated disk reads and metadata lookups for hot objects.
///
/// # Fields
///
/// All time fields are serialized as RFC3339 strings to avoid parsing issues with
/// `Last-Modified` and other time headers.
///
/// # Usage
///
/// ```ignore
/// let cached = CachedGetObject {
///     body: Bytes::from(data),
///     content_length: data.len() as i64,
///     content_type: Some("application/octet-stream".to_string()),
///     e_tag: Some("\"abc123\"".to_string()),
///     last_modified: Some("2024-01-01T00:00:00Z".to_string()),
///     ..Default::default()
/// };
/// manager.put_cached_object(cache_key, cached).await;
/// ```
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct CachedGetObject {
    /// The object body data (cheaply cloneable `Bytes`)
    pub body: bytes::Bytes,
    /// Content length in bytes
    pub content_length: i64,
    /// MIME content type
    pub content_type: Option<String>,
    /// Entity tag for the object
    pub e_tag: Option<String>,
    /// Last modified time as RFC3339 string (e.g., "2024-01-01T12:00:00Z")
    pub last_modified: Option<String>,
    /// Expiration time as RFC3339 string
    pub expires: Option<String>,
    /// Cache-Control header value
    pub cache_control: Option<String>,
    /// Content-Disposition header value
    pub content_disposition: Option<String>,
    /// Content-Encoding header value
    pub content_encoding: Option<String>,
    /// Content-Language header value
    pub content_language: Option<String>,
    /// Storage class (STANDARD, REDUCED_REDUNDANCY, etc.)
    pub storage_class: Option<String>,
    /// Version ID for versioned objects
    pub version_id: Option<String>,
    /// Whether this is a delete marker (for versioned buckets)
    pub delete_marker: bool,
    /// Number of tags associated with the object
    pub tag_count: Option<i32>,
    /// Replication status
    pub replication_status: Option<String>,
    /// User-defined metadata (x-amz-meta-*)
    pub user_metadata: std::collections::HashMap<String, String>,
    /// When this object was cached; set by `new()`, `None` in `Default`
    cached_at: Option<Instant>,
    /// Access count for hot key tracking (incremented via `increment_access`)
    access_count: Arc<AtomicU64>,
}
impl Default for CachedGetObject {
    /// An empty cached response: zero-length body, no metadata, zero hits.
    fn default() -> Self {
        Self {
            // Payload
            body: bytes::Bytes::new(),
            content_length: 0,
            // Standard response headers
            content_type: None,
            e_tag: None,
            last_modified: None,
            expires: None,
            cache_control: None,
            content_disposition: None,
            content_encoding: None,
            content_language: None,
            // Object properties
            storage_class: None,
            version_id: None,
            delete_marker: false,
            tag_count: None,
            replication_status: None,
            user_metadata: std::collections::HashMap::new(),
            // Internal bookkeeping
            cached_at: None,
            access_count: Arc::new(AtomicU64::new(0)),
        }
    }
}
impl CachedGetObject {
    /// Construct a cached response with the given body and content length.
    ///
    /// Stamps `cached_at` with the current time and starts the access count
    /// at zero; every other field takes its `Default` value.
    pub fn new(body: bytes::Bytes, content_length: i64) -> Self {
        Self {
            cached_at: Some(Instant::now()),
            access_count: Arc::new(AtomicU64::new(0)),
            body,
            content_length,
            ..Self::default()
        }
    }
    /// Builder: set the MIME content type.
    pub fn with_content_type(mut self, content_type: String) -> Self {
        self.content_type = Some(content_type);
        self
    }
    /// Builder: set the entity tag.
    pub fn with_e_tag(mut self, e_tag: String) -> Self {
        self.e_tag = Some(e_tag);
        self
    }
    /// Builder: set the last-modified timestamp (RFC3339 string).
    pub fn with_last_modified(mut self, last_modified: String) -> Self {
        self.last_modified = Some(last_modified);
        self
    }
    /// Builder: set the Cache-Control header value.
    #[allow(dead_code)]
    pub fn with_cache_control(mut self, cache_control: String) -> Self {
        self.cache_control = Some(cache_control);
        self
    }
    /// Builder: set the storage class.
    #[allow(dead_code)]
    pub fn with_storage_class(mut self, storage_class: String) -> Self {
        self.storage_class = Some(storage_class);
        self
    }
    /// Builder: set the version ID.
    #[allow(dead_code)]
    pub fn with_version_id(mut self, version_id: String) -> Self {
        self.version_id = Some(version_id);
        self
    }
    /// Size in bytes of the body, used for cache eviction calculations.
    pub fn size(&self) -> usize {
        self.body.len()
    }
    /// Atomically bump the access count and return the new value.
    pub fn increment_access(&self) -> u64 {
        self.access_count.fetch_add(1, Ordering::Relaxed) + 1
    }
}
/// Internal wrapper for CachedGetObject in the Moka cache
#[derive(Clone)]
struct CachedGetObjectInternal {
    /// The cached response data, shared via `Arc` so clones are cheap
    data: Arc<CachedGetObject>,
    /// When this object was cached
    cached_at: Instant,
    /// Size in bytes, read by the Moka weigher for size-based eviction
    size: usize,
}
impl HotObjectCache {
/// Create a new hot object cache with Moka
///
/// Configures Moka with:
/// - Size-based eviction (100MB max)
/// - TTL of 5 minutes
/// - TTI of 2 minutes
/// - Weigher function for accurate size tracking
fn new() -> Self {
let max_capacity = rustfs_utils::get_env_u64(
rustfs_config::ENV_OBJECT_CACHE_CAPACITY_MB,
rustfs_config::DEFAULT_OBJECT_CACHE_CAPACITY_MB,
);
let cache_tti_secs =
rustfs_utils::get_env_u64(rustfs_config::ENV_OBJECT_CACHE_TTI_SECS, rustfs_config::DEFAULT_OBJECT_CACHE_TTI_SECS);
let cache_ttl_secs =
rustfs_utils::get_env_u64(rustfs_config::ENV_OBJECT_CACHE_TTL_SECS, rustfs_config::DEFAULT_OBJECT_CACHE_TTL_SECS);
// Legacy simple byte cache
let cache = Cache::builder()
.max_capacity(max_capacity * MI_B as u64)
.weigher(|_key: &String, value: &Arc<CachedObject>| -> u32 {
// Weight based on actual data size
value.size.min(u32::MAX as usize) as u32
})
.time_to_live(Duration::from_secs(cache_ttl_secs))
.time_to_idle(Duration::from_secs(cache_tti_secs))
.build();
// Full response cache with metadata
let response_cache = Cache::builder()
.max_capacity(max_capacity * MI_B as u64)
.weigher(|_key: &String, value: &Arc<CachedGetObjectInternal>| -> u32 {
// Weight based on actual data size
value.size.min(u32::MAX as usize) as u32
})
.time_to_live(Duration::from_secs(cache_ttl_secs))
.time_to_idle(Duration::from_secs(cache_tti_secs))
.build();
let max_object_size = rustfs_utils::get_env_usize(
rustfs_config::ENV_OBJECT_CACHE_MAX_OBJECT_SIZE_MB,
rustfs_config::DEFAULT_OBJECT_CACHE_MAX_OBJECT_SIZE_MB,
) * MI_B;
Self {
cache,
response_cache,
max_object_size,
hit_count: Arc::new(AtomicU64::new(0)),
miss_count: Arc::new(AtomicU64::new(0)),
}
}
/// Soft expiration determination, the number of hits is insufficient and exceeds the soft TTL
fn should_expire(&self, obj: &Arc<CachedObject>) -> bool {
let age_secs = obj.cached_at.elapsed().as_secs();
let cache_ttl_secs =
rustfs_utils::get_env_u64(rustfs_config::ENV_OBJECT_CACHE_TTL_SECS, rustfs_config::DEFAULT_OBJECT_CACHE_TTL_SECS);
let hot_object_min_hits_to_extend = rustfs_utils::get_env_usize(
rustfs_config::ENV_OBJECT_HOT_MIN_HITS_TO_EXTEND,
rustfs_config::DEFAULT_OBJECT_HOT_MIN_HITS_TO_EXTEND,
);
if age_secs >= cache_ttl_secs {
let hits = obj.access_count.load(Ordering::Relaxed);
return hits < hot_object_min_hits_to_extend as u64;
}
false
}
/// Get an object from cache with lock-free concurrent access
///
/// Moka provides lock-free reads, significantly improving concurrent performance.
async fn get(&self, key: &str) -> Option<Arc<Vec<u8>>> {
match self.cache.get(key).await {
Some(cached) => {
if self.should_expire(&cached) {
self.cache.invalidate(key).await;
self.miss_count.fetch_add(1, Ordering::Relaxed);
return None;
}
// Update access count
cached.access_count.fetch_add(1, Ordering::Relaxed);
self.hit_count.fetch_add(1, Ordering::Relaxed);
#[cfg(feature = "metrics")]
{
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/storage/concurrent_get_object_test.rs | rustfs/src/storage/concurrent_get_object_test.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Integration tests for concurrent GetObject performance optimization with Moka cache.
//!
//! This test suite validates the solution to issue #911 where concurrent GetObject
//! requests experienced exponential latency degradation (59ms → 110ms → 200ms for
//! 1→2→4 concurrent requests).
//!
//! # Test Coverage
//!
//! The suite includes 20 comprehensive tests organized into categories:
//!
//! ## Request Management (3 tests)
//! - **Request Tracking**: Validates RAII guards correctly track concurrent requests
//! - **Adaptive Buffer Sizing**: Ensures buffers scale inversely with concurrency
//! - **Buffer Size Bounds**: Verifies min/max constraints are enforced
//!
//! ## Cache Operations (11 tests)
//! - **Basic Operations**: Insert, retrieve, stats, and clear operations
//! - **Size Limits**: Large objects (>10MB) are correctly rejected
//! - **Automatic Eviction**: Moka's LRU eviction maintains cache within capacity
//! - **Batch Operations**: Multi-object retrieval with single lock acquisition
//! - **Cache Warming**: Pre-population on startup for immediate performance
//! - **Cache Removal**: Explicit invalidation for stale data
//! - **Hit Rate Calculation**: Accurate hit/miss ratio tracking
//! - **TTL Configuration**: Time-to-live and time-to-idle validation
//! - **Cache Writeback Flow**: Validates cache_object → get_cached round-trip
//! - **Cache Writeback Size Limit**: Objects >10MB not cached during writeback
//! - **Cache Writeback Concurrent**: Thread-safe concurrent writeback handling
//!
//! ## Performance (4 tests)
//! - **Hot Keys Tracking**: Access pattern analysis for optimization
//! - **Concurrent Access**: Lock-free performance under 100 concurrent tasks
//! - **Advanced Sizing**: File pattern optimization (small files, sequential reads)
//! - **Performance Benchmark**: Sequential vs concurrent access comparison
//!
//! ## Advanced Features (2 tests)
//! - **Disk I/O Permits**: Rate limiting prevents disk saturation
//! - **Side-Effect Free Checks**: `is_cached()` doesn't inflate metrics
//!
//! # Moka-Specific Test Patterns
//!
//! These tests account for Moka's lock-free, asynchronous nature:
//!
//! ```ignore
//! // Pattern 1: Allow time for async operations
//! manager.cache_object(key, data).await;
//! sleep(Duration::from_millis(50)).await; // Give Moka time to process
//!
//! // Pattern 2: Run pending tasks before assertions
//! manager.cache.run_pending_tasks().await;
//! let stats = manager.cache_stats().await;
//!
//! // Pattern 3: Tolerance for timing variance
//! assert!(stats.entries >= expected_min, "Allow for concurrent evictions");
//! ```
//!
//! # Running Tests
//!
//! ```bash
//! # Run all concurrency tests
//! cargo test --package rustfs concurrent_get_object
//!
//! # Run specific test with output
//! cargo test --package rustfs test_concurrent_cache_access -- --nocapture
//!
//! # Run with timing output
//! cargo test --package rustfs bench_concurrent_cache_performance -- --nocapture --show-output
//! ```
//!
//! # Performance Expectations
//!
//! - Basic cache operations: <100ms
//! - Concurrent access (100 tasks): <500ms (demonstrates lock-free advantage)
//! - Cache warming (5 objects): <200ms
//! - Eviction test: <500ms (includes Moka background cleanup time)
#[cfg(test)]
mod tests {
use crate::storage::concurrency::{
CachedGetObject, ConcurrencyManager, GetObjectGuard, get_advanced_buffer_size, get_concurrency_aware_buffer_size,
};
use rustfs_config::{KI_B, MI_B};
use std::sync::Arc;
use std::time::Duration;
use tokio::time::{Instant, sleep};
    /// Test that concurrent requests are tracked correctly with RAII guards.
    ///
    /// This test validates the core request tracking mechanism that enables adaptive
    /// buffer sizing. The RAII guard pattern ensures accurate concurrent request counts
    /// even in error/panic scenarios, which is critical for preventing performance
    /// degradation under load.
    ///
    /// # Test Strategy
    ///
    /// 1. Record baseline concurrent request count
    /// 2. Create multiple guards and verify counter increments
    /// 3. Drop guards and verify counter decrements automatically
    /// 4. Validate that no requests are "leaked" (counter returns to baseline)
    ///
    /// # Why This Matters
    ///
    /// Accurate request tracking is essential because the buffer sizing algorithm
    /// uses `ACTIVE_GET_REQUESTS` to determine optimal buffer sizes. A leaked
    /// counter would cause permanently reduced buffer sizes, degrading performance.
    ///
    /// NOTE(review): the counter is a process-wide global. These exact-equality
    /// assertions assume no other test creates or drops guards between the
    /// baseline read and the final check; under parallel `cargo test` execution
    /// this could be flaky — consider running such tests serially. TODO confirm.
    #[tokio::test]
    async fn test_concurrent_request_tracking() {
        // Start with current baseline (may not be zero if other tests are running)
        let initial = GetObjectGuard::concurrent_requests();
        // Create guards to simulate concurrent requests; each one should bump
        // the global counter by exactly one.
        let guard1 = ConcurrencyManager::track_request();
        assert_eq!(GetObjectGuard::concurrent_requests(), initial + 1, "First guard should increment counter");
        let guard2 = ConcurrencyManager::track_request();
        assert_eq!(
            GetObjectGuard::concurrent_requests(),
            initial + 2,
            "Second guard should increment counter"
        );
        let guard3 = ConcurrencyManager::track_request();
        assert_eq!(GetObjectGuard::concurrent_requests(), initial + 3, "Third guard should increment counter");
        // Drop guards and verify count decreases automatically (RAII pattern).
        // The short sleeps yield to the runtime before each re-check.
        drop(guard1);
        sleep(Duration::from_millis(10)).await;
        assert_eq!(
            GetObjectGuard::concurrent_requests(),
            initial + 2,
            "Counter should decrement when guard1 drops"
        );
        drop(guard2);
        sleep(Duration::from_millis(10)).await;
        assert_eq!(
            GetObjectGuard::concurrent_requests(),
            initial + 1,
            "Counter should decrement when guard2 drops"
        );
        drop(guard3);
        sleep(Duration::from_millis(10)).await;
        assert_eq!(
            GetObjectGuard::concurrent_requests(),
            initial,
            "Counter should return to baseline - no leaks!"
        );
    }
/// Test adaptive buffer sizing under different concurrency levels.
///
/// This test validates the core solution to issue #911. The adaptive buffer sizing
/// algorithm prevents the exponential latency degradation seen in the original issue
/// by reducing buffer sizes as concurrency increases, preventing memory contention.
///
/// # Original Issue
///
/// - 1 concurrent request: 59ms (fixed 1MB buffers OK)
/// - 2 concurrent requests: 110ms (2MB total → memory contention starts)
/// - 4 concurrent requests: 200ms (4MB total → severe contention)
///
/// # Solution
///
/// Adaptive buffer sizing scales buffers inversely with concurrency:
/// - 1-2 requests: 100% buffers (256KB → 256KB) - optimize for throughput
/// - 3-4 requests: 75% buffers (256KB → 192KB) - balance performance
/// - 5-8 requests: 50% buffers (256KB → 128KB) - reduce memory pressure
/// - >8 requests: 40% buffers (256KB → 102KB) - fairness and predictability
///
/// # Test Strategy
///
/// For each concurrency level, creates guard objects to simulate active requests,
/// then validates the buffer sizing algorithm returns the expected buffer size
/// with reasonable tolerance for rounding.
///
/// Note: This test may be affected by parallel test execution since
/// ACTIVE_GET_REQUESTS is a global atomic counter. The test uses widened
/// tolerances to account for this.
    #[tokio::test]
    async fn test_adaptive_buffer_sizing() {
        let file_size = 32 * MI_B as i64; // 32MB file (matches issue #911 test case)
        let base_buffer = 256 * KI_B; // 256KB base buffer (typical for S3-like workloads)
        // Test cases: (concurrent_requests, description)
        // Note: Tests are ordered to work with parallel execution - starting with high concurrency
        // where additional requests from other tests have less impact
        let test_cases = vec![
            (10, "Very high concurrency: should reduce to 40% for fairness"),
            (6, "High concurrency: should reduce to 50% to prevent memory contention"),
            (3, "Medium concurrency: should reduce to 75% to balance performance"),
        ];
        for (concurrent_requests, description) in test_cases {
            // Create guards to simulate concurrent requests.
            // The guards stay alive until the end of this iteration, so the
            // global counter includes them when the buffer size is computed.
            let _guards: Vec<_> = (0..concurrent_requests)
                .map(|_| ConcurrencyManager::track_request())
                .collect();
            let buffer_size = get_concurrency_aware_buffer_size(file_size, base_buffer);
            // Allow widened range due to parallel test execution affecting global counter
            assert!(
                (64 * KI_B..=MI_B).contains(&buffer_size),
                "{description}: buffer should be in valid range 64KB-1MB, got {buffer_size} bytes"
            );
        }
    }
/// Test buffer size bounds and minimum/maximum constraints
#[tokio::test]
async fn test_buffer_size_bounds() {
// Test minimum buffer size for tiny files (<100KB uses 32KB minimum)
let small_file = 1024i64; // 1KB file
let min_buffer = get_concurrency_aware_buffer_size(small_file, 64 * KI_B);
assert!(
min_buffer >= 32 * KI_B,
"Buffer should have minimum size of 32KB for tiny files, got {min_buffer}"
);
// Test maximum buffer size (capped at 1MB when base is reasonable)
let huge_file = 10 * 1024 * MI_B as i64; // 10GB file
let max_buffer = get_concurrency_aware_buffer_size(huge_file, MI_B);
assert!(max_buffer <= MI_B, "Buffer should not exceed 1MB cap when requested, got {max_buffer}");
// Test buffer size scaling with base - when base is small, result respects the limits
let medium_file = 200 * KI_B as i64; // 200KB file (>100KB so minimum is 64KB)
let buffer = get_concurrency_aware_buffer_size(medium_file, 128 * KI_B);
assert!(
(64 * KI_B..=MI_B).contains(&buffer),
"Buffer should be between 64KB and 1MB, got {buffer}"
);
}
/// Test disk I/O permit acquisition for rate limiting
#[tokio::test]
async fn test_disk_io_permits() {
let manager = ConcurrencyManager::new();
let start = Instant::now();
// Acquire multiple permits concurrently
let handles: Vec<_> = (0..10)
.map(|_| {
let mgr = Arc::new(manager.clone());
tokio::spawn(async move {
let _permit = mgr.acquire_disk_read_permit().await;
sleep(Duration::from_millis(10)).await;
})
})
.collect();
for handle in handles {
handle.await.expect("Task should complete");
}
let elapsed = start.elapsed();
// With 64 permits, 10 concurrent tasks should complete quickly
assert!(elapsed < Duration::from_secs(1), "Should complete within 1 second, took {elapsed:?}");
}
/// Test Moka cache operations: insert, retrieve, stats, and clear.
///
/// This test validates the fundamental cache operations that enable sub-5ms
/// response times for frequently accessed objects. Moka's lock-free design
/// allows these operations to scale linearly with concurrency (see
/// test_concurrent_cache_access for performance validation).
///
/// # Cache Benefits
///
/// - Cache hit: <5ms (vs 50-200ms disk read in original issue)
/// - Lock-free concurrent access (vs LRU's RwLock bottleneck)
/// - Automatic TTL (5 min) and TTI (2 min) expiration
/// - Size-based eviction (100MB capacity, 10MB max object size)
///
/// # Moka-Specific Behaviors
///
/// Moka processes insertions and evictions asynchronously in background tasks.
/// This test includes appropriate `sleep()` calls to allow Moka time to process
/// operations before asserting on cache state.
///
/// # Test Coverage
///
/// - Initial state verification (empty cache)
/// - Object insertion and retrieval
/// - Cache statistics accuracy
/// - Miss behavior (non-existent keys)
/// - Cache clearing
    #[tokio::test]
    async fn test_moka_cache_operations() {
        let manager = ConcurrencyManager::new();
        // Initially empty cache - verify clean state
        let stats = manager.cache_stats().await;
        assert_eq!(stats.entries, 0, "New cache should have no entries");
        assert_eq!(stats.size, 0, "New cache should have zero size");
        // Cache a small object (1MB - well under 10MB limit)
        let key = "test/object1".to_string();
        let data = vec![1u8; 1024 * 1024]; // 1MB
        manager.cache_object(key.clone(), data.clone()).await;
        // Give Moka time to process the async insert operation
        sleep(Duration::from_millis(50)).await;
        // Verify it was cached successfully.
        // NOTE(review): get_cached appears to return a smart pointer (it is
        // dereferenced with `*` below) — presumably Arc<Vec<u8>>; confirm.
        let cached = manager.get_cached(&key).await;
        assert!(cached.is_some(), "Object should be cached after insert");
        assert_eq!(*cached.unwrap(), data, "Cached data should match original data exactly");
        // Verify stats updated correctly
        let stats = manager.cache_stats().await;
        assert_eq!(stats.entries, 1, "Should have exactly 1 entry after insert");
        assert!(
            stats.size >= data.len(),
            "Cache size should be at least data length (may include overhead)"
        );
        // Try to get non-existent key - should miss cleanly
        let missing = manager.get_cached("missing/key").await;
        assert!(missing.is_none(), "Missing key should return None (not panic)");
        // Clear cache and verify cleanup
        manager.clear_cache().await;
        sleep(Duration::from_millis(50)).await; // Allow Moka to process invalidations
        let stats = manager.cache_stats().await;
        assert_eq!(stats.entries, 0, "Cache should be empty after clear operation");
    }
/// Test that large objects are not cached (exceed max object size)
#[tokio::test]
async fn test_large_object_not_cached() {
let manager = ConcurrencyManager::new();
// Try to cache a large object (> 10MB)
let key = "test/large".to_string();
let large_data = vec![1u8; 15 * MI_B]; // 15MB
manager.cache_object(key.clone(), large_data).await;
sleep(Duration::from_millis(50)).await;
// Should not be cached due to size limit
let cached = manager.get_cached(&key).await;
assert!(cached.is_none(), "Large object should not be cached");
// Cache stats should still be empty
let stats = manager.cache_stats().await;
assert_eq!(stats.entries, 0, "No objects should be cached");
}
/// Test Moka's automatic eviction under memory pressure
    #[tokio::test]
    async fn test_moka_cache_eviction() {
        let manager = ConcurrencyManager::new();
        // Cache multiple objects to exceed the limit
        let object_size = 6 * MI_B; // 6MB each
        let num_objects = 20; // Total 120MB > 100MB limit
        for i in 0..num_objects {
            let key = format!("test/object{i}");
            let data = vec![i as u8; object_size];
            manager.cache_object(key, data).await;
            sleep(Duration::from_millis(10)).await; // Give Moka time to process
        }
        // Give Moka time to evict (eviction runs in background tasks)
        sleep(Duration::from_millis(200)).await;
        // Verify cache size is within limit (Moka manages this automatically)
        let stats = manager.cache_stats().await;
        assert!(
            stats.size <= stats.max_size,
            "Moka should keep cache size {} within max {}",
            stats.size,
            stats.max_size
        );
        // Some objects should have been evicted.
        // 120MB of inserts into a 100MB cache cannot all survive.
        assert!(
            stats.entries < num_objects,
            "Expected eviction, but all {} objects might still be cached (entries: {})",
            num_objects,
            stats.entries
        );
    }
/// Test batch cache operations for efficient multi-object retrieval
    #[tokio::test]
    async fn test_cache_batch_operations() {
        let manager = ConcurrencyManager::new();
        // Cache multiple objects
        for i in 0..10 {
            let key = format!("batch/object{i}");
            let data = vec![i as u8; 100 * KI_B]; // 100KB each
            manager.cache_object(key, data).await;
        }
        sleep(Duration::from_millis(100)).await;
        // Test batch get
        let keys: Vec<String> = (0..10).map(|i| format!("batch/object{i}")).collect();
        let results = manager.get_cached_batch(&keys).await;
        assert_eq!(results.len(), 10, "Should return result for each key");
        // Verify all objects were retrieved.
        // Tolerate a couple of misses: Moka processes inserts asynchronously,
        // so a slow machine may not have all 10 visible yet.
        let hits = results.iter().filter(|r| r.is_some()).count();
        assert!(hits >= 8, "Most objects should be cached (got {hits}/10 hits)");
        // Mix of existing and non-existing keys: the batch call must return
        // one slot per requested key, hit or miss.
        let mixed_keys = vec![
            "batch/object0".to_string(),
            "nonexistent1".to_string(),
            "batch/object5".to_string(),
            "nonexistent2".to_string(),
        ];
        let mixed_results = manager.get_cached_batch(&mixed_keys).await;
        assert_eq!(mixed_results.len(), 4, "Should return result for each key");
    }
/// Test cache warming (pre-population)
#[tokio::test]
async fn test_cache_warming() {
let manager = ConcurrencyManager::new();
// Prepare objects for warming
let objects: Vec<(String, Vec<u8>)> = (0..5)
.map(|i| (format!("warm/object{i}"), vec![i as u8; 500 * KI_B]))
.collect();
// Warm cache
manager.warm_cache(objects.clone()).await;
sleep(Duration::from_millis(100)).await;
// Verify all objects are cached
for (key, data) in objects {
let cached = manager.get_cached(&key).await;
assert!(cached.is_some(), "Warmed object {key} should be cached");
assert_eq!(*cached.unwrap(), data, "Cached data for {key} should match");
}
let stats = manager.cache_stats().await;
assert_eq!(stats.entries, 5, "Should have 5 warmed objects");
}
/// Test hot keys tracking with access count
    #[tokio::test]
    async fn test_hot_keys_tracking() {
        let manager = ConcurrencyManager::new();
        // Cache objects with different access patterns
        for i in 0..5 {
            let key = format!("hot/object{i}");
            let data = vec![i as u8; 100 * KI_B];
            manager.cache_object(key, data).await;
        }
        sleep(Duration::from_millis(50)).await;
        // Simulate access patterns (object 0 and 1 are hot):
        // object0 = 10 hits, object1 = 5 hits, object2 = 2 hits.
        for _ in 0..10 {
            let _ = manager.get_cached("hot/object0").await;
        }
        for _ in 0..5 {
            let _ = manager.get_cached("hot/object1").await;
        }
        for _ in 0..2 {
            let _ = manager.get_cached("hot/object2").await;
        }
        // Get hot keys (each entry is a (key, access_count) pair)
        let hot_keys = manager.get_hot_keys(3).await;
        assert!(hot_keys.len() >= 3, "Should return at least 3 keys, got {}", hot_keys.len());
        // Verify hot keys are sorted by access count (descending)
        if hot_keys.len() >= 3 {
            assert!(hot_keys[0].1 >= hot_keys[1].1, "Hot keys should be sorted by access count");
            assert!(hot_keys[1].1 >= hot_keys[2].1, "Hot keys should be sorted by access count");
        }
        // Most accessed should have highest count
        let top_key = &hot_keys[0];
        assert!(top_key.1 >= 10, "Most accessed object should have at least 10 hits, got {}", top_key.1);
    }
/// Test cache removal functionality
#[tokio::test]
async fn test_cache_removal() {
let manager = ConcurrencyManager::new();
// Cache an object
let key = "remove/test".to_string();
let data = vec![1u8; 100 * KI_B];
manager.cache_object(key.clone(), data).await;
sleep(Duration::from_millis(50)).await;
// Verify it's cached
assert!(manager.is_cached(&key).await, "Object should be cached initially");
// Remove it
let removed = manager.remove_cached(&key).await;
assert!(removed, "Should successfully remove cached object");
sleep(Duration::from_millis(50)).await;
// Verify it's gone
assert!(!manager.is_cached(&key).await, "Object should no longer be cached");
// Try to remove non-existent key
let not_removed = manager.remove_cached("nonexistent").await;
assert!(!not_removed, "Should return false for non-existent key");
}
/// Test advanced buffer sizing with file patterns
    #[tokio::test]
    async fn test_advanced_buffer_sizing() {
        // Reset the global ACTIVE_GET_REQUESTS counter so the assertions
        // below start from a known concurrency level.
        crate::storage::concurrency::reset_active_get_requests();
        let base_buffer = 256 * KI_B; // 256KB base
        // Test small file optimization
        let small_size = get_advanced_buffer_size(128 * KI_B as i64, base_buffer, false);
        assert!(
            small_size < base_buffer,
            "Small files should use smaller buffers: {small_size} < {base_buffer}"
        );
        assert!(small_size >= 16 * KI_B, "Should not go below minimum: {small_size}");
        // Test sequential read optimization (third argument flags sequential access)
        let seq_size = get_advanced_buffer_size(32 * MI_B as i64, base_buffer, true);
        assert!(
            seq_size >= base_buffer,
            "Sequential reads should use larger buffers: {seq_size} >= {base_buffer}"
        );
        // Test large file with high concurrency.
        // The guards stay alive until the end of the test, keeping the global
        // counter at +10 while the next buffer size is computed.
        let _guards: Vec<_> = (0..10).map(|_| ConcurrencyManager::track_request()).collect();
        let large_concurrent = get_advanced_buffer_size(100 * MI_B as i64, base_buffer, false);
        assert!(
            large_concurrent <= base_buffer,
            "High concurrency should reduce buffer: {large_concurrent} <= {base_buffer}"
        );
    }
/// Test concurrent cache access performance (lock-free)
#[tokio::test]
async fn test_concurrent_cache_access() {
let manager = Arc::new(ConcurrencyManager::new());
// Pre-populate cache
for i in 0..20 {
let key = format!("concurrent/object{i}");
let data = vec![i as u8; 100 * KI_B];
manager.cache_object(key, data).await;
}
sleep(Duration::from_millis(100)).await;
let start = Instant::now();
// Simulate heavy concurrent access
let tasks: Vec<_> = (0..100)
.map(|i| {
let mgr: Arc<ConcurrencyManager> = Arc::clone(&manager);
tokio::spawn(async move {
let key = format!("concurrent/object{}", i % 20);
let _ = mgr.get_cached(&key).await;
})
})
.collect();
for task in tasks {
task.await.expect("Task should complete");
}
let elapsed = start.elapsed();
// Moka's lock-free design should handle this quickly
assert!(
elapsed < Duration::from_millis(500),
"Concurrent cache access should be fast (took {elapsed:?})"
);
}
/// Test that is_cached doesn't affect LRU order or access counts
#[tokio::test]
async fn test_is_cached_no_side_effects() {
let manager = ConcurrencyManager::new();
let key = "check/object".to_string();
let data = vec![42u8; 100 * KI_B];
manager.cache_object(key.clone(), data).await;
sleep(Duration::from_millis(50)).await;
// Check if cached multiple times
for _ in 0..10 {
assert!(manager.is_cached(&key).await, "Object should be cached");
}
// Access count should be minimal (contains check shouldn't increment much)
let hot_keys = manager.get_hot_keys(10).await;
if let Some(entry) = hot_keys.iter().find(|(k, _)| k == &key) {
// is_cached should not increment access_count significantly
assert!(entry.1 <= 2, "is_cached should not inflate access count, got {}", entry.1);
}
}
/// Test cache hit rate calculation
#[tokio::test]
async fn test_cache_hit_rate() {
let manager = ConcurrencyManager::new();
// Cache some objects
for i in 0..5 {
let key = format!("hitrate/object{i}");
let data = vec![i as u8; 100 * KI_B];
manager.cache_object(key, data).await;
}
sleep(Duration::from_millis(100)).await;
// Mix of hits and misses
for i in 0..10 {
let key = if i < 5 {
format!("hitrate/object{i}") // Hit
} else {
format!("hitrate/missing{i}") // Miss
};
let _ = manager.get_cached(&key).await;
}
// Hit rate should be around 50%
let hit_rate = manager.cache_hit_rate();
assert!((40.0..=60.0).contains(&hit_rate), "Hit rate should be ~50%, got {hit_rate:.1}%");
}
/// Test TTL expiration (Moka automatic cleanup)
#[tokio::test]
async fn test_ttl_expiration() {
// Note: This test would require waiting 5 minutes for TTL
// We'll just verify the cache is configured with TTL
let manager = ConcurrencyManager::new();
let key = "ttl/test".to_string();
let data = vec![1u8; 100 * KI_B];
manager.cache_object(key.clone(), data).await;
sleep(Duration::from_millis(50)).await;
// Verify object is initially cached
assert!(manager.is_cached(&key).await, "Object should be cached");
// In a real scenario, after TTL (5 min) or TTI (2 min) expires,
// Moka would automatically remove the entry
// For testing, we just verify the mechanism is in place
let stats = manager.cache_stats().await;
assert!(stats.max_size > 0, "Cache should be configured with limits");
}
/// Benchmark: Compare performance of single vs concurrent cache access
    #[tokio::test]
    async fn bench_concurrent_cache_performance() {
        let manager = Arc::new(ConcurrencyManager::new());
        // Pre-populate 50 objects of 500KB each
        for i in 0..50 {
            let key = format!("bench/object{i}");
            let data = vec![i as u8; 500 * KI_B];
            manager.cache_object(key, data).await;
        }
        sleep(Duration::from_millis(100)).await;
        // Sequential access: 100 lookups, one after another
        let seq_start = Instant::now();
        for i in 0..100 {
            let key = format!("bench/object{}", i % 50);
            let _ = manager.get_cached(&key).await;
        }
        let seq_duration = seq_start.elapsed();
        // Concurrent access: the same 100 lookups as spawned tasks
        let conc_start = Instant::now();
        let tasks: Vec<_> = (0..100)
            .map(|i| {
                let mgr: Arc<ConcurrencyManager> = Arc::clone(&manager);
                tokio::spawn(async move {
                    let key = format!("bench/object{}", i % 50);
                    let _ = mgr.get_cached(&key).await;
                })
            })
            .collect();
        for task in tasks {
            task.await.expect("Task should complete");
        }
        let conc_duration = conc_start.elapsed();
        println!(
            "Sequential: {:?}, Concurrent: {:?}, Speedup: {:.2}x",
            seq_duration,
            conc_duration,
            seq_duration.as_secs_f64() / conc_duration.as_secs_f64()
        );
        // Sanity: both paths must have done measurable work
        assert!(seq_duration > Duration::from_micros(0), "Sequential access should take some time");
        assert!(conc_duration > Duration::from_micros(0), "Concurrent access should take some time");
        // Record performance indicators for analysis, but not as a basis for testing failure
        // (machine-to-machine variance makes a hard speedup threshold flaky)
        let speedup_ratio = seq_duration.as_secs_f64() / conc_duration.as_secs_f64();
        if speedup_ratio < 0.8 {
            println!("Warning: Concurrent access is significantly slower than sequential ({speedup_ratio:.2}x)");
        } else if speedup_ratio > 1.2 {
            println!("Info: Concurrent access is significantly faster than sequential ({speedup_ratio:.2}x)");
        } else {
            println!("Info: Performance difference between concurrent and sequential access is modest ({speedup_ratio:.2}x)");
        }
    }
/// Test cache writeback mechanism
///
/// This test validates that the cache_object method correctly stores objects
/// and they can be retrieved later. This simulates the cache writeback flow
/// implemented in ecfs.rs for objects meeting the caching criteria.
///
/// # Cache Criteria (from ecfs.rs)
///
/// Objects are cached when:
/// - No range/part request (full object)
/// - Object size <= 10MB (max_object_size threshold)
/// - Not encrypted (SSE-C or managed encryption)
///
/// This test verifies the underlying cache_object → get_cached flow works correctly.
    #[tokio::test]
    async fn test_cache_writeback_flow() {
        let manager = ConcurrencyManager::new();
        // Simulate cache writeback for a small object (1MB)
        let cache_key = "bucket/key".to_string();
        let object_data = vec![42u8; MI_B]; // 1MB object
        // Verify not in cache initially (this lookup also counts as a miss
        // for the hit-rate statistics checked at the end)
        let initial = manager.get_cached(&cache_key).await;
        assert!(initial.is_none(), "Object should not be in cache initially");
        // Simulate cache writeback (as done in ecfs.rs background task)
        manager.cache_object(cache_key.clone(), object_data.clone()).await;
        // Give Moka time to process the async insert
        sleep(Duration::from_millis(50)).await;
        // Verify object is now cached and byte-identical
        let cached = manager.get_cached(&cache_key).await;
        assert!(cached.is_some(), "Object should be cached after writeback");
        assert_eq!(*cached.unwrap(), object_data, "Cached data should match original");
        // Verify cache stats
        let stats = manager.cache_stats().await;
        assert_eq!(stats.entries, 1, "Should have exactly 1 cached entry");
        assert!(stats.size >= object_data.len(), "Cache size should reflect object size");
        // Second access should hit cache
        let second_access = manager.get_cached(&cache_key).await;
        assert!(second_access.is_some(), "Second access should hit cache");
        // Verify hit count increased
        let hit_rate = manager.cache_hit_rate();
        assert!(hit_rate > 0.0, "Hit rate should be positive after cache hit");
    }
/// Test cache writeback respects size limits
///
/// Objects larger than 10MB should NOT be cached, even if cache_object is called.
/// This validates the size check in HotObjectCache::put().
#[tokio::test]
async fn test_cache_writeback_size_limit() {
let manager = ConcurrencyManager::new();
// Try to cache an object that exceeds the 10MB limit
let large_key = "bucket/large_object".to_string();
let large_data = vec![0u8; 12 * MI_B]; // 12MB > 10MB limit
manager.cache_object(large_key.clone(), large_data).await;
sleep(Duration::from_millis(50)).await;
// Should NOT be cached due to size limit
let cached = manager.get_cached(&large_key).await;
assert!(cached.is_none(), "Large object should not be cached");
// Cache should remain empty
let stats = manager.cache_stats().await;
assert_eq!(stats.entries, 0, "No entries should be cached");
}
/// Test cache writeback with concurrent requests
///
/// Simulates multiple concurrent GetObject requests all trying to cache
/// the same object. Moka should handle this gracefully without data races.
#[tokio::test]
async fn test_cache_writeback_concurrent() {
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/storage/tonic_service.rs | rustfs/src/storage/tonic_service.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use bytes::Bytes;
use futures::Stream;
use futures_util::future::join_all;
use rmp_serde::{Deserializer, Serializer};
use rustfs_common::{GLOBAL_LOCAL_NODE_NAME, heal_channel::HealOpts};
use rustfs_ecstore::{
admin_server_info::get_local_server_property,
bucket::{metadata::load_bucket_metadata, metadata_sys},
disk::{
DeleteOptions, DiskAPI, DiskInfoOptions, DiskStore, FileInfoVersions, ReadMultipleReq, ReadOptions, UpdateMetadataOpts,
error::DiskError,
},
metrics_realtime::{CollectMetricsOpts, MetricType, collect_local_metrics},
new_object_layer_fn,
rpc::{LocalPeerS3Client, PeerS3Client},
store::{all_local_disk_path, find_local_disk},
store_api::{BucketOptions, DeleteBucketOptions, MakeBucketOptions, StorageAPI},
};
use rustfs_filemeta::{FileInfo, MetacacheReader};
use rustfs_iam::{get_global_iam_sys, store::UserType};
use rustfs_lock::{LockClient, LockRequest};
use rustfs_madmin::health::{
get_cpus, get_mem_info, get_os_info, get_partitions, get_proc_info, get_sys_config, get_sys_errors, get_sys_services,
};
use rustfs_madmin::net::get_net_info;
use rustfs_protos::{
models::{PingBody, PingBodyBuilder},
proto_gen::node_service::{node_service_server::NodeService as Node, *},
};
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, io::Cursor, pin::Pin, sync::Arc};
use tokio::spawn;
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use tonic::{Request, Response, Status, Streaming};
use tracing::{debug, error, info, warn};
/// Boxed, pinned stream of gRPC responses used by server-streaming methods.
type ResponseStream<T> = Pin<Box<dyn Stream<Item = Result<T, Status>> + Send>>;
// fn match_for_io_error(err_status: &Status) -> Option<&std::io::Error> {
// let mut err: &(dyn Error + 'static) = err_status;
// loop {
// if let Some(io_err) = err.downcast_ref::<std::io::Error>() {
// return Some(io_err);
// }
// // h2::Error do not expose std::io::Error with `source()`
// // https://github.com/hyperium/h2/pull/462
// if let Some(h2_err) = err.downcast_ref::<h2::Error>() {
// if let Some(io_err) = h2_err.get_io() {
// return Some(io_err);
// }
// }
// err = err.source()?;
// }
// }
/// gRPC service backing inter-node RPCs for this node: bucket and disk
/// operations are executed against the local peer, and lock RPCs are served
/// by a process-local lock backend.
#[derive(Debug)]
pub struct NodeService {
    /// S3-level operations executed against this node's local peer.
    local_peer: LocalPeerS3Client,
    /// Local lock client used to serve distributed-lock requests.
    lock_manager: Arc<rustfs_lock::LocalClient>,
}
/// Builds a [`NodeService`] wired to the local peer client and a fresh
/// process-local lock backend.
pub fn make_server() -> NodeService {
    NodeService {
        local_peer: LocalPeerS3Client::new(None, None),
        lock_manager: Arc::new(rustfs_lock::LocalClient::new()),
    }
}
impl NodeService {
    /// Resolves a local disk by its path; `None` if this node does not own it.
    async fn find_disk(&self, disk_path: &String) -> Option<DiskStore> {
        find_local_disk(disk_path).await
    }
    /// Returns the paths of all disks attached to this node.
    async fn all_disk(&self) -> Vec<String> {
        all_local_disk_path().await
    }
}
#[tonic::async_trait]
impl Node for NodeService {
async fn ping(&self, request: Request<PingRequest>) -> Result<Response<PingResponse>, Status> {
debug!("PING");
let ping_req = request.into_inner();
let ping_body = flatbuffers::root::<PingBody>(&ping_req.body);
if let Err(e) = ping_body {
error!("{}", e);
} else {
info!("ping_req:body(flatbuffer): {:?}", ping_body);
}
let mut fbb = flatbuffers::FlatBufferBuilder::new();
let payload = fbb.create_vector(b"hello, caller");
let mut builder = PingBodyBuilder::new(&mut fbb);
builder.add_payload(payload);
let root = builder.finish();
fbb.finish(root, None);
let finished_data = fbb.finished_data();
Ok(Response::new(PingResponse {
version: 1,
body: Bytes::copy_from_slice(finished_data),
}))
}
async fn heal_bucket(&self, request: Request<HealBucketRequest>) -> Result<Response<HealBucketResponse>, Status> {
debug!("heal bucket");
let request = request.into_inner();
let options = match serde_json::from_str::<HealOpts>(&request.options) {
Ok(options) => options,
Err(err) => {
return Ok(Response::new(HealBucketResponse {
success: false,
error: Some(DiskError::other(format!("decode HealOpts failed: {err}")).into()),
}));
}
};
match self.local_peer.heal_bucket(&request.bucket, &options).await {
Ok(_) => Ok(Response::new(HealBucketResponse {
success: true,
error: None,
})),
Err(err) => Ok(Response::new(HealBucketResponse {
success: false,
error: Some(err.into()),
})),
}
}
async fn list_bucket(&self, request: Request<ListBucketRequest>) -> Result<Response<ListBucketResponse>, Status> {
debug!("list bucket");
let request = request.into_inner();
let options = match serde_json::from_str::<BucketOptions>(&request.options) {
Ok(options) => options,
Err(err) => {
return Ok(Response::new(ListBucketResponse {
success: false,
bucket_infos: Vec::new(),
error: Some(DiskError::other(format!("decode BucketOptions failed: {err}")).into()),
}));
}
};
match self.local_peer.list_bucket(&options).await {
Ok(bucket_infos) => {
let bucket_infos = bucket_infos
.into_iter()
.filter_map(|bucket_info| serde_json::to_string(&bucket_info).ok())
.collect();
Ok(Response::new(ListBucketResponse {
success: true,
bucket_infos,
error: None,
}))
}
Err(err) => Ok(Response::new(ListBucketResponse {
success: false,
bucket_infos: Vec::new(),
error: Some(err.into()),
})),
}
}
async fn make_bucket(&self, request: Request<MakeBucketRequest>) -> Result<Response<MakeBucketResponse>, Status> {
debug!("make bucket");
let request = request.into_inner();
let options = match serde_json::from_str::<MakeBucketOptions>(&request.options) {
Ok(options) => options,
Err(err) => {
return Ok(Response::new(MakeBucketResponse {
success: false,
error: Some(DiskError::other(format!("decode MakeBucketOptions failed: {err}")).into()),
}));
}
};
match self.local_peer.make_bucket(&request.name, &options).await {
Ok(_) => Ok(Response::new(MakeBucketResponse {
success: true,
error: None,
})),
Err(err) => Ok(Response::new(MakeBucketResponse {
success: false,
error: Some(err.into()),
})),
}
}
async fn get_bucket_info(&self, request: Request<GetBucketInfoRequest>) -> Result<Response<GetBucketInfoResponse>, Status> {
debug!("get bucket info");
let request = request.into_inner();
let options = match serde_json::from_str::<BucketOptions>(&request.options) {
Ok(options) => options,
Err(err) => {
return Ok(Response::new(GetBucketInfoResponse {
success: false,
bucket_info: String::new(),
error: Some(DiskError::other(format!("decode BucketOptions failed: {err}")).into()),
}));
}
};
match self.local_peer.get_bucket_info(&request.bucket, &options).await {
Ok(bucket_info) => {
let bucket_info = match serde_json::to_string(&bucket_info) {
Ok(bucket_info) => bucket_info,
Err(err) => {
return Ok(Response::new(GetBucketInfoResponse {
success: false,
bucket_info: String::new(),
error: Some(DiskError::other(format!("encode data failed: {err}")).into()),
}));
}
};
Ok(Response::new(GetBucketInfoResponse {
success: true,
bucket_info,
error: None,
}))
}
// println!("vuc")
Err(err) => Ok(Response::new(GetBucketInfoResponse {
success: false,
bucket_info: String::new(),
error: Some(err.into()),
})),
}
}
async fn delete_bucket(&self, request: Request<DeleteBucketRequest>) -> Result<Response<DeleteBucketResponse>, Status> {
debug!("make bucket");
let request = request.into_inner();
match self
.local_peer
.delete_bucket(
&request.bucket,
&DeleteBucketOptions {
force: false,
..Default::default()
},
)
.await
{
Ok(_) => Ok(Response::new(DeleteBucketResponse {
success: true,
error: None,
})),
Err(err) => Ok(Response::new(DeleteBucketResponse {
success: false,
error: Some(err.into()),
})),
}
}
async fn read_all(&self, request: Request<ReadAllRequest>) -> Result<Response<ReadAllResponse>, Status> {
debug!("read all");
let request = request.into_inner();
if let Some(disk) = self.find_disk(&request.disk).await {
match disk.read_all(&request.volume, &request.path).await {
Ok(data) => Ok(Response::new(ReadAllResponse {
success: true,
data,
error: None,
})),
Err(err) => Ok(Response::new(ReadAllResponse {
success: false,
data: Bytes::new(),
error: Some(err.into()),
})),
}
} else {
Ok(Response::new(ReadAllResponse {
success: false,
data: Bytes::new(),
error: Some(DiskError::other("can not find disk".to_string()).into()),
}))
}
}
async fn write_all(&self, request: Request<WriteAllRequest>) -> Result<Response<WriteAllResponse>, Status> {
let request = request.into_inner();
if let Some(disk) = self.find_disk(&request.disk).await {
match disk.write_all(&request.volume, &request.path, request.data).await {
Ok(_) => Ok(Response::new(WriteAllResponse {
success: true,
error: None,
})),
Err(err) => Ok(Response::new(WriteAllResponse {
success: false,
error: Some(err.into()),
})),
}
} else {
Ok(Response::new(WriteAllResponse {
success: false,
error: Some(DiskError::other("can not find disk".to_string()).into()),
}))
}
}
/// Handles the `Delete` RPC: decodes the JSON-encoded `DeleteOptions`
/// and removes the object/prefix on the addressed disk. Decode errors,
/// an unknown disk, and disk failures are all reported in-band.
async fn delete(&self, request: Request<DeleteRequest>) -> Result<Response<DeleteResponse>, Status> {
    let req = request.into_inner();
    let Some(disk) = self.find_disk(&req.disk).await else {
        return Ok(Response::new(DeleteResponse {
            success: false,
            error: Some(DiskError::other("can not find disk".to_string()).into()),
        }));
    };
    // DeleteOptions travels as a JSON string on the wire.
    let options = match serde_json::from_str::<DeleteOptions>(&req.options) {
        Ok(options) => options,
        Err(err) => {
            return Ok(Response::new(DeleteResponse {
                success: false,
                error: Some(DiskError::other(format!("decode DeleteOptions failed: {err}")).into()),
            }));
        }
    };
    let resp = match disk.delete(&req.volume, &req.path, options).await {
        Ok(_) => DeleteResponse {
            success: true,
            error: None,
        },
        Err(err) => DeleteResponse {
            success: false,
            error: Some(err.into()),
        },
    };
    Ok(Response::new(resp))
}
/// Handles the `VerifyFile` RPC.
///
/// Decodes the `FileInfo` from its JSON wire form, asks the target disk
/// to verify the file's parts, and re-encodes the resulting check-parts
/// response as JSON. All failures (missing disk, decode/encode errors,
/// disk errors) are reported in-band via the response `error` field;
/// the gRPC call itself always returns `Ok`.
async fn verify_file(&self, request: Request<VerifyFileRequest>) -> Result<Response<VerifyFileResponse>, Status> {
    let request = request.into_inner();
    if let Some(disk) = self.find_disk(&request.disk).await {
        // FileInfo travels as a JSON string on the wire.
        let file_info = match serde_json::from_str::<FileInfo>(&request.file_info) {
            Ok(file_info) => file_info,
            Err(err) => {
                return Ok(Response::new(VerifyFileResponse {
                    success: false,
                    check_parts_resp: "".to_string(),
                    error: Some(DiskError::other(format!("decode FileInfo failed: {err}")).into()),
                }));
            }
        };
        match disk.verify_file(&request.volume, &request.path, &file_info).await {
            Ok(check_parts_resp) => {
                // Serialize the verification result back to JSON for transport.
                let check_parts_resp = match serde_json::to_string(&check_parts_resp) {
                    Ok(check_parts_resp) => check_parts_resp,
                    Err(err) => {
                        return Ok(Response::new(VerifyFileResponse {
                            success: false,
                            check_parts_resp: String::new(),
                            error: Some(DiskError::other(format!("encode data failed: {err}")).into()),
                        }));
                    }
                };
                Ok(Response::new(VerifyFileResponse {
                    success: true,
                    check_parts_resp,
                    error: None,
                }))
            }
            Err(err) => Ok(Response::new(VerifyFileResponse {
                success: false,
                check_parts_resp: "".to_string(),
                error: Some(err.into()),
            })),
        }
    } else {
        // The addressed disk is not managed by this node.
        Ok(Response::new(VerifyFileResponse {
            success: false,
            check_parts_resp: "".to_string(),
            error: Some(DiskError::other("can not find disk".to_string()).into()),
        }))
    }
}
async fn read_parts(&self, request: Request<ReadPartsRequest>) -> Result<Response<ReadPartsResponse>, Status> {
let request = request.into_inner();
if let Some(disk) = self.find_disk(&request.disk).await {
match disk.read_parts(&request.bucket, &request.paths).await {
Ok(data) => {
let data = match rmp_serde::to_vec(&data) {
Ok(data) => data,
Err(err) => {
return Ok(Response::new(ReadPartsResponse {
success: false,
object_part_infos: Bytes::new(),
error: Some(DiskError::other(format!("encode data failed: {err}")).into()),
}));
}
};
Ok(Response::new(ReadPartsResponse {
success: true,
object_part_infos: Bytes::copy_from_slice(&data),
error: None,
}))
}
Err(err) => Ok(Response::new(ReadPartsResponse {
success: false,
object_part_infos: Bytes::new(),
error: Some(err.into()),
})),
}
} else {
Ok(Response::new(ReadPartsResponse {
success: false,
object_part_infos: Bytes::new(),
error: Some(DiskError::other("can not find disk".to_string()).into()),
}))
}
}
/// Handles the `CheckParts` RPC (structural twin of `verify_file`).
///
/// Decodes the `FileInfo` from JSON, runs the disk's part check, and
/// returns the JSON-encoded result. Failures are reported in-band via
/// the response `error` field; the gRPC call itself always returns `Ok`.
async fn check_parts(&self, request: Request<CheckPartsRequest>) -> Result<Response<CheckPartsResponse>, Status> {
    let request = request.into_inner();
    if let Some(disk) = self.find_disk(&request.disk).await {
        // FileInfo travels as a JSON string on the wire.
        let file_info = match serde_json::from_str::<FileInfo>(&request.file_info) {
            Ok(file_info) => file_info,
            Err(err) => {
                return Ok(Response::new(CheckPartsResponse {
                    success: false,
                    check_parts_resp: "".to_string(),
                    error: Some(DiskError::other(format!("decode FileInfo failed: {err}")).into()),
                }));
            }
        };
        match disk.check_parts(&request.volume, &request.path, &file_info).await {
            Ok(check_parts_resp) => {
                // Serialize the check result back to JSON for transport.
                let check_parts_resp = match serde_json::to_string(&check_parts_resp) {
                    Ok(check_parts_resp) => check_parts_resp,
                    Err(err) => {
                        return Ok(Response::new(CheckPartsResponse {
                            success: false,
                            check_parts_resp: String::new(),
                            error: Some(DiskError::other(format!("encode data failed: {err}")).into()),
                        }));
                    }
                };
                Ok(Response::new(CheckPartsResponse {
                    success: true,
                    check_parts_resp,
                    error: None,
                }))
            }
            Err(err) => Ok(Response::new(CheckPartsResponse {
                success: false,
                check_parts_resp: "".to_string(),
                error: Some(err.into()),
            })),
        }
    } else {
        // The addressed disk is not managed by this node.
        Ok(Response::new(CheckPartsResponse {
            success: false,
            check_parts_resp: "".to_string(),
            error: Some(DiskError::other("can not find disk".to_string()).into()),
        }))
    }
}
/// Handles the `RenamePart` RPC: moves a part file between
/// volume/path pairs on the addressed disk, carrying its metadata
/// along. Failures are reported via the response `error` field.
async fn rename_part(&self, request: Request<RenamePartRequest>) -> Result<Response<RenamePartResponse>, Status> {
    let req = request.into_inner();
    let Some(disk) = self.find_disk(&req.disk).await else {
        return Ok(Response::new(RenamePartResponse {
            success: false,
            error: Some(DiskError::other("can not find disk".to_string()).into()),
        }));
    };
    let outcome = disk
        .rename_part(&req.src_volume, &req.src_path, &req.dst_volume, &req.dst_path, req.meta)
        .await;
    let resp = match outcome {
        Ok(_) => RenamePartResponse {
            success: true,
            error: None,
        },
        Err(err) => RenamePartResponse {
            success: false,
            error: Some(err.into()),
        },
    };
    Ok(Response::new(resp))
}
/// Handles the `RenameFile` RPC: moves an object between volume/path
/// pairs on the addressed disk. Failures are reported via the response
/// `error` field.
async fn rename_file(&self, request: Request<RenameFileRequest>) -> Result<Response<RenameFileResponse>, Status> {
    let req = request.into_inner();
    let Some(disk) = self.find_disk(&req.disk).await else {
        return Ok(Response::new(RenameFileResponse {
            success: false,
            error: Some(DiskError::other("can not find disk".to_string()).into()),
        }));
    };
    let outcome = disk
        .rename_file(&req.src_volume, &req.src_path, &req.dst_volume, &req.dst_path)
        .await;
    let resp = match outcome {
        Ok(_) => RenameFileResponse {
            success: true,
            error: None,
        },
        Err(err) => RenameFileResponse {
            success: false,
            error: Some(err.into()),
        },
    };
    Ok(Response::new(resp))
}
/// Handles the unary `Write` RPC.
///
/// Currently a stub: calling it aborts with `unimplemented!("write")`.
/// The commented-out block below is a retained draft implementation
/// (lazily picks append vs. create, then performs a single write) —
/// presumably superseded by the streaming path; confirm before deleting.
async fn write(&self, _request: Request<WriteRequest>) -> Result<Response<WriteResponse>, Status> {
    unimplemented!("write");
    // let request = request.into_inner();
    // if let Some(disk) = self.find_disk(&request.disk).await {
    //     let file_writer = if request.is_append {
    //         disk.append_file(&request.volume, &request.path).await
    //     } else {
    //         disk.create_file("", &request.volume, &request.path, 0).await
    //     };
    //     match file_writer {
    //         Ok(mut file_writer) => match file_writer.write(&request.data).await {
    //             Ok(_) => Ok(Response::new(WriteResponse {
    //                 success: true,
    //                 error: None,
    //             })),
    //             Err(err) => Ok(Response::new(WriteResponse {
    //                 success: false,
    //                 error: Some(err_to_proto_err(&err, &format!("write failed: {}", err))),
    //             })),
    //         },
    //         Err(err) => Ok(Response::new(WriteResponse {
    //             success: false,
    //             error: Some(err_to_proto_err(&err, &format!("get writer failed: {}", err))),
    //         })),
    //     }
    // } else {
    //     Ok(Response::new(WriteResponse {
    //         success: false,
    //         error: Some(err_to_proto_err(
    //             &EcsError::new(StorageError::InvalidArgument(Default::default(), Default::default(), Default::default())),
    //             "can not find disk",
    //         )),
    //     }))
    // }
}
// Server-streaming response type for the `WriteStream` RPC.
type WriteStreamStream = ResponseStream<WriteResponse>;

/// Handles the client-streaming `WriteStream` RPC.
///
/// Currently a stub: it logs the call and aborts with
/// `unimplemented!("write_stream")`. The commented-out block below is a
/// retained draft implementation (opens the file writer lazily on the
/// first chunk, then writes each chunk and reports per-chunk results
/// over the response channel) — confirm before deleting.
async fn write_stream(
    &self,
    _request: Request<Streaming<WriteRequest>>,
) -> Result<Response<Self::WriteStreamStream>, Status> {
    info!("write_stream");
    unimplemented!("write_stream");
    // let mut in_stream = request.into_inner();
    // let (tx, rx) = mpsc::channel(128);
    // tokio::spawn(async move {
    //     let mut file_ref = None;
    //     while let Some(result) = in_stream.next().await {
    //         match result {
    //             // Ok(v) => tx
    //             //     .send(Ok(EchoResponse { message: v.message }))
    //             //     .await
    //             //     .expect("working rx"),
    //             Ok(v) => {
    //                 match file_ref.as_ref() {
    //                     Some(_) => (),
    //                     None => {
    //                         if let Some(disk) = find_local_disk(&v.disk).await {
    //                             let file_writer = if v.is_append {
    //                                 disk.append_file(&v.volume, &v.path).await
    //                             } else {
    //                                 disk.create_file("", &v.volume, &v.path, 0).await
    //                             };
    //                             match file_writer {
    //                                 Ok(file_writer) => file_ref = Some(file_writer),
    //                                 Err(err) => {
    //                                     tx.send(Ok(WriteResponse {
    //                                         success: false,
    //                                         error: Some(err_to_proto_err(
    //                                             &err,
    //                                             &format!("get file writer failed: {}", err),
    //                                         )),
    //                                     }))
    //                                     .await
    //                                     .expect("working rx");
    //                                     break;
    //                                 }
    //                             }
    //                         } else {
    //                             tx.send(Ok(WriteResponse {
    //                                 success: false,
    //                                 error: Some(err_to_proto_err(
    //                                     &EcsError::new(StorageError::InvalidArgument(
    //                                         Default::default(),
    //                                         Default::default(),
    //                                         Default::default(),
    //                                     )),
    //                                     "can not find disk",
    //                                 )),
    //                             }))
    //                             .await
    //                             .expect("working rx");
    //                             break;
    //                         }
    //                     }
    //                 };
    //                 match file_ref.as_mut().unwrap().write(&v.data).await {
    //                     Ok(_) => tx.send(Ok(WriteResponse {
    //                         success: true,
    //                         error: None,
    //                     })),
    //                     Err(err) => tx.send(Ok(WriteResponse {
    //                         success: false,
    //                         error: Some(err_to_proto_err(&err, &format!("write failed: {}", err))),
    //                     })),
    //                 }
    //                 .await
    //                 .unwrap();
    //             }
    //             Err(err) => {
    //                 if let Some(io_err) = match_for_io_error(&err) {
    //                     if io_err.kind() == ErrorKind::BrokenPipe {
    //                         // here you can handle special case when client
    //                         // disconnected in unexpected way
    //                         eprintln!("\tclient disconnected: broken pipe");
    //                         break;
    //                     }
    //                 }
    //                 match tx.send(Err(err)).await {
    //                     Ok(_) => (),
    //                     Err(_err) => break, // response was dropped
    //                 }
    //             }
    //         }
    //     }
    //     println!("\tstream ended");
    // });
    // let out_stream = ReceiverStream::new(rx);
    // Ok(Response::new(Box::pin(out_stream)))
}
type ReadAtStream = ResponseStream<ReadAtResponse>;
async fn read_at(&self, _request: Request<Streaming<ReadAtRequest>>) -> Result<Response<Self::ReadAtStream>, Status> {
info!("read_at");
unimplemented!("read_at");
// let mut in_stream = request.into_inner();
// let (tx, rx) = mpsc::channel(128);
// tokio::spawn(async move {
// let mut file_ref = None;
// while let Some(result) = in_stream.next().await {
// match result {
// Ok(v) => {
// match file_ref.as_ref() {
// Some(_) => (),
// None => {
// if let Some(disk) = find_local_disk(&v.disk).await {
// match disk.read_file(&v.volume, &v.path).await {
// Ok(file_reader) => file_ref = Some(file_reader),
// Err(err) => {
// tx.send(Ok(ReadAtResponse {
// success: false,
// data: Vec::new(),
// error: Some(err_to_proto_err(&err, &format!("read file failed: {}", err))),
// read_size: -1,
// }))
// .await
// .expect("working rx");
// break;
// }
// }
// } else {
// tx.send(Ok(ReadAtResponse {
// success: false,
// data: Vec::new(),
// error: Some(err_to_proto_err(
// &EcsError::new(StorageError::InvalidArgument(
// Default::default(),
// Default::default(),
// Default::default(),
// )),
// "can not find disk",
// )),
// read_size: -1,
// }))
// .await
// .expect("working rx");
// break;
// }
// }
// };
// let mut data = vec![0u8; v.length.try_into().unwrap()];
// match file_ref
// .as_mut()
// .unwrap()
// .read_at(v.offset.try_into().unwrap(), &mut data)
// .await
// {
// Ok(read_size) => tx.send(Ok(ReadAtResponse {
// success: true,
// data,
// read_size: read_size.try_into().unwrap(),
// error: None,
// })),
// Err(err) => tx.send(Ok(ReadAtResponse {
// success: false,
// data: Vec::new(),
// error: Some(err_to_proto_err(&err, &format!("read at failed: {}", err))),
// read_size: -1,
// })),
// }
// .await
// .unwrap();
// }
// Err(err) => {
// if let Some(io_err) = match_for_io_error(&err) {
// if io_err.kind() == ErrorKind::BrokenPipe {
// // here you can handle special case when client
// // disconnected in unexpected way
// eprintln!("\tclient disconnected: broken pipe");
// break;
// }
// }
// match tx.send(Err(err)).await {
// Ok(_) => (),
// Err(_err) => break, // response was dropped
// }
// }
// }
// }
// println!("\tstream ended");
// });
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/router.rs | rustfs/src/admin/router.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::admin::console::is_console_path;
use crate::admin::console::make_console_server;
use crate::server::{ADMIN_PREFIX, HEALTH_PREFIX, PROFILE_CPU_PATH, PROFILE_MEMORY_PATH, RPC_PREFIX};
use hyper::HeaderMap;
use hyper::Method;
use hyper::StatusCode;
use hyper::Uri;
use hyper::http::Extensions;
use matchit::Params;
use matchit::Router;
use rustfs_ecstore::rpc::verify_rpc_signature;
use s3s::Body;
use s3s::S3Request;
use s3s::S3Response;
use s3s::S3Result;
use s3s::header;
use s3s::route::S3Route;
use s3s::s3_error;
use tower::Service;
use tracing::error;
/// Admin/console router layered on top of the S3 service.
///
/// Routes are keyed by `"METHOD|path"` strings (see `make_route_str`);
/// an optional embedded axum service serves console assets.
pub struct S3Router<T> {
    // matchit router mapping "METHOD|path" keys to operations.
    router: Router<T>,
    // Whether the embedded web console should be served.
    console_enabled: bool,
    // Axum service for console assets; `None` when the console is disabled.
    console_router: Option<axum::routing::RouterIntoService<Body>>,
}
impl<T: Operation> S3Router<T> {
    /// Creates an empty router; the console service is built only when
    /// `console_enabled` is true.
    pub fn new(console_enabled: bool) -> Self {
        Self {
            router: Router::new(),
            console_enabled,
            console_router: console_enabled.then(|| make_console_server().into_service::<Body>()),
        }
    }

    /// Registers `operation` under the given method and path.
    pub fn insert(&mut self, method: Method, path: &str, operation: T) -> std::io::Result<()> {
        let key = Self::make_route_str(method, path);
        // warn!("set uri {}", &key);
        self.router.insert(key, operation).map_err(std::io::Error::other)
    }

    /// Builds the `"METHOD|path"` lookup key used by the matchit router.
    fn make_route_str(method: Method, path: &str) -> String {
        format!("{}|{}", method.as_str(), path)
    }
}
impl<T: Operation> Default for S3Router<T> {
fn default() -> Self {
Self::new(false)
}
}
#[async_trait::async_trait]
impl<T> S3Route for S3Router<T>
where
    T: Operation,
{
    /// Decides whether this router should handle the request at all.
    ///
    /// Matches: profiling endpoints (GET), the health check (GET/HEAD),
    /// form-encoded POST to "/" (STS-style AssumeRole), and any path
    /// under the admin/RPC prefixes or the console paths.
    fn is_match(&self, method: &Method, uri: &Uri, headers: &HeaderMap, _: &mut Extensions) -> bool {
        let path = uri.path();
        // Profiling endpoints
        if method == Method::GET && (path == PROFILE_CPU_PATH || path == PROFILE_MEMORY_PATH) {
            return true;
        }
        // Health check
        if (method == Method::HEAD || method == Method::GET) && path == HEALTH_PREFIX {
            return true;
        }
        // AssumeRole: Content-Type is compared ignoring any parameters
        // (e.g. "; charset=utf-8") and case.
        if method == Method::POST
            && path == "/"
            && headers
                .get(header::CONTENT_TYPE)
                .and_then(|v| v.to_str().ok())
                .map(|ct| ct.split(';').next().unwrap_or("").trim().to_lowercase())
                .map(|ct| ct == "application/x-www-form-urlencoded")
                .unwrap_or(false)
        {
            return true;
        }
        path.starts_with(ADMIN_PREFIX) || path.starts_with(RPC_PREFIX) || is_console_path(path)
    }

    // check_access before call
    /// Authorizes the request before `call` runs. Order matters:
    /// health/profiling and (when enabled) console assets are public,
    /// RPC paths use signature verification, and everything else
    /// requires resolved S3 credentials.
    async fn check_access(&self, req: &mut S3Request<Body>) -> S3Result<()> {
        // Allow unauthenticated access to health check
        let path = req.uri.path();
        // Profiling endpoints
        if req.method == Method::GET && (path == PROFILE_CPU_PATH || path == PROFILE_MEMORY_PATH) {
            return Ok(());
        }
        // Health check
        if (req.method == Method::HEAD || req.method == Method::GET) && path == HEALTH_PREFIX {
            return Ok(());
        }
        // Allow unauthenticated access to console static files if console is enabled
        if self.console_enabled && is_console_path(path) {
            return Ok(());
        }
        // Check RPC signature verification
        if req.uri.path().starts_with(RPC_PREFIX) {
            // Skip signature verification for HEAD requests (health checks)
            if req.method != Method::HEAD {
                verify_rpc_signature(&req.uri.to_string(), &req.method, &req.headers).map_err(|e| {
                    error!("RPC signature verification failed: {}", e);
                    s3_error!(AccessDenied, "{}", e)
                })?;
            }
            return Ok(());
        }
        // For non-RPC admin requests, check credentials
        match req.credentials {
            Some(_) => Ok(()),
            None => Err(s3_error!(AccessDenied, "Signature is required")),
        }
    }

    /// Dispatches the request: console paths go to the embedded axum
    /// service; everything else is looked up in the matchit router by
    /// "METHOD|path" key. Unknown routes answer `NotImplemented`.
    async fn call(&self, req: S3Request<Body>) -> S3Result<S3Response<Body>> {
        if self.console_enabled && is_console_path(req.uri.path()) {
            if let Some(console_router) = &self.console_router {
                let mut console_router = console_router.clone();
                let req = convert_request(req);
                let result = console_router.call(req).await;
                return match result {
                    Ok(resp) => Ok(convert_response(resp)),
                    Err(e) => Err(s3_error!(InternalError, "{}", e)),
                };
            }
            return Err(s3_error!(InternalError, "console is not enabled"));
        }

        let uri = format!("{}|{}", &req.method, req.uri.path());
        if let Ok(mat) = self.router.at(&uri) {
            let op: &T = mat.value;
            let mut resp = op.call(req, mat.params).await?;
            // The operation returns (StatusCode, Body); lift the status
            // onto the response and keep only the body as output.
            resp.status = Some(resp.output.0);
            return Ok(resp.map_output(|x| x.1));
        }

        Err(s3_error!(NotImplemented))
    }
}
/// A single admin/RPC route handler.
///
/// Implementors receive the raw `S3Request` plus the path parameters
/// captured by the matchit router, and return a status code together
/// with the response body.
#[async_trait::async_trait]
pub trait Operation: Send + Sync + 'static {
    // fn method() -> Method;
    // fn uri() -> &'static str;
    async fn call(&self, req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>>;
}
/// Newtype wrapper so a `&'static dyn Operation` can be stored as a
/// sized router value.
pub struct AdminOperation(pub &'static dyn Operation);

#[async_trait::async_trait]
impl Operation for AdminOperation {
    /// Delegates straight to the wrapped operation.
    async fn call(&self, req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        self.0.call(req, params).await
    }
}
/// S3 authentication context forwarded to the console (axum) handlers
/// via request extensions, since the plain `http::Request` produced by
/// `convert_request` loses the `S3Request`-level fields.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct Extra {
    // Credentials resolved by the s3s auth layer, if any.
    pub credentials: Option<s3s::auth::Credentials>,
    // Signing region extracted from the request, if any.
    pub region: Option<String>,
    // Signing service extracted from the request, if any.
    pub service: Option<String>,
}
/// Converts an `S3Request` into a plain `http::Request` for the console
/// service, stashing the S3 auth context in an `Extra` extension so the
/// console handlers can still see it.
fn convert_request(req: S3Request<Body>) -> http::Request<Body> {
    let (mut parts, _) = http::Request::new(Body::empty()).into_parts();
    let extra = Extra {
        credentials: req.credentials,
        region: req.region,
        service: req.service,
    };
    parts.method = req.method;
    parts.uri = req.uri;
    parts.headers = req.headers;
    parts.extensions = req.extensions;
    parts.extensions.insert(extra);
    http::Request::from_parts(parts, req.input)
}
/// Converts the console's axum `http::Response` back into an
/// `S3Response`, preserving status, headers, and extensions.
fn convert_response(resp: http::Response<axum::body::Body>) -> S3Response<Body> {
    let (parts, body) = resp.into_parts();
    let mut out = S3Response::new(Body::http_body_unsync(body));
    out.status = Some(parts.status);
    out.headers = parts.headers;
    out.extensions = parts.extensions;
    out
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/handlers.rs | rustfs/src/admin/handlers.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::router::Operation;
use crate::admin::auth::validate_admin_request;
use crate::auth::check_key_valid;
use crate::auth::get_condition_values;
use crate::auth::get_session_token;
use crate::error::ApiError;
use crate::server::RemoteAddr;
use bytes::Bytes;
use futures::{Stream, StreamExt};
use http::{HeaderMap, HeaderValue, Uri};
use hyper::StatusCode;
use matchit::Params;
use rustfs_common::heal_channel::HealOpts;
use rustfs_config::{MAX_ADMIN_REQUEST_BODY_SIZE, MAX_HEAL_REQUEST_SIZE};
use rustfs_credentials::get_global_action_cred;
use rustfs_ecstore::admin_server_info::get_server_info;
use rustfs_ecstore::bucket::bucket_target_sys::BucketTargetSys;
use rustfs_ecstore::bucket::metadata::BUCKET_TARGETS_FILE;
use rustfs_ecstore::bucket::metadata_sys;
use rustfs_ecstore::bucket::target::BucketTarget;
use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys;
use rustfs_ecstore::data_usage::{
aggregate_local_snapshots, compute_bucket_usage, load_data_usage_from_backend, store_data_usage_in_backend,
};
use rustfs_ecstore::error::StorageError;
use rustfs_ecstore::global::global_rustfs_port;
use rustfs_ecstore::metrics_realtime::{CollectMetricsOpts, MetricType, collect_local_metrics};
use rustfs_ecstore::new_object_layer_fn;
use rustfs_ecstore::pools::{get_total_usable_capacity, get_total_usable_capacity_free};
use rustfs_ecstore::store::is_valid_object_prefix;
use rustfs_ecstore::store_api::BucketOptions;
use rustfs_ecstore::store_api::StorageAPI;
use rustfs_ecstore::store_utils::is_reserved_or_invalid_bucket;
use rustfs_iam::store::MappedPolicy;
use rustfs_madmin::metrics::RealtimeMetrics;
use rustfs_madmin::utils::parse_duration;
use rustfs_policy::policy::Args;
use rustfs_policy::policy::BucketPolicy;
use rustfs_policy::policy::action::Action;
use rustfs_policy::policy::action::AdminAction;
use rustfs_policy::policy::action::S3Action;
use rustfs_policy::policy::default::DEFAULT_POLICIES;
use rustfs_utils::path::path_join;
use s3s::header::CONTENT_TYPE;
use s3s::stream::{ByteStream, DynByteStream};
use s3s::{Body, S3Error, S3Request, S3Response, S3Result, s3_error};
use s3s::{S3ErrorCode, StdError};
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration as std_Duration;
use tokio::sync::mpsc::{self};
use tokio::time::interval;
use tokio::{select, spawn};
use tokio_stream::wrappers::ReceiverStream;
use tracing::debug;
use tracing::{error, info, warn};
use url::Host;
pub mod bucket_meta;
pub mod event;
pub mod group;
pub mod kms;
pub mod kms_dynamic;
pub mod kms_keys;
pub mod policies;
pub mod pools;
pub mod profile;
pub mod rebalance;
pub mod service_account;
pub mod sts;
pub mod tier;
pub mod trace;
pub mod user;
/// JSON payload returned by `IsAdminHandler`.
#[derive(Debug, Serialize)]
pub struct IsAdminResponse {
    // True when the caller (or its parent user) is the root/admin account.
    pub is_admin: bool,
    // The access key that was checked.
    pub access_key: String,
    // Human-readable summary of the result.
    pub message: String,
}
/// Account summary shape, serialized with PascalCase field names.
///
/// NOTE(review): appears unused in favor of `rustfs_madmin::AccountInfo`
/// (hence `#[allow(dead_code)]`) — confirm before removing.
#[allow(dead_code)]
#[derive(Debug, Serialize, Default)]
#[serde(rename_all = "PascalCase", default)]
pub struct AccountInfo {
    pub account_name: String,
    pub server: rustfs_madmin::BackendInfo,
    pub policy: BucketPolicy,
}
/// Health check handler for endpoint monitoring
pub struct HealthCheckHandler {}

#[async_trait::async_trait]
impl Operation for HealthCheckHandler {
    /// Liveness probe: `GET` returns a small JSON status document,
    /// `HEAD` returns headers only, and any other method is rejected
    /// with `405` plus an `Allow: GET, HEAD` header.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        use serde_json::json;

        // Extract the original HTTP method (wrapped by s3s into S3Request).
        let method = req.method;
        let allowed = method == http::Method::GET || method == http::Method::HEAD;
        if !allowed {
            // 405 Method Not Allowed, advertising the supported verbs.
            let mut reject_headers = HeaderMap::new();
            reject_headers.insert(http::header::ALLOW, HeaderValue::from_static("GET, HEAD"));
            return Ok(S3Response::with_headers(
                (StatusCode::METHOD_NOT_ALLOWED, Body::from("Method Not Allowed".to_string())),
                reject_headers,
            ));
        }

        let health_info = json!({
            "status": "ok",
            "service": "rustfs-endpoint",
            "timestamp": chrono::Utc::now().to_rfc3339(),
            "version": env!("CARGO_PKG_VERSION")
        });

        let mut headers = HeaderMap::new();
        headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));

        // HEAD: headers and status only, no body.
        if method == http::Method::HEAD {
            return Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), headers));
        }

        // GET: serialize the JSON body (falling back to "{}" on failure).
        let body_str = serde_json::to_string(&health_info).unwrap_or_else(|_| "{}".to_string());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(body_str)), headers))
    }
}
/// Reports whether the calling identity is the root administrator.
pub struct IsAdminHandler {}

#[async_trait::async_trait]
impl Operation for IsAdminHandler {
    /// Validates the caller's key/session, then compares the access key
    /// (and its parent user, covering temp/service accounts) against the
    /// global admin credentials. Returns an `IsAdminResponse` as JSON.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, _owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;

        let access_key_to_check = input_cred.access_key.clone();

        // Check if the user is admin by comparing with global credentials.
        // Constant-time comparison avoids leaking key material via timing.
        let is_admin = if let Some(sys_cred) = get_global_action_cred() {
            crate::auth::constant_time_eq(&access_key_to_check, &sys_cred.access_key)
                || crate::auth::constant_time_eq(&cred.parent_user, &sys_cred.access_key)
        } else {
            false
        };

        let response = IsAdminResponse {
            is_admin,
            access_key: access_key_to_check,
            message: format!("User is {}an administrator", if is_admin { "" } else { "not " }),
        };

        let data = serde_json::to_vec(&response)
            .map_err(|_e| S3Error::with_message(S3ErrorCode::InternalError, "parse IsAdminResponse failed"))?;

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header))
    }
}
pub struct AccountInfoHandler {}
#[async_trait::async_trait]
impl Operation for AccountInfoHandler {
async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
let Some(store) = new_object_layer_fn() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
};
let Some(input_cred) = req.credentials else {
return Err(s3_error!(InvalidRequest, "get cred failed"));
};
let (cred, owner) =
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
let Ok(iam_store) = rustfs_iam::get() else {
return Err(s3_error!(InvalidRequest, "iam not init"));
};
let default_claims = HashMap::new();
let claims = cred.claims.as_ref().unwrap_or(&default_claims);
let cred_clone = cred.clone();
let remote_addr = req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0));
let conditions = get_condition_values(&req.headers, &cred_clone, None, None, remote_addr);
let cred_clone = Arc::new(cred_clone);
let conditions = Arc::new(conditions);
let is_allow = Box::new({
let iam_clone = Arc::clone(&iam_store);
let cred_clone = Arc::clone(&cred_clone);
let conditions = Arc::clone(&conditions);
move |name: String| {
let iam_clone = Arc::clone(&iam_clone);
let cred_clone = Arc::clone(&cred_clone);
let conditions = Arc::clone(&conditions);
async move {
let (mut rd, mut wr) = (false, false);
if !iam_clone
.is_allowed(&Args {
account: &cred_clone.access_key,
groups: &cred_clone.groups,
action: Action::S3Action(S3Action::ListBucketAction),
bucket: &name,
conditions: &conditions,
is_owner: owner,
object: "",
claims,
deny_only: false,
})
.await
{
rd = true
}
if !iam_clone
.is_allowed(&Args {
account: &cred_clone.access_key,
groups: &cred_clone.groups,
action: Action::S3Action(S3Action::GetBucketLocationAction),
bucket: &name,
conditions: &conditions,
is_owner: owner,
object: "",
claims,
deny_only: false,
})
.await
{
rd = true
}
if !iam_clone
.is_allowed(&Args {
account: &cred_clone.access_key,
groups: &cred_clone.groups,
action: Action::S3Action(S3Action::PutObjectAction),
bucket: &name,
conditions: &conditions,
is_owner: owner,
object: "",
claims,
deny_only: false,
})
.await
{
wr = true
}
(rd, wr)
}
}
});
let account_name = if cred.is_temp() || cred.is_service_account() {
cred.parent_user.clone()
} else {
cred.access_key.clone()
};
let claims_args = Args {
account: "",
groups: &None,
action: Action::None,
bucket: "",
conditions: &HashMap::new(),
is_owner: false,
object: "",
claims,
deny_only: false,
};
let role_arn = claims_args.get_role_arn();
// TODO: get_policies_from_claims(claims);
let Some(admin_cred) = get_global_action_cred() else {
return Err(S3Error::with_message(
S3ErrorCode::InternalError,
"get_global_action_cred failed".to_string(),
));
};
let mut effective_policy: rustfs_policy::policy::Policy = Default::default();
if account_name == admin_cred.access_key {
for (name, p) in DEFAULT_POLICIES.iter() {
if *name == "consoleAdmin" {
effective_policy = p.clone();
break;
}
}
} else if let Some(arn) = role_arn {
let (_, policy_name) = iam_store
.get_role_policy(arn)
.await
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
let policies = MappedPolicy::new(&policy_name).to_slice();
effective_policy = iam_store.get_combined_policy(&policies).await;
} else {
let policies = iam_store
.policy_db_get(&account_name, &cred.groups)
.await
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("get policy failed: {e}")))?;
effective_policy = iam_store.get_combined_policy(&policies).await;
};
let policy_str = serde_json::to_string(&effective_policy)
.map_err(|_e| S3Error::with_message(S3ErrorCode::InternalError, "parse policy failed"))?;
let mut account_info = rustfs_madmin::AccountInfo {
account_name,
server: store.backend_info().await,
policy: serde_json::Value::String(policy_str),
..Default::default()
};
// TODO: bucket policy
let buckets = store
.list_bucket(&BucketOptions {
cached: true,
..Default::default()
})
.await
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
for bucket in buckets.iter() {
let (rd, wr) = is_allow(bucket.name.clone()).await;
if rd || wr {
// TODO: BucketQuotaSys
// TODO: other attributes
account_info.buckets.push(rustfs_madmin::BucketAccessInfo {
name: bucket.name.clone(),
details: Some(rustfs_madmin::BucketDetails {
versioning: BucketVersioningSys::enabled(bucket.name.as_str()).await,
versioning_suspended: BucketVersioningSys::suspended(bucket.name.as_str()).await,
..Default::default()
}),
created: bucket.created,
access: rustfs_madmin::AccountAccess { read: rd, write: wr },
..Default::default()
});
}
}
let data = serde_json::to_vec(&account_info)
.map_err(|_e| S3Error::with_message(S3ErrorCode::InternalError, "parse accountInfo failed"))?;
let mut header = HeaderMap::new();
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header))
}
}
/// Placeholder for the admin service (restart/stop) endpoint.
pub struct ServiceHandle {}

#[async_trait::async_trait]
impl Operation for ServiceHandle {
    /// Not implemented yet; logs the hit and answers `NotImplemented`.
    async fn call(&self, _req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle ServiceHandle");
        Err(s3_error!(NotImplemented))
    }
}
/// Serves the admin `ServerInfo` endpoint.
pub struct ServerInfoHandler {}

#[async_trait::async_trait]
impl Operation for ServerInfoHandler {
    /// Authenticates the caller, requires `ServerInfoAdminAction`, and
    /// returns the cluster server-info document as JSON.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let session_token = get_session_token(&req.uri, &req.headers).unwrap_or_default();
        let (cred, owner) = check_key_valid(session_token, &input_cred.access_key).await?;

        let remote_addr = req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0));
        let required = vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)];
        validate_admin_request(&req.headers, &cred, owner, false, required, remote_addr).await?;

        let info = get_server_info(true).await;
        let body = serde_json::to_vec(&info)
            .map_err(|_e| S3Error::with_message(S3ErrorCode::InternalError, "parse serverInfo failed"))?;

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(body)), header))
    }
}
/// Placeholder for the admin inspect-data endpoint.
pub struct InspectDataHandler {}

#[async_trait::async_trait]
impl Operation for InspectDataHandler {
    /// Not implemented yet; logs the hit and answers `NotImplemented`.
    async fn call(&self, _req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle InspectDataHandler");
        Err(s3_error!(NotImplemented))
    }
}
/// Serves the admin `StorageInfo` endpoint.
pub struct StorageInfoHandler {}

#[async_trait::async_trait]
impl Operation for StorageInfoHandler {
    /// Authenticates the caller, requires `StorageInfoAdminAction`, and
    /// returns the storage-info document as JSON.
    ///
    /// Bug fix: the serialization error message previously read
    /// "parse accountInfo failed" (copy-paste from AccountInfoHandler);
    /// it now correctly names the storage-info payload.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle StorageInfoHandler");
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;

        let remote_addr = req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0));
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::StorageInfoAdminAction)],
            remote_addr,
        )
        .await?;

        let Some(store) = new_object_layer_fn() else {
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };

        // TODO:getAggregatedBackgroundHealState
        let info = store.storage_info().await;

        let data = serde_json::to_vec(&info)
            .map_err(|_e| S3Error::with_message(S3ErrorCode::InternalError, "parse storageInfo failed"))?;

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header))
    }
}
/// Admin handler for the `datausageinfo` endpoint: returns aggregated
/// data-usage statistics (buckets, objects, capacity) as JSON.
pub struct DataUsageInfoHandler {}
#[async_trait::async_trait]
impl Operation for DataUsageInfoHandler {
    /// Flow: authenticate → aggregate local snapshots (falling back to the
    /// backend copy) → refresh synchronously if data is missing, or
    /// asynchronously if it is stale (> 300 s) → attach capacity figures →
    /// serialize as `application/json`.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle DataUsageInfoHandler");
        // Authentication: either the data-usage admin action or a plain
        // ListBucket permission is accepted (any-of semantics).
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        let remote_addr = req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0));
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![
                Action::AdminAction(AdminAction::DataUsageInfoAdminAction),
                Action::S3Action(S3Action::ListBucketAction),
            ],
            remote_addr,
        )
        .await?;
        let Some(store) = new_object_layer_fn() else {
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };
        // Primary source: per-disk local snapshots. On failure, fall back to
        // the backend-persisted usage (and surface an error if that fails too).
        let (disk_statuses, mut info) = match aggregate_local_snapshots(store.clone()).await {
            Ok((statuses, usage)) => (statuses, usage),
            Err(err) => {
                warn!("aggregate_local_snapshots failed: {:?}", err);
                (
                    Vec::new(),
                    load_data_usage_from_backend(store.clone()).await.map_err(|e| {
                        error!("load_data_usage_from_backend failed {:?}", e);
                        s3_error!(InternalError, "load_data_usage_from_backend failed")
                    })?,
                )
            }
        };
        // If no disk reported a snapshot, prefer the backend copy (best
        // effort: errors here are silently ignored and `info` is kept).
        let snapshots_available = disk_statuses.iter().any(|status| status.snapshot_exists);
        if !snapshots_available {
            if let Ok(fallback) = load_data_usage_from_backend(store.clone()).await {
                let mut fallback_info = fallback;
                fallback_info.disk_usage_status = disk_statuses.clone();
                info = fallback_info;
            }
        } else {
            info.disk_usage_status = disk_statuses.clone();
        }
        // Freshness assessment: missing `last_update` counts as stale.
        let last_update_age = info.last_update.and_then(|ts| ts.elapsed().ok());
        let data_missing = info.objects_total_count == 0 && info.buckets_count == 0;
        let stale = last_update_age
            .map(|elapsed| elapsed > std::time::Duration::from_secs(300))
            .unwrap_or(true);
        if data_missing {
            // No stats at all: collect synchronously so this response has data,
            // then best-effort persist the refreshed numbers.
            info!("No data usage statistics found, attempting real-time collection");
            if let Err(e) = collect_realtime_data_usage(&mut info, store.clone()).await {
                warn!("Failed to collect real-time data usage: {}", e);
            } else if let Err(e) = store_data_usage_in_backend(info.clone(), store.clone()).await {
                warn!("Failed to persist refreshed data usage: {}", e);
            }
        } else if stale {
            // Stale but usable: serve the cached copy now and refresh in the
            // background; the refreshed copy is only persisted, not returned.
            info!(
                "Data usage statistics are stale (last update {:?} ago), refreshing asynchronously",
                last_update_age
            );
            let mut info_for_refresh = info.clone();
            let store_for_refresh = store.clone();
            spawn(async move {
                if let Err(e) = collect_realtime_data_usage(&mut info_for_refresh, store_for_refresh.clone()).await {
                    warn!("Background data usage refresh failed: {}", e);
                    return;
                }
                if let Err(e) = store_data_usage_in_backend(info_for_refresh, store_for_refresh).await {
                    warn!("Background data usage persistence failed: {}", e);
                }
            });
        }
        // NOTE(review): this overwrites the disk_usage_status already set in
        // both branches above, making those earlier (cloned) assignments
        // redundant — candidate for cleanup.
        info.disk_usage_status = disk_statuses;
        // Set capacity information
        let sinfo = store.storage_info().await;
        info.total_capacity = get_total_usable_capacity(&sinfo.disks, &sinfo) as u64;
        info.total_free_capacity = get_total_usable_capacity_free(&sinfo.disks, &sinfo) as u64;
        if info.total_capacity > info.total_free_capacity {
            info.total_used_capacity = info.total_capacity - info.total_free_capacity;
        }
        let data = serde_json::to_vec(&info)
            .map_err(|_e| S3Error::with_message(S3ErrorCode::InternalError, "parse DataUsageInfo failed"))?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header))
    }
}
/// Query parameters accepted by the metrics streaming endpoint.
/// Filled from the URI query string by `extract_metrics_init_params`.
#[derive(Debug, Serialize, Deserialize)]
struct MetricsParams {
    // Comma-separated disk filter.
    disks: String,
    // Comma-separated host filter.
    hosts: String,
    // Polling interval between metric samples (query key: `interval`).
    #[serde(rename = "interval")]
    tick: String,
    // Number of samples to stream; 0/absent means unbounded (u64::MAX).
    n: u64,
    // Bitmask selecting which metric types to collect; 0 means all.
    types: u32,
    // "true" to keep the per-disk breakdown in the response.
    #[serde(rename = "by-disk")]
    by_disk: String,
    // "true" to keep the per-host breakdown in the response.
    #[serde(rename = "by-host")]
    by_host: String,
    // Job-ID filter passed through to metric collection.
    #[serde(rename = "by-jobID")]
    by_job_id: String,
    // Deployment-ID filter passed through to metric collection.
    #[serde(rename = "by-depID")]
    by_dep_id: String,
}
impl Default for MetricsParams {
    /// Default parameters: unbounded sample count (`u64::MAX`), every
    /// string filter empty and all metric types (`types == 0`).
    fn default() -> Self {
        Self {
            n: u64::MAX,
            types: 0,
            disks: String::new(),
            hosts: String::new(),
            tick: String::new(),
            by_disk: String::new(),
            by_host: String::new(),
            by_job_id: String::new(),
            by_dep_id: String::new(),
        }
    }
}
/// Parses the metrics endpoint's query string into `MetricsParams`.
///
/// Unknown keys and key-only pairs (no `=`) are ignored; malformed numeric
/// values fall back to the defaults (`u64::MAX` for `n`, `0` for `types`).
fn extract_metrics_init_params(uri: &Uri) -> MetricsParams {
    let mut mp = MetricsParams::default();
    let Some(query) = uri.query() else {
        return mp;
    };
    for pair in query.split('&') {
        let mut kv = pair.split('=');
        // Both a key and a value are required for the pair to take effect.
        let (Some(key), Some(value)) = (kv.next(), kv.next()) else {
            continue;
        };
        match key {
            "disks" => mp.disks = value.to_string(),
            "hosts" => mp.hosts = value.to_string(),
            "interval" => mp.tick = value.to_string(),
            "n" => mp.n = value.parse::<u64>().unwrap_or(u64::MAX),
            "types" => mp.types = value.parse::<u32>().unwrap_or_default(),
            "by-disk" => mp.by_disk = value.to_string(),
            "by-host" => mp.by_host = value.to_string(),
            "by-jobID" => mp.by_job_id = value.to_string(),
            "by-depID" => mp.by_dep_id = value.to_string(),
            _ => {}
        }
    }
    mp
}
/// Adapter turning an mpsc receiver of serialized metric frames into a
/// byte stream usable as a streaming response body.
struct MetricsStream {
    inner: ReceiverStream<Result<Bytes, StdError>>,
}
impl Stream for MetricsStream {
    type Item = Result<Bytes, StdError>;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        info!("MetricsStream poll_next");
        // `ReceiverStream` is `Unpin`, so the pin can be dereferenced safely.
        self.get_mut().inner.poll_next_unpin(cx)
    }
}
impl ByteStream for MetricsStream {}
/// Admin handler for the `metrics` endpoint: streams realtime metric
/// samples to the client as a sequence of JSON frames.
pub struct MetricsHandler {}
#[async_trait::async_trait]
impl Operation for MetricsHandler {
    /// Parses the sampling parameters from the query string, then spawns a
    /// task that collects local metrics every `interval` tick and pushes
    /// each serialized sample through an mpsc channel into the response
    /// body stream, up to `n` samples (unbounded when `n` is 0).
    async fn call(&self, req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        info!("handle MetricsHandler, req: {:?}, params: {:?}", req, params);
        let Some(cred) = req.credentials else { return Err(s3_error!(InvalidRequest, "get cred failed")) };
        // NOTE(review): credentials are logged at info level here (and via the
        // req log above) — confirm the Debug impl redacts secret material.
        info!("cred: {:?}", cred);
        let mp = extract_metrics_init_params(&req.uri);
        info!("mp: {:?}", mp);
        // Invalid/missing interval falls back to 3 seconds.
        let tick = parse_duration(&mp.tick).unwrap_or_else(|_| std_Duration::from_secs(3));
        let mut n = mp.n;
        // n == 0 means "stream forever".
        if n == 0 {
            n = u64::MAX;
        }
        // types == 0 selects all metric types.
        let types = if mp.types != 0 {
            MetricType::new(mp.types)
        } else {
            MetricType::ALL
        };
        // Splits a comma-separated filter into a set, dropping empty parts.
        fn parse_comma_separated(s: &str) -> HashSet<String> {
            s.split(',').filter(|part| !part.is_empty()).map(String::from).collect()
        }
        let disks = parse_comma_separated(&mp.disks);
        let by_disk = mp.by_disk == "true";
        let disk_map = disks;
        let job_id = mp.by_job_id;
        let hosts = parse_comma_separated(&mp.hosts);
        let by_host = mp.by_host == "true";
        let host_map = hosts;
        let d_id = mp.by_dep_id;
        let mut interval = interval(tick);
        let opts = CollectMetricsOpts {
            hosts: host_map,
            disks: disk_map,
            job_id,
            dep_id: d_id,
        };
        // Channel feeding the response body; capacity 10 bounds buffering.
        let (tx, rx) = mpsc::channel(10);
        let in_stream: DynByteStream = Box::pin(MetricsStream {
            inner: ReceiverStream::new(rx),
        });
        let body = Body::from(in_stream);
        // Producer task: runs independently of this handler returning.
        spawn(async move {
            while n > 0 {
                info!("loop, n: {n}");
                let mut m = RealtimeMetrics::default();
                let m_local = collect_local_metrics(types, &opts).await;
                m.merge(m_local);
                // Strip breakdowns the client did not ask for.
                if !by_host {
                    m.by_host = HashMap::new();
                }
                if !by_disk {
                    m.by_disk = HashMap::new();
                }
                // Mark the last frame so the client can stop reading.
                m.finally = n <= 1;
                // todo write resp
                match serde_json::to_vec(&m) {
                    Ok(re) => {
                        info!("got metrics, send it to client, m: {m:?}");
                        // Send errors (client gone) are intentionally ignored;
                        // the tx.closed() arm below ends the loop.
                        let _ = tx.send(Ok(Bytes::from(re))).await;
                    }
                    Err(e) => {
                        error!("MetricsHandler: json encode failed, err: {:?}", e);
                        return;
                    }
                }
                n -= 1;
                if n == 0 {
                    break;
                }
                // Wait for the next tick, or stop early if the client
                // disconnected (receiver dropped).
                select! {
                    _ = tx.closed() => { return; }
                    _ = interval.tick() => {}
                }
            }
        });
        Ok(S3Response::new((StatusCode::OK, body)))
    }
}
/// Parameters controlling a heal request, assembled by
/// `extract_heal_init_params` from route params, query string and body.
#[derive(Debug, Default, Serialize, Deserialize)]
struct HealInitParams {
    // Target bucket; may be empty for a cluster-wide heal.
    bucket: String,
    // Object prefix within the bucket.
    obj_prefix: String,
    // Heal options decoded from the JSON request body.
    hs: HealOpts,
    // Token identifying an already-started heal sequence (query: clientToken).
    client_token: String,
    // Start a new heal sequence even if one is running (query: forceStart).
    force_start: bool,
    // Stop a running heal sequence (query: forceStop).
    force_stop: bool,
}
/// Parses heal request parameters from the route `params`, the URI query
/// string and — when no client token is present — the JSON request body.
///
/// Validation rules:
/// * an object prefix without a bucket is rejected,
/// * bucket and object-prefix names must be valid,
/// * `forceStart` and `forceStop` are mutually exclusive, and neither may
///   be combined with an existing `clientToken`.
///
/// Returns `InvalidRequest` errors describing the first violated rule.
fn extract_heal_init_params(body: &Bytes, uri: &Uri, params: Params<'_, '_>) -> S3Result<HealInitParams> {
    let mut hip = HealInitParams {
        bucket: params.get("bucket").map(|s| s.to_string()).unwrap_or_default(),
        obj_prefix: params.get("prefix").map(|s| s.to_string()).unwrap_or_default(),
        ..Default::default()
    };
    // A prefix is meaningless without a bucket.
    if hip.bucket.is_empty() && !hip.obj_prefix.is_empty() {
        return Err(s3_error!(InvalidRequest, "invalid bucket name"));
    }
    if is_reserved_or_invalid_bucket(&hip.bucket, false) {
        return Err(s3_error!(InvalidRequest, "invalid bucket name"));
    }
    if !is_valid_object_prefix(&hip.obj_prefix) {
        return Err(s3_error!(InvalidRequest, "invalid object name"));
    }
    if let Some(query) = uri.query() {
        let params: Vec<&str> = query.split('&').collect();
        for param in params {
            let mut parts = param.split('=');
            if let Some(key) = parts.next() {
                if key == "clientToken"
                    && let Some(value) = parts.next()
                {
                    hip.client_token = value.to_string();
                }
                // For the force flags, any present value (even empty) enables them.
                if key == "forceStart" && parts.next().is_some() {
                    hip.force_start = true;
                }
                if key == "forceStop" && parts.next().is_some() {
                    hip.force_stop = true;
                }
            }
        }
    }
    // Fix: this error previously carried an empty message, giving clients no
    // hint about what was wrong with the request.
    if (hip.force_start && hip.force_stop) || (!hip.client_token.is_empty() && (hip.force_start || hip.force_stop)) {
        return Err(s3_error!(
            InvalidRequest,
            "forceStart and forceStop are mutually exclusive and cannot be combined with clientToken"
        ));
    }
    // Only a brand-new heal sequence carries options in the body.
    if hip.client_token.is_empty() {
        hip.hs = serde_json::from_slice(body).map_err(|e| {
            info!("err request body parse, err: {:?}", e);
            s3_error!(InvalidRequest, "err request body parse")
        })?;
    }
    Ok(hip)
}
/// Admin handler driving heal operations (status query, start, stop).
pub struct HealHandler {}
#[async_trait::async_trait]
impl Operation for HealHandler {
async fn call(&self, req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
warn!("handle HealHandler, req: {:?}, params: {:?}", req, params);
let Some(cred) = req.credentials else { return Err(s3_error!(InvalidRequest, "get cred failed")) };
info!("cred: {:?}", cred);
let mut input = req.input;
let bytes = match input.store_all_limited(MAX_HEAL_REQUEST_SIZE).await {
Ok(b) => b,
Err(e) => {
warn!("get body failed, e: {:?}", e);
return Err(s3_error!(InvalidRequest, "heal request body too large or failed to read"));
}
};
info!("bytes: {:?}", bytes);
let hip = extract_heal_init_params(&bytes, &req.uri, params)?;
info!("body: {:?}", hip);
#[derive(Default)]
struct HealResp {
resp_bytes: Vec<u8>,
_api_err: Option<StorageError>,
_err_body: String,
}
let heal_path = path_join(&[PathBuf::from(hip.bucket.clone()), PathBuf::from(hip.obj_prefix.clone())]);
let (tx, mut rx) = mpsc::channel(1);
if !hip.client_token.is_empty() && !hip.force_start && !hip.force_stop {
// Query heal status
let tx_clone = tx.clone();
let heal_path_str = heal_path.to_str().unwrap_or_default().to_string();
let client_token = hip.client_token.clone();
spawn(async move {
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/auth.rs | rustfs/src/admin/auth.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::auth::get_condition_values;
use http::HeaderMap;
use rustfs_credentials::Credentials;
use rustfs_iam::store::object::ObjectStore;
use rustfs_iam::sys::IamSys;
use rustfs_policy::policy::Args;
use rustfs_policy::policy::action::Action;
use s3s::S3Result;
use s3s::s3_error;
use std::collections::HashMap;
use std::sync::Arc;
/// Returns `Ok(())` when the caller is allowed to perform at least one of
/// the given admin `actions` ("any-of" semantics); otherwise answers
/// `AccessDenied`. Fails with `InternalError` if IAM is not initialized.
pub async fn validate_admin_request(
    headers: &HeaderMap,
    cred: &Credentials,
    is_owner: bool,
    deny_only: bool,
    actions: Vec<Action>,
    remote_addr: Option<std::net::SocketAddr>,
) -> S3Result<()> {
    let iam_store = match rustfs_iam::get() {
        Ok(store) => store,
        Err(_) => return Err(s3_error!(InternalError, "iam not init")),
    };
    // Grant access as soon as any single action passes the policy check.
    for action in actions {
        if check_admin_request_auth(iam_store.clone(), headers, cred, is_owner, deny_only, action, remote_addr)
            .await
            .is_ok()
        {
            return Ok(());
        }
    }
    Err(s3_error!(AccessDenied, "Access Denied"))
}
/// Evaluates a single admin `action` for `cred` against the IAM policy
/// engine; `Ok(())` when allowed, `AccessDenied` otherwise.
async fn check_admin_request_auth(
    iam_store: Arc<IamSys<ObjectStore>>,
    headers: &HeaderMap,
    cred: &Credentials,
    is_owner: bool,
    deny_only: bool,
    action: Action,
    remote_addr: Option<std::net::SocketAddr>,
) -> S3Result<()> {
    let conditions = get_condition_values(headers, cred, None, None, remote_addr);
    // Credentials without claims are evaluated against an empty claim map.
    let empty_claims = HashMap::new();
    let args = Args {
        account: &cred.access_key,
        groups: &cred.groups,
        action,
        conditions: &conditions,
        is_owner,
        claims: cred.claims.as_ref().unwrap_or(&empty_claims),
        deny_only,
        bucket: "",
        object: "",
    };
    if iam_store.is_allowed(&args).await {
        Ok(())
    } else {
        Err(s3_error!(AccessDenied, "Access Denied"))
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/utils.rs | rustfs/src/admin/utils.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Reports whether `s` carries leading or trailing whitespace
/// (Unicode whitespace, matching what `str::trim` would remove).
pub(crate) fn has_space_be(s: &str) -> bool {
    s.starts_with(char::is_whitespace) || s.ends_with(char::is_whitespace)
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/mod.rs | rustfs/src/admin/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod auth;
pub mod console;
pub mod handlers;
pub mod router;
mod rpc;
pub mod utils;
#[cfg(test)]
mod console_test;
use crate::server::{ADMIN_PREFIX, HEALTH_PREFIX, PROFILE_CPU_PATH, PROFILE_MEMORY_PATH};
use handlers::{
GetReplicationMetricsHandler, HealthCheckHandler, IsAdminHandler, ListRemoteTargetHandler, RemoveRemoteTargetHandler,
SetRemoteTargetHandler, bucket_meta,
event::{ListNotificationTargets, ListTargetsArns, NotificationTarget, RemoveNotificationTarget},
group, kms, kms_dynamic, kms_keys, policies, pools,
profile::{TriggerProfileCPU, TriggerProfileMemory},
rebalance,
service_account::{AddServiceAccount, DeleteServiceAccount, InfoServiceAccount, ListServiceAccount, UpdateServiceAccount},
sts, tier, user,
};
use hyper::Method;
use router::{AdminOperation, S3Router};
use rpc::register_rpc_route;
use s3s::route::S3Route;
/// Create admin router
///
/// Registers every admin/management endpoint (health, STS, service control,
/// info/metrics, pools, rebalance, heal, tiering, bucket metadata,
/// replication targets, profiling and KMS) plus the RPC and user/IAM
/// routes supplied by `register_rpc_route` / `register_user_route`.
///
/// # Arguments
/// * `console_enabled` - Whether the console is enabled
///
/// # Returns
/// An instance of S3Route for admin operations
pub fn make_admin_route(console_enabled: bool) -> std::io::Result<impl S3Route> {
    let mut r: S3Router<AdminOperation> = S3Router::new(console_enabled);
    // Health check endpoint for monitoring and orchestration
    r.insert(Method::GET, HEALTH_PREFIX, AdminOperation(&HealthCheckHandler {}))?;
    r.insert(Method::HEAD, HEALTH_PREFIX, AdminOperation(&HealthCheckHandler {}))?;
    // On-demand CPU / memory profiling triggers.
    r.insert(Method::GET, PROFILE_CPU_PATH, AdminOperation(&TriggerProfileCPU {}))?;
    r.insert(Method::GET, PROFILE_MEMORY_PATH, AdminOperation(&TriggerProfileMemory {}))?;
    // STS AssumeRole is served from the root path.
    r.insert(Method::POST, "/", AdminOperation(&sts::AssumeRoleHandle {}))?;
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/is-admin").as_str(),
        AdminOperation(&IsAdminHandler {}),
    )?;
    // Internal RPC and user/IAM routes are registered by dedicated helpers.
    register_rpc_route(&mut r)?;
    register_user_route(&mut r)?;
    // Service control (restart/stop).
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/service").as_str(),
        AdminOperation(&handlers::ServiceHandle {}),
    )?;
    // Server / storage / usage information and metrics.
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/info").as_str(),
        AdminOperation(&handlers::ServerInfoHandler {}),
    )?;
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/inspect-data").as_str(),
        AdminOperation(&handlers::InspectDataHandler {}),
    )?;
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/inspect-data").as_str(),
        AdminOperation(&handlers::InspectDataHandler {}),
    )?;
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/storageinfo").as_str(),
        AdminOperation(&handlers::StorageInfoHandler {}),
    )?;
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/datausageinfo").as_str(),
        AdminOperation(&handlers::DataUsageInfoHandler {}),
    )?;
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/metrics").as_str(),
        AdminOperation(&handlers::MetricsHandler {}),
    )?;
    // Pool management.
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/pools/list").as_str(),
        AdminOperation(&pools::ListPools {}),
    )?;
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/pools/status").as_str(),
        AdminOperation(&pools::StatusPool {}),
    )?;
    // todo
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/pools/decommission").as_str(),
        AdminOperation(&pools::StartDecommission {}),
    )?;
    // todo
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/pools/cancel").as_str(),
        AdminOperation(&pools::CancelDecommission {}),
    )?;
    // Rebalance control.
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/rebalance/start").as_str(),
        AdminOperation(&rebalance::RebalanceStart {}),
    )?;
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/rebalance/status").as_str(),
        AdminOperation(&rebalance::RebalanceStatus {}),
    )?;
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/rebalance/stop").as_str(),
        AdminOperation(&rebalance::RebalanceStop {}),
    )?;
    // Some APIs are only available in EC mode
    // if is_dist_erasure().await || is_erasure().await {
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/heal/{bucket}").as_str(),
        AdminOperation(&handlers::HealHandler {}),
    )?;
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/heal/{bucket}/{prefix}").as_str(),
        AdminOperation(&handlers::HealHandler {}),
    )?;
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/background-heal/status").as_str(),
        AdminOperation(&handlers::BackgroundHealStatusHandler {}),
    )?;
    // Tiering management.
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/tier").as_str(),
        AdminOperation(&tier::ListTiers {}),
    )?;
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/tier-stats").as_str(),
        AdminOperation(&tier::GetTierInfo {}),
    )?;
    // ?force=xxx
    r.insert(
        Method::DELETE,
        format!("{}{}", ADMIN_PREFIX, "/v3/tier/{tiername}").as_str(),
        AdminOperation(&tier::RemoveTier {}),
    )?;
    // ?force=xxx
    // body: AddOrUpdateTierReq
    r.insert(
        Method::PUT,
        format!("{}{}", ADMIN_PREFIX, "/v3/tier").as_str(),
        AdminOperation(&tier::AddTier {}),
    )?;
    // body: AddOrUpdateTierReq
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/tier/{tiername}").as_str(),
        AdminOperation(&tier::EditTier {}),
    )?;
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/tier/clear").as_str(),
        AdminOperation(&tier::ClearTier {}),
    )?;
    // Bucket metadata export / import.
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/export-bucket-metadata").as_str(),
        AdminOperation(&bucket_meta::ExportBucketMetadata {}),
    )?;
    r.insert(
        Method::PUT,
        format!("{}{}", ADMIN_PREFIX, "/import-bucket-metadata").as_str(),
        AdminOperation(&bucket_meta::ImportBucketMetadata {}),
    )?;
    // Replication remote targets.
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/list-remote-targets").as_str(),
        AdminOperation(&ListRemoteTargetHandler {}),
    )?;
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/replicationmetrics").as_str(),
        AdminOperation(&GetReplicationMetricsHandler {}),
    )?;
    r.insert(
        Method::PUT,
        format!("{}{}", ADMIN_PREFIX, "/v3/set-remote-target").as_str(),
        AdminOperation(&SetRemoteTargetHandler {}),
    )?;
    r.insert(
        Method::DELETE,
        format!("{}{}", ADMIN_PREFIX, "/v3/remove-remote-target").as_str(),
        AdminOperation(&RemoveRemoteTargetHandler {}),
    )?;
    // Performance profiling endpoints (available on all platforms, with platform-specific responses)
    #[cfg(not(target_os = "windows"))]
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/debug/pprof/profile").as_str(),
        AdminOperation(&handlers::ProfileHandler {}),
    )?;
    #[cfg(not(target_os = "windows"))]
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/debug/pprof/status").as_str(),
        AdminOperation(&handlers::ProfileStatusHandler {}),
    )?;
    // KMS management endpoints
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/kms/create-key").as_str(),
        AdminOperation(&kms::CreateKeyHandler {}),
    )?;
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/kms/describe-key").as_str(),
        AdminOperation(&kms::DescribeKeyHandler {}),
    )?;
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/kms/list-keys").as_str(),
        AdminOperation(&kms::ListKeysHandler {}),
    )?;
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/kms/generate-data-key").as_str(),
        AdminOperation(&kms::GenerateDataKeyHandler {}),
    )?;
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/kms/status").as_str(),
        AdminOperation(&kms::KmsStatusHandler {}),
    )?;
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/kms/config").as_str(),
        AdminOperation(&kms::KmsConfigHandler {}),
    )?;
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/kms/clear-cache").as_str(),
        AdminOperation(&kms::KmsClearCacheHandler {}),
    )?;
    // KMS Dynamic Configuration APIs
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/kms/configure").as_str(),
        AdminOperation(&kms_dynamic::ConfigureKmsHandler {}),
    )?;
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/kms/start").as_str(),
        AdminOperation(&kms_dynamic::StartKmsHandler {}),
    )?;
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/kms/stop").as_str(),
        AdminOperation(&kms_dynamic::StopKmsHandler {}),
    )?;
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/kms/service-status").as_str(),
        AdminOperation(&kms_dynamic::GetKmsStatusHandler {}),
    )?;
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/kms/reconfigure").as_str(),
        AdminOperation(&kms_dynamic::ReconfigureKmsHandler {}),
    )?;
    // KMS key management endpoints
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/kms/keys").as_str(),
        AdminOperation(&kms_keys::CreateKmsKeyHandler {}),
    )?;
    r.insert(
        Method::DELETE,
        format!("{}{}", ADMIN_PREFIX, "/v3/kms/keys/delete").as_str(),
        AdminOperation(&kms_keys::DeleteKmsKeyHandler {}),
    )?;
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/kms/keys/cancel-deletion").as_str(),
        AdminOperation(&kms_keys::CancelKmsKeyDeletionHandler {}),
    )?;
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/kms/keys").as_str(),
        AdminOperation(&kms_keys::ListKmsKeysHandler {}),
    )?;
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/kms/keys/{key_id}").as_str(),
        AdminOperation(&kms_keys::DescribeKmsKeyHandler {}),
    )?;
    Ok(r)
}
/// Registers user-, group-, service-account-, policy- and notification-
/// target-management routes on the admin router.
fn register_user_route(r: &mut S3Router<AdminOperation>) -> std::io::Result<()> {
    // Account information for the calling user.
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/accountinfo").as_str(),
        AdminOperation(&handlers::AccountInfoHandler {}),
    )?;
    // ?[bucket=xxx]
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/list-users").as_str(),
        AdminOperation(&user::ListUsers {}),
    )?;
    // ?accessKey=xxx
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/user-info").as_str(),
        AdminOperation(&user::GetUserInfo {}),
    )?;
    // ?accessKey=xxx
    r.insert(
        Method::DELETE,
        format!("{}{}", ADMIN_PREFIX, "/v3/remove-user").as_str(),
        AdminOperation(&user::RemoveUser {}),
    )?;
    // ?accessKey=xxx
    // body: AddOrUpdateUserReq
    r.insert(
        Method::PUT,
        format!("{}{}", ADMIN_PREFIX, "/v3/add-user").as_str(),
        AdminOperation(&user::AddUser {}),
    )?;
    // ?accessKey=xxx&status=enabled
    r.insert(
        Method::PUT,
        format!("{}{}", ADMIN_PREFIX, "/v3/set-user-status").as_str(),
        AdminOperation(&user::SetUserStatus {}),
    )?;
    // Group management.
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/groups").as_str(),
        AdminOperation(&group::ListGroups {}),
    )?;
    // ?group=xxx
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/group").as_str(),
        AdminOperation(&group::GetGroup {}),
    )?;
    // ?group=xxx&status=xxx
    r.insert(
        Method::PUT,
        format!("{}{}", ADMIN_PREFIX, "/v3/set-group-status").as_str(),
        AdminOperation(&group::SetGroupStatus {}),
    )?;
    // @body GroupAddRemove
    r.insert(
        Method::PUT,
        format!("{}{}", ADMIN_PREFIX, "/v3/update-group-members").as_str(),
        AdminOperation(&group::UpdateGroupMembers {}),
    )?;
    // Service accounts
    // ?accessKey=xxx
    // @body: UpdateServiceAccountReq
    r.insert(
        Method::POST,
        format!("{}{}", ADMIN_PREFIX, "/v3/update-service-account").as_str(),
        AdminOperation(&UpdateServiceAccount {}),
    )?;
    // ?accessKey=xxx
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/info-service-account").as_str(),
        AdminOperation(&InfoServiceAccount {}),
    )?;
    // ?[user=xxx]
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/list-service-accounts").as_str(),
        AdminOperation(&ListServiceAccount {}),
    )?;
    // ?accessKey=xxx
    r.insert(
        Method::DELETE,
        format!("{}{}", ADMIN_PREFIX, "/v3/delete-service-accounts").as_str(),
        AdminOperation(&DeleteServiceAccount {}),
    )?;
    // @body: AddServiceAccountReq
    r.insert(
        Method::PUT,
        format!("{}{}", ADMIN_PREFIX, "/v3/add-service-accounts").as_str(),
        AdminOperation(&AddServiceAccount {}),
    )?;
    // Whole-IAM export / import.
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/export-iam").as_str(),
        AdminOperation(&user::ExportIam {}),
    )?;
    r.insert(
        Method::PUT,
        format!("{}{}", ADMIN_PREFIX, "/v3/import-iam").as_str(),
        AdminOperation(&user::ImportIam {}),
    )?;
    // list-canned-policies?bucket=xxx
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/list-canned-policies").as_str(),
        AdminOperation(&policies::ListCannedPolicies {}),
    )?;
    // info-canned-policy?name=xxx
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/info-canned-policy").as_str(),
        AdminOperation(&policies::InfoCannedPolicy {}),
    )?;
    // add-canned-policy?name=xxx
    r.insert(
        Method::PUT,
        format!("{}{}", ADMIN_PREFIX, "/v3/add-canned-policy").as_str(),
        AdminOperation(&policies::AddCannedPolicy {}),
    )?;
    // remove-canned-policy?name=xxx
    r.insert(
        Method::DELETE,
        format!("{}{}", ADMIN_PREFIX, "/v3/remove-canned-policy").as_str(),
        AdminOperation(&policies::RemoveCannedPolicy {}),
    )?;
    // set-user-or-group-policy?policyName=xxx&userOrGroup=xxx&isGroup=xxx
    r.insert(
        Method::PUT,
        format!("{}{}", ADMIN_PREFIX, "/v3/set-user-or-group-policy").as_str(),
        AdminOperation(&policies::SetPolicyForUserOrGroup {}),
    )?;
    // Notification targets.
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/target/list").as_str(),
        AdminOperation(&ListNotificationTargets {}),
    )?;
    r.insert(
        Method::PUT,
        format!("{}{}", ADMIN_PREFIX, "/v3/target/{target_type}/{target_name}").as_str(),
        AdminOperation(&NotificationTarget {}),
    )?;
    // Remove notification target
    // This endpoint removes a notification target based on its type and name.
    // target-remove?target_type=xxx&target_name=xxx
    // * `target_type` - Target type, such as "notify_webhook" or "notify_mqtt".
    // * `target_name` - A unique name for a Target, such as "1".
    r.insert(
        Method::DELETE,
        format!("{}{}", ADMIN_PREFIX, "/v3/target/{target_type}/{target_name}/reset").as_str(),
        AdminOperation(&RemoveNotificationTarget {}),
    )?;
    // arns list
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/v3/target/arns").as_str(),
        AdminOperation(&ListTargetsArns {}),
    )?;
    Ok(())
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/console_test.rs | rustfs/src/admin/console_test.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(test)]
mod tests {
    use crate::config::Opt;
    use clap::Parser;
    /// CORS origin parsing must accept wildcard, comma-separated lists,
    /// empty strings and a missing value without panicking.
    #[tokio::test]
    async fn test_console_cors_configuration() {
        // Test CORS configuration parsing
        use crate::admin::console::parse_cors_origins;
        // Test wildcard origin
        let cors_wildcard = Some("*".to_string());
        let _layer1 = parse_cors_origins(cors_wildcard.as_ref());
        // Should create a layer without error
        // Test specific origins
        let cors_specific = Some("http://localhost:3000,https://admin.example.com".to_string());
        let _layer2 = parse_cors_origins(cors_specific.as_ref());
        // Should create a layer without error
        // Test empty origin
        let cors_empty = Some("".to_string());
        let _layer3 = parse_cors_origins(cors_empty.as_ref());
        // Should create a layer without error (falls back to permissive)
        // Test no origin
        let _layer4 = parse_cors_origins(None);
        // Should create a layer without error (uses default)
    }
    /// The `--tls-path` CLI flag must be parsed into `Opt::tls_path`.
    #[tokio::test]
    async fn test_console_tls_configuration() {
        // Test TLS configuration options (now uses shared tls_path)
        let args = vec!["rustfs", "/tmp/test", "--tls-path", "/path/to/tls"];
        let opt = Opt::parse_from(args);
        assert_eq!(opt.tls_path, Some("/path/to/tls".to_string()));
    }
    /// The console must be enabled by default so health checks can run.
    #[tokio::test]
    async fn test_console_health_check_endpoint() {
        // Test that console health check can be called
        // This test would need a running server to be comprehensive
        // For now, we test configuration and startup behavior
        let args = vec!["rustfs", "/tmp/test", "--console-address", ":0"];
        let opt = Opt::parse_from(args);
        // Verify the configuration supports health checks
        assert!(opt.console_enable, "Console should be enabled for health checks");
    }
    /// Smoke-test that the dedicated console tracing targets are usable.
    #[tokio::test]
    async fn test_console_separate_logging_target() {
        // Test that console uses separate logging targets
        use tracing::info;
        // This test verifies that logging targets are properly set up
        info!(target: "rustfs::console::startup", "Test console startup log");
        info!(target: "rustfs::console::access", "Test console access log");
        info!(target: "rustfs::console::error", "Test console error log");
        info!(target: "rustfs::console::shutdown", "Test console shutdown log");
        // In a real implementation, we would verify these logs are captured separately
    }
    /// Explicit `--console-enable` / `--console-address` flags round-trip
    /// into the parsed options.
    #[tokio::test]
    async fn test_console_configuration_validation() {
        // Test configuration validation
        let args = vec![
            "rustfs",
            "/tmp/test",
            "--console-enable",
            "true",
            "--console-address",
            ":9001",
        ];
        let opt = Opt::parse_from(args);
        // Verify all console-related configuration is parsed correctly
        assert!(opt.console_enable);
        assert_eq!(opt.console_address, ":9001");
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/rpc.rs | rustfs/src/admin/rpc.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::router::AdminOperation;
use super::router::Operation;
use super::router::S3Router;
use crate::server::RPC_PREFIX;
use futures::StreamExt;
use http::StatusCode;
use hyper::Method;
use matchit::Params;
use rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE;
use rustfs_ecstore::disk::DiskAPI;
use rustfs_ecstore::disk::WalkDirOptions;
use rustfs_ecstore::set_disk::DEFAULT_READ_BUFFER_SIZE;
use rustfs_ecstore::store::find_local_disk;
use rustfs_utils::net::bytes_stream;
use s3s::Body;
use s3s::S3Request;
use s3s::S3Response;
use s3s::S3Result;
use s3s::dto::StreamingBlob;
use s3s::s3_error;
use serde_urlencoded::from_bytes;
use tokio::io::AsyncWriteExt;
use tokio_util::io::ReaderStream;
use tracing::warn;
/// Register the internal RPC routes (file read/write streaming and
/// directory walking) on the admin router.
///
/// GET/HEAD variants share the same handler; HEAD serves as a cheap probe.
pub fn register_rpc_route(r: &mut S3Router<AdminOperation>) -> std::io::Result<()> {
    // Route table: (method, path suffix, handler).
    let routes = [
        (Method::GET, "/read_file_stream", AdminOperation(&ReadFile {})),
        (Method::HEAD, "/read_file_stream", AdminOperation(&ReadFile {})),
        (Method::PUT, "/put_file_stream", AdminOperation(&PutFile {})),
        (Method::GET, "/walk_dir", AdminOperation(&WalkDir {})),
        (Method::HEAD, "/walk_dir", AdminOperation(&WalkDir {})),
    ];
    for (method, suffix, op) in routes {
        r.insert(method, format!("{RPC_PREFIX}{suffix}").as_str(), op)?;
    }
    Ok(())
}
// /rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}
/// Query parameters for the `read_file_stream` RPC.
#[derive(Debug, Default, serde::Deserialize)]
pub struct ReadFileQuery {
    /// Local disk identifier (resolved via `find_local_disk`).
    disk: String,
    /// Volume (bucket) name.
    volume: String,
    /// File path within the volume.
    path: String,
    /// Byte offset to start reading from.
    offset: usize,
    /// Number of bytes to read.
    length: usize,
}
/// Handler for `GET/HEAD {RPC_PREFIX}/read_file_stream`.
pub struct ReadFile {}

#[async_trait::async_trait]
impl Operation for ReadFile {
    /// Streams a byte range of a file from a local disk.
    ///
    /// Parameters come from the query string (see [`ReadFileQuery`]).
    /// A HEAD request is answered immediately with `200 OK` and an empty
    /// body, acting as a liveness probe.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        if req.method == Method::HEAD {
            return Ok(S3Response::new((StatusCode::OK, Body::empty())));
        }

        // A missing query string falls back to Default, which then fails
        // the "disk not found" lookup below.
        let query = {
            if let Some(query) = req.uri.query() {
                let input: ReadFileQuery =
                    from_bytes(query.as_bytes()).map_err(|e| s3_error!(InvalidArgument, "get query failed {:?}", e))?;
                input
            } else {
                ReadFileQuery::default()
            }
        };

        // Only disks local to this node can be served over this RPC.
        let Some(disk) = find_local_disk(&query.disk).await else {
            return Err(s3_error!(InvalidArgument, "disk not found"));
        };

        let file = disk
            .read_file_stream(&query.volume, &query.path, query.offset, query.length)
            .await
            .map_err(|e| s3_error!(InternalError, "read file err {}", e))?;

        // Bound the byte stream to exactly `length` bytes.
        Ok(S3Response::new((
            StatusCode::OK,
            Body::from(StreamingBlob::wrap(bytes_stream(
                ReaderStream::with_capacity(file, DEFAULT_READ_BUFFER_SIZE),
                query.length,
            ))),
        )))
    }
}
/// Query parameters for the `walk_dir` RPC: selects the local disk to walk.
#[derive(Debug, Default, serde::Deserialize)]
pub struct WalkDirQuery {
    /// Local disk identifier (resolved via `find_local_disk`).
    disk: String,
}
/// Handler for `GET/HEAD {RPC_PREFIX}/walk_dir`.
pub struct WalkDir {}

#[async_trait::async_trait]
impl Operation for WalkDir {
    /// Streams directory-walk results for a local disk.
    ///
    /// The disk is selected by the `disk` query parameter; the walk
    /// options arrive as a JSON-encoded [`WalkDirOptions`] request body.
    /// A HEAD request is answered immediately with `200 OK` (liveness probe).
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        if req.method == Method::HEAD {
            return Ok(S3Response::new((StatusCode::OK, Body::empty())));
        }

        let query = {
            if let Some(query) = req.uri.query() {
                let input: WalkDirQuery =
                    from_bytes(query.as_bytes()).map_err(|e| s3_error!(InvalidArgument, "get query failed {:?}", e))?;
                input
            } else {
                WalkDirQuery::default()
            }
        };

        // Cap the request body to guard against oversized admin payloads.
        let mut input = req.input;
        let body = match input.store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE).await {
            Ok(b) => b,
            Err(e) => {
                warn!("get body failed, e: {:?}", e);
                return Err(s3_error!(InvalidRequest, "RPC request body too large or failed to read"));
            }
        };

        // let body_bytes = decrypt_data(input_cred.secret_key.expose().as_bytes(), &body)
        //     .map_err(|e| S3Error::with_message(S3ErrorCode::InvalidArgument, format!("decrypt_data err {}", e)))?;

        let args: WalkDirOptions =
            serde_json::from_slice(&body).map_err(|e| s3_error!(InternalError, "unmarshal body err {}", e))?;

        let Some(disk) = find_local_disk(&query.disk).await else {
            return Err(s3_error!(InvalidArgument, "disk not found"));
        };

        // Pipe the walk output through an in-memory duplex so results are
        // streamed to the client as they are produced.
        let (rd, mut wd) = tokio::io::duplex(DEFAULT_READ_BUFFER_SIZE);

        tokio::spawn(async move {
            if let Err(e) = disk.walk_dir(args, &mut wd).await {
                warn!("walk dir err {}", e);
            }
        });

        let body = Body::from(StreamingBlob::wrap(ReaderStream::with_capacity(rd, DEFAULT_READ_BUFFER_SIZE)));

        Ok(S3Response::new((StatusCode::OK, body)))
    }
}
// /rustfs/rpc/put_file_stream?disk={}&volume={}&path={}&append={}&size={}
/// Query parameters for the `put_file_stream` RPC.
#[derive(Debug, Default, serde::Deserialize)]
pub struct PutFileQuery {
    /// Local disk identifier (resolved via `find_local_disk`).
    disk: String,
    /// Volume (bucket) name.
    volume: String,
    /// File path within the volume.
    path: String,
    /// Append to an existing file instead of creating a new one.
    append: bool,
    /// File size in bytes, passed to `create_file` when not appending.
    size: i64,
}
/// Handler for `PUT {RPC_PREFIX}/put_file_stream`.
pub struct PutFile {}

#[async_trait::async_trait]
impl Operation for PutFile {
    /// Writes the request body to a file on a local disk.
    ///
    /// Parameters come from the query string (see [`PutFileQuery`]). When
    /// `append` is true the body is appended to an existing file;
    /// otherwise a new file of `size` bytes is created.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let query = {
            if let Some(query) = req.uri.query() {
                let input: PutFileQuery =
                    from_bytes(query.as_bytes()).map_err(|e| s3_error!(InvalidArgument, "get query failed {:?}", e))?;
                input
            } else {
                PutFileQuery::default()
            }
        };

        let Some(disk) = find_local_disk(&query.disk).await else {
            return Err(s3_error!(InvalidArgument, "disk not found"));
        };

        let mut file = if query.append {
            disk.append_file(&query.volume, &query.path)
                .await
                .map_err(|e| s3_error!(InternalError, "append file err {}", e))?
        } else {
            disk.create_file("", &query.volume, &query.path, query.size)
                .await
                .map_err(|e| s3_error!(InternalError, "create file err {}", e))?
        };

        // Stream the request body to disk chunk by chunk.
        let mut body = req.input;
        while let Some(item) = body.next().await {
            let bytes = item.map_err(|e| s3_error!(InternalError, "body stream err {}", e))?;
            let result = file.write_all(&bytes).await;
            result.map_err(|e| s3_error!(InternalError, "write file err {}", e))?;
        }

        Ok(S3Response::new((StatusCode::OK, Body::empty())))
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/console.rs | rustfs/src/admin/console.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::config::build;
use crate::license::get_license;
use crate::server::{CONSOLE_PREFIX, FAVICON_PATH, HEALTH_PREFIX, RUSTFS_ADMIN_PREFIX};
use axum::{
Router,
body::Body,
extract::Request,
middleware,
response::{IntoResponse, Response},
routing::get,
};
use axum_server::tls_rustls::RustlsConfig;
use http::{HeaderMap, HeaderName, HeaderValue, Method, StatusCode, Uri};
use mime_guess::from_path;
use rust_embed::RustEmbed;
use rustfs_config::{RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
use serde::Serialize;
use serde_json::json;
use std::{
io::Result,
net::{IpAddr, SocketAddr},
sync::{Arc, OnceLock},
time::Duration,
};
use tokio_rustls::rustls::ServerConfig;
use tower_http::catch_panic::CatchPanicLayer;
use tower_http::compression::CompressionLayer;
use tower_http::cors::{AllowOrigin, Any, CorsLayer};
use tower_http::limit::RequestBodyLimitLayer;
use tower_http::timeout::TimeoutLayer;
use tower_http::trace::TraceLayer;
use tracing::{debug, error, info, instrument, warn};
/// Console static assets embedded into the binary at compile time from
/// the crate's `static/` directory.
#[derive(RustEmbed)]
#[folder = "$CARGO_MANIFEST_DIR/static"]
struct StaticFiles;
/// Static file handler
///
/// Serves static files embedded in the binary via rust-embed. If the
/// requested file is not found, `index.html` is served as an SPA
/// fallback; if that is also missing, a 404 response is returned.
///
/// # Arguments:
/// - `uri`: The request URI.
///
/// # Returns:
/// - An `impl IntoResponse` containing the asset or a 404 response.
async fn static_handler(uri: Uri) -> impl IntoResponse {
    let requested = uri.path().trim_start_matches('/');
    let path = if requested.is_empty() { "index.html" } else { requested };

    // Build a 200 response for an embedded asset, if it exists.
    let serve = |name: &str| {
        StaticFiles::get(name).map(|file| {
            let mime_type = from_path(name).first_or_octet_stream();
            Response::builder()
                .status(StatusCode::OK)
                .header("Content-Type", mime_type.to_string())
                .body(Body::from(file.data))
                .unwrap()
        })
    };

    match serve(path).or_else(|| serve("index.html")) {
        Some(response) => response,
        None => Response::builder()
            .status(StatusCode::NOT_FOUND)
            .body(Body::from(" 404 Not Found \n RustFS "))
            .unwrap(),
    }
}
/// Console configuration served to the web UI as `config.json`.
#[derive(Debug, Serialize, Clone)]
pub(crate) struct Config {
    /// Port the console listens on; not serialized, used to rebuild URLs.
    #[serde(skip)]
    port: u16,
    /// Admin API location.
    api: Api,
    /// S3 endpoint information.
    s3: S3,
    /// Build release metadata.
    release: Release,
    /// License metadata.
    license: License,
    /// Documentation URL.
    doc: String,
}
impl Config {
    /// Build a console configuration from the bound IP/port and build info.
    fn new(local_ip: IpAddr, port: u16, version: &str, date: &str) -> Self {
        Config {
            port,
            api: Api {
                base_url: format!("http://{local_ip}:{port}/{RUSTFS_ADMIN_PREFIX}"),
            },
            s3: S3 {
                endpoint: format!("http://{local_ip}:{port}"),
                region: "cn-east-1".to_owned(),
            },
            release: Release {
                version: version.to_string(),
                date: date.to_string(),
            },
            license: License {
                name: "Apache-2.0".to_string(),
                url: "https://www.apache.org/licenses/LICENSE-2.0".to_string(),
            },
            doc: "https://rustfs.com/docs/".to_string(),
        }
    }

    /// Serialize to JSON; returns an empty string on serialization failure.
    fn to_json(&self) -> String {
        serde_json::to_string(self).unwrap_or_default()
    }

    /// Human-readable version banner, e.g.
    /// `RELEASE.<date>@<version> (rust <rust-version> <target>)`.
    #[allow(dead_code)]
    pub(crate) fn version_info(&self) -> String {
        format!(
            "RELEASE.{}@{} (rust {} {})",
            self.release.date.clone(),
            self.release.version.clone().trim_start_matches('@'),
            build::RUST_VERSION,
            build::BUILD_TARGET
        )
    }

    /// Raw release version string.
    #[allow(dead_code)]
    pub(crate) fn version(&self) -> String {
        self.release.version.clone()
    }

    /// License name and URL joined by a space.
    #[allow(dead_code)]
    pub(crate) fn license(&self) -> String {
        format!("{} {}", self.license.name.clone(), self.license.url.clone())
    }

    /// Documentation URL.
    #[allow(dead_code)]
    pub(crate) fn doc(&self) -> String {
        self.doc.clone()
    }
}
/// Admin API location, serialized as `{"baseURL": ...}`.
#[derive(Debug, Serialize, Clone)]
struct Api {
    #[serde(rename = "baseURL")]
    base_url: String,
}

/// S3 endpoint description for the console UI.
#[derive(Debug, Serialize, Clone)]
struct S3 {
    endpoint: String,
    region: String,
}

/// Release (build) metadata.
#[derive(Debug, Serialize, Clone)]
struct Release {
    version: String,
    date: String,
}

/// License metadata.
#[derive(Debug, Serialize, Clone)]
struct License {
    name: String,
    url: String,
}
/// Global console configuration
static CONSOLE_CONFIG: OnceLock<Config> = OnceLock::new();

/// Initialize the global console configuration exactly once.
///
/// The version string prefers the release tag, then `@<short-commit>`,
/// then the crate package version. Subsequent calls are no-ops.
#[allow(clippy::const_is_empty)]
pub(crate) fn init_console_cfg(local_ip: IpAddr, port: u16) {
    CONSOLE_CONFIG.get_or_init(|| {
        let ver = {
            if !build::TAG.is_empty() {
                build::TAG.to_string()
            } else if !build::SHORT_COMMIT.is_empty() {
                format!("@{}", build::SHORT_COMMIT)
            } else {
                build::PKG_VERSION.to_string()
            }
        };
        Config::new(local_ip, port, ver.as_str(), build::COMMIT_DATE_3339)
    });
}
/// License handler
/// Returns the current license information of the console.
///
/// # Returns:
/// - 200 OK with JSON body containing license details.
#[instrument]
async fn license_handler() -> impl IntoResponse {
    // Missing license or failed serialization both degrade to an empty body.
    let payload = serde_json::to_string(&get_license().unwrap_or_default()).unwrap_or_default();
    Response::builder()
        .status(StatusCode::OK)
        .header("content-type", "application/json")
        .body(Body::from(payload))
        .unwrap()
}
/// Check if the given IP address is a private (RFC 1918) IPv4 address.
///
/// Returns `false` for every IPv6 address; IPv6 unique-local ranges are
/// intentionally not considered here.
fn _is_private_ip(ip: IpAddr) -> bool {
    match ip {
        IpAddr::V4(ip) => {
            let octets = ip.octets();
            // 10.0.0.0/8
            octets[0] == 10
                // 172.16.0.0/12
                || (octets[0] == 172 && (16..=31).contains(&octets[1]))
                // 192.168.0.0/16
                || (octets[0] == 192 && octets[1] == 168)
        }
        IpAddr::V6(_) => false,
    }
}
/// Version handler
/// Returns the current version information of the console.
///
/// # Returns:
/// - 200 OK with JSON body containing version details if configuration is initialized.
/// - 500 Internal Server Error if configuration is not initialized.
#[instrument]
async fn version_handler() -> impl IntoResponse {
    // Guard: the global configuration must have been initialized at startup.
    let Some(cfg) = CONSOLE_CONFIG.get() else {
        return Response::builder()
            .status(StatusCode::INTERNAL_SERVER_ERROR)
            .body(Body::from("Console configuration not initialized"))
            .unwrap();
    };
    let payload = json!({
        "version": cfg.release.version,
        "version_info": cfg.version_info(),
        "date": cfg.release.date,
    });
    Response::builder()
        .header("content-type", "application/json")
        .status(StatusCode::OK)
        .body(Body::from(payload.to_string()))
        .unwrap()
}
/// Configuration handler
/// Returns the current console configuration in JSON format.
/// The configuration is dynamically adjusted based on the request's host and scheme.
///
/// # Arguments:
/// - `uri`: The request URI.
/// - `headers`: The request headers.
///
/// # Returns:
/// - 200 OK with JSON body containing the console configuration if initialized.
/// - 500 Internal Server Error if configuration is not initialized.
#[instrument(fields(uri))]
async fn config_handler(uri: Uri, headers: HeaderMap) -> impl IntoResponse {
    // Get the scheme from the headers or use the URI scheme.
    // `x-forwarded-proto` wins so URLs are correct behind a TLS-terminating proxy.
    let scheme = headers
        .get(HeaderName::from_static("x-forwarded-proto"))
        .and_then(|value| value.to_str().ok())
        .unwrap_or_else(|| uri.scheme().map(|s| s.as_str()).unwrap_or("http"));
    // Prefer URI host, fallback to `Host` header
    let header_host = headers
        .get(http::header::HOST)
        .and_then(|v| v.to_str().ok())
        .unwrap_or_default();
    let raw_host = uri.host().unwrap_or(header_host);
    // Normalize the host into a URL-embeddable form: strip any port and
    // bracket IPv6 literals.
    let host_for_url = if let Ok(socket_addr) = raw_host.parse::<SocketAddr>() {
        // Successfully parsed, it's in IP:Port format.
        // For IPv6, we need to enclose it in brackets to form a valid URL.
        let ip = socket_addr.ip();
        if ip.is_ipv6() { format!("[{ip}]") } else { format!("{ip}") }
    } else if let Ok(ip) = raw_host.parse::<IpAddr>() {
        // Pure IP (no ports)
        if ip.is_ipv6() { format!("[{ip}]") } else { ip.to_string() }
    } else {
        // The domain name may not be able to resolve directly to IP, remove the port
        // NOTE(review): splitting on ':' would also truncate a bracketed IPv6
        // host that failed both parses above — confirm such hosts cannot occur.
        raw_host.split(':').next().unwrap_or(raw_host).to_string()
    };
    // Make a copy of the current configuration
    let mut cfg = match CONSOLE_CONFIG.get() {
        Some(cfg) => cfg.clone(),
        None => {
            error!("Console configuration not initialized");
            return Response::builder()
                .status(StatusCode::INTERNAL_SERVER_ERROR)
                .body(Body::from("Console configuration not initialized"))
                .unwrap();
        }
    };
    // Rebuild the API/S3 endpoints from the requesting host, keeping the
    // configured console port.
    let url = format!("{}://{}:{}", scheme, host_for_url, cfg.port);
    cfg.api.base_url = format!("{url}{RUSTFS_ADMIN_PREFIX}");
    cfg.s3.endpoint = url;
    Response::builder()
        .header("content-type", "application/json")
        .status(StatusCode::OK)
        .body(Body::from(cfg.to_json()))
        .unwrap()
}
/// Console access logging middleware
/// Logs each console access with method, URI, status code, and duration.
///
/// # Arguments:
/// - `req`: The incoming request.
/// - `next`: The next middleware or handler in the chain.
///
/// # Returns:
/// - The response from the next middleware or handler.
async fn console_logging_middleware(req: Request, next: middleware::Next) -> Response {
    // Capture request identity before the request is consumed downstream.
    let (method, uri) = (req.method().clone(), req.uri().clone());
    let started_at = std::time::Instant::now();

    let response = next.run(req).await;

    info!(
        target: "rustfs::console::access",
        method = %method,
        uri = %uri,
        status = %response.status(),
        duration_ms = %started_at.elapsed().as_millis(),
        "Console access"
    );
    response
}
/// Setup TLS configuration for console using axum-server, following endpoint TLS implementation logic
///
/// Resolution order:
/// 1. No/empty/missing `tls_path` -> `Ok(None)` (plain HTTP).
/// 2. Directory with loadable certificates -> SNI-aware multi-cert config.
/// 3. Legacy single `RUSTFS_TLS_CERT`/`RUSTFS_TLS_KEY` pair -> single-cert config.
/// 4. Otherwise -> `Ok(None)` (plain HTTP).
///
/// Errors are returned only when certificates exist but fail to load.
#[instrument(skip(tls_path))]
async fn _setup_console_tls_config(tls_path: Option<&String>) -> Result<Option<RustlsConfig>> {
    let tls_path = match tls_path {
        Some(path) if !path.is_empty() => path,
        _ => {
            debug!("TLS path is not provided, console starting with HTTP");
            return Ok(None);
        }
    };
    if tokio::fs::metadata(tls_path).await.is_err() {
        debug!("TLS path does not exist, console starting with HTTP");
        return Ok(None);
    }
    debug!("Found TLS directory for console, checking for certificates");
    // Make sure to use a modern encryption suite
    let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
    // 1. Attempt to load all certificates in the directory (multi-certificate support, for SNI)
    if let Ok(cert_key_pairs) = rustfs_utils::load_all_certs_from_directory(tls_path)
        && !cert_key_pairs.is_empty()
    {
        debug!(
            "Found {} certificates for console, creating SNI-aware multi-cert resolver",
            cert_key_pairs.len()
        );
        // Create an SNI-enabled certificate resolver
        let resolver = rustfs_utils::create_multi_cert_resolver(cert_key_pairs)?;
        // Configure the server to enable SNI support
        let mut server_config = ServerConfig::builder()
            .with_no_client_auth()
            .with_cert_resolver(Arc::new(resolver));
        // Configure ALPN protocol priority
        server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec(), b"http/1.0".to_vec()];
        // Log SNI requests (writes TLS session keys for debugging when enabled)
        if rustfs_utils::tls_key_log() {
            server_config.key_log = Arc::new(rustls::KeyLogFile::new());
        }
        info!(target: "rustfs::console::tls", "Console TLS enabled with multi-certificate SNI support");
        return Ok(Some(RustlsConfig::from_config(Arc::new(server_config))));
    }
    // 2. Revert to the traditional single-certificate mode
    let key_path = format!("{tls_path}/{RUSTFS_TLS_KEY}");
    let cert_path = format!("{tls_path}/{RUSTFS_TLS_CERT}");
    if tokio::try_join!(tokio::fs::metadata(&key_path), tokio::fs::metadata(&cert_path)).is_ok() {
        debug!("Found legacy single TLS certificate for console, starting with HTTPS");
        return match RustlsConfig::from_pem_file(cert_path, key_path).await {
            Ok(config) => {
                info!(target: "rustfs::console::tls", "Console TLS enabled with single certificate");
                Ok(Some(config))
            }
            Err(e) => {
                error!(target: "rustfs::console::error", error = %e, "Failed to create TLS config for console");
                Err(std::io::Error::other(e))
            }
        };
    }
    debug!("No valid TLS certificates found in the directory for console, starting with HTTP");
    Ok(None)
}
/// Get console configuration from environment variables
///
/// # Returns:
/// - rate_limit_enable: bool indicating if rate limiting is enabled.
/// - rate_limit_rpm: u32 indicating the rate limit in requests per minute.
/// - auth_timeout: u64 indicating the authentication timeout in seconds.
/// - cors_allowed_origins: String containing allowed CORS origins.
///
/// Each value falls back to its `rustfs_config::DEFAULT_*` constant when
/// the variable is unset or unparsable.
fn get_console_config_from_env() -> (bool, u32, u64, String) {
    let rate_limit_enable = std::env::var(rustfs_config::ENV_CONSOLE_RATE_LIMIT_ENABLE)
        .unwrap_or_else(|_| rustfs_config::DEFAULT_CONSOLE_RATE_LIMIT_ENABLE.to_string())
        .parse::<bool>()
        .unwrap_or(rustfs_config::DEFAULT_CONSOLE_RATE_LIMIT_ENABLE);

    let rate_limit_rpm = std::env::var(rustfs_config::ENV_CONSOLE_RATE_LIMIT_RPM)
        .unwrap_or_else(|_| rustfs_config::DEFAULT_CONSOLE_RATE_LIMIT_RPM.to_string())
        .parse::<u32>()
        .unwrap_or(rustfs_config::DEFAULT_CONSOLE_RATE_LIMIT_RPM);

    let auth_timeout = std::env::var(rustfs_config::ENV_CONSOLE_AUTH_TIMEOUT)
        .unwrap_or_else(|_| rustfs_config::DEFAULT_CONSOLE_AUTH_TIMEOUT.to_string())
        .parse::<u64>()
        .unwrap_or(rustfs_config::DEFAULT_CONSOLE_AUTH_TIMEOUT);

    // The value is already a String; the former `.parse::<String>()`
    // round-trip was an infallible no-op and has been removed.
    let cors_allowed_origins = std::env::var(rustfs_config::ENV_CONSOLE_CORS_ALLOWED_ORIGINS)
        .unwrap_or_else(|_| rustfs_config::DEFAULT_CONSOLE_CORS_ALLOWED_ORIGINS.to_string());

    (rate_limit_enable, rate_limit_rpm, auth_timeout, cors_allowed_origins)
}
/// Check if the given path is for console access
///
/// A console path is either the favicon or anything under the console
/// URL prefix.
///
/// # Arguments:
/// - `path`: The request path.
///
/// # Returns:
/// - `true` if the path is for console access, `false` otherwise.
pub fn is_console_path(path: &str) -> bool {
    path.starts_with(CONSOLE_PREFIX) || path == FAVICON_PATH
}
/// Setup comprehensive middleware stack with tower-http features
///
/// # Arguments:
/// - `cors_layer`: The CORS layer to apply.
/// - `rate_limit_enable`: bool indicating if rate limiting is enabled.
/// - `rate_limit_rpm`: u32 indicating the rate limit in requests per minute.
/// - `auth_timeout`: u64 indicating the request timeout in seconds.
///
/// # Returns:
/// - A `Router` with the configured middleware stack.
fn setup_console_middleware_stack(
    cors_layer: CorsLayer,
    rate_limit_enable: bool,
    rate_limit_rpm: u32,
    auth_timeout: u64,
) -> Router {
    // Route table: static assets, console metadata endpoints, and health
    // checks; anything unmatched falls through to the SPA static handler.
    let mut app = Router::new()
        .route(FAVICON_PATH, get(static_handler))
        .route(&format!("{CONSOLE_PREFIX}/license"), get(license_handler))
        .route(&format!("{CONSOLE_PREFIX}/config.json"), get(config_handler))
        .route(&format!("{CONSOLE_PREFIX}/version"), get(version_handler))
        .route(&format!("{CONSOLE_PREFIX}{HEALTH_PREFIX}"), get(health_check).head(health_check))
        .nest(CONSOLE_PREFIX, Router::new().fallback_service(get(static_handler)))
        .fallback_service(get(static_handler));
    // Add comprehensive middleware layers using tower-http features
    app = app
        .layer(CatchPanicLayer::new())
        .layer(TraceLayer::new_for_http())
        // Compress responses
        .layer(CompressionLayer::new())
        .layer(middleware::from_fn(console_logging_middleware))
        .layer(cors_layer)
        // Add timeout layer - convert auth_timeout from seconds to Duration
        .layer(TimeoutLayer::with_status_code(
            StatusCode::REQUEST_TIMEOUT,
            Duration::from_secs(auth_timeout),
        ))
        // Add request body limit.
        // NOTE(review): this was previously described as "10MB for console
        // uploads", but 5 * 1024^3 is 5 GiB — confirm which limit is intended.
        .layer(RequestBodyLimitLayer::new(5 * 1024 * 1024 * 1024));
    // Add rate limiting if enabled
    if rate_limit_enable {
        info!("Console rate limiting enabled: {} requests per minute", rate_limit_rpm);
        // Note: tower-http doesn't provide a built-in rate limiter, but we have the foundation
        // For production, you would integrate with a rate limiting service like Redis
        // For now, we log that it's configured and ready for integration
    }
    app
}
/// Console health check handler with comprehensive health information
///
/// GET returns a JSON body with overall status ("ok" or "degraded") plus
/// per-subsystem details; HEAD returns only headers; other methods get 405.
///
/// # Arguments:
/// - `method`: The HTTP method of the request.
///
/// # Returns:
/// - A `Response` containing the health check result.
#[instrument]
async fn health_check(method: Method) -> Response {
    let builder = Response::builder()
        .status(StatusCode::OK)
        .header("content-type", "application/json");
    match method {
        // GET: Returns complete JSON
        Method::GET => {
            let mut health_status = "ok";
            let mut details = json!({});
            // Check storage backend health; a down subsystem degrades (but
            // does not fail) the overall status.
            if let Some(_store) = rustfs_ecstore::new_object_layer_fn() {
                details["storage"] = json!({"status": "connected"});
            } else {
                health_status = "degraded";
                details["storage"] = json!({"status": "disconnected"});
            }
            // Check IAM system health
            match rustfs_iam::get() {
                Ok(_) => {
                    details["iam"] = json!({"status": "connected"});
                }
                Err(_) => {
                    health_status = "degraded";
                    details["iam"] = json!({"status": "disconnected"});
                }
            }
            // NOTE(review): "uptime" below is seconds since the Unix epoch,
            // not process uptime — confirm the intended semantics.
            let body_json = json!({
                "status": health_status,
                "service": "rustfs-console",
                "timestamp": chrono::Utc::now().to_rfc3339(),
                "version": env!("CARGO_PKG_VERSION"),
                "details": details,
                "uptime": std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .unwrap_or_default()
                    .as_secs()
            });
            // Return a minimal JSON when serialization fails to avoid panic
            let body_str = serde_json::to_string(&body_json).unwrap_or_else(|e| {
                error!(
                    target: "rustfs::console::health",
                    "failed to serialize health check body: {}",
                    e
                );
                // Simplified back-up JSON
                "{\"status\":\"error\",\"service\":\"rustfs-console\"}".to_string()
            });
            builder.body(Body::from(body_str)).unwrap_or_else(|e| {
                error!(
                    target: "rustfs::console::health",
                    "failed to build GET health response: {}",
                    e
                );
                Response::builder()
                    .status(StatusCode::INTERNAL_SERVER_ERROR)
                    .body(Body::from("failed to build response"))
                    .unwrap_or_else(|_| Response::new(Body::from("")))
            })
        }
        // HEAD: Only status + headers are returned, body is empty
        Method::HEAD => builder.body(Body::empty()).unwrap_or_else(|e| {
            error!(
                target: "rustfs::console::health",
                "failed to build HEAD health response: {}",
                e
            );
            Response::builder()
                .status(StatusCode::INTERNAL_SERVER_ERROR)
                .body(Body::from("failed to build response"))
                .unwrap_or_else(|e| {
                    error!(
                        target: "rustfs::console::health",
                        "failed to build HEAD health empty response, reason: {}",
                        e
                    );
                    Response::new(Body::from(""))
                })
        }),
        // Other methods: 405
        _ => Response::builder()
            .status(StatusCode::METHOD_NOT_ALLOWED)
            .header("allow", "GET, HEAD")
            .body(Body::from("Method Not Allowed"))
            .unwrap_or_else(|e| {
                error!(
                    target: "rustfs::console::health",
                    "failed to build 405 response: {}",
                    e
                );
                Response::new(Body::from("Method Not Allowed"))
            }),
    }
}
/// Parse CORS allowed origins from configuration
///
/// # Arguments:
/// - `origins`: An optional reference to a string containing allowed
///   origins (comma-separated), or `"*"` for a fully permissive policy.
///
/// # Returns:
/// - A `CorsLayer` configured with the specified origins.
pub fn parse_cors_origins(origins: Option<&String>) -> CorsLayer {
    let cors_layer = CorsLayer::new()
        .allow_methods([Method::GET, Method::POST, Method::PUT, Method::DELETE, Method::OPTIONS])
        .allow_headers(Any);
    match origins {
        Some(origins_str) if origins_str == "*" => cors_layer.allow_origin(Any).expose_headers(Any),
        Some(origins_str) => {
            let origins: Vec<&str> = origins_str.split(',').map(|s| s.trim()).collect();
            if origins.is_empty() {
                warn!("Empty CORS origins provided, using permissive CORS");
                cors_layer.allow_origin(Any).expose_headers(Any)
            } else {
                // Parse origins with proper error handling; invalid entries
                // are logged and skipped rather than failing startup.
                let mut valid_origins = Vec::new();
                for origin in origins {
                    match origin.parse::<HeaderValue>() {
                        Ok(header_value) => {
                            valid_origins.push(header_value);
                        }
                        Err(e) => {
                            warn!("Invalid CORS origin '{}': {}", origin, e);
                        }
                    }
                }
                if valid_origins.is_empty() {
                    warn!("No valid CORS origins found, using permissive CORS");
                    cors_layer.allow_origin(Any).expose_headers(Any)
                } else {
                    info!("Console CORS origins configured: {:?}", valid_origins);
                    cors_layer.allow_origin(AllowOrigin::list(valid_origins)).expose_headers(Any)
                }
            }
        }
        None => {
            debug!("No CORS origins configured for console, using permissive CORS");
            // NOTE(review): unlike the other permissive branches, this one
            // does not call `.expose_headers(Any)` — confirm intentional.
            cors_layer.allow_origin(Any)
        }
    }
}
/// Create and configure the console server router
///
/// # Returns:
/// - A `Router` configured for the console server with middleware.
pub(crate) fn make_console_server() -> Router {
    let (rate_limit_enable, rate_limit_rpm, auth_timeout, cors_allowed_origins) = get_console_config_from_env();

    // An empty origins string means "no explicit configuration".
    let origins = (!cors_allowed_origins.is_empty()).then_some(&cors_allowed_origins);

    // Configure CORS based on settings, then assemble the middleware stack.
    let cors_layer = parse_cors_origins(origins);
    setup_console_middleware_stack(cors_layer, rate_limit_enable, rate_limit_rpm, auth_timeout)
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/handlers/user.rs | rustfs/src/admin/handlers/user.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
admin::{auth::validate_admin_request, router::Operation, utils::has_space_be},
auth::{check_key_valid, constant_time_eq, get_session_token},
server::RemoteAddr,
};
use http::{HeaderMap, StatusCode};
use matchit::Params;
use rustfs_config::{MAX_ADMIN_REQUEST_BODY_SIZE, MAX_IAM_IMPORT_SIZE};
use rustfs_credentials::get_global_action_cred;
use rustfs_iam::{
store::{GroupInfo, MappedPolicy, UserType},
sys::NewServiceAccountOpts,
};
use rustfs_madmin::{
AccountStatus, AddOrUpdateUserReq, IAMEntities, IAMErrEntities, IAMErrEntity, IAMErrPolicyEntity,
user::{ImportIAMResult, SRSessionPolicy, SRSvcAccCreate},
};
use rustfs_policy::policy::action::{Action, AdminAction};
use rustfs_utils::path::path_join_buf;
use s3s::{
Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result,
header::{CONTENT_DISPOSITION, CONTENT_LENGTH, CONTENT_TYPE},
s3_error,
};
use serde::Deserialize;
use serde_urlencoded::from_bytes;
use std::io::{Read as _, Write};
use std::{collections::HashMap, io::Cursor, str::from_utf8};
use tracing::warn;
use zip::{ZipArchive, ZipWriter, result::ZipError, write::SimpleFileOptions};
/// Query parameters shared by the add-user and set-user-status admin APIs.
#[derive(Debug, Deserialize, Default)]
pub struct AddUserQuery {
    /// Target user's access key.
    #[serde(rename = "accessKey")]
    pub access_key: Option<String>,
    /// Desired account status (used by `SetUserStatus`).
    pub status: Option<String>,
}
/// Handler for the admin "add user" API.
pub struct AddUser {}

#[async_trait::async_trait]
impl Operation for AddUser {
    /// Creates (or updates) an IAM user.
    ///
    /// The target access key comes from the `accessKey` query parameter;
    /// the secret key and status come from the JSON body
    /// ([`AddOrUpdateUserReq`]). Rejects the system access key, a caller's
    /// own temp/service-account key, and keys containing whitespace.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let query = {
            if let Some(query) = req.uri.query() {
                let input: AddUserQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get query failed"))?;
                input
            } else {
                AddUserQuery::default()
            }
        };

        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;

        let ak = query.access_key.as_deref().unwrap_or_default();
        if ak.is_empty() {
            return Err(s3_error!(InvalidArgument, "access key is empty"));
        }

        // Read the (size-limited) JSON body describing the user.
        let mut input = req.input;
        let body = match input.store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE).await {
            Ok(b) => b,
            Err(e) => {
                warn!("get body failed, e: {:?}", e);
                return Err(s3_error!(InvalidRequest, "get body failed"));
            }
        };

        // let body_bytes = decrypt_data(input_cred.secret_key.expose().as_bytes(), &body)
        //     .map_err(|e| S3Error::with_message(S3ErrorCode::InvalidArgument, format!("decrypt_data err {}", e)))?;

        let args: AddOrUpdateUserReq = serde_json::from_slice(&body)
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("unmarshal body err {e}")))?;

        if args.secret_key.is_empty() {
            return Err(s3_error!(InvalidArgument, "secret key is empty"));
        }

        // Never allow shadowing the system (root) credentials.
        if let Some(sys_cred) = get_global_action_cred()
            && constant_time_eq(&sys_cred.access_key, ak)
        {
            return Err(s3_error!(InvalidArgument, "can't create user with system access key"));
        }

        let Ok(iam_store) = rustfs_iam::get() else {
            return Err(s3_error!(InvalidRequest, "iam not init"));
        };

        if let Some(user) = iam_store.get_user(ak).await {
            if (user.credentials.is_temp() || user.credentials.is_service_account()) && cred.parent_user == ak {
                return Err(s3_error!(InvalidArgument, "can't create user with service account access key"));
            }
        } else if has_space_be(ak) {
            return Err(s3_error!(InvalidArgument, "access key has space"));
        }

        if from_utf8(ak.as_bytes()).is_err() {
            return Err(s3_error!(InvalidArgument, "access key is not utf8"));
        }

        // When a user edits itself, only deny statements are evaluated.
        let deny_only = ak == cred.access_key;

        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            deny_only,
            vec![Action::AdminAction(AdminAction::CreateUserAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;

        iam_store
            .create_user(ak, &args)
            .await
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("create_user err {e}")))?;

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());

        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
/// Handler for the admin "set user status" API.
pub struct SetUserStatus {}

#[async_trait::async_trait]
impl Operation for SetUserStatus {
    /// Enables or disables an IAM user.
    ///
    /// `accessKey` and `status` are taken from the query string; a caller
    /// may not change its own status.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let query = {
            if let Some(query) = req.uri.query() {
                let input: AddUserQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get body failed"))?;
                input
            } else {
                AddUserQuery::default()
            }
        };
        let ak = query.access_key.as_deref().unwrap_or_default();
        if ak.is_empty() {
            return Err(s3_error!(InvalidArgument, "access key is empty"));
        }
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        // Self-disable is rejected up front (constant-time compare to avoid
        // leaking key material through timing).
        if constant_time_eq(&input_cred.access_key, ak) {
            return Err(s3_error!(InvalidArgument, "can't change status of self"));
        }
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::EnableUserAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        // Validate the requested status value before touching the store.
        let status = AccountStatus::try_from(query.status.as_deref().unwrap_or_default())
            .map_err(|e| S3Error::with_message(S3ErrorCode::InvalidArgument, e))?;
        let Ok(iam_store) = rustfs_iam::get() else {
            return Err(s3_error!(InvalidRequest, "iam not init"));
        };
        iam_store
            .set_user_status(ak, status)
            .await
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("set_user_status err {e}")))?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
/// Query-string parameters accepted by the `ListUsers` admin endpoint.
#[derive(Debug, Deserialize, Default)]
pub struct BucketQuery {
    /// Optional bucket filter; an empty string means "list all users".
    #[serde(rename = "bucket")]
    pub bucket: String,
}
/// Lists IAM users, optionally restricted to a single bucket.
pub struct ListUsers {}

#[async_trait::async_trait]
impl Operation for ListUsers {
    /// Handles the admin "list users" request.
    ///
    /// Authenticates the caller and checks the list-users admin action, then
    /// returns either all users or the users of the bucket named in the
    /// query string, serialized as JSON.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let session_token = get_session_token(&req.uri, &req.headers).unwrap_or_default();
        let (cred, owner) = check_key_valid(session_token, &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ListUsersAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;

        // Optional `bucket` filter taken from the query string.
        let query = match req.uri.query() {
            Some(raw) => {
                from_bytes::<BucketQuery>(raw.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get body failed"))?
            }
            None => BucketQuery::default(),
        };

        let Ok(iam_store) = rustfs_iam::get() else {
            return Err(s3_error!(InvalidRequest, "iam not init"));
        };

        // With a bucket filter, only the users of that bucket are returned.
        let users = if query.bucket.is_empty() {
            iam_store
                .list_users()
                .await
                .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?
        } else {
            iam_store
                .list_bucket_users(query.bucket.as_str())
                .await
                .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?
        };

        let data = serde_json::to_vec(&users)
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal users err {e}")))?;

        let mut headers = HeaderMap::new();
        headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), headers))
    }
}
/// Deletes a regular IAM user account.
pub struct RemoveUser {}
#[async_trait::async_trait]
impl Operation for RemoveUser {
    /// Handles the admin "remove user" request.
    ///
    /// Guard order matters here: authorization first, then argument checks,
    /// then refusals for protected identities (system credential, the caller
    /// themselves, temporary users, service accounts), and only then the
    /// actual deletion.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::DeleteUserAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        // The target access key comes from the query string; absent query
        // means defaults (empty key), which is rejected below.
        let query = {
            if let Some(query) = req.uri.query() {
                let input: AddUserQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get body failed"))?;
                input
            } else {
                AddUserQuery::default()
            }
        };
        let ak = query.access_key.as_deref().unwrap_or_default();
        if ak.is_empty() {
            return Err(s3_error!(InvalidArgument, "access key is empty"));
        }
        let sys_cred = get_global_action_cred()
            .ok_or_else(|| S3Error::with_message(S3ErrorCode::InternalError, "get_global_action_cred failed"))?;
        // Refuse deleting the system credential, the caller's own key, or the
        // caller's parent identity.
        if ak == sys_cred.access_key || ak == cred.access_key || cred.parent_user == ak {
            return Err(s3_error!(InvalidArgument, "can't remove self"));
        }
        let Ok(iam_store) = rustfs_iam::get() else {
            return Err(s3_error!(InvalidRequest, "iam not init"));
        };
        // Temporary (STS) users are not deletable through this endpoint.
        let (is_temp, _) = iam_store
            .is_temp_user(ak)
            .await
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("is_temp_user err {e}")))?;
        if is_temp {
            return Err(s3_error!(InvalidArgument, "can't remove temp user"));
        }
        // Service accounts likewise have their own deletion path.
        let (is_service_account, _) = iam_store
            .is_service_account(ak)
            .await
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("is_service_account err {e}")))?;
        if is_service_account {
            return Err(s3_error!(InvalidArgument, "can't remove service account"));
        }
        iam_store
            .delete_user(ak, true)
            .await
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("delete_user err {e}")))?;
        // TODO: IAMChangeHook
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
/// Returns the stored information for a single IAM user.
pub struct GetUserInfo {}

#[async_trait::async_trait]
impl Operation for GetUserInfo {
    /// Handles the admin "get user info" request.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        // The target access key is taken from the query string.
        let query = match req.uri.query() {
            Some(raw) => {
                from_bytes::<AddUserQuery>(raw.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get body failed"))?
            }
            None => AddUserQuery::default(),
        };
        let ak = query.access_key.as_deref().unwrap_or_default();
        if ak.is_empty() {
            return Err(s3_error!(InvalidArgument, "access key is empty"));
        }

        let Ok(iam_store) = rustfs_iam::get() else {
            return Err(s3_error!(InvalidRequest, "iam not init"));
        };
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let session_token = get_session_token(&req.uri, &req.headers).unwrap_or_default();
        let (cred, owner) = check_key_valid(session_token, &input_cred.access_key).await?;

        // deny_only is set for self-lookups — presumably only deny statements
        // are evaluated in that case; confirm in validate_admin_request.
        let deny_only = ak == cred.access_key;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            deny_only,
            vec![Action::AdminAction(AdminAction::GetUserAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;

        let info = iam_store
            .get_user_info(ak)
            .await
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
        let data = serde_json::to_vec(&info)
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal user err {e}")))?;

        let mut headers = HeaderMap::new();
        headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), headers))
    }
}
// File names of the individual JSON documents stored inside the IAM
// export/import zip archive (one data set per file).
const ALL_POLICIES_FILE: &str = "policies.json";
const ALL_USERS_FILE: &str = "users.json";
const ALL_GROUPS_FILE: &str = "groups.json";
const ALL_SVC_ACCTS_FILE: &str = "svcaccts.json";
const USER_POLICY_MAPPINGS_FILE: &str = "user_mappings.json";
const GROUP_POLICY_MAPPINGS_FILE: &str = "group_mappings.json";
const STS_USER_POLICY_MAPPINGS_FILE: &str = "stsuser_mappings.json";
// Directory prefix under which every entry in the archive is placed.
const IAM_ASSETS_DIR: &str = "iam-assets";
// All files written by `ExportIam`, in the order they are exported.
const IAM_EXPORT_FILES: &[&str] = &[
    ALL_POLICIES_FILE,
    ALL_USERS_FILE,
    ALL_GROUPS_FILE,
    ALL_SVC_ACCTS_FILE,
    USER_POLICY_MAPPINGS_FILE,
    GROUP_POLICY_MAPPINGS_FILE,
    STS_USER_POLICY_MAPPINGS_FILE,
];
/// Exports the full IAM configuration as an in-memory zip archive.
pub struct ExportIam {}
#[async_trait::async_trait]
impl Operation for ExportIam {
    /// Handles the admin "export IAM" request.
    ///
    /// After authenticating the caller and checking `ExportIAMAction`, each
    /// IAM data set (policies, users, groups, service accounts, policy
    /// mappings) is serialized to JSON and written into a zip whose entries
    /// live under `iam-assets/`. The archive is returned as an attachment.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ExportIAMAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let Ok(iam_store) = rustfs_iam::get() else {
            return Err(s3_error!(InvalidRequest, "iam not init"));
        };
        // The whole archive is assembled in memory via a Cursor-backed buffer.
        let mut zip_writer = ZipWriter::new(Cursor::new(Vec::new()));
        let options = SimpleFileOptions::default();
        for &file in IAM_EXPORT_FILES {
            let file_path = path_join_buf(&[IAM_ASSETS_DIR, file]);
            match file {
                // policies.json: every named policy as a name -> policy map.
                ALL_POLICIES_FILE => {
                    let policies: HashMap<String, rustfs_policy::policy::Policy> = iam_store
                        .list_polices("")
                        .await
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    let json_str = serde_json::to_vec(&policies)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    zip_writer
                        .start_file(file_path, options)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    zip_writer
                        .write_all(&json_str)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                }
                // users.json: regular users, re-shaped into the same request
                // type that AddUser accepts so the file can be re-imported.
                ALL_USERS_FILE => {
                    let mut users = HashMap::new();
                    iam_store
                        .load_users(UserType::Reg, &mut users)
                        .await
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    let users: HashMap<String, AddOrUpdateUserReq> = users
                        .into_iter()
                        .map(|(k, v)| {
                            (
                                k,
                                AddOrUpdateUserReq {
                                    secret_key: v.credentials.secret_key,
                                    status: {
                                        // Only "off" maps to Disabled; any
                                        // other status string is exported as
                                        // Enabled.
                                        if v.credentials.status == "off" {
                                            AccountStatus::Disabled
                                        } else {
                                            AccountStatus::Enabled
                                        }
                                    },
                                    policy: None,
                                },
                            )
                        })
                        .collect::<HashMap<String, AddOrUpdateUserReq>>();
                    let json_str = serde_json::to_vec(&users)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    zip_writer
                        .start_file(file_path, options)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    zip_writer
                        .write_all(&json_str)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                }
                // groups.json: group definitions as loaded from the store.
                ALL_GROUPS_FILE => {
                    let mut groups: HashMap<String, GroupInfo> = HashMap::new();
                    iam_store
                        .load_groups(&mut groups)
                        .await
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    let json_str = serde_json::to_vec(&groups)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    zip_writer
                        .start_file(file_path, options)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    zip_writer
                        .write_all(&json_str)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                }
                // svcaccts.json: service accounts plus their claims and
                // session policies, re-shaped into creation requests.
                ALL_SVC_ACCTS_FILE => {
                    let mut service_accounts = HashMap::new();
                    iam_store
                        .load_users(UserType::Svc, &mut service_accounts)
                        .await
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    let mut svc_accts: HashMap<String, SRSvcAccCreate> = HashMap::new();
                    for (k, acc) in service_accounts {
                        // Skip the internal site-replication service account.
                        if k == "siteReplicatorSvcAcc" {
                            continue;
                        }
                        let claims = iam_store
                            .get_claims_for_svc_acc(&acc.credentials.access_key)
                            .await
                            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                        let (sa, police) = iam_store
                            .get_service_account(&acc.credentials.access_key)
                            .await
                            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                        // "null" stands in for an absent session policy so the
                        // JSON round-trips.
                        let police_json = if let Some(police) = police {
                            serde_json::to_string(&police)
                                .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?
                        } else {
                            "null".to_string()
                        };
                        let svc_acc_create_req = SRSvcAccCreate {
                            parent: acc.credentials.parent_user,
                            access_key: k.clone(),
                            secret_key: acc.credentials.secret_key,
                            groups: acc.credentials.groups.unwrap_or_default(),
                            claims,
                            session_policy: SRSessionPolicy::from_json(&police_json).unwrap_or_default(),
                            status: acc.credentials.status,
                            name: sa.name.unwrap_or_default(),
                            description: sa.description.unwrap_or_default(),
                            expiration: sa.expiration,
                            api_version: None,
                        };
                        svc_accts.insert(k.clone(), svc_acc_create_req);
                    }
                    let json_str = serde_json::to_vec(&svc_accts)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    zip_writer
                        .start_file(file_path, options)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    zip_writer
                        .write_all(&json_str)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                }
                // user_mappings.json: per-user policy attachments.
                USER_POLICY_MAPPINGS_FILE => {
                    let mut user_policy_mappings: HashMap<String, MappedPolicy> = HashMap::new();
                    iam_store
                        .load_mapped_policies(UserType::Reg, false, &mut user_policy_mappings)
                        .await
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    let json_str = serde_json::to_vec(&user_policy_mappings)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    zip_writer
                        .start_file(file_path, options)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    zip_writer
                        .write_all(&json_str)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                }
                // group_mappings.json: per-group policy attachments (the
                // `true` flag selects group mappings).
                GROUP_POLICY_MAPPINGS_FILE => {
                    let mut group_policy_mappings = HashMap::new();
                    iam_store
                        .load_mapped_policies(UserType::Reg, true, &mut group_policy_mappings)
                        .await
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    let json_str = serde_json::to_vec(&group_policy_mappings)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    zip_writer
                        .start_file(file_path, options)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    zip_writer
                        .write_all(&json_str)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                }
                // stsuser_mappings.json: policy attachments for STS users.
                STS_USER_POLICY_MAPPINGS_FILE => {
                    let mut sts_user_policy_mappings: HashMap<String, MappedPolicy> = HashMap::new();
                    iam_store
                        .load_mapped_policies(UserType::Sts, false, &mut sts_user_policy_mappings)
                        .await
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    let json_str = serde_json::to_vec(&sts_user_policy_mappings)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    zip_writer
                        .start_file(file_path, options)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                    zip_writer
                        .write_all(&json_str)
                        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
                }
                _ => continue,
            }
        }
        let zip_bytes = zip_writer
            .finish()
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/zip".parse().unwrap());
        header.insert(CONTENT_DISPOSITION, "attachment; filename=iam-assets.zip".parse().unwrap());
        header.insert(CONTENT_LENGTH, zip_bytes.get_ref().len().to_string().parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(zip_bytes.into_inner())), header))
    }
}
/// Imports IAM configuration from a zip archive in the `ExportIam` layout.
pub struct ImportIam {}
#[async_trait::async_trait]
impl Operation for ImportIam {
async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
let Some(input_cred) = req.credentials else {
return Err(s3_error!(InvalidRequest, "get cred failed"));
};
let (cred, owner) =
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
validate_admin_request(
&req.headers,
&cred,
owner,
false,
vec![Action::AdminAction(AdminAction::ExportIAMAction)],
req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
)
.await?;
let mut input = req.input;
let body = match input.store_all_limited(MAX_IAM_IMPORT_SIZE).await {
Ok(b) => b,
Err(e) => {
warn!("get body failed, e: {:?}", e);
return Err(s3_error!(InvalidRequest, "get body failed"));
}
};
let mut zip_reader =
ZipArchive::new(Cursor::new(body)).map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
let Ok(iam_store) = rustfs_iam::get() else {
return Err(s3_error!(InvalidRequest, "iam not init"));
};
let skipped = IAMEntities::default();
let mut removed = IAMEntities::default();
let mut added = IAMEntities::default();
let mut failed = IAMErrEntities::default();
{
let file_path = path_join_buf(&[IAM_ASSETS_DIR, ALL_POLICIES_FILE]);
let file_content = match zip_reader.by_name(file_path.as_str()) {
Err(ZipError::FileNotFound) => None,
Err(_) => return Err(s3_error!(InvalidRequest, "get file failed")),
Ok(file) => {
let mut file = file;
let mut file_content = Vec::new();
file.read_to_end(&mut file_content)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
Some(file_content)
}
};
if let Some(file_content) = file_content {
let policies: HashMap<String, rustfs_policy::policy::Policy> = serde_json::from_slice(&file_content)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
for (name, policy) in policies {
if policy.is_empty() {
let res = iam_store.delete_policy(&name, true).await;
removed.policies.push(name.clone());
if let Err(e) = res {
return Err(s3_error!(InternalError, "delete policy failed, name: {name}, err: {e}"));
}
continue;
}
let res = iam_store.set_policy(&name, policy).await;
added.policies.push(name.clone());
if let Err(e) = res {
return Err(s3_error!(InternalError, "set policy failed, name: {name}, err: {e}"));
}
}
}
}
let Some(sys_cred) = get_global_action_cred() else {
return Err(s3_error!(InvalidRequest, "get sys cred failed"));
};
{
let file_path = path_join_buf(&[IAM_ASSETS_DIR, ALL_USERS_FILE]);
let file_content = match zip_reader.by_name(file_path.as_str()) {
Err(ZipError::FileNotFound) => None,
Err(_) => return Err(s3_error!(InvalidRequest, "get file failed")),
Ok(file) => {
let mut file = file;
let mut file_content = Vec::new();
file.read_to_end(&mut file_content)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
Some(file_content)
}
};
if let Some(file_content) = file_content {
let users: HashMap<String, AddOrUpdateUserReq> = serde_json::from_slice(&file_content)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
for (ak, req) in users {
if ak == sys_cred.access_key {
return Err(s3_error!(InvalidArgument, "can't create user with system access key"));
}
if let Some(u) = iam_store.get_user(&ak).await {
if u.credentials.is_temp() || u.credentials.is_service_account() {
return Err(s3_error!(InvalidArgument, "can't create user with system access key"));
}
} else if has_space_be(&ak) {
return Err(s3_error!(InvalidArgument, "has space be"));
}
if let Err(e) = iam_store.create_user(&ak, &req).await {
failed.users.push(IAMErrEntity {
name: ak.clone(),
error: e.to_string(),
});
} else {
added.users.push(ak.clone());
}
}
}
}
{
let file_path = path_join_buf(&[IAM_ASSETS_DIR, ALL_GROUPS_FILE]);
let file_content = match zip_reader.by_name(file_path.as_str()) {
Err(ZipError::FileNotFound) => None,
Err(_) => return Err(s3_error!(InvalidRequest, "get file failed")),
Ok(file) => {
let mut file = file;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/handlers/event.rs | rustfs/src/admin/handlers/event.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::admin::router::Operation;
use crate::auth::{check_key_valid, get_session_token};
use http::{HeaderMap, StatusCode};
use matchit::Params;
use rustfs_config::notify::{NOTIFY_MQTT_SUB_SYS, NOTIFY_WEBHOOK_SUB_SYS};
use rustfs_config::{ENABLE_KEY, EnableState, MAX_ADMIN_REQUEST_BODY_SIZE};
use rustfs_targets::check_mqtt_broker_available;
use s3s::header::CONTENT_LENGTH;
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
use serde::{Deserialize, Serialize};
use std::future::Future;
use std::io::{Error, ErrorKind};
use std::net::SocketAddr;
use std::path::Path;
use tokio::net::lookup_host;
use tokio::time::{Duration, sleep};
use tracing::{Span, debug, error, info, warn};
use url::Url;
/// One configuration key/value pair supplied by the client.
#[derive(Debug, Deserialize)]
pub struct KeyValue {
    pub key: String,
    pub value: String,
}
/// JSON body of the "set notification target" request: the full set of
/// configuration pairs for one target.
#[derive(Debug, Deserialize)]
pub struct NotificationTargetBody {
    pub key_values: Vec<KeyValue>,
}
/// One active notification target as reported to the client.
#[derive(Serialize, Debug)]
struct NotificationEndpoint {
    account_id: String,
    service: String,
    status: String,
}
/// Response payload of the "list notification targets" endpoint.
#[derive(Serialize, Debug)]
struct NotificationEndpointsResponse {
    notification_endpoints: Vec<NotificationEndpoint>,
}
/// Runs `operation` up to `max_attempts` times with exponential backoff,
/// starting at `base_delay` and doubling after each failure (saturating).
///
/// Returns the first successful result, or the error from the last attempt.
///
/// # Panics
/// Panics if `max_attempts` is zero.
async fn retry_with_backoff<F, Fut, T>(mut operation: F, max_attempts: usize, base_delay: Duration) -> Result<T, Error>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = Result<T, Error>>,
{
    assert!(max_attempts > 0, "max_attempts must be greater than 0");

    let mut delay = base_delay;
    let mut last_err = None;
    for attempt in 1..=max_attempts {
        match operation().await {
            Ok(value) => return Ok(value),
            Err(e) => {
                // Keep the failure around; it is returned if no retry is left.
                last_err = Some(e);
                if attempt < max_attempts {
                    warn!(
                        "Retry attempt {}/{} failed: {}. Retrying in {:?}",
                        attempt,
                        max_attempts,
                        last_err.as_ref().unwrap(),
                        delay
                    );
                    sleep(delay).await;
                    delay = delay.saturating_mul(2);
                }
            }
        }
    }
    // Unreachable in practice (the loop ran at least once), but stay safe.
    Err(last_err.unwrap_or_else(|| Error::other("retry_with_backoff: unknown error")))
}
/// Checks that `path` exists by calling `tokio::fs::metadata`, retrying up to
/// three times with a 100 ms starting backoff.
async fn retry_metadata(path: &str) -> Result<(), Error> {
    const ATTEMPTS: usize = 3;
    let base_delay = Duration::from_millis(100);
    retry_with_backoff(
        || async {
            tokio::fs::metadata(path).await?;
            Ok(())
        },
        ATTEMPTS,
        base_delay,
    )
    .await
}
/// Validates a persistent-queue directory: when non-empty it must be an
/// absolute path that exists and is accessible.
async fn validate_queue_dir(queue_dir: &str) -> S3Result<()> {
    // Empty means "no persistent queue"; nothing to validate.
    if queue_dir.is_empty() {
        return Ok(());
    }
    if !Path::new(queue_dir).is_absolute() {
        return Err(s3_error!(InvalidArgument, "queue_dir must be absolute path"));
    }
    // Probe the directory (with retries) and map I/O failures onto
    // caller-friendly argument errors.
    match retry_metadata(queue_dir).await {
        Ok(()) => Ok(()),
        Err(e) => match e.kind() {
            ErrorKind::NotFound => Err(s3_error!(InvalidArgument, "queue_dir does not exist")),
            ErrorKind::PermissionDenied => Err(s3_error!(InvalidArgument, "queue_dir exists but permission denied")),
            _ => Err(s3_error!(InvalidArgument, "failed to access queue_dir: {}", e)),
        },
    }
}
/// Ensures `client_cert` and `client_key` are either both present or both
/// absent; a TLS client identity needs the pair.
fn validate_cert_key_pair(cert: &Option<String>, key: &Option<String>) -> S3Result<()> {
    match (cert, key) {
        (Some(_), None) | (None, Some(_)) => {
            Err(s3_error!(InvalidArgument, "client_cert and client_key must be specified as a pair"))
        }
        _ => Ok(()),
    }
}
/// Set (create or update) a notification target
pub struct NotificationTarget {}
#[async_trait::async_trait]
impl Operation for NotificationTarget {
    /// Handles the "set notification target" admin request.
    ///
    /// Accepts a JSON body of key/value pairs for a webhook or MQTT target,
    /// validates the pairs against the allowed key set for that target type
    /// (plus endpoint/queue-dir/TLS/QoS sanity checks), then stores the
    /// config — with `enable=on` appended — in the notification system.
    async fn call(&self, req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let span = Span::current();
        let _enter = span.enter();
        // 1. Analyze query parameters
        let (target_type, target_name) = extract_target_params(&params)?;
        // 2. Permission verification
        let Some(input_cred) = &req.credentials else {
            return Err(s3_error!(InvalidRequest, "credentials not found"));
        };
        let (_cred, _owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        // 3. Get notification system instance
        let Some(ns) = rustfs_notify::notification_system() else {
            return Err(s3_error!(InternalError, "notification system not initialized"));
        };
        // 4. The parsing request body is KVS (Key-Value Store)
        let mut input = req.input;
        let body = input.store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE).await.map_err(|e| {
            warn!("failed to read request body: {:?}", e);
            s3_error!(InvalidRequest, "failed to read request body")
        })?;
        // 1. Get the allowed key range
        let allowed_keys: std::collections::HashSet<&str> = match target_type {
            NOTIFY_WEBHOOK_SUB_SYS => rustfs_config::notify::NOTIFY_WEBHOOK_KEYS.iter().cloned().collect(),
            NOTIFY_MQTT_SUB_SYS => rustfs_config::notify::NOTIFY_MQTT_KEYS.iter().cloned().collect(),
            // extract_target_params only admits webhook/mqtt, so any other
            // value is impossible here.
            _ => unreachable!(),
        };
        let notification_body: NotificationTargetBody = serde_json::from_slice(&body)
            .map_err(|e| s3_error!(InvalidArgument, "invalid json body for target config: {}", e))?;
        // 2. Filter and verify keys, and splice target_name
        let mut kvs_vec = Vec::new();
        // Values captured along the way for target-type-specific validation.
        let mut endpoint_val = None;
        let mut queue_dir_val = None;
        let mut client_cert_val = None;
        let mut client_key_val = None;
        let mut qos_val = None;
        let mut topic_val = String::new();
        for kv in notification_body.key_values.iter() {
            if !allowed_keys.contains(kv.key.as_str()) {
                return Err(s3_error!(
                    InvalidArgument,
                    "key '{}' not allowed for target type '{}'",
                    kv.key,
                    target_type
                ));
            }
            if kv.key == "endpoint" {
                endpoint_val = Some(kv.value.clone());
            }
            if target_type == NOTIFY_MQTT_SUB_SYS {
                // For MQTT the broker address doubles as the endpoint.
                if kv.key == rustfs_config::MQTT_BROKER {
                    endpoint_val = Some(kv.value.clone());
                }
                if kv.key == rustfs_config::MQTT_TOPIC {
                    topic_val = kv.value.clone();
                }
            }
            if kv.key == "queue_dir" {
                queue_dir_val = Some(kv.value.clone());
            }
            if kv.key == "client_cert" {
                client_cert_val = Some(kv.value.clone());
            }
            if kv.key == "client_key" {
                client_key_val = Some(kv.value.clone());
            }
            if kv.key == "qos" {
                qos_val = Some(kv.value.clone());
            }
            kvs_vec.push(rustfs_ecstore::config::KV {
                key: kv.key.clone(),
                value: kv.value.clone(),
                hidden_if_empty: false,
            });
        }
        // Webhook targets: the endpoint URL must parse and its host must be
        // an IP address or a resolvable name.
        if target_type == NOTIFY_WEBHOOK_SUB_SYS {
            let endpoint = endpoint_val
                .clone()
                .ok_or_else(|| s3_error!(InvalidArgument, "endpoint is required"))?;
            let url = Url::parse(&endpoint).map_err(|e| s3_error!(InvalidArgument, "invalid endpoint url: {}", e))?;
            let host = url
                .host_str()
                .ok_or_else(|| s3_error!(InvalidArgument, "endpoint missing host"))?;
            let port = url
                .port_or_known_default()
                .ok_or_else(|| s3_error!(InvalidArgument, "endpoint missing port"))?;
            let addr = format!("{host}:{port}");
            // First, try to parse as SocketAddr (IP:port)
            if addr.parse::<SocketAddr>().is_err() {
                // If not an IP:port, try DNS resolution
                if lookup_host(&addr).await.is_err() {
                    return Err(s3_error!(InvalidArgument, "invalid or unresolvable endpoint address"));
                }
            }
            if let Some(queue_dir) = queue_dir_val.clone() {
                validate_queue_dir(&queue_dir).await?;
            }
            validate_cert_key_pair(&client_cert_val, &client_key_val)?;
        }
        // MQTT targets: broker and topic are mandatory, the broker must be
        // reachable, and a persistent queue requires QoS 1 or 2.
        if target_type == NOTIFY_MQTT_SUB_SYS {
            let endpoint = endpoint_val.ok_or_else(|| s3_error!(InvalidArgument, "broker endpoint is required"))?;
            if topic_val.is_empty() {
                return Err(s3_error!(InvalidArgument, "topic is required"));
            }
            // Check MQTT Broker availability
            if let Err(e) = check_mqtt_broker_available(&endpoint, &topic_val).await {
                return Err(s3_error!(InvalidArgument, "MQTT Broker unavailable: {}", e));
            }
            if let Some(queue_dir) = queue_dir_val {
                validate_queue_dir(&queue_dir).await?;
                if let Some(qos) = qos_val {
                    match qos.parse::<u8>() {
                        Ok(qos_int) if qos_int == 1 || qos_int == 2 => {}
                        Ok(0) => {
                            return Err(s3_error!(InvalidArgument, "qos should be 1 or 2 if queue_dir is set"));
                        }
                        _ => {
                            return Err(s3_error!(InvalidArgument, "qos must be an integer 0, 1, or 2"));
                        }
                    }
                }
            }
        }
        // 3. Add ENABLE_KEY
        kvs_vec.push(rustfs_ecstore::config::KV {
            key: ENABLE_KEY.to_string(),
            value: EnableState::On.to_string(),
            hidden_if_empty: false,
        });
        let kvs = rustfs_ecstore::config::KVS(kvs_vec);
        // 5. Call notification system to set target configuration
        info!("Setting target config for type '{}', name '{}'", target_type, target_name);
        ns.set_target_config(target_type, target_name, kvs).await.map_err(|e| {
            error!("failed to set target config: {}", e);
            S3Error::with_message(S3ErrorCode::InternalError, format!("failed to set target config: {e}"))
        })?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        // Echo the request id for tracing when the client supplied one.
        if let Some(v) = req.headers.get("x-request-id") {
            header.insert("x-request-id", v.clone());
        }
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
/// Get a list of notification targets for all activities
pub struct ListNotificationTargets {}
#[async_trait::async_trait]
impl Operation for ListNotificationTargets {
async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
let span = Span::current();
let _enter = span.enter();
debug!("ListNotificationTargets call start request params: {:?}", req.uri.query());
// 1. Permission verification
let Some(input_cred) = &req.credentials else {
return Err(s3_error!(InvalidRequest, "credentials not found"));
};
let (_cred, _owner) =
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
// 2. Get notification system instance
let Some(ns) = rustfs_notify::notification_system() else {
return Err(s3_error!(InternalError, "notification system not initialized"));
};
// 3. Get the list of activity targets
let active_targets = ns.get_active_targets().await;
debug!("ListNotificationTargets call found {} active targets", active_targets.len());
let mut notification_endpoints = Vec::new();
for target_id in active_targets.iter() {
notification_endpoints.push(NotificationEndpoint {
account_id: target_id.id.clone(),
service: target_id.name.to_string(),
status: "online".to_string(),
});
}
let response = NotificationEndpointsResponse { notification_endpoints };
// 4. Serialize and return the result
let data = serde_json::to_vec(&response).map_err(|e| {
error!("Failed to serialize notification targets response: {:?}", response);
S3Error::with_message(S3ErrorCode::InternalError, format!("failed to serialize targets: {e}"))
})?;
debug!("ListNotificationTargets call end, response data length: {}", data.len(),);
let mut header = HeaderMap::new();
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
if let Some(v) = req.headers.get("x-request-id") {
header.insert("x-request-id", v.clone());
}
Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header))
}
}
/// Get a list of notification targets for all activities
pub struct ListTargetsArns {}

#[async_trait::async_trait]
impl Operation for ListTargetsArns {
    /// Returns the ARN of every active notification target, scoped to the
    /// request's region, as a JSON array of strings.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let span = Span::current();
        let _enter = span.enter();
        debug!("ListTargetsArns call start request params: {:?}", req.uri.query());

        // The caller must present valid credentials.
        let Some(input_cred) = &req.credentials else {
            return Err(s3_error!(InvalidRequest, "credentials not found"));
        };
        let session = get_session_token(&req.uri, &req.headers).unwrap_or_default();
        let (_cred, _owner) = check_key_valid(session, &input_cred.access_key).await?;

        // The notification subsystem may not have been started.
        let Some(ns) = rustfs_notify::notification_system() else {
            return Err(s3_error!(InternalError, "notification system not initialized"));
        };

        let active_targets = ns.get_active_targets().await;
        debug!("ListTargetsArns call found {} active targets", active_targets.len());

        // ARNs are region-qualified, so the request must carry a region.
        let Some(region) = req.region.clone() else {
            return Err(s3_error!(InvalidRequest, "region not found"));
        };
        let data_target_arn_list: Vec<String> = active_targets
            .iter()
            .map(|target_id| target_id.to_arn(&region).to_string())
            .collect();

        let data = serde_json::to_vec(&data_target_arn_list)
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("failed to serialize targets: {e}")))?;
        debug!("ListTargetsArns call end, response data length: {}", data.len(),);

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        // Echo the request id for tracing when the client supplied one.
        if let Some(v) = req.headers.get("x-request-id") {
            header.insert("x-request-id", v.clone());
        }
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header))
    }
}
/// Delete a specified notification target
pub struct RemoveNotificationTarget {}

#[async_trait::async_trait]
impl Operation for RemoveNotificationTarget {
    /// Removes one notification target's configuration, identified by the
    /// `target_type` and `target_name` route parameters.
    async fn call(&self, req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let span = Span::current();
        let _enter = span.enter();

        // Route parameters pick the target to drop.
        let (target_type, target_name) = extract_target_params(&params)?;

        // The caller must present valid credentials.
        let Some(input_cred) = &req.credentials else {
            return Err(s3_error!(InvalidRequest, "credentials not found"));
        };
        let session = get_session_token(&req.uri, &req.headers).unwrap_or_default();
        let (_cred, _owner) = check_key_valid(session, &input_cred.access_key).await?;

        // The notification subsystem may not have been started.
        let Some(ns) = rustfs_notify::notification_system() else {
            return Err(s3_error!(InternalError, "notification system not initialized"));
        };

        info!("Removing target config for type '{}', name '{}'", target_type, target_name);
        ns.remove_target_config(target_type, target_name).await.map_err(|e| {
            error!("failed to remove target config: {}", e);
            S3Error::with_message(S3ErrorCode::InternalError, format!("failed to remove target config: {e}"))
        })?;

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        // Echo the request id for tracing when the client supplied one.
        if let Some(v) = req.headers.get("x-request-id") {
            header.insert("x-request-id", v.clone());
        }
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
/// Look up a required route parameter, failing with `InvalidArgument`
/// when it is absent.
fn extract_param<'a>(params: &'a Params<'_, '_>, key: &str) -> S3Result<&'a str> {
    match params.get(key) {
        Some(value) => Ok(value),
        None => Err(s3_error!(InvalidArgument, "missing required parameter: '{}'", key)),
    }
}
/// Extract and validate the `target_type` / `target_name` route parameters.
///
/// Only the webhook and MQTT subsystems are accepted as target types.
fn extract_target_params<'a>(params: &'a Params<'_, '_>) -> S3Result<(&'a str, &'a str)> {
    let target_type = extract_param(params, "target_type")?;
    let supported = target_type == NOTIFY_WEBHOOK_SUB_SYS || target_type == NOTIFY_MQTT_SUB_SYS;
    if !supported {
        return Err(s3_error!(InvalidArgument, "unsupported target type: '{}'", target_type));
    }
    let target_name = extract_param(params, "target_name")?;
    Ok((target_type, target_name))
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/handlers/group.rs | rustfs/src/admin/handlers/group.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
admin::{auth::validate_admin_request, router::Operation, utils::has_space_be},
auth::{check_key_valid, constant_time_eq, get_session_token},
server::RemoteAddr,
};
use http::{HeaderMap, StatusCode};
use matchit::Params;
use rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE;
use rustfs_credentials::get_global_action_cred;
use rustfs_iam::error::{is_err_no_such_group, is_err_no_such_user};
use rustfs_madmin::GroupAddRemove;
use rustfs_policy::policy::action::{Action, AdminAction};
use s3s::{
Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result,
header::{CONTENT_LENGTH, CONTENT_TYPE},
s3_error,
};
use serde::Deserialize;
use serde_urlencoded::from_bytes;
use tracing::warn;
/// Query-string parameters shared by the group admin endpoints.
#[derive(Debug, Deserialize, Default)]
pub struct GroupQuery {
    /// Target group name.
    pub group: String,
    /// Desired status for `SetGroupStatus`: "enabled" or "disabled".
    pub status: Option<String>,
}
pub struct ListGroups {}

#[async_trait::async_trait]
impl Operation for ListGroups {
    /// Admin API: lists all IAM groups as a JSON array.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle ListGroups");
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        // Authenticate the access key / session token pair.
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        // Authorize: caller must hold the ListGroups admin action.
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ListGroupsAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let Ok(iam_store) = rustfs_iam::get() else { return Err(s3_error!(InternalError, "iam not init")) };
        // Load the group list via the store's load path (presumably a fresh
        // read rather than a cached one — confirm `list_groups_load`).
        let groups = iam_store.list_groups_load().await.map_err(|e| {
            warn!("list groups failed, e: {:?}", e);
            S3Error::with_message(S3ErrorCode::InternalError, e.to_string())
        })?;
        let body = serde_json::to_vec(&groups).map_err(|e| s3_error!(InternalError, "marshal body failed, e: {:?}", e))?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(body)), header))
    }
}
pub struct GetGroup {}

#[async_trait::async_trait]
impl Operation for GetGroup {
    /// Admin API: returns the description of one group as JSON.
    ///
    /// The group name is taken from the `group` query parameter; an absent
    /// query string yields the default (empty) name.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle GetGroup");
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::GetGroupAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        // Parse the query string into a GroupQuery.
        // NOTE(review): the error text "get body failed1" is misleading for a
        // query-string parse failure (and carries a stray "1").
        let query = {
            if let Some(query) = req.uri.query() {
                let input: GroupQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get body failed1"))?;
                input
            } else {
                GroupQuery::default()
            }
        };
        let Ok(iam_store) = rustfs_iam::get() else { return Err(s3_error!(InternalError, "iam not init")) };
        let g = iam_store.get_group_description(&query.group).await.map_err(|e| {
            warn!("get group failed, e: {:?}", e);
            S3Error::with_message(S3ErrorCode::InternalError, e.to_string())
        })?;
        let body = serde_json::to_vec(&g).map_err(|e| s3_error!(InternalError, "marshal body failed, e: {:?}", e))?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(body)), header))
    }
}
pub struct SetGroupStatus {}

#[async_trait::async_trait]
impl Operation for SetGroupStatus {
    /// Admin API: enables or disables a group.
    ///
    /// Expects `group` and `status` ("enabled" | "disabled") as query
    /// parameters and responds with an empty 200 body on success.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle SetGroupStatus");
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        // NOTE(review): both enable and disable are gated on
        // EnableGroupAdminAction; confirm whether disable should require a
        // separate action.
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::EnableGroupAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        // Parse `group` / `status` from the query string; an absent query
        // yields default (empty) values, which fail validation below.
        let query = {
            if let Some(query) = req.uri.query() {
                let input: GroupQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get body failed1"))?;
                input
            } else {
                GroupQuery::default()
            }
        };
        if query.group.is_empty() {
            return Err(s3_error!(InvalidArgument, "group is required"));
        }
        let Ok(iam_store) = rustfs_iam::get() else { return Err(s3_error!(InternalError, "iam not init")) };
        // Map the textual status onto the boolean the IAM store expects.
        let enabled = match query.status.as_deref() {
            Some("enabled") => true,
            Some("disabled") => false,
            Some(_) => return Err(s3_error!(InvalidArgument, "invalid status")),
            None => return Err(s3_error!(InvalidArgument, "status is required")),
        };
        iam_store.set_group_status(&query.group, enabled).await.map_err(|e| {
            // Previously the disable branch logged "enable group failed"
            // (copy-paste bug); use one status-neutral message for both.
            warn!("set group status failed, e: {:?}", e);
            S3Error::with_message(S3ErrorCode::InternalError, e.to_string())
        })?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
pub struct UpdateGroupMembers {}

#[async_trait::async_trait]
impl Operation for UpdateGroupMembers {
    /// Admin API: adds members to, or removes members from, a group.
    ///
    /// The JSON body is a `GroupAddRemove`; its `is_remove` flag selects the
    /// direction. Temporary users and the root credential may never be added.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle UpdateGroupMembers");
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        // NOTE(review): removal is also gated on AddUserToGroupAdminAction;
        // confirm whether a separate remove action should apply.
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::AddUserToGroupAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let mut input = req.input;
        // Buffer the body with an upper bound to avoid unbounded memory use.
        let body = match input.store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE).await {
            Ok(b) => b,
            Err(e) => {
                warn!("get body failed, e: {:?}", e);
                return Err(s3_error!(InvalidRequest, "group configuration body too large or failed to read"));
            }
        };
        let args: GroupAddRemove = serde_json::from_slice(&body)
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("unmarshal body err {e}")))?;
        warn!("UpdateGroupMembers args {:?}", args);
        let Ok(iam_store) = rustfs_iam::get() else { return Err(s3_error!(InternalError, "iam not init")) };
        // Validate every member before mutating anything: temp users and the
        // root access key are rejected; unknown users are tolerated here.
        for member in args.members.iter() {
            match iam_store.is_temp_user(member).await {
                Ok((is_temp, _)) => {
                    if is_temp {
                        return Err(S3Error::with_message(
                            S3ErrorCode::MethodNotAllowed,
                            format!("can't add temp user {member}"),
                        ));
                    }
                    // Reject the root credential; the constant-time compare
                    // avoids leaking the key through timing differences.
                    get_global_action_cred()
                        .map(|cred| {
                            if constant_time_eq(&cred.access_key, member) {
                                return Err(S3Error::with_message(
                                    S3ErrorCode::MethodNotAllowed,
                                    format!("can't add root {member}"),
                                ));
                            }
                            Ok(())
                        })
                        .unwrap_or_else(|| {
                            Err(S3Error::with_message(S3ErrorCode::InternalError, "get global cred failed".to_string()))
                        })?;
                }
                Err(e) => {
                    // A missing user is tolerated (membership may precede user
                    // creation); any other lookup failure is fatal.
                    if !is_err_no_such_user(&e) {
                        return Err(S3Error::with_message(S3ErrorCode::InternalError, e.to_string()));
                    }
                }
            }
        }
        if args.is_remove {
            warn!("remove group members");
            iam_store
                .remove_users_from_group(&args.group, args.members)
                .await
                .map_err(|e| {
                    warn!("remove group members failed, e: {:?}", e);
                    S3Error::with_message(S3ErrorCode::InternalError, e.to_string())
                })?;
        } else {
            warn!("add group members");
            // For a group that does not exist yet, reject names that match
            // `has_space_be` (presumably names with leading/trailing or
            // embedded whitespace — TODO confirm). NOTE(review): the message
            // "not such group" looks like a typo for "no such group".
            if let Err(err) = iam_store.get_group_description(&args.group).await
                && is_err_no_such_group(&err)
                && has_space_be(&args.group)
            {
                return Err(s3_error!(InvalidArgument, "not such group"));
            }
            iam_store.add_users_to_group(&args.group, args.members).await.map_err(|e| {
                warn!("add group members failed, e: {:?}", e);
                S3Error::with_message(S3ErrorCode::InternalError, e.to_string())
            })?;
        }
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/handlers/kms_dynamic.rs | rustfs/src/admin/handlers/kms_dynamic.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! KMS dynamic configuration admin API handlers
use super::Operation;
use crate::admin::auth::validate_admin_request;
use crate::auth::{check_key_valid, get_session_token};
use crate::server::RemoteAddr;
use hyper::StatusCode;
use matchit::Params;
use rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE;
use rustfs_ecstore::config::com::{read_config, save_config};
use rustfs_ecstore::new_object_layer_fn;
use rustfs_kms::{
ConfigureKmsRequest, ConfigureKmsResponse, KmsConfig, KmsConfigSummary, KmsServiceStatus, KmsStatusResponse, StartKmsRequest,
StartKmsResponse, StopKmsResponse, get_global_kms_service_manager,
};
use rustfs_policy::policy::action::{Action, AdminAction};
use s3s::{Body, S3Request, S3Response, S3Result, s3_error};
use tracing::{error, info, warn};
/// Path to store KMS configuration in the cluster metadata
const KMS_CONFIG_PATH: &str = "config/kms_config.json";

/// Save KMS configuration to cluster storage
///
/// Serializes `config` to JSON and writes it at [`KMS_CONFIG_PATH`] through
/// the object store's config API. Errors are returned as human-readable
/// strings: storage layer missing, serialization failure, or write failure.
async fn save_kms_config(config: &KmsConfig) -> Result<(), String> {
    let Some(store) = new_object_layer_fn() else {
        return Err("Storage layer not initialized".to_string());
    };
    let data = serde_json::to_vec(config).map_err(|e| format!("Failed to serialize KMS config: {e}"))?;
    save_config(store, KMS_CONFIG_PATH, data)
        .await
        .map_err(|e| format!("Failed to save KMS config to storage: {e}"))?;
    info!("KMS configuration persisted to cluster storage at {}", KMS_CONFIG_PATH);
    Ok(())
}
/// Load KMS configuration from cluster storage
///
/// Returns `None` both when no configuration has been persisted yet and on
/// any read or deserialization failure; callers cannot distinguish the two
/// cases from the return value alone (the distinction is only logged).
pub async fn load_kms_config() -> Option<KmsConfig> {
    let Some(store) = new_object_layer_fn() else {
        warn!("Storage layer not initialized, cannot load KMS config");
        return None;
    };
    match read_config(store, KMS_CONFIG_PATH).await {
        Ok(data) => match serde_json::from_slice::<KmsConfig>(&data) {
            Ok(config) => {
                info!("Loaded KMS configuration from cluster storage");
                Some(config)
            }
            Err(e) => {
                error!("Failed to deserialize KMS config: {}", e);
                None
            }
        },
        Err(e) => {
            // Config not found is normal on first run
            // NOTE(review): detecting "not found" by substring-matching the
            // error text is fragile; prefer a typed error check if one exists.
            if e.to_string().contains("ConfigNotFound") || e.to_string().contains("not found") {
                info!("No persisted KMS configuration found (first run or not configured yet)");
            } else {
                warn!("Failed to load KMS config from storage: {}", e);
            }
            None
        }
    }
}
/// Configure KMS service handler
pub struct ConfigureKmsHandler;

#[async_trait::async_trait]
impl Operation for ConfigureKmsHandler {
    /// Admin API: applies a new KMS configuration.
    ///
    /// Reads a JSON `ConfigureKmsRequest` body (required), configures the
    /// (lazily initialized) global KMS service manager with it and, on
    /// success, persists the configuration to cluster storage. The response
    /// is a JSON `ConfigureKmsResponse` with the outcome and current status.
    async fn call(&self, mut req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "authentication required"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        // Read the body with a size cap to avoid unbounded buffering.
        let body = req
            .input
            .store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE)
            .await
            .map_err(|e| s3_error!(InvalidRequest, "failed to read request body: {}", e))?;
        let configure_request: ConfigureKmsRequest = if body.is_empty() {
            return Ok(S3Response::new((
                StatusCode::BAD_REQUEST,
                Body::from("Request body is required".to_string()),
            )));
        } else {
            match serde_json::from_slice(&body) {
                Ok(req) => req,
                Err(e) => {
                    error!("Invalid JSON in configure request: {}", e);
                    return Ok(S3Response::new((StatusCode::BAD_REQUEST, Body::from(format!("Invalid JSON: {e}")))));
                }
            }
        };
        info!("Configuring KMS with request: {:?}", configure_request);
        let service_manager = get_global_kms_service_manager().unwrap_or_else(|| {
            warn!("KMS service manager not initialized, initializing now as fallback");
            // Initialize the service manager as a fallback
            rustfs_kms::init_global_kms_service_manager()
        });
        // Convert request to KmsConfig
        let kms_config = configure_request.to_kms_config();
        // Configure the service
        let (success, message, status) = match service_manager.configure(kms_config.clone()).await {
            Ok(()) => {
                // Persist the configuration to cluster storage
                if let Err(e) = save_kms_config(&kms_config).await {
                    // In-memory and persisted state now disagree; report
                    // failure so the operator retries.
                    let error_msg = format!("KMS configured in memory but failed to persist: {e}");
                    error!("{}", error_msg);
                    let status = service_manager.get_status().await;
                    (false, error_msg, status)
                } else {
                    let status = service_manager.get_status().await;
                    info!("KMS configured successfully and persisted with status: {:?}", status);
                    (true, "KMS configured successfully".to_string(), status)
                }
            }
            Err(e) => {
                let error_msg = format!("Failed to configure KMS: {e}");
                error!("{}", error_msg);
                let status = service_manager.get_status().await;
                (false, error_msg, status)
            }
        };
        let response = ConfigureKmsResponse {
            success,
            message,
            status,
        };
        let json_response = match serde_json::to_string(&response) {
            Ok(json) => json,
            Err(e) => {
                error!("Failed to serialize response: {}", e);
                return Ok(S3Response::new((
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Body::from("Serialization error".to_string()),
                )));
            }
        };
        Ok(S3Response::new((StatusCode::OK, Body::from(json_response))))
    }
}
/// Start KMS service handler
pub struct StartKmsHandler;

#[async_trait::async_trait]
impl Operation for StartKmsHandler {
    /// Admin API: starts (or force-restarts) the KMS service.
    ///
    /// Accepts an optional JSON `StartKmsRequest` body; an empty body is
    /// treated as `{ force: None }`. When the service is already running and
    /// `force` is not set, the call returns success=false with an explanatory
    /// message; with `force=true` the service is stopped and started again.
    async fn call(&self, mut req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "authentication required"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let body = req
            .input
            .store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE)
            .await
            .map_err(|e| s3_error!(InvalidRequest, "failed to read request body: {}", e))?;
        // An empty body means "start with defaults" rather than an error.
        let start_request: StartKmsRequest = if body.is_empty() {
            StartKmsRequest { force: None }
        } else {
            match serde_json::from_slice(&body) {
                Ok(req) => req,
                Err(e) => {
                    error!("Invalid JSON in start request: {}", e);
                    return Ok(S3Response::new((StatusCode::BAD_REQUEST, Body::from(format!("Invalid JSON: {e}")))));
                }
            }
        };
        info!("Starting KMS service with force: {:?}", start_request.force);
        let service_manager = get_global_kms_service_manager().unwrap_or_else(|| {
            warn!("KMS service manager not initialized, initializing now as fallback");
            // Initialize the service manager as a fallback
            rustfs_kms::init_global_kms_service_manager()
        });
        // Check if already running and force flag
        // NOTE(review): the status read and the start/stop calls below are
        // not atomic; a concurrent state change in between is possible.
        let current_status = service_manager.get_status().await;
        if matches!(current_status, KmsServiceStatus::Running) && !start_request.force.unwrap_or(false) {
            warn!("KMS service is already running");
            let response = StartKmsResponse {
                success: false,
                message: "KMS service is already running. Use force=true to restart.".to_string(),
                status: current_status,
            };
            let json_response = match serde_json::to_string(&response) {
                Ok(json) => json,
                Err(e) => {
                    error!("Failed to serialize response: {}", e);
                    return Ok(S3Response::new((
                        StatusCode::INTERNAL_SERVER_ERROR,
                        Body::from("Serialization error".to_string()),
                    )));
                }
            };
            return Ok(S3Response::new((StatusCode::OK, Body::from(json_response))));
        }
        // Start the service (or restart if force=true)
        let (success, message, status) =
            if start_request.force.unwrap_or(false) && matches!(current_status, KmsServiceStatus::Running) {
                // Force restart
                match service_manager.stop().await {
                    Ok(()) => match service_manager.start().await {
                        Ok(()) => {
                            let status = service_manager.get_status().await;
                            info!("KMS service restarted successfully");
                            (true, "KMS service restarted successfully".to_string(), status)
                        }
                        Err(e) => {
                            let error_msg = format!("Failed to restart KMS service: {e}");
                            error!("{}", error_msg);
                            let status = service_manager.get_status().await;
                            (false, error_msg, status)
                        }
                    },
                    Err(e) => {
                        let error_msg = format!("Failed to stop KMS service for restart: {e}");
                        error!("{}", error_msg);
                        let status = service_manager.get_status().await;
                        (false, error_msg, status)
                    }
                }
            } else {
                // Normal start
                match service_manager.start().await {
                    Ok(()) => {
                        let status = service_manager.get_status().await;
                        info!("KMS service started successfully");
                        (true, "KMS service started successfully".to_string(), status)
                    }
                    Err(e) => {
                        let error_msg = format!("Failed to start KMS service: {e}");
                        error!("{}", error_msg);
                        let status = service_manager.get_status().await;
                        (false, error_msg, status)
                    }
                }
            };
        let response = StartKmsResponse {
            success,
            message,
            status,
        };
        let json_response = match serde_json::to_string(&response) {
            Ok(json) => json,
            Err(e) => {
                error!("Failed to serialize response: {}", e);
                return Ok(S3Response::new((
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Body::from("Serialization error".to_string()),
                )));
            }
        };
        Ok(S3Response::new((StatusCode::OK, Body::from(json_response))))
    }
}
/// Stop KMS service handler
pub struct StopKmsHandler;

#[async_trait::async_trait]
impl Operation for StopKmsHandler {
    /// Admin API: stops the running KMS service and reports the resulting
    /// status as a JSON `StopKmsResponse`.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        // Authentication: a credential must be present on the request.
        let Some(cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "authentication required"));
        };
        let session_token = get_session_token(&req.uri, &req.headers).unwrap_or_default();
        let (cred, owner) = check_key_valid(session_token, &cred.access_key).await?;

        // Authorization: caller needs the server-info admin action.
        let remote_addr = req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0));
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
            remote_addr,
        )
        .await?;

        info!("Stopping KMS service");

        // Lazily initialize the global service manager if it was never set up.
        let service_manager = match get_global_kms_service_manager() {
            Some(manager) => manager,
            None => {
                warn!("KMS service manager not initialized, initializing now as fallback");
                rustfs_kms::init_global_kms_service_manager()
            }
        };

        // Attempt the stop, then read back whatever status the manager reports.
        let stop_result = service_manager.stop().await;
        let status = service_manager.get_status().await;
        let (success, message) = match stop_result {
            Ok(()) => {
                info!("KMS service stopped successfully");
                (true, "KMS service stopped successfully".to_string())
            }
            Err(e) => {
                let error_msg = format!("Failed to stop KMS service: {e}");
                error!("{}", error_msg);
                (false, error_msg)
            }
        };

        let response = StopKmsResponse { success, message, status };
        match serde_json::to_string(&response) {
            Ok(json) => Ok(S3Response::new((StatusCode::OK, Body::from(json)))),
            Err(e) => {
                error!("Failed to serialize response: {}", e);
                Ok(S3Response::new((
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Body::from("Serialization error".to_string()),
                )))
            }
        }
    }
}
/// Get KMS status handler
pub struct GetKmsStatusHandler;

#[async_trait::async_trait]
impl Operation for GetKmsStatusHandler {
    /// Admin API: reports the KMS service status as JSON.
    ///
    /// The response includes the backend type, a health-check result (only
    /// when the service is running) and a config summary with sensitive
    /// fields stripped.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "authentication required"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        info!("Getting KMS service status");
        let service_manager = get_global_kms_service_manager().unwrap_or_else(|| {
            warn!("KMS service manager not initialized, initializing now as fallback");
            // Initialize the service manager as a fallback
            rustfs_kms::init_global_kms_service_manager()
        });
        let status = service_manager.get_status().await;
        let config = service_manager.get_config().await;
        // Get backend type and health status
        let backend_type = config.as_ref().map(|c| c.backend.clone());
        // Health is only meaningful while running; a failed health check is
        // reported as Some(false) rather than surfaced as an error.
        let healthy = if matches!(status, KmsServiceStatus::Running) {
            match service_manager.health_check().await {
                Ok(healthy) => Some(healthy),
                Err(_) => Some(false),
            }
        } else {
            None
        };
        // Create config summary (without sensitive data)
        let config_summary = config.as_ref().map(KmsConfigSummary::from);
        let response = KmsStatusResponse {
            status,
            backend_type,
            healthy,
            config_summary,
        };
        info!("KMS status: {:?}", response);
        let json_response = match serde_json::to_string(&response) {
            Ok(json) => json,
            Err(e) => {
                error!("Failed to serialize response: {}", e);
                return Ok(S3Response::new((
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Body::from("Serialization error".to_string()),
                )));
            }
        };
        Ok(S3Response::new((StatusCode::OK, Body::from(json_response))))
    }
}
/// Reconfigure KMS service handler
pub struct ReconfigureKmsHandler;

#[async_trait::async_trait]
impl Operation for ReconfigureKmsHandler {
    /// Admin API: replaces the KMS configuration and restarts the service.
    ///
    /// Same request/response shapes as `ConfigureKmsHandler`, but goes
    /// through `reconfigure` (stop, apply config, start). On success the new
    /// configuration is persisted to cluster storage.
    async fn call(&self, mut req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "authentication required"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        // Read the body with a size cap to avoid unbounded buffering.
        let body = req
            .input
            .store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE)
            .await
            .map_err(|e| s3_error!(InvalidRequest, "failed to read request body: {}", e))?;
        let configure_request: ConfigureKmsRequest = if body.is_empty() {
            return Ok(S3Response::new((
                StatusCode::BAD_REQUEST,
                Body::from("Request body is required".to_string()),
            )));
        } else {
            match serde_json::from_slice(&body) {
                Ok(req) => req,
                Err(e) => {
                    error!("Invalid JSON in reconfigure request: {}", e);
                    return Ok(S3Response::new((StatusCode::BAD_REQUEST, Body::from(format!("Invalid JSON: {e}")))));
                }
            }
        };
        info!("Reconfiguring KMS with request: {:?}", configure_request);
        let service_manager = get_global_kms_service_manager().unwrap_or_else(|| {
            warn!("KMS service manager not initialized, initializing now as fallback");
            // Initialize the service manager as a fallback
            rustfs_kms::init_global_kms_service_manager()
        });
        // Convert request to KmsConfig
        let kms_config = configure_request.to_kms_config();
        // Reconfigure the service (stops, reconfigures, and starts)
        let (success, message, status) = match service_manager.reconfigure(kms_config.clone()).await {
            Ok(()) => {
                // Persist the configuration to cluster storage
                if let Err(e) = save_kms_config(&kms_config).await {
                    // In-memory and persisted state now disagree; report
                    // failure so the operator retries.
                    let error_msg = format!("KMS reconfigured in memory but failed to persist: {e}");
                    error!("{}", error_msg);
                    let status = service_manager.get_status().await;
                    (false, error_msg, status)
                } else {
                    let status = service_manager.get_status().await;
                    info!("KMS reconfigured successfully and persisted with status: {:?}", status);
                    (true, "KMS reconfigured and restarted successfully".to_string(), status)
                }
            }
            Err(e) => {
                let error_msg = format!("Failed to reconfigure KMS: {e}");
                error!("{}", error_msg);
                let status = service_manager.get_status().await;
                (false, error_msg, status)
            }
        };
        let response = ConfigureKmsResponse {
            success,
            message,
            status,
        };
        let json_response = match serde_json::to_string(&response) {
            Ok(json) => json,
            Err(e) => {
                error!("Failed to serialize response: {}", e);
                return Ok(S3Response::new((
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Body::from("Serialization error".to_string()),
                )));
            }
        };
        Ok(S3Response::new((StatusCode::OK, Body::from(json_response))))
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/handlers/kms_keys.rs | rustfs/src/admin/handlers/kms_keys.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! KMS key management admin API handlers
use super::Operation;
use crate::admin::auth::validate_admin_request;
use crate::auth::{check_key_valid, get_session_token};
use crate::server::RemoteAddr;
use hyper::{HeaderMap, StatusCode};
use matchit::Params;
use rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE;
use rustfs_kms::{KmsError, get_global_kms_service_manager, types::*};
use rustfs_policy::policy::action::{Action, AdminAction};
use s3s::header::CONTENT_TYPE;
use s3s::{Body, S3Request, S3Response, S3Result, s3_error};
use serde::{Deserialize, Serialize};
use serde_json;
use std::collections::HashMap;
use tracing::{error, info};
use urlencoding;
/// Body of the create-key admin request; all fields are optional.
#[derive(Debug, Serialize, Deserialize)]
pub struct CreateKmsKeyRequest {
    /// Intended key usage; the handler defaults to EncryptDecrypt when absent.
    pub key_usage: Option<KeyUsage>,
    /// Free-form key description.
    pub description: Option<String>,
    /// Arbitrary tags; the "name" tag, when present, is used as the key name.
    pub tags: Option<HashMap<String, String>>,
}

/// JSON payload returned by the create-key admin endpoint.
#[derive(Debug, Serialize, Deserialize)]
pub struct CreateKmsKeyResponse {
    /// Whether key creation succeeded.
    pub success: bool,
    /// Human-readable outcome description.
    pub message: String,
    /// Id of the newly created key; empty string on failure.
    pub key_id: String,
    /// Metadata of the created key; `None` on failure.
    pub key_metadata: Option<KeyMetadata>,
}
/// Parse the query string of `uri` into a map of percent-decoded
/// key/value pairs.
///
/// Pairs without an `=` are skipped; a component that fails to decode
/// falls back to the empty string (the `unwrap_or_default` behavior).
/// When a key repeats, the last occurrence wins.
fn extract_query_params(uri: &hyper::Uri) -> HashMap<String, String> {
    let Some(query) = uri.query() else {
        return HashMap::new();
    };
    query
        .split('&')
        .filter_map(|pair| pair.split_once('='))
        .map(|(key, value)| {
            (
                urlencoding::decode(key).unwrap_or_default().into_owned(),
                urlencoding::decode(value).unwrap_or_default().into_owned(),
            )
        })
        .collect()
}
/// Create a new KMS key
pub struct CreateKmsKeyHandler;

#[async_trait::async_trait]
impl Operation for CreateKmsKeyHandler {
    /// Admin API: creates a new KMS key.
    ///
    /// Accepts an optional JSON `CreateKmsKeyRequest` body; an empty body
    /// creates an EncryptDecrypt key with no description or tags. The key
    /// name, if any, comes from the `"name"` tag. Service-unavailable
    /// conditions are reported as 503 with a JSON error payload rather than
    /// an S3 error.
    async fn call(&self, mut req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "authentication required"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        // Read the body with a size cap to avoid unbounded buffering.
        let body = req
            .input
            .store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE)
            .await
            .map_err(|e| s3_error!(InvalidRequest, "failed to read request body: {}", e))?;
        // Empty body: fall back to a default EncryptDecrypt key request.
        let request: CreateKmsKeyRequest = if body.is_empty() {
            CreateKmsKeyRequest {
                key_usage: Some(KeyUsage::EncryptDecrypt),
                description: None,
                tags: None,
            }
        } else {
            serde_json::from_slice(&body).map_err(|e| s3_error!(InvalidRequest, "invalid JSON: {}", e))?
        };
        // Service manager missing entirely -> 503 with a JSON error body.
        let Some(service_manager) = get_global_kms_service_manager() else {
            let response = CreateKmsKeyResponse {
                success: false,
                message: "KMS service manager not initialized".to_string(),
                key_id: "".to_string(),
                key_metadata: None,
            };
            let data =
                serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
            let mut headers = HeaderMap::new();
            headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
            return Ok(S3Response::with_headers((StatusCode::SERVICE_UNAVAILABLE, Body::from(data)), headers));
        };
        // Manager exists but the service is not running -> also 503.
        let Some(manager) = service_manager.get_manager().await else {
            let response = CreateKmsKeyResponse {
                success: false,
                message: "KMS service not running".to_string(),
                key_id: "".to_string(),
                key_metadata: None,
            };
            let data =
                serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
            let mut headers = HeaderMap::new();
            headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
            return Ok(S3Response::with_headers((StatusCode::SERVICE_UNAVAILABLE, Body::from(data)), headers));
        };
        // Extract key name from tags if provided
        let tags = request.tags.unwrap_or_default();
        let key_name = tags.get("name").cloned();
        let kms_request = CreateKeyRequest {
            key_name,
            key_usage: request.key_usage.unwrap_or(KeyUsage::EncryptDecrypt),
            description: request.description,
            tags,
            // NOTE(review): origin is hard-coded to "AWS_KMS" regardless of
            // the configured backend — confirm this is intentional.
            origin: Some("AWS_KMS".to_string()),
            policy: None,
        };
        match manager.create_key(kms_request).await {
            Ok(kms_response) => {
                info!("Created KMS key: {}", kms_response.key_id);
                let response = CreateKmsKeyResponse {
                    success: true,
                    message: "Key created successfully".to_string(),
                    key_id: kms_response.key_id,
                    key_metadata: Some(kms_response.key_metadata),
                };
                let data =
                    serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
                let mut headers = HeaderMap::new();
                headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
                Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), headers))
            }
            Err(e) => {
                error!("Failed to create KMS key: {}", e);
                let response = CreateKmsKeyResponse {
                    success: false,
                    message: format!("Failed to create key: {e}"),
                    key_id: "".to_string(),
                    key_metadata: None,
                };
                let data =
                    serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
                let mut headers = HeaderMap::new();
                headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
                Ok(S3Response::with_headers((StatusCode::INTERNAL_SERVER_ERROR, Body::from(data)), headers))
            }
        }
    }
}
/// Request body for the admin "delete KMS key" endpoint.
#[derive(Debug, Serialize, Deserialize)]
pub struct DeleteKmsKeyRequest {
    // Identifier of the key to delete.
    pub key_id: String,
    // Presumably the waiting period before deletion takes effect — forwarded
    // verbatim to the KMS layer's DeleteKeyRequest.
    pub pending_window_in_days: Option<u32>,
    // Forwarded to the KMS layer; presumably bypasses the pending window.
    pub force_immediate: Option<bool>,
}

/// JSON response returned by the delete-key handler.
#[derive(Debug, Serialize, Deserialize)]
pub struct DeleteKmsKeyResponse {
    pub success: bool,
    pub message: String,
    pub key_id: String,
    // Scheduled deletion date when the KMS backend reports one.
    pub deletion_date: Option<String>,
}

/// Delete a KMS key
pub struct DeleteKmsKeyHandler;
#[async_trait::async_trait]
impl Operation for DeleteKmsKeyHandler {
    /// Handles the admin "delete KMS key" request.
    ///
    /// Parameters come either from a JSON body or, when the body is empty,
    /// from query parameters. Known KMS errors are mapped to distinct HTTP
    /// statuses (404 / 400); everything else becomes a 500, all with a
    /// structured JSON body.
    async fn call(&self, mut req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        // Authentication + admin authorization, same scheme as the other KMS handlers.
        let Some(cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "authentication required"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let body = req
            .input
            .store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE)
            .await
            .map_err(|e| s3_error!(InvalidRequest, "failed to read request body: {}", e))?;
        // Empty body: fall back to query parameters; `keyId` is then mandatory.
        let request: DeleteKmsKeyRequest = if body.is_empty() {
            let query_params = extract_query_params(&req.uri);
            let Some(key_id) = query_params.get("keyId") else {
                let response = DeleteKmsKeyResponse {
                    success: false,
                    message: "missing keyId parameter".to_string(),
                    key_id: "".to_string(),
                    deletion_date: None,
                };
                let data =
                    serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
                let mut headers = HeaderMap::new();
                headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
                return Ok(S3Response::with_headers((StatusCode::BAD_REQUEST, Body::from(data)), headers));
            };
            // Extract pending_window_in_days and force_immediate from query parameters
            // NOTE(review): these two use snake_case while the key id uses
            // camelCase ("keyId") — confirm the intended query-parameter naming.
            let pending_window_in_days = query_params.get("pending_window_in_days").and_then(|s| s.parse::<u32>().ok());
            let force_immediate = query_params.get("force_immediate").and_then(|s| s.parse::<bool>().ok());
            DeleteKmsKeyRequest {
                key_id: key_id.clone(),
                pending_window_in_days,
                force_immediate,
            }
        } else {
            serde_json::from_slice(&body).map_err(|e| s3_error!(InvalidRequest, "invalid JSON: {}", e))?
        };
        // KMS subsystem availability checks — 503 with JSON body on failure.
        let Some(service_manager) = get_global_kms_service_manager() else {
            let response = DeleteKmsKeyResponse {
                success: false,
                message: "KMS service manager not initialized".to_string(),
                key_id: request.key_id,
                deletion_date: None,
            };
            let data =
                serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
            let mut headers = HeaderMap::new();
            headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
            return Ok(S3Response::with_headers((StatusCode::SERVICE_UNAVAILABLE, Body::from(data)), headers));
        };
        let Some(manager) = service_manager.get_manager().await else {
            let response = DeleteKmsKeyResponse {
                success: false,
                message: "KMS service not running".to_string(),
                key_id: request.key_id,
                deletion_date: None,
            };
            let data =
                serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
            let mut headers = HeaderMap::new();
            headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
            return Ok(S3Response::with_headers((StatusCode::SERVICE_UNAVAILABLE, Body::from(data)), headers));
        };
        let kms_request = DeleteKeyRequest {
            key_id: request.key_id.clone(),
            pending_window_in_days: request.pending_window_in_days,
            force_immediate: request.force_immediate,
        };
        match manager.delete_key(kms_request).await {
            Ok(kms_response) => {
                info!("Successfully deleted KMS key: {}", kms_response.key_id);
                let response = DeleteKmsKeyResponse {
                    success: true,
                    message: "Key deleted successfully".to_string(),
                    key_id: kms_response.key_id,
                    deletion_date: kms_response.deletion_date,
                };
                let data =
                    serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
                let mut headers = HeaderMap::new();
                headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
                Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), headers))
            }
            Err(e) => {
                error!("Failed to delete KMS key {}: {}", request.key_id, e);
                // Translate well-known KMS errors to meaningful HTTP statuses.
                let status = match &e {
                    KmsError::KeyNotFound { .. } => StatusCode::NOT_FOUND,
                    KmsError::InvalidOperation { .. } | KmsError::ValidationError { .. } => StatusCode::BAD_REQUEST,
                    _ => StatusCode::INTERNAL_SERVER_ERROR,
                };
                let response = DeleteKmsKeyResponse {
                    success: false,
                    message: format!("Failed to delete key: {e}"),
                    key_id: request.key_id,
                    deletion_date: None,
                };
                let data =
                    serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
                let mut headers = HeaderMap::new();
                headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
                Ok(S3Response::with_headers((status, Body::from(data)), headers))
            }
        }
    }
}
/// Request body for cancelling a scheduled KMS key deletion.
#[derive(Debug, Serialize, Deserialize)]
pub struct CancelKmsKeyDeletionRequest {
    // Identifier of the key whose pending deletion should be cancelled.
    pub key_id: String,
}

/// JSON response returned by the cancel-deletion handler.
#[derive(Debug, Serialize, Deserialize)]
pub struct CancelKmsKeyDeletionResponse {
    pub success: bool,
    pub message: String,
    pub key_id: String,
    // Present on success: metadata of the restored key.
    pub key_metadata: Option<KeyMetadata>,
}

/// Cancel KMS key deletion
pub struct CancelKmsKeyDeletionHandler;
#[async_trait::async_trait]
impl Operation for CancelKmsKeyDeletionHandler {
    /// Handles the admin "cancel KMS key deletion" request.
    ///
    /// The key id comes from the JSON body or, when the body is empty, from
    /// the `keyId` query parameter. Responses are always structured JSON.
    async fn call(&self, mut req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        // Authentication + admin authorization, same scheme as the other KMS handlers.
        let Some(cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "authentication required"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let body = req
            .input
            .store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE)
            .await
            .map_err(|e| s3_error!(InvalidRequest, "failed to read request body: {}", e))?;
        // Empty body: fall back to the `keyId` query parameter (mandatory).
        let request: CancelKmsKeyDeletionRequest = if body.is_empty() {
            let query_params = extract_query_params(&req.uri);
            let Some(key_id) = query_params.get("keyId") else {
                let response = CancelKmsKeyDeletionResponse {
                    success: false,
                    message: "missing keyId parameter".to_string(),
                    key_id: "".to_string(),
                    key_metadata: None,
                };
                let data =
                    serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
                let mut headers = HeaderMap::new();
                headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
                return Ok(S3Response::with_headers((StatusCode::BAD_REQUEST, Body::from(data)), headers));
            };
            CancelKmsKeyDeletionRequest { key_id: key_id.clone() }
        } else {
            serde_json::from_slice(&body).map_err(|e| s3_error!(InvalidRequest, "invalid JSON: {}", e))?
        };
        // KMS subsystem availability checks — 503 with JSON body on failure.
        let Some(service_manager) = get_global_kms_service_manager() else {
            let response = CancelKmsKeyDeletionResponse {
                success: false,
                message: "KMS service manager not initialized".to_string(),
                key_id: request.key_id,
                key_metadata: None,
            };
            let data =
                serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
            let mut headers = HeaderMap::new();
            headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
            return Ok(S3Response::with_headers((StatusCode::SERVICE_UNAVAILABLE, Body::from(data)), headers));
        };
        let Some(manager) = service_manager.get_manager().await else {
            let response = CancelKmsKeyDeletionResponse {
                success: false,
                message: "KMS service not running".to_string(),
                key_id: request.key_id,
                key_metadata: None,
            };
            let data =
                serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
            let mut headers = HeaderMap::new();
            headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
            return Ok(S3Response::with_headers((StatusCode::SERVICE_UNAVAILABLE, Body::from(data)), headers));
        };
        let kms_request = CancelKeyDeletionRequest {
            key_id: request.key_id.clone(),
        };
        match manager.cancel_key_deletion(kms_request).await {
            Ok(kms_response) => {
                info!("Cancelled deletion for KMS key: {}", kms_response.key_id);
                let response = CancelKmsKeyDeletionResponse {
                    success: true,
                    message: "Key deletion cancelled successfully".to_string(),
                    key_id: kms_response.key_id,
                    key_metadata: Some(kms_response.key_metadata),
                };
                let data =
                    serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
                let mut headers = HeaderMap::new();
                headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
                Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), headers))
            }
            Err(e) => {
                // Unlike delete, all failures map to 500 here (no KmsError triage).
                error!("Failed to cancel deletion for KMS key {}: {}", request.key_id, e);
                let response = CancelKmsKeyDeletionResponse {
                    success: false,
                    message: format!("Failed to cancel key deletion: {e}"),
                    key_id: request.key_id,
                    key_metadata: None,
                };
                let data =
                    serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
                let mut headers = HeaderMap::new();
                headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
                Ok(S3Response::with_headers((StatusCode::INTERNAL_SERVER_ERROR, Body::from(data)), headers))
            }
        }
    }
}
/// JSON response returned by the list-keys handler.
#[derive(Debug, Serialize, Deserialize)]
pub struct ListKmsKeysResponse {
    pub success: bool,
    pub message: String,
    pub keys: Vec<KeyInfo>,
    // True when more keys exist beyond this page.
    pub truncated: bool,
    // Pagination cursor for the next request, when truncated.
    pub next_marker: Option<String>,
}

/// List KMS keys
pub struct ListKmsKeysHandler;
#[async_trait::async_trait]
impl Operation for ListKmsKeysHandler {
    /// Handles the admin "list KMS keys" request.
    ///
    /// Pagination is driven by the `limit` (default 100) and `marker` query
    /// parameters; a malformed `limit` silently falls back to the default.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        // Authentication + admin authorization, same scheme as the other KMS handlers.
        let Some(cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "authentication required"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let query_params = extract_query_params(&req.uri);
        let limit = query_params.get("limit").and_then(|s| s.parse::<u32>().ok()).unwrap_or(100);
        let marker = query_params.get("marker").cloned();
        // KMS subsystem availability checks — 503 with JSON body on failure.
        let Some(service_manager) = get_global_kms_service_manager() else {
            let response = ListKmsKeysResponse {
                success: false,
                message: "KMS service manager not initialized".to_string(),
                keys: vec![],
                truncated: false,
                next_marker: None,
            };
            let data =
                serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
            let mut headers = HeaderMap::new();
            headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
            return Ok(S3Response::with_headers((StatusCode::SERVICE_UNAVAILABLE, Body::from(data)), headers));
        };
        let Some(manager) = service_manager.get_manager().await else {
            let response = ListKmsKeysResponse {
                success: false,
                message: "KMS service not running".to_string(),
                keys: vec![],
                truncated: false,
                next_marker: None,
            };
            let data =
                serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
            let mut headers = HeaderMap::new();
            headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
            return Ok(S3Response::with_headers((StatusCode::SERVICE_UNAVAILABLE, Body::from(data)), headers));
        };
        // No status/usage filtering is exposed by this endpoint.
        let kms_request = ListKeysRequest {
            limit: Some(limit),
            marker,
            status_filter: None,
            usage_filter: None,
        };
        match manager.list_keys(kms_request).await {
            Ok(kms_response) => {
                info!("Listed {} KMS keys", kms_response.keys.len());
                let response = ListKmsKeysResponse {
                    success: true,
                    message: "Keys listed successfully".to_string(),
                    keys: kms_response.keys,
                    truncated: kms_response.truncated,
                    next_marker: kms_response.next_marker,
                };
                let data =
                    serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
                let mut headers = HeaderMap::new();
                headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
                Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), headers))
            }
            Err(e) => {
                error!("Failed to list KMS keys: {}", e);
                let response = ListKmsKeysResponse {
                    success: false,
                    message: format!("Failed to list keys: {e}"),
                    keys: vec![],
                    truncated: false,
                    next_marker: None,
                };
                let data =
                    serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
                let mut headers = HeaderMap::new();
                headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
                Ok(S3Response::with_headers((StatusCode::INTERNAL_SERVER_ERROR, Body::from(data)), headers))
            }
        }
    }
}
/// JSON response returned by the describe-key handler.
#[derive(Debug, Serialize, Deserialize)]
pub struct DescribeKmsKeyResponse {
    pub success: bool,
    pub message: String,
    // Present on success: the described key's metadata.
    pub key_metadata: Option<KeyMetadata>,
}

/// Describe a KMS key
pub struct DescribeKmsKeyHandler;
#[async_trait::async_trait]
impl Operation for DescribeKmsKeyHandler {
    /// Handles the admin "describe KMS key" request.
    ///
    /// Unlike the other KMS handlers, the key id comes from the route
    /// parameter `key_id` (no request body is read).
    async fn call(&self, req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        // Authentication + admin authorization, same scheme as the other KMS handlers.
        let Some(cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "authentication required"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        // Route parameter `key_id` is required (the error message says
        // "keyId" for consistency with the query-based endpoints).
        let Some(key_id) = params.get("key_id") else {
            let response = DescribeKmsKeyResponse {
                success: false,
                message: "missing keyId parameter".to_string(),
                key_metadata: None,
            };
            let data =
                serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
            let mut headers = HeaderMap::new();
            headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
            return Ok(S3Response::with_headers((StatusCode::BAD_REQUEST, Body::from(data)), headers));
        };
        // KMS subsystem availability checks — 503 with JSON body on failure.
        let Some(service_manager) = get_global_kms_service_manager() else {
            let response = DescribeKmsKeyResponse {
                success: false,
                message: "KMS service manager not initialized".to_string(),
                key_metadata: None,
            };
            let data =
                serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
            let mut headers = HeaderMap::new();
            headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
            return Ok(S3Response::with_headers((StatusCode::SERVICE_UNAVAILABLE, Body::from(data)), headers));
        };
        let Some(manager) = service_manager.get_manager().await else {
            let response = DescribeKmsKeyResponse {
                success: false,
                message: "KMS service not running".to_string(),
                key_metadata: None,
            };
            let data =
                serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
            let mut headers = HeaderMap::new();
            headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
            return Ok(S3Response::with_headers((StatusCode::SERVICE_UNAVAILABLE, Body::from(data)), headers));
        };
        let kms_request = DescribeKeyRequest {
            key_id: key_id.to_string(),
        };
        match manager.describe_key(kms_request).await {
            Ok(kms_response) => {
                info!("Described KMS key: {}", key_id);
                let response = DescribeKmsKeyResponse {
                    success: true,
                    message: "Key described successfully".to_string(),
                    key_metadata: Some(kms_response.key_metadata),
                };
                let data =
                    serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
                let mut headers = HeaderMap::new();
                headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
                Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), headers))
            }
            Err(e) => {
                error!("Failed to describe KMS key {}: {}", key_id, e);
                // Translate well-known KMS errors to meaningful HTTP statuses.
                let status = match &e {
                    KmsError::KeyNotFound { .. } => StatusCode::NOT_FOUND,
                    KmsError::InvalidOperation { .. } => StatusCode::BAD_REQUEST,
                    _ => StatusCode::INTERNAL_SERVER_ERROR,
                };
                let response = DescribeKmsKeyResponse {
                    success: false,
                    message: format!("Failed to describe key: {e}"),
                    key_metadata: None,
                };
                let data =
                    serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
                let mut headers = HeaderMap::new();
                headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
                Ok(S3Response::with_headers((status, Body::from(data)), headers))
            }
        }
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/handlers/trace.rs | rustfs/src/admin/handlers/trace.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::admin::router::Operation;
use http::StatusCode;
use hyper::Uri;
use matchit::Params;
use rustfs_ecstore::{GLOBAL_Endpoints, rpc::PeerRestClient};
use rustfs_madmin::service_commands::ServiceTraceOpts;
use s3s::{Body, S3Request, S3Response, S3Result, s3_error};
use tracing::warn;
/// Builds `ServiceTraceOpts` from the query string of `uri`.
///
/// Returns `InvalidRequest` when the parameters cannot be parsed.
#[allow(dead_code)]
fn extract_trace_options(uri: &Uri) -> S3Result<ServiceTraceOpts> {
    let mut options = ServiceTraceOpts::default();
    if options.parse_params(uri).is_err() {
        return Err(s3_error!(InvalidRequest, "invalid params"));
    }
    Ok(options)
}
/// Admin trace endpoint (stub).
#[allow(dead_code)]
pub struct Trace {}

#[async_trait::async_trait]
impl Operation for Trace {
    /// Validates the trace options and resolves peer clients, then reports
    /// `NotImplemented` — the streaming implementation is still pending.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle Trace");
        let _trace_opts = extract_trace_options(&req.uri)?;
        // Resolve peer REST clients when the endpoint set is initialized;
        // otherwise fall back to empty peer lists.
        let _peers = if let Some(ep) = GLOBAL_Endpoints.get() {
            PeerRestClient::new_clients(ep.clone()).await
        } else {
            (Vec::new(), Vec::new())
        };
        Err(s3_error!(NotImplemented))
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/handlers/tier.rs | rustfs/src/admin/handlers/tier.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_variables, unused_mut, unused_must_use)]
use crate::{
admin::{auth::validate_admin_request, router::Operation},
auth::{check_key_valid, get_session_token},
server::RemoteAddr,
};
use http::{HeaderMap, StatusCode};
use matchit::Params;
use rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE;
use rustfs_ecstore::{
config::storageclass,
global::GLOBAL_TierConfigMgr,
tier::{
tier::{ERR_TIER_BACKEND_IN_USE, ERR_TIER_BACKEND_NOT_EMPTY, ERR_TIER_MISSING_CREDENTIALS},
tier_admin::TierCreds,
tier_config::{TierConfig, TierType},
tier_handlers::{
ERR_TIER_ALREADY_EXISTS, ERR_TIER_CONNECT_ERR, ERR_TIER_INVALID_CREDENTIALS, ERR_TIER_NAME_NOT_UPPERCASE,
ERR_TIER_NOT_FOUND,
},
},
};
use rustfs_policy::policy::action::{Action, AdminAction};
use s3s::{
Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result,
header::{CONTENT_LENGTH, CONTENT_TYPE},
s3_error,
};
use serde_urlencoded::from_bytes;
use time::OffsetDateTime;
use tracing::{debug, warn};
/// Query-string parameters accepted by the tier admin endpoints.
///
/// Most fields are currently unused (kept for request compatibility); the
/// handlers in this file only read `tier` (VerifyTier) and `force`
/// (AddTier/RemoveTier).
#[derive(Debug, Clone, serde::Deserialize, Default)]
pub struct AddTierQuery {
    #[serde(rename = "accessKey")]
    #[allow(dead_code)]
    pub access_key: Option<String>,
    #[allow(dead_code)]
    pub status: Option<String>,
    #[serde(rename = "secretKey")]
    #[allow(dead_code)]
    pub secret_key: Option<String>,
    #[serde(rename = "serviceName")]
    #[allow(dead_code)]
    pub service_name: Option<String>,
    #[serde(rename = "sessionToken")]
    #[allow(dead_code)]
    pub session_token: Option<String>,
    // Name of the tier to operate on (used by VerifyTier).
    pub tier: Option<String>,
    #[serde(rename = "tierName")]
    #[allow(dead_code)]
    pub tier_name: Option<String>,
    #[serde(rename = "tierType")]
    #[allow(dead_code)]
    pub tier_type: Option<String>,
    // Parsed as bool by AddTier/RemoveTier; non-boolean values are rejected.
    pub force: Option<String>,
}
/// Admin handler that registers a new remote storage tier.
pub struct AddTier {}

#[async_trait::async_trait]
impl Operation for AddTier {
    /// Adds a tier from the JSON-encoded `TierConfig` request body.
    ///
    /// Flow: parse the optional query (`force`), authenticate and authorize
    /// the caller, decode the body, derive the tier name from the backend
    /// section matching `tier_type`, reject reserved names, then register
    /// and persist via the global tier configuration manager.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let query = {
            if let Some(query) = req.uri.query() {
                let input: AddTierQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get query failed"))?;
                input
            } else {
                AddTierQuery::default()
            }
        };
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::SetTierAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let mut input = req.input;
        let body = match input.store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE).await {
            Ok(b) => b,
            Err(e) => {
                warn!("get body failed, e: {:?}", e);
                return Err(s3_error!(InvalidRequest, "tier configuration body too large or failed to read"));
            }
        };
        let mut args: TierConfig = serde_json::from_slice(&body)
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("unmarshal body err {e}")))?;
        // Derive the canonical tier name from the backend section matching
        // `tier_type`. The previous implementation used `.clone().unwrap()`,
        // which panicked when the matching section was absent from the JSON;
        // report an InvalidRequest error to the client instead. Borrowing via
        // `as_ref` also avoids cloning the whole backend config to read one field.
        let backend_name = match args.tier_type {
            TierType::S3 => args.s3.as_ref().map(|c| c.name.clone()),
            TierType::RustFS => args.rustfs.as_ref().map(|c| c.name.clone()),
            TierType::MinIO => args.minio.as_ref().map(|c| c.name.clone()),
            TierType::Aliyun => args.aliyun.as_ref().map(|c| c.name.clone()),
            TierType::Tencent => args.tencent.as_ref().map(|c| c.name.clone()),
            TierType::Huaweicloud => args.huaweicloud.as_ref().map(|c| c.name.clone()),
            TierType::Azure => args.azure.as_ref().map(|c| c.name.clone()),
            TierType::GCS => args.gcs.as_ref().map(|c| c.name.clone()),
            TierType::R2 => args.r2.as_ref().map(|c| c.name.clone()),
            // Other tier types keep whatever name was supplied directly.
            _ => Some(args.name.clone()),
        };
        let Some(tier_name) = backend_name else {
            return Err(s3_error!(InvalidRequest, "tier configuration missing for the declared tier type"));
        };
        args.name = tier_name;
        debug!("add tier args {:?}", args);
        // `force` defaults to false; a malformed value is a client error.
        let force: bool = match query.force.as_deref() {
            Some(s) if !s.is_empty() => s.parse().map_err(|e| {
                warn!("parse force failed, e: {:?}", e);
                s3_error!(InvalidRequest, "parse force failed")
            })?,
            _ => false,
        };
        // STANDARD and RRS are reserved storage-class names.
        match args.name.as_str() {
            storageclass::STANDARD | storageclass::RRS => {
                warn!("tier reserved name, args.name: {}", args.name);
                return Err(s3_error!(InvalidRequest, "Cannot use reserved tier name"));
            }
            &_ => (),
        }
        let mut tier_config_mgr = GLOBAL_TierConfigMgr.write().await;
        // Map well-known tier errors onto distinct custom S3 error codes so
        // the admin client can distinguish them.
        if let Err(err) = tier_config_mgr.add(args, force).await {
            return if err.code == ERR_TIER_ALREADY_EXISTS.code {
                Err(S3Error::with_message(
                    S3ErrorCode::Custom("TierNameAlreadyExist".into()),
                    "tier name already exists!",
                ))
            } else if err.code == ERR_TIER_NAME_NOT_UPPERCASE.code {
                Err(S3Error::with_message(
                    S3ErrorCode::Custom("TierNameNotUppercase".into()),
                    "tier name not uppercase!",
                ))
            } else if err.code == ERR_TIER_BACKEND_IN_USE.code {
                Err(S3Error::with_message(
                    S3ErrorCode::Custom("TierNameBackendInUse!".into()),
                    "tier name backend in use!",
                ))
            } else if err.code == ERR_TIER_CONNECT_ERR.code {
                Err(S3Error::with_message(
                    S3ErrorCode::Custom("TierConnectError".into()),
                    "tier connect error!",
                ))
            } else if err.code == ERR_TIER_INVALID_CREDENTIALS.code {
                Err(S3Error::with_message(S3ErrorCode::Custom(err.code.clone().into()), err.message.clone()))
            } else {
                warn!("tier_config_mgr add failed, e: {:?}", err);
                Err(S3Error::with_message(
                    S3ErrorCode::Custom("TierAddFailed".into()),
                    format!("tier add failed. {err}"),
                ))
            };
        }
        // Persist the updated tier configuration.
        if let Err(e) = tier_config_mgr.save().await {
            warn!("tier_config_mgr save failed, e: {:?}", e);
            return Err(S3Error::with_message(S3ErrorCode::Custom("TierAddFailed".into()), "tier save failed"));
        }
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
/// Admin handler that updates the credentials of an existing tier.
pub struct EditTier {}

#[async_trait::async_trait]
impl Operation for EditTier {
    /// Edits the tier named by the `tiername` route parameter using the
    /// JSON-encoded `TierCreds` request body, then persists the change.
    async fn call(&self, req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        // The query is parsed for validation; its fields are not used here
        // (masked by the file-level allow(unused_variables)).
        let query = {
            if let Some(query) = req.uri.query() {
                let input: AddTierQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get query failed"))?;
                input
            } else {
                AddTierQuery::default()
            }
        };
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        // Editing a tier requires the SetTier admin action.
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::SetTierAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let mut input = req.input;
        let body = match input.store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE).await {
            Ok(b) => b,
            Err(e) => {
                warn!("get body failed, e: {:?}", e);
                return Err(s3_error!(InvalidRequest, "tier configuration body too large or failed to read"));
            }
        };
        let creds: TierCreds = serde_json::from_slice(&body)
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("unmarshal body err {e}")))?;
        debug!("edit tier args {:?}", creds);
        // Missing route parameter degrades to "" here; the tier manager is
        // expected to report TierNotFound for it.
        let tier_name = params.get("tiername").map(|s| s.to_string()).unwrap_or_default();
        let mut tier_config_mgr = GLOBAL_TierConfigMgr.write().await;
        //tier_config_mgr.reload(api);
        // Map well-known tier errors onto distinct custom S3 error codes.
        if let Err(err) = tier_config_mgr.edit(&tier_name, creds).await {
            return if err.code == ERR_TIER_NOT_FOUND.code {
                Err(S3Error::with_message(S3ErrorCode::Custom("TierNotFound".into()), "tier not found!"))
            } else if err.code == ERR_TIER_MISSING_CREDENTIALS.code {
                Err(S3Error::with_message(
                    S3ErrorCode::Custom("TierMissingCredentials".into()),
                    "tier missing credentials!",
                ))
            } else {
                warn!("tier_config_mgr edit failed, e: {:?}", err);
                Err(S3Error::with_message(
                    S3ErrorCode::Custom("TierEditFailed".into()),
                    format!("tier edit failed. {err}"),
                ))
            };
        }
        // Persist the updated tier configuration.
        if let Err(e) = tier_config_mgr.save().await {
            warn!("tier_config_mgr save failed, e: {:?}", e);
            return Err(S3Error::with_message(S3ErrorCode::Custom("TierEditFailed".into()), "tier save failed"));
        }
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
/// Query-string parameters for ListTiers; `bucket` is parsed but not used
/// by the listing itself.
#[derive(Debug, Clone, serde::Deserialize, Default)]
pub struct BucketQuery {
    #[serde(rename = "bucket")]
    #[allow(dead_code)]
    pub bucket: String,
}
/// Admin handler that returns the configured remote tiers.
pub struct ListTiers {}

#[async_trait::async_trait]
impl Operation for ListTiers {
    /// Lists all configured tiers as a JSON array.
    ///
    /// The `bucket` query parameter is parsed (and thereby validated) but
    /// not used by the listing itself.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        // Underscore-named: parsing validates the query string; the value is unused.
        let _query = {
            if let Some(query) = req.uri.query() {
                let input: BucketQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get query failed"))?;
                input
            } else {
                BucketQuery::default()
            }
        };
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        // Listing only requires the ListTier admin action.
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ListTierAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        // Read access is sufficient here; the previous code took the guard
        // as `mut` unnecessarily (masked by the file-level allow(unused_mut)).
        let tier_config_mgr = GLOBAL_TierConfigMgr.read().await;
        let tiers = tier_config_mgr.list_tiers();
        let data = serde_json::to_vec(&tiers)
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal tiers err {e}")))?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header))
    }
}
/// Admin handler that removes an existing tier.
pub struct RemoveTier {}

#[async_trait::async_trait]
impl Operation for RemoveTier {
    /// Removes the tier named by the `tiername` route parameter; the optional
    /// `force` query parameter bypasses the backend-in-use check.
    async fn call(&self, req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let query = {
            if let Some(query) = req.uri.query() {
                let input: AddTierQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get query failed"))?;
                input
            } else {
                AddTierQuery::default()
            }
        };
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        // Removal requires the SetTier admin action.
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::SetTierAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        // `force` defaults to false; a malformed value is a client error.
        let mut force: bool = false;
        let force_str = query.force.clone().unwrap_or_default();
        if !force_str.is_empty() {
            force = force_str.parse().map_err(|e| {
                warn!("parse force failed, e: {:?}", e);
                s3_error!(InvalidRequest, "parse force failed")
            })?;
        }
        // Missing route parameter degrades to ""; the tier manager is
        // expected to report TierNotFound for it.
        let tier_name = params.get("tiername").map(|s| s.to_string()).unwrap_or_default();
        let mut tier_config_mgr = GLOBAL_TierConfigMgr.write().await;
        //tier_config_mgr.reload(api);
        // Map well-known tier errors onto distinct custom S3 error codes.
        if let Err(err) = tier_config_mgr.remove(&tier_name, force).await {
            return if err.code == ERR_TIER_NOT_FOUND.code {
                Err(S3Error::with_message(S3ErrorCode::Custom("TierNotFound".into()), "tier not found."))
            } else if err.code == ERR_TIER_BACKEND_NOT_EMPTY.code {
                Err(S3Error::with_message(S3ErrorCode::Custom("TierNameBackendInUse".into()), "tier is used."))
            } else {
                warn!("tier_config_mgr remove failed, e: {:?}", err);
                Err(S3Error::with_message(
                    S3ErrorCode::Custom("TierRemoveFailed".into()),
                    format!("tier remove failed. {err}"),
                ))
            };
        }
        // Persist the updated tier configuration.
        if let Err(e) = tier_config_mgr.save().await {
            warn!("tier_config_mgr save failed, e: {:?}", e);
            return Err(S3Error::with_message(S3ErrorCode::Custom("TierRemoveFailed".into()), "tier save failed"));
        }
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
#[allow(dead_code)]
/// Admin handler: verifies a configured remote tier's connectivity/config.
pub struct VerifyTier {}

#[async_trait::async_trait]
impl Operation for VerifyTier {
    /// Verifies the tier named by the `tier` query parameter.
    ///
    /// Flow: parse query -> authenticate caller -> authorize against the
    /// admin `ListTierAction` -> run the tier manager's verification.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        // Parse optional query parameters; a missing query string yields defaults.
        let query = {
            if let Some(query) = req.uri.query() {
                let input: AddTierQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get query failed"))?;
                input
            } else {
                AddTierQuery::default()
            }
        };
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ListTierAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        // FIX: this used to call `query.tier.unwrap()`, which panicked the
        // worker whenever the client omitted the `tier` parameter. Reject
        // such requests with InvalidArgument instead.
        let Some(tier_name) = query.tier else {
            return Err(s3_error!(InvalidArgument, "tier name is required"));
        };
        let mut tier_config_mgr = GLOBAL_TierConfigMgr.write().await;
        tier_config_mgr.verify(&tier_name).await;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
/// Admin handler: returns the configuration of a single remote tier as JSON.
pub struct GetTierInfo {}

#[async_trait::async_trait]
impl Operation for GetTierInfo {
    /// Looks up the tier named by the `tier` query parameter and serializes
    /// its configuration into the response body.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        // Caller must hold the admin ListTier permission.
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ListTierAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let query = {
            if let Some(query) = req.uri.query() {
                let input: AddTierQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get query failed"))?;
                input
            } else {
                AddTierQuery::default()
            }
        };
        // FIX: this used to call `query.tier.unwrap()`, which panicked the
        // worker whenever the client omitted the `tier` parameter. Reject
        // such requests with InvalidArgument instead.
        let Some(tier_name) = query.tier else {
            return Err(s3_error!(InvalidArgument, "tier name is required"));
        };
        // Read lock is sufficient: this handler only inspects the config.
        let tier_config_mgr = GLOBAL_TierConfigMgr.read().await;
        let info = tier_config_mgr.get(&tier_name);
        let data = serde_json::to_vec(&info)
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal tier err {e}")))?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header))
    }
}
/// Query parameters for the tier-clearing endpoint.
#[derive(Debug, serde::Deserialize, Default)]
pub struct ClearTierQuery {
    /// Date-derived confirmation token; must match the server-computed value.
    pub rand: Option<String>,
    /// When non-empty, parsed as a bool to force clearing tiers still in use.
    pub force: String,
}
/// Admin handler: clears the entire tier configuration.
///
/// This is destructive, so it is guarded both by the admin `SetTierAction`
/// permission and by a date-derived confirmation token (`rand`) that the
/// client must echo back.
pub struct ClearTier {}

#[async_trait::async_trait]
impl Operation for ClearTier {
    async fn call(&self, req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        // Parse optional query parameters (`rand`, `force`).
        let query = {
            if let Some(query) = req.uri.query() {
                let input: ClearTierQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get query failed"))?;
                input
            } else {
                ClearTierQuery::default()
            }
        };
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::SetTierAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        // FIX: `force_str.parse().unwrap()` panicked the worker on a malformed
        // `force` value; map the parse error to InvalidRequest instead, the
        // same way the RemoveTier handler does.
        let mut force: bool = false;
        let force_str = query.force;
        if !force_str.is_empty() {
            force = force_str.parse().map_err(|e| {
                warn!("parse force failed, e: {:?}", e);
                s3_error!(InvalidRequest, "parse force failed")
            })?;
        }
        // Build the expected confirmation token by splicing today's
        // day/month/year into a fixed string; the client must supply the
        // identical value in `rand`.
        let t = OffsetDateTime::now_utc();
        let mut rand = "AGD1R25GI3I1GJGUGJFD7FBS4DFAASDF".to_string();
        rand.insert_str(3, &t.day().to_string());
        rand.insert_str(17, &t.month().to_string());
        rand.insert_str(23, &t.year().to_string());
        warn!("tier_config_mgr rand: {}", rand);
        if query.rand != Some(rand) {
            return Err(s3_error!(InvalidRequest, "get rand failed"));
        };
        let mut tier_config_mgr = GLOBAL_TierConfigMgr.write().await;
        //tier_config_mgr.reload(api);
        if let Err(err) = tier_config_mgr.clear_tier(force).await {
            warn!("tier_config_mgr clear failed, e: {:?}", err);
            return Err(S3Error::with_message(
                S3ErrorCode::Custom("TierClearFailed".into()),
                format!("tier clear failed. {err}"),
            ));
        }
        // Persist the now-empty tier configuration.
        if let Err(e) = tier_config_mgr.save().await {
            warn!("tier_config_mgr save failed, e: {:?}", e);
            return Err(S3Error::with_message(S3ErrorCode::Custom("TierEditFailed".into()), "tier save failed"));
        }
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/handlers/service_account.rs | rustfs/src/admin/handlers/service_account.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::admin::utils::has_space_be;
use crate::auth::{constant_time_eq, get_condition_values, get_session_token};
use crate::server::RemoteAddr;
use crate::{admin::router::Operation, auth::check_key_valid};
use http::HeaderMap;
use hyper::StatusCode;
use matchit::Params;
use rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE;
use rustfs_credentials::get_global_action_cred;
use rustfs_iam::error::is_err_no_such_service_account;
use rustfs_iam::sys::{NewServiceAccountOpts, UpdateServiceAccountOpts};
use rustfs_madmin::{
AddServiceAccountReq, AddServiceAccountResp, Credentials, InfoServiceAccountResp, ListServiceAccountsResp,
ServiceAccountInfo, UpdateServiceAccountReq,
};
use rustfs_policy::policy::action::{Action, AdminAction};
use rustfs_policy::policy::{Args, Policy};
use s3s::S3ErrorCode::InvalidRequest;
use s3s::header::CONTENT_LENGTH;
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
use serde::Deserialize;
use serde_urlencoded::from_bytes;
use std::collections::HashMap;
use tracing::{debug, warn};
/// Admin handler: creates a new service account (a derived access/secret key
/// pair) for the caller or, with sufficient privileges, for another user.
pub struct AddServiceAccount {}

#[async_trait::async_trait]
impl Operation for AddServiceAccount {
    /// Creates the service account described by the JSON request body and
    /// returns the newly minted credentials as JSON.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle AddServiceAccount ");
        let Some(req_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &req_cred.access_key).await?;
        // Read the (size-limited) JSON request body.
        let mut input = req.input;
        let body = match input.store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE).await {
            Ok(b) => b,
            Err(e) => {
                warn!("get body failed, e: {:?}", e);
                return Err(s3_error!(
                    InvalidRequest,
                    "service account configuration body too large or failed to read"
                ));
            }
        };
        let create_req: AddServiceAccountReq =
            serde_json::from_slice(&body[..]).map_err(|e| s3_error!(InvalidRequest, "unmarshal body failed, e: {:?}", e))?;

        // create_req.expiration = create_req.expiration.and_then(|expire| expire.replace_millisecond(0).ok());

        if has_space_be(&create_req.access_key) {
            return Err(s3_error!(InvalidRequest, "access key has spaces"));
        }
        create_req
            .validate()
            .map_err(|e| S3Error::with_message(InvalidRequest, e.to_string()))?;
        // An embedded session policy further restricts the new credentials.
        let session_policy = if let Some(policy) = &create_req.policy {
            let p = Policy::parse_config(policy.as_bytes()).map_err(|e| {
                debug!("parse policy failed, e: {:?}", e);
                s3_error!(InvalidArgument, "parse policy failed")
            })?;
            Some(p)
        } else {
            None
        };
        let Some(sys_cred) = get_global_action_cred() else {
            return Err(s3_error!(InvalidRequest, "get sys cred failed"));
        };
        // Never allow shadowing the root/system access key.
        if constant_time_eq(&sys_cred.access_key, &create_req.access_key) {
            return Err(s3_error!(InvalidArgument, "can't create user with system access key"));
        }
        // Default the target to the caller itself when no target is given.
        let mut target_user = if let Some(u) = create_req.target_user {
            u
        } else {
            cred.access_key.clone()
        };
        let req_user = cred.access_key.clone();
        let mut req_parent_user = cred.access_key.clone();
        let req_groups = cred.groups.clone();
        let mut req_is_derived_cred = false;
        // Service-account / STS credentials act on behalf of their parent user.
        if cred.is_service_account() || cred.is_temp() {
            req_parent_user = cred.parent_user.clone();
            req_is_derived_cred = true;
        }
        let Ok(iam_store) = rustfs_iam::get() else {
            return Err(s3_error!(InvalidRequest, "iam not init"));
        };
        // Creating an account for oneself only requires the absence of an
        // explicit deny; creating for someone else requires an explicit allow.
        let deny_only = constant_time_eq(&cred.access_key, &target_user) || constant_time_eq(&cred.parent_user, &target_user);
        if !iam_store
            .is_allowed(&Args {
                account: &cred.access_key,
                groups: &cred.groups,
                action: Action::AdminAction(AdminAction::CreateServiceAccountAdminAction),
                bucket: "",
                conditions: &get_condition_values(
                    &req.headers,
                    &cred,
                    None,
                    None,
                    req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
                ),
                is_owner: owner,
                object: "",
                claims: cred.claims.as_ref().unwrap_or(&HashMap::new()),
                deny_only,
            })
            .await
        {
            return Err(s3_error!(AccessDenied, "access denied"));
        }
        // A foreign target must actually exist (the system account is implicit).
        if target_user != cred.access_key {
            let has_user = iam_store.get_user(&target_user).await;
            if has_user.is_none() && target_user != sys_cred.access_key {
                return Err(s3_error!(InvalidRequest, "target user not exist"));
            }
        }
        let is_svc_acc = target_user == req_user || target_user == req_parent_user;
        let mut target_groups = None;
        let mut opts = NewServiceAccountOpts {
            access_key: create_req.access_key,
            secret_key: create_req.secret_key,
            name: create_req.name,
            description: create_req.description,
            expiration: create_req.expiration,
            session_policy,
            ..Default::default()
        };
        if is_svc_acc {
            if req_is_derived_cred {
                if req_parent_user.is_empty() {
                    return Err(s3_error!(AccessDenied, "only derived cred can create service account"));
                }
                target_user = req_parent_user;
            }
            target_groups = req_groups;
            // Inherit the caller's claims onto the new account, except the
            // expiry claim, which must not carry over.
            if let Some(claims) = cred.claims {
                if opts.claims.is_none() {
                    opts.claims = Some(HashMap::new());
                }
                for (k, v) in claims.iter() {
                    // FIX: this previously tested `claims.contains_key("exp")`,
                    // which skipped *every* claim whenever an "exp" claim was
                    // present in the map. Only the "exp" entry itself should
                    // be skipped.
                    if k.as_str() == "exp" {
                        continue;
                    }
                    opts.claims.as_mut().unwrap().insert(k.clone(), v.clone());
                }
            }
        }
        let (new_cred, _) = iam_store
            .new_service_account(&target_user, target_groups, opts)
            .await
            .map_err(|e| {
                debug!("create service account failed, e: {:?}", e);
                s3_error!(InternalError, "create service account failed, e: {:?}", e)
            })?;
        let resp = AddServiceAccountResp {
            credentials: Credentials {
                access_key: &new_cred.access_key,
                secret_key: &new_cred.secret_key,
                session_token: None,
                expiration: new_cred.expiration,
            },
        };
        let body = serde_json::to_vec(&resp).map_err(|e| s3_error!(InternalError, "marshal body failed, e: {:?}", e))?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(body)), header))
    }
}
/// Query parameter carrying a service-account access key (`?accessKey=...`).
#[derive(Debug, Default, Deserialize)]
struct AccessKeyQuery {
    #[serde(rename = "accessKey")]
    pub access_key: String,
}
/// Admin handler: updates an existing service account in place.
pub struct UpdateServiceAccount {}

#[async_trait::async_trait]
impl Operation for UpdateServiceAccount {
    /// Updates the service account named by `?accessKey=` with the fields
    /// (new secret, status, name, description, expiration, session policy)
    /// supplied in the JSON request body.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle UpdateServiceAccount");
        // Extract the target access key from the query string.
        let query = {
            if let Some(query) = req.uri.query() {
                let input: AccessKeyQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get body failed1"))?;
                input
            } else {
                AccessKeyQuery::default()
            }
        };
        if query.access_key.is_empty() {
            return Err(s3_error!(InvalidArgument, "access key is empty"));
        }
        let access_key = query.access_key;
        let Ok(iam_store) = rustfs_iam::get() else {
            return Err(s3_error!(InvalidRequest, "iam not init"));
        };
        // Read the (size-limited) JSON request body.
        let mut input = req.input;
        let body = match input.store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE).await {
            Ok(b) => b,
            Err(e) => {
                warn!("get body failed, e: {:?}", e);
                return Err(s3_error!(
                    InvalidRequest,
                    "service account configuration body too large or failed to read"
                ));
            }
        };
        let update_req: UpdateServiceAccountReq =
            serde_json::from_slice(&body[..]).map_err(|e| s3_error!(InvalidRequest, "unmarshal body failed, e: {:?}", e))?;
        update_req
            .validate()
            .map_err(|e| S3Error::with_message(InvalidRequest, e.to_string()))?;
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        // Caller must be explicitly allowed to update service accounts.
        if !iam_store
            .is_allowed(&Args {
                account: &cred.access_key,
                groups: &cred.groups,
                action: Action::AdminAction(AdminAction::UpdateServiceAccountAdminAction),
                bucket: "",
                conditions: &get_condition_values(
                    &req.headers,
                    &cred,
                    None,
                    None,
                    req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
                ),
                is_owner: owner,
                object: "",
                claims: cred.claims.as_ref().unwrap_or(&HashMap::new()),
                deny_only: false,
            })
            .await
        {
            return Err(s3_error!(AccessDenied, "access denied"));
        }
        // An empty replacement policy (no version, no statements) is treated
        // the same as "no policy supplied".
        let sp = {
            if let Some(policy) = update_req.new_policy {
                let sp = Policy::parse_config(policy.as_bytes()).map_err(|e| {
                    debug!("parse policy failed, e: {:?}", e);
                    s3_error!(InvalidArgument, "parse policy failed")
                })?;
                if sp.version.is_empty() && sp.statements.is_empty() {
                    None
                } else {
                    Some(sp)
                }
            } else {
                None
            }
        };
        let opts = UpdateServiceAccountOpts {
            secret_key: update_req.new_secret_key,
            status: update_req.new_status,
            name: update_req.new_name,
            description: update_req.new_description,
            expiration: update_req.new_expiration,
            session_policy: sp,
        };
        let _ = iam_store.update_service_account(&access_key, opts).await.map_err(|e| {
            debug!("update service account failed, e: {:?}", e);
            s3_error!(InternalError, "update service account failed")
        })?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
/// Admin handler: returns the details of a single service account.
pub struct InfoServiceAccount {}

#[async_trait::async_trait]
impl Operation for InfoServiceAccount {
    /// Looks up the service account named by `?accessKey=` and returns its
    /// metadata plus its effective policy as JSON.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle InfoServiceAccount");
        let query = {
            if let Some(query) = req.uri.query() {
                let input: AccessKeyQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get body failed1"))?;
                input
            } else {
                AccessKeyQuery::default()
            }
        };
        if query.access_key.is_empty() {
            return Err(s3_error!(InvalidArgument, "access key is empty"));
        }
        let access_key = query.access_key;
        let Ok(iam_store) = rustfs_iam::get() else {
            return Err(s3_error!(InvalidRequest, "iam not init"));
        };
        let (svc_account, session_policy) = iam_store.get_service_account(&access_key).await.map_err(|e| {
            debug!("get service account failed, e: {:?}", e);
            s3_error!(InternalError, "get service account failed")
        })?;
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        // Without the admin list permission, a caller may still inspect
        // accounts they themselves own (i.e. they are the parent user).
        if !iam_store
            .is_allowed(&Args {
                account: &cred.access_key,
                groups: &cred.groups,
                action: Action::AdminAction(AdminAction::ListServiceAccountsAdminAction),
                bucket: "",
                conditions: &get_condition_values(
                    &req.headers,
                    &cred,
                    None,
                    None,
                    req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
                ),
                is_owner: owner,
                object: "",
                claims: cred.claims.as_ref().unwrap_or(&HashMap::new()),
                deny_only: false,
            })
            .await
        {
            let user = if cred.parent_user.is_empty() {
                &cred.access_key
            } else {
                &cred.parent_user
            };
            if user != &svc_account.parent_user {
                return Err(s3_error!(AccessDenied, "access denied"));
            }
        }
        // "Implied" means the account has no session policy of its own and
        // therefore inherits the parent user's / groups' combined policy.
        let implied_policy = if let Some(policy) = session_policy.as_ref() {
            policy.version.is_empty() && policy.statements.is_empty()
        } else {
            true
        };
        let svc_account_policy = {
            if !implied_policy {
                session_policy
            } else {
                // Resolve the inherited policy from the parent user and groups.
                let policies = iam_store
                    .policy_db_get(&svc_account.parent_user, &svc_account.groups)
                    .await
                    .map_err(|e| {
                        debug!("get service account policy failed, e: {:?}", e);
                        s3_error!(InternalError, "get service account policy failed")
                    })?;
                Some(iam_store.get_combined_policy(&policies).await)
            }
        };
        let policy = {
            if let Some(policy) = svc_account_policy {
                Some(serde_json::to_string(&policy).map_err(|e| {
                    debug!("marshal policy failed, e: {:?}", e);
                    s3_error!(InternalError, "marshal policy failed")
                })?)
            } else {
                None
            }
        };
        let resp = InfoServiceAccountResp {
            parent_user: svc_account.parent_user,
            account_status: svc_account.status,
            implied_policy,
            name: svc_account.name,
            description: svc_account.description,
            expiration: svc_account.expiration,
            policy,
        };
        let body = serde_json::to_vec(&resp).map_err(|e| s3_error!(InternalError, "marshal body failed, e: {:?}", e))?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(body)), header))
    }
}
/// Query parameters for listing service accounts. `?user=` optionally selects
/// another user's accounts, subject to admin authorization.
#[derive(Debug, Default, serde::Deserialize)]
pub struct ListServiceAccountQuery {
    pub user: Option<String>,
}
/// Admin handler: lists the service accounts belonging to a user.
pub struct ListServiceAccount {}

#[async_trait::async_trait]
impl Operation for ListServiceAccount {
    /// Lists service accounts for the caller, or — when `?user=` names a
    /// different account and the caller holds the admin permission — for
    /// that user, returning them as JSON.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle ListServiceAccount");
        let query = {
            if let Some(query) = req.uri.query() {
                let input: ListServiceAccountQuery = from_bytes(query.as_bytes())
                    .map_err(|_e| s3_error!(InvalidArgument, "invalid service account query parameters"))?;
                input
            } else {
                ListServiceAccountQuery::default()
            }
        };
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key)
                .await
                .map_err(|e| {
                    debug!("check key failed: {e:?}");
                    s3_error!(InternalError, "check key failed")
                })?;
        let Ok(iam_store) = rustfs_iam::get() else {
            return Err(s3_error!(InvalidRequest, "iam not init"));
        };
        // Listing another user's accounts requires the admin permission;
        // otherwise list for the caller (or the caller's parent user when the
        // caller is itself a derived credential).
        let target_account = if query.user.as_ref().is_some_and(|v| v != &cred.access_key) {
            if !iam_store
                .is_allowed(&Args {
                    account: &cred.access_key,
                    groups: &cred.groups,
                    action: Action::AdminAction(AdminAction::UpdateServiceAccountAdminAction),
                    bucket: "",
                    conditions: &get_condition_values(
                        &req.headers,
                        &cred,
                        None,
                        None,
                        req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
                    ),
                    is_owner: owner,
                    object: "",
                    claims: cred.claims.as_ref().unwrap_or(&HashMap::new()),
                    deny_only: false,
                })
                .await
            {
                return Err(s3_error!(AccessDenied, "access denied"));
            }
            query.user.unwrap_or_default()
        } else if cred.parent_user.is_empty() {
            cred.access_key
        } else {
            cred.parent_user
        };
        let service_accounts = iam_store.list_service_accounts(&target_account).await.map_err(|e| {
            debug!("list service account failed: {e:?}");
            s3_error!(InternalError, "list service account failed")
        })?;
        // Project internal credentials into the public response shape.
        let accounts: Vec<ServiceAccountInfo> = service_accounts
            .into_iter()
            .map(|sa| ServiceAccountInfo {
                parent_user: sa.parent_user.clone(),
                account_status: sa.status.clone(),
                implied_policy: sa.is_implied_policy(), // or set according to your logic
                access_key: sa.access_key,
                name: sa.name,
                description: sa.description,
                expiration: sa.expiration,
            })
            .collect();
        let data = serde_json::to_vec(&ListServiceAccountsResp { accounts })
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal users err {e}")))?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header))
    }
}
/// Admin handler: deletes a service account.
pub struct DeleteServiceAccount {}

#[async_trait::async_trait]
impl Operation for DeleteServiceAccount {
    /// Deletes the service account named by `?accessKey=`. Callers without
    /// the admin permission may only delete accounts they are the parent of.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle DeleteServiceAccount");
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key)
                .await
                .map_err(|e| {
                    debug!("check key failed: {e:?}");
                    s3_error!(InternalError, "check key failed")
                })?;
        let query = {
            if let Some(query) = req.uri.query() {
                let input: AccessKeyQuery = from_bytes(query.as_bytes())
                    .map_err(|_e| s3_error!(InvalidArgument, "invalid access key query parameters"))?;
                input
            } else {
                AccessKeyQuery::default()
            }
        };
        if query.access_key.is_empty() {
            return Err(s3_error!(InvalidArgument, "access key is empty"));
        }
        let Ok(iam_store) = rustfs_iam::get() else {
            return Err(s3_error!(InvalidRequest, "iam not init"));
        };
        // Look the account up first so a missing account is reported, and so
        // the ownership check below has the parent user available. Other
        // lookup errors degrade to None and are left to the delete call.
        let svc_account = match iam_store.get_service_account(&query.access_key).await {
            Ok((res, _)) => Some(res),
            Err(err) => {
                if is_err_no_such_service_account(&err) {
                    return Err(s3_error!(InvalidRequest, "service account not exist"));
                }
                None
            }
        };
        if !iam_store
            .is_allowed(&Args {
                account: &cred.access_key,
                groups: &cred.groups,
                action: Action::AdminAction(AdminAction::RemoveServiceAccountAdminAction),
                bucket: "",
                conditions: &get_condition_values(
                    &req.headers,
                    &cred,
                    None,
                    None,
                    req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
                ),
                is_owner: owner,
                object: "",
                claims: cred.claims.as_ref().unwrap_or(&HashMap::new()),
                deny_only: false,
            })
            .await
        {
            // No admin permission: only the parent user of the account may
            // delete it. Report "not exist" rather than "denied" to avoid
            // confirming the account's existence to unauthorized callers.
            let user = if cred.parent_user.is_empty() {
                &cred.access_key
            } else {
                &cred.parent_user
            };
            if svc_account.is_some_and(|v| &v.parent_user != user) {
                return Err(s3_error!(InvalidRequest, "service account not exist"));
            }
        }
        iam_store.delete_service_account(&query.access_key, true).await.map_err(|e| {
            debug!("delete service account failed, e: {:?}", e);
            s3_error!(InternalError, "delete service account failed")
        })?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/handlers/policies.rs | rustfs/src/admin/handlers/policies.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
admin::{auth::validate_admin_request, router::Operation, utils::has_space_be},
auth::{check_key_valid, get_session_token},
server::RemoteAddr,
};
use http::{HeaderMap, StatusCode};
use matchit::Params;
use rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE;
use rustfs_credentials::get_global_action_cred;
use rustfs_iam::error::is_err_no_such_user;
use rustfs_iam::store::MappedPolicy;
use rustfs_policy::policy::{
Policy,
action::{Action, AdminAction},
};
use s3s::{
Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result,
header::{CONTENT_LENGTH, CONTENT_TYPE},
s3_error,
};
use serde::Deserialize;
use serde_urlencoded::from_bytes;
use std::collections::HashMap;
use tracing::warn;
/// Query parameter optionally scoping a policy listing to one bucket.
#[derive(Debug, Deserialize, Default)]
pub struct BucketQuery {
    pub bucket: String,
}
/// Admin handler: lists all canned IAM policies as a JSON map of
/// policy name -> policy document, optionally scoped to a bucket.
pub struct ListCannedPolicies {}

#[async_trait::async_trait]
impl Operation for ListCannedPolicies {
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle ListCannedPolicies");
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ListUserPoliciesAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let query = {
            if let Some(query) = req.uri.query() {
                // FIX: the error message previously read "get body failed1" —
                // a typo, and misleading because this parses the query string,
                // not the request body.
                let input: BucketQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "invalid query parameters"))?;
                input
            } else {
                BucketQuery::default()
            }
        };
        let Ok(iam_store) = rustfs_iam::get() else { return Err(s3_error!(InternalError, "iam not init")) };
        let policies = iam_store.list_polices(&query.bucket).await.map_err(|e| {
            warn!("list policies failed, e: {:?}", e);
            S3Error::with_message(S3ErrorCode::InternalError, e.to_string())
        })?;
        // Drop any policy that fails to serialize so one bad entry cannot
        // fail the whole listing.
        let kvs: HashMap<String, Policy> = policies
            .into_iter()
            .filter(|(_, v)| serde_json::to_string(v).is_ok())
            .collect();
        let body = serde_json::to_vec(&kvs).map_err(|e| s3_error!(InternalError, "marshal body failed, e: {:?}", e))?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(body)), header))
    }
}
/// Query parameter naming a canned policy (`?name=...`).
#[derive(Debug, Deserialize, Default)]
pub struct PolicyNameQuery {
    pub name: String,
}
/// Admin handler: creates or replaces a canned IAM policy.
pub struct AddCannedPolicy {}

#[async_trait::async_trait]
impl Operation for AddCannedPolicy {
    /// Stores the policy document from the request body under the name given
    /// by `?name=`.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle AddCannedPolicy");
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        // Caller must hold the admin CreatePolicy permission.
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::CreatePolicyAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let query = {
            if let Some(query) = req.uri.query() {
                let input: PolicyNameQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get body failed1"))?;
                input
            } else {
                PolicyNameQuery::default()
            }
        };
        // Policy names must be non-empty and free of whitespace.
        if query.name.is_empty() {
            return Err(s3_error!(InvalidArgument, "policy name is empty"));
        }
        if has_space_be(&query.name) {
            return Err(s3_error!(InvalidArgument, "policy name has space"));
        }
        // Read the (size-limited) policy document from the request body.
        let mut input = req.input;
        let policy_bytes = match input.store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE).await {
            Ok(b) => b,
            Err(e) => {
                warn!("get body failed, e: {:?}", e);
                return Err(s3_error!(InvalidRequest, "policy configuration body too large or failed to read"));
            }
        };
        let policy = Policy::parse_config(policy_bytes.as_ref()).map_err(|e| {
            warn!("parse policy failed, e: {:?}", e);
            S3Error::with_message(S3ErrorCode::InvalidRequest, e.to_string())
        })?;
        // A valid policy document must declare a version.
        if policy.version.is_empty() {
            return Err(s3_error!(InvalidRequest, "policy version is empty"));
        }
        let Ok(iam_store) = rustfs_iam::get() else { return Err(s3_error!(InternalError, "iam not init")) };
        iam_store.set_policy(&query.name, policy).await.map_err(|e| {
            warn!("set policy failed, e: {:?}", e);
            S3Error::with_message(S3ErrorCode::InternalError, e.to_string())
        })?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
/// Admin handler: returns a single canned IAM policy document.
pub struct InfoCannedPolicy {}

#[async_trait::async_trait]
impl Operation for InfoCannedPolicy {
    /// Returns the policy named by `?name=` as JSON.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle InfoCannedPolicy");
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::GetPolicyAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let query = {
            if let Some(query) = req.uri.query() {
                let input: PolicyNameQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get body failed1"))?;
                input
            } else {
                PolicyNameQuery::default()
            }
        };
        if query.name.is_empty() {
            return Err(s3_error!(InvalidArgument, "policy name is empty"));
        }
        // `name` may be a comma-separated mapped-policy list; this endpoint
        // only accepts exactly one policy name.
        let policies = MappedPolicy::new(&query.name).to_slice();
        if policies.len() != 1 {
            return Err(s3_error!(InvalidArgument, "too many policies"));
        }
        let Ok(iam_store) = rustfs_iam::get() else { return Err(s3_error!(InternalError, "iam not init")) };
        let pd = iam_store.info_policy(&query.name).await.map_err(|e| {
            warn!("info policy failed, e: {:?}", e);
            S3Error::with_message(S3ErrorCode::InternalError, e.to_string())
        })?;
        let body = serde_json::to_vec(&pd).map_err(|e| s3_error!(InternalError, "marshal body failed, e: {:?}", e))?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(body)), header))
    }
}
/// Admin handler: deletes a canned IAM policy by name.
pub struct RemoveCannedPolicy {}

#[async_trait::async_trait]
impl Operation for RemoveCannedPolicy {
    /// Deletes the policy named by `?name=`.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle RemoveCannedPolicy");
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::DeletePolicyAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let query = {
            if let Some(query) = req.uri.query() {
                let input: PolicyNameQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get body failed1"))?;
                input
            } else {
                PolicyNameQuery::default()
            }
        };
        if query.name.is_empty() {
            return Err(s3_error!(InvalidArgument, "policy name is empty"));
        }
        let Ok(iam_store) = rustfs_iam::get() else { return Err(s3_error!(InternalError, "iam not init")) };
        iam_store.delete_policy(&query.name, true).await.map_err(|e| {
            warn!("delete policy failed, e: {:?}", e);
            S3Error::with_message(S3ErrorCode::InternalError, e.to_string())
        })?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
/// Query parameters for attaching a canned policy to a user or a group.
#[derive(Debug, Deserialize, Default)]
pub struct SetPolicyForUserOrGroupQuery {
    /// Name of the canned policy to attach.
    #[serde(rename = "policyName")]
    pub policy_name: String,
    /// Target user name or group name, interpreted per `is_group`.
    #[serde(rename = "userOrGroup")]
    pub user_or_group: String,
    /// True when `user_or_group` names a group rather than a user.
    #[serde(rename = "isGroup")]
    pub is_group: bool,
}
pub struct SetPolicyForUserOrGroup {}

#[async_trait::async_trait]
impl Operation for SetPolicyForUserOrGroup {
    /// Admin endpoint: attaches a canned policy to a regular user or a group.
    ///
    /// Parameters come from the query string (`policyName`, `userOrGroup`,
    /// `isGroup`); requires `AttachPolicyAdminAction`.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle SetPolicyForUserOrGroup");
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::AttachPolicyAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;

        // Parameters are on the query string (there is no request body).
        // Fix: the error message previously read "get body failed1" — a typo
        // and misleading, since this parses the query string.
        let query = {
            if let Some(query) = req.uri.query() {
                let input: SetPolicyForUserOrGroupQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "invalid query parameters"))?;
                input
            } else {
                SetPolicyForUserOrGroupQuery::default()
            }
        };
        if query.user_or_group.is_empty() {
            return Err(s3_error!(InvalidArgument, "user or group is empty"));
        }
        let Ok(iam_store) = rustfs_iam::get() else { return Err(s3_error!(InternalError, "iam not init")) };

        if !query.is_group {
            // Temporary (STS) users cannot have a policy attached directly.
            // A "no such user" error here is tolerated; existence is rejected
            // by the explicit check further below, as in the original flow.
            match iam_store.is_temp_user(&query.user_or_group).await {
                Ok((ok, _)) => {
                    if ok {
                        return Err(s3_error!(InvalidArgument, "temp user can't set policy"));
                    }
                }
                Err(err) => {
                    if !is_err_no_such_user(&err) {
                        warn!("is temp user failed, e: {:?}", err);
                        return Err(S3Error::with_message(S3ErrorCode::InternalError, err.to_string()));
                    }
                }
            };

            // The root/system credential cannot have a policy attached.
            let Some(sys_cred) = get_global_action_cred() else {
                return Err(s3_error!(InternalError, "get global action cred failed"));
            };
            if query.user_or_group == sys_cred.access_key {
                return Err(s3_error!(InvalidArgument, "can't set policy for system user"));
            }

            // Consistency: this existence check previously lived in a second,
            // redundant `if !query.is_group` block; both are merged here with
            // the same check order preserved.
            if iam_store.get_user(&query.user_or_group).await.is_none() {
                return Err(s3_error!(InvalidArgument, "user not exist"));
            }
        } else {
            // For groups, only verify that the group exists.
            iam_store.get_group_description(&query.user_or_group).await.map_err(|e| {
                warn!("get group description failed, e: {:?}", e);
                S3Error::with_message(S3ErrorCode::InternalError, e.to_string())
            })?;
        }

        iam_store
            .policy_db_set(&query.user_or_group, rustfs_iam::store::UserType::Reg, query.is_group, &query.policy_name)
            .await
            .map_err(|e| {
                warn!("policy db set failed, e: {:?}", e);
                S3Error::with_message(S3ErrorCode::InternalError, e.to_string())
            })?;

        // Empty JSON response with explicit zero Content-Length.
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/handlers/kms.rs | rustfs/src/admin/handlers/kms.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! KMS admin handlers for HTTP API
use super::Operation;
use crate::admin::auth::validate_admin_request;
use crate::auth::{check_key_valid, get_session_token};
use crate::server::RemoteAddr;
use base64::Engine;
use hyper::{HeaderMap, StatusCode};
use matchit::Params;
use rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE;
use rustfs_kms::{get_global_encryption_service, types::*};
use rustfs_policy::policy::action::{Action, AdminAction};
use s3s::header::CONTENT_TYPE;
use s3s::{Body, S3Request, S3Response, S3Result, s3_error};
use serde::{Deserialize, Serialize};
use serde_json;
use std::collections::HashMap;
use tracing::{error, info, warn};
/// JSON body accepted by the create-key admin endpoint.
#[derive(Debug, Serialize, Deserialize)]
pub struct CreateKeyApiRequest {
    /// Defaults to `EncryptDecrypt` when omitted.
    pub key_usage: Option<KeyUsage>,
    pub description: Option<String>,
    /// Free-form tags; the "name" tag, if present, becomes the key name.
    pub tags: Option<HashMap<String, String>>,
}
/// JSON response of the create-key admin endpoint.
#[derive(Debug, Serialize, Deserialize)]
pub struct CreateKeyApiResponse {
    pub key_id: String,
    pub key_metadata: KeyMetadata,
}
/// JSON response of the describe-key admin endpoint.
#[derive(Debug, Serialize, Deserialize)]
pub struct DescribeKeyApiResponse {
    pub key_metadata: KeyMetadata,
}
/// JSON response of the list-keys admin endpoint (one page of results).
#[derive(Debug, Serialize, Deserialize)]
pub struct ListKeysApiResponse {
    pub keys: Vec<KeyInfo>,
    /// True when more results exist beyond this page.
    pub truncated: bool,
    /// Cursor to pass back as the `marker` query parameter for the next page.
    pub next_marker: Option<String>,
}
/// JSON body accepted by the generate-data-key admin endpoint.
#[derive(Debug, Serialize, Deserialize)]
pub struct GenerateDataKeyApiRequest {
    /// Master key that wraps the generated data key.
    pub key_id: String,
    pub key_spec: KeySpec,
    /// Optional context; an empty map is used when omitted.
    pub encryption_context: Option<HashMap<String, String>>,
}
/// JSON response of the generate-data-key admin endpoint.
#[derive(Debug, Serialize, Deserialize)]
pub struct GenerateDataKeyApiResponse {
    pub key_id: String,
    pub plaintext_key: String, // Base64 encoded
    pub ciphertext_blob: String, // Base64 encoded
}
/// JSON response of the KMS status admin endpoint.
#[derive(Debug, Serialize, Deserialize)]
pub struct KmsStatusResponse {
    pub backend_type: String,
    /// "healthy", "unhealthy", or "error" (health probe itself failed).
    pub backend_status: String,
    pub cache_enabled: bool,
    /// Present only when the service reports cache statistics.
    pub cache_stats: Option<CacheStatsResponse>,
    pub default_key_id: Option<String>,
}
/// Hit/miss counters reported by the KMS key cache.
#[derive(Debug, Serialize, Deserialize)]
pub struct CacheStatsResponse {
    pub hit_count: u64,
    pub miss_count: u64,
}
/// JSON response of the KMS configuration admin endpoint.
#[derive(Debug, Serialize, Deserialize)]
pub struct KmsConfigResponse {
    pub backend: String,
    pub cache_enabled: bool,
    pub cache_max_keys: usize,
    pub cache_ttl_seconds: u64,
    pub default_key_id: Option<String>,
}
/// Splits `uri`'s query string into percent-decoded key/value pairs.
///
/// Pairs without an `=` are skipped, a later duplicate key overwrites an
/// earlier one, and a failed percent-decode falls back to an empty string —
/// matching the behavior of the original hand-rolled loop.
fn extract_query_params(uri: &hyper::Uri) -> HashMap<String, String> {
    uri.query()
        .map(|raw| {
            raw.split('&')
                .filter_map(|pair| pair.split_once('='))
                .map(|(k, v)| {
                    (
                        urlencoding::decode(k).unwrap_or_default().into_owned(),
                        urlencoding::decode(v).unwrap_or_default().into_owned(),
                    )
                })
                .collect::<HashMap<_, _>>()
        })
        .unwrap_or_default()
}
/// Create a new KMS master key
pub struct CreateKeyHandler {}

#[async_trait::async_trait]
impl Operation for CreateKeyHandler {
    /// Authenticates the caller, parses an optional `CreateKeyApiRequest`
    /// JSON body, and asks the global encryption service to create a master
    /// key. Responds with `CreateKeyApiResponse` as JSON.
    async fn call(&self, mut req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "authentication required"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)], // TODO: Add specific KMS action
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        // Body size is capped to guard against oversized admin requests.
        let body = req
            .input
            .store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE)
            .await
            .map_err(|e| s3_error!(InvalidRequest, "failed to read request body: {}", e))?;
        // An empty body is allowed and falls back to default key settings.
        let request: CreateKeyApiRequest = if body.is_empty() {
            CreateKeyApiRequest {
                key_usage: Some(KeyUsage::EncryptDecrypt),
                description: None,
                tags: None,
            }
        } else {
            serde_json::from_slice(&body).map_err(|e| s3_error!(InvalidRequest, "invalid JSON: {}", e))?
        };
        let Some(service) = get_global_encryption_service().await else {
            return Err(s3_error!(InternalError, "KMS service not initialized"));
        };
        // Extract key name from tags if provided
        let tags = request.tags.unwrap_or_default();
        let key_name = tags.get("name").cloned();
        let kms_request = CreateKeyRequest {
            key_name,
            key_usage: request.key_usage.unwrap_or(KeyUsage::EncryptDecrypt),
            description: request.description,
            tags,
            origin: Some("AWS_KMS".to_string()),
            policy: None,
        };
        match service.create_key(kms_request).await {
            Ok(response) => {
                let api_response = CreateKeyApiResponse {
                    key_id: response.key_id,
                    key_metadata: response.key_metadata,
                };
                let data = serde_json::to_vec(&api_response)
                    .map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
                let mut headers = HeaderMap::new();
                headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
                Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), headers))
            }
            Err(e) => {
                error!("Failed to create KMS key: {}", e);
                Err(s3_error!(InternalError, "failed to create key: {}", e))
            }
        }
    }
}
/// Describe a KMS key
pub struct DescribeKeyHandler {}

#[async_trait::async_trait]
impl Operation for DescribeKeyHandler {
    /// Authenticates the caller, reads the `keyId` query parameter, and
    /// returns the key's metadata as JSON.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "authentication required"));
        };
        let session_token = get_session_token(&req.uri, &req.headers).unwrap_or_default();
        let (cred, owner) = check_key_valid(session_token, &cred.access_key).await?;
        let remote_addr = req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0));
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
            remote_addr,
        )
        .await?;

        // `keyId` is mandatory; everything else on the query string is ignored.
        let params = extract_query_params(&req.uri);
        let key_id = match params.get("keyId") {
            Some(id) => id.clone(),
            None => return Err(s3_error!(InvalidRequest, "missing keyId parameter")),
        };
        let Some(service) = get_global_encryption_service().await else {
            return Err(s3_error!(InternalError, "KMS service not initialized"));
        };

        let response = match service.describe_key(DescribeKeyRequest { key_id: key_id.clone() }).await {
            Ok(r) => r,
            Err(e) => {
                error!("Failed to describe KMS key {}: {}", key_id, e);
                return Err(s3_error!(InternalError, "failed to describe key: {}", e));
            }
        };

        let payload = serde_json::to_vec(&DescribeKeyApiResponse {
            key_metadata: response.key_metadata,
        })
        .map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
        let mut headers = HeaderMap::new();
        headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(payload)), headers))
    }
}
/// List KMS keys
pub struct ListKeysHandler {}

#[async_trait::async_trait]
impl Operation for ListKeysHandler {
    /// Returns one page of keys. Optional query parameters: `limit`
    /// (default 100; a non-numeric value also falls back to 100) and
    /// `marker` (pagination cursor from a previous response).
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "authentication required"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let query_params = extract_query_params(&req.uri);
        let limit = query_params.get("limit").and_then(|s| s.parse::<u32>().ok()).unwrap_or(100);
        let marker = query_params.get("marker").cloned();
        let Some(service) = get_global_encryption_service().await else {
            return Err(s3_error!(InternalError, "KMS service not initialized"));
        };
        // No status/usage filtering is exposed through this endpoint yet.
        let request = ListKeysRequest {
            limit: Some(limit),
            marker,
            status_filter: None,
            usage_filter: None,
        };
        match service.list_keys(request).await {
            Ok(response) => {
                let api_response = ListKeysApiResponse {
                    keys: response.keys,
                    truncated: response.truncated,
                    next_marker: response.next_marker,
                };
                let data = serde_json::to_vec(&api_response)
                    .map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
                let mut headers = HeaderMap::new();
                headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
                Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), headers))
            }
            Err(e) => {
                error!("Failed to list KMS keys: {}", e);
                Err(s3_error!(InternalError, "failed to list keys: {}", e))
            }
        }
    }
}
/// Generate data encryption key
pub struct GenerateDataKeyHandler {}

#[async_trait::async_trait]
impl Operation for GenerateDataKeyHandler {
    /// Parses a mandatory `GenerateDataKeyApiRequest` JSON body (size-capped)
    /// and returns the generated key material base64-encoded.
    ///
    /// NOTE(review): the response includes the plaintext data key; this
    /// assumes the admin transport is protected (TLS) — confirm intended.
    async fn call(&self, mut req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "authentication required"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        // Unlike CreateKey, an empty body is not tolerated here: parsing an
        // empty slice fails with "invalid JSON".
        let body = req
            .input
            .store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE)
            .await
            .map_err(|e| s3_error!(InvalidRequest, "failed to read request body: {}", e))?;
        let request: GenerateDataKeyApiRequest =
            serde_json::from_slice(&body).map_err(|e| s3_error!(InvalidRequest, "invalid JSON: {}", e))?;
        let Some(service) = get_global_encryption_service().await else {
            return Err(s3_error!(InternalError, "KMS service not initialized"));
        };
        let kms_request = GenerateDataKeyRequest {
            key_id: request.key_id,
            key_spec: request.key_spec,
            encryption_context: request.encryption_context.unwrap_or_default(),
        };
        match service.generate_data_key(kms_request).await {
            Ok(response) => {
                // Binary key material is base64-encoded for JSON transport.
                let api_response = GenerateDataKeyApiResponse {
                    key_id: response.key_id,
                    plaintext_key: base64::prelude::BASE64_STANDARD.encode(&response.plaintext_key),
                    ciphertext_blob: base64::prelude::BASE64_STANDARD.encode(&response.ciphertext_blob),
                };
                let data = serde_json::to_vec(&api_response)
                    .map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
                let mut headers = HeaderMap::new();
                headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
                Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), headers))
            }
            Err(e) => {
                error!("Failed to generate data key: {}", e);
                Err(s3_error!(InternalError, "failed to generate data key: {}", e))
            }
        }
    }
}
/// Get KMS service status
pub struct KmsStatusHandler {}

#[async_trait::async_trait]
impl Operation for KmsStatusHandler {
    /// Reports backend health, cache statistics (when available), and the
    /// configured default key id as JSON.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "authentication required"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let Some(service) = get_global_encryption_service().await else {
            return Err(s3_error!(InternalError, "KMS service not initialized"));
        };
        // Map the health probe onto a string; a failed probe is reported as
        // "error" rather than failing the whole status request.
        let backend_status = match service.health_check().await {
            Ok(true) => "healthy".to_string(),
            Ok(false) => "unhealthy".to_string(),
            Err(e) => {
                warn!("KMS health check failed: {}", e);
                "error".to_string()
            }
        };
        // `cache_stats` being None doubles as the cache_enabled=false signal.
        let cache_stats = service.cache_stats().await.map(|(hits, misses)| CacheStatsResponse {
            hit_count: hits,
            miss_count: misses,
        });
        let response = KmsStatusResponse {
            backend_type: "vault".to_string(), // TODO: Get from config
            backend_status,
            cache_enabled: cache_stats.is_some(),
            cache_stats,
            default_key_id: service.get_default_key_id().cloned(),
        };
        let data = serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
        let mut headers = HeaderMap::new();
        headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), headers))
    }
}
/// Get KMS configuration
pub struct KmsConfigHandler {}

#[async_trait::async_trait]
impl Operation for KmsConfigHandler {
    /// Returns the KMS configuration as JSON. All fields except
    /// `default_key_id` are currently fixed values (see TODO below).
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "authentication required"));
        };
        let token = get_session_token(&req.uri, &req.headers).unwrap_or_default();
        let (cred, owner) = check_key_valid(token, &cred.access_key).await?;
        let remote = req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0));
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
            remote,
        )
        .await?;

        let Some(service) = get_global_encryption_service().await else {
            return Err(s3_error!(InternalError, "KMS service not initialized"));
        };

        // TODO: Get actual config from service
        let config = KmsConfigResponse {
            backend: "vault".to_string(),
            cache_enabled: true,
            cache_max_keys: 1000,
            cache_ttl_seconds: 300,
            default_key_id: service.get_default_key_id().cloned(),
        };
        let body = serde_json::to_vec(&config).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;

        let mut headers = HeaderMap::new();
        headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(body)), headers))
    }
}
/// Clear KMS cache
pub struct KmsClearCacheHandler {}

#[async_trait::async_trait]
impl Operation for KmsClearCacheHandler {
    /// Flushes the KMS key cache and reports success as a small JSON status
    /// object.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let Some(cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "authentication required"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let Some(service) = get_global_encryption_service().await else {
            return Err(s3_error!(InternalError, "KMS service not initialized"));
        };
        match service.clear_cache().await {
            Ok(()) => {
                info!("KMS cache cleared successfully");
                let response = serde_json::json!({
                    "status": "success",
                    "message": "cache cleared successfully"
                });
                let data =
                    serde_json::to_vec(&response).map_err(|e| s3_error!(InternalError, "failed to serialize response: {}", e))?;
                let mut headers = HeaderMap::new();
                headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
                Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), headers))
            }
            Err(e) => {
                error!("Failed to clear KMS cache: {}", e);
                Err(s3_error!(InternalError, "failed to clear cache: {}", e))
            }
        }
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/handlers/pools.rs | rustfs/src/admin/handlers/pools.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use http::{HeaderMap, StatusCode};
use matchit::Params;
use rustfs_ecstore::{GLOBAL_Endpoints, new_object_layer_fn};
use rustfs_policy::policy::action::{Action, AdminAction};
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
use serde::Deserialize;
use serde_urlencoded::from_bytes;
use tokio_util::sync::CancellationToken;
use tracing::warn;
use crate::{
admin::{auth::validate_admin_request, router::Operation},
auth::{check_key_valid, get_session_token},
error::ApiError,
server::RemoteAddr,
};
pub struct ListPools {}

#[async_trait::async_trait]
impl Operation for ListPools {
    // GET <endpoint>/<admin-API>/pools/list
    /// Returns the decommission status of every server pool as a JSON array.
    #[tracing::instrument(skip_all)]
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle ListPools");
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        // Authorization against the ServerInfo/Decommission admin actions.
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![
                Action::AdminAction(AdminAction::ServerInfoAdminAction),
                Action::AdminAction(AdminAction::DecommissionAdminAction),
            ],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let Some(store) = new_object_layer_fn() else {
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };
        let Some(endpoints) = GLOBAL_Endpoints.get() else {
            return Err(s3_error!(NotImplemented));
        };
        // Legacy deployments have no pool topology to report.
        if endpoints.legacy() {
            return Err(s3_error!(NotImplemented));
        }
        // One status entry per pool, queried by positional index.
        let mut pools_status = Vec::new();
        for (idx, _) in endpoints.as_ref().iter().enumerate() {
            let state = store.status(idx).await.map_err(ApiError::from)?;
            pools_status.push(state);
        }
        let data = serde_json::to_vec(&pools_status)
            .map_err(|_e| S3Error::with_message(S3ErrorCode::InternalError, "parse accountInfo failed"))?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header))
    }
}
/// Query parameters shared by the pool status / decommission endpoints.
///
/// `pool` is either an endpoint expansion string (e.g.
/// `http://server{1...4}/disk{1...4}`) or, when `by-id` is "true", a numeric
/// pool index (StartDecommission accepts a comma-separated list of either).
#[derive(Debug, Deserialize, Default)]
#[serde(default)]
pub struct StatusPoolQuery {
    pub pool: String,
    /// String flag rather than bool: any value other than "true" means false.
    #[serde(rename = "by-id")]
    pub by_id: String,
}
pub struct StatusPool {}

#[async_trait::async_trait]
impl Operation for StatusPool {
    // GET <endpoint>/<admin-API>/pools/status?pool=http://server{1...4}/disk{1...4}
    /// Returns the decommission status of a single pool, selected either by
    /// endpoint expansion string or (with `by-id=true`) by numeric index.
    #[tracing::instrument(skip_all)]
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle StatusPool");
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![
                Action::AdminAction(AdminAction::ServerInfoAdminAction),
                Action::AdminAction(AdminAction::DecommissionAdminAction),
            ],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let Some(endpoints) = GLOBAL_Endpoints.get() else {
            return Err(s3_error!(NotImplemented));
        };
        // Legacy deployments have no pool topology.
        if endpoints.legacy() {
            return Err(s3_error!(NotImplemented));
        }
        let query = {
            if let Some(query) = req.uri.query() {
                let input: StatusPoolQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get body failed"))?;
                input
            } else {
                StatusPoolQuery::default()
            }
        };
        let is_byid = query.by_id.as_str() == "true";
        // Resolve the pool index. Fix: with by-id=true a non-numeric `pool`
        // previously parsed as 0 via `unwrap_or_default`, silently reporting
        // the status of pool 0; invalid input is now rejected, matching
        // StartDecommission's handling.
        let has_idx = {
            if is_byid {
                query.pool.parse::<usize>().ok().filter(|idx| *idx < endpoints.as_ref().len())
            } else {
                endpoints.get_pool_idx(&query.pool)
            }
        };
        let Some(idx) = has_idx else {
            warn!("specified pool {} not found, please specify a valid pool", &query.pool);
            return Err(s3_error!(InvalidArgument));
        };
        let Some(store) = new_object_layer_fn() else {
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };
        let pools_status = store.status(idx).await.map_err(ApiError::from)?;
        let data = serde_json::to_vec(&pools_status)
            .map_err(|_e| S3Error::with_message(S3ErrorCode::InternalError, "parse accountInfo failed"))?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header))
    }
}
pub struct StartDecommission {}

#[async_trait::async_trait]
impl Operation for StartDecommission {
    // POST <endpoint>/<admin-API>/pools/decommission?pool=http://server{1...4}/disk{1...4}
    /// Starts decommissioning for one or more pools. `pool` may be a
    /// comma-separated list, interpreted as numeric indices when `by-id=true`.
    #[tracing::instrument(skip_all)]
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle StartDecommission");
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::DecommissionAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let Some(endpoints) = GLOBAL_Endpoints.get() else {
            return Err(s3_error!(NotImplemented));
        };
        if endpoints.legacy() {
            return Err(s3_error!(NotImplemented));
        }
        let Some(store) = new_object_layer_fn() else {
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };
        // Only one decommission may be in flight at a time.
        if store.is_decommission_running().await {
            return Err(S3Error::with_message(
                S3ErrorCode::InvalidRequest,
                "DecommissionAlreadyRunning".to_string(),
            ));
        }
        // TODO: check IsRebalanceStarted
        let query = {
            if let Some(query) = req.uri.query() {
                let input: StatusPoolQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get body failed"))?;
                input
            } else {
                StatusPoolQuery::default()
            }
        };
        let is_byid = query.by_id.as_str() == "true";
        let pools: Vec<&str> = query.pool.split(',').collect();
        let mut pools_indices = Vec::with_capacity(pools.len());
        let ctx = CancellationToken::new();
        for pool in pools.iter() {
            let idx = {
                if is_byid {
                    pool.parse::<usize>()
                        .map_err(|_e| s3_error!(InvalidArgument, "pool parse failed"))?
                } else {
                    let Some(idx) = endpoints.get_pool_idx(pool) else {
                        return Err(s3_error!(InvalidArgument, "pool parse failed"));
                    };
                    idx
                }
            };
            // Idiom fix: the original scanned `store.pools` with an enumerate
            // loop (cloning the matched pool, then discarding it) purely as a
            // bounds check; `get` expresses that directly without the clone.
            if store.pools.get(idx).is_none() {
                return Err(s3_error!(InvalidArgument));
            }
            pools_indices.push(idx);
        }
        if !pools_indices.is_empty() {
            store.decommission(ctx.clone(), pools_indices).await.map_err(ApiError::from)?;
        }
        Ok(S3Response::new((StatusCode::OK, Body::default())))
    }
}
pub struct CancelDecommission {}

#[async_trait::async_trait]
impl Operation for CancelDecommission {
    // POST <endpoint>/<admin-API>/pools/cancel?pool=http://server{1...4}/disk{1...4}
    /// Cancels an in-progress decommission on a single pool, selected by
    /// endpoint expansion string or (with `by-id=true`) numeric index.
    #[tracing::instrument(skip_all)]
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle CancelDecommission");
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::DecommissionAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let Some(endpoints) = GLOBAL_Endpoints.get() else {
            return Err(s3_error!(NotImplemented));
        };
        if endpoints.legacy() {
            return Err(s3_error!(NotImplemented));
        }
        let query = {
            if let Some(query) = req.uri.query() {
                let input: StatusPoolQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get body failed"))?;
                input
            } else {
                StatusPoolQuery::default()
            }
        };
        let is_byid = query.by_id.as_str() == "true";
        // Fix: with by-id=true a non-numeric `pool` previously parsed as 0 via
        // `unwrap_or_default`, silently cancelling the decommission of pool 0.
        // Invalid input is now rejected, matching StartDecommission.
        let has_idx = {
            if is_byid {
                query.pool.parse::<usize>().ok().filter(|idx| *idx < endpoints.as_ref().len())
            } else {
                endpoints.get_pool_idx(&query.pool)
            }
        };
        let Some(idx) = has_idx else {
            warn!("specified pool {} not found, please specify a valid pool", &query.pool);
            return Err(s3_error!(InvalidArgument));
        };
        let Some(store) = new_object_layer_fn() else {
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };
        store.decommission_cancel(idx).await.map_err(ApiError::from)?;
        Ok(S3Response::new((StatusCode::OK, Body::default())))
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/handlers/bucket_meta.rs | rustfs/src/admin/handlers/bucket_meta.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{
collections::HashMap,
io::{Cursor, Read as _, Write as _},
};
use crate::{
admin::{auth::validate_admin_request, router::Operation},
auth::{check_key_valid, get_session_token},
server::RemoteAddr,
};
use http::{HeaderMap, StatusCode};
use matchit::Params;
use rustfs_config::MAX_BUCKET_METADATA_IMPORT_SIZE;
use rustfs_ecstore::{
StorageAPI,
bucket::{
metadata::{
BUCKET_LIFECYCLE_CONFIG, BUCKET_NOTIFICATION_CONFIG, BUCKET_POLICY_CONFIG, BUCKET_QUOTA_CONFIG_FILE,
BUCKET_REPLICATION_CONFIG, BUCKET_SSECONFIG, BUCKET_TAGGING_CONFIG, BUCKET_TARGETS_FILE, BUCKET_VERSIONING_CONFIG,
BucketMetadata, OBJECT_LOCK_CONFIG,
},
metadata_sys,
quota::BucketQuota,
target::BucketTargets,
},
error::StorageError,
new_object_layer_fn,
store_api::BucketOptions,
};
use rustfs_ecstore::{
bucket::utils::{deserialize, serialize},
store_api::MakeBucketOptions,
};
use rustfs_policy::policy::{
BucketPolicy,
action::{Action, AdminAction},
};
use rustfs_utils::path::{SLASH_SEPARATOR, path_join_buf};
use s3s::{
Body, S3Request, S3Response, S3Result,
dto::{
BucketLifecycleConfiguration, ObjectLockConfiguration, ReplicationConfiguration, ServerSideEncryptionConfiguration,
Tagging, VersioningConfiguration,
},
header::{CONTENT_DISPOSITION, CONTENT_LENGTH, CONTENT_TYPE},
s3_error,
};
use serde::Deserialize;
use serde_urlencoded::from_bytes;
use time::OffsetDateTime;
use tracing::warn;
use zip::{ZipArchive, ZipWriter, write::SimpleFileOptions};
/// Query parameters for the export-bucket-metadata endpoint.
#[derive(Debug, Default, serde::Deserialize)]
pub struct ExportBucketMetadataQuery {
    /// Bucket to export; an empty value exports metadata for all buckets.
    pub bucket: String,
}
pub struct ExportBucketMetadata {}
#[async_trait::async_trait]
impl Operation for ExportBucketMetadata {
async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
let query = {
if let Some(query) = req.uri.query() {
let input: ExportBucketMetadataQuery =
from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get query failed"))?;
input
} else {
ExportBucketMetadataQuery::default()
}
};
let Some(input_cred) = req.credentials else {
return Err(s3_error!(InvalidRequest, "get cred failed"));
};
let (cred, owner) =
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
validate_admin_request(
&req.headers,
&cred,
owner,
false,
vec![Action::AdminAction(AdminAction::ExportBucketMetadataAction)],
req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
)
.await?;
let Some(store) = new_object_layer_fn() else {
return Err(s3_error!(InvalidRequest, "object store not init"));
};
let buckets = if query.bucket.is_empty() {
store
.list_bucket(&BucketOptions::default())
.await
.map_err(|e| s3_error!(InternalError, "list buckets failed: {e}"))?
} else {
let bucket = store
.get_bucket_info(&query.bucket, &BucketOptions::default())
.await
.map_err(|e| s3_error!(InternalError, "get bucket failed: {e}"))?;
vec![bucket]
};
let mut zip_writer = ZipWriter::new(Cursor::new(Vec::new()));
let confs = [
BUCKET_POLICY_CONFIG,
BUCKET_NOTIFICATION_CONFIG,
BUCKET_LIFECYCLE_CONFIG,
BUCKET_SSECONFIG,
BUCKET_TAGGING_CONFIG,
BUCKET_QUOTA_CONFIG_FILE,
OBJECT_LOCK_CONFIG,
BUCKET_VERSIONING_CONFIG,
BUCKET_REPLICATION_CONFIG,
BUCKET_TARGETS_FILE,
];
for bucket in buckets {
for &conf in confs.iter() {
let conf_path = path_join_buf(&[bucket.name.as_str(), conf]);
match conf {
BUCKET_POLICY_CONFIG => {
let config: BucketPolicy = match metadata_sys::get_bucket_policy(&bucket.name).await {
Ok((res, _)) => res,
Err(e) => {
if e == StorageError::ConfigNotFound {
continue;
}
return Err(s3_error!(InternalError, "get bucket metadata failed: {e}"));
}
};
let config_json =
serde_json::to_vec(&config).map_err(|e| s3_error!(InternalError, "serialize config failed: {e}"))?;
zip_writer
.start_file(conf_path, SimpleFileOptions::default())
.map_err(|e| s3_error!(InternalError, "start file failed: {e}"))?;
zip_writer
.write_all(&config_json)
.map_err(|e| s3_error!(InternalError, "write file failed: {e}"))?;
}
BUCKET_NOTIFICATION_CONFIG => {
let config: s3s::dto::NotificationConfiguration =
match metadata_sys::get_notification_config(&bucket.name).await {
Ok(Some(res)) => res,
Err(e) => {
if e == StorageError::ConfigNotFound {
continue;
}
return Err(s3_error!(InternalError, "get bucket metadata failed: {e}"));
}
Ok(None) => continue,
};
let config_xml =
serialize(&config).map_err(|e| s3_error!(InternalError, "serialize config failed: {e}"))?;
zip_writer
.start_file(conf_path, SimpleFileOptions::default())
.map_err(|e| s3_error!(InternalError, "start file failed: {e}"))?;
zip_writer
.write_all(&config_xml)
.map_err(|e| s3_error!(InternalError, "write file failed: {e}"))?;
}
BUCKET_LIFECYCLE_CONFIG => {
let config: BucketLifecycleConfiguration = match metadata_sys::get_lifecycle_config(&bucket.name).await {
Ok((res, _)) => res,
Err(e) => {
if e == StorageError::ConfigNotFound {
continue;
}
return Err(s3_error!(InternalError, "get bucket metadata failed: {e}"));
}
};
let config_xml =
serialize(&config).map_err(|e| s3_error!(InternalError, "serialize config failed: {e}"))?;
zip_writer
.start_file(conf_path, SimpleFileOptions::default())
.map_err(|e| s3_error!(InternalError, "start file failed: {e}"))?;
zip_writer
.write_all(&config_xml)
.map_err(|e| s3_error!(InternalError, "write file failed: {e}"))?;
}
BUCKET_TAGGING_CONFIG => {
let config: Tagging = match metadata_sys::get_tagging_config(&bucket.name).await {
Ok((res, _)) => res,
Err(e) => {
if e == StorageError::ConfigNotFound {
continue;
}
return Err(s3_error!(InternalError, "get bucket metadata failed: {e}"));
}
};
let config_xml =
serialize(&config).map_err(|e| s3_error!(InternalError, "serialize config failed: {e}"))?;
zip_writer
.start_file(conf_path, SimpleFileOptions::default())
.map_err(|e| s3_error!(InternalError, "start file failed: {e}"))?;
zip_writer
.write_all(&config_xml)
.map_err(|e| s3_error!(InternalError, "write file failed: {e}"))?;
}
BUCKET_QUOTA_CONFIG_FILE => {
let config: BucketQuota = match metadata_sys::get_quota_config(&bucket.name).await {
Ok((res, _)) => res,
Err(e) => {
if e == StorageError::ConfigNotFound {
continue;
}
return Err(s3_error!(InternalError, "get bucket metadata failed: {e}"));
}
};
let config_json =
serde_json::to_vec(&config).map_err(|e| s3_error!(InternalError, "serialize config failed: {e}"))?;
zip_writer
.start_file(conf_path, SimpleFileOptions::default())
.map_err(|e| s3_error!(InternalError, "start file failed: {e}"))?;
zip_writer
.write_all(&config_json)
.map_err(|e| s3_error!(InternalError, "write file failed: {e}"))?;
}
OBJECT_LOCK_CONFIG => {
let config = match metadata_sys::get_object_lock_config(&bucket.name).await {
Ok((res, _)) => res,
Err(e) => {
if e == StorageError::ConfigNotFound {
continue;
}
return Err(s3_error!(InternalError, "get bucket metadata failed: {e}"));
}
};
let config_xml =
serialize(&config).map_err(|e| s3_error!(InternalError, "serialize config failed: {e}"))?;
zip_writer
.start_file(conf_path, SimpleFileOptions::default())
.map_err(|e| s3_error!(InternalError, "start file failed: {e}"))?;
zip_writer
.write_all(&config_xml)
.map_err(|e| s3_error!(InternalError, "write file failed: {e}"))?;
}
BUCKET_SSECONFIG => {
let config = match metadata_sys::get_sse_config(&bucket.name).await {
Ok((res, _)) => res,
Err(e) => {
if e == StorageError::ConfigNotFound {
continue;
}
return Err(s3_error!(InternalError, "get bucket metadata failed: {e}"));
}
};
let config_xml =
serialize(&config).map_err(|e| s3_error!(InternalError, "serialize config failed: {e}"))?;
zip_writer
.start_file(conf_path, SimpleFileOptions::default())
.map_err(|e| s3_error!(InternalError, "start file failed: {e}"))?;
zip_writer
.write_all(&config_xml)
.map_err(|e| s3_error!(InternalError, "write file failed: {e}"))?;
}
BUCKET_VERSIONING_CONFIG => {
let config = match metadata_sys::get_versioning_config(&bucket.name).await {
Ok((res, _)) => res,
Err(e) => {
if e == StorageError::ConfigNotFound {
continue;
}
return Err(s3_error!(InternalError, "get bucket metadata failed: {e}"));
}
};
let config_xml =
serialize(&config).map_err(|e| s3_error!(InternalError, "serialize config failed: {e}"))?;
zip_writer
.start_file(conf_path, SimpleFileOptions::default())
.map_err(|e| s3_error!(InternalError, "start file failed: {e}"))?;
zip_writer
.write_all(&config_xml)
.map_err(|e| s3_error!(InternalError, "write file failed: {e}"))?;
}
BUCKET_REPLICATION_CONFIG => {
let config = match metadata_sys::get_replication_config(&bucket.name).await {
Ok((res, _)) => res,
Err(e) => {
if e == StorageError::ConfigNotFound {
continue;
}
return Err(s3_error!(InternalError, "get bucket metadata failed: {e}"));
}
};
let config_xml =
serialize(&config).map_err(|e| s3_error!(InternalError, "serialize config failed: {e}"))?;
zip_writer
.start_file(conf_path, SimpleFileOptions::default())
.map_err(|e| s3_error!(InternalError, "start file failed: {e}"))?;
zip_writer
.write_all(&config_xml)
.map_err(|e| s3_error!(InternalError, "write file failed: {e}"))?;
}
BUCKET_TARGETS_FILE => {
let config: BucketTargets = match metadata_sys::get_bucket_targets_config(&bucket.name).await {
Ok(res) => res,
Err(e) => {
if e == StorageError::ConfigNotFound {
continue;
}
return Err(s3_error!(InternalError, "get bucket metadata failed: {e}"));
}
};
let config_json =
serde_json::to_vec(&config).map_err(|e| s3_error!(InternalError, "serialize config failed: {e}"))?;
zip_writer
.start_file(conf_path, SimpleFileOptions::default())
.map_err(|e| s3_error!(InternalError, "start file failed: {e}"))?;
zip_writer
.write_all(&config_json)
.map_err(|e| s3_error!(InternalError, "write file failed: {e}"))?;
}
_ => {}
}
}
}
let zip_bytes = zip_writer
.finish()
.map_err(|e| s3_error!(InternalError, "finish zip failed: {e}"))?;
let mut header = HeaderMap::new();
header.insert(CONTENT_TYPE, "application/zip".parse().unwrap());
header.insert(CONTENT_DISPOSITION, "attachment; filename=bucket-meta.zip".parse().unwrap());
header.insert(CONTENT_LENGTH, zip_bytes.get_ref().len().to_string().parse().unwrap());
Ok(S3Response::with_headers((StatusCode::OK, Body::from(zip_bytes.into_inner())), header))
}
}
/// Query parameters accepted by the bucket-metadata import endpoint.
#[derive(Debug, Default, Deserialize)]
pub struct ImportBucketMetadataQuery {
    // Accepted for API compatibility; the handler derives bucket names from the
    // paths inside the uploaded zip archive instead of this parameter.
    #[allow(dead_code)]
    pub bucket: String,
}
/// Admin handler that restores bucket metadata configs from an uploaded zip archive.
pub struct ImportBucketMetadata {}
#[async_trait::async_trait]
impl Operation for ImportBucketMetadata {
    /// Imports bucket metadata from a zip archive (the format produced by the
    /// export endpoint): entries are named `<bucket>/<conf>`. For each entry the
    /// payload is validated by deserializing it, then copied into the in-memory
    /// `BucketMetadata` for that bucket. Buckets missing from the store are
    /// force-created. Invalid entries are logged and skipped, never fatal.
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        // The query is parsed for validation/compat only; its fields are unused.
        let _query = {
            if let Some(query) = req.uri.query() {
                let input: ImportBucketMetadataQuery =
                    from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get query failed"))?;
                input
            } else {
                ImportBucketMetadataQuery::default()
            }
        };
        // Authenticate the caller and require the admin import action.
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ImportBucketMetadataAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        // Buffer the request body, bounded to guard against oversized uploads.
        let mut input = req.input;
        let body = match input.store_all_limited(MAX_BUCKET_METADATA_IMPORT_SIZE).await {
            Ok(b) => b,
            Err(e) => {
                warn!("get body failed, e: {:?}", e);
                return Err(s3_error!(InvalidRequest, "bucket metadata import body too large or failed to read"));
            }
        };
        let mut zip_reader = ZipArchive::new(Cursor::new(body)).map_err(|e| s3_error!(InternalError, "get body failed: {e}"))?;
        // First pass: read all file contents into memory
        // (ZipArchive::by_index borrows the archive mutably, so entries cannot be
        // held across the later async calls; buffering decouples the two phases.)
        let mut file_contents = Vec::new();
        for i in 0..zip_reader.len() {
            let mut file = zip_reader
                .by_index(i)
                .map_err(|e| s3_error!(InternalError, "get file failed: {e}"))?;
            let file_path = file.name().to_string();
            let mut content = Vec::new();
            file.read_to_end(&mut content)
                .map_err(|e| s3_error!(InternalError, "read file failed: {e}"))?;
            file_contents.push((file_path, content));
        }
        // Extract bucket names
        // (first path segment of each entry; duplicates collapsed, order preserved)
        let mut bucket_names = Vec::new();
        for (file_path, _) in &file_contents {
            let file_path_split = file_path.split(SLASH_SEPARATOR).collect::<Vec<&str>>();
            if file_path_split.len() < 2 {
                warn!("file path is invalid: {}", file_path);
                continue;
            }
            let bucket_name = file_path_split[0].to_string();
            if !bucket_names.contains(&bucket_name) {
                bucket_names.push(bucket_name);
            }
        }
        // Get existing bucket metadata
        // Failures here are non-fatal: missing buckets are created lazily below.
        let mut bucket_metadatas: HashMap<String, BucketMetadata> = HashMap::new();
        for bucket_name in bucket_names {
            match metadata_sys::get_config_from_disk(&bucket_name).await {
                Ok(res) => {
                    bucket_metadatas.insert(bucket_name, res);
                }
                Err(e) => {
                    if e == StorageError::ConfigNotFound {
                        warn!("bucket metadata not found: {e}");
                        continue;
                    }
                    warn!("get bucket metadata failed: {e}");
                    continue;
                }
            };
        }
        let Some(store) = new_object_layer_fn() else {
            return Err(s3_error!(InvalidRequest, "object store not init"));
        };
        // Single timestamp so every imported config shares the same updated-at time.
        let update_at = OffsetDateTime::now_utc();
        // Second pass: process file contents
        for (file_path, content) in file_contents {
            let file_path_split = file_path.split(SLASH_SEPARATOR).collect::<Vec<&str>>();
            if file_path_split.len() < 2 {
                warn!("file path is invalid: {}", file_path);
                continue;
            }
            let bucket_name = file_path_split[0];
            let conf_name = file_path_split[1];
            // create bucket if not exists
            // (also runs when metadata loading failed above; after this branch the
            // map always contains `bucket_name`, making the get_mut().unwrap()s safe)
            if !bucket_metadatas.contains_key(bucket_name) {
                if let Err(e) = store
                    .make_bucket(
                        bucket_name,
                        &MakeBucketOptions {
                            force_create: true,
                            ..Default::default()
                        },
                    )
                    .await
                {
                    warn!("create bucket failed: {e}");
                    continue;
                }
                let metadata = metadata_sys::get(bucket_name).await.unwrap_or_default();
                bucket_metadatas.insert(bucket_name.to_string(), (*metadata).clone());
            }
            // Each arm validates the payload by deserializing it, then stores the
            // raw bytes plus the shared timestamp on the bucket's metadata.
            match conf_name {
                BUCKET_POLICY_CONFIG => {
                    let config: BucketPolicy = match serde_json::from_slice(&content) {
                        Ok(config) => config,
                        Err(e) => {
                            warn!("deserialize config failed: {e}");
                            continue;
                        }
                    };
                    if config.version.is_empty() {
                        continue;
                    }
                    let metadata = bucket_metadatas.get_mut(bucket_name).unwrap();
                    metadata.policy_config_json = content;
                    metadata.policy_config_updated_at = update_at;
                }
                BUCKET_NOTIFICATION_CONFIG => {
                    if let Err(e) = deserialize::<s3s::dto::NotificationConfiguration>(&content) {
                        warn!("deserialize config failed: {e}");
                        continue;
                    }
                    let metadata = bucket_metadatas.get_mut(bucket_name).unwrap();
                    metadata.notification_config_xml = content;
                    metadata.notification_config_updated_at = update_at;
                }
                BUCKET_LIFECYCLE_CONFIG => {
                    if let Err(e) = deserialize::<BucketLifecycleConfiguration>(&content) {
                        warn!("deserialize config failed: {e}");
                        continue;
                    }
                    let metadata = bucket_metadatas.get_mut(bucket_name).unwrap();
                    metadata.lifecycle_config_xml = content;
                    metadata.lifecycle_config_updated_at = update_at;
                }
                BUCKET_SSECONFIG => {
                    if let Err(e) = deserialize::<ServerSideEncryptionConfiguration>(&content) {
                        warn!("deserialize config failed: {e}");
                        continue;
                    }
                    let metadata = bucket_metadatas.get_mut(bucket_name).unwrap();
                    metadata.encryption_config_xml = content;
                    metadata.encryption_config_updated_at = update_at;
                }
                BUCKET_TAGGING_CONFIG => {
                    if let Err(e) = deserialize::<Tagging>(&content) {
                        warn!("deserialize config failed: {e}");
                        continue;
                    }
                    let metadata = bucket_metadatas.get_mut(bucket_name).unwrap();
                    metadata.tagging_config_xml = content;
                    metadata.tagging_config_updated_at = update_at;
                }
                BUCKET_QUOTA_CONFIG_FILE => {
                    if let Err(e) = serde_json::from_slice::<BucketQuota>(&content) {
                        warn!("deserialize config failed: {e}");
                        continue;
                    }
                    let metadata = bucket_metadatas.get_mut(bucket_name).unwrap();
                    metadata.quota_config_json = content;
                    metadata.quota_config_updated_at = update_at;
                }
                OBJECT_LOCK_CONFIG => {
                    if let Err(e) = deserialize::<ObjectLockConfiguration>(&content) {
                        warn!("deserialize config failed: {e}");
                        continue;
                    }
                    let metadata = bucket_metadatas.get_mut(bucket_name).unwrap();
                    metadata.object_lock_config_xml = content;
                    metadata.object_lock_config_updated_at = update_at;
                }
                BUCKET_VERSIONING_CONFIG => {
                    if let Err(e) = deserialize::<VersioningConfiguration>(&content) {
                        warn!("deserialize config failed: {e}");
                        continue;
                    }
                    let metadata = bucket_metadatas.get_mut(bucket_name).unwrap();
                    metadata.versioning_config_xml = content;
                    metadata.versioning_config_updated_at = update_at;
                }
                BUCKET_REPLICATION_CONFIG => {
                    if let Err(e) = deserialize::<ReplicationConfiguration>(&content) {
                        warn!("deserialize config failed: {e}");
                        continue;
                    }
                    let metadata = bucket_metadatas.get_mut(bucket_name).unwrap();
                    metadata.replication_config_xml = content;
                    metadata.replication_config_updated_at = update_at;
                }
                BUCKET_TARGETS_FILE => {
                    if let Err(e) = serde_json::from_slice::<BucketTargets>(&content) {
                        warn!("deserialize config failed: {e}");
                        continue;
                    }
                    let metadata = bucket_metadatas.get_mut(bucket_name).unwrap();
                    metadata.bucket_targets_config_json = content;
                    metadata.bucket_targets_config_updated_at = update_at;
                }
                _ => {}
            }
        }
        // NOTE(review): the mutated `bucket_metadatas` entries are only updated in
        // memory here — no save/persist call is visible in this handler. Confirm
        // persistence happens elsewhere or is intentionally deferred.
        // TODO: site replication notify
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/handlers/rebalance.rs | rustfs/src/admin/handlers/rebalance.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
admin::{auth::validate_admin_request, router::Operation},
auth::{check_key_valid, get_session_token},
server::RemoteAddr,
};
use http::{HeaderMap, StatusCode};
use matchit::Params;
use rustfs_ecstore::rebalance::RebalanceMeta;
use rustfs_ecstore::{
StorageAPI,
error::StorageError,
new_object_layer_fn,
notification_sys::get_global_notification_sys,
rebalance::{DiskStat, RebalSaveOpt},
store_api::BucketOptions,
};
use rustfs_policy::policy::action::{Action, AdminAction};
use s3s::{
Body, S3Request, S3Response, S3Result,
header::{CONTENT_LENGTH, CONTENT_TYPE},
s3_error,
};
use serde::{Deserialize, Serialize};
use std::time::Duration;
use time::OffsetDateTime;
use tracing::warn;
/// Response body returned by the rebalance-start endpoint.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct RebalanceResp {
    // UUID identifying the newly started rebalance operation.
    pub id: String,
}
/// Per-pool progress counters for an in-flight rebalance.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct RebalPoolProgress {
    #[serde(rename = "objects")]
    pub num_objects: u64,
    #[serde(rename = "versions")]
    pub num_versions: u64,
    #[serde(rename = "bytes")]
    pub bytes: u64,
    // Bucket/object currently being moved.
    #[serde(rename = "bucket")]
    pub bucket: String,
    #[serde(rename = "object")]
    pub object: String,
    // Elapsed time and estimated time to completion, both in whole seconds.
    #[serde(rename = "elapsed")]
    pub elapsed: u64,
    #[serde(rename = "eta")]
    pub eta: u64,
}
/// Status of one storage pool within a rebalance operation.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct RebalancePoolStatus {
    #[serde(rename = "id")]
    pub id: usize, // Pool index (zero-based)
    #[serde(rename = "status")]
    pub status: String, // Active if rebalance is running, empty otherwise
    #[serde(rename = "used")]
    pub used: f64, // Percentage used space
    #[serde(rename = "progress")]
    pub progress: Option<RebalPoolProgress>, // None when rebalance is not running
}
/// Aggregate rebalance status returned by the admin status endpoint.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct RebalanceAdminStatus {
    pub id: String, // Identifies the ongoing rebalance operation by a UUID
    #[serde(rename = "pools")]
    pub pools: Vec<RebalancePoolStatus>, // Contains all pools, including inactive
    #[serde(rename = "stoppedAt", with = "offsetdatetime_rfc3339")]
    pub stopped_at: Option<OffsetDateTime>, // Optional timestamp when rebalance was stopped
}
/// Admin handler that starts a rebalance across storage pools.
pub struct RebalanceStart {}
#[async_trait::async_trait]
impl Operation for RebalanceStart {
    /// Starts a rebalance: refuses when there is a single pool, when a
    /// decommission is running, or when a rebalance is already in progress;
    /// otherwise initializes the rebalance metadata over all buckets, kicks off
    /// the rebalance, and broadcasts a meta reload to peers. Returns the new
    /// operation id as JSON.
    #[tracing::instrument(skip_all)]
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle RebalanceStart");
        // Authenticate and require the rebalance admin action.
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::RebalanceAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let Some(store) = new_object_layer_fn() else {
            return Err(s3_error!(InternalError, "Not init"));
        };
        // Rebalancing moves data between pools; with one pool there is nothing to do.
        if store.pools.len() == 1 {
            return Err(s3_error!(NotImplemented));
        }
        // Rebalance and decommission are mutually exclusive operations.
        if store.is_decommission_running().await {
            return Err(s3_error!(
                InvalidRequest,
                "Rebalance cannot be started, decommission is already in progress"
            ));
        }
        if store.is_rebalance_started().await {
            return Err(s3_error!(OperationAborted, "Rebalance already in progress"));
        }
        // Snapshot the current bucket list; the rebalance meta tracks these buckets.
        let bucket_infos = store
            .list_bucket(&BucketOptions::default())
            .await
            .map_err(|e| s3_error!(InternalError, "Failed to list buckets: {}", e))?;
        let buckets: Vec<String> = bucket_infos.into_iter().map(|bucket| bucket.name).collect();
        let id = match store.init_rebalance_meta(buckets).await {
            Ok(id) => id,
            Err(e) => {
                return Err(s3_error!(InternalError, "Failed to init rebalance meta: {}", e));
            }
        };
        store.start_rebalance().await;
        warn!("Rebalance started with id: {}", id);
        // Tell peer nodes to reload the rebalance metadata they now share.
        if let Some(notification_sys) = get_global_notification_sys() {
            warn!("RebalanceStart Loading rebalance meta start");
            notification_sys.load_rebalance_meta(true).await;
            warn!("RebalanceStart Loading rebalance meta done");
        }
        let resp = RebalanceResp { id };
        let data = serde_json::to_string(&resp).map_err(|e| s3_error!(InternalError, "Failed to serialize response: {}", e))?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header))
    }
}
// RebalanceStatus
/// Admin handler that reports per-pool rebalance progress.
pub struct RebalanceStatus {}
#[async_trait::async_trait]
impl Operation for RebalanceStatus {
    /// Loads the persisted rebalance metadata and computes, for every pool,
    /// its usage ratio plus (for participating pools) progress counters,
    /// elapsed time, and an ETA extrapolated from bytes moved so far.
    #[tracing::instrument(skip_all)]
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle RebalanceStatus");
        // Authenticate and require the rebalance admin action.
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::RebalanceAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let Some(store) = new_object_layer_fn() else {
            return Err(s3_error!(InternalError, "Not init"));
        };
        // The rebalance meta lives on pool 0; absence means no rebalance was started.
        let mut meta = RebalanceMeta::new();
        if let Err(err) = meta.load(store.pools[0].clone()).await {
            if err == StorageError::ConfigNotFound {
                return Err(s3_error!(NoSuchResource, "Pool rebalance is not started"));
            }
            return Err(s3_error!(InternalError, "Failed to load rebalance meta: {}", err));
        }
        // Compute disk usage percentage
        // (aggregate capacity/free space per pool from the live storage info)
        let si = store.storage_info().await;
        let mut disk_stats = vec![DiskStat::default(); store.pools.len()];
        for disk in si.disks.iter() {
            if disk.pool_index < 0 || disk_stats.len() <= disk.pool_index as usize {
                continue;
            }
            disk_stats[disk.pool_index as usize].available_space += disk.available_space;
            disk_stats[disk.pool_index as usize].total_space += disk.total_space;
        }
        let mut stop_time = meta.stopped_at;
        let mut admin_status = RebalanceAdminStatus {
            id: meta.id.clone(),
            stopped_at: meta.stopped_at,
            pools: vec![RebalancePoolStatus::default(); meta.pool_stats.len()],
        };
        // NOTE(review): `disk_stats[i]` assumes meta.pool_stats has at most as many
        // entries as store.pools — confirm; a stale meta with more pools would panic.
        // Also, a pool with total_space == 0 yields NaN for `used` (f64 division).
        for (i, ps) in meta.pool_stats.iter().enumerate() {
            admin_status.pools[i] = RebalancePoolStatus {
                id: i,
                status: ps.info.status.to_string(),
                used: (disk_stats[i].total_space - disk_stats[i].available_space) as f64 / disk_stats[i].total_space as f64,
                progress: None,
            };
            if !ps.participating {
                continue;
            }
            // Calculate total bytes to be rebalanced
            let total_bytes_to_rebal = ps.init_capacity as f64 * meta.percent_free_goal - ps.init_free_space as f64;
            let mut elapsed = if let Some(start_time) = ps.info.start_time {
                let now = OffsetDateTime::now_utc();
                now - start_time
            } else {
                return Err(s3_error!(InternalError, "Start time is not available"));
            };
            // ETA: linear extrapolation from bytes moved so far.
            let mut eta = if ps.bytes > 0 {
                Duration::from_secs_f64(total_bytes_to_rebal * elapsed.as_seconds_f64() / ps.bytes as f64)
            } else {
                Duration::ZERO
            };
            if ps.info.end_time.is_some() {
                stop_time = ps.info.end_time;
            }
            // Once stopped, freeze elapsed at the stop instant and zero the ETA.
            if let Some(stopped_at) = stop_time {
                if let Some(start_time) = ps.info.start_time {
                    elapsed = stopped_at - start_time;
                }
                eta = Duration::ZERO;
            }
            admin_status.pools[i].progress = Some(RebalPoolProgress {
                num_objects: ps.num_objects,
                num_versions: ps.num_versions,
                bytes: ps.bytes,
                bucket: ps.bucket.clone(),
                object: ps.object.clone(),
                elapsed: elapsed.whole_seconds() as u64,
                eta: eta.as_secs(),
            });
        }
        let data =
            serde_json::to_string(&admin_status).map_err(|e| s3_error!(InternalError, "Failed to serialize response: {}", e))?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header))
    }
}
// RebalanceStop
/// Admin handler that stops an in-progress rebalance.
pub struct RebalanceStop {}
#[async_trait::async_trait]
impl Operation for RebalanceStop {
    /// Stops the rebalance cluster-wide: notifies peers to stop, records the
    /// stop timestamp in the rebalance stats, then asks peers to reload the
    /// updated metadata. Returns an empty 200 response.
    #[tracing::instrument(skip_all)]
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle RebalanceStop");
        // Authenticate and require the rebalance admin action.
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::RebalanceAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        let Some(store) = new_object_layer_fn() else {
            return Err(s3_error!(InternalError, "Not init"));
        };
        // Stop first, then persist the stop time, then broadcast a reload —
        // the ordering matters so peers observe the final stopped state.
        if let Some(notification_sys) = get_global_notification_sys() {
            notification_sys.stop_rebalance().await;
        }
        store
            .save_rebalance_stats(0, RebalSaveOpt::StoppedAt)
            .await
            .map_err(|e| s3_error!(InternalError, "Failed to stop rebalance: {}", e))?;
        warn!("handle RebalanceStop save_rebalance_stats done ");
        if let Some(notification_sys) = get_global_notification_sys() {
            warn!("handle RebalanceStop notification_sys load_rebalance_meta");
            notification_sys.load_rebalance_meta(false).await;
            warn!("handle RebalanceStop notification_sys load_rebalance_meta done");
        }
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
/// Serde `with`-module that (de)serializes `Option<OffsetDateTime>` as an
/// optional RFC 3339 timestamp string.
mod offsetdatetime_rfc3339 {
    use serde::{self, Deserialize, Deserializer, Serializer};
    use time::{OffsetDateTime, format_description::well_known::Rfc3339};
    /// Serializes `Some(dt)` as an RFC 3339 string, `None` as a null value.
    pub fn serialize<S>(dt: &Option<OffsetDateTime>, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if let Some(dt) = dt {
            let formatted = dt.format(&Rfc3339).map_err(serde::ser::Error::custom)?;
            serializer.serialize_some(&formatted)
        } else {
            serializer.serialize_none()
        }
    }
    /// Parses an optional RFC 3339 string back into `Option<OffsetDateTime>`.
    pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<OffsetDateTime>, D::Error>
    where
        D: Deserializer<'de>,
    {
        Option::<String>::deserialize(deserializer)?
            .map(|s| OffsetDateTime::parse(&s, &Rfc3339).map_err(serde::de::Error::custom))
            .transpose()
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/handlers/sts.rs | rustfs/src/admin/handlers/sts.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
admin::router::Operation,
auth::{check_key_valid, get_session_token},
};
use http::StatusCode;
use matchit::Params;
use rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE;
use rustfs_ecstore::bucket::utils::serialize;
use rustfs_iam::{manager::get_token_signing_key, sys::SESSION_POLICY_NAME};
use rustfs_policy::{auth::get_new_credentials_with_metadata, policy::Policy};
use s3s::{
Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result,
dto::{AssumeRoleOutput, Credentials, Timestamp},
s3_error,
};
use serde::Deserialize;
use serde_json::Value;
use serde_urlencoded::from_bytes;
use std::collections::HashMap;
use time::{Duration, OffsetDateTime};
use tracing::{error, info, warn};
// Only the AssumeRole action of the 2011-06-15 STS API version is supported.
const ASSUME_ROLE_ACTION: &str = "AssumeRole";
const ASSUME_ROLE_VERSION: &str = "2011-06-15";
/// Form-encoded body of an STS `AssumeRole` request (PascalCase field names,
/// all fields defaulting when absent).
#[derive(Deserialize, Debug, Default)]
#[serde(rename_all = "PascalCase", default)]
pub struct AssumeRoleRequest {
    pub action: String,
    // Requested credential lifetime in seconds; 0 means "use the default".
    pub duration_seconds: usize,
    pub version: String,
    pub role_arn: String,
    pub role_session_name: String,
    // Optional inline session policy (JSON) to scope the temporary credentials.
    pub policy: String,
    pub external_id: String,
}
/// Handler for the STS `AssumeRole` endpoint.
pub struct AssumeRoleHandle {}
#[async_trait::async_trait]
impl Operation for AssumeRoleHandle {
async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
warn!("handle AssumeRoleHandle");
let Some(user) = req.credentials else { return Err(s3_error!(InvalidRequest, "get cred failed")) };
let session_token = get_session_token(&req.uri, &req.headers);
if session_token.is_some() {
return Err(s3_error!(InvalidRequest, "AccessDenied1"));
}
let (cred, _owner) =
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &user.access_key).await?;
// TODO: Check permissions, do not allow STS access
if cred.is_temp() || cred.is_service_account() {
return Err(s3_error!(InvalidRequest, "AccessDenied"));
}
let mut input = req.input;
let bytes = match input.store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE).await {
Ok(b) => b,
Err(e) => {
warn!("get body failed, e: {:?}", e);
return Err(s3_error!(InvalidRequest, "STS request body too large or failed to read"));
}
};
let body: AssumeRoleRequest = from_bytes(&bytes).map_err(|_e| s3_error!(InvalidRequest, "invalid STS request format"))?;
if body.action.as_str() != ASSUME_ROLE_ACTION {
return Err(s3_error!(InvalidArgument, "not support action"));
}
if body.version.as_str() != ASSUME_ROLE_VERSION {
return Err(s3_error!(InvalidArgument, "not support version"));
}
let mut claims = cred.claims.unwrap_or_default();
populate_session_policy(&mut claims, &body.policy)?;
let exp = {
if body.duration_seconds > 0 {
body.duration_seconds
} else {
3600
}
};
claims.insert(
"exp".to_string(),
Value::Number(serde_json::Number::from(OffsetDateTime::now_utc().unix_timestamp() + exp as i64)),
);
claims.insert("parent".to_string(), Value::String(cred.access_key.clone()));
// warn!("AssumeRole get cred {:?}", &user);
// warn!("AssumeRole get body {:?}", &body);
let Ok(iam_store) = rustfs_iam::get() else {
return Err(s3_error!(InvalidRequest, "iam not init"));
};
if let Err(_err) = iam_store.policy_db_get(&cred.access_key, &cred.groups).await {
error!(
"AssumeRole get policy failed, err: {:?}, access_key: {:?}, groups: {:?}",
_err, cred.access_key, cred.groups
);
return Err(s3_error!(InvalidArgument, "invalid policy arg"));
}
let Some(secret) = get_token_signing_key() else {
return Err(s3_error!(InvalidArgument, "global active sk not init"));
};
info!("AssumeRole get claims {:?}", &claims);
let mut new_cred = get_new_credentials_with_metadata(&claims, &secret)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("get new cred failed {e}")))?;
new_cred.parent_user = cred.access_key.clone();
info!("AssumeRole get new_cred {:?}", &new_cred);
if let Err(_err) = iam_store.set_temp_user(&new_cred.access_key, &new_cred, None).await {
return Err(s3_error!(InternalError, "set_temp_user failed"));
}
// TODO: globalSiteReplicationSys
let resp = AssumeRoleOutput {
credentials: Some(Credentials {
access_key_id: new_cred.access_key,
expiration: Timestamp::from(
new_cred
.expiration
.unwrap_or(OffsetDateTime::now_utc().saturating_add(Duration::seconds(3600))),
),
secret_access_key: new_cred.secret_key,
session_token: new_cred.session_token,
}),
..Default::default()
};
// getAssumeRoleCredentials
let output = serialize::<AssumeRoleOutput>(&resp).unwrap();
Ok(S3Response::new((StatusCode::OK, Body::from(output))))
}
}
/// Validates `policy` (when non-empty) and stores it, base64url-encoded without
/// padding, under `SESSION_POLICY_NAME` in the JWT claims map.
///
/// Returns an error when the policy fails to parse, has an empty version, or
/// exceeds 2048 bytes once re-serialized. An empty `policy` is a no-op.
pub fn populate_session_policy(claims: &mut HashMap<String, Value>, policy: &str) -> S3Result<()> {
    if policy.is_empty() {
        return Ok(());
    }
    // Parse to validate structure; the error text guides callers toward valid JSON.
    let session_policy = Policy::parse_config(policy.as_bytes()).map_err(|e| {
        let error_msg = format!("Failed to parse session policy: {}. Please check that the policy is valid JSON format with standard brackets [] for arrays.", e);
        S3Error::with_message(S3ErrorCode::InvalidRequest, error_msg)
    })?;
    if session_policy.version.is_empty() {
        return Err(s3_error!(InvalidRequest, "invalid policy"));
    }
    // Re-serialize the validated policy and enforce the size ceiling on the result.
    let policy_buf = serde_json::to_vec(&session_policy)
        .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal policy err {e}")))?;
    if policy_buf.len() > 2048 {
        return Err(s3_error!(InvalidRequest, "policy too large"));
    }
    claims.insert(
        SESSION_POLICY_NAME.to_string(),
        Value::String(base64_simd::URL_SAFE_NO_PAD.encode_to_string(&policy_buf)),
    );
    Ok(())
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/admin/handlers/profile.rs | rustfs/src/admin/handlers/profile.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::admin::router::Operation;
use http::header::CONTENT_TYPE;
use http::{HeaderMap, StatusCode};
use matchit::Params;
use s3s::{Body, S3Request, S3Response, S3Result};
use tracing::info;
/// Admin handler that captures a 60-second CPU profile and returns the dump path.
pub struct TriggerProfileCPU {}
#[async_trait::async_trait]
impl Operation for TriggerProfileCPU {
    async fn call(&self, _req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        info!("Triggering CPU profile dump via S3 request...");
        let dur = std::time::Duration::from_secs(60);
        let path = crate::profiling::dump_cpu_pprof_for(dur)
            .await
            .map_err(|e| s3s::s3_error!(InternalError, "{}", format!("Failed to dump CPU profile: {e}")))?;
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "text/html".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(path.display().to_string())), header))
    }
}
/// Admin operation that triggers an immediate memory profile dump.
pub struct TriggerProfileMemory {}

#[async_trait::async_trait]
impl Operation for TriggerProfileMemory {
    /// Dumps a memory profile immediately and returns the path of the
    /// resulting pprof file in the response body.
    ///
    /// Responds with `InternalError` if the profiler fails to produce a dump.
    async fn call(&self, _req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        info!("Triggering Memory profile dump via S3 request...");
        match crate::profiling::dump_memory_pprof_now().await {
            Ok(path) => {
                let mut header = HeaderMap::new();
                // NOTE(review): plain path body labelled text/html — confirm
                // whether text/plain was intended (same as the CPU handler).
                header.insert(CONTENT_TYPE, "text/html".parse().unwrap());
                Ok(S3Response::with_headers((StatusCode::OK, Body::from(path.display().to_string())), header))
            }
            // s3_error! forwards format arguments itself; the nested format!
            // call was redundant. The resulting message is unchanged.
            Err(e) => Err(s3s::s3_error!(InternalError, "Failed to dump Memory profile: {e}")),
        }
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/mod.rs | rustfs/src/protocols/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod client;
pub mod ftps;
pub mod gateway;
pub mod session;
pub mod sftp;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/sftp/mod.rs | rustfs/src/protocols/sftp/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! SFTP protocol implementation
pub mod handler;
pub mod server;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/sftp/server.rs | rustfs/src/protocols/sftp/server.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::protocols::session::context::{Protocol as SessionProtocol, SessionContext};
use crate::protocols::session::principal::ProtocolPrincipal;
use crate::protocols::sftp::handler::SftpHandler;
use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64};
use russh::ChannelId;
use russh::keys::{Algorithm, HashAlg, PrivateKey, PublicKey, PublicKeyBase64};
use russh::server::{Auth, Handler, Server as RusshServer, Session};
use ssh_key::Certificate;
use ssh_key::certificate::CertType;
use std::borrow::Cow;
use std::collections::HashMap;
use std::future::Future;
use std::net::SocketAddr;
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::time::SystemTime;
use tokio::sync::mpsc;
use tracing::{debug, error, info, warn};
// Placeholder bind address used when a peer address is unavailable (port 0 = ephemeral).
const DEFAULT_ADDR: &str = "0.0.0.0:0";
// Username suffixes selecting an alternative auth domain (service account / LDAP).
const AUTH_SUFFIX_SVC: &str = "=svc";
const AUTH_SUFFIX_LDAP: &str = "=ldap";
// Accepted key-type prefixes for authorized_keys entries.
const SSH_KEY_TYPE_RSA: &str = "ssh-rsa";
const SSH_KEY_TYPE_ED25519: &str = "ssh-ed25519";
const SSH_KEY_TYPE_ECDSA: &str = "ecdsa-";
// SSH subsystem name requested by SFTP clients.
const SFTP_SUBSYSTEM: &str = "sftp";
// Critical option name recognized during certificate validation.
const CRITICAL_OPTION_SOURCE_ADDRESS: &str = "source-address";
// Delay applied after a failed auth attempt to slow brute forcing.
const AUTH_FAILURE_DELAY_MS: u64 = 300;
// Capacity of the duplex pipe bridging SSH channel data and the SFTP handler.
const SFTP_BUFFER_SIZE: usize = 65536;
// Read-buffer size used when pumping SFTP output back to the SSH channel.
const SFTP_READ_BUF_SIZE: usize = 32 * 1024;

/// Boxed error type shared by the SFTP server plumbing.
type ServerError = Box<dyn std::error::Error + Send + Sync>;
/// Runtime configuration for the SFTP front end.
#[derive(Debug, Clone)]
pub struct SftpConfig {
    /// Address the SFTP listener binds to.
    pub bind_addr: SocketAddr,
    /// When true, the operator expects key-based auth; a warning is logged at
    /// startup if no CA certificates are configured.
    pub require_key_auth: bool,
    /// Optional path to a file of trusted CA public keys (OpenSSH format).
    pub cert_file: Option<String>,
    /// Optional path to the server host private key; a random Ed25519 key is
    /// generated when absent.
    pub key_file: Option<String>,
    /// Optional authorized_keys file consulted before IAM-stored keys.
    pub authorized_keys_file: Option<String>,
}
/// SFTP front end for RustFS: accepts SSH connections and serves the SFTP
/// subsystem backed by the S3 storage layer.
#[derive(Clone)]
pub struct SftpServer {
    /// Listener and authentication configuration.
    config: SftpConfig,
    /// Host key presented to connecting clients.
    key_pair: Arc<PrivateKey>,
    /// CA public keys trusted for SSH certificate authentication.
    trusted_certificates: Arc<Vec<ssh_key::PublicKey>>,
    /// Raw authorized_keys entries loaded at startup.
    authorized_keys: Arc<Vec<String>>,
}
impl SftpServer {
    /// Builds a server from `config`: loads (or generates) the host key, the
    /// optional trusted CA certificate list, and the optional authorized_keys
    /// file. Only host-key loading and CA loading are fatal on failure.
    pub fn new(config: SftpConfig) -> Result<Self, ServerError> {
        let key_pair = if let Some(key_file) = &config.key_file {
            let path = Path::new(key_file);
            russh::keys::load_secret_key(path, None)?
        } else {
            // Ephemeral host key: clients will see a new fingerprint on every restart.
            warn!("No host key provided, generating random key (not recommended for production).");
            use russh::keys::signature::rand_core::OsRng;
            let mut rng = OsRng;
            PrivateKey::random(&mut rng, Algorithm::Ed25519)?
        };
        let trusted_certificates = if let Some(cert_file) = &config.cert_file {
            info!("Loading trusted CA certificates from: {}", cert_file);
            load_trusted_certificates(cert_file)?
        } else {
            if config.require_key_auth {
                warn!("Key auth required but no CA certs provided.");
            }
            Vec::new()
        };
        // A broken authorized_keys file is non-fatal: fall back to IAM-stored keys.
        let authorized_keys = if let Some(auth_keys_file) = &config.authorized_keys_file {
            info!("Loading authorized SSH public keys from: {}", auth_keys_file);
            load_authorized_keys(auth_keys_file).unwrap_or_else(|e| {
                error!("Failed to load authorized keys from {}: {}", auth_keys_file, e);
                Vec::new()
            })
        } else {
            info!("No authorized keys file provided, will use IAM for key validation.");
            Vec::new()
        };
        info!("Loaded {} authorized SSH public key(s)", authorized_keys.len());
        Ok(Self {
            config,
            key_pair: Arc::new(key_pair),
            trusted_certificates: Arc::new(trusted_certificates),
            authorized_keys: Arc::new(authorized_keys),
        })
    }

    /// Accept loop: spawns one task per inbound TCP connection and runs the
    /// SSH protocol on it, until a shutdown signal arrives on `shutdown_rx`.
    pub async fn start(&self, mut shutdown_rx: tokio::sync::broadcast::Receiver<()>) -> Result<(), ServerError> {
        info!("Starting SFTP server on {}", self.config.bind_addr);
        let config = Arc::new(self.make_ssh_config());
        let socket = tokio::net::TcpListener::bind(&self.config.bind_addr).await?;
        let server_stub = self.clone();
        loop {
            tokio::select! {
                accept_res = socket.accept() => {
                    match accept_res {
                        Ok((stream, addr)) => {
                            let config = config.clone();
                            let server_instance = server_stub.clone();
                            // Each connection gets its own handler with shared,
                            // read-only key material behind Arcs.
                            tokio::spawn(async move {
                                let handler = SftpConnectionHandler::new(addr, server_instance.trusted_certificates.clone(), server_instance.authorized_keys.clone());
                                if let Err(e) = russh::server::run_stream(config, stream, handler).await {
                                    debug!("SFTP session closed from {}: {}", addr, e);
                                }
                            });
                        }
                        Err(e) => error!("Failed to accept SFTP connection: {}", e),
                    }
                }
                _ = shutdown_rx.recv() => {
                    info!("SFTP server shutting down");
                    break;
                }
            }
        }
        Ok(())
    }

    /// Assembles the russh server configuration: our host key plus the
    /// preferred host-key algorithms offered to clients.
    fn make_ssh_config(&self) -> russh::server::Config {
        let mut config = russh::server::Config::default();
        config.keys.push(self.key_pair.as_ref().clone());
        config.preferred.key = Cow::Borrowed(&[
            Algorithm::Ed25519,
            Algorithm::Rsa { hash: None },
            Algorithm::Rsa {
                hash: Some(HashAlg::Sha256),
            },
            Algorithm::Rsa {
                hash: Some(HashAlg::Sha512),
            },
        ]);
        config
    }

    /// Read-only access to the server configuration.
    pub fn config(&self) -> &SftpConfig {
        &self.config
    }
}
impl RusshServer for SftpServer {
    type Handler = SftpConnectionHandler;

    /// Builds a per-connection handler, substituting the placeholder address
    /// when the peer address is unknown.
    fn new_client(&mut self, peer_addr: Option<SocketAddr>) -> Self::Handler {
        let addr = match peer_addr {
            Some(known) => known,
            None => DEFAULT_ADDR.parse().expect("DEFAULT_ADDR is a valid socket address"),
        };
        SftpConnectionHandler::new(addr, self.trusted_certificates.clone(), self.authorized_keys.clone())
    }
}
/// Mutable per-connection state shared between the auth callbacks and the
/// channel/data callbacks.
struct ConnectionState {
    /// Peer address of the connected client.
    client_ip: SocketAddr,
    /// Set once authentication succeeds; required before the SFTP subsystem
    /// can be opened.
    identity: Option<rustfs_policy::auth::UserIdentity>,
    /// CA public keys trusted for certificate-based auth.
    trusted_certificates: Arc<Vec<ssh_key::PublicKey>>,
    /// Pre-loaded authorized_keys entries (checked before IAM-stored keys).
    authorized_keys: Arc<Vec<String>>,
    /// Per-channel senders feeding inbound SSH data into the SFTP pipe.
    sftp_channels: HashMap<ChannelId, mpsc::UnboundedSender<Vec<u8>>>,
}
/// russh connection handler; all state lives behind a mutex so the handler
/// can be cloned into spawned tasks.
#[derive(Clone)]
pub struct SftpConnectionHandler {
    state: Arc<Mutex<ConnectionState>>,
}
impl SftpConnectionHandler {
    /// Creates a handler for a single client connection with no authenticated
    /// identity and no open SFTP channels yet.
    fn new(client_ip: SocketAddr, trusted_certificates: Arc<Vec<ssh_key::PublicKey>>, authorized_keys: Arc<Vec<String>>) -> Self {
        let initial = ConnectionState {
            client_ip,
            identity: None,
            trusted_certificates,
            authorized_keys,
            sftp_channels: HashMap::new(),
        };
        Self {
            state: Arc::new(Mutex::new(initial)),
        }
    }
}
impl Handler for SftpConnectionHandler {
    type Error = ServerError;

    /// Password authentication: the SSH username is the S3 access key and the
    /// password is the secret key, validated against IAM. Failed attempts are
    /// delayed by AUTH_FAILURE_DELAY_MS to slow brute forcing.
    fn auth_password(&mut self, user: &str, password: &str) -> impl Future<Output = Result<Auth, Self::Error>> + Send {
        let raw_user = user.to_string();
        let password = password.to_string();
        let state = self.state.clone();
        async move {
            use rustfs_credentials::Credentials as S3Credentials;
            use rustfs_iam::get;
            // Strip an optional "=svc"/"=ldap" auth-domain suffix from the login name.
            let (username, suffix) = parse_auth_username(&raw_user);
            if let Some(s) = suffix {
                debug!("Detected auth suffix '{}' for user '{}'", s, username);
            }
            let iam_sys = get().map_err(|e| format!("IAM system unavailable: {}", e))?;
            let s3_creds = S3Credentials {
                access_key: username.to_string(),
                secret_key: password.clone(),
                session_token: String::new(),
                expiration: None,
                status: String::new(),
                parent_user: String::new(),
                groups: None,
                claims: None,
                name: None,
                description: None,
            };
            let (user_identity, is_valid) = iam_sys
                .check_key(&s3_creds.access_key)
                .await
                .map_err(|e| format!("IAM check failed: {}", e))?;
            if !is_valid {
                warn!("Invalid AccessKey: {}", username);
                tokio::time::sleep(std::time::Duration::from_millis(AUTH_FAILURE_DELAY_MS)).await;
                return Ok(Auth::Reject {
                    proceed_with_methods: None,
                    partial_success: false,
                });
            }
            if let Some(identity) = user_identity {
                // NOTE(review): plain string comparison of the secret is not
                // constant-time — consider a constant-time compare.
                if identity.credentials.secret_key != s3_creds.secret_key {
                    warn!("Invalid SecretKey for user: {}", username);
                    tokio::time::sleep(std::time::Duration::from_millis(AUTH_FAILURE_DELAY_MS)).await;
                    return Ok(Auth::Reject {
                        proceed_with_methods: None,
                        partial_success: false,
                    });
                }
                // Record the identity so subsystem_request can build the session.
                {
                    let mut guard = state.lock().unwrap();
                    guard.identity = Some(identity);
                }
                debug!("User {} authenticated successfully via password", username);
                Ok(Auth::Accept)
            } else {
                Ok(Auth::Reject {
                    proceed_with_methods: None,
                    partial_success: false,
                })
            }
        }
    }

    /// Public-key authentication with three strategies, in order:
    /// 1. If trusted CAs are configured, only SSH certificates are accepted.
    /// 2. Otherwise, pre-loaded authorized_keys entries are checked (each
    ///    match still requires the user to exist and be valid in IAM).
    /// 3. Finally, the client key is compared against SSH keys stored for the
    ///    user in IAM.
    fn auth_publickey(&mut self, user: &str, key: &PublicKey) -> impl Future<Output = Result<Auth, Self::Error>> + Send {
        let raw_user = user.to_string();
        let key = key.clone();
        let state = self.state.clone();
        async move {
            debug!("SFTP public key auth request for user: {}", raw_user);
            let trusted_cas = {
                let guard = state.lock().unwrap();
                guard.trusted_certificates.clone()
            };
            if !trusted_cas.is_empty() {
                // Certificate-only mode: a plain public key yields Ok(false)
                // from the validator and is rejected.
                match validate_ssh_certificate(&key, &trusted_cas, &raw_user) {
                    Ok(true) => {
                        let (username, _) = parse_auth_username(&raw_user);
                        use rustfs_iam::get;
                        let iam_sys = get().map_err(|e| format!("IAM system unavailable: {}", e))?;
                        let (user_identity, is_valid) = iam_sys
                            .check_key(username)
                            .await
                            .map_err(|e| format!("IAM lookup error: {}", e))?;
                        if is_valid && user_identity.is_some() {
                            {
                                let mut guard = state.lock().unwrap();
                                guard.identity = user_identity;
                            }
                            info!("User {} authenticated via SSH certificate", username);
                            Ok(Auth::Accept)
                        } else {
                            warn!("Valid certificate presented, but user '{}' does not exist in IAM", username);
                            Ok(Auth::Reject {
                                proceed_with_methods: None,
                                partial_success: false,
                            })
                        }
                    }
                    Ok(false) => Ok(Auth::Reject {
                        proceed_with_methods: None,
                        partial_success: false,
                    }),
                    Err(e) => {
                        error!("SSH certificate validation error: {}", e);
                        Ok(Auth::Reject {
                            proceed_with_methods: None,
                            partial_success: false,
                        })
                    }
                }
            } else {
                let (username, _) = parse_auth_username(&raw_user);
                use russh::keys::PublicKeyBase64;
                // Base64 of the raw key blob; compared against stored entries
                // both textually and byte-wise (see compare_keys).
                let client_key_bytes = key.public_key_bytes();
                let client_key_openssh = BASE64.encode(&client_key_bytes);
                let authorized_keys_clone = {
                    let guard = state.lock().unwrap();
                    guard.authorized_keys.clone()
                };
                if !authorized_keys_clone.is_empty() {
                    debug!("Checking against {} pre-loaded authorized key(s)", authorized_keys_clone.len());
                    for authorized_key in authorized_keys_clone.iter() {
                        if authorized_key.contains(&client_key_openssh)
                            || authorized_key == &client_key_openssh
                            || compare_keys(authorized_key, &client_key_openssh)
                        {
                            use rustfs_iam::get;
                            if let Ok(iam_sys) = get() {
                                match iam_sys.check_key(username).await {
                                    Ok((user_identity, is_valid)) => {
                                        if is_valid && user_identity.is_some() {
                                            let mut guard = state.lock().unwrap();
                                            guard.identity = user_identity;
                                            info!("User {} authenticated via pre-loaded authorized key (IAM verified)", username);
                                            return Ok(Auth::Accept);
                                        }
                                    }
                                    Err(e) => {
                                        error!("IAM lookup error: {}", e);
                                    }
                                }
                            }
                            // Reached only when the key matched but the IAM
                            // check did not accept; fall through to the
                            // IAM-stored-keys path below.
                            warn!(
                                "Key matched pre-loaded authorized keys, but IAM verification failed for user '{}'",
                                username
                            );
                        }
                    }
                }
                // Last resort: compare against SSH keys registered in IAM.
                use rustfs_iam::get;
                match get() {
                    Ok(iam_sys) => match iam_sys.check_key(username).await {
                        Ok((user_identity, is_valid)) => {
                            if is_valid {
                                if let Some(identity) = user_identity {
                                    let authorized_keys = identity.get_ssh_public_keys();
                                    if authorized_keys.is_empty() {
                                        warn!("User '{}' found in IAM but has no SSH public keys registered", username);
                                        return Ok(Auth::Reject {
                                            proceed_with_methods: None,
                                            partial_success: false,
                                        });
                                    }
                                    let key_valid = authorized_keys.iter().any(|authorized_key| {
                                        authorized_key.contains(&client_key_openssh)
                                            || authorized_key == &client_key_openssh
                                            || compare_keys(authorized_key, &client_key_openssh)
                                    });
                                    if key_valid {
                                        {
                                            let mut guard = state.lock().unwrap();
                                            guard.identity = Some(identity);
                                        }
                                        info!("User {} authenticated via public key from IAM", username);
                                        Ok(Auth::Accept)
                                    } else {
                                        warn!("Public key auth failed: client key not in IAM for user '{}'", username);
                                        Ok(Auth::Reject {
                                            proceed_with_methods: None,
                                            partial_success: false,
                                        })
                                    }
                                } else {
                                    warn!("Public key auth failed: user '{}' not found in IAM", username);
                                    Ok(Auth::Reject {
                                        proceed_with_methods: None,
                                        partial_success: false,
                                    })
                                }
                            } else {
                                warn!("Public key auth failed: user '{}' not valid in IAM", username);
                                Ok(Auth::Reject {
                                    proceed_with_methods: None,
                                    partial_success: false,
                                })
                            }
                        }
                        Err(e) => {
                            error!("IAM lookup error: {}", e);
                            Ok(Auth::Reject {
                                proceed_with_methods: None,
                                partial_success: false,
                            })
                        }
                    },
                    Err(e) => {
                        error!("IAM system unavailable: {}", e);
                        Ok(Auth::Reject {
                            proceed_with_methods: None,
                            partial_success: false,
                        })
                    }
                }
            }
        }
    }

    /// Accepts every session-channel open request; authorization is enforced
    /// later when the SFTP subsystem is requested.
    async fn channel_open_session(
        &mut self,
        _channel: russh::Channel<russh::server::Msg>,
        _session: &mut Session,
    ) -> Result<bool, Self::Error> {
        Ok(true)
    }

    /// Forwards inbound channel data to the SFTP pipe registered for that
    /// channel; data for unknown channels is silently dropped.
    fn data(
        &mut self,
        channel_id: ChannelId,
        data: &[u8],
        _session: &mut Session,
    ) -> impl Future<Output = Result<(), Self::Error>> + Send {
        let state = self.state.clone();
        let data = data.to_vec();
        async move {
            let sender = {
                let guard = state.lock().unwrap();
                guard.sftp_channels.get(&channel_id).cloned()
            };
            if let Some(tx) = sender {
                let _ = tx.send(data);
            }
            Ok(())
        }
    }

    /// Wires up the SFTP subsystem for an authenticated user: a duplex pipe
    /// couples the SSH channel to the russh_sftp handler, with three tasks
    /// pumping (1) channel data into the pipe, (2) the SFTP protocol itself,
    /// and (3) SFTP output back to the SSH channel.
    fn subsystem_request(
        &mut self,
        channel_id: ChannelId,
        name: &str,
        session: &mut Session,
    ) -> impl Future<Output = Result<(), Self::Error>> + Send {
        let name = name.to_string();
        let state = self.state.clone();
        let session_handle = session.handle();
        async move {
            if name == SFTP_SUBSYSTEM {
                // Refuse to start SFTP unless authentication populated identity.
                let (identity, client_ip) = {
                    let guard = state.lock().unwrap();
                    if let Some(id) = &guard.identity {
                        (id.clone(), guard.client_ip)
                    } else {
                        error!("SFTP subsystem requested but user not authenticated");
                        return Ok(());
                    }
                };
                debug!("Initializing SFTP subsystem for user: {}", identity.credentials.access_key);
                let context =
                    SessionContext::new(ProtocolPrincipal::new(Arc::new(identity)), SessionProtocol::Sftp, client_ip.ip());
                let (client_pipe, server_pipe) = tokio::io::duplex(SFTP_BUFFER_SIZE);
                let (mut client_read, mut client_write) = tokio::io::split(client_pipe);
                let (tx, mut rx) = mpsc::unbounded_channel::<Vec<u8>>();
                // Register the sender so data() can route channel bytes here.
                {
                    let mut guard = state.lock().unwrap();
                    guard.sftp_channels.insert(channel_id, tx);
                }
                // Task 1: SSH channel -> SFTP handler input.
                tokio::spawn(async move {
                    use tokio::io::AsyncWriteExt;
                    while let Some(data) = rx.recv().await {
                        if let Err(e) = client_write.write_all(&data).await {
                            debug!("SFTP input pipe closed: {}", e);
                            break;
                        }
                    }
                });
                // Task 2: the SFTP protocol engine over the server pipe end.
                let sftp_handler = SftpHandler::new(context);
                tokio::spawn(async move {
                    russh_sftp::server::run(server_pipe, sftp_handler).await;
                    debug!("SFTP handler finished");
                });
                // Task 3: SFTP handler output -> SSH channel; closes the
                // channel once the pipe drains.
                let session_handle = session_handle.clone();
                tokio::spawn(async move {
                    use tokio::io::AsyncReadExt;
                    let mut buf = vec![0u8; SFTP_READ_BUF_SIZE];
                    loop {
                        match client_read.read(&mut buf).await {
                            Ok(0) => break,
                            Ok(n) => {
                                let data: Vec<u8> = buf[..n].to_vec();
                                if session_handle.data(channel_id, data.into()).await.is_err() {
                                    break;
                                }
                            }
                            Err(e) => {
                                error!("Error reading from SFTP output: {}", e);
                                break;
                            }
                        }
                    }
                    let _ = session_handle.close(channel_id).await;
                });
            }
            Ok(())
        }
    }
}
/// Reads an OpenSSH-format file of CA public keys, skipping blanks, comments,
/// and unparseable lines (the latter with a warning). Errors only when the
/// file is missing or unreadable.
fn load_trusted_certificates(ca_cert_path: &str) -> Result<Vec<ssh_key::PublicKey>, ServerError> {
    let path = Path::new(ca_cert_path);
    if !path.exists() {
        return Err(format!("CA certificate file not found: {}", ca_cert_path).into());
    }
    let contents = std::fs::read_to_string(path)?;
    let keys: Vec<ssh_key::PublicKey> = contents
        .lines()
        .map(str::trim)
        .filter(|entry| !entry.is_empty() && !entry.starts_with('#'))
        .filter_map(|entry| match ssh_key::PublicKey::from_openssh(entry) {
            Ok(key) => Some(key),
            Err(e) => {
                warn!("Skipping invalid CA key line in {}: {}", ca_cert_path, e);
                None
            }
        })
        .collect();
    info!("Loaded {} trusted CA certificates from {}", keys.len(), ca_cert_path);
    Ok(keys)
}
/// Reads an authorized_keys file, keeping lines that begin with a recognized
/// key-type prefix (ssh-rsa / ssh-ed25519 / ecdsa-). Blank and comment lines
/// are skipped silently; other lines are skipped with a warning.
fn load_authorized_keys(auth_keys_path: &str) -> Result<Vec<String>, ServerError> {
    let path = Path::new(auth_keys_path);
    if !path.exists() {
        return Err(format!("Authorized keys file not found: {}", auth_keys_path).into());
    }
    let contents = std::fs::read_to_string(path)?;
    let mut keys = Vec::new();
    for raw in contents.lines() {
        let entry = raw.trim();
        if entry.is_empty() || entry.starts_with('#') {
            continue;
        }
        let looks_like_key = entry.starts_with(SSH_KEY_TYPE_RSA)
            || entry.starts_with(SSH_KEY_TYPE_ED25519)
            || entry.starts_with(SSH_KEY_TYPE_ECDSA);
        if looks_like_key {
            keys.push(entry.to_string());
        } else {
            warn!(
                "Skipping invalid authorized key line in {}: doesn't start with valid key type",
                auth_keys_path
            );
        }
    }
    info!("Loaded {} authorized SSH public keys from {}", keys.len(), auth_keys_path);
    Ok(keys)
}
/// Splits an optional auth-domain suffix ("=svc" / "=ldap") off the login
/// name. Returns the bare username plus the suffix when present; any other
/// trailing "=..." segment is left attached to the username.
fn parse_auth_username(username: &str) -> (&str, Option<&str>) {
    let Some(idx) = username.rfind('=') else {
        return (username, None);
    };
    let (base, suffix) = username.split_at(idx);
    if suffix == AUTH_SUFFIX_SVC || suffix == AUTH_SUFFIX_LDAP {
        (base, Some(suffix))
    } else {
        (username, None)
    }
}
/// Validates an OpenSSH user certificate against the trusted CA set:
/// signing-CA identity, validity window, principal list, certificate type,
/// and critical options. A plain (non-certificate) key yields Ok(false).
fn validate_ssh_certificate(
    russh_key: &PublicKey,
    trusted_cas: &[ssh_key::PublicKey],
    raw_username: &str,
) -> Result<bool, ServerError> {
    let (username, _suffix) = parse_auth_username(raw_username);
    let key_bytes = russh_key.public_key_bytes();
    // A plain public key won't parse as a certificate; that's not an error here.
    let cert = match Certificate::from_bytes(&key_bytes) {
        Ok(c) => c,
        Err(_) => {
            debug!("Provided key is not a certificate. Skipping cert validation.");
            return Ok(false);
        }
    };
    debug!("Verifying SSH Certificate: KeyID='{}', Serial={}", cert.comment(), cert.serial());
    // The certificate must name one of the configured CA keys as its signer.
    // NOTE(review): only the signer's key identity is compared here; the CA
    // signature itself does not appear to be cryptographically verified in
    // this function — confirm russh performs that check upstream.
    let mut signature_valid = false;
    let signature_key = cert.signature_key();
    for ca in trusted_cas {
        if ca.key_data() == signature_key {
            signature_valid = true;
            debug!("Certificate signed by trusted CA: {}", ca.fingerprint(Default::default()));
            break;
        }
    }
    if !signature_valid {
        warn!("Certificate signer not found in trusted CAs");
        return Ok(false);
    }
    // Enforce the certificate's validity window.
    let now = SystemTime::now();
    let valid_after = SystemTime::UNIX_EPOCH + std::time::Duration::from_secs(cert.valid_after());
    let valid_before = SystemTime::UNIX_EPOCH + std::time::Duration::from_secs(cert.valid_before());
    if now < valid_after {
        warn!("Certificate is not yet valid (valid after {:?})", valid_after);
        return Ok(false);
    }
    if now > valid_before {
        warn!("Certificate has expired (valid until {:?})", valid_before);
        return Ok(false);
    }
    // The login name must appear in the certificate's principal list.
    if !cert.valid_principals().contains(&username.to_string()) {
        warn!(
            "Certificate does not authorize user '{}'. Principals: {:?}",
            username,
            cert.valid_principals()
        );
        return Ok(false);
    }
    // Only user certificates may authenticate; host certs are rejected.
    match cert.cert_type() {
        CertType::User => {}
        _ => {
            warn!("Certificate is not a User certificate");
            return Ok(false);
        }
    }
    // Unknown critical options cause rejection (per the OpenSSH cert protocol).
    for (name, _value) in cert.critical_options().iter() {
        if name.as_str() == CRITICAL_OPTION_SOURCE_ADDRESS {
            // NOTE(review): source-address is recognized but its CIDR list is
            // not checked against the client's address — confirm whether
            // enforcement is planned; ignoring it weakens the option.
        } else {
            warn!("Rejecting certificate due to unsupported critical option: {}", name);
            return Ok(false);
        }
    }
    info!("SSH Certificate validation successful for user '{}'", username);
    Ok(true)
}
fn compare_keys(stored_key: &str, client_key_base64: &str) -> bool {
let stored_key_parts: Vec<&str> = stored_key.split_whitespace().collect();
if stored_key_parts.is_empty() {
return false;
}
let stored_key_data = stored_key_parts.get(1).unwrap_or(&stored_key);
if *stored_key_data == client_key_base64 {
return true;
}
if let Ok(stored_bytes) = BASE64.decode(stored_key_data)
&& let Ok(client_bytes) = BASE64.decode(client_key_base64)
{
return stored_bytes == client_bytes;
}
false
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/sftp/handler.rs | rustfs/src/protocols/sftp/handler.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::protocols::client::s3::ProtocolS3Client;
use crate::protocols::gateway::action::S3Action;
use crate::protocols::gateway::authorize::authorize_operation;
use crate::protocols::gateway::error::map_s3_error_to_sftp_status;
use crate::protocols::session::context::SessionContext;
use futures::TryStreamExt;
use russh_sftp::protocol::{Attrs, Data, File, FileAttributes, Handle, Name, OpenFlags, Status, StatusCode, Version};
use russh_sftp::server::Handler;
use rustfs_utils::path;
use s3s::S3ErrorCode;
use s3s::dto::{DeleteBucketInput, DeleteObjectInput, GetObjectInput, ListObjectsV2Input, PutObjectInput, StreamingBlob};
use std::collections::HashMap;
use std::future::Future;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};
use tokio::fs::{File as TokioFile, OpenOptions};
use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt};
use tokio::sync::RwLock;
use tokio_util::io::StreamReader;
use tracing::{debug, error, trace};
use uuid::Uuid;
// First value handed out by the handle-id counter.
const INITIAL_HANDLE_ID: u32 = 1;
// Virtual filesystem root: the bucket namespace.
const ROOT_PATH: &str = "/";
const CURRENT_DIR: &str = ".";
const PARENT_DIR: &str = "..";
// Prefix for generated SFTP handle identifiers ("handle_<n>").
const HANDLE_ID_PREFIX: &str = "handle_";
const PATH_SEPARATOR: &str = "/";
// Any path containing this component is rejected (traversal guard).
const PERMISSION_DENIED_PATH: &str = "..";
// POSIX mode/permission bits reported in stat results.
const DIR_MODE: u32 = 0o040000;
const FILE_MODE: u32 = 0o100000;
const DIR_PERMISSIONS: u32 = 0o755;
const FILE_PERMISSIONS: u32 = 0o644;
/// State associated with an open file handle
#[derive(Debug)]
enum HandleState {
    /// Download handle: reads are served via S3 GetObject.
    Read {
        path: String,
        bucket: String,
        key: String,
    },
    /// Upload handle: writes land in a local temp file that is pushed to S3
    /// as a single PutObject when the handle is closed.
    Write {
        path: String,
        bucket: String,
        key: String,
        temp_file_path: PathBuf,
        file_handle: Option<TokioFile>,
    },
    /// Directory-listing handle with a pre-computed entry list and a cursor.
    Dir {
        path: String,
        files: Vec<File>,
        offset: usize,
    },
}
/// SFTP request handler bridging russh_sftp to the S3 storage backend.
#[derive(Clone)]
pub struct SftpHandler {
    /// Authenticated session (identity, protocol, client IP).
    session_context: SessionContext,
    /// Open file/dir handles keyed by generated handle id.
    handles: Arc<RwLock<HashMap<String, HandleState>>>,
    /// Monotonic counter backing handle-id generation.
    next_handle_id: Arc<AtomicU32>,
    /// Directory used to stage uploads before PutObject.
    temp_dir: PathBuf,
    /// Current working directory used to resolve relative paths.
    current_dir: Arc<RwLock<String>>,
    /// Storage backend used to build per-request S3 clients.
    fs: crate::storage::ecfs::FS,
}
impl SftpHandler {
    /// Creates a handler for an authenticated session. Uploads are staged in
    /// the OS temp directory; the working directory starts at "/".
    pub fn new(session_context: SessionContext) -> Self {
        let fs = crate::storage::ecfs::FS {};
        Self {
            session_context,
            handles: Arc::new(RwLock::new(HashMap::new())),
            next_handle_id: Arc::new(AtomicU32::new(INITIAL_HANDLE_ID)),
            temp_dir: std::env::temp_dir(),
            current_dir: Arc::new(RwLock::new(ROOT_PATH.to_string())),
            fs,
        }
    }

    /// Builds an S3 client acting on behalf of the session's access key.
    fn create_s3_client(&self) -> ProtocolS3Client {
        ProtocolS3Client::new(self.fs.clone(), self.session_context.access_key().to_string())
    }

    /// Splits an SFTP path into (bucket, optional object key). Any path
    /// containing ".." is rejected to block directory traversal.
    fn parse_path(&self, path_str: &str) -> Result<(String, Option<String>), StatusCode> {
        if path_str.contains(PERMISSION_DENIED_PATH) {
            return Err(StatusCode::PermissionDenied);
        }
        // Clean the path to normalize
        let cleaned_path = path::clean(path_str);
        let (bucket, object) = path::path_to_bucket_object(&cleaned_path);
        // An empty object component means the path addresses the bucket itself.
        let key = if object.is_empty() { None } else { Some(object) };
        debug!(
            "SFTP parse_path - input: '{}', cleaned: '{}', bucket: '{}', key: {:?}",
            path_str, cleaned_path, bucket, key
        );
        Ok((bucket, key))
    }

    /// Produces a unique handle identifier ("handle_<n>").
    fn generate_handle_id(&self) -> String {
        let id = self.next_handle_id.fetch_add(1, Ordering::Relaxed);
        format!("{}{}", HANDLE_ID_PREFIX, id)
    }

    /// Convert relative path to absolute path based on current directory
    async fn resolve_path(&self, path_str: &str) -> String {
        let current = self.current_dir.read().await;
        if path_str.starts_with(PATH_SEPARATOR) {
            // Absolute path
            return path::clean(path_str).to_string();
        }
        // Relative path
        if path_str == CURRENT_DIR {
            current.clone()
        } else if path_str == PARENT_DIR {
            if *current == ROOT_PATH {
                ROOT_PATH.to_string()
            } else {
                // current originates from a UTF-8 String, so to_str() cannot fail here.
                let parent = std::path::Path::new(&*current)
                    .parent()
                    .map(|p| p.to_str().unwrap())
                    .unwrap_or(ROOT_PATH);
                path::clean(parent).to_string()
            }
        } else {
            // Join current directory with path
            let joined = if *current == ROOT_PATH {
                format!("{}{}", PATH_SEPARATOR, path_str.trim_start_matches(PATH_SEPARATOR))
            } else {
                format!(
                    "{}{}{}",
                    current.trim_end_matches(PATH_SEPARATOR),
                    PATH_SEPARATOR,
                    path_str.trim_start_matches(PATH_SEPARATOR)
                )
            };
            path::clean(&joined).to_string()
        }
    }

    /// Removes the temp file backing a write handle; other handle states
    /// require no cleanup.
    async fn cleanup_state(&self, state: HandleState) {
        if let HandleState::Write { temp_file_path, .. } = state {
            let _ = tokio::fs::remove_file(temp_file_path).await;
        }
    }

    /// Stat backed by S3: the root is a synthetic directory, a bare bucket
    /// maps to HeadBucket, and bucket/key maps to HeadObject. Missing targets
    /// surface as NoSuchFile; authorization failures as PermissionDenied.
    async fn do_stat(&self, path: String) -> Result<FileAttributes, StatusCode> {
        debug!("SFTP do_stat - input path: '{}'", path);
        let (bucket, key_opt) = self.parse_path(&path)?;
        // Root ("/") is a synthetic directory that always exists.
        if bucket.is_empty() {
            let mut attrs = FileAttributes::default();
            attrs.set_dir(true);
            attrs.size = Some(0);
            let current_mode = attrs.permissions.unwrap_or(0);
            attrs.permissions = Some(current_mode | DIR_MODE | DIR_PERMISSIONS);
            return Ok(attrs);
        }
        let action = if key_opt.is_none() {
            S3Action::HeadBucket
        } else {
            S3Action::HeadObject
        };
        debug!("SFTP do_stat - parsed bucket: '{}', key: {:?}, action: {:?}", bucket, key_opt, action);
        authorize_operation(&self.session_context, &action, &bucket, key_opt.as_deref())
            .await
            .map_err(|_| StatusCode::PermissionDenied)?;
        let s3_client = self.create_s3_client();
        match action {
            S3Action::HeadBucket => {
                let input = s3s::dto::HeadBucketInput {
                    bucket,
                    ..Default::default()
                };
                match s3_client.head_bucket(input).await {
                    Ok(_) => {
                        let mut attrs = FileAttributes::default();
                        attrs.set_dir(true);
                        attrs.size = Some(0);
                        attrs.permissions = Some(DIR_PERMISSIONS | DIR_MODE);
                        attrs.mtime = Some(0);
                        Ok(attrs)
                    }
                    Err(_) => Err(StatusCode::NoSuchFile),
                }
            }
            S3Action::HeadObject => {
                let key = key_opt.expect("key_opt should be Some for HeadObject action");
                let input = s3s::dto::HeadObjectInput {
                    bucket,
                    key,
                    ..Default::default()
                };
                match s3_client.head_object(input).await {
                    Ok(out) => {
                        let mut attrs = FileAttributes::default();
                        attrs.set_dir(false);
                        attrs.size = Some(out.content_length.unwrap_or(0) as u64);
                        if let Some(lm) = out.last_modified {
                            let dt = time::OffsetDateTime::from(lm);
                            // NOTE(review): SFTP v3 mtime is u32; this cast
                            // truncates timestamps past 2106.
                            attrs.mtime = Some(dt.unix_timestamp() as u32);
                        }
                        attrs.permissions = Some(FILE_PERMISSIONS | FILE_MODE);
                        Ok(attrs)
                    }
                    Err(_) => Err(StatusCode::NoSuchFile),
                }
            }
            _ => {
                // Defensive: `action` is constructed above as HeadBucket/HeadObject only.
                error!("SFTP do_stat - Unexpected action type");
                Err(StatusCode::Failure)
            }
        }
    }
}
impl Handler for SftpHandler {
type Error = StatusCode;
/// Error returned for SFTP operations this handler does not implement.
fn unimplemented(&self) -> Self::Error {
    StatusCode::OpUnsupported
}
/// Protocol handshake: acknowledges the client's version and advertises
/// ours with no extensions.
async fn init(&mut self, version: u32, _extensions: HashMap<String, String>) -> Result<Version, Self::Error> {
    trace!("SFTP Init version: {}", version);
    Ok(Version::new())
}
/// Opens a file for reading or writing. Writes are staged in a fresh temp
/// file and uploaded to S3 on close; reads go straight to S3. Opening the
/// root or a bare bucket as a file, or any APPEND request, is refused.
fn open(
    &mut self,
    id: u32,
    filename: String,
    pflags: OpenFlags,
    _attrs: FileAttributes,
) -> impl Future<Output = Result<Handle, Self::Error>> + Send {
    let this = self.clone();
    async move {
        debug!("SFTP Open: {} (flags: {:?})", filename, pflags);
        // Resolve relative path to absolute path
        let resolved_filename = this.resolve_path(&filename).await;
        let (bucket, key_opt) = this.parse_path(&resolved_filename)?;
        if bucket.is_empty() {
            return Err(StatusCode::PermissionDenied); // Cannot open root directory as file
        }
        let key = key_opt.ok_or(StatusCode::PermissionDenied)?; // Cannot open bucket as file
        let handle_id = this.generate_handle_id();
        let state;
        // Any write-ish flag routes to the upload (temp-file) path.
        if pflags.contains(OpenFlags::WRITE) || pflags.contains(OpenFlags::CREATE) || pflags.contains(OpenFlags::TRUNCATE) {
            let action = S3Action::PutObject;
            authorize_operation(&this.session_context, &action, &bucket, Some(&key))
                .await
                .map_err(|_| StatusCode::PermissionDenied)?;
            // Append cannot be expressed as a single S3 PutObject upload.
            if pflags.contains(OpenFlags::APPEND) {
                return Err(StatusCode::OpUnsupported);
            }
            let temp_filename = format!("rustfs-sftp-{}.tmp", Uuid::new_v4());
            let temp_path = this.temp_dir.join(temp_filename);
            let file = OpenOptions::new()
                .read(true)
                .write(true)
                .create(true)
                .truncate(true)
                .open(&temp_path)
                .await
                .map_err(|e| {
                    error!("Failed to create temp file: {}", e);
                    StatusCode::Failure
                })?;
            state = HandleState::Write {
                path: filename.clone(),
                bucket,
                key,
                temp_file_path: temp_path,
                file_handle: Some(file),
            };
        } else {
            let action = S3Action::GetObject;
            authorize_operation(&this.session_context, &action, &bucket, Some(&key))
                .await
                .map_err(|_| StatusCode::PermissionDenied)?;
            state = HandleState::Read {
                path: filename.clone(),
                bucket,
                key,
            };
        }
        this.handles.write().await.insert(handle_id.clone(), state);
        Ok(Handle { id, handle: handle_id })
    }
}
/// Closes a handle. For write handles this finalizes the upload: flush the
/// staged temp file, stream it to S3 via PutObject, then delete the temp
/// file. Read/dir handles are simply dropped (with temp-file cleanup where
/// applicable). Unknown handles yield NoSuchFile.
fn close(&mut self, id: u32, handle: String) -> impl Future<Output = Result<Status, Self::Error>> + Send {
    let this = self.clone();
    async move {
        let state = this.handles.write().await.remove(&handle);
        match state {
            Some(HandleState::Write {
                bucket,
                key,
                temp_file_path,
                mut file_handle,
                ..
            }) => {
                let mut file = file_handle.take().ok_or(StatusCode::Failure)?;
                if let Err(e) = file.flush().await {
                    error!("Flush to disk failed: {}", e);
                    let _ = tokio::fs::remove_file(&temp_file_path).await;
                    return Err(StatusCode::Failure);
                }
                let metadata = file.metadata().await.map_err(|e| {
                    error!("Failed to get metadata: {}", e);
                    StatusCode::Failure
                })?;
                let file_size = metadata.len();
                // Rewind so the upload stream starts from the first byte.
                if let Err(e) = file.seek(std::io::SeekFrom::Start(0)).await {
                    error!("Seek temp file failed: {}", e);
                    let _ = tokio::fs::remove_file(&temp_file_path).await;
                    return Err(StatusCode::Failure);
                }
                let s3_client = this.create_s3_client();
                let stream = tokio_util::io::ReaderStream::new(file);
                let body = StreamingBlob::wrap(stream);
                let input = PutObjectInput::builder()
                    .bucket(bucket.clone())
                    .key(key.clone())
                    .body(Option::from(body))
                    .content_length(Option::from(file_size as i64)) // tell S3 the upload size up front
                    .build()
                    .unwrap();
                let result = match s3_client.put_object(input).await {
                    Ok(_) => Status {
                        id,
                        status_code: StatusCode::Ok,
                        error_message: "Success".into(),
                        language_tag: "en".into(),
                    },
                    Err(e) => {
                        error!("S3 PutObject failed: {}", e);
                        let status_code = map_s3_error_to_sftp_status(&e);
                        // Previously the temp file was leaked on upload
                        // failure; remove it before propagating the error.
                        let _ = tokio::fs::remove_file(&temp_file_path).await;
                        return Err(status_code);
                    }
                };
                let _ = tokio::fs::remove_file(&temp_file_path).await;
                Ok(result)
            }
            Some(state) => {
                this.cleanup_state(state).await;
                Ok(Status {
                    id,
                    status_code: StatusCode::Ok,
                    error_message: "Success".into(),
                    language_tag: "en".into(),
                })
            }
            None => Err(StatusCode::NoSuchFile),
        }
    }
}
/// Reads up to `len` bytes at `offset` from the object behind a read handle
/// via a ranged S3 GetObject. Reads at or past EOF (InvalidRange) map to the
/// SFTP Eof status.
fn read(&mut self, id: u32, handle: String, offset: u64, len: u32) -> impl Future<Output = Result<Data, Self::Error>> + Send {
    let this = self.clone();
    async move {
        // Resolve the handle; only read handles are valid for this op.
        let (bucket, key) = {
            let guard = this.handles.read().await;
            match guard.get(&handle) {
                Some(HandleState::Read { bucket, key, .. }) => (bucket.clone(), key.clone()),
                Some(_) => return Err(StatusCode::OpUnsupported),
                None => return Err(StatusCode::NoSuchFile),
            }
        };
        // A zero-length read has nothing to fetch (and the inclusive range
        // end below would underflow at offset 0).
        if len == 0 {
            return Ok(Data { id, data: Vec::new() });
        }
        let s3_client = this.create_s3_client();
        // HTTP byte ranges are inclusive on both ends.
        let range_end = offset + (len as u64) - 1;
        let mut builder = GetObjectInput::builder();
        builder.set_bucket(bucket);
        builder.set_key(key);
        // Always constrain the request to the requested window. Previously
        // the range was applied only when offset > 0, so a read at offset 0
        // fetched and returned the entire object regardless of `len`.
        if let Ok(range) = s3s::dto::Range::parse(&format!("bytes={}-{}", offset, range_end)) {
            builder.set_range(Some(range));
        }
        let input = builder.build().map_err(|_| StatusCode::Failure)?;
        match s3_client.get_object(input).await {
            Ok(output) => {
                let mut data = Vec::new();
                if let Some(body) = output.body {
                    let stream = body.map_err(std::io::Error::other);
                    let mut reader = StreamReader::new(stream);
                    reader.read_to_end(&mut data).await.map_err(|_| StatusCode::Failure)?;
                }
                Ok(Data { id, data })
            }
            Err(e) => match e.code() {
                // Requesting a range past the end of the object signals EOF.
                S3ErrorCode::InvalidRange => Err(StatusCode::Eof),
                _ => Err(map_s3_error_to_sftp_status(&e)),
            },
        }
    }
}
fn write(
&mut self,
id: u32,
handle: String,
offset: u64,
data: Vec<u8>,
) -> impl Future<Output = Result<Status, Self::Error>> + Send {
let this = self.clone();
async move {
let mut guard = this.handles.write().await;
if let Some(HandleState::Write { file_handle, .. }) = guard.get_mut(&handle) {
if let Some(file) = file_handle {
if let Err(e) = file.seek(std::io::SeekFrom::Start(offset)).await {
error!("File seek failed: {}", e);
return Err(StatusCode::Failure);
}
if let Err(e) = file.write_all(&data).await {
error!("File write failed: {}", e);
return Err(StatusCode::Failure);
}
Ok(Status {
id,
status_code: StatusCode::Ok,
error_message: "Success".into(),
language_tag: "en".into(),
})
} else {
Err(StatusCode::Failure)
}
} else {
Err(StatusCode::NoSuchFile)
}
}
}
fn lstat(&mut self, id: u32, path: String) -> impl Future<Output = Result<Attrs, Self::Error>> + Send {
let this = self.clone();
async move {
let resolved = this.resolve_path(&path).await;
let attrs = this.do_stat(resolved).await?;
Ok(Attrs { id, attrs })
}
}
fn fstat(&mut self, id: u32, handle: String) -> impl Future<Output = Result<Attrs, Self::Error>> + Send {
let this = self.clone();
async move {
let path = {
let guard = this.handles.read().await;
match guard.get(&handle) {
Some(HandleState::Read { path, .. }) => path.clone(),
Some(HandleState::Write { path, .. }) => path.clone(),
Some(HandleState::Dir { path, .. }) => path.clone(),
None => return Err(StatusCode::NoSuchFile),
}
};
let attrs = this.do_stat(path).await?;
Ok(Attrs { id, attrs })
}
}
    /// Opens a directory for listing.
    ///
    /// Two cases:
    ///  * "/" (or "/.") — lists every bucket via `ListBuckets`;
    ///  * "/bucket[/prefix]" — lists one level of a bucket via
    ///    `ListObjectsV2` with a "/" delimiter, mapping common prefixes to
    ///    subdirectories and objects to files.
    /// The resulting entries are cached in a `HandleState::Dir` so that
    /// `readdir` can page through them later.
    fn opendir(&mut self, id: u32, path: String) -> impl Future<Output = Result<Handle, Self::Error>> + Send {
        let this = self.clone();
        async move {
            debug!("SFTP Opendir START: path='{}'", path);
            // Resolve relative path to absolute path
            let resolved_path = this.resolve_path(&path).await;
            debug!("SFTP Opendir - resolved path: '{}'", resolved_path);
            // Handle root directory case - list all buckets
            if resolved_path == "/" || resolved_path == "/." {
                debug!("SFTP Opendir - listing root directory (all buckets)");
                let action = S3Action::ListBuckets;
                authorize_operation(&this.session_context, &action, "", None)
                    .await
                    .map_err(|_| StatusCode::PermissionDenied)?;
                // List all buckets
                let s3_client = this.create_s3_client();
                let input = s3s::dto::ListBucketsInput::builder()
                    .build()
                    .map_err(|_| StatusCode::Failure)?;
                let secret_key = &this.session_context.principal.user_identity.credentials.secret_key;
                let output = s3_client.list_buckets(input, secret_key).await.map_err(|e| {
                    error!("SFTP Opendir - failed to list buckets: {}", e);
                    StatusCode::Failure
                })?;
                let mut files = Vec::new();
                if let Some(buckets) = output.buckets {
                    for bucket in buckets {
                        if let Some(bucket_name) = bucket.name {
                            // Buckets are presented as plain rwxr-xr-x directories.
                            let mut attrs = FileAttributes::default();
                            attrs.set_dir(true);
                            attrs.permissions = Some(0o755);
                            files.push(File {
                                filename: bucket_name.clone(),
                                longname: format!("drwxr-xr-x 2 0 0 0 Dec 28 18:54 {}", bucket_name),
                                attrs,
                            });
                        }
                    }
                }
                let handle_id = this.generate_handle_id();
                let mut guard = this.handles.write().await;
                guard.insert(
                    handle_id.clone(),
                    HandleState::Dir {
                        path: "/".to_string(),
                        files,
                        offset: 0,
                    },
                );
                return Ok(Handle { id, handle: handle_id });
            }
            // Handle bucket directory listing
            let (bucket, key_prefix) = this.parse_path(&resolved_path)?;
            debug!("SFTP Opendir - bucket: '{}', key_prefix: {:?}", bucket, key_prefix);
            let action = S3Action::ListBucket;
            authorize_operation(&this.session_context, &action, &bucket, key_prefix.as_deref())
                .await
                .map_err(|_| StatusCode::PermissionDenied)?;
            let mut builder = s3s::dto::ListObjectsV2Input::builder();
            builder.set_bucket(bucket.clone());
            // Ensure the prefix ends with "/" so listing stays one level deep.
            let prefix = if let Some(ref p) = key_prefix {
                path::retain_slash(p)
            } else {
                String::new()
            };
            if !prefix.is_empty() {
                builder.set_prefix(Some(prefix));
            }
            builder.set_delimiter(Some("/".to_string()));
            let s3_client = this.create_s3_client();
            let input = builder.build().map_err(|_| StatusCode::Failure)?;
            let mut files = Vec::new();
            match s3_client.list_objects_v2(input).await {
                Ok(output) => {
                    // Common prefixes => virtual subdirectories.
                    if let Some(prefixes) = output.common_prefixes {
                        for p in prefixes {
                            if let Some(prefix_str) = p.prefix {
                                // Last path segment of the prefix is the directory name.
                                let name = prefix_str
                                    .trim_end_matches('/')
                                    .split('/')
                                    .next_back()
                                    .unwrap_or("")
                                    .to_string();
                                if !name.is_empty() {
                                    let mut attrs = FileAttributes::default();
                                    attrs.set_dir(true);
                                    attrs.permissions = Some(0o755);
                                    files.push(File {
                                        filename: name.clone(),
                                        longname: format!("drwxr-xr-x 1 rustfs rustfs 0 Jan 1 1970 {}", name),
                                        attrs,
                                    });
                                }
                            }
                        }
                    }
                    // Objects => regular files (directory-marker keys are skipped).
                    if let Some(contents) = output.contents {
                        for obj in contents {
                            if let Some(key) = obj.key {
                                if key.ends_with('/') {
                                    continue;
                                }
                                let name = key.split('/').next_back().unwrap_or("").to_string();
                                let size = obj.size.unwrap_or(0) as u64;
                                let mut attrs = FileAttributes {
                                    size: Some(size),
                                    permissions: Some(0o644),
                                    ..Default::default()
                                };
                                if let Some(lm) = obj.last_modified {
                                    let dt = time::OffsetDateTime::from(lm);
                                    attrs.mtime = Some(dt.unix_timestamp() as u32);
                                }
                                files.push(File {
                                    filename: name.clone(),
                                    longname: format!("-rw-r--r-- 1 rustfs rustfs {} Jan 1 1970 {}", size, name),
                                    attrs,
                                });
                            }
                        }
                    }
                }
                Err(e) => {
                    error!("S3 List failed: {}", e);
                    return Err(StatusCode::Failure);
                }
            }
            let handle_id = this.generate_handle_id();
            // NOTE(review): this stores the ORIGINAL (possibly relative) `path`,
            // while the root branch above stores the resolved "/". Confirm that
            // fstat/do_stat on this handle copes with an unresolved path.
            this.handles
                .write()
                .await
                .insert(handle_id.clone(), HandleState::Dir { path, files, offset: 0 });
            Ok(Handle { id, handle: handle_id })
        }
    }
fn readdir(&mut self, id: u32, handle: String) -> impl Future<Output = Result<Name, Self::Error>> + Send {
let this = self.clone();
async move {
let mut guard = this.handles.write().await;
if let Some(HandleState::Dir { files, offset, .. }) = guard.get_mut(&handle) {
debug!("SFTP Readdir - handle: {}, offset: {}, total files: {}", handle, offset, files.len());
for (i, f) in files.iter().enumerate() {
debug!("SFTP Readdir - file[{}]: filename='{}', longname='{}'", i, f.filename, f.longname);
}
if *offset >= files.len() {
debug!("SFTP Readdir - offset {} >= files length {}, returning empty", offset, files.len());
return Ok(Name { id, files: Vec::new() });
}
let chunk = files[*offset..].to_vec();
debug!("SFTP Readdir - returning {} files (offset {})", chunk.len(), offset);
*offset = files.len();
Ok(Name { id, files: chunk })
} else {
debug!("SFTP Readdir - handle '{}' not found or not a directory handle", handle);
Err(StatusCode::NoSuchFile)
}
}
}
    /// Removes an object, or a bucket when the path has no key component.
    ///
    /// Object deletion is a straight `DeleteObject`. Bucket deletion first
    /// checks that the bucket is empty (best effort) and reports a non-fatal
    /// `Failure` status — rather than a protocol error — when it is not.
    fn remove(&mut self, id: u32, filename: String) -> impl Future<Output = Result<Status, Self::Error>> + Send {
        let this = self.clone();
        async move {
            // Resolve relative path to absolute path
            let resolved_filename = this.resolve_path(&filename).await;
            let (bucket, key_opt) = this.parse_path(&resolved_filename)?;
            if let Some(key) = key_opt {
                // Delete object
                let action = S3Action::DeleteObject;
                authorize_operation(&this.session_context, &action, &bucket, Some(&key))
                    .await
                    .map_err(|_| StatusCode::PermissionDenied)?;
                let input = DeleteObjectInput {
                    bucket,
                    key,
                    ..Default::default()
                };
                let s3_client = this.create_s3_client();
                s3_client.delete_object(input).await.map_err(|e| {
                    error!("SFTP REMOVE - failed to delete object: {}", e);
                    StatusCode::Failure
                })?;
                Ok(Status {
                    id,
                    status_code: StatusCode::Ok,
                    error_message: "Success".into(),
                    language_tag: "en".into(),
                })
            } else {
                // Delete bucket - check if bucket is empty first
                debug!("SFTP REMOVE - attempting to delete bucket: '{}'", bucket);
                let action = S3Action::DeleteBucket;
                authorize_operation(&this.session_context, &action, &bucket, None)
                    .await
                    .map_err(|_| StatusCode::PermissionDenied)?;
                let s3_client = this.create_s3_client();
                // Check if bucket is empty
                // NOTE(review): this check races with concurrent uploads; the
                // delete below is still the authoritative failure point.
                let list_input = ListObjectsV2Input {
                    bucket: bucket.clone(),
                    max_keys: Some(1),
                    ..Default::default()
                };
                match s3_client.list_objects_v2(list_input).await {
                    Ok(output) => {
                        if let Some(objects) = output.contents
                            && !objects.is_empty()
                        {
                            debug!("SFTP REMOVE - bucket '{}' is not empty, cannot delete", bucket);
                            return Ok(Status {
                                id,
                                status_code: StatusCode::Failure,
                                error_message: format!("Bucket '{}' is not empty", bucket),
                                language_tag: "en".into(),
                            });
                        }
                    }
                    Err(e) => {
                        // Best effort only: fall through and let delete_bucket decide.
                        debug!("SFTP REMOVE - failed to list objects: {}", e);
                    }
                }
                // Bucket is empty, delete it
                let delete_bucket_input = DeleteBucketInput {
                    bucket: bucket.clone(),
                    ..Default::default()
                };
                match s3_client.delete_bucket(delete_bucket_input).await {
                    Ok(_) => {
                        debug!("SFTP REMOVE - successfully deleted bucket: '{}'", bucket);
                        Ok(Status {
                            id,
                            status_code: StatusCode::Ok,
                            error_message: "Success".into(),
                            language_tag: "en".into(),
                        })
                    }
                    Err(e) => {
                        error!("SFTP REMOVE - failed to delete bucket '{}': {}", bucket, e);
                        Ok(Status {
                            id,
                            status_code: StatusCode::Failure,
                            error_message: format!("Failed to delete bucket: {}", e),
                            language_tag: "en".into(),
                        })
                    }
                }
            }
        }
    }
fn mkdir(
&mut self,
id: u32,
path: String,
_attrs: FileAttributes,
) -> impl Future<Output = Result<Status, Self::Error>> + Send {
let this = self.clone();
async move {
let (bucket, key_opt) = this.parse_path(&path)?;
if let Some(key) = key_opt {
// Create directory inside bucket
let dir_key = path::retain_slash(&key);
let action = S3Action::PutObject;
authorize_operation(&this.session_context, &action, &bucket, Some(&dir_key))
.await
.map_err(|_| StatusCode::PermissionDenied)?;
let s3_client = this.create_s3_client();
let empty_stream = futures::stream::empty::<Result<bytes::Bytes, std::io::Error>>();
let body = StreamingBlob::wrap(empty_stream);
let input = PutObjectInput {
bucket,
key: dir_key,
body: Some(body),
..Default::default()
};
match s3_client.put_object(input).await {
Ok(_) => Ok(Status {
id,
status_code: StatusCode::Ok,
error_message: "Directory created".into(),
language_tag: "en".into(),
}),
Err(e) => {
error!("SFTP Failed to create directory: {}", e);
Ok(Status {
id,
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/ftps/mod.rs | rustfs/src/protocols/ftps/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! FTPS protocol implementation
pub mod driver;
pub mod server;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/ftps/server.rs | rustfs/src/protocols/ftps/server.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::protocols::ftps::driver::FtpsDriver;
use crate::protocols::session::context::{Protocol as SessionProtocol, SessionContext};
use crate::protocols::session::principal::ProtocolPrincipal;
use libunftp::{
ServerError,
auth::{AuthenticationError, UserDetail},
options::FtpsRequired,
};
use std::fmt::{Debug, Display, Formatter};
use std::net::{IpAddr, SocketAddr};
use std::path::Path;
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::broadcast;
use tracing::{debug, error, info, warn};
const ROOT_PATH: &str = "/";
const DEFAULT_SOURCE_IP: &str = "0.0.0.0";
const PORT_RANGE_SEPARATOR: &str = "-";
const PASSIVE_PORTS_PART_COUNT: usize = 2;
/// FTPS user implementation
///
/// The authenticated principal libunftp carries through every
/// storage-backend call for a session.
#[derive(Debug, Clone)]
pub struct FtpsUser {
    /// Username for the FTP session
    pub username: String,
    /// User's display name
    pub name: Option<String>,
    /// Session context for this user
    pub session_context: SessionContext,
}
impl UserDetail for FtpsUser {
    // Every user is rooted at "/", the virtual bucket namespace.
    fn home(&self) -> Option<&Path> {
        Some(Path::new(ROOT_PATH))
    }
}
impl Display for FtpsUser {
    // Human-readable form used in log lines; includes the display name
    // only when one is set.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match &self.name {
            Some(display_name) => write!(f, "FtpsUser({} - {})", self.username, display_name),
            None => write!(f, "FtpsUser({})", self.username),
        }
    }
}
/// FTPS server initialization error
#[derive(Debug, Error)]
pub enum FtpsInitError {
    /// Socket-bind or other underlying I/O failure.
    #[error("failed to bind address {0}")]
    Bind(#[from] std::io::Error),
    /// Error surfaced by libunftp itself.
    #[error("server error: {0}")]
    Server(#[from] ServerError),
    /// Rejected configuration (missing TLS files, bad port range, ...).
    #[error("invalid FTPS configuration: {0}")]
    InvalidConfig(String),
}
/// FTPS server configuration
#[derive(Debug, Clone)]
pub struct FtpsConfig {
    /// Server bind address
    pub bind_addr: SocketAddr,
    /// Passive port range (e.g., "40000-50000")
    pub passive_ports: Option<String>,
    /// External IP address for passive mode
    pub external_ip: Option<String>,
    /// Whether FTPS is required
    pub ftps_required: bool,
    /// Certificate file path
    pub cert_file: Option<String>,
    /// Private key file path
    pub key_file: Option<String>,
}
impl FtpsConfig {
    /// Checks the configuration for internal consistency.
    ///
    /// Fails when FTPS is mandatory but TLS material is missing, when a
    /// configured certificate/key file does not exist on disk, or when the
    /// passive-port range string is malformed.
    pub async fn validate(&self) -> Result<(), FtpsInitError> {
        if self.ftps_required && (self.cert_file.is_none() || self.key_file.is_none()) {
            return Err(FtpsInitError::InvalidConfig(
                "FTPS is required but certificate or key file is missing".to_string(),
            ));
        }
        if let Some(path) = &self.cert_file {
            if !tokio::fs::try_exists(path).await.unwrap_or(false) {
                return Err(FtpsInitError::InvalidConfig(format!("Certificate file not found: {}", path)));
            }
        }
        if let Some(path) = &self.key_file {
            if !tokio::fs::try_exists(path).await.unwrap_or(false) {
                return Err(FtpsInitError::InvalidConfig(format!("Key file not found: {}", path)));
            }
        }
        // A present-but-invalid range string is a configuration error.
        if self.passive_ports.is_some() {
            self.parse_passive_ports()?;
        }
        Ok(())
    }
    /// Parses `passive_ports` ("start-end") into an inclusive port range.
    fn parse_passive_ports(&self) -> Result<std::ops::RangeInclusive<u16>, FtpsInitError> {
        let Some(ports) = &self.passive_ports else {
            return Err(FtpsInitError::InvalidConfig("No passive ports configured".to_string()));
        };
        let parts: Vec<&str> = ports.split(PORT_RANGE_SEPARATOR).collect();
        if parts.len() != PASSIVE_PORTS_PART_COUNT {
            return Err(FtpsInitError::InvalidConfig(format!(
                "Invalid passive ports format: {}, expected 'start-end'",
                ports
            )));
        }
        let start = parts[0]
            .parse::<u16>()
            .map_err(|e| FtpsInitError::InvalidConfig(format!("Invalid start port: {}", e)))?;
        let end = parts[1]
            .parse::<u16>()
            .map_err(|e| FtpsInitError::InvalidConfig(format!("Invalid end port: {}", e)))?;
        if start > end {
            return Err(FtpsInitError::InvalidConfig("Start port cannot be greater than end port".to_string()));
        }
        Ok(start..=end)
    }
}
/// FTPS server implementation
///
/// Thin wrapper that owns a validated [`FtpsConfig`] and drives a libunftp
/// server instance.
pub struct FtpsServer {
    /// Server configuration
    config: FtpsConfig,
}
impl FtpsServer {
    /// Create a new FTPS server, validating the configuration first.
    pub async fn new(config: FtpsConfig) -> Result<Self, FtpsInitError> {
        config.validate().await?;
        Ok(Self { config })
    }
    /// Start the FTPS server
    ///
    /// Builds the libunftp server (passive ports, external IP, TLS), spawns
    /// its accept loop as a background task, then waits for either the task
    /// to finish or a shutdown signal. On shutdown the task is explicitly
    /// aborted — dropping a tokio `JoinHandle` only *detaches* the task, so
    /// the previous code left the listener running after "shutdown".
    pub async fn start(&self, mut shutdown_rx: broadcast::Receiver<()>) -> Result<(), FtpsInitError> {
        info!("Initializing FTPS server on {}", self.config.bind_addr);
        let mut server_builder =
            libunftp::ServerBuilder::with_authenticator(Box::new(FtpsDriver::new), Arc::new(FtpsAuthenticator::new()));
        // Configure passive ports for data connections
        if let Some(passive_ports) = &self.config.passive_ports {
            let range = self.config.parse_passive_ports()?;
            info!("Configuring FTPS passive ports range: {:?} ({})", range, passive_ports);
            server_builder = server_builder.passive_ports(range);
        } else {
            warn!("No passive ports configured, using system-assigned ports");
        }
        // Configure external IP address for passive mode
        if let Some(ref external_ip) = self.config.external_ip {
            info!("Configuring FTPS external IP for passive mode: {}", external_ip);
            server_builder = server_builder.passive_host(external_ip.as_str());
        }
        // Configure FTPS / TLS
        if let Some(cert) = &self.config.cert_file {
            if let Some(key) = &self.config.key_file {
                debug!("Enabling FTPS with cert: {} and key: {}", cert, key);
                server_builder = server_builder.ftps(cert, key);
                if self.config.ftps_required {
                    info!("FTPS is explicitly required for all connections");
                    server_builder = server_builder.ftps_required(FtpsRequired::All, FtpsRequired::All);
                }
            }
        } else if self.config.ftps_required {
            return Err(FtpsInitError::InvalidConfig("FTPS required but certificates not provided".into()));
        }
        // Build the server instance
        let server = server_builder.build().map_err(FtpsInitError::Server)?;
        // libunftp's listen() binds to the address and runs the loop
        let bind_addr = self.config.bind_addr.to_string();
        let mut server_handle = tokio::spawn(async move {
            if let Err(e) = server.listen(bind_addr).await {
                error!("FTPS server runtime error: {}", e);
                return Err(FtpsInitError::Server(e));
            }
            Ok(())
        });
        // Wait for shutdown signal or server failure. Poll the JoinHandle by
        // `&mut` so it can still be aborted in the shutdown branch.
        tokio::select! {
            result = &mut server_handle => {
                match result {
                    Ok(Ok(())) => {
                        info!("FTPS server stopped normally");
                        Ok(())
                    }
                    Ok(Err(e)) => {
                        error!("FTPS server internal error: {}", e);
                        Err(e)
                    }
                    Err(e) => {
                        error!("FTPS server panic or task cancellation: {}", e);
                        Err(FtpsInitError::Bind(std::io::Error::other(e.to_string())))
                    }
                }
            }
            _ = shutdown_rx.recv() => {
                info!("FTPS server received shutdown signal");
                // Dropping the JoinHandle would merely detach the task and the
                // listener would keep accepting connections; abort it instead.
                server_handle.abort();
                Ok(())
            }
        }
    }
    /// Get server configuration
    pub fn config(&self) -> &FtpsConfig {
        &self.config
    }
}
/// FTPS authenticator implementation
///
/// Stateless: every credential check is delegated to the global IAM system.
#[derive(Debug, Default)]
pub struct FtpsAuthenticator;
impl FtpsAuthenticator {
    /// Create a new FTPS authenticator
    pub fn new() -> Self {
        Self
    }
}
#[async_trait::async_trait]
impl libunftp::auth::Authenticator<FtpsUser> for FtpsAuthenticator {
    /// Authenticate FTP user against RustFS IAM system
    ///
    /// The FTP username is treated as the S3 access key and the FTP password
    /// as the S3 secret key. On success the resulting [`FtpsUser`] carries a
    /// session context bound to the resolved IAM identity.
    async fn authenticate(&self, username: &str, creds: &libunftp::auth::Credentials) -> Result<FtpsUser, AuthenticationError> {
        use rustfs_credentials::Credentials as S3Credentials;
        use rustfs_iam::get;
        debug!("FTPS authentication attempt for user: {}", username);
        // Access IAM system
        let iam_sys = get().map_err(|e| {
            error!("IAM system unavailable during FTPS auth: {}", e);
            AuthenticationError::ImplPropagated("Internal authentication service unavailable".to_string(), Some(Box::new(e)))
        })?;
        // Map FTP credentials to S3 Credentials structure
        // Note: FTP PASSWORD is treated as S3 SECRET KEY
        let s3_creds = S3Credentials {
            access_key: username.to_string(),
            secret_key: creds.password.clone().unwrap_or_default(),
            // Fields below are not used for authentication verification, but for struct compliance
            session_token: String::new(),
            expiration: None,
            status: String::new(),
            parent_user: String::new(),
            groups: None,
            claims: None,
            name: None,
            description: None,
        };
        // First resolve the identity for the access key...
        let (user_identity, is_valid) = iam_sys.check_key(&s3_creds.access_key).await.map_err(|e| {
            error!("IAM check_key failed for {}: {}", username, e);
            AuthenticationError::ImplPropagated("Authentication verification failed".to_string(), Some(Box::new(e)))
        })?;
        if !is_valid {
            warn!("FTPS login failed: Invalid access key '{}'", username);
            return Err(AuthenticationError::BadUser);
        }
        let identity = user_identity.ok_or_else(|| {
            error!("User identity missing despite valid key for {}", username);
            AuthenticationError::BadUser
        })?;
        // ...then verify the secret key matches.
        // Constant time comparison is preferred if available, but for now simple eq
        if !identity.credentials.secret_key.eq(&s3_creds.secret_key) {
            warn!("FTPS login failed: Invalid secret key for '{}'", username);
            return Err(AuthenticationError::BadPassword);
        }
        // Policy conditions relying on `aws:SourceIp` will currently not work correctly for FTP.
        // TODO: Investigate wrapping the authenticator or using Proxy Protocol metadata if available in future libunftp versions.
        let source_ip: IpAddr = DEFAULT_SOURCE_IP.parse().unwrap();
        let session_context =
            SessionContext::new(ProtocolPrincipal::new(Arc::new(identity.clone())), SessionProtocol::Ftps, source_ip);
        let ftps_user = FtpsUser {
            username: username.to_string(),
            name: identity.credentials.name.clone(),
            session_context,
        };
        info!("FTPS user '{}' authenticated successfully", username);
        Ok(ftps_user)
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/ftps/driver.rs | rustfs/src/protocols/ftps/driver.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! FTPS driver implementation
//!
//! This module provides the FTPS driver that integrates with libunftp
//! and translates FTP operations to S3 actions through the gateway.
use crate::protocols::client::s3::ProtocolS3Client;
use crate::protocols::gateway::action::S3Action;
use crate::protocols::gateway::adapter::is_operation_supported;
use crate::protocols::gateway::authorize::authorize_operation;
use crate::protocols::gateway::error::map_s3_error_to_ftps;
use crate::protocols::gateway::restrictions::{get_s3_equivalent_operation, is_ftp_feature_supported};
use crate::protocols::session::context::SessionContext;
use async_trait::async_trait;
use futures::stream;
use futures_util::TryStreamExt;
use libunftp::storage::{Error, ErrorKind, Fileinfo, Metadata, Result, StorageBackend};
use rustfs_utils::path;
use s3s::dto::StreamingBlob;
use s3s::dto::{GetObjectInput, PutObjectInput};
use std::fmt::Debug;
use std::path::{Path, PathBuf};
use tokio::io::AsyncRead;
use tokio_util::io::StreamReader;
use tracing::{debug, error, info, trace};
/// FTPS storage driver implementation
///
/// Bridges libunftp's `StorageBackend` to the S3 gateway: each FTP command is
/// authorized against the session's IAM identity, then translated into the
/// equivalent S3 operation.
#[derive(Debug, Clone)]
pub struct FtpsDriver {
    // Underlying erasure-coded filesystem that per-user S3 clients wrap.
    fs: crate::storage::ecfs::FS,
}
impl FtpsDriver {
    /// Create a new FTPS driver
    pub fn new() -> Self {
        let fs = crate::storage::ecfs::FS {};
        Self { fs }
    }
    /// Validate FTP feature support
    ///
    /// Returns `PermanentFileNotAvailable` for unsupported features, naming
    /// the S3 equivalent operation in the message when one exists.
    fn validate_feature_support(&self, feature: &str) -> Result<()> {
        if !is_ftp_feature_supported(feature) {
            let error_msg = if let Some(s3_equivalent) = get_s3_equivalent_operation(feature) {
                format!("Unsupported FTP feature: {}. S3 equivalent: {}", feature, s3_equivalent)
            } else {
                format!("Unsupported FTP feature: {}", feature)
            };
            error!("{}", error_msg);
            return Err(Error::new(ErrorKind::PermanentFileNotAvailable, error_msg));
        }
        Ok(())
    }
    /// Get SessionContext from User
    fn get_session_context_from_user(&self, user: &super::server::FtpsUser) -> Result<SessionContext> {
        Ok(user.session_context.clone())
    }
    /// Create ProtocolS3Client for the given user
    fn create_s3_client_for_user(&self, user: &super::server::FtpsUser) -> Result<ProtocolS3Client> {
        let session_context = &user.session_context;
        let s3_client = ProtocolS3Client::new(self.fs.clone(), session_context.access_key().to_string());
        Ok(s3_client)
    }
    /// List all buckets (for root path)
    ///
    /// Authorizes `ListBuckets`, then maps each bucket to a directory entry
    /// whose mtime is the bucket creation time (0 when unknown).
    async fn list_buckets(
        &self,
        user: &super::server::FtpsUser,
        session_context: &SessionContext,
    ) -> Result<Vec<Fileinfo<PathBuf, FtpsMetadata>>> {
        let s3_client = self.create_s3_client_for_user(user)?;
        let action = S3Action::ListBuckets;
        if !is_operation_supported(crate::protocols::session::context::Protocol::Ftps, &action) {
            error!("FTPS LIST - ListBuckets operation not supported for FTPS protocol");
            return Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Operation not supported"));
        }
        // Authorize the operation
        match authorize_operation(session_context, &action, "", None).await {
            Ok(_) => debug!("FTPS LIST - ListBuckets authorization successful"),
            Err(e) => {
                error!("FTPS LIST - ListBuckets authorization failed: {}", e);
                return Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Access denied"));
            }
        }
        let mut list_result = Vec::new();
        // List all buckets
        let input = s3s::dto::ListBucketsInput::builder()
            .build()
            .map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Failed to build ListBucketsInput"))?;
        // Get the real secret key from the authenticated user
        let secret_key = &session_context.principal.user_identity.credentials.secret_key;
        debug!(
            "FTPS LIST - calling S3 list_buckets with access_key: {}",
            session_context.principal.access_key()
        );
        match s3_client.list_buckets(input, secret_key).await {
            Ok(output) => {
                debug!(
                    "FTPS LIST - S3 list_buckets succeeded, buckets count: {:?}",
                    output.buckets.as_ref().map(|b| b.len()).unwrap_or(0)
                );
                if let Some(buckets) = output.buckets {
                    for bucket in buckets {
                        if let Some(ref bucket_name) = bucket.name {
                            debug!("FTPS LIST - found bucket: '{}'", bucket_name);
                            let metadata = FtpsMetadata {
                                size: 0,
                                is_directory: true,
                                // Bucket creation time doubles as the mtime shown to clients.
                                modification_time: bucket
                                    .creation_date
                                    .map(|t| {
                                        let offset_datetime: time::OffsetDateTime = t.into();
                                        offset_datetime.unix_timestamp() as u64
                                    })
                                    .unwrap_or(0),
                            };
                            list_result.push(Fileinfo {
                                path: PathBuf::from(bucket_name),
                                metadata,
                            });
                        }
                    }
                }
                Ok(list_result)
            }
            Err(e) => {
                error!("FTPS LIST - Failed to list buckets: {}", e);
                let protocol_error = map_s3_error_to_ftps(&e);
                Err(Error::new(ErrorKind::PermanentFileNotAvailable, protocol_error))
            }
        }
    }
    /// Create bucket
    ///
    /// Authorizes `CreateBucket` for the session, then issues the S3 call.
    async fn create_bucket(&self, user: &super::server::FtpsUser, session_context: &SessionContext, bucket: &str) -> Result<()> {
        let s3_client = self.create_s3_client_for_user(user)?;
        let action = S3Action::CreateBucket;
        if !is_operation_supported(crate::protocols::session::context::Protocol::Ftps, &action) {
            error!("FTPS CREATE_BUCKET - operation not supported for FTPS protocol");
            return Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Operation not supported"));
        }
        // Authorize the operation
        match authorize_operation(session_context, &action, bucket, None).await {
            Ok(_) => debug!("FTPS CREATE_BUCKET - authorization successful"),
            Err(e) => {
                error!("FTPS CREATE_BUCKET - authorization failed: {}", e);
                return Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Access denied"));
            }
        }
        // Create bucket
        let mut input_builder = s3s::dto::CreateBucketInput::builder();
        input_builder.set_bucket(bucket.to_string());
        let input = input_builder
            .build()
            .map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Failed to build CreateBucketInput"))?;
        match s3_client.create_bucket(input).await {
            Ok(_) => {
                debug!("FTPS CREATE_BUCKET - successfully created bucket: '{}'", bucket);
                Ok(())
            }
            Err(e) => {
                error!("FTPS CREATE_BUCKET - failed to create bucket: '{}', error: {}", bucket, e);
                let protocol_error = map_s3_error_to_ftps(&e);
                Err(Error::new(ErrorKind::PermanentFileNotAvailable, protocol_error))
            }
        }
    }
    /// Get bucket and key from path
    ///
    /// Splits "/bucket/key..." into the bucket and an optional object key
    /// (`None` when the path names only a bucket).
    fn parse_path(&self, path_str: &str) -> Result<(String, Option<String>)> {
        debug!("FTPS parse_path - input: '{}'", path_str);
        let (bucket, object) = path::path_to_bucket_object(path_str);
        let key = if object.is_empty() { None } else { Some(object) };
        debug!("FTPS parse_path - bucket: '{}', key: {:?}", bucket, key);
        Ok((bucket, key))
    }
}
#[async_trait]
impl StorageBackend<super::server::FtpsUser> for FtpsDriver {
type Metadata = FtpsMetadata;
    /// Get file metadata
    ///
    /// Paths with an object key map to `HeadObject` (reported as a regular
    /// file); bucket-only paths map to `HeadBucket` (reported as a
    /// directory). Both variants are authorized before the S3 call.
    async fn metadata<P: AsRef<Path> + Send + Debug>(&self, user: &super::server::FtpsUser, path: P) -> Result<Self::Metadata> {
        trace!("FTPS metadata request for path: {:?}", path);
        let s3_client = self.create_s3_client_for_user(user)?;
        let path_str = path.as_ref().to_string_lossy();
        let (bucket, key) = self.parse_path(&path_str)?;
        if let Some(object_key) = key {
            // Object metadata request
            let action = S3Action::HeadObject;
            if !is_operation_supported(crate::protocols::session::context::Protocol::Ftps, &action) {
                return Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Operation not supported"));
            }
            // Authorize the operation
            let session_context = self.get_session_context_from_user(user)?;
            // Log the operation for audit purposes
            debug!(
                "FTPS operation authorized: user={}, action={}, bucket={}, object={}, source_ip={}",
                session_context.access_key(),
                action.as_str(),
                bucket,
                object_key,
                session_context.source_ip
            );
            authorize_operation(&session_context, &action, &bucket, Some(&object_key))
                .await
                .map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Access denied"))?;
            let mut builder = s3s::dto::HeadObjectInput::builder();
            builder.set_bucket(bucket.clone());
            builder.set_key(object_key.clone());
            let input = builder
                .build()
                .map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Failed to build HeadObjectInput"))?;
            match s3_client.head_object(input).await {
                Ok(output) => {
                    let metadata = FtpsMetadata {
                        size: output.content_length.unwrap_or(0) as u64,
                        is_directory: false,
                        // Last-modified time as a unix timestamp; 0 when S3 omits it.
                        modification_time: output
                            .last_modified
                            .map(|t| {
                                let offset_datetime: time::OffsetDateTime = t.into();
                                offset_datetime.unix_timestamp() as u64
                            })
                            .unwrap_or(0),
                    };
                    Ok(metadata)
                }
                Err(e) => {
                    error!("Failed to get object metadata: {}", e);
                    Err(map_s3_error_to_ftps(&e))
                }
            }
        } else {
            // Bucket metadata request
            let action = S3Action::HeadBucket;
            if !is_operation_supported(crate::protocols::session::context::Protocol::Ftps, &action) {
                return Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Operation not supported"));
            }
            // Authorize the operation
            let session_context = self.get_session_context_from_user(user)?;
            authorize_operation(&session_context, &action, &bucket, None)
                .await
                .map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Access denied"))?;
            let mut builder = s3s::dto::HeadBucketInput::builder();
            builder.set_bucket(bucket.clone());
            let input = builder
                .build()
                .map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Failed to build HeadBucketInput"))?;
            match s3_client.head_bucket(input).await {
                Ok(_) => {
                    // Buckets surface as zero-size directories with no mtime.
                    let metadata = FtpsMetadata {
                        size: 0,
                        is_directory: true,
                        modification_time: 0,
                    };
                    Ok(metadata)
                }
                Err(e) => {
                    error!("Failed to get bucket metadata: {}", e);
                    Err(map_s3_error_to_ftps(&e))
                }
            }
        }
    }
/// Get directory listing
async fn list<P: AsRef<Path> + Send + Debug>(
&self,
user: &super::server::FtpsUser,
path: P,
) -> Result<Vec<Fileinfo<PathBuf, Self::Metadata>>> {
info!("FTPS LIST request - user: {}, raw path: {:?}", user.username, path);
let s3_client = self.create_s3_client_for_user(user)?;
let session_context = self.get_session_context_from_user(user)?;
let path_str = path.as_ref().to_string_lossy();
info!("FTPS LIST - parsing path: '{}'", path_str);
// Check if this is root path listing
if path_str == "/" || path_str == "/." {
debug!("FTPS LIST - root path listing (including /.), using ListBuckets");
return self.list_buckets(user, &session_context).await;
}
// Handle paths ending with /., e.g., /testbucket/.
// Remove trailing /. to get the actual path
let cleaned_path = if let Some(stripped) = path_str.strip_suffix("/.") {
info!("FTPS LIST - path ends with /., removing trailing /.");
stripped
} else {
&path_str
};
let (bucket, prefix) = self.parse_path(cleaned_path)?;
debug!("FTPS LIST - parsed bucket: '{}', prefix: {:?}", bucket, prefix);
// Validate feature support
self.validate_feature_support("LIST command")?;
let action = S3Action::ListBucket;
if !is_operation_supported(crate::protocols::session::context::Protocol::Ftps, &action) {
return Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Operation not supported"));
}
// Authorize the operation
debug!("FTPS LIST - authorizing operation for bucket: '{}', prefix: {:?}", bucket, prefix);
match authorize_operation(&session_context, &action, &bucket, prefix.as_deref()).await {
Ok(_) => debug!("FTPS LIST - authorization successful"),
Err(e) => {
error!("FTPS LIST - authorization failed: {}", e);
return Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Access denied"));
}
}
let mut list_result = Vec::new();
// List objects with prefix
let mut builder = s3s::dto::ListObjectsV2Input::builder();
builder.set_bucket(bucket.clone());
builder.set_prefix(prefix.clone());
builder.set_delimiter(Option::from("/".to_string()));
let input = builder
.build()
.map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Failed to build ListObjectsV2Input"))?;
match s3_client.list_objects_v2(input).await {
Ok(output) => {
// Add directories (common prefixes)
if let Some(common_prefixes) = output.common_prefixes {
for prefix_info in common_prefixes {
if let Some(key) = prefix_info.prefix {
let dir_name = key.trim_end_matches('/').to_string();
let metadata = FtpsMetadata {
size: 0,
is_directory: true,
modification_time: 0,
};
list_result.push(Fileinfo {
path: PathBuf::from(dir_name),
metadata,
});
}
}
}
// Add files (objects)
if let Some(contents) = output.contents {
for object in contents {
if let Some(key) = object.key {
let file_name = key;
let metadata = FtpsMetadata {
size: object.size.unwrap_or(0) as u64,
is_directory: false,
modification_time: object
.last_modified
.map(|t| {
let offset_datetime: time::OffsetDateTime = t.into();
offset_datetime.unix_timestamp() as u64
})
.unwrap_or(0),
};
list_result.push(Fileinfo {
path: PathBuf::from(file_name),
metadata,
});
}
}
}
Ok(list_result)
}
Err(e) => {
error!("Failed to list objects: {}", e);
let protocol_error = map_s3_error_to_ftps(&e);
Err(Error::new(ErrorKind::PermanentFileNotAvailable, protocol_error))
}
}
}
/// Get file
async fn get<P: AsRef<Path> + Send + Debug>(
&self,
user: &super::server::FtpsUser,
path: P,
start_pos: u64,
) -> Result<Box<dyn AsyncRead + Send + Sync + Unpin>> {
trace!("FTPS get request for path: {:?} at position: {}", path, start_pos);
let s3_client = self.create_s3_client_for_user(user)?;
let session_context = self.get_session_context_from_user(user)?;
let path_str = path.as_ref().to_string_lossy();
let (bucket, key) = self.parse_path(&path_str)?;
if key.is_none() {
return Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Cannot read bucket as file"));
}
let object_key = key.unwrap();
let action = S3Action::GetObject;
if !is_operation_supported(crate::protocols::session::context::Protocol::Ftps, &action) {
return Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Operation not supported"));
}
// Authorize the operation
authorize_operation(&session_context, &action, &bucket, Some(&object_key))
.await
.map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Access denied"))?;
let mut builder = GetObjectInput::builder();
builder.set_bucket(bucket);
builder.set_key(object_key);
if start_pos > 0
&& let Ok(range) = s3s::dto::Range::parse(&format!("bytes={}-", start_pos))
{
builder.set_range(Some(range));
}
let input = builder
.build()
.map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Failed to build GetObjectInput"))?;
match s3_client.get_object(input).await {
Ok(output) => {
if let Some(body) = output.body {
let stream = body.map_err(std::io::Error::other);
let reader = StreamReader::new(stream);
Ok(Box::new(reader))
} else {
Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Empty object body"))
}
}
Err(e) => {
let protocol_error = map_s3_error_to_ftps(&e);
Err(Error::new(ErrorKind::PermanentFileNotAvailable, protocol_error))
}
}
}
/// Put file
async fn put<P: AsRef<Path> + Send + Debug, R: AsyncRead + Send + Sync + Unpin + 'static>(
&self,
user: &super::server::FtpsUser,
input: R,
path: P,
start_pos: u64,
) -> Result<u64> {
trace!("FTPS put request for path: {:?} at position: {}", path, start_pos);
let s3_client = self.create_s3_client_for_user(user)?;
let session_context = self.get_session_context_from_user(user)?;
let path_str = path.as_ref().to_string_lossy();
let (bucket, key) = self.parse_path(&path_str)?;
if key.is_none() {
return Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Cannot write to bucket directly"));
}
let object_key = key.unwrap();
// Check for append operation (not supported)
if start_pos > 0 {
self.validate_feature_support("APPE command (file append)")?;
}
let action = S3Action::PutObject;
if !is_operation_supported(crate::protocols::session::context::Protocol::Ftps, &action) {
return Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Operation not supported"));
}
// Authorize the operation
authorize_operation(&session_context, &action, &bucket, Some(&object_key))
.await
.map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Access denied"))?;
// Convert AsyncRead to bytes
let bytes_vec = {
let mut buffer = Vec::new();
let mut reader = input;
tokio::io::copy(&mut reader, &mut buffer)
.await
.map_err(|e| Error::new(ErrorKind::TransientFileNotAvailable, e.to_string()))?;
buffer
};
let file_size = bytes_vec.len();
let mut put_builder = PutObjectInput::builder();
put_builder.set_bucket(bucket.clone());
put_builder.set_key(object_key.clone());
put_builder.set_content_length(Some(file_size as i64));
// Create StreamingBlob with known size
let data_bytes = bytes::Bytes::from(bytes_vec);
let stream = stream::once(async move { Ok::<bytes::Bytes, std::io::Error>(data_bytes) });
let streaming_blob = StreamingBlob::wrap(stream);
put_builder.set_body(Some(streaming_blob));
let put_input = put_builder
.build()
.map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Failed to build PutObjectInput"))?;
match s3_client.put_object(put_input).await {
Ok(output) => {
debug!("Successfully put object: {:?}", output);
// Return the size of the uploaded object
Ok(file_size as u64)
}
Err(e) => {
error!("FTPS put - S3 error details: {:?}", e);
let protocol_error = map_s3_error_to_ftps(&e);
Err(Error::new(ErrorKind::PermanentFileNotAvailable, protocol_error))
}
}
}
/// Delete file
async fn del<P: AsRef<Path> + Send + Debug>(&self, user: &super::server::FtpsUser, path: P) -> Result<()> {
trace!("FTPS delete request for path: {:?}", path);
let s3_client = self.create_s3_client_for_user(user)?;
let session_context = self.get_session_context_from_user(user)?;
let path_str = path.as_ref().to_string_lossy();
let (bucket, key) = self.parse_path(&path_str)?;
if key.is_none() {
return Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Cannot delete bucket"));
}
let object_key = key.unwrap();
let action = S3Action::DeleteObject;
if !is_operation_supported(crate::protocols::session::context::Protocol::Ftps, &action) {
return Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Operation not supported"));
}
// Authorize the operation
authorize_operation(&session_context, &action, &bucket, Some(&object_key))
.await
.map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Access denied"))?;
let mut builder = s3s::dto::DeleteObjectInput::builder();
builder.set_bucket(bucket);
builder.set_key(object_key);
let input = builder
.build()
.map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Failed to build DeleteObjectInput"))?;
match s3_client.delete_object(input).await {
Ok(_) => {
debug!("Successfully deleted object");
Ok(())
}
Err(e) => {
error!("Failed to delete object: {}", e);
let protocol_error = map_s3_error_to_ftps(&e);
Err(Error::new(ErrorKind::PermanentFileNotAvailable, protocol_error))
}
}
}
/// Create directory
async fn mkd<P: AsRef<Path> + Send + Debug>(&self, user: &super::server::FtpsUser, path: P) -> Result<()> {
let s3_client = self.create_s3_client_for_user(user)?;
let session_context = self.get_session_context_from_user(user)?;
let path_str = path.as_ref().to_string_lossy();
let (bucket, key) = self.parse_path(&path_str)?;
let dir_key = if let Some(k) = key {
// Creating directory inside bucket
path::retain_slash(&k)
} else {
// Creating bucket - use CreateBucket action instead of PutObject
debug!("FTPS MKDIR - Creating bucket: '{}'", bucket);
return self.create_bucket(user, &session_context, &bucket).await;
};
let action = S3Action::PutObject;
if !is_operation_supported(crate::protocols::session::context::Protocol::Ftps, &action) {
return Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Operation not supported"));
}
// Authorize the operation
authorize_operation(&session_context, &action, &bucket, Some(&dir_key))
.await
.map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Access denied"))?;
// Create directory marker object
let mut input_builder = PutObjectInput::builder();
input_builder.set_bucket(bucket);
input_builder.set_key(dir_key);
input_builder.set_body(Some(StreamingBlob::from(s3s::Body::from(Vec::new()))));
let input = input_builder
.build()
.map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Failed to build PutObjectInput"))?;
match s3_client.put_object(input).await {
Ok(_) => {
debug!("Successfully created directory marker");
Ok(())
}
Err(e) => {
error!("Failed to create directory marker: {}", e);
let protocol_error = map_s3_error_to_ftps(&e);
Err(Error::new(ErrorKind::PermanentFileNotAvailable, protocol_error))
}
}
}
async fn rename<P: AsRef<Path> + Send + Debug>(&self, _user: &super::server::FtpsUser, _from: P, _to: P) -> Result<()> {
// Rename/copy operations are not supported in FTPS
Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Rename operation not supported"))
}
/// Remove directory
async fn rmd<P: AsRef<Path> + Send + Debug>(&self, user: &super::server::FtpsUser, path: P) -> Result<()> {
debug!("FTPS RMD request for path: {:?}", path);
let s3_client = self.create_s3_client_for_user(user)?;
let session_context = self.get_session_context_from_user(user)?;
let path_str = path.as_ref().to_string_lossy();
let (bucket, key) = self.parse_path(&path_str)?;
if let Some(key) = key {
// Remove directory inside bucket
let dir_key = path::retain_slash(&key);
let action = S3Action::DeleteObject;
if !is_operation_supported(crate::protocols::session::context::Protocol::Ftps, &action) {
return Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Operation not supported"));
}
// Authorize the operation
authorize_operation(&session_context, &action, &bucket, Some(&dir_key))
.await
.map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Access denied"))?;
// Save references for debug output after build
let bucket_for_log = bucket.clone();
let dir_key_for_log = dir_key.clone();
let mut builder = s3s::dto::DeleteObjectInput::builder();
builder = builder.bucket(bucket);
builder = builder.key(dir_key);
let input = builder
.build()
.map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Failed to build DeleteObjectInput"))?;
match s3_client.delete_object(input).await {
Ok(_) => {
debug!(
"FTPS RMD - successfully removed directory marker: '{}' in bucket '{}'",
dir_key_for_log, bucket_for_log
);
Ok(())
}
Err(e) => {
error!("FTPS RMD - failed to remove directory marker: {}", e);
let protocol_error = map_s3_error_to_ftps(&e);
Err(Error::new(ErrorKind::PermanentFileNotAvailable, protocol_error))
}
}
} else {
// Delete bucket - check if bucket is empty first
debug!("FTPS RMD - attempting to delete bucket: '{}'", bucket);
let action = S3Action::DeleteBucket;
if !is_operation_supported(crate::protocols::session::context::Protocol::Ftps, &action) {
return Err(Error::new(ErrorKind::PermanentFileNotAvailable, "Operation not supported"));
}
authorize_operation(&session_context, &action, &bucket, None)
.await
.map_err(|_| Error::new(ErrorKind::PermanentFileNotAvailable, "Access denied"))?;
// Check if bucket is empty
let list_input = s3s::dto::ListObjectsV2Input {
bucket: bucket.clone(),
max_keys: Some(1),
..Default::default()
};
match s3_client.list_objects_v2(list_input).await {
Ok(output) => {
if let Some(objects) = output.contents
&& !objects.is_empty()
{
debug!("FTPS RMD - bucket '{}' is not empty, cannot delete", bucket);
return Err(Error::new(
ErrorKind::PermanentFileNotAvailable,
format!("Bucket '{}' is not empty", bucket),
));
}
}
Err(e) => {
debug!("FTPS RMD - failed to list objects: {}", e);
}
}
// Bucket is empty, delete it
let delete_bucket_input = s3s::dto::DeleteBucketInput {
bucket: bucket.clone(),
..Default::default()
};
match s3_client.delete_bucket(delete_bucket_input).await {
Ok(_) => {
debug!("FTPS RMD - successfully deleted bucket: '{}'", bucket);
Ok(())
}
Err(e) => {
error!("FTPS RMD - failed to delete bucket '{}': {}", bucket, e);
let protocol_error = map_s3_error_to_ftps(&e);
Err(Error::new(ErrorKind::PermanentFileNotAvailable, protocol_error))
}
}
}
}
/// Change working directory
async fn cwd<P: AsRef<Path> + Send + Debug>(&self, user: &super::server::FtpsUser, path: P) -> Result<()> {
debug!("FTPS cwd request for path: {:?}", path);
let session_context = self.get_session_context_from_user(user)?;
let path_str = path.as_ref().to_string_lossy();
info!("FTPS cwd - received path: '{}'", path_str);
// Handle special cases
if path_str == "/" || path_str == "/." {
// cd to root directory - always allowed
debug!("FTPS cwd - changing to root directory");
return Ok(());
}
if path_str == "." {
// cd . - stay in current directory
debug!("FTPS cwd - staying in current directory");
return Ok(());
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/gateway/authorize.rs | rustfs/src/protocols/gateway/authorize.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::action::S3Action;
use super::adapter::is_operation_supported;
use crate::protocols::session::context::SessionContext;
use rustfs_credentials;
use rustfs_iam::get;
use rustfs_policy::policy::Args;
use std::collections::HashMap;
use tracing::{debug, error};
/// Check if a principal is allowed to perform an S3 action
pub async fn is_authorized(session_context: &SessionContext, action: &S3Action, bucket: &str, object: Option<&str>) -> bool {
let iam_sys = match get() {
Ok(sys) => sys,
Err(e) => {
error!("IAM system unavailable: {}", e);
return false;
}
};
// Create policy arguments
let mut claims = HashMap::new();
claims.insert(
"principal".to_string(),
serde_json::Value::String(session_context.principal.access_key().to_string()),
);
let policy_action: rustfs_policy::policy::action::Action = action.clone().into();
// Check if user is the owner (admin)
let is_owner = if let Some(global_cred) = rustfs_credentials::get_global_action_cred() {
session_context.principal.access_key() == global_cred.access_key
} else {
false
};
let args = Args {
account: session_context.principal.access_key(),
groups: &session_context.principal.user_identity.credentials.groups,
action: policy_action,
bucket,
conditions: &HashMap::new(),
is_owner,
object: object.unwrap_or(""),
claims: &claims,
deny_only: false,
};
debug!(
"FTPS AUTH - Checking authorization: account={}, action={:?}, bucket='{}', object={:?}",
args.account, args.action, args.bucket, args.object
);
let allowed = iam_sys.is_allowed(&args).await;
debug!("FTPS AUTH - Authorization result: {}", allowed);
allowed
}
/// Unified authorization entry point for all protocols
pub async fn authorize_operation(
session_context: &SessionContext,
action: &S3Action,
bucket: &str,
object: Option<&str>,
) -> Result<(), AuthorizationError> {
// First check if the operation is supported
if !is_operation_supported(session_context.protocol.clone(), action) {
return Err(AuthorizationError::AccessDenied);
}
// Then check IAM authorization
if is_authorized(session_context, action, bucket, object).await {
Ok(())
} else {
Err(AuthorizationError::AccessDenied)
}
}
/// Authorization errors
#[derive(Debug, thiserror::Error)]
pub enum AuthorizationError {
#[error("Access denied")]
AccessDenied,
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/gateway/restrictions.rs | rustfs/src/protocols/gateway/restrictions.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Unsupported FTP features list
pub const UNSUPPORTED_FTP_FEATURES: &[&str] = &[
// Atomic rename operations (must be implemented via CopyObject+DeleteObject)
"Atomic RNFR/RNTO rename",
// File append operations (S3 does not support native append)
"APPE command (file append)",
// POSIX permission operations (S3 uses ACLs and Policies)
"chmod command",
"chown command",
// Symbolic links (S3 object storage does not support)
"SYMLINK creation",
// Hard links (S3 object storage does not support)
"HARD LINK creation",
// File locking (S3 does not support filesystem-level locking)
"File locking mechanism",
// Direct directory rename (must be implemented via object copy)
"Directory atomic rename",
];
/// Check if an FTP feature is supported
pub fn is_ftp_feature_supported(feature: &str) -> bool {
!UNSUPPORTED_FTP_FEATURES.contains(&feature)
}
/// Get S3 equivalent operation for unsupported features
pub fn get_s3_equivalent_operation(unsupported_feature: &str) -> Option<&'static str> {
match unsupported_feature {
"Atomic RNFR/RNTO rename" | "SSH_FXP_RENAME atomic rename" | "Directory atomic rename" => {
Some("Use CopyObject + DeleteObject to implement rename")
}
"APPE command (file append)" | "SSH_FXP_OPEN append mode" => Some("Use PutObject to overwrite the entire object"),
"chmod command"
| "chown command"
| "SSH_FXP_SETSTAT permission modification"
| "SSH_FXP_FSETSTAT permission modification" => Some("Use S3 ACLs or Bucket Policies to manage permissions"),
"SYMLINK creation" | "SSH_FXP_SYMLINK creation" => Some("S3 object storage does not support symbolic links"),
"File locking mechanism" | "SSH_FXP_BLOCK file locking" => {
Some("Use S3 object versioning or conditional writes for concurrency control")
}
_ => None,
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/gateway/adapter.rs | rustfs/src/protocols/gateway/adapter.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Protocol to S3 action adapter
use super::action::S3Action;
use crate::protocols::session::context::Protocol;
pub fn is_operation_supported(protocol: Protocol, action: &S3Action) -> bool {
match protocol {
Protocol::Ftps => match action {
// Bucket operations: FTPS has no native bucket commands, but gateway allows create/delete
S3Action::CreateBucket => true,
S3Action::DeleteBucket => true,
// Object operations: All file operations supported
S3Action::GetObject => true, // RETR command
S3Action::PutObject => true, // STOR and APPE commands both map to PutObject
S3Action::DeleteObject => true, // DELE command
S3Action::HeadObject => true, // SIZE command
// Multipart operations: FTPS has no native multipart upload support
S3Action::CreateMultipartUpload => false,
S3Action::UploadPart => false,
S3Action::CompleteMultipartUpload => false,
S3Action::AbortMultipartUpload => false,
S3Action::ListMultipartUploads => false,
S3Action::ListParts => false,
// ACL operations: FTPS has no native ACL support
S3Action::GetBucketAcl => false,
S3Action::PutBucketAcl => false,
S3Action::GetObjectAcl => false,
S3Action::PutObjectAcl => false,
// Other operations
S3Action::CopyObject => false, // No native copy support in FTPS
S3Action::ListBucket => true, // LIST command
S3Action::ListBuckets => true, // LIST at root level
S3Action::HeadBucket => true, // Can check if directory exists
},
Protocol::Sftp => match action {
// Bucket operations: SFTP can create/delete buckets via mkdir/rmdir
S3Action::CreateBucket => true,
S3Action::DeleteBucket => true,
// Object operations: All file operations supported
S3Action::GetObject => true, // RealPath + Open + Read
S3Action::PutObject => true, // Open + Write
S3Action::DeleteObject => true, // Remove
S3Action::HeadObject => true, // Stat/Fstat
// Multipart operations: SFTP has no native multipart upload support
S3Action::CreateMultipartUpload => false,
S3Action::UploadPart => false,
S3Action::CompleteMultipartUpload => false,
S3Action::AbortMultipartUpload => false,
S3Action::ListMultipartUploads => false,
S3Action::ListParts => false,
// ACL operations: SFTP has no native ACL support
S3Action::GetBucketAcl => false,
S3Action::PutBucketAcl => false,
S3Action::GetObjectAcl => false,
S3Action::PutObjectAcl => false,
// Other operations
S3Action::CopyObject => false, // No remote copy, only local rename
S3Action::ListBucket => true, // Readdir
S3Action::ListBuckets => true, // Readdir at root
S3Action::HeadBucket => true, // Stat on directory
},
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/gateway/error.rs | rustfs/src/protocols/gateway/error.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// FTP error code constants
pub mod ftp_errors {
pub const FILE_NOT_FOUND: &str = "550 File not found";
pub const DIRECTORY_NOT_FOUND: &str = "550 Directory not found";
pub const PERMISSION_DENIED: &str = "550 Permission denied";
pub const DIRECTORY_NOT_EMPTY: &str = "550 Directory not empty";
pub const DIRECTORY_ALREADY_EXISTS: &str = "550 Directory already exists";
pub const INVALID_DIRECTORY_NAME: &str = "553 Invalid directory name";
pub const INVALID_FILE_NAME: &str = "553 Invalid file name";
pub const INVALID_REQUEST: &str = "501 Invalid request";
pub const INTERNAL_SERVER_ERROR: &str = "421 Internal server error";
}
// FTP error messages mapping
pub fn map_s3_error_to_ftp_string(s3_error: &s3s::S3Error) -> String {
match s3_error.code() {
s3s::S3ErrorCode::NoSuchKey => ftp_errors::FILE_NOT_FOUND.to_string(),
s3s::S3ErrorCode::NoSuchBucket => ftp_errors::DIRECTORY_NOT_FOUND.to_string(),
s3s::S3ErrorCode::AccessDenied => ftp_errors::PERMISSION_DENIED.to_string(),
s3s::S3ErrorCode::BucketNotEmpty => ftp_errors::DIRECTORY_NOT_EMPTY.to_string(),
s3s::S3ErrorCode::BucketAlreadyExists => ftp_errors::DIRECTORY_ALREADY_EXISTS.to_string(),
s3s::S3ErrorCode::InvalidBucketName => ftp_errors::INVALID_DIRECTORY_NAME.to_string(),
s3s::S3ErrorCode::InvalidObjectState => ftp_errors::INVALID_FILE_NAME.to_string(),
s3s::S3ErrorCode::InvalidRequest => ftp_errors::INVALID_REQUEST.to_string(),
s3s::S3ErrorCode::InternalError => ftp_errors::INTERNAL_SERVER_ERROR.to_string(),
_ => ftp_errors::INTERNAL_SERVER_ERROR.to_string(),
}
}
/// Map S3Error to FTPS libunftp Error
pub fn map_s3_error_to_ftps(s3_error: &s3s::S3Error) -> libunftp::storage::Error {
use libunftp::storage::{Error, ErrorKind};
match s3_error.code() {
s3s::S3ErrorCode::NoSuchKey | s3s::S3ErrorCode::NoSuchBucket => {
Error::new(ErrorKind::PermanentFileNotAvailable, map_s3_error_to_ftp_string(s3_error))
}
s3s::S3ErrorCode::AccessDenied => Error::new(ErrorKind::PermissionDenied, map_s3_error_to_ftp_string(s3_error)),
s3s::S3ErrorCode::InvalidRequest | s3s::S3ErrorCode::InvalidBucketName | s3s::S3ErrorCode::InvalidObjectState => {
Error::new(ErrorKind::PermanentFileNotAvailable, map_s3_error_to_ftp_string(s3_error))
}
_ => Error::new(ErrorKind::PermanentFileNotAvailable, map_s3_error_to_ftp_string(s3_error)),
}
}
/// Map S3Error directly to SFTP StatusCode
pub fn map_s3_error_to_sftp_status(s3_error: &s3s::S3Error) -> russh_sftp::protocol::StatusCode {
use russh_sftp::protocol::StatusCode;
match s3_error.code() {
s3s::S3ErrorCode::NoSuchKey => StatusCode::NoSuchFile, // SSH_FX_NO_SUCH_FILE (2)
s3s::S3ErrorCode::NoSuchBucket => StatusCode::NoSuchFile, // SSH_FX_NO_SUCH_FILE (2)
s3s::S3ErrorCode::AccessDenied => StatusCode::PermissionDenied, // SSH_FX_PERMISSION_DENIED (3)
s3s::S3ErrorCode::BucketNotEmpty => StatusCode::Failure, // SSH_FX_DIR_NOT_EMPTY (21)
s3s::S3ErrorCode::BucketAlreadyExists => StatusCode::Failure, // SSH_FX_FILE_ALREADY_EXISTS (17)
s3s::S3ErrorCode::InvalidBucketName => StatusCode::Failure, // SSH_FX_INVALID_FILENAME (22)
s3s::S3ErrorCode::InvalidObjectState => StatusCode::Failure, // SSH_FX_INVALID_FILENAME (22)
s3s::S3ErrorCode::InvalidRequest => StatusCode::OpUnsupported, // SSH_FX_OP_UNSUPPORTED (5)
s3s::S3ErrorCode::InternalError => StatusCode::Failure, // SSH_FX_FAILURE (4)
_ => StatusCode::Failure, // SSH_FX_FAILURE as default
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/gateway/mod.rs | rustfs/src/protocols/gateway/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Gateway module for protocol implementations
pub mod action;
pub mod adapter;
pub mod authorize;
pub mod error;
pub mod restrictions;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/gateway/action.rs | rustfs/src/protocols/gateway/action.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_policy::policy::action::S3Action as PolicyS3Action;
/// S3 actions that can be performed through the gateway
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum S3Action {
// Bucket operations
CreateBucket,
DeleteBucket,
ListBucket,
ListBuckets,
HeadBucket,
// Object operations
GetObject,
PutObject,
DeleteObject,
HeadObject,
// Multipart operations
CreateMultipartUpload,
UploadPart,
CompleteMultipartUpload,
AbortMultipartUpload,
ListMultipartUploads,
ListParts,
// ACL operations
GetBucketAcl,
PutBucketAcl,
GetObjectAcl,
PutObjectAcl,
// Other operations
CopyObject,
}
impl From<S3Action> for PolicyS3Action {
fn from(action: S3Action) -> Self {
match action {
S3Action::CreateBucket => PolicyS3Action::CreateBucketAction,
S3Action::DeleteBucket => PolicyS3Action::DeleteBucketAction,
S3Action::ListBucket => PolicyS3Action::ListBucketAction,
S3Action::ListBuckets => PolicyS3Action::ListAllMyBucketsAction,
S3Action::HeadBucket => PolicyS3Action::HeadBucketAction,
S3Action::GetObject => PolicyS3Action::GetObjectAction,
S3Action::PutObject => PolicyS3Action::PutObjectAction,
S3Action::DeleteObject => PolicyS3Action::DeleteObjectAction,
S3Action::HeadObject => PolicyS3Action::GetObjectAction,
S3Action::CreateMultipartUpload => PolicyS3Action::PutObjectAction,
S3Action::UploadPart => PolicyS3Action::PutObjectAction,
S3Action::CompleteMultipartUpload => PolicyS3Action::PutObjectAction,
S3Action::AbortMultipartUpload => PolicyS3Action::AbortMultipartUploadAction,
S3Action::ListMultipartUploads => PolicyS3Action::ListBucketMultipartUploadsAction,
S3Action::ListParts => PolicyS3Action::ListMultipartUploadPartsAction,
S3Action::GetBucketAcl => PolicyS3Action::GetBucketPolicyAction,
S3Action::PutBucketAcl => PolicyS3Action::PutBucketPolicyAction,
S3Action::GetObjectAcl => PolicyS3Action::GetObjectAction,
S3Action::PutObjectAcl => PolicyS3Action::PutObjectAction,
S3Action::CopyObject => PolicyS3Action::PutObjectAction,
}
}
}
impl From<S3Action> for rustfs_policy::policy::action::Action {
fn from(action: S3Action) -> Self {
rustfs_policy::policy::action::Action::S3Action(action.into())
}
}
impl S3Action {
/// Get the string representation of the action
pub fn as_str(&self) -> &'static str {
match self {
S3Action::CreateBucket => "s3:CreateBucket",
S3Action::DeleteBucket => "s3:DeleteBucket",
S3Action::ListBucket => "s3:ListBucket",
S3Action::ListBuckets => "s3:ListAllMyBuckets",
S3Action::HeadBucket => "s3:ListBucket",
S3Action::GetObject => "s3:GetObject",
S3Action::PutObject => "s3:PutObject",
S3Action::DeleteObject => "s3:DeleteObject",
S3Action::HeadObject => "s3:GetObject",
S3Action::CreateMultipartUpload => "s3:PutObject",
S3Action::UploadPart => "s3:PutObject",
S3Action::CompleteMultipartUpload => "s3:PutObject",
S3Action::AbortMultipartUpload => "s3:AbortMultipartUpload",
S3Action::ListMultipartUploads => "s3:ListBucketMultipartUploads",
S3Action::ListParts => "s3:ListMultipartUploadParts",
S3Action::GetBucketAcl => "s3:GetBucketAcl",
S3Action::PutBucketAcl => "s3:PutBucketAcl",
S3Action::GetObjectAcl => "s3:GetObjectAcl",
S3Action::PutObjectAcl => "s3:PutObjectAcl",
S3Action::CopyObject => "s3:PutObject",
}
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/session/mod.rs | rustfs/src/protocols/session/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Session management for protocol implementations
pub mod context;
pub mod principal;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/session/context.rs | rustfs/src/protocols/session/context.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Session context for protocol implementations
use crate::protocols::session::principal::ProtocolPrincipal;
use std::net::IpAddr;
/// Protocol types
///
/// Identifies which file-transfer front end a session arrived over.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Protocol {
    /// FTP over TLS.
    Ftps,
    /// SSH File Transfer Protocol.
    Sftp,
}
/// Session context for protocol operations
///
/// Bundles everything a protocol handler needs to attribute and authorize a
/// request: who the caller is, which protocol they used, and where they
/// connected from.
#[derive(Debug, Clone)]
pub struct SessionContext {
    /// The protocol principal (authenticated user)
    pub principal: ProtocolPrincipal,
    /// The protocol type
    pub protocol: Protocol,
    /// The source IP address
    pub source_ip: IpAddr,
}
impl SessionContext {
/// Create a new session context
pub fn new(principal: ProtocolPrincipal, protocol: Protocol, source_ip: IpAddr) -> Self {
Self {
principal,
protocol,
source_ip,
}
}
/// Get the access key for this session
pub fn access_key(&self) -> &str {
self.principal.access_key()
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/session/principal.rs | rustfs/src/protocols/session/principal.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_policy::auth::UserIdentity;
use std::sync::Arc;
/// Protocol principal representing an authenticated user
///
/// Thin wrapper over the IAM `UserIdentity`; the `Arc` lets sessions be
/// cloned cheaply without duplicating credential data.
#[derive(Debug, Clone)]
pub struct ProtocolPrincipal {
    /// User identity from IAM system
    pub user_identity: Arc<UserIdentity>,
}
impl ProtocolPrincipal {
/// Create a new protocol principal
pub fn new(user_identity: Arc<UserIdentity>) -> Self {
Self { user_identity }
}
/// Get the access key for this principal
pub fn access_key(&self) -> &str {
&self.user_identity.credentials.access_key
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/client/s3.rs | rustfs/src/protocols/client/s3.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::storage::ecfs::FS;
use http::{HeaderMap, Method};
use rustfs_credentials;
use s3s::dto::*;
use s3s::{S3, S3Request, S3Result};
use tokio_stream::Stream;
use tracing::trace;
/// S3 client for internal protocol use
///
/// Adapts protocol-layer (FTP/SFTP) file operations onto the standard S3
/// handler path by synthesizing `S3Request`s against the local `FS`.
pub struct ProtocolS3Client {
    /// FS instance for internal operations
    fs: FS,
    /// Access key for the client; used for tracing and, in `list_buckets`,
    /// for building request credentials.
    access_key: String,
}
impl ProtocolS3Client {
/// Create a new protocol S3 client
pub fn new(fs: FS, access_key: String) -> Self {
Self { fs, access_key }
}
/// Get object - maps to S3 GetObject
pub async fn get_object(&self, input: GetObjectInput) -> S3Result<GetObjectOutput> {
trace!(
"Protocol S3 client GetObject request: bucket={}, key={:?}, access_key={}",
input.bucket, input.key, self.access_key
);
// Go through standard S3 API path
let uri: http::Uri = format!("/{}{}", input.bucket, input.key.as_str()).parse().unwrap_or_default();
let req = S3Request {
input,
method: Method::GET,
uri,
headers: HeaderMap::default(),
extensions: http::Extensions::default(),
credentials: None,
region: None,
service: None,
trailing_headers: None,
};
let resp = self.fs.get_object(req).await?;
Ok(resp.output)
}
/// Put object - maps to S3 PutObject
pub async fn put_object(&self, input: PutObjectInput) -> S3Result<PutObjectOutput> {
trace!(
"Protocol S3 client PutObject request: bucket={}, key={:?}, access_key={}",
input.bucket, input.key, self.access_key
);
let uri: http::Uri = format!("/{}{}", input.bucket, input.key.as_str()).parse().unwrap_or_default();
// Set required headers for put operation
let mut headers = HeaderMap::default();
if let Some(ref body) = input.body {
let (lower, upper) = body.size_hint();
if let Some(len) = upper {
headers.insert("content-length", len.to_string().parse().unwrap());
} else if lower > 0 {
headers.insert("content-length", lower.to_string().parse().unwrap());
}
}
let req = S3Request {
input,
method: Method::PUT,
uri,
headers,
extensions: http::Extensions::default(),
credentials: None,
region: None,
service: None,
trailing_headers: None,
};
let resp = self.fs.put_object(req).await?;
Ok(resp.output)
}
/// Delete object - maps to S3 DeleteObject
pub async fn delete_object(&self, input: DeleteObjectInput) -> S3Result<DeleteObjectOutput> {
trace!(
"Protocol S3 client DeleteObject request: bucket={}, key={:?}, access_key={}",
input.bucket, input.key, self.access_key
);
let uri: http::Uri = format!("/{}{}", input.bucket, input.key.as_str()).parse().unwrap_or_default();
let req = S3Request {
input,
method: Method::DELETE,
uri,
headers: HeaderMap::default(),
extensions: http::Extensions::default(),
credentials: None,
region: None,
service: None,
trailing_headers: None,
};
let resp = self.fs.delete_object(req).await?;
Ok(resp.output)
}
/// Head object - maps to S3 HeadObject
pub async fn head_object(&self, input: HeadObjectInput) -> S3Result<HeadObjectOutput> {
trace!(
"Protocol S3 client HeadObject request: bucket={}, key={:?}, access_key={}",
input.bucket, input.key, self.access_key
);
let uri: http::Uri = format!("/{}{}", input.bucket, input.key.as_str()).parse().unwrap_or_default();
let req = S3Request {
input,
method: Method::HEAD,
uri,
headers: HeaderMap::default(),
extensions: http::Extensions::default(),
credentials: None,
region: None,
service: None,
trailing_headers: None,
};
let resp = self.fs.head_object(req).await?;
Ok(resp.output)
}
/// Head bucket - maps to S3 HeadBucket
pub async fn head_bucket(&self, input: HeadBucketInput) -> S3Result<HeadBucketOutput> {
trace!(
"Protocol S3 client HeadBucket request: bucket={}, access_key={}",
input.bucket, self.access_key
);
let uri: http::Uri = format!("/{}", input.bucket).parse().unwrap_or_default();
let req = S3Request {
input,
method: Method::HEAD,
uri,
headers: HeaderMap::default(),
extensions: http::Extensions::default(),
credentials: None,
region: None,
service: None,
trailing_headers: None,
};
let resp = self.fs.head_bucket(req).await?;
Ok(resp.output)
}
/// List objects v2 - maps to S3 ListObjectsV2
pub async fn list_objects_v2(&self, input: ListObjectsV2Input) -> S3Result<ListObjectsV2Output> {
trace!(
"Protocol S3 client ListObjectsV2 request: bucket={}, access_key={}",
input.bucket, self.access_key
);
let uri: http::Uri = format!("/{}?list-type=2", input.bucket).parse().unwrap_or_default();
let req = S3Request {
input,
method: Method::GET,
uri,
headers: HeaderMap::default(),
extensions: http::Extensions::default(),
credentials: None,
region: None,
service: None,
trailing_headers: None,
};
let resp = self.fs.list_objects_v2(req).await?;
Ok(resp.output)
}
/// List buckets - maps to S3 ListBuckets
/// Note: This requires credentials and ReqInfo because list_buckets performs credential validation
pub async fn list_buckets(&self, input: ListBucketsInput, secret_key: &str) -> S3Result<ListBucketsOutput> {
trace!("Protocol S3 client ListBuckets request: access_key={}", self.access_key);
// Create proper credentials with the real secret key from authentication
let credentials = Some(s3s::auth::Credentials {
access_key: self.access_key.clone(),
secret_key: secret_key.to_string().into(),
});
// Check if user is the owner (admin)
let is_owner = if let Some(global_cred) = rustfs_credentials::get_global_action_cred() {
self.access_key == global_cred.access_key
} else {
false
};
// Create ReqInfo for authorization (required by list_buckets)
let mut extensions = http::Extensions::default();
extensions.insert(crate::storage::access::ReqInfo {
cred: Some(rustfs_credentials::Credentials {
access_key: self.access_key.clone(),
secret_key: secret_key.to_string(),
session_token: String::new(),
expiration: None,
status: String::new(),
parent_user: String::new(),
groups: None,
claims: None,
name: None,
description: None,
}),
is_owner,
bucket: None,
object: None,
version_id: None,
region: None,
});
let req = S3Request {
input,
method: Method::GET,
uri: http::Uri::from_static("/"),
headers: HeaderMap::default(),
extensions,
credentials,
region: None,
service: None,
trailing_headers: None,
};
let resp = self.fs.list_buckets(req).await?;
Ok(resp.output)
}
/// Create bucket - maps to S3 CreateBucket
pub async fn create_bucket(&self, input: CreateBucketInput) -> S3Result<CreateBucketOutput> {
trace!(
"Protocol S3 client CreateBucket request: bucket={:?}, access_key={}",
input.bucket, self.access_key
);
let bucket_str = input.bucket.as_str();
let uri: http::Uri = format!("/{}", bucket_str).parse().unwrap_or_default();
let req = S3Request {
input,
method: Method::PUT,
uri,
headers: HeaderMap::default(),
extensions: http::Extensions::default(),
credentials: None,
region: None,
service: None,
trailing_headers: None,
};
let resp = self.fs.create_bucket(req).await?;
Ok(resp.output)
}
/// Delete bucket - maps to S3 DeleteBucket
pub async fn delete_bucket(&self, input: DeleteBucketInput) -> S3Result<DeleteBucketOutput> {
trace!(
"Protocol S3 client DeleteBucket request: bucket={}, access_key={}",
input.bucket, self.access_key
);
let uri: http::Uri = format!("/{}", input.bucket).parse().unwrap_or_default();
let req = S3Request {
input,
method: Method::DELETE,
uri,
headers: HeaderMap::default(),
extensions: http::Extensions::default(),
credentials: None,
region: None,
service: None,
trailing_headers: None,
};
let resp = self.fs.delete_bucket(req).await?;
Ok(resp.output)
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/protocols/client/mod.rs | rustfs/src/protocols/client/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod s3;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/server/readiness.rs | rustfs/src/server/readiness.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use bytes::Bytes;
use http::{Request as HttpRequest, Response, StatusCode};
use http_body::Body;
use http_body_util::{BodyExt, Full};
use hyper::body::Incoming;
use rustfs_common::GlobalReadiness;
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use tower::{Layer, Service};
use tracing::debug;
/// ReadinessGateLayer ensures that the system components (IAM, Storage)
/// are fully initialized before allowing any request to proceed.
///
/// Probe, console, admin and RPC routes are exempt so health checks and
/// administration keep working while the system warms up.
#[derive(Clone)]
pub struct ReadinessGateLayer {
    // Shared readiness flag consulted on every request.
    readiness: Arc<GlobalReadiness>,
}
impl ReadinessGateLayer {
/// Create a new ReadinessGateLayer
/// # Arguments
/// * `readiness` - An Arc to the GlobalReadiness instance
///
/// # Returns
/// A new instance of ReadinessGateLayer
pub fn new(readiness: Arc<GlobalReadiness>) -> Self {
Self { readiness }
}
}
impl<S> Layer<S> for ReadinessGateLayer {
type Service = ReadinessGateService<S>;
/// Wrap the inner service with ReadinessGateService
/// # Arguments
/// * `inner` - The inner service to wrap
/// # Returns
/// An instance of ReadinessGateService
fn layer(&self, inner: S) -> Self::Service {
ReadinessGateService {
inner,
readiness: self.readiness.clone(),
}
}
}
/// Service produced by `ReadinessGateLayer`: rejects non-probe requests with
/// `503 Service Unavailable` until the shared readiness flag reports ready.
#[derive(Clone)]
pub struct ReadinessGateService<S> {
    // The wrapped downstream service.
    inner: S,
    // Shared readiness flag.
    readiness: Arc<GlobalReadiness>,
}
/// Boxed, type-erased error used by the erased response body.
type BoxError = Box<dyn std::error::Error + Send + Sync>;
/// Type-erased response body so the "not ready" and forwarded branches
/// produce the same response type.
type BoxBody = http_body_util::combinators::UnsyncBoxBody<Bytes, BoxError>;
impl<S, B> Service<HttpRequest<Incoming>> for ReadinessGateService<S>
where
    S: Service<HttpRequest<Incoming>, Response = Response<B>> + Clone + Send + 'static,
    S::Future: Send + 'static,
    S::Error: Send + 'static,
    B: Body<Data = Bytes> + Send + 'static,
    B::Error: Into<BoxError> + Send + 'static,
{
    type Response = Response<BoxBody>;
    type Error = S::Error;
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

    /// Readiness is delegated to the wrapped service.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx)
    }

    fn call(&mut self, req: HttpRequest<Incoming>) -> Self::Future {
        // Fix: move the service instance `poll_ready` vouched for into the
        // future, and keep a fresh clone in `self`. A clone of a ready
        // service is not guaranteed to be ready itself (tower's documented
        // pitfall); the caller will poll `self` again before the next call.
        let clone = self.inner.clone();
        let mut inner = std::mem::replace(&mut self.inner, clone);
        let readiness = self.readiness.clone();
        Box::pin(async move {
            let path = req.uri().path();
            debug!("ReadinessGateService: Received request for path: {}", path);
            // 1) Exact match: fixed probe/resource path
            let is_exact_probe = matches!(
                path,
                crate::server::PROFILE_MEMORY_PATH
                    | crate::server::PROFILE_CPU_PATH
                    | crate::server::HEALTH_PREFIX
                    | crate::server::FAVICON_PATH
            );
            // 2) Prefix matching: the entire set of route prefixes (including their subpaths)
            let is_prefix_probe = path.starts_with(crate::server::RUSTFS_ADMIN_PREFIX)
                || path.starts_with(crate::server::CONSOLE_PREFIX)
                || path.starts_with(crate::server::RPC_PREFIX)
                || path.starts_with(crate::server::ADMIN_PREFIX)
                || path.starts_with(crate::server::TONIC_PREFIX);
            let is_probe = is_exact_probe || is_prefix_probe;
            if !is_probe && !readiness.is_ready() {
                // Not ready: short-circuit with 503 + Retry-After so clients
                // back off instead of hitting half-initialized subsystems.
                let body: BoxBody = Full::new(Bytes::from_static(b"Service not ready"))
                    .map_err(|e| -> BoxError { Box::new(e) })
                    .boxed_unsync();
                let resp = Response::builder()
                    .status(StatusCode::SERVICE_UNAVAILABLE)
                    .header(http::header::RETRY_AFTER, "5")
                    .header(http::header::CONTENT_TYPE, "text/plain; charset=utf-8")
                    .header(http::header::CACHE_CONTROL, "no-store")
                    .body(body)
                    .expect("failed to build not ready response");
                return Ok(resp);
            }
            let resp = inner.call(req).await?;
            // System is ready, forward to the actual S3/RPC handlers
            // Transparently converts any response body into a BoxBody, and then Trace/Cors/Compression continues to work
            let (parts, body) = resp.into_parts();
            let body: BoxBody = body.map_err(Into::into).boxed_unsync();
            Ok(Response::from_parts(parts, body))
        })
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/server/event.rs | rustfs/src/server/event.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_config::DEFAULT_DELIMITER;
use rustfs_ecstore::config::GLOBAL_SERVER_CONFIG;
use tracing::{error, info, instrument, warn};
/// Shuts down the event notifier system gracefully
///
/// Safe to call when the notifier was never initialized: in that case the
/// function only logs and returns.
pub(crate) async fn shutdown_event_notifier() {
    info!("Shutting down event notifier system...");
    if !rustfs_notify::is_notification_system_initialized() {
        info!("Event notifier system is not initialized, nothing to shut down.");
        return;
    }
    let Some(system) = rustfs_notify::notification_system() else {
        info!("Event notifier system is not initialized.");
        return;
    };
    // Call the shutdown function from the rustfs_notify module
    system.shutdown().await;
    info!("Event notifier system shut down successfully.");
}
/// Initializes the global event notifier from the already-loaded server
/// configuration. Logs and returns early (never panics) when configuration
/// is missing or initialization fails.
#[instrument]
pub(crate) async fn init_event_notifier() {
    info!(
        target: "rustfs::main::init_event_notifier",
        "Initializing event notifier..."
    );
    // 1. Get the global configuration loaded by ecstore
    let server_config = match GLOBAL_SERVER_CONFIG.get() {
        Some(config) => config.clone(), // Clone the config to pass ownership
        None => {
            warn!("Event notifier initialization failed: Global server config not loaded.");
            return;
        }
    };
    info!(
        target: "rustfs::main::init_event_notifier",
        "Global server configuration loaded successfully"
    );
    // 2. Check if the notify subsystem exists in the configuration, and skip initialization if it doesn't
    // NOTE(review): with `||` this skips whenever EITHER the MQTT or the
    // webhook subsystem is absent. If the intent is "skip only when no
    // notify target at all is configured", this should be `&&` — confirm
    // against the config defaults before changing.
    if server_config
        .get_value(rustfs_config::notify::NOTIFY_MQTT_SUB_SYS, DEFAULT_DELIMITER)
        .is_none()
        || server_config
            .get_value(rustfs_config::notify::NOTIFY_WEBHOOK_SUB_SYS, DEFAULT_DELIMITER)
            .is_none()
    {
        info!(
            target: "rustfs::main::init_event_notifier",
            "'notify' subsystem not configured, skipping event notifier initialization."
        );
        return;
    }
    info!(
        target: "rustfs::main::init_event_notifier",
        "Event notifier configuration found, proceeding with initialization."
    );
    // 3. Initialize the notification system asynchronously with a global configuration
    // Use direct await for better error handling and faster initialization
    if let Err(e) = rustfs_notify::initialize(server_config).await {
        error!("Failed to initialize event notifier system: {}", e);
    } else {
        info!(
            target: "rustfs::main::init_event_notifier",
            "Event notifier system initialized successfully."
        );
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/server/compress.rs | rustfs/src/server/compress.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! HTTP Response Compression Module
//!
//! This module provides configurable HTTP response compression functionality
//! using a whitelist-based approach. Unlike traditional blacklist approaches,
//! this design only compresses explicitly configured content types, which:
//!
//! 1. Preserves Content-Length for all other responses (better browser UX)
//! 2. Aligns with MinIO's opt-in compression behavior
//! 3. Provides fine-grained control over what gets compressed
//!
//! # Configuration
//!
//! Compression can be configured via environment variables or command line options:
//!
//! - `RUSTFS_COMPRESS_ENABLE` - Enable/disable compression (default: off)
//! - `RUSTFS_COMPRESS_EXTENSIONS` - File extensions to compress (e.g., `.txt,.log,.csv`)
//! - `RUSTFS_COMPRESS_MIME_TYPES` - MIME types to compress (e.g., `text/*,application/json`)
//! - `RUSTFS_COMPRESS_MIN_SIZE` - Minimum file size for compression (default: 1000 bytes)
//!
//! # Example
//!
//! ```bash
//! RUSTFS_COMPRESS_ENABLE=on \
//! RUSTFS_COMPRESS_EXTENSIONS=.txt,.log,.csv \
//! RUSTFS_COMPRESS_MIME_TYPES=text/*,application/json \
//! RUSTFS_COMPRESS_MIN_SIZE=1000 \
//! rustfs /data
//! ```
use http::Response;
use rustfs_config::{
DEFAULT_COMPRESS_ENABLE, DEFAULT_COMPRESS_EXTENSIONS, DEFAULT_COMPRESS_MIME_TYPES, DEFAULT_COMPRESS_MIN_SIZE,
ENV_COMPRESS_ENABLE, ENV_COMPRESS_EXTENSIONS, ENV_COMPRESS_MIME_TYPES, ENV_COMPRESS_MIN_SIZE, EnableState,
};
use std::str::FromStr;
use tower_http::compression::predicate::Predicate;
use tracing::debug;
/// Configuration for HTTP response compression.
///
/// This structure holds the whitelist-based compression settings:
/// - File extensions that should be compressed (checked via Content-Disposition header)
/// - MIME types that should be compressed (supports wildcards like `text/*`)
/// - Minimum file size threshold for compression
///
/// When compression is enabled, only responses matching these criteria will be compressed.
/// This approach aligns with MinIO's behavior where compression is opt-in rather than default.
///
/// Construct with `CompressionConfig::from_env` for environment-driven
/// settings, or `Default` for the compile-time defaults.
#[derive(Clone, Debug)]
pub struct CompressionConfig {
    /// Whether compression is enabled
    pub enabled: bool,
    /// File extensions to compress (normalized to lowercase with leading dot)
    pub extensions: Vec<String>,
    /// MIME type patterns to compress (supports wildcards like `text/*`)
    pub mime_patterns: Vec<String>,
    /// Minimum file size (in bytes) for compression
    pub min_size: u64,
}
impl CompressionConfig {
    /// Create a new compression configuration from environment variables
    ///
    /// Reads the following environment variables:
    /// - `RUSTFS_COMPRESS_ENABLE` - Enable/disable compression (default: false)
    /// - `RUSTFS_COMPRESS_EXTENSIONS` - File extensions to compress (default: "")
    /// - `RUSTFS_COMPRESS_MIME_TYPES` - MIME types to compress (default: "text/*,application/json,...")
    /// - `RUSTFS_COMPRESS_MIN_SIZE` - Minimum file size for compression (default: 1000)
    pub fn from_env() -> Self {
        // Read compression enable state; unparseable values fall back to the default.
        let enabled = std::env::var(ENV_COMPRESS_ENABLE)
            .ok()
            .and_then(|v| EnableState::from_str(&v).ok())
            .map(|state| state.is_enabled())
            .unwrap_or(DEFAULT_COMPRESS_ENABLE);
        // Read file extensions; each entry is normalized to lowercase with a leading dot.
        let extensions_str = std::env::var(ENV_COMPRESS_EXTENSIONS).unwrap_or_else(|_| DEFAULT_COMPRESS_EXTENSIONS.to_string());
        let extensions: Vec<String> = if extensions_str.is_empty() {
            Vec::new()
        } else {
            extensions_str
                .split(',')
                .map(|s| {
                    let s = s.trim().to_lowercase();
                    if s.starts_with('.') { s } else { format!(".{s}") }
                })
                .filter(|s| s.len() > 1) // drop empty entries (a bare ".")
                .collect()
        };
        // Read MIME type patterns (lowercased; empty entries dropped)
        let mime_types_str = std::env::var(ENV_COMPRESS_MIME_TYPES).unwrap_or_else(|_| DEFAULT_COMPRESS_MIME_TYPES.to_string());
        let mime_patterns: Vec<String> = if mime_types_str.is_empty() {
            Vec::new()
        } else {
            mime_types_str
                .split(',')
                .map(|s| s.trim().to_lowercase())
                .filter(|s| !s.is_empty())
                .collect()
        };
        // Read minimum file size
        let min_size = std::env::var(ENV_COMPRESS_MIN_SIZE)
            .ok()
            .and_then(|v| v.parse::<u64>().ok())
            .unwrap_or(DEFAULT_COMPRESS_MIN_SIZE);
        Self {
            enabled,
            extensions,
            mime_patterns,
            min_size,
        }
    }

    /// Check if a MIME type matches any of the configured patterns
    /// (exact match, or a `type/*` wildcard; parameters such as
    /// `; charset=utf-8` are ignored).
    pub(crate) fn matches_mime_type(&self, content_type: &str) -> bool {
        let ct_lower = content_type.to_lowercase();
        // Extract the main MIME type (before any parameters like charset)
        let main_type = ct_lower.split(';').next().unwrap_or(&ct_lower).trim();
        for pattern in &self.mime_patterns {
            if pattern.ends_with("/*") {
                // Wildcard pattern like "text/*"
                let prefix = &pattern[..pattern.len() - 1]; // keep trailing '/': "text/"
                if main_type.starts_with(prefix) {
                    return true;
                }
            } else if main_type == pattern {
                // Exact match
                return true;
            }
        }
        false
    }

    /// Check if a filename matches any of the configured extensions
    /// The filename is extracted from Content-Disposition header
    pub(crate) fn matches_extension(&self, filename: &str) -> bool {
        if self.extensions.is_empty() {
            return false;
        }
        let filename_lower = filename.to_lowercase();
        self.extensions.iter().any(|ext| filename_lower.ends_with(ext.as_str()))
    }

    /// Find `needle` in `haystack` ignoring ASCII case, returning the byte
    /// offset in `haystack`.
    ///
    /// This avoids computing offsets on a `to_lowercase()` copy: Unicode
    /// lowercasing can change byte lengths (e.g. 'İ'), so indices from the
    /// copy may not be valid in the original string.
    fn find_ascii_case_insensitive(haystack: &str, needle: &str) -> Option<usize> {
        if needle.is_empty() || haystack.len() < needle.len() {
            return None;
        }
        haystack
            .as_bytes()
            .windows(needle.len())
            .position(|w| w.eq_ignore_ascii_case(needle.as_bytes()))
    }

    /// Extract filename from Content-Disposition header
    /// Format: attachment; filename="example.txt" or attachment; filename=example.txt
    pub(crate) fn extract_filename_from_content_disposition(header_value: &str) -> Option<String> {
        // Bug fix: the previous implementation located "filename=" on a
        // lowercased copy and sliced the original with that byte offset,
        // which can panic or mis-slice for non-ASCII header values. We now
        // search case-insensitively over the original bytes; "filename=" is
        // ASCII, so the resulting offset is always a valid char boundary.
        if let Some(idx) = Self::find_ascii_case_insensitive(header_value, "filename=") {
            let start = idx + "filename=".len();
            let rest = &header_value[start..];
            // Check if it's quoted
            if let Some(stripped) = rest.strip_prefix('"') {
                // Find closing quote
                if let Some(end_quote) = stripped.find('"') {
                    return Some(stripped[..end_quote].to_string());
                }
            } else {
                // Unquoted - take until semicolon or end
                let end = rest.find(';').unwrap_or(rest.len());
                return Some(rest[..end].trim().to_string());
            }
        }
        None
    }
}
impl Default for CompressionConfig {
    /// Default configuration built from the compile-time `rustfs_config`
    /// defaults (same normalization as `from_env`, without reading the
    /// environment).
    fn default() -> Self {
        // Normalize one extension entry: lowercase, leading dot, drop empties.
        let normalize_ext = |s: &str| {
            let s = s.trim().to_lowercase();
            if s.is_empty() {
                None
            } else if s.starts_with('.') {
                Some(s)
            } else {
                Some(format!(".{s}"))
            }
        };
        let extensions = rustfs_config::DEFAULT_COMPRESS_EXTENSIONS
            .split(',')
            .filter_map(normalize_ext)
            .collect();
        let mime_patterns = rustfs_config::DEFAULT_COMPRESS_MIME_TYPES
            .split(',')
            .map(|s| s.trim().to_lowercase())
            .filter(|s| !s.is_empty())
            .collect();
        Self {
            enabled: rustfs_config::DEFAULT_COMPRESS_ENABLE,
            extensions,
            mime_patterns,
            min_size: rustfs_config::DEFAULT_COMPRESS_MIN_SIZE,
        }
    }
}
/// Predicate to determine if a response should be compressed.
///
/// This predicate implements a whitelist-based compression approach:
/// - Only compresses responses that match configured file extensions OR MIME types
/// - Respects minimum file size threshold
/// - Always skips error responses (4xx, 5xx) to avoid Content-Length issues
///
/// # Design Philosophy
/// Unlike the previous blacklist approach, this whitelist approach:
/// 1. Only compresses explicitly configured content types
/// 2. Preserves Content-Length for all other responses (better browser UX)
/// 3. Aligns with MinIO's opt-in compression behavior
///
/// # Note on tower-http Integration
/// The `tower-http::CompressionLayer` automatically handles:
/// - Skipping responses with `Content-Encoding` header (already compressed)
/// - Skipping responses with `Content-Range` header (Range requests)
///
/// These checks are performed before calling this predicate, so we don't need to check them here.
///
/// # Extension Matching
/// File extension matching works by extracting the filename from the
/// `Content-Disposition` response header (e.g., `attachment; filename="file.txt"`).
///
/// # Performance
/// This predicate is evaluated per-response and has O(n) complexity where n is
/// the number of configured extensions/MIME patterns.
#[derive(Clone, Debug)]
pub struct CompressionPredicate {
    // Whitelist settings consulted on every response.
    config: CompressionConfig,
}
impl CompressionPredicate {
    /// Create a new compression predicate with the given configuration
    /// (typically built via `CompressionConfig::from_env`).
    pub fn new(config: CompressionConfig) -> Self {
        Self { config }
    }
}
impl Predicate for CompressionPredicate {
    /// Whitelist decision: compress only successful responses that are large
    /// enough and match a configured extension (via Content-Disposition) or
    /// MIME pattern (via Content-Type).
    fn should_compress<B>(&self, response: &Response<B>) -> bool
    where
        B: http_body::Body,
    {
        // Globally disabled: never compress anything.
        if !self.config.enabled {
            return false;
        }
        let status = response.status();
        // Never compress error responses (4xx and 5xx status codes)
        // This prevents Content-Length mismatch issues with error responses
        if status.is_client_error() || status.is_server_error() {
            debug!("Skipping compression for error response: status={}", status.as_u16());
            return false;
        }
        // Note: CONTENT_ENCODING and CONTENT_RANGE checks are handled by tower-http's
        // CompressionLayer before calling this predicate, so we don't need to check them here.
        let headers = response.headers();
        // Enforce the minimum-size threshold when Content-Length is present and parseable.
        let declared_len = headers
            .get(http::header::CONTENT_LENGTH)
            .and_then(|v| v.to_str().ok())
            .and_then(|s| s.parse::<u64>().ok());
        if let Some(length) = declared_len {
            if length < self.config.min_size {
                debug!(
                    "Skipping compression for small response: size={} bytes, min_size={}",
                    length, self.config.min_size
                );
                return false;
            }
        }
        // Extension whitelist: the filename comes from the Content-Disposition header.
        let filename = headers
            .get(http::header::CONTENT_DISPOSITION)
            .and_then(|v| v.to_str().ok())
            .and_then(CompressionConfig::extract_filename_from_content_disposition);
        if let Some(filename) = filename {
            if self.config.matches_extension(&filename) {
                debug!("Compressing response: filename '{}' matches configured extension", filename);
                return true;
            }
        }
        // MIME whitelist: match the Content-Type against the configured patterns.
        if let Some(ct) = headers.get(http::header::CONTENT_TYPE).and_then(|v| v.to_str().ok()) {
            if self.config.matches_mime_type(ct) {
                debug!("Compressing response: Content-Type '{}' matches configured MIME pattern", ct);
                return true;
            }
        }
        // Default: don't compress (whitelist approach)
        debug!("Skipping compression: response does not match any configured extension or MIME type");
        false
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Defaults: compression disabled, no extensions, built-in MIME patterns,
    /// 1000-byte minimum size.
    #[test]
    fn test_compression_config_default() {
        let config = CompressionConfig::default();
        assert!(!config.enabled);
        assert!(config.extensions.is_empty());
        assert!(!config.mime_patterns.is_empty());
        assert_eq!(config.min_size, 1000);
    }

    /// MIME matching supports `type/*` wildcards, exact matches, parameters
    /// after `;`, and is case-insensitive.
    #[test]
    fn test_compression_config_mime_matching() {
        let config = CompressionConfig {
            enabled: true,
            extensions: vec![],
            mime_patterns: vec!["text/*".to_string(), "application/json".to_string()],
            min_size: 1000,
        };
        // Test wildcard matching
        assert!(config.matches_mime_type("text/plain"));
        assert!(config.matches_mime_type("text/html"));
        assert!(config.matches_mime_type("text/css"));
        assert!(config.matches_mime_type("TEXT/PLAIN")); // case insensitive
        // Test exact matching
        assert!(config.matches_mime_type("application/json"));
        assert!(config.matches_mime_type("application/json; charset=utf-8"));
        // Test non-matching types
        assert!(!config.matches_mime_type("image/png"));
        assert!(!config.matches_mime_type("application/octet-stream"));
        assert!(!config.matches_mime_type("video/mp4"));
    }

    /// Extension matching is suffix-based, path-tolerant, and case-insensitive.
    #[test]
    fn test_compression_config_extension_matching() {
        let config = CompressionConfig {
            enabled: true,
            extensions: vec![".txt".to_string(), ".log".to_string(), ".csv".to_string()],
            mime_patterns: vec![],
            min_size: 1000,
        };
        // Test matching extensions
        assert!(config.matches_extension("file.txt"));
        assert!(config.matches_extension("path/to/file.log"));
        assert!(config.matches_extension("data.csv"));
        assert!(config.matches_extension("FILE.TXT")); // case insensitive
        // Test non-matching extensions
        assert!(!config.matches_extension("image.png"));
        assert!(!config.matches_extension("archive.zip"));
        assert!(!config.matches_extension("document.pdf"));
    }

    /// Filename extraction handles quoted/unquoted values, embedded paths,
    /// mixed-case parameter names, and missing filenames.
    #[test]
    fn test_extract_filename_from_content_disposition() {
        // Quoted filename
        assert_eq!(
            CompressionConfig::extract_filename_from_content_disposition(r#"attachment; filename="example.txt""#),
            Some("example.txt".to_string())
        );
        // Unquoted filename
        assert_eq!(
            CompressionConfig::extract_filename_from_content_disposition("attachment; filename=example.log"),
            Some("example.log".to_string())
        );
        // Filename with path
        assert_eq!(
            CompressionConfig::extract_filename_from_content_disposition(r#"attachment; filename="path/to/file.csv""#),
            Some("path/to/file.csv".to_string())
        );
        // Mixed case
        assert_eq!(
            CompressionConfig::extract_filename_from_content_disposition(r#"Attachment; FILENAME="test.json""#),
            Some("test.json".to_string())
        );
        // No filename
        assert_eq!(CompressionConfig::extract_filename_from_content_disposition("inline"), None);
    }

    /// Empty config strings must produce empty whitelists, which match nothing.
    #[test]
    fn test_compression_config_from_empty_strings() {
        // Simulate config with empty extension and mime strings
        let config = CompressionConfig {
            enabled: true,
            extensions: ""
                .split(',')
                .map(|s| s.trim().to_lowercase())
                .filter(|s| !s.is_empty())
                .collect(),
            mime_patterns: ""
                .split(',')
                .map(|s| s.trim().to_lowercase())
                .filter(|s| !s.is_empty())
                .collect(),
            min_size: 1000,
        };
        assert!(config.extensions.is_empty());
        assert!(config.mime_patterns.is_empty());
        assert!(!config.matches_extension("file.txt"));
        assert!(!config.matches_mime_type("text/plain"));
    }

    /// Extensions are normalized with a leading dot and lowercased.
    #[test]
    fn test_compression_config_extension_normalization() {
        // Extensions should be normalized with leading dot
        let extensions: Vec<String> = "txt,.log,csv"
            .split(',')
            .map(|s| {
                let s = s.trim().to_lowercase();
                if s.starts_with('.') { s } else { format!(".{s}") }
            })
            .filter(|s| s.len() > 1)
            .collect();
        assert_eq!(extensions, vec![".txt", ".log", ".csv"]);
    }

    /// `CompressionPredicate::new` stores the given configuration verbatim.
    #[test]
    fn test_compression_predicate_creation() {
        // Test that CompressionPredicate can be created with various configs
        let config_disabled = CompressionConfig {
            enabled: false,
            extensions: vec![".txt".to_string()],
            mime_patterns: vec!["text/*".to_string()],
            min_size: 0,
        };
        // Configs are moved into the predicate; the previous `.clone()` calls
        // were redundant because the originals were never used afterwards.
        let predicate = CompressionPredicate::new(config_disabled);
        assert!(!predicate.config.enabled);
        let config_enabled = CompressionConfig {
            enabled: true,
            extensions: vec![".txt".to_string(), ".log".to_string()],
            mime_patterns: vec!["text/*".to_string(), "application/json".to_string()],
            min_size: 1000,
        };
        let predicate = CompressionPredicate::new(config_enabled);
        assert!(predicate.config.enabled);
        assert_eq!(predicate.config.extensions.len(), 2);
        assert_eq!(predicate.config.mime_patterns.len(), 2);
        assert_eq!(predicate.config.min_size, 1000);
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/server/audit.rs | rustfs/src/server/audit.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_audit::{AuditError, AuditResult, audit_system, init_audit_system, system::AuditSystemState};
use rustfs_config::DEFAULT_DELIMITER;
use rustfs_ecstore::config::GLOBAL_SERVER_CONFIG;
use tracing::{info, warn};
/// Start the audit system.
///
/// Reads the audit subsystem settings (MQTT/Webhook) from the global server
/// configuration. When neither target is configured, initialization is
/// skipped and `Ok(())` is returned. Errors are returned when the global
/// configuration has not been loaded yet or when the audit system is
/// already running.
pub(crate) async fn start_audit_system() -> AuditResult<()> {
    info!(
        target: "rustfs::main::start_audit_system",
        "Initializing the audit system..."
    );
    // 1. Fetch the global configuration that ecstore loaded at startup.
    let Some(config_ref) = GLOBAL_SERVER_CONFIG.get() else {
        warn!(
            target: "rustfs::main::start_audit_system",
            "Audit system initialization failed: Global server configuration not loaded."
        );
        return Err(AuditError::ConfigNotLoaded);
    };
    info!(
        target: "rustfs::main::start_audit_system",
        "Global server configuration loads successfully: {:?}", config_ref
    );
    let server_config = config_ref.clone();
    info!(
        target: "rustfs::main::start_audit_system",
        "The global server configuration is loaded"
    );
    // 2. Skip initialization entirely when no audit target is configured.
    let mqtt_config = server_config.get_value(rustfs_config::audit::AUDIT_MQTT_SUB_SYS, DEFAULT_DELIMITER);
    let webhook_config = server_config.get_value(rustfs_config::audit::AUDIT_WEBHOOK_SUB_SYS, DEFAULT_DELIMITER);
    if mqtt_config.is_none() && webhook_config.is_none() {
        info!(
            target: "rustfs::main::start_audit_system",
            "Audit subsystem (MQTT/Webhook) is not configured, and audit system initialization is skipped."
        );
        return Ok(());
    }
    info!(
        target: "rustfs::main::start_audit_system",
        "Audit subsystem configuration detected (MQTT: {}, Webhook: {}) and started initializing the audit system.",
        mqtt_config.is_some(),
        webhook_config.is_some()
    );
    // 3. Initialize the audit system, guarding against a double start.
    let system = init_audit_system();
    if system.get_state().await == AuditSystemState::Running {
        warn!(
            target: "rustfs::main::start_audit_system",
            "The audit system is running, skip repeated initialization."
        );
        return Err(AuditError::AlreadyInitialized);
    }
    // Launch with the loaded configuration and report the outcome.
    if let Err(e) = system.start(server_config).await {
        warn!(
            target: "rustfs::main::start_audit_system",
            "Audit system startup failed: {:?}",
            e
        );
        return Err(e);
    }
    info!(
        target: "rustfs::main::start_audit_system",
        "Audit system started successfully with time: {}.",
        chrono::Utc::now()
    );
    Ok(())
}
/// Stop the audit system.
///
/// A no-op (with a warning) when the system was never initialized or has
/// already stopped; otherwise closes the system and logs the stop time.
pub(crate) async fn stop_audit_system() -> AuditResult<()> {
    // Guard: nothing to stop if the system was never initialized.
    let Some(system) = audit_system() else {
        warn!("Audit system not initialized, cannot stop");
        return Ok(());
    };
    // Guard: idempotent when already stopped.
    if system.get_state().await == AuditSystemState::Stopped {
        warn!("Audit system already stopped");
        return Ok(());
    }
    // Shut the system down, propagating any close error to the caller.
    system.close().await?;
    info!("Audit system stopped at {}", chrono::Utc::now());
    Ok(())
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/server/prefix.rs | rustfs/src/server/prefix.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Predefined CPU profiling path for RustFS server.
/// This path is used to access CPU profiling data.
pub(crate) const PROFILE_CPU_PATH: &str = "/profile/cpu";
/// Predefined memory profiling path for RustFS server.
/// This path is used to access memory profiling data.
pub(crate) const PROFILE_MEMORY_PATH: &str = "/profile/memory";
/// Favicon path to handle browser requests for the favicon.
/// This path serves the favicon.ico file.
pub(crate) const FAVICON_PATH: &str = "/favicon.ico";
/// Predefined health check path for RustFS server.
/// This path is used to check the health status of the server.
pub(crate) const HEALTH_PREFIX: &str = "/health";
/// Predefined administrative prefix for RustFS server routes.
/// This prefix is used for endpoints that handle administrative tasks
/// such as configuration, monitoring, and management.
pub(crate) const ADMIN_PREFIX: &str = "/rustfs/admin";
/// Versioned (v3) administrative route prefix for RustFS server.
/// NOTE(review): despite the previous comment, this is a route path
/// constant, not an environment-variable name — confirm intended usage
/// with its callers.
pub(crate) const RUSTFS_ADMIN_PREFIX: &str = "/rustfs/admin/v3";
/// Predefined console prefix for RustFS server routes.
/// This prefix is used for endpoints that handle console-related tasks
/// such as user interface and management.
pub(crate) const CONSOLE_PREFIX: &str = "/rustfs/console";
/// Predefined RPC prefix for RustFS server routes.
/// This prefix is used for endpoints that handle remote procedure calls (RPC).
pub(crate) const RPC_PREFIX: &str = "/rustfs/rpc";
/// Predefined gRPC service prefix for RustFS server.
/// This prefix is used for gRPC service endpoints.
/// For example, the full gRPC method path would be "/node_service.NodeService/MethodName".
pub(crate) const TONIC_PREFIX: &str = "/node_service.NodeService";
/// LOGO art for RustFS server.
pub(crate) const LOGO: &str = r#"
░█▀▄░█░█░█▀▀░▀█▀░█▀▀░█▀▀
░█▀▄░█░█░▀▀█░░█░░█▀▀░▀▀█
░▀░▀░▀▀▀░▀▀▀░░▀░░▀░░░▀▀▀
"#;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/server/http.rs | rustfs/src/server/http.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Ensure the correct path for parse_license is imported
use super::compress::{CompressionConfig, CompressionPredicate};
use crate::admin;
use crate::auth::IAMAuth;
use crate::config;
use crate::server::{ReadinessGateLayer, RemoteAddr, ServiceState, ServiceStateManager, hybrid::hybrid, layer::RedirectLayer};
use crate::storage;
use crate::storage::tonic_service::make_server;
use bytes::Bytes;
use http::{HeaderMap, Request as HttpRequest, Response};
use hyper_util::{
rt::{TokioExecutor, TokioIo},
server::conn::auto::Builder as ConnBuilder,
server::graceful::GracefulShutdown,
service::TowerToHyperService,
};
use metrics::{counter, histogram};
use rustfs_common::GlobalReadiness;
use rustfs_config::{MI_B, RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
use rustfs_protos::proto_gen::node_service::node_service_server::NodeServiceServer;
use rustfs_utils::net::parse_and_resolve_address;
use rustls::ServerConfig;
use s3s::{host::MultiDomain, service::S3Service, service::S3ServiceBuilder};
use socket2::{SockRef, TcpKeepalive};
use std::io::{Error, Result};
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use tokio::net::{TcpListener, TcpStream};
use tokio_rustls::TlsAcceptor;
use tonic::{Request, Status, metadata::MetadataValue};
use tower::ServiceBuilder;
use tower_http::add_extension::AddExtensionLayer;
use tower_http::catch_panic::CatchPanicLayer;
use tower_http::compression::CompressionLayer;
use tower_http::cors::{AllowOrigin, Any, CorsLayer};
use tower_http::request_id::{MakeRequestUuid, PropagateRequestIdLayer, SetRequestIdLayer};
use tower_http::trace::TraceLayer;
use tracing::{Span, debug, error, info, instrument, warn};
/// Build a [`CorsLayer`] from an optionally-configured, comma-separated
/// list of allowed origins.
///
/// `None`, `"*"`, an (unreachable) empty list, or a list containing no
/// parseable origins all fall back to a permissive configuration; otherwise
/// only the valid origins are allowed.
fn parse_cors_origins(origins: Option<&String>) -> CorsLayer {
    use http::Method;
    let base = CorsLayer::new()
        .allow_methods([
            Method::GET,
            Method::POST,
            Method::PUT,
            Method::DELETE,
            Method::HEAD,
            Method::OPTIONS,
        ])
        .allow_headers(Any);
    // No configuration at all: allow everything.
    let Some(origins_str) = origins else {
        debug!("No CORS origins configured for endpoint, using permissive CORS");
        return base.allow_origin(Any).expose_headers(Any);
    };
    // Explicit wildcard: allow everything.
    if origins_str == "*" {
        return base.allow_origin(Any).expose_headers(Any);
    }
    let parts: Vec<&str> = origins_str.split(',').map(|s| s.trim()).collect();
    if parts.is_empty() {
        warn!("Empty CORS origins provided, using permissive CORS");
        return base.allow_origin(Any).expose_headers(Any);
    }
    // Keep only the origins that parse as header values; warn about the rest.
    let mut valid_origins = Vec::new();
    for origin in parts {
        match origin.parse::<http::HeaderValue>() {
            Ok(header_value) => valid_origins.push(header_value),
            Err(e) => warn!("Invalid CORS origin '{}': {}", origin, e),
        }
    }
    if valid_origins.is_empty() {
        warn!("No valid CORS origins found, using permissive CORS");
        return base.allow_origin(Any).expose_headers(Any);
    }
    info!("Endpoint CORS origins configured: {:?}", valid_origins);
    base.allow_origin(AllowOrigin::list(valid_origins)).expose_headers(Any)
}
/// Read the comma-separated CORS allow-list from the environment, falling
/// back to the compiled-in default when the variable is unset or not valid
/// Unicode.
fn get_cors_allowed_origins() -> String {
    // The previous `.parse::<String>()` round-trip was dead code:
    // `FromStr for String` is infallible, so its fallback default
    // (DEFAULT_CONSOLE_CORS_ALLOWED_ORIGINS) could never be returned and
    // only obscured which default actually applies. `unwrap_or_else` also
    // avoids allocating the default eagerly when the env var is set.
    std::env::var(rustfs_config::ENV_CORS_ALLOWED_ORIGINS)
        .unwrap_or_else(|_| rustfs_config::DEFAULT_CORS_ALLOWED_ORIGINS.to_string())
}
/// Start the RustFS HTTP(S) front-end server.
///
/// Binds the configured address (attempting IPv6 dual-stack with IPv4
/// fallback), optionally enables TLS, builds the S3 service, and spawns the
/// accept loop as a background task. The accept loop applies per-socket TCP
/// tuning and hands each connection to `process_connection`.
///
/// Returns a broadcast sender; sending on it (or Ctrl-C / SIGINT / SIGTERM
/// on Unix) triggers a graceful shutdown of the spawned server task.
pub async fn start_http_server(
    opt: &config::Opt,
    worker_state_manager: ServiceStateManager,
    readiness: Arc<GlobalReadiness>,
) -> Result<tokio::sync::broadcast::Sender<()>> {
    let server_addr = parse_and_resolve_address(opt.address.as_str()).map_err(Error::other)?;
    let server_port = server_addr.port();
    // The listening address and port are obtained from the parameters
    let listener = {
        let mut server_addr = server_addr;
        // Try to create a socket for the address family; if that fails, fallback to IPv4.
        let mut socket = match socket2::Socket::new(
            socket2::Domain::for_address(server_addr),
            socket2::Type::STREAM,
            Some(socket2::Protocol::TCP),
        ) {
            Ok(s) => s,
            Err(e) => {
                warn!("Failed to create socket for {:?}: {}, falling back to IPv4", server_addr, e);
                let ipv4_addr = SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), server_addr.port());
                server_addr = ipv4_addr;
                socket2::Socket::new(socket2::Domain::IPV4, socket2::Type::STREAM, Some(socket2::Protocol::TCP))?
            }
        };
        // If address is IPv6 try to enable dual-stack; on failure, switch to IPv4 socket.
        if server_addr.is_ipv6()
            && let Err(e) = socket.set_only_v6(false)
        {
            warn!("Failed to set IPV6_V6ONLY=false, attempting IPv4 fallback: {}", e);
            let ipv4_addr = SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), server_addr.port());
            server_addr = ipv4_addr;
            socket = socket2::Socket::new(socket2::Domain::IPV4, socket2::Type::STREAM, Some(socket2::Protocol::TCP))?;
        }
        // Common setup for both IPv4 and successful dual-stack IPv6
        let backlog = get_listen_backlog();
        socket.set_reuse_address(true)?;
        // Set the socket to non-blocking before passing it to Tokio.
        socket.set_nonblocking(true)?;
        // Attempt bind; if bind fails for IPv6, try IPv4 fallback once more.
        if let Err(bind_err) = socket.bind(&server_addr.into()) {
            warn!("Failed to bind to {}: {}.", server_addr, bind_err);
            if server_addr.is_ipv6() {
                // Try IPv4 fallback
                let ipv4_addr = SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), server_addr.port());
                server_addr = ipv4_addr;
                socket = socket2::Socket::new(socket2::Domain::IPV4, socket2::Type::STREAM, Some(socket2::Protocol::TCP))?;
                socket.set_reuse_address(true)?;
                socket.set_nonblocking(true)?;
                socket.bind(&server_addr.into())?;
                // [FIX] Ensure fallback socket is moved to listening state as well.
                socket.listen(backlog)?;
            } else {
                return Err(bind_err);
            }
        } else {
            // Listen on the socket when initial bind succeeded
            socket.listen(backlog)?;
        }
        TcpListener::from_std(socket.into())?
    };
    let tls_acceptor = setup_tls_acceptor(opt.tls_path.as_deref().unwrap_or_default()).await?;
    let tls_enabled = tls_acceptor.is_some();
    let protocol = if tls_enabled { "https" } else { "http" };
    // Obtain the listener address
    let local_addr: SocketAddr = listener.local_addr()?;
    let local_ip = match rustfs_utils::get_local_ip() {
        Some(ip) => ip,
        None => {
            warn!("Unable to obtain local IP address, using fallback IP: {}", local_addr.ip());
            local_addr.ip()
        }
    };
    // IPv6 addresses need brackets when embedded in URLs.
    let local_ip_str = if local_ip.is_ipv6() {
        format!("[{local_ip}]")
    } else {
        local_ip.to_string()
    };
    // Detailed endpoint information (showing all API endpoints)
    let api_endpoints = format!("{protocol}://{local_ip_str}:{server_port}");
    let localhost_endpoint = format!("{protocol}://127.0.0.1:{server_port}");
    let now_time = chrono::Local::now().format("%Y-%m-%d %H:%M:%S").to_string();
    if opt.console_enable {
        admin::console::init_console_cfg(local_ip, server_port);
        info!(
            target: "rustfs::console::startup",
            "Console WebUI available at: {protocol}://{local_ip_str}:{server_port}/rustfs/console/index.html"
        );
        info!(
            target: "rustfs::console::startup",
            "Console WebUI (localhost): {protocol}://127.0.0.1:{server_port}/rustfs/console/index.html",
        );
        println!("Console WebUI Start Time: {now_time}");
        println!("Console WebUI available at: {protocol}://{local_ip_str}:{server_port}/rustfs/console/index.html");
        println!("Console WebUI (localhost): {protocol}://127.0.0.1:{server_port}/rustfs/console/index.html",);
    } else {
        info!(target: "rustfs::main::startup","RustFS API: {api_endpoints} {localhost_endpoint}");
        println!("RustFS Http API: {api_endpoints} {localhost_endpoint}");
        println!("RustFS Start Time: {now_time}");
        // Nudge operators away from shipping with well-known default credentials.
        if rustfs_credentials::DEFAULT_ACCESS_KEY.eq(&opt.access_key)
            && rustfs_credentials::DEFAULT_SECRET_KEY.eq(&opt.secret_key)
        {
            warn!(
                "Detected default credentials '{}:{}', we recommend that you change these values with 'RUSTFS_ACCESS_KEY' and 'RUSTFS_SECRET_KEY' environment variables",
                rustfs_credentials::DEFAULT_ACCESS_KEY,
                rustfs_credentials::DEFAULT_SECRET_KEY
            );
        }
        info!(target: "rustfs::main::startup","For more information, visit https://rustfs.com/docs/");
        info!(target: "rustfs::main::startup", "To enable the console, restart the server with --console-enable and a valid --console-address.");
    }
    // Setup S3 service
    // This project uses the S3S library to implement S3 services
    let s3_service = {
        let store = storage::ecfs::FS::new();
        let mut b = S3ServiceBuilder::new(store.clone());
        let access_key = opt.access_key.clone();
        let secret_key = opt.secret_key.clone();
        b.set_auth(IAMAuth::new(access_key, secret_key));
        b.set_access(store.clone());
        b.set_route(admin::make_admin_route(opt.console_enable)?);
        if !opt.server_domains.is_empty() {
            MultiDomain::new(&opt.server_domains).map_err(Error::other)?; // validate domains
            // add the default port number to the given server domains
            let mut domain_sets = std::collections::HashSet::new();
            for domain in &opt.server_domains {
                domain_sets.insert(domain.to_string());
                if let Some((host, _)) = domain.split_once(':') {
                    domain_sets.insert(format!("{host}:{server_port}"));
                } else {
                    domain_sets.insert(format!("{domain}:{server_port}"));
                }
            }
            info!("virtual-hosted-style requests are enabled use domain_name {:?}", &domain_sets);
            b.set_host(MultiDomain::new(domain_sets).map_err(Error::other)?);
        }
        b.build()
    };
    // Create shutdown channel
    let (shutdown_tx, mut shutdown_rx) = tokio::sync::broadcast::channel(1);
    let shutdown_tx_clone = shutdown_tx.clone();
    // Capture CORS configuration for the server loop
    let cors_allowed_origins = get_cors_allowed_origins();
    let cors_allowed_origins = if cors_allowed_origins.is_empty() {
        None
    } else {
        Some(cors_allowed_origins)
    };
    // Create compression configuration from environment variables
    let compression_config = CompressionConfig::from_env();
    if compression_config.enabled {
        info!(
            "HTTP response compression enabled: extensions={:?}, mime_patterns={:?}, min_size={} bytes",
            compression_config.extensions, compression_config.mime_patterns, compression_config.min_size
        );
    } else {
        debug!("HTTP response compression is disabled");
    }
    let is_console = opt.console_enable;
    // Accept loop runs in its own task so this function can return the
    // shutdown sender immediately.
    tokio::spawn(async move {
        // Create CORS layer inside the server loop closure
        let cors_layer = parse_cors_origins(cors_allowed_origins.as_ref());
        #[cfg(unix)]
        let (mut sigterm_inner, mut sigint_inner) = {
            use tokio::signal::unix::{SignalKind, signal};
            // Unix platform specific code
            let sigterm_inner = signal(SignalKind::terminate()).expect("Failed to create SIGTERM signal handler");
            let sigint_inner = signal(SignalKind::interrupt()).expect("Failed to create SIGINT signal handler");
            (sigterm_inner, sigint_inner)
        };
        let http_server = Arc::new(ConnBuilder::new(TokioExecutor::new()));
        let mut ctrl_c = std::pin::pin!(tokio::signal::ctrl_c());
        let graceful = Arc::new(GracefulShutdown::new());
        debug!("graceful initiated");
        // service ready
        worker_state_manager.update(ServiceState::Ready);
        let tls_acceptor = tls_acceptor.map(Arc::new);
        loop {
            debug!("Waiting for new connection...");
            // Race the accept against the various shutdown signals.
            let (socket, _) = {
                #[cfg(unix)]
                {
                    tokio::select! {
                        res = listener.accept() => match res {
                            Ok(conn) => conn,
                            Err(err) => {
                                error!("error accepting connection: {err}");
                                continue;
                            }
                        },
                        _ = ctrl_c.as_mut() => {
                            info!("Ctrl-C received in worker thread");
                            let _ = shutdown_tx_clone.send(());
                            break;
                        },
                        Some(_) = sigint_inner.recv() => {
                            info!("SIGINT received in worker thread");
                            let _ = shutdown_tx_clone.send(());
                            break;
                        },
                        Some(_) = sigterm_inner.recv() => {
                            info!("SIGTERM received in worker thread");
                            let _ = shutdown_tx_clone.send(());
                            break;
                        },
                        _ = shutdown_rx.recv() => {
                            info!("Shutdown signal received in worker thread");
                            break;
                        }
                    }
                }
                #[cfg(not(unix))]
                {
                    tokio::select! {
                        res = listener.accept() => match res {
                            Ok(conn) => conn,
                            Err(err) => {
                                error!("error accepting connection: {err}");
                                continue;
                            }
                        },
                        _ = ctrl_c.as_mut() => {
                            info!("Ctrl-C received in worker thread");
                            let _ = shutdown_tx_clone.send(());
                            break;
                        },
                        _ = shutdown_rx.recv() => {
                            info!("Shutdown signal received in worker thread");
                            break;
                        }
                    }
                }
            };
            let socket_ref = SockRef::from(&socket);
            // Enable TCP Keepalive to detect dead clients (e.g. power loss)
            // Idle: 10s, Interval: 5s, Retries: 3
            let ka = TcpKeepalive::new()
                .with_time(Duration::from_secs(10))
                .with_interval(Duration::from_secs(5));
            #[cfg(not(any(target_os = "openbsd", target_os = "netbsd")))]
            let ka = ka.with_retries(3);
            // Socket tuning failures are logged but non-fatal.
            if let Err(err) = socket_ref.set_tcp_keepalive(&ka) {
                warn!(?err, "Failed to set TCP_KEEPALIVE");
            }
            if let Err(err) = socket_ref.set_tcp_nodelay(true) {
                warn!(?err, "Failed to set TCP_NODELAY");
            }
            if let Err(err) = socket_ref.set_recv_buffer_size(4 * MI_B) {
                warn!(?err, "Failed to set set_recv_buffer_size");
            }
            if let Err(err) = socket_ref.set_send_buffer_size(4 * MI_B) {
                warn!(?err, "Failed to set set_send_buffer_size");
            }
            let connection_ctx = ConnectionContext {
                http_server: http_server.clone(),
                s3_service: s3_service.clone(),
                cors_layer: cors_layer.clone(),
                compression_config: compression_config.clone(),
                is_console,
                readiness: readiness.clone(),
            };
            process_connection(socket, tls_acceptor.clone(), connection_ctx, graceful.clone());
        }
        worker_state_manager.update(ServiceState::Stopping);
        // Drain in-flight connections; `try_unwrap` succeeds only when all
        // per-connection Arc clones have been dropped by process_connection.
        match Arc::try_unwrap(graceful) {
            Ok(g) => {
                tokio::select! {
                    () = g.shutdown() => {
                        debug!("Gracefully shutdown!");
                    },
                    () = tokio::time::sleep(Duration::from_secs(10)) => {
                        debug!("Waited 10 seconds for graceful shutdown, aborting...");
                    }
                }
            }
            Err(arc_graceful) => {
                error!("Cannot perform graceful shutdown, other references exist err: {:?}", arc_graceful);
                tokio::time::sleep(Duration::from_secs(10)).await;
                debug!("Timeout reached, forcing shutdown");
            }
        }
        worker_state_manager.update(ServiceState::Stopped);
    });
    Ok(shutdown_tx)
}
/// Sets up the TLS acceptor if certificates are available.
#[instrument(skip(tls_path))]
async fn setup_tls_acceptor(tls_path: &str) -> Result<Option<TlsAcceptor>> {
if tls_path.is_empty() || tokio::fs::metadata(tls_path).await.is_err() {
debug!("TLS path is not provided or does not exist, starting with HTTP");
return Ok(None);
}
debug!("Found TLS directory, checking for certificates");
// Make sure to use a modern encryption suite
let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
let mtls_verifier = rustfs_utils::build_webpki_client_verifier(tls_path)?;
// 1. Attempt to load all certificates in the directory (multi-certificate support, for SNI)
if let Ok(cert_key_pairs) = rustfs_utils::load_all_certs_from_directory(tls_path)
&& !cert_key_pairs.is_empty()
{
debug!("Found {} certificates, creating SNI-aware multi-cert resolver", cert_key_pairs.len());
// Create an SNI-enabled certificate resolver
let resolver = rustfs_utils::create_multi_cert_resolver(cert_key_pairs)?;
// Configure the server to enable SNI support
let mut server_config = if let Some(verifier) = mtls_verifier.clone() {
ServerConfig::builder()
.with_client_cert_verifier(verifier)
.with_cert_resolver(Arc::new(resolver))
} else {
ServerConfig::builder()
.with_no_client_auth()
.with_cert_resolver(Arc::new(resolver))
};
// Configure ALPN protocol priority
server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec(), b"http/1.0".to_vec()];
// Log SNI requests
if rustfs_utils::tls_key_log() {
server_config.key_log = Arc::new(rustls::KeyLogFile::new());
}
return Ok(Some(TlsAcceptor::from(Arc::new(server_config))));
}
// 2. Revert to the traditional single-certificate mode
let key_path = format!("{tls_path}/{RUSTFS_TLS_KEY}");
let cert_path = format!("{tls_path}/{RUSTFS_TLS_CERT}");
if tokio::try_join!(tokio::fs::metadata(&key_path), tokio::fs::metadata(&cert_path)).is_ok() {
debug!("Found legacy single TLS certificate, starting with HTTPS");
let certs = rustfs_utils::load_certs(&cert_path).map_err(|e| rustfs_utils::certs_error(e.to_string()))?;
let key = rustfs_utils::load_private_key(&key_path).map_err(|e| rustfs_utils::certs_error(e.to_string()))?;
let mut server_config = if let Some(verifier) = mtls_verifier {
ServerConfig::builder()
.with_client_cert_verifier(verifier)
.with_single_cert(certs, key)
.map_err(|e| rustfs_utils::certs_error(e.to_string()))?
} else {
ServerConfig::builder()
.with_no_client_auth()
.with_single_cert(certs, key)
.map_err(|e| rustfs_utils::certs_error(e.to_string()))?
};
// Configure ALPN protocol priority
server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec(), b"http/1.0".to_vec()];
// Log SNI requests
if rustfs_utils::tls_key_log() {
server_config.key_log = Arc::new(rustls::KeyLogFile::new());
}
return Ok(Some(TlsAcceptor::from(Arc::new(server_config))));
}
debug!("No valid TLS certificates found in the directory, starting with HTTP");
Ok(None)
}
/// Shared services and per-server configuration bundled up and cloned for
/// each accepted TCP connection (consumed by `process_connection`).
#[derive(Clone)]
struct ConnectionContext {
    // Shared hyper connection builder used to serve each connection.
    http_server: Arc<ConnBuilder<TokioExecutor>>,
    // S3 service built once at startup; cheap to clone per connection.
    s3_service: S3Service,
    // CORS policy applied to every request on this connection.
    cors_layer: CorsLayer,
    // Whitelist settings driving the response-compression predicate.
    compression_config: CompressionConfig,
    // Whether the console UI is enabled (adds the redirect layer when true).
    is_console: bool,
    // Gate that rejects requests until subsystems report ready.
    readiness: Arc<GlobalReadiness>,
}
/// Process a single incoming TCP connection.
///
/// This function is executed in a new Tokio task and it will:
/// 1. If TLS is configured, perform TLS handshake.
/// 2. Build a complete service stack for this connection, including S3, RPC services, and all middleware.
/// 3. Use Hyper to handle HTTP requests on this connection.
/// 4. Incorporate connections into the management of elegant closures.
#[instrument(skip_all, fields(peer_addr = %socket.peer_addr().map(|a| a.to_string()).unwrap_or_else(|_| "unknown".to_string())
))]
fn process_connection(
socket: TcpStream,
tls_acceptor: Option<Arc<TlsAcceptor>>,
context: ConnectionContext,
graceful: Arc<GracefulShutdown>,
) {
tokio::spawn(async move {
let ConnectionContext {
http_server,
s3_service,
cors_layer,
compression_config,
is_console,
readiness,
} = context;
// Build services inside each connected task to avoid passing complex service types across tasks,
// It also ensures that each connection has an independent service instance.
let rpc_service = NodeServiceServer::with_interceptor(make_server(), check_auth);
let service = hybrid(s3_service, rpc_service);
let remote_addr = match socket.peer_addr() {
Ok(addr) => Some(RemoteAddr(addr)),
Err(e) => {
tracing::warn!(
error = %e,
"Failed to obtain peer address; policy evaluation may fall back to a default source IP"
);
None
}
};
let hybrid_service = ServiceBuilder::new()
.layer(SetRequestIdLayer::x_request_id(MakeRequestUuid))
.layer(CatchPanicLayer::new())
.layer(AddExtensionLayer::new(remote_addr))
// CRITICAL: Insert ReadinessGateLayer before business logic
// This stops requests from hitting IAMAuth or Storage if they are not ready.
.layer(ReadinessGateLayer::new(readiness))
.layer(
TraceLayer::new_for_http()
.make_span_with(|request: &HttpRequest<_>| {
let trace_id = request
.headers()
.get(http::header::HeaderName::from_static("x-request-id"))
.and_then(|v| v.to_str().ok())
.unwrap_or("unknown");
let span = tracing::info_span!("http-request",
trace_id = %trace_id,
status_code = tracing::field::Empty,
method = %request.method(),
uri = %request.uri(),
version = ?request.version(),
);
for (header_name, header_value) in request.headers() {
if header_name == "user-agent" || header_name == "content-type" || header_name == "content-length" {
span.record(header_name.as_str(), header_value.to_str().unwrap_or("invalid"));
}
}
span
})
.on_request(|request: &HttpRequest<_>, span: &Span| {
let _enter = span.enter();
debug!("http started method: {}, url path: {}", request.method(), request.uri().path());
let labels = [
("key_request_method", format!("{}", request.method())),
("key_request_uri_path", request.uri().path().to_owned().to_string()),
];
counter!("rustfs.api.requests.total", &labels).increment(1);
})
.on_response(|response: &Response<_>, latency: Duration, span: &Span| {
span.record("status_code", tracing::field::display(response.status()));
let _enter = span.enter();
histogram!("rustfs.request.latency.ms").record(latency.as_millis() as f64);
debug!("http response generated in {:?}", latency)
})
.on_body_chunk(|chunk: &Bytes, latency: Duration, span: &Span| {
let _enter = span.enter();
histogram!("rustfs.request.body.len").record(chunk.len() as f64);
debug!("http body sending {} bytes in {:?}", chunk.len(), latency);
})
.on_eos(|_trailers: Option<&HeaderMap>, stream_duration: Duration, span: &Span| {
let _enter = span.enter();
debug!("http stream closed after {:?}", stream_duration)
})
.on_failure(|_error, latency: Duration, span: &Span| {
let _enter = span.enter();
counter!("rustfs.api.requests.failure.total").increment(1);
debug!("http request failure error: {:?} in {:?}", _error, latency)
}),
)
.layer(PropagateRequestIdLayer::x_request_id())
.layer(cors_layer)
// Compress responses based on whitelist configuration
// Only compresses when enabled and matches configured extensions/MIME types
.layer(CompressionLayer::new().compress_when(CompressionPredicate::new(compression_config)))
.option_layer(if is_console { Some(RedirectLayer) } else { None })
.service(service);
let hybrid_service = TowerToHyperService::new(hybrid_service);
// Decide whether to handle HTTPS or HTTP connections based on the existence of TLS Acceptor
if let Some(acceptor) = tls_acceptor {
debug!("TLS handshake start");
let peer_addr = socket
.peer_addr()
.ok()
.map_or_else(|| "unknown".to_string(), |addr| addr.to_string());
match acceptor.accept(socket).await {
Ok(tls_socket) => {
debug!("TLS handshake successful");
let stream = TokioIo::new(tls_socket);
let conn = http_server.serve_connection(stream, hybrid_service);
if let Err(err) = graceful.watch(conn).await {
handle_connection_error(&*err);
}
}
Err(err) => {
// Detailed analysis of the reasons why the TLS handshake fails
let err_str = err.to_string();
let mut key_failure_type_str: &str = "UNKNOWN";
if err_str.contains("unexpected EOF") || err_str.contains("handshake eof") {
warn!(peer_addr = %peer_addr, "TLS handshake failed. If this client needs HTTP, it should connect to the HTTP port instead");
key_failure_type_str = "UNEXPECTED_EOF";
} else if err_str.contains("protocol version") {
error!(
peer_addr = %peer_addr,
"TLS handshake failed due to protocol version mismatch: {}", err
);
key_failure_type_str = "PROTOCOL_VERSION";
} else if err_str.contains("certificate") {
error!(
peer_addr = %peer_addr,
"TLS handshake failed due to certificate issues: {}", err
);
key_failure_type_str = "CERTIFICATE";
} else {
error!(
peer_addr = %peer_addr,
"TLS handshake failed: {}", err
);
}
counter!("rustfs_tls_handshake_failures", &[("key_failure_type", key_failure_type_str)]).increment(1);
// Record detailed diagnostic information
debug!(
peer_addr = %peer_addr,
error_type = %std::any::type_name_of_val(&err),
error_details = %err,
"TLS handshake failure details"
);
return;
}
}
debug!("TLS handshake success");
} else {
debug!("Http handshake start");
let stream = TokioIo::new(socket);
let conn = http_server.serve_connection(stream, hybrid_service);
if let Err(err) = graceful.watch(conn).await {
handle_connection_error(&*err);
}
debug!("Http handshake success");
};
});
}
/// Handles connection errors by logging them with appropriate severity
fn handle_connection_error(err: &(dyn std::error::Error + 'static)) {
if let Some(hyper_err) = err.downcast_ref::<hyper::Error>() {
if hyper_err.is_incomplete_message() {
warn!("The HTTP connection is closed prematurely and the message is not completed:{}", hyper_err);
} else if hyper_err.is_closed() {
warn!("The HTTP connection is closed:{}", hyper_err);
} else if hyper_err.is_parse() {
error!("HTTP message parsing failed:{}", hyper_err);
} else if hyper_err.is_user() {
error!("HTTP user-custom error:{}", hyper_err);
} else if hyper_err.is_canceled() {
warn!("The HTTP connection is canceled:{}", hyper_err);
} else {
error!("Unknown hyper error:{:?}", hyper_err);
}
} else if let Some(io_err) = err.downcast_ref::<Error>() {
error!("Unknown connection IO error:{}", io_err);
} else {
error!("Unknown connection error type:{:?}", err);
}
}
/// gRPC interceptor that validates the `authorization` metadata entry against
/// the process-wide token from `rustfs_credentials::get_grpc_token()`.
///
/// Returns the request unchanged on a match, `Status::unauthenticated` when
/// the header is missing or wrong, and `Status::internal` when the configured
/// token itself cannot be encoded as a gRPC metadata value.
#[allow(clippy::result_large_err)]
fn check_auth(req: Request<()>) -> std::result::Result<Request<()>, Status> {
    let token_str = rustfs_credentials::get_grpc_token();
    // A parse failure here means the deployment's configured token is
    // malformed — a server-side problem, hence Status::internal.
    let token: MetadataValue<_> = token_str.parse().map_err(|e| {
        error!("Failed to parse RUSTFS_GRPC_AUTH_TOKEN into gRPC metadata value: {}", e);
        Status::internal("Invalid auth token configuration")
    })?;
    match req.metadata().get("authorization") {
        // Client must present the exact configured token.
        Some(t) if token == t => Ok(req),
        _ => Err(Status::unauthenticated("No valid auth token")),
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/server/layer.rs | rustfs/src/server/layer.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::server::hybrid::HybridBody;
use http::{Request as HttpRequest, Response, StatusCode};
use hyper::body::Incoming;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use tower::{Layer, Service};
use tracing::debug;
/// Redirect layer that redirects browser requests to the console.
///
/// Wraps the inner service in a [`RedirectService`], which short-circuits
/// bare browser GETs with a 302 to the console UI and forwards everything
/// else untouched.
#[derive(Clone)]
pub struct RedirectLayer;

impl<S> Layer<S> for RedirectLayer {
    type Service = RedirectService<S>;

    // Standard tower layering: just wrap the downstream service.
    fn layer(&self, inner: S) -> Self::Service {
        RedirectService { inner }
    }
}

/// Service implementation for redirect functionality
#[derive(Clone)]
pub struct RedirectService<S> {
    // Downstream service invoked for all non-redirected requests.
    inner: S,
}
impl<S, RestBody, GrpcBody> Service<HttpRequest<Incoming>> for RedirectService<S>
where
    S: Service<HttpRequest<Incoming>, Response = Response<HybridBody<RestBody, GrpcBody>>> + Clone + Send + 'static,
    S::Future: Send + 'static,
    S::Error: Into<Box<dyn std::error::Error + Send + Sync>> + Send + 'static,
    // RestBody must be Default so a synthetic (empty) redirect body can be built.
    RestBody: Default + Send + 'static,
    GrpcBody: Send + 'static,
{
    type Response = Response<HybridBody<RestBody, GrpcBody>>;
    type Error = Box<dyn std::error::Error + Send + Sync>;
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx).map_err(Into::into)
    }

    fn call(&mut self, req: HttpRequest<Incoming>) -> Self::Future {
        // Redirect only unauthenticated browser GETs (User-Agent contains
        // "Mozilla") whose path is "/", "/rustfs" or "/index.html".
        let path = req.uri().path().trim_end_matches('/');
        let should_redirect = req.method() == http::Method::GET
            && !req.headers().contains_key(http::header::AUTHORIZATION)
            && req
                .headers()
                .get(http::header::USER_AGENT)
                .and_then(|v| v.to_str().ok())
                .map(|ua| ua.contains("Mozilla"))
                .unwrap_or(false)
            // After trimming trailing '/', "/" becomes "", so the empty check covers the root path.
            && (path.is_empty() || path == "/rustfs" || path == "/index.html");
        if should_redirect {
            debug!("Redirecting browser request from {} to console", path);
            // Create redirect response
            let redirect_response = Response::builder()
                .status(StatusCode::FOUND)
                .header(http::header::LOCATION, "/rustfs/console/")
                .body(HybridBody::Rest {
                    rest_body: RestBody::default(),
                })
                // Static status/header/body — a builder failure here would be a programming bug.
                .expect("failed to build redirect response");
            return Box::pin(async move { Ok(redirect_response) });
        }
        // Otherwise, forward to the next service.
        // NOTE(review): the clone handed to the future has not been poll_ready'd
        // (classic tower clone pitfall; `std::mem::replace` with the ready
        // instance avoids it) — confirm the inner service is always ready.
        let mut inner = self.inner.clone();
        Box::pin(async move { inner.call(req).await.map_err(Into::into) })
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/server/service_state.rs | rustfs/src/server/service_state.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use atomic_enum::atomic_enum;
use std::sync::Arc;
use std::sync::atomic::Ordering;
use std::time::Duration;
use tracing::info;
// a configurable shutdown timeout
pub(crate) const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(1);
/// Forward a lifecycle state change to systemd (Linux only).
///
/// Maps `"ready"`/`"stopping"` onto `NotifyState::Ready`/`NotifyState::Stopping`;
/// any other state is logged and ignored.
///
/// Fix: the final "notifications are enabled" info line previously ran even
/// when `notify` failed, logging success on a failure path. It is now emitted
/// only after a successful notification.
#[cfg(target_os = "linux")]
fn notify_systemd(state: &str) {
    use libsystemd::daemon::{NotifyState, notify};
    use tracing::{debug, error};
    let notify_state = match state {
        "ready" => NotifyState::Ready,
        "stopping" => NotifyState::Stopping,
        _ => {
            info!("Unsupported state passed to notify_systemd: {}", state);
            return;
        }
    };
    if let Err(e) = notify(false, &[notify_state]) {
        error!("Failed to notify systemd: {}", e);
    } else {
        debug!("Successfully notified systemd: {}", state);
        // Only claim the notification path works when the call succeeded.
        info!("Systemd notifications are enabled on linux (state: {})", state);
    }
}
/// Fallback for non-Linux targets: systemd does not exist here, so the
/// requested state transition is only recorded in the log.
#[cfg(not(target_os = "linux"))]
fn notify_systemd(state: &str) {
    info!("Systemd notifications are not available on this platform not linux (state: {})", state);
}
/// Which OS signal triggered shutdown; returned by `wait_for_shutdown`.
#[derive(Debug)]
pub enum ShutdownSignal {
    // Ctrl-C delivered through tokio's cross-platform handler.
    CtrlC,
    #[cfg(unix)]
    Sigterm,
    #[cfg(unix)]
    Sigint,
}

/// Lifecycle phases of the server process.
///
/// `#[atomic_enum]` generates an `AtomicServiceState` type so the state can
/// be shared and updated lock-free (see `ServiceStateManager`).
#[atomic_enum]
#[derive(PartialEq)]
pub(crate) enum ServiceState {
    Starting,
    Ready,
    Stopping,
    Stopped,
}
/// Block until one of the supported shutdown signals arrives (Unix build).
///
/// Listens concurrently for Ctrl-C, SIGINT and SIGTERM and reports which
/// one fired first.
#[cfg(unix)]
pub(crate) async fn wait_for_shutdown() -> ShutdownSignal {
    use tokio::signal::unix::{SignalKind, signal};
    // Installing a handler can only fail during setup; treat that as fatal.
    let mut sigterm = signal(SignalKind::terminate()).expect("failed to create SIGTERM signal handler");
    let mut sigint = signal(SignalKind::interrupt()).expect("failed to create SIGINT signal handler");
    tokio::select! {
        _ = tokio::signal::ctrl_c() => {
            info!("RustFS Received Ctrl-C signal");
            ShutdownSignal::CtrlC
        }
        _ = sigint.recv() => {
            info!("RustFS Received SIGINT signal");
            ShutdownSignal::Sigint
        }
        _ = sigterm.recv() => {
            info!("RustFS Received SIGTERM signal");
            ShutdownSignal::Sigterm
        }
    }
}
/// Block until a shutdown trigger arrives on non-Unix platforms.
///
/// Only Ctrl-C is available here, so the previous single-branch
/// `tokio::select!` was needless; the signal future is awaited directly.
#[cfg(not(unix))]
pub(crate) async fn wait_for_shutdown() -> ShutdownSignal {
    // An error installing the handler is deliberately ignored, matching the
    // old `select!` arm that discarded the Result.
    let _ = tokio::signal::ctrl_c().await;
    info!("Received Ctrl-C signal");
    ShutdownSignal::CtrlC
}
/// Process-wide handle over the current [`ServiceState`].
///
/// Cloneable; all clones share one atomic cell, so an update made anywhere
/// is visible everywhere.
#[derive(Clone)]
pub(crate) struct ServiceStateManager {
    // Shared atomic state cell (type generated by `#[atomic_enum]`).
    state: Arc<AtomicServiceState>,
}

impl ServiceStateManager {
    /// Creates a manager whose initial state is [`ServiceState::Starting`].
    pub fn new() -> Self {
        Self {
            state: Arc::new(AtomicServiceState::new(ServiceState::Starting)),
        }
    }

    /// Stores `new_state` and mirrors the transition to systemd (on Linux).
    pub fn update(&self, new_state: ServiceState) {
        self.state.store(new_state, Ordering::SeqCst);
        self.notify_systemd(&new_state);
    }

    /// Returns the most recently stored state.
    pub fn current_state(&self) -> ServiceState {
        self.state.load(Ordering::SeqCst)
    }

    // Logs the transition and, where supported, forwards it to systemd.
    // Starting/Stopped only update systemd's status string; Ready/Stopping
    // go through the free function `notify_systemd`, which maps onto
    // `NotifyState::Ready` / `NotifyState::Stopping`.
    fn notify_systemd(&self, state: &ServiceState) {
        match state {
            ServiceState::Starting => {
                info!("RustFS Service is starting...");
                #[cfg(target_os = "linux")]
                if let Err(e) =
                    libsystemd::daemon::notify(false, &[libsystemd::daemon::NotifyState::Status("Starting...".to_string())])
                {
                    tracing::error!("Failed to notify systemd of starting state: {}", e);
                }
            }
            ServiceState::Ready => {
                info!("RustFS Service is ready");
                notify_systemd("ready");
            }
            ServiceState::Stopping => {
                info!("RustFS Service is stopping...");
                notify_systemd("stopping");
            }
            ServiceState::Stopped => {
                info!("RustFS Service has stopped");
                #[cfg(target_os = "linux")]
                if let Err(e) =
                    libsystemd::daemon::notify(false, &[libsystemd::daemon::NotifyState::Status("Stopped".to_string())])
                {
                    tracing::error!("Failed to notify systemd of stopped state: {}", e);
                }
            }
        }
    }
}

impl Default for ServiceStateManager {
    fn default() -> Self {
        Self::new()
    }
}
// Example of use
#[cfg(test)]
mod tests {
    use super::*;

    /// Walks the full state lifecycle and checks each transition is observable.
    #[test]
    fn test_service_state_manager() {
        let manager = ServiceStateManager::new();
        // The initial state should be Starting
        assert_eq!(manager.current_state(), ServiceState::Starting);
        // Update the status to Ready
        manager.update(ServiceState::Ready);
        assert_eq!(manager.current_state(), ServiceState::Ready);
        // Update the status to Stopping
        manager.update(ServiceState::Stopping);
        assert_eq!(manager.current_state(), ServiceState::Stopping);
        // Update the status to Stopped
        manager.update(ServiceState::Stopped);
        assert_eq!(manager.current_state(), ServiceState::Stopped);
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/server/runtime.rs | rustfs/src/server/runtime.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::time::Duration;
use sysinfo::{RefreshKind, System};
/// Pick the default per-thread stack size for the runtime.
///
/// Release builds keep the 1 MiB baseline; debug builds and macOS get a
/// deeper 2 MiB stack (debug frames are larger, and macOS system libraries
/// and backtraces consume more stack).
#[inline]
fn compute_default_thread_stack_size() -> usize {
    let needs_larger_stack = cfg!(debug_assertions) || cfg!(target_os = "macos");
    if needs_larger_stack {
        rustfs_config::DEFAULT_THREAD_STACK_SIZE * 2
    } else {
        rustfs_config::DEFAULT_THREAD_STACK_SIZE
    }
}
#[inline]
fn detect_cores() -> usize {
    // Priority physical cores, fallback logic cores, minimum 1
    // NOTE(review): `sys.cpus().len()` counts logical CPUs; despite the
    // comment above, no separate physical-core query is made here — confirm
    // whether logical cores are the intended default.
    let mut sys = System::new_with_specifics(RefreshKind::everything().without_memory().without_processes());
    sys.refresh_cpu_all();
    sys.cpus().len().max(1)
}

/// Default Tokio worker-thread count: one per detected core.
#[inline]
fn compute_default_worker_threads() -> usize {
    // Physical cores are used by default (closer to CPU compute resources and cache topology)
    detect_cores()
}
/// Default `max_blocking_threads`, scaled from the detected core count:
/// up to 16 cores -> 1024, then doubled for every doubling of cores
/// (17..=32 -> 2048, 33..=64 -> 4096, and so on).
fn compute_default_max_blocking_threads() -> usize {
    const BASE_CORES: usize = rustfs_config::DEFAULT_WORKER_THREADS;
    const BASE_THREADS: usize = rustfs_config::DEFAULT_MAX_BLOCKING_THREADS;
    let core_count = detect_cores();
    let (mut ceiling, mut blocking) = (BASE_CORES, BASE_THREADS);
    // Each time the core count crosses another power-of-two band, double the
    // blocking-thread budget alongside it.
    while core_count > ceiling {
        ceiling = ceiling.saturating_mul(2);
        blocking = blocking.saturating_mul(2);
    }
    blocking
}
/// Customize the Tokio runtime configuration
/// These configurations can be adjusted by environment variables
/// to optimize performance based on the deployment environment
/// Custom Tokio runtime builder (can be fully overridden by ENV)
/// - RUSTFS_RUNTIME_WORKER_THREADS
/// - RUSTFS_RUNTIME_MAX_BLOCKING_THREADS
/// - RUSTFS_RUNTIME_THREAD_STACK_SIZE
/// - RUSTFS_RUNTIME_THREAD_KEEP_ALIVE
/// - RUSTFS_RUNTIME_GLOBAL_QUEUE_INTERVAL
/// - RUSTFS_RUNTIME_EVENT_INTERVAL
/// - RUSTFS_RUNTIME_THREAD_NAME
/// - RUSTFS_RUNTIME_MAX_IO_EVENTS_PER_TICK
/// - RUSTFS_RUNTIME_THREAD_PRINT_ENABLED
///
/// Returns: Configured Tokio runtime builder
/// # Panics
/// Panics if environment variable values are invalid
/// # Examples
/// ```no_run
/// use rustfs_server::get_tokio_runtime_builder;
/// let builder = get_tokio_runtime_builder();
/// let runtime = builder.build().unwrap();
/// ```
pub(crate) fn get_tokio_runtime_builder() -> tokio::runtime::Builder {
    let mut builder = tokio::runtime::Builder::new_multi_thread();
    // Worker threads(Default physical cores)
    let default_worker_threads = compute_default_worker_threads();
    let worker_threads = rustfs_utils::get_env_usize(rustfs_config::ENV_WORKER_THREADS, default_worker_threads);
    builder.worker_threads(worker_threads);
    // Max blocking threads: Prioritize environment variables, otherwise the default value is dynamically calculated based on sysinfo
    let default_max_blocking_threads = compute_default_max_blocking_threads();
    let max_blocking_threads = rustfs_utils::get_env_usize(rustfs_config::ENV_MAX_BLOCKING_THREADS, default_max_blocking_threads);
    builder.max_blocking_threads(max_blocking_threads);
    // Thread stack size (environment variables first, followed by dynamic default by platform/build type)
    let default_stack = compute_default_thread_stack_size();
    let thread_stack_size = rustfs_utils::get_env_usize(rustfs_config::ENV_THREAD_STACK_SIZE, default_stack);
    builder.thread_stack_size(thread_stack_size);
    // Thread keep alive(Blocking the thread pool is kept alive)
    let thread_keep_alive =
        rustfs_utils::get_env_u64(rustfs_config::ENV_THREAD_KEEP_ALIVE, rustfs_config::DEFAULT_THREAD_KEEP_ALIVE);
    builder.thread_keep_alive(Duration::from_secs(thread_keep_alive));
    // Global queue interval(Task Fairness/Throughput Tradeoff)
    let global_queue_interval =
        rustfs_utils::get_env_u32(rustfs_config::ENV_GLOBAL_QUEUE_INTERVAL, rustfs_config::DEFAULT_GLOBAL_QUEUE_INTERVAL);
    builder.global_queue_interval(global_queue_interval);
    // Event interval(View the interval of I/O/timer events)
    let event_interval = rustfs_utils::get_env_u32(rustfs_config::ENV_EVENT_INTERVAL, rustfs_config::DEFAULT_EVENT_INTERVAL);
    builder.event_interval(event_interval);
    // Thread name
    let thread_name = rustfs_utils::get_env_str(rustfs_config::ENV_THREAD_NAME, rustfs_config::DEFAULT_THREAD_NAME);
    builder.thread_name(thread_name.clone());
    // Enable I/O driver and set the maximum number of I/O events per tick (nevents)
    let max_io_events_per_tick =
        rustfs_utils::get_env_usize(rustfs_config::ENV_MAX_IO_EVENTS_PER_TICK, rustfs_config::DEFAULT_MAX_IO_EVENTS_PER_TICK);
    builder.enable_all().max_io_events_per_tick(max_io_events_per_tick);
    // Optional: Simple log of thread start/stop
    // (println! rather than tracing, since worker threads may start before
    // the tracing subscriber is installed.)
    if print_tokio_thread_enable() {
        builder
            .on_thread_start(|| {
                let id = std::thread::current().id();
                println!(
                    "RustFS Worker Thread running - initializing resources time: {:?}, thread id: {:?}",
                    chrono::Utc::now().to_rfc3339(),
                    id
                );
            })
            .on_thread_stop(|| {
                let id = std::thread::current().id();
                println!(
                    "RustFS Worker Thread stopping - cleaning up resources time: {:?}, thread id: {:?}",
                    chrono::Utc::now().to_rfc3339(),
                    id
                )
            });
    }
    // Print the effective configuration outside production for easy diagnosis.
    if !rustfs_obs::is_production_environment() {
        println!(
            "Starting Tokio runtime with configured parameters:\n\
            worker_threads: {worker_threads}, max_blocking_threads: {max_blocking_threads}, \
            thread_stack_size: {thread_stack_size}, thread_keep_alive: {thread_keep_alive}, \
            global_queue_interval: {global_queue_interval}, event_interval: {event_interval}, \
            max_io_events_per_tick: {max_io_events_per_tick}, thread_name: {thread_name}"
        );
    }
    builder
}
/// Whether to print tokio threads
/// This can be useful for debugging purposes
///
/// Controlled by the environment variable named in
/// `rustfs_config::ENV_THREAD_PRINT_ENABLED` (RUSTFS_RUNTIME_THREAD_PRINT_ENABLED,
/// per the builder's doc list); defaults to `DEFAULT_THREAD_PRINT_ENABLED`.
fn print_tokio_thread_enable() -> bool {
    rustfs_utils::get_env_bool(rustfs_config::ENV_THREAD_PRINT_ENABLED, rustfs_config::DEFAULT_THREAD_PRINT_ENABLED)
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/server/mod.rs | rustfs/src/server/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod audit;
mod cert;
mod compress;
mod event;
mod http;
mod hybrid;
mod layer;
mod prefix;
mod readiness;
mod runtime;
mod service_state;
pub(crate) use audit::{start_audit_system, stop_audit_system};
pub(crate) use cert::init_cert;
pub(crate) use event::{init_event_notifier, shutdown_event_notifier};
pub(crate) use http::start_http_server;
pub(crate) use prefix::*;
pub(crate) use readiness::ReadinessGateLayer;
pub(crate) use runtime::get_tokio_runtime_builder;
pub(crate) use service_state::SHUTDOWN_TIMEOUT;
pub(crate) use service_state::ServiceState;
pub(crate) use service_state::ServiceStateManager;
pub(crate) use service_state::ShutdownSignal;
pub(crate) use service_state::wait_for_shutdown;
/// Newtype over the peer's socket address; wrapped in `Option` and attached
/// to each request via `AddExtensionLayer` by the HTTP server setup.
#[derive(Clone, Copy, Debug)]
pub struct RemoteAddr(pub std::net::SocketAddr);
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/server/hybrid.rs | rustfs/src/server/hybrid.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use futures::Future;
use http_body::Frame;
use hyper::body::Incoming;
use hyper::{Request, Response};
use pin_project_lite::pin_project;
use std::pin::Pin;
use std::task::{Context, Poll};
use tower::Service;
type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>;
/// Generate a [`HybridService`]
///
/// Simple constructor pairing the REST service with the gRPC service; the
/// per-request dispatch decision lives in the `Service` impl.
pub(crate) fn hybrid<MakeRest, Grpc>(make_rest: MakeRest, grpc: Grpc) -> HybridService<MakeRest, Grpc> {
    HybridService { rest: make_rest, grpc }
}

/// The service that can serve both gRPC and REST HTTP Requests
#[derive(Clone)]
pub struct HybridService<Rest, Grpc> {
    // Handles everything that is not recognized as gRPC.
    rest: Rest,
    // Handles HTTP/2 requests whose Content-Type starts with "application/grpc".
    grpc: Grpc,
}
impl<Rest, Grpc, RestBody, GrpcBody> Service<Request<Incoming>> for HybridService<Rest, Grpc>
where
    Rest: Service<Request<Incoming>, Response = Response<RestBody>>,
    Grpc: Service<Request<Incoming>, Response = Response<GrpcBody>>,
    Rest::Error: Into<BoxError>,
    Grpc::Error: Into<BoxError>,
{
    type Response = Response<HybridBody<RestBody, GrpcBody>>;
    type Error = BoxError;
    type Future = HybridFuture<Rest::Future, Grpc::Future>;

    // Ready only when BOTH inner services are ready, since either may be
    // chosen for the next request.
    // NOTE(review): if `rest` reports ready while `grpc` stays pending, `rest`
    // is polled again on the next wakeup; services that reserve resources in
    // poll_ready may not tolerate repeated polling — confirm both are safe.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        match self.rest.poll_ready(cx) {
            Poll::Ready(Ok(())) => match self.grpc.poll_ready(cx) {
                Poll::Ready(Ok(())) => Poll::Ready(Ok(())),
                Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())),
                Poll::Pending => Poll::Pending,
            },
            Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())),
            Poll::Pending => Poll::Pending,
        }
    }

    /// When calling the service, gRPC is served if the HTTP request version is HTTP/2
    /// and if the Content-Type is "application/grpc"; otherwise, the request is served
    /// as a REST request
    fn call(&mut self, req: Request<Incoming>) -> Self::Future {
        // `starts_with` (not equality) also matches subtypes such as
        // "application/grpc+proto".
        match (req.version(), req.headers().get(hyper::header::CONTENT_TYPE)) {
            (hyper::Version::HTTP_2, Some(hv)) if hv.as_bytes().starts_with(b"application/grpc") => HybridFuture::Grpc {
                grpc_future: self.grpc.call(req),
            },
            _ => HybridFuture::Rest {
                rest_future: self.rest.call(req),
            },
        }
    }
}
pin_project! {
    /// A hybrid HTTP body that will be used in the response type for the
    /// [`HybridFuture`], i.e., the output of the [`HybridService`]
    #[project = HybridBodyProj]
    pub enum HybridBody<RestBody, GrpcBody> {
        Rest {
            #[pin]
            rest_body: RestBody
        },
        Grpc {
            #[pin]
            grpc_body: GrpcBody
        },
    }
}

/// The default is the `Rest` variant with a defaulted body; only `RestBody`
/// needs `Default`, so synthetic responses (e.g. the console redirect) can be
/// built without constraining the gRPC side.
impl<RestBody, GrpcBody> Default for HybridBody<RestBody, GrpcBody>
where
    RestBody: Default,
    // GrpcBody: Default,
{
    fn default() -> Self {
        Self::Rest {
            rest_body: RestBody::default(),
        }
    }
}
/// Delegate every `http_body::Body` operation to whichever variant is held.
impl<RestBody, GrpcBody> http_body::Body for HybridBody<RestBody, GrpcBody>
where
    RestBody: http_body::Body + Send + Unpin,
    // Both variants must yield the same chunk type so `Data` is unambiguous.
    GrpcBody: http_body::Body<Data = RestBody::Data> + Send + Unpin,
    RestBody::Error: Into<BoxError>,
    GrpcBody::Error: Into<BoxError>,
{
    type Data = RestBody::Data;
    type Error = BoxError;

    fn is_end_stream(&self) -> bool {
        match self {
            Self::Rest { rest_body } => rest_body.is_end_stream(),
            Self::Grpc { grpc_body } => grpc_body.is_end_stream(),
        }
    }

    fn poll_frame(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
        // Errors from either side are boxed into the common BoxError.
        match self.project() {
            HybridBodyProj::Rest { rest_body } => rest_body.poll_frame(cx).map_err(Into::into),
            HybridBodyProj::Grpc { grpc_body } => grpc_body.poll_frame(cx).map_err(Into::into),
        }
    }

    fn size_hint(&self) -> http_body::SizeHint {
        match self {
            Self::Rest { rest_body } => rest_body.size_hint(),
            Self::Grpc { grpc_body } => grpc_body.size_hint(),
        }
    }
}
pin_project! {
    /// A future that accepts an HTTP request as input and returns an HTTP
    /// response as output for the [`HybridService`]
    ///
    /// One variant per dispatch target; the variant is fixed at `call` time.
    #[project = HybridFutureProj]
    pub enum HybridFuture<RestFuture, GrpcFuture> {
        Rest {
            #[pin]
            rest_future: RestFuture,
        },
        Grpc {
            #[pin]
            grpc_future: GrpcFuture,
        },
    }
}
impl<RestFuture, GrpcFuture, RestBody, GrpcBody, RestError, GrpcError> Future for HybridFuture<RestFuture, GrpcFuture>
where
RestFuture: Future<Output = Result<Response<RestBody>, RestError>>,
GrpcFuture: Future<Output = Result<Response<GrpcBody>, GrpcError>>,
RestError: Into<BoxError>,
GrpcError: Into<BoxError>,
{
type Output = Result<Response<HybridBody<RestBody, GrpcBody>>, BoxError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.project() {
HybridFutureProj::Rest { rest_future } => match rest_future.poll(cx) {
Poll::Ready(Ok(res)) => Poll::Ready(Ok(res.map(|rest_body| HybridBody::Rest { rest_body }))),
Poll::Ready(Err(err)) => Poll::Ready(Err(err.into())),
Poll::Pending => Poll::Pending,
},
HybridFutureProj::Grpc { grpc_future } => match grpc_future.poll(cx) {
Poll::Ready(Ok(res)) => Poll::Ready(Ok(res.map(|grpc_body| HybridBody::Grpc { grpc_body }))),
Poll::Ready(Err(err)) => Poll::Ready(Err(err.into())),
Poll::Pending => Poll::Pending,
},
}
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/server/cert.rs | rustfs/src/server/cert.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_common::{MtlsIdentityPem, set_global_mtls_identity, set_global_root_cert};
use rustfs_config::{RUSTFS_CA_CERT, RUSTFS_PUBLIC_CERT, RUSTFS_TLS_CERT};
use rustls::pki_types::{CertificateDer, PrivateKeyDer};
use std::path::{Path, PathBuf};
use tracing::{debug, info};
/// Error type for certificate handling.
#[derive(Debug)]
pub enum RustFSError {
    /// A certificate-related failure with a human-readable description.
    Cert(String),
}

impl std::fmt::Display for RustFSError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Single-variant enum: an irrefutable let replaces the match.
        let RustFSError::Cert(msg) = self;
        write!(f, "Certificate error: {}", msg)
    }
}

impl std::error::Error for RustFSError {}
/// Decode all PEM certificates in `pem` into DER form.
///
/// # Arguments
/// * `pem` - PEM-encoded certificate bundle.
///
/// # Returns
/// The DER-encoded certificates, in input order.
///
/// # Errors
/// Returns `RustFSError::Cert` on the first entry that fails to parse
/// (short-circuits, exactly like the previous push-loop).
fn parse_pem_certs(pem: &[u8]) -> Result<Vec<CertificateDer<'static>>, RustFSError> {
    let mut reader = std::io::Cursor::new(pem);
    rustls_pemfile::certs(&mut reader)
        .map(|item| item.map_err(|e| RustFSError::Cert(format!("parse cert pem: {e}"))))
        .collect()
}
/// Decode the first private key in `pem` into DER form.
/// Supports the key formats `rustls_pemfile::private_key` understands
/// (PKCS#8, RSA, SEC1).
///
/// # Errors
/// Returns `RustFSError::Cert` when parsing fails or no key is present.
fn parse_pem_private_key(pem: &[u8]) -> Result<PrivateKeyDer<'static>, RustFSError> {
    let mut reader = std::io::Cursor::new(pem);
    match rustls_pemfile::private_key(&mut reader) {
        Ok(Some(key)) => Ok(key),
        Ok(None) => Err(RustFSError::Cert("no private key found in PEM".into())),
        Err(e) => Err(RustFSError::Cert(format!("parse private key pem: {e}"))),
    }
}
/// Helper function to read a file and return its contents.
/// Returns the file contents as a vector of bytes.
/// # Errors
/// Returns `RustFSError` if reading fails.
async fn read_file(path: &PathBuf, desc: &str) -> Result<Vec<u8>, RustFSError> {
tokio::fs::read(path)
.await
.map_err(|e| RustFSError::Cert(format!("read {} {:?}: {e}", desc, path)))
}
/// Initialize TLS material for both server and outbound client connections.
///
/// Loads roots from:
/// - `${RUSTFS_TLS_PATH}/ca.crt` (or `tls/ca.crt`)
/// - `${RUSTFS_TLS_PATH}/public.crt` (optional additional root bundle)
/// - system roots if `RUSTFS_TRUST_SYSTEM_CA=true` (default: false)
/// - if `RUSTFS_TRUST_LEAF_CERT_AS_CA=true`, also loads leaf cert(s) from
///   `${RUSTFS_TLS_PATH}/rustfs_cert.pem` into the root store.
///
/// Loads mTLS client identity (optional) from:
/// - `${RUSTFS_TLS_PATH}/client_cert.pem`
/// - `${RUSTFS_TLS_PATH}/client_key.pem`
///
/// Environment overrides:
/// - RUSTFS_TLS_PATH
/// - RUSTFS_MTLS_CLIENT_CERT
/// - RUSTFS_MTLS_CLIENT_KEY
pub(crate) async fn init_cert(tls_path: &str) -> Result<(), RustFSError> {
    // An empty path means TLS was not configured; this is not an error.
    if tls_path.is_empty() {
        info!("No TLS path configured; skipping certificate initialization");
        return Ok(());
    }
    let tls_dir = PathBuf::from(tls_path);
    // Load root certificates
    load_root_certs(&tls_dir).await?;
    // Load optional mTLS identity
    load_mtls_identity(&tls_dir).await?;
    Ok(())
}
/// Load root certificates from various sources.
///
/// Accumulates PEM bytes from (in order): leaf certs when
/// `RUSTFS_TRUST_LEAF_CERT_AS_CA` is set, `public.crt`, `ca.crt`, and — when
/// `RUSTFS_TRUST_SYSTEM_CA` is set — the first system CA bundle found in a
/// list of well-known locations. The combined bundle is installed globally
/// via `set_global_root_cert`; if nothing was found, the global store is
/// left untouched.
async fn load_root_certs(tls_dir: &Path) -> Result<(), RustFSError> {
    let mut cert_data = Vec::new();
    let trust_leaf_as_ca =
        rustfs_utils::get_env_bool(rustfs_config::ENV_TRUST_LEAF_CERT_AS_CA, rustfs_config::DEFAULT_TRUST_LEAF_CERT_AS_CA);
    if trust_leaf_as_ca {
        // Collect every leaf cert file under the TLS dir into the root bundle.
        walk_dir(tls_dir.to_path_buf(), RUSTFS_TLS_CERT, &mut cert_data).await;
        info!("Loaded leaf certificate(s) as root CA as per RUSTFS_TRUST_LEAF_CERT_AS_CA");
    }
    // Try public.crt and ca.crt
    let public_cert_path = tls_dir.join(RUSTFS_PUBLIC_CERT);
    load_cert_file(public_cert_path.to_str().unwrap_or_default(), &mut cert_data, "CA certificate").await;
    let ca_cert_path = tls_dir.join(RUSTFS_CA_CERT);
    load_cert_file(ca_cert_path.to_str().unwrap_or_default(), &mut cert_data, "CA certificate").await;
    // Load system root certificates if enabled
    let trust_system_ca = rustfs_utils::get_env_bool(rustfs_config::ENV_TRUST_SYSTEM_CA, rustfs_config::DEFAULT_TRUST_SYSTEM_CA);
    if trust_system_ca {
        let system_ca_paths = [
            "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Alpine
            "/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL/CentOS
            "/etc/ssl/ca-bundle.pem", // OpenSUSE
            "/etc/pki/tls/cacert.pem", // OpenELEC
            "/etc/ssl/cert.pem", // macOS/FreeBSD
            "/usr/local/etc/openssl/cert.pem", // macOS/Homebrew OpenSSL
            "/usr/local/share/certs/ca-root-nss.crt", // FreeBSD
            "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // RHEL
            "/usr/share/pki/ca-trust-legacy/ca-bundle.legacy.crt", // RHEL legacy
        ];
        let mut system_cert_loaded = false;
        // Stop at the first bundle that loads; distros ship at most one layout.
        for path in system_ca_paths {
            if load_cert_file(path, &mut cert_data, "system root certificates").await {
                system_cert_loaded = true;
                info!("Loaded system root certificates from {}", path);
                break;
            }
        }
        if !system_cert_loaded {
            debug!("Could not find system root certificates in common locations.");
        }
    } else {
        info!("Loading system root certificates disabled via RUSTFS_TRUST_SYSTEM_CA");
    }
    if !cert_data.is_empty() {
        set_global_root_cert(cert_data).await;
        info!("Configured custom root certificates for inter-node communication");
    }
    Ok(())
}
/// Load optional mTLS identity.
async fn load_mtls_identity(tls_dir: &Path) -> Result<(), RustFSError> {
let client_cert_path = match rustfs_utils::get_env_opt_str(rustfs_config::ENV_MTLS_CLIENT_CERT) {
Some(p) => PathBuf::from(p),
None => tls_dir.join(rustfs_config::RUSTFS_CLIENT_CERT_FILENAME),
};
let client_key_path = match rustfs_utils::get_env_opt_str(rustfs_config::ENV_MTLS_CLIENT_KEY) {
Some(p) => PathBuf::from(p),
None => tls_dir.join(rustfs_config::RUSTFS_CLIENT_KEY_FILENAME),
};
if client_cert_path.exists() && client_key_path.exists() {
let cert_bytes = read_file(&client_cert_path, "client cert").await?;
let key_bytes = read_file(&client_key_path, "client key").await?;
// Validate parse-ability early; store as PEM bytes for tonic.
parse_pem_certs(&cert_bytes)?;
parse_pem_private_key(&key_bytes)?;
let identity_pem = MtlsIdentityPem {
cert_pem: cert_bytes,
key_pem: key_bytes,
};
set_global_mtls_identity(Some(identity_pem)).await;
info!("Loaded mTLS client identity cert={:?} key={:?}", client_cert_path, client_key_path);
} else {
set_global_mtls_identity(None).await;
info!(
"mTLS client identity not configured (missing {:?} and/or {:?}); proceeding with server-only TLS",
client_cert_path, client_key_path
);
}
Ok(())
}
/// Helper function to load a certificate file and append to cert_data.
/// Returns true if the file was successfully loaded.
async fn load_cert_file(path: &str, cert_data: &mut Vec<u8>, desc: &str) -> bool {
if tokio::fs::metadata(path).await.is_ok() {
if let Ok(data) = tokio::fs::read(path).await {
cert_data.extend(data);
cert_data.push(b'\n');
info!("Loaded {} from {}", desc, path);
true
} else {
debug!("Failed to read {} from {}", desc, path);
false
}
} else {
debug!("{} file not found at {}", desc, path);
false
}
}
/// Load the certificate file if its name matches `cert_name`.
/// If it matches, the certificate data is appended to `cert_data`.
///
/// # Parameters
/// - `entry`: The directory entry to check.
/// - `cert_name`: The name of the certificate file to match.
/// - `cert_data`: A mutable vector to append loaded certificate data.
async fn load_if_matches(entry: &tokio::fs::DirEntry, cert_name: &str, cert_data: &mut Vec<u8>) {
let fname = entry.file_name().to_string_lossy().to_string();
if fname == cert_name {
let p = entry.path();
load_cert_file(&p.to_string_lossy(), cert_data, "certificate").await;
}
}
/// Search the directory at `path` and one level of subdirectories to find and load
/// certificates matching `cert_name`. Loaded certificate data is appended to
/// `cert_data`.
/// # Parameters
/// - `path`: The starting directory path to search for certificates.
/// - `cert_name`: The name of the certificate file to look for.
/// - `cert_data`: A mutable vector to append loaded certificate data.
async fn walk_dir(path: PathBuf, cert_name: &str, cert_data: &mut Vec<u8>) {
if let Ok(mut rd) = tokio::fs::read_dir(&path).await {
while let Ok(Some(entry)) = rd.next_entry().await {
if let Ok(ft) = entry.file_type().await {
if ft.is_file() {
load_if_matches(&entry, cert_name, cert_data).await;
} else if ft.is_dir() {
// Only check direct subdirectories, no deeper recursion
if let Ok(mut sub_rd) = tokio::fs::read_dir(&entry.path()).await {
while let Ok(Some(sub_entry)) = sub_rd.next_entry().await {
if let Ok(sub_ft) = sub_entry.file_type().await
&& sub_ft.is_file()
{
load_if_matches(&sub_entry, cert_name, cert_data).await;
}
// Ignore subdirectories and symlinks in subdirs to limit to one level
}
}
} else if ft.is_symlink() {
// Follow symlink and treat target as file or directory, but limit to one level
if let Ok(meta) = tokio::fs::metadata(&entry.path()).await {
if meta.is_file() {
load_if_matches(&entry, cert_name, cert_data).await;
} else if meta.is_dir() {
// Treat as directory but only check its direct contents
if let Ok(mut sub_rd) = tokio::fs::read_dir(&entry.path()).await {
while let Ok(Some(sub_entry)) = sub_rd.next_entry().await {
if let Ok(sub_ft) = sub_entry.file_type().await
&& sub_ft.is_file()
{
load_if_matches(&sub_entry, cert_name, cert_data).await;
}
// Ignore deeper levels
}
}
}
}
}
}
}
} else {
debug!("Certificate directory not found: {}", path.display());
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/config/workload_profiles.rs | rustfs/src/config/workload_profiles.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
//! Adaptive buffer sizing optimization for different workload types.
//!
//! This module provides intelligent buffer size selection based on file size and workload profile
//! to achieve optimal balance between performance, memory usage, and security.
use rustfs_config::{KI_B, MI_B};
use std::sync::OnceLock;
use std::sync::atomic::{AtomicBool, Ordering};
/// Global buffer configuration that can be set at application startup
static GLOBAL_BUFFER_CONFIG: OnceLock<RustFSBufferConfig> = OnceLock::new();
/// Global flag indicating whether buffer profiles are enabled
static BUFFER_PROFILE_ENABLED: AtomicBool = AtomicBool::new(false);
/// Enable or disable buffer profiling globally
///
/// This controls whether the opt-in buffer profiling feature is active.
///
/// # Arguments
/// * `enabled` - Whether to enable buffer profiling
pub fn set_buffer_profile_enabled(enabled: bool) {
BUFFER_PROFILE_ENABLED.store(enabled, Ordering::Relaxed);
}
/// Check if buffer profiling is enabled globally
pub fn is_buffer_profile_enabled() -> bool {
BUFFER_PROFILE_ENABLED.load(Ordering::Relaxed)
}
/// Initialize the global buffer configuration
///
/// This should be called once at application startup with the desired profile.
/// If not called, the default GeneralPurpose profile will be used.
///
/// # Arguments
/// * `config` - The buffer configuration to use globally
///
/// # Examples
/// ```ignore
/// use rustfs::config::workload_profiles::{RustFSBufferConfig, WorkloadProfile};
///
/// // Initialize with AiTraining profile
/// init_global_buffer_config(RustFSBufferConfig::new(WorkloadProfile::AiTraining));
/// ```
pub fn init_global_buffer_config(config: RustFSBufferConfig) {
let _ = GLOBAL_BUFFER_CONFIG.set(config);
}
/// Get the global buffer configuration
///
/// Returns the configured profile, or GeneralPurpose if not initialized.
pub fn get_global_buffer_config() -> &'static RustFSBufferConfig {
GLOBAL_BUFFER_CONFIG.get_or_init(RustFSBufferConfig::default)
}
/// Workload profile types that define buffer sizing strategies
#[derive(Debug, Clone, PartialEq)]
pub enum WorkloadProfile {
/// General purpose - default configuration with balanced performance and memory
GeneralPurpose,
/// AI/ML training: optimized for large sequential reads with maximum throughput
AiTraining,
/// Data analytics: mixed read-write patterns with moderate buffer sizes
DataAnalytics,
/// Web workloads: small file intensive with minimal memory overhead
WebWorkload,
/// Industrial IoT: real-time streaming with low latency priority
IndustrialIoT,
/// Secure storage: security first, memory constrained for compliance
SecureStorage,
/// Custom configuration for specialized requirements
Custom(BufferConfig),
}
/// Buffer size configuration for adaptive buffering
#[derive(Debug, Clone, PartialEq)]
pub struct BufferConfig {
/// Minimum buffer size in bytes (for very small files or memory-constrained environments)
pub min_size: usize,
/// Maximum buffer size in bytes (cap for large files to prevent excessive memory usage)
pub max_size: usize,
/// Default size for unknown file size scenarios (streaming/chunked uploads)
pub default_unknown: usize,
/// File size thresholds and corresponding buffer sizes: (file_size_threshold, buffer_size)
/// Thresholds should be in ascending order
pub thresholds: Vec<(i64, usize)>,
}
/// Complete buffer configuration for RustFS
#[derive(Debug, Clone)]
pub struct RustFSBufferConfig {
/// Selected workload profile
pub workload: WorkloadProfile,
/// Computed buffer configuration (either from profile or custom)
pub base_config: BufferConfig,
}
impl WorkloadProfile {
/// Parse a workload profile from a string name
///
/// # Arguments
/// * `name` - The name of the profile (case-insensitive)
///
/// # Returns
/// The corresponding WorkloadProfile, or GeneralPurpose if name is not recognized
///
/// # Examples
/// ```
/// use rustfs::config::workload_profiles::WorkloadProfile;
///
/// let profile = WorkloadProfile::from_name("AiTraining");
/// let profile2 = WorkloadProfile::from_name("aitraining"); // case-insensitive
/// let profile3 = WorkloadProfile::from_name("unknown"); // defaults to GeneralPurpose
/// ```
pub fn from_name(name: &str) -> Self {
match name.to_lowercase().as_str() {
"generalpurpose" | "general" => WorkloadProfile::GeneralPurpose,
"aitraining" | "ai" => WorkloadProfile::AiTraining,
"dataanalytics" | "analytics" => WorkloadProfile::DataAnalytics,
"webworkload" | "web" => WorkloadProfile::WebWorkload,
"industrialiot" | "iot" => WorkloadProfile::IndustrialIoT,
"securestorage" | "secure" => WorkloadProfile::SecureStorage,
_ => {
// Default to GeneralPurpose for unknown profiles
WorkloadProfile::GeneralPurpose
}
}
}
/// Get the buffer configuration for this workload profile
pub fn config(&self) -> BufferConfig {
match self {
WorkloadProfile::GeneralPurpose => Self::general_purpose_config(),
WorkloadProfile::AiTraining => Self::ai_training_config(),
WorkloadProfile::DataAnalytics => Self::data_analytics_config(),
WorkloadProfile::WebWorkload => Self::web_workload_config(),
WorkloadProfile::IndustrialIoT => Self::industrial_iot_config(),
WorkloadProfile::SecureStorage => Self::secure_storage_config(),
WorkloadProfile::Custom(config) => config.clone(),
}
}
/// General purpose configuration: balanced performance and memory usage
/// - Small files (< 1MB): 64KB buffer
/// - Medium files (1MB-100MB): 256KB buffer
/// - Large files (>= 100MB): 1MB buffer
fn general_purpose_config() -> BufferConfig {
BufferConfig {
min_size: 64 * KI_B,
max_size: MI_B,
default_unknown: MI_B,
thresholds: vec![
(MI_B as i64, 64 * KI_B), // < 1MB: 64KB
(100 * MI_B as i64, 256 * KI_B), // 1MB-100MB: 256KB
(i64::MAX, MI_B), // >= 100MB: 1MB
],
}
}
/// AI/ML training configuration: optimized for large sequential reads
/// - Small files (< 10MB): 512KB buffer
/// - Medium files (10MB-500MB): 2MB buffer
/// - Large files (>= 500MB): 4MB buffer for maximum throughput
fn ai_training_config() -> BufferConfig {
BufferConfig {
min_size: 512 * KI_B,
max_size: 4 * MI_B,
default_unknown: 2 * MI_B,
thresholds: vec![
(10 * MI_B as i64, 512 * KI_B), // < 10MB: 512KB
(500 * MI_B as i64, 2 * MI_B), // 10MB-500MB: 2MB
(i64::MAX, 4 * MI_B), // >= 500MB: 4MB
],
}
}
/// Data analytics configuration: mixed read-write patterns
/// - Small files (< 5MB): 128KB buffer
/// - Medium files (5MB-200MB): 512KB buffer
/// - Large files (>= 200MB): 2MB buffer
fn data_analytics_config() -> BufferConfig {
BufferConfig {
min_size: 128 * KI_B,
max_size: 2 * MI_B,
default_unknown: 512 * KI_B,
thresholds: vec![
(5 * MI_B as i64, 128 * KI_B), // < 5MB: 128KB
(200 * MI_B as i64, 512 * KI_B), // 5MB-200MB: 512KB
(i64::MAX, 2 * MI_B), // >= 200MB: 2MB
],
}
}
/// Web workload configuration: small file intensive
/// - Small files (< 512KB): 32KB buffer to minimize memory
/// - Medium files (512KB-10MB): 128KB buffer
/// - Large files (>= 10MB): 256KB buffer (rare for web assets)
fn web_workload_config() -> BufferConfig {
BufferConfig {
min_size: 32 * KI_B,
max_size: 256 * KI_B,
default_unknown: 128 * KI_B,
thresholds: vec![
(512 * KI_B as i64, 32 * KI_B), // < 512KB: 32KB
(10 * MI_B as i64, 128 * KI_B), // 512KB-10MB: 128KB
(i64::MAX, 256 * KI_B), // >= 10MB: 256KB
],
}
}
/// Industrial IoT configuration: real-time streaming with low latency
/// - Small files (< 1MB): 64KB buffer for quick processing
/// - Medium files (1MB-50MB): 256KB buffer
/// - Large files (>= 50MB): 512KB buffer (cap for memory constraints)
fn industrial_iot_config() -> BufferConfig {
BufferConfig {
min_size: 64 * KI_B,
max_size: 512 * KI_B,
default_unknown: 256 * KI_B,
thresholds: vec![
(MI_B as i64, 64 * KI_B), // < 1MB: 64KB
(50 * MI_B as i64, 256 * KI_B), // 1MB-50MB: 256KB
(i64::MAX, 512 * KI_B), // >= 50MB: 512KB
],
}
}
/// Secure storage configuration: security first, memory constrained
/// - Small files (< 1MB): 32KB buffer (minimal memory footprint)
/// - Medium files (1MB-50MB): 128KB buffer
/// - Large files (>= 50MB): 256KB buffer (strict memory limit for compliance)
fn secure_storage_config() -> BufferConfig {
BufferConfig {
min_size: 32 * KI_B,
max_size: 256 * KI_B,
default_unknown: 128 * KI_B,
thresholds: vec![
(MI_B as i64, 32 * KI_B), // < 1MB: 32KB
(50 * MI_B as i64, 128 * KI_B), // 1MB-50MB: 128KB
(i64::MAX, 256 * KI_B), // >= 50MB: 256KB
],
}
}
/// Detect special OS environment and return appropriate workload profile
/// Supports Chinese secure operating systems (Kylin, NeoKylin, Unity OS, etc.)
pub fn detect_os_environment() -> Option<WorkloadProfile> {
#[cfg(target_os = "linux")]
{
// Read /etc/os-release to detect Chinese secure OS distributions
if let Ok(content) = std::fs::read_to_string("/etc/os-release") {
let content_lower = content.to_lowercase();
// Check for Chinese secure OS distributions
if content_lower.contains("kylin")
|| content_lower.contains("neokylin")
|| content_lower.contains("uos")
|| content_lower.contains("unity")
|| content_lower.contains("openkylin")
{
// Use SecureStorage profile for Chinese secure OS environments
return Some(WorkloadProfile::SecureStorage);
}
}
}
None
}
}
impl BufferConfig {
/// Calculate the optimal buffer size for a given file size
///
/// # Arguments
/// * `file_size` - The size of the file in bytes, or -1 if unknown
///
/// # Returns
/// Optimal buffer size in bytes based on the configuration
pub fn calculate_buffer_size(&self, file_size: i64) -> usize {
// Handle unknown or negative file sizes
if file_size < 0 {
return self.default_unknown.clamp(self.min_size, self.max_size);
}
// Find the appropriate buffer size from thresholds
for (threshold, buffer_size) in &self.thresholds {
if file_size < *threshold {
return (*buffer_size).clamp(self.min_size, self.max_size);
}
}
// Fallback to max_size if no threshold matched (shouldn't happen with i64::MAX threshold)
self.max_size
}
/// Validate the buffer configuration
pub fn validate(&self) -> Result<(), String> {
if self.min_size == 0 {
return Err("min_size must be greater than 0".to_string());
}
if self.max_size < self.min_size {
return Err("max_size must be >= min_size".to_string());
}
if self.default_unknown < self.min_size || self.default_unknown > self.max_size {
return Err("default_unknown must be between min_size and max_size".to_string());
}
if self.thresholds.is_empty() {
return Err("thresholds cannot be empty".to_string());
}
// Validate thresholds are in ascending order
let mut prev_threshold = -1i64;
for (threshold, buffer_size) in &self.thresholds {
if *threshold <= prev_threshold {
return Err("thresholds must be in ascending order".to_string());
}
if *buffer_size < self.min_size || *buffer_size > self.max_size {
return Err(format!(
"buffer_size {} must be between min_size {} and max_size {}",
buffer_size, self.min_size, self.max_size
));
}
prev_threshold = *threshold;
}
Ok(())
}
}
impl RustFSBufferConfig {
/// Create a new buffer configuration with the given workload profile
pub fn new(workload: WorkloadProfile) -> Self {
let base_config = workload.config();
Self { workload, base_config }
}
/// Create a configuration with auto-detected OS environment
/// Falls back to GeneralPurpose if no special environment detected
pub fn with_auto_detect() -> Self {
let workload = WorkloadProfile::detect_os_environment().unwrap_or(WorkloadProfile::GeneralPurpose);
Self::new(workload)
}
/// Get the buffer size for a given file size
pub fn get_buffer_size(&self, file_size: i64) -> usize {
self.base_config.calculate_buffer_size(file_size)
}
}
impl Default for RustFSBufferConfig {
fn default() -> Self {
Self::new(WorkloadProfile::GeneralPurpose)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_general_purpose_config() {
let config = WorkloadProfile::GeneralPurpose.config();
// Test small files (< 1MB) - should use 64KB
assert_eq!(config.calculate_buffer_size(0), 64 * KI_B);
assert_eq!(config.calculate_buffer_size(512 * KI_B as i64), 64 * KI_B);
assert_eq!(config.calculate_buffer_size((MI_B - 1) as i64), 64 * KI_B);
// Test medium files (1MB - 100MB) - should use 256KB
assert_eq!(config.calculate_buffer_size(MI_B as i64), 256 * KI_B);
assert_eq!(config.calculate_buffer_size((50 * MI_B) as i64), 256 * KI_B);
assert_eq!(config.calculate_buffer_size((100 * MI_B - 1) as i64), 256 * KI_B);
// Test large files (>= 100MB) - should use 1MB
assert_eq!(config.calculate_buffer_size((100 * MI_B) as i64), MI_B);
assert_eq!(config.calculate_buffer_size((500 * MI_B) as i64), MI_B);
assert_eq!(config.calculate_buffer_size((10 * 1024 * MI_B) as i64), MI_B);
// Test unknown size
assert_eq!(config.calculate_buffer_size(-1), MI_B);
}
#[test]
fn test_ai_training_config() {
let config = WorkloadProfile::AiTraining.config();
// Test small files
assert_eq!(config.calculate_buffer_size((5 * MI_B) as i64), 512 * KI_B);
assert_eq!(config.calculate_buffer_size((10 * MI_B - 1) as i64), 512 * KI_B);
// Test medium files
assert_eq!(config.calculate_buffer_size((10 * MI_B) as i64), 2 * MI_B);
assert_eq!(config.calculate_buffer_size((100 * MI_B) as i64), 2 * MI_B);
assert_eq!(config.calculate_buffer_size((500 * MI_B - 1) as i64), 2 * MI_B);
// Test large files
assert_eq!(config.calculate_buffer_size((500 * MI_B) as i64), 4 * MI_B);
assert_eq!(config.calculate_buffer_size((1024 * MI_B) as i64), 4 * MI_B);
// Test unknown size
assert_eq!(config.calculate_buffer_size(-1), 2 * MI_B);
}
#[test]
fn test_web_workload_config() {
let config = WorkloadProfile::WebWorkload.config();
// Test small files
assert_eq!(config.calculate_buffer_size((100 * KI_B) as i64), 32 * KI_B);
assert_eq!(config.calculate_buffer_size((512 * KI_B - 1) as i64), 32 * KI_B);
// Test medium files
assert_eq!(config.calculate_buffer_size((512 * KI_B) as i64), 128 * KI_B);
assert_eq!(config.calculate_buffer_size((5 * MI_B) as i64), 128 * KI_B);
assert_eq!(config.calculate_buffer_size((10 * MI_B - 1) as i64), 128 * KI_B);
// Test large files
assert_eq!(config.calculate_buffer_size((10 * MI_B) as i64), 256 * KI_B);
assert_eq!(config.calculate_buffer_size((50 * MI_B) as i64), 256 * KI_B);
// Test unknown size
assert_eq!(config.calculate_buffer_size(-1), 128 * KI_B);
}
#[test]
fn test_secure_storage_config() {
let config = WorkloadProfile::SecureStorage.config();
// Test small files
assert_eq!(config.calculate_buffer_size((500 * KI_B) as i64), 32 * KI_B);
assert_eq!(config.calculate_buffer_size((MI_B - 1) as i64), 32 * KI_B);
// Test medium files
assert_eq!(config.calculate_buffer_size(MI_B as i64), 128 * KI_B);
assert_eq!(config.calculate_buffer_size((25 * MI_B) as i64), 128 * KI_B);
assert_eq!(config.calculate_buffer_size((50 * MI_B - 1) as i64), 128 * KI_B);
// Test large files
assert_eq!(config.calculate_buffer_size((50 * MI_B) as i64), 256 * KI_B);
assert_eq!(config.calculate_buffer_size((100 * MI_B) as i64), 256 * KI_B);
// Test unknown size
assert_eq!(config.calculate_buffer_size(-1), 128 * KI_B);
}
#[test]
fn test_industrial_iot_config() {
let config = WorkloadProfile::IndustrialIoT.config();
// Test configuration
assert_eq!(config.calculate_buffer_size((500 * KI_B) as i64), 64 * KI_B);
assert_eq!(config.calculate_buffer_size((25 * MI_B) as i64), 256 * KI_B);
assert_eq!(config.calculate_buffer_size((100 * MI_B) as i64), 512 * KI_B);
assert_eq!(config.calculate_buffer_size(-1), 256 * KI_B);
}
#[test]
fn test_data_analytics_config() {
let config = WorkloadProfile::DataAnalytics.config();
// Test configuration
assert_eq!(config.calculate_buffer_size((2 * MI_B) as i64), 128 * KI_B);
assert_eq!(config.calculate_buffer_size((100 * MI_B) as i64), 512 * KI_B);
assert_eq!(config.calculate_buffer_size((500 * MI_B) as i64), 2 * MI_B);
assert_eq!(config.calculate_buffer_size(-1), 512 * KI_B);
}
#[test]
fn test_custom_config() {
let custom_config = BufferConfig {
min_size: 16 * KI_B,
max_size: 512 * KI_B,
default_unknown: 128 * KI_B,
thresholds: vec![(MI_B as i64, 64 * KI_B), (i64::MAX, 256 * KI_B)],
};
let profile = WorkloadProfile::Custom(custom_config.clone());
let config = profile.config();
assert_eq!(config.calculate_buffer_size(512 * KI_B as i64), 64 * KI_B);
assert_eq!(config.calculate_buffer_size(2 * MI_B as i64), 256 * KI_B);
assert_eq!(config.calculate_buffer_size(-1), 128 * KI_B);
}
#[test]
fn test_buffer_config_validation() {
// Valid configuration
let valid_config = BufferConfig {
min_size: 32 * KI_B,
max_size: MI_B,
default_unknown: 256 * KI_B,
thresholds: vec![(MI_B as i64, 128 * KI_B), (i64::MAX, 512 * KI_B)],
};
assert!(valid_config.validate().is_ok());
// Invalid: min_size is 0
let invalid_config = BufferConfig {
min_size: 0,
max_size: MI_B,
default_unknown: 256 * KI_B,
thresholds: vec![(MI_B as i64, 128 * KI_B)],
};
assert!(invalid_config.validate().is_err());
// Invalid: max_size < min_size
let invalid_config = BufferConfig {
min_size: MI_B,
max_size: 32 * KI_B,
default_unknown: 256 * KI_B,
thresholds: vec![(MI_B as i64, 128 * KI_B)],
};
assert!(invalid_config.validate().is_err());
// Invalid: default_unknown out of range
let invalid_config = BufferConfig {
min_size: 32 * KI_B,
max_size: 256 * KI_B,
default_unknown: MI_B,
thresholds: vec![(MI_B as i64, 128 * KI_B)],
};
assert!(invalid_config.validate().is_err());
// Invalid: empty thresholds
let invalid_config = BufferConfig {
min_size: 32 * KI_B,
max_size: MI_B,
default_unknown: 256 * KI_B,
thresholds: vec![],
};
assert!(invalid_config.validate().is_err());
// Invalid: thresholds not in ascending order
let invalid_config = BufferConfig {
min_size: 32 * KI_B,
max_size: MI_B,
default_unknown: 256 * KI_B,
thresholds: vec![(100 * MI_B as i64, 512 * KI_B), (MI_B as i64, 128 * KI_B)],
};
assert!(invalid_config.validate().is_err());
}
#[test]
fn test_rustfs_buffer_config() {
let config = RustFSBufferConfig::new(WorkloadProfile::GeneralPurpose);
assert_eq!(config.get_buffer_size(500 * KI_B as i64), 64 * KI_B);
assert_eq!(config.get_buffer_size(50 * MI_B as i64), 256 * KI_B);
assert_eq!(config.get_buffer_size(200 * MI_B as i64), MI_B);
let default_config = RustFSBufferConfig::default();
assert_eq!(default_config.get_buffer_size(500 * KI_B as i64), 64 * KI_B);
}
#[test]
fn test_workload_profile_equality() {
assert_eq!(WorkloadProfile::GeneralPurpose, WorkloadProfile::GeneralPurpose);
assert_ne!(WorkloadProfile::GeneralPurpose, WorkloadProfile::AiTraining);
let custom1 = BufferConfig {
min_size: 32 * KI_B,
max_size: MI_B,
default_unknown: 256 * KI_B,
thresholds: vec![(MI_B as i64, 128 * KI_B)],
};
let custom2 = custom1.clone();
assert_eq!(WorkloadProfile::Custom(custom1.clone()), WorkloadProfile::Custom(custom2));
}
#[test]
fn test_workload_profile_from_name() {
// Test exact matches (case-insensitive)
assert_eq!(WorkloadProfile::from_name("GeneralPurpose"), WorkloadProfile::GeneralPurpose);
assert_eq!(WorkloadProfile::from_name("generalpurpose"), WorkloadProfile::GeneralPurpose);
assert_eq!(WorkloadProfile::from_name("GENERALPURPOSE"), WorkloadProfile::GeneralPurpose);
assert_eq!(WorkloadProfile::from_name("general"), WorkloadProfile::GeneralPurpose);
assert_eq!(WorkloadProfile::from_name("AiTraining"), WorkloadProfile::AiTraining);
assert_eq!(WorkloadProfile::from_name("aitraining"), WorkloadProfile::AiTraining);
assert_eq!(WorkloadProfile::from_name("ai"), WorkloadProfile::AiTraining);
assert_eq!(WorkloadProfile::from_name("DataAnalytics"), WorkloadProfile::DataAnalytics);
assert_eq!(WorkloadProfile::from_name("dataanalytics"), WorkloadProfile::DataAnalytics);
assert_eq!(WorkloadProfile::from_name("analytics"), WorkloadProfile::DataAnalytics);
assert_eq!(WorkloadProfile::from_name("WebWorkload"), WorkloadProfile::WebWorkload);
assert_eq!(WorkloadProfile::from_name("webworkload"), WorkloadProfile::WebWorkload);
assert_eq!(WorkloadProfile::from_name("web"), WorkloadProfile::WebWorkload);
assert_eq!(WorkloadProfile::from_name("IndustrialIoT"), WorkloadProfile::IndustrialIoT);
assert_eq!(WorkloadProfile::from_name("industrialiot"), WorkloadProfile::IndustrialIoT);
assert_eq!(WorkloadProfile::from_name("iot"), WorkloadProfile::IndustrialIoT);
assert_eq!(WorkloadProfile::from_name("SecureStorage"), WorkloadProfile::SecureStorage);
assert_eq!(WorkloadProfile::from_name("securestorage"), WorkloadProfile::SecureStorage);
assert_eq!(WorkloadProfile::from_name("secure"), WorkloadProfile::SecureStorage);
// Test unknown name defaults to GeneralPurpose
assert_eq!(WorkloadProfile::from_name("unknown"), WorkloadProfile::GeneralPurpose);
assert_eq!(WorkloadProfile::from_name("invalid"), WorkloadProfile::GeneralPurpose);
assert_eq!(WorkloadProfile::from_name(""), WorkloadProfile::GeneralPurpose);
}
#[test]
fn test_global_buffer_config() {
use super::{is_buffer_profile_enabled, set_buffer_profile_enabled};
// Test enable/disable
set_buffer_profile_enabled(true);
assert!(is_buffer_profile_enabled());
set_buffer_profile_enabled(false);
assert!(!is_buffer_profile_enabled());
// Reset for other tests
set_buffer_profile_enabled(false);
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/config/config_test.rs | rustfs/src/config/config_test.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(test)]
#[allow(unsafe_op_in_unsafe_fn)]
mod tests {
use crate::config::Opt;
use clap::Parser;
use rustfs_ecstore::disks_layout::DisksLayout;
use serial_test::serial;
use std::env;
/// Helper function to run test with environment variable set.
/// Automatically cleans up the environment variable after the test.
///
/// # Safety
/// This function uses unsafe env::set_var and env::remove_var.
/// Tests using this helper must be marked with #[serial] to avoid race conditions.
#[allow(unsafe_code)]
fn with_env_var<F>(key: &str, value: &str, test_fn: F)
where
F: FnOnce(),
{
unsafe {
env::set_var(key, value);
}
// Ensure cleanup happens even if test panics
let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(test_fn));
unsafe {
env::remove_var(key);
}
// Re-panic if the test failed
if let Err(e) = result {
std::panic::resume_unwind(e);
}
}
/// Helper to parse volumes and verify the layout.
fn verify_layout<T, F>(volumes: &[T], verify_fn: F)
where
T: AsRef<str>,
F: FnOnce(&DisksLayout),
{
let layout = DisksLayout::from_volumes(volumes).expect("Failed to parse volumes");
verify_fn(&layout);
}
#[test]
fn test_default_console_configuration() {
// Test that default console configuration is correct
let args = vec!["rustfs", "/test/volume"];
let opt = Opt::parse_from(args);
assert!(opt.console_enable);
assert_eq!(opt.console_address, ":9001");
assert_eq!(opt.address, ":9000");
}
#[test]
fn test_custom_console_configuration() {
// Test custom console configuration
let args = vec![
"rustfs",
"/test/volume",
"--console-address",
":8080",
"--address",
":8000",
"--console-enable",
"false",
];
let opt = Opt::parse_from(args);
assert!(opt.console_enable);
assert_eq!(opt.console_address, ":8080");
assert_eq!(opt.address, ":8000");
}
#[test]
fn test_console_and_endpoint_ports_different() {
// Ensure console and endpoint use different default ports
let args = vec!["rustfs", "/test/volume"];
let opt = Opt::parse_from(args);
// Parse port numbers from addresses
let endpoint_port: u16 = opt.address.trim_start_matches(':').parse().expect("Invalid endpoint port");
let console_port: u16 = opt
.console_address
.trim_start_matches(':')
.parse()
.expect("Invalid console port");
assert_ne!(endpoint_port, console_port, "Console and endpoint should use different ports");
assert_eq!(endpoint_port, 9000);
assert_eq!(console_port, 9001);
}
#[test]
fn test_volumes_and_disk_layout_parsing() {
use rustfs_ecstore::disks_layout::DisksLayout;
// Test case 1: Single volume path
let args = vec!["rustfs", "/data/vol1"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 1);
assert_eq!(opt.volumes[0], "/data/vol1");
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse single volume");
assert!(!layout.is_empty_layout());
assert!(layout.is_single_drive_layout());
assert_eq!(layout.get_single_drive_layout(), "/data/vol1");
// Test case 2: Multiple volume paths (space-separated via env)
let args = vec!["rustfs", "/data/vol1", "/data/vol2", "/data/vol3", "/data/vol4"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 4);
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse multiple volumes");
assert!(!layout.is_empty_layout());
assert!(!layout.is_single_drive_layout());
assert_eq!(layout.get_set_count(0), 1);
assert_eq!(layout.get_drives_per_set(0), 4);
// Test case 3: Ellipses pattern - simple range
let args = vec!["rustfs", "/data/vol{1...4}"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 1);
assert_eq!(opt.volumes[0], "/data/vol{1...4}");
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse ellipses pattern");
assert!(!layout.is_empty_layout());
assert_eq!(layout.get_set_count(0), 1);
assert_eq!(layout.get_drives_per_set(0), 4);
// Test case 4: Ellipses pattern - larger range that creates multiple sets
let args = vec!["rustfs", "/data/vol{1...16}"];
let opt = Opt::parse_from(args);
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse ellipses with multiple sets");
assert!(!layout.is_empty_layout());
assert_eq!(layout.get_drives_per_set(0), 16);
// Test case 5: Distributed setup pattern
let args = vec!["rustfs", "http://server{1...4}/data/vol{1...4}"];
let opt = Opt::parse_from(args);
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse distributed pattern");
assert!(!layout.is_empty_layout());
assert_eq!(layout.get_drives_per_set(0), 16);
// Test case 6: Multiple pools (legacy: false)
let args = vec!["rustfs", "http://server1/data{1...4}", "http://server2/data{1...4}"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 2);
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse multiple pools");
assert!(!layout.legacy);
assert_eq!(layout.pools.len(), 2);
// Test case 7: Minimum valid drives for erasure coding (2 drives minimum)
let args = vec!["rustfs", "/data/vol1", "/data/vol2"];
let opt = Opt::parse_from(args);
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Should succeed with 2 drives");
assert_eq!(layout.get_drives_per_set(0), 2);
// Test case 8: Invalid - single drive not enough for erasure coding
let args = vec!["rustfs", "/data/vol1"];
let opt = Opt::parse_from(args);
// Single drive is special case and should succeed for single drive layout
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Single drive should work");
assert!(layout.is_single_drive_layout());
// Test case 9: Command line with both address and volumes
let args = vec![
"rustfs",
"/data/vol{1...8}",
"--address",
":9000",
"--console-address",
":9001",
];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 1);
assert_eq!(opt.address, ":9000");
assert_eq!(opt.console_address, ":9001");
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse with address args");
assert!(!layout.is_empty_layout());
assert_eq!(layout.get_drives_per_set(0), 8);
// Test case 10: Multiple ellipses in single argument - nested pattern
let args = vec!["rustfs", "/data{0...3}/vol{0...4}"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 1);
assert_eq!(opt.volumes[0], "/data{0...3}/vol{0...4}");
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse nested ellipses pattern");
assert!(!layout.is_empty_layout());
// 4 data dirs * 5 vols = 20 drives
let total_drives = layout.get_set_count(0) * layout.get_drives_per_set(0);
assert_eq!(total_drives, 20, "Expected 20 drives from /data{{0...3}}/vol{{0...4}}");
// Test case 11: Multiple pools with nested ellipses patterns
let args = vec!["rustfs", "/data{0...3}/vol{0...4}", "/data{4...7}/vol{0...4}"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 2);
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse multiple pools with nested patterns");
assert!(!layout.legacy);
assert_eq!(layout.pools.len(), 2);
// Each pool should have 20 drives (4 * 5)
let pool0_drives = layout.get_set_count(0) * layout.get_drives_per_set(0);
let pool1_drives = layout.get_set_count(1) * layout.get_drives_per_set(1);
assert_eq!(pool0_drives, 20, "Pool 0 should have 20 drives");
assert_eq!(pool1_drives, 20, "Pool 1 should have 20 drives");
// Test case 11: Complex distributed pattern with multiple ellipses
let args = vec!["rustfs", "http://server{1...2}.local/disk{1...8}"];
let opt = Opt::parse_from(args);
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse distributed nested pattern");
assert!(!layout.is_empty_layout());
// 2 servers * 8 disks = 16 drives
let total_drives = layout.get_set_count(0) * layout.get_drives_per_set(0);
assert_eq!(total_drives, 16, "Expected 16 drives from server{{1...2}}/disk{{1...8}}");
// Test case 12: Zero-padded patterns
let args = vec!["rustfs", "/data/vol{01...16}"];
let opt = Opt::parse_from(args);
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse zero-padded pattern");
assert!(!layout.is_empty_layout());
assert_eq!(layout.get_drives_per_set(0), 16);
}
/// Test environment variable parsing for volumes.
/// Uses #[serial] to avoid concurrent env var modifications.
///
/// NOTE(review): `with_env_var` presumably sets the variable, runs the
/// closure, then restores the previous value — confirm against the helper.
#[test]
#[serial]
#[allow(unsafe_code)]
fn test_rustfs_volumes_env_variable() {
    // Test case 1: Single volume via environment variable
    with_env_var("RUSTFS_VOLUMES", "/data/vol1", || {
        let args = vec!["rustfs"];
        let opt = Opt::parse_from(args);
        assert_eq!(opt.volumes.len(), 1);
        assert_eq!(opt.volumes[0], "/data/vol1");
        let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse single volume from env");
        assert!(layout.is_single_drive_layout());
    });
    // Test case 2: Multiple volumes via environment variable (space-separated)
    // `value_delimiter = ' '` on Opt::volumes splits the env value.
    with_env_var("RUSTFS_VOLUMES", "/data/vol1 /data/vol2 /data/vol3 /data/vol4", || {
        let args = vec!["rustfs"];
        let opt = Opt::parse_from(args);
        assert_eq!(opt.volumes.len(), 4);
        assert_eq!(opt.volumes[0], "/data/vol1");
        assert_eq!(opt.volumes[1], "/data/vol2");
        assert_eq!(opt.volumes[2], "/data/vol3");
        assert_eq!(opt.volumes[3], "/data/vol4");
        verify_layout(&opt.volumes, |layout| {
            assert!(!layout.is_single_drive_layout());
            assert_eq!(layout.get_drives_per_set(0), 4);
        });
    });
    // Test case 3: Ellipses pattern via environment variable
    with_env_var("RUSTFS_VOLUMES", "/data/vol{1...4}", || {
        let args = vec!["rustfs"];
        let opt = Opt::parse_from(args);
        assert_eq!(opt.volumes.len(), 1);
        assert_eq!(opt.volumes[0], "/data/vol{1...4}");
        verify_layout(&opt.volumes, |layout| {
            assert_eq!(layout.get_drives_per_set(0), 4);
        });
    });
    // Test case 4: Larger range with ellipses
    with_env_var("RUSTFS_VOLUMES", "/data/vol{1...16}", || {
        let args = vec!["rustfs"];
        let opt = Opt::parse_from(args);
        verify_layout(&opt.volumes, |layout| {
            assert_eq!(layout.get_drives_per_set(0), 16);
        });
    });
    // Test case 5: Distributed setup pattern (4 servers x 4 vols = 16 drives)
    with_env_var("RUSTFS_VOLUMES", "http://server{1...4}/data/vol{1...4}", || {
        let args = vec!["rustfs"];
        let opt = Opt::parse_from(args);
        verify_layout(&opt.volumes, |layout| {
            assert_eq!(layout.get_drives_per_set(0), 16);
        });
    });
    // Test case 6: Multiple pools via environment variable (space-separated)
    with_env_var("RUSTFS_VOLUMES", "http://server1/data{1...4} http://server2/data{1...4}", || {
        let args = vec!["rustfs"];
        let opt = Opt::parse_from(args);
        assert_eq!(opt.volumes.len(), 2);
        verify_layout(&opt.volumes, |layout| {
            assert!(!layout.legacy);
            assert_eq!(layout.pools.len(), 2);
        });
    });
    // Test case 7: Nested ellipses pattern (4 data dirs x 5 vols = 20 drives)
    with_env_var("RUSTFS_VOLUMES", "/data{0...3}/vol{0...4}", || {
        let args = vec!["rustfs"];
        let opt = Opt::parse_from(args);
        assert_eq!(opt.volumes.len(), 1);
        assert_eq!(opt.volumes[0], "/data{0...3}/vol{0...4}");
        verify_layout(&opt.volumes, |layout| {
            let total_drives = layout.get_set_count(0) * layout.get_drives_per_set(0);
            assert_eq!(total_drives, 20, "Expected 20 drives from /data{{0...3}}/vol{{0...4}}");
        });
    });
    // Test case 8: Multiple pools with nested ellipses
    with_env_var("RUSTFS_VOLUMES", "/data{0...3}/vol{0...4} /data{4...7}/vol{0...4}", || {
        let args = vec!["rustfs"];
        let opt = Opt::parse_from(args);
        assert_eq!(opt.volumes.len(), 2);
        verify_layout(&opt.volumes, |layout| {
            assert_eq!(layout.pools.len(), 2);
            let pool0_drives = layout.get_set_count(0) * layout.get_drives_per_set(0);
            let pool1_drives = layout.get_set_count(1) * layout.get_drives_per_set(1);
            assert_eq!(pool0_drives, 20, "Pool 0 should have 20 drives");
            assert_eq!(pool1_drives, 20, "Pool 1 should have 20 drives");
        });
    });
    // Test case 9: Complex distributed pattern with multiple ellipses
    with_env_var("RUSTFS_VOLUMES", "http://server{1...2}.local/disk{1...8}", || {
        let args = vec!["rustfs"];
        let opt = Opt::parse_from(args);
        verify_layout(&opt.volumes, |layout| {
            let total_drives = layout.get_set_count(0) * layout.get_drives_per_set(0);
            assert_eq!(total_drives, 16, "Expected 16 drives from server{{1...2}}/disk{{1...8}}");
        });
    });
    // Test case 10: Zero-padded patterns
    with_env_var("RUSTFS_VOLUMES", "/data/vol{01...16}", || {
        let args = vec!["rustfs"];
        let opt = Opt::parse_from(args);
        verify_layout(&opt.volumes, |layout| {
            assert_eq!(layout.get_drives_per_set(0), 16);
        });
    });
    // Test case 11: Environment variable with additional CLI options
    // (volumes from env, addresses from flags — they must compose).
    with_env_var("RUSTFS_VOLUMES", "/data/vol{1...8}", || {
        let args = vec!["rustfs", "--address", ":9000", "--console-address", ":9001"];
        let opt = Opt::parse_from(args);
        assert_eq!(opt.volumes.len(), 1);
        assert_eq!(opt.address, ":9000");
        assert_eq!(opt.console_address, ":9001");
        verify_layout(&opt.volumes, |layout| {
            assert_eq!(layout.get_drives_per_set(0), 8);
        });
    });
    // Test case 12: Command line argument overrides environment variable
    with_env_var("RUSTFS_VOLUMES", "/data/vol1", || {
        let args = vec!["rustfs", "/override/vol1"];
        let opt = Opt::parse_from(args);
        assert_eq!(opt.volumes.len(), 1);
        // CLI argument should override environment variable
        assert_eq!(opt.volumes[0], "/override/vol1");
    });
}
/// Test boundary cases for path parsing.
/// NOTE: Current implementation uses space as delimiter,
/// which means paths with spaces are NOT supported.
#[test]
#[serial]
#[allow(unsafe_code)]
fn test_volumes_boundary_cases() {
    // Test case 1: Paths with spaces are not properly supported (known limitation)
    // This test documents the current behavior - space-separated paths will be split
    with_env_var("RUSTFS_VOLUMES", "/data/my disk/vol1", || {
        let args = vec!["rustfs"];
        let opt = Opt::try_parse_from(args).expect("Failed to parse with spaces in path");
        // Current behavior: space causes split into 2 volumes
        assert_eq!(opt.volumes.len(), 2, "Paths with spaces are split (known limitation)");
        assert_eq!(opt.volumes[0], "/data/my");
        assert_eq!(opt.volumes[1], "disk/vol1");
    });
    // Test case 2: Empty environment variable causes parsing failure
    // because volumes is required and NonEmptyStringValueParser filters empty strings
    with_env_var("RUSTFS_VOLUMES", "", || {
        let args = vec!["rustfs"];
        let result = Opt::try_parse_from(args);
        // Should fail because no volumes provided (empty string filtered out)
        assert!(result.is_err(), "Empty RUSTFS_VOLUMES should fail parsing (required field)");
    });
    // Test case 2b: Multiple consecutive spaces create empty strings during splitting
    // This causes parsing to fail because volumes is required and empty strings are invalid
    with_env_var("RUSTFS_VOLUMES", "/data/vol1  /data/vol2", || {
        let args = vec!["rustfs"];
        let result = Opt::try_parse_from(args);
        // Should fail because double space creates an empty element
        assert!(result.is_err(), "Multiple consecutive spaces should cause parsing failure");
    });
    // Test case 3: Very long path with ellipses (stress test)
    // Note: Large drive counts may be automatically split into multiple sets
    let long_path = format!("/very/long/path/structure/with/many/directories/vol{{1...{}}}", 100);
    with_env_var("RUSTFS_VOLUMES", &long_path, || {
        let args = vec!["rustfs"];
        let opt = Opt::try_parse_from(args).expect("Failed to parse with long ellipses path");
        verify_layout(&opt.volumes, |layout| {
            // Total drives should be 100, but may be distributed across sets
            let total_drives = layout.get_set_count(0) * layout.get_drives_per_set(0);
            assert_eq!(total_drives, 100, "Total drives should be 100");
        });
    });
}
/// Error-handling tests for malformed ellipses patterns.
#[test]
fn test_invalid_ellipses_patterns() {
    // Case 1: alphabetic bounds are not a valid numeric range and must be rejected.
    let opt = Opt::parse_from(vec!["rustfs", "/data/vol{a...z}"]);
    assert!(
        DisksLayout::from_volumes(&opt.volumes).is_err(),
        "Invalid ellipses pattern with letters should fail"
    );
    // Case 2: a reversed range ({10...1}). Depending on the implementation this
    // may fail outright or succeed with zero drives; only the successful case
    // is constrained here, documenting the actual behavior.
    let opt = Opt::parse_from(vec!["rustfs", "/data/vol{10...1}"]);
    if let Ok(layout) = DisksLayout::from_volumes(&opt.volumes) {
        assert!(
            layout.is_empty_layout() || layout.get_drives_per_set(0) == 0,
            "Reversed range should result in empty layout"
        );
    }
}
/// Parsing of the `--server-domains` flag (comma-separated, ports optional).
#[test]
fn test_server_domains_parsing() {
    // Parse a full CLI line with one volume plus the given --server-domains value.
    fn domains_of(spec: &str) -> Vec<String> {
        Opt::parse_from(vec!["rustfs", "/data/vol1", "--server-domains", spec]).server_domains
    }
    // Case 1: domains without ports
    assert_eq!(
        domains_of("example.com,api.example.com,cdn.example.com"),
        ["example.com", "api.example.com", "cdn.example.com"]
    );
    // Case 2: every domain carries an explicit port
    assert_eq!(
        domains_of("example.com:9000,api.example.com:8080,cdn.example.com:443"),
        ["example.com:9000", "api.example.com:8080", "cdn.example.com:443"]
    );
    // Case 3: mixture of bare domains and domain:port entries
    assert_eq!(
        domains_of("example.com,api.example.com:9000,cdn.example.com,storage.example.com:8443"),
        [
            "example.com",
            "api.example.com:9000",
            "cdn.example.com",
            "storage.example.com:8443"
        ]
    );
    // Case 4: a single domain with a port
    assert_eq!(domains_of("example.com:9000"), ["example.com:9000"]);
    // Case 5: localhost / loopback forms
    assert_eq!(
        domains_of("localhost:9000,127.0.0.1:9000,localhost"),
        ["localhost:9000", "127.0.0.1:9000", "localhost"]
    );
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/rustfs/src/config/mod.rs | rustfs/src/config/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use clap::Parser;
use clap::builder::NonEmptyStringValueParser;
use const_str::concat;
use std::string::ToString;
shadow_rs::shadow!(build);
pub mod workload_profiles;
#[cfg(test)]
mod config_test;
/// Short version string shown by `--version`.
///
/// Prefers the git tag, then `@<short-commit>`, and finally falls back to
/// the crate version baked in by `shadow_rs`.
#[allow(clippy::const_is_empty)]
const SHORT_VERSION: &str = {
    if !build::TAG.is_empty() {
        build::TAG
    } else if !build::SHORT_COMMIT.is_empty() {
        concat!("@", build::SHORT_COMMIT)
    } else {
        build::PKG_VERSION
    }
};
/// Multi-line version banner shown by `--version` in long form:
/// short version plus build, toolchain, and git metadata from `shadow_rs`.
const LONG_VERSION: &str = concat!(
    concat!(SHORT_VERSION, "\n"),
    concat!("build time   : ", build::BUILD_TIME, "\n"),
    concat!("build profile: ", build::BUILD_RUST_CHANNEL, "\n"),
    concat!("build os     : ", build::BUILD_OS, "\n"),
    concat!("rust version : ", build::RUST_VERSION, "\n"),
    concat!("rust channel : ", build::RUST_CHANNEL, "\n"),
    concat!("git branch   : ", build::BRANCH, "\n"),
    concat!("git commit   : ", build::COMMIT_HASH, "\n"),
    concat!("git tag      : ", build::TAG, "\n"),
    concat!("git status   :\n", build::GIT_STATUS_FILE),
);
/// Command-line options for the RustFS server.
///
/// Every flag can also be supplied via the corresponding `RUSTFS_*`
/// environment variable; explicit CLI arguments take precedence.
#[derive(Debug, Parser, Clone)]
#[command(version = SHORT_VERSION, long_version = LONG_VERSION)]
pub struct Opt {
    /// DIR points to a directory on a filesystem.
    // Space-delimited when read from RUSTFS_VOLUMES; empty entries are
    // rejected by NonEmptyStringValueParser, so "a  b" fails to parse.
    #[arg(
        required = true,
        env = "RUSTFS_VOLUMES",
        value_delimiter = ' ',
        value_parser = NonEmptyStringValueParser::new()
    )]
    pub volumes: Vec<String>,
    /// bind to a specific ADDRESS:PORT, ADDRESS can be an IP or hostname
    #[arg(long, default_value_t = rustfs_config::DEFAULT_ADDRESS.to_string(), env = "RUSTFS_ADDRESS")]
    pub address: String,
    /// Domain name used for virtual-hosted-style requests.
    // Comma-delimited list; a domain may carry an explicit `:port` suffix.
    #[arg(
        long,
        env = "RUSTFS_SERVER_DOMAINS",
        value_delimiter = ',',
        value_parser = NonEmptyStringValueParser::new()
    )]
    pub server_domains: Vec<String>,
    /// Access key used for authentication.
    #[arg(long, default_value_t = rustfs_credentials::DEFAULT_ACCESS_KEY.to_string(), env = "RUSTFS_ACCESS_KEY")]
    pub access_key: String,
    /// Secret key used for authentication.
    #[arg(long, default_value_t = rustfs_credentials::DEFAULT_SECRET_KEY.to_string(), env = "RUSTFS_SECRET_KEY")]
    pub secret_key: String,
    /// Enable console server
    #[arg(long, default_value_t = rustfs_config::DEFAULT_CONSOLE_ENABLE, env = "RUSTFS_CONSOLE_ENABLE")]
    pub console_enable: bool,
    /// Console server bind address
    #[arg(long, default_value_t = rustfs_config::DEFAULT_CONSOLE_ADDRESS.to_string(), env = "RUSTFS_CONSOLE_ADDRESS")]
    pub console_address: String,
    /// Observability endpoint for trace, metrics and logs,only support grpc mode.
    #[arg(long, default_value_t = rustfs_config::DEFAULT_OBS_ENDPOINT.to_string(), env = "RUSTFS_OBS_ENDPOINT")]
    pub obs_endpoint: String,
    /// tls path for rustfs API and console.
    #[arg(long, env = "RUSTFS_TLS_PATH")]
    pub tls_path: Option<String>,
    /// Optional license string.
    // NOTE(review): consumed elsewhere in the crate; semantics not visible here.
    #[arg(long, env = "RUSTFS_LICENSE")]
    pub license: Option<String>,
    /// Optional region identifier for the server.
    // NOTE(review): presumably an S3-style region name — confirm against usage.
    #[arg(long, env = "RUSTFS_REGION")]
    pub region: Option<String>,
    /// Enable KMS encryption for server-side encryption
    #[arg(long, default_value_t = false, env = "RUSTFS_KMS_ENABLE")]
    pub kms_enable: bool,
    /// KMS backend type (local or vault)
    #[arg(long, default_value_t = String::from("local"), env = "RUSTFS_KMS_BACKEND")]
    pub kms_backend: String,
    /// KMS key directory for local backend
    #[arg(long, env = "RUSTFS_KMS_KEY_DIR")]
    pub kms_key_dir: Option<String>,
    /// Vault address for vault backend
    #[arg(long, env = "RUSTFS_KMS_VAULT_ADDRESS")]
    pub kms_vault_address: Option<String>,
    /// Vault token for vault backend
    #[arg(long, env = "RUSTFS_KMS_VAULT_TOKEN")]
    pub kms_vault_token: Option<String>,
    /// Default KMS key ID for encryption
    #[arg(long, env = "RUSTFS_KMS_DEFAULT_KEY_ID")]
    pub kms_default_key_id: Option<String>,
    /// Disable adaptive buffer sizing with workload profiles
    /// Set this flag to use legacy fixed-size buffer behavior from PR #869
    #[arg(long, default_value_t = false, env = "RUSTFS_BUFFER_PROFILE_DISABLE")]
    pub buffer_profile_disable: bool,
    /// Workload profile for adaptive buffer sizing
    /// Options: GeneralPurpose, AiTraining, DataAnalytics, WebWorkload, IndustrialIoT, SecureStorage
    #[arg(long, default_value_t = String::from("GeneralPurpose"), env = "RUSTFS_BUFFER_PROFILE")]
    pub buffer_profile: String,
}
// lazy_static::lazy_static! {
// pub(crate) static ref OPT: OnceLock<Opt> = OnceLock::new();
// }
// pub fn init_config(opt: Opt) {
// OPT.set(opt).expect("Failed to set global config");
// }
// pub fn get_config() -> &'static Opt {
// OPT.get().expect("Global config not initialized")
// }
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/rio/src/reader.rs | crates/rio/src/reader.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};
use crate::compress_index::TryGetIndex;
use crate::{EtagResolvable, HashReaderDetector, Reader};
/// Thin adapter that wraps any `AsyncRead` so it can be used where the
/// crate's `Reader` trait object is required.
pub struct WarpReader<R> {
    inner: R,
}
impl<R: AsyncRead + Unpin + Send + Sync> WarpReader<R> {
    /// Wrap `inner` without adding any behavior of its own.
    pub fn new(inner: R) -> Self {
        Self { inner }
    }
}
impl<R: AsyncRead + Unpin + Send + Sync> AsyncRead for WarpReader<R> {
    // Pure pass-through: `R: Unpin`, so re-pinning the field is sound.
    fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<std::io::Result<()>> {
        Pin::new(&mut self.inner).poll_read(cx, buf)
    }
}
// Marker impls with no overrides: a plain wrapped reader exposes no etag,
// no hash-reader state, and no compression index, so the trait defaults apply.
impl<R: AsyncRead + Unpin + Send + Sync> HashReaderDetector for WarpReader<R> {}
impl<R: AsyncRead + Unpin + Send + Sync> EtagResolvable for WarpReader<R> {}
impl<R: AsyncRead + Unpin + Send + Sync> TryGetIndex for WarpReader<R> {}
impl<R: AsyncRead + Unpin + Send + Sync> Reader for WarpReader<R> {}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/rio/src/errors.rs | crates/rio/src/errors.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use thiserror::Error;
/// SHA256 mismatch error - when content SHA256 does not match what was sent from client
#[derive(Error, Debug, Clone, PartialEq)]
#[error("Bad sha256: Expected {expected_sha256} does not match calculated {calculated_sha256}")]
pub struct Sha256Mismatch {
    // Value the client claimed for the content.
    pub expected_sha256: String,
    // Value computed over the bytes actually received.
    pub calculated_sha256: String,
}
/// Bad digest error - Content-MD5 you specified did not match what we received
#[derive(Error, Debug, Clone, PartialEq)]
#[error("Bad digest: Expected {expected_md5} does not match calculated {calculated_md5}")]
pub struct BadDigest {
    pub expected_md5: String,
    pub calculated_md5: String,
}
/// Size too small error - reader size too small
// Sizes are i64 rather than u64; NOTE(review): presumably to allow negative
// "unknown size" sentinels used elsewhere — confirm against callers.
#[derive(Error, Debug, Clone, PartialEq)]
#[error("Size small: got {got}, want {want}")]
pub struct SizeTooSmall {
    pub want: i64,
    pub got: i64,
}
/// Size too large error - reader size too large
#[derive(Error, Debug, Clone, PartialEq)]
#[error("Size large: got {got}, want {want}")]
pub struct SizeTooLarge {
    pub want: i64,
    pub got: i64,
}
/// Size mismatch error
#[derive(Error, Debug, Clone, PartialEq)]
#[error("Size mismatch: got {got}, want {want}")]
pub struct SizeMismatch {
    pub want: i64,
    pub got: i64,
}
/// Checksum mismatch error - when content checksum does not match what was sent from client
#[derive(Error, Debug, Clone, PartialEq)]
#[error("Bad checksum: Want {want} does not match calculated {got}")]
pub struct ChecksumMismatch {
    pub want: String,
    pub got: String,
}
/// Invalid checksum error
#[derive(Error, Debug, Clone, PartialEq)]
#[error("invalid checksum")]
pub struct InvalidChecksum;
/// Check if an error is a checksum mismatch.
///
/// Only inspects `err` itself via `downcast_ref`; it does not walk the
/// `source()` chain, so a wrapped `ChecksumMismatch` is not detected.
pub fn is_checksum_mismatch(err: &(dyn std::error::Error + 'static)) -> bool {
    err.downcast_ref::<ChecksumMismatch>().is_some()
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/rio/src/hardlimit_reader.rs | crates/rio/src/hardlimit_reader.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::compress_index::{Index, TryGetIndex};
use crate::{EtagResolvable, HashReaderDetector, HashReaderMut, Reader};
use pin_project_lite::pin_project;
use std::io::{Error, Result};
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};
pin_project! {
    /// Reader that fails with an error once the wrapped reader produces
    /// more than the configured number of bytes in total.
    pub struct HardLimitReader {
        #[pin]
        pub inner: Box<dyn Reader>,
        // Bytes still allowed; goes negative once the limit is exceeded.
        remaining: i64,
    }
}
impl HardLimitReader {
    /// Wrap `inner`, allowing at most `limit` bytes to be read in total.
    pub fn new(inner: Box<dyn Reader>, limit: i64) -> Self {
        HardLimitReader { inner, remaining: limit }
    }
}
impl AsyncRead for HardLimitReader {
    // Unlike a "take"-style limiter this reader does not truncate: it lets
    // the bytes through and then reports an error once the total exceeds
    // the limit (and on every poll thereafter).
    fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<Result<()>> {
        // A negative remainder means a previous read already blew the limit.
        if self.remaining < 0 {
            return Poll::Ready(Err(Error::other("input provided more bytes than specified")));
        }
        // Save the initial length so we can measure how much this poll added.
        let before = buf.filled().len();
        // Poll the inner reader
        let this = self.as_mut().project();
        let poll = this.inner.poll_read(cx, buf);
        if let Poll::Ready(Ok(())) = &poll {
            let after = buf.filled().len();
            let read = (after - before) as i64;
            self.remaining -= read;
            if self.remaining < 0 {
                return Poll::Ready(Err(Error::other("input provided more bytes than specified")));
            }
        }
        poll
    }
}
// The limiter is transparent for etag resolution, hash-reader detection,
// and compression-index lookup: everything delegates to the inner reader.
impl EtagResolvable for HardLimitReader {
    fn try_resolve_etag(&mut self) -> Option<String> {
        self.inner.try_resolve_etag()
    }
}
impl HashReaderDetector for HardLimitReader {
    fn is_hash_reader(&self) -> bool {
        self.inner.is_hash_reader()
    }
    fn as_hash_reader_mut(&mut self) -> Option<&mut dyn HashReaderMut> {
        self.inner.as_hash_reader_mut()
    }
}
impl TryGetIndex for HardLimitReader {
    fn try_get_index(&self) -> Option<&Index> {
        self.inner.try_get_index()
    }
}
#[cfg(test)]
mod tests {
    use std::vec;
    use crate::WarpReader;
    use super::*;
    use rustfs_utils::read_full;
    use tokio::io::{AsyncReadExt, BufReader};
    // Data shorter than the limit passes through untouched.
    #[tokio::test]
    async fn test_hardlimit_reader_normal() {
        let data = b"hello world";
        let reader = BufReader::new(&data[..]);
        let reader = Box::new(WarpReader::new(reader));
        let hardlimit = HardLimitReader::new(reader, 20);
        let mut r = hardlimit;
        let mut buf = Vec::new();
        let n = r.read_to_end(&mut buf).await.unwrap();
        assert_eq!(n, data.len());
        assert_eq!(&buf, data);
    }
    // Data exactly at the limit is still accepted (remaining ends at 0, not < 0).
    #[tokio::test]
    async fn test_hardlimit_reader_exact_limit() {
        let data = b"1234567890";
        let reader = BufReader::new(&data[..]);
        let reader = Box::new(WarpReader::new(reader));
        let hardlimit = HardLimitReader::new(reader, 10);
        let mut r = hardlimit;
        let mut buf = Vec::new();
        let n = r.read_to_end(&mut buf).await.unwrap();
        assert_eq!(n, 10);
        assert_eq!(&buf, data);
    }
    // Reading past the limit must surface an error rather than truncating.
    #[tokio::test]
    async fn test_hardlimit_reader_exceed_limit() {
        let data = b"abcdef";
        let reader = BufReader::new(&data[..]);
        let reader = Box::new(WarpReader::new(reader));
        let hardlimit = HardLimitReader::new(reader, 3);
        let mut r = hardlimit;
        let mut buf = vec![0u8; 10];
        // Reading exceeds limit, should return error
        let err = match read_full(&mut r, &mut buf).await {
            Ok(n) => {
                println!("Read {n} bytes");
                assert_eq!(n, 3);
                assert_eq!(&buf[..n], b"abc");
                None
            }
            Err(e) => Some(e),
        };
        assert!(err.is_some());
        let err = err.unwrap();
        // Error::other maps to ErrorKind::Other.
        assert_eq!(err.kind(), std::io::ErrorKind::Other);
    }
    // An empty source under any positive limit reads zero bytes cleanly.
    #[tokio::test]
    async fn test_hardlimit_reader_empty() {
        let data = b"";
        let reader = BufReader::new(&data[..]);
        let reader = Box::new(WarpReader::new(reader));
        let hardlimit = HardLimitReader::new(reader, 5);
        let mut r = hardlimit;
        let mut buf = Vec::new();
        let n = r.read_to_end(&mut buf).await.unwrap();
        assert_eq!(n, 0);
        assert_eq!(&buf, data);
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/rio/src/limit_reader.rs | crates/rio/src/limit_reader.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! LimitReader: a wrapper for AsyncRead that limits the total number of bytes read.
//!
//! # Example
//! ```
//! use tokio::io::{AsyncReadExt, BufReader};
//! use rustfs_rio::LimitReader;
//!
//! #[tokio::main]
//! async fn main() {
//! let data = b"hello world";
//! let reader = BufReader::new(&data[..]);
//! let mut limit_reader = LimitReader::new(reader, data.len());
//!
//! let mut buf = Vec::new();
//! let n = limit_reader.read_to_end(&mut buf).await.unwrap();
//! assert_eq!(n, data.len());
//! assert_eq!(&buf, data);
//! }
//! ```
use pin_project_lite::pin_project;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};
use crate::{EtagResolvable, HashReaderDetector, HashReaderMut, TryGetIndex};
pin_project! {
    /// AsyncRead wrapper that caps the total number of bytes handed out;
    /// once the cap is reached it reports EOF (it never errors).
    #[derive(Debug)]
    pub struct LimitReader<R> {
        #[pin]
        pub inner: R,
        // Maximum number of bytes this reader will ever yield.
        limit: usize,
        // Bytes yielded so far.
        read: usize,
    }
}
/// A wrapper for AsyncRead that limits the total number of bytes read.
impl<R> LimitReader<R>
where
    R: AsyncRead + Unpin + Send + Sync,
{
    /// Create a new LimitReader wrapping `inner`, with a total read limit of `limit` bytes.
    /// A limit of 0 yields immediate EOF without ever polling `inner`.
    pub fn new(inner: R, limit: usize) -> Self {
        Self { inner, limit, read: 0 }
    }
}
impl<R> AsyncRead for LimitReader<R>
where
    R: AsyncRead + Unpin + Send + Sync,
{
    fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<std::io::Result<()>> {
        let mut this = self.project();
        // Bytes we are still allowed to hand out; EOF once exhausted.
        let remaining = this.limit.saturating_sub(*this.read);
        if remaining == 0 {
            return Poll::Ready(Ok(()));
        }
        let orig_remaining = buf.remaining();
        let allowed = remaining.min(orig_remaining);
        if allowed == 0 {
            // Caller passed a full buffer; nothing to do.
            return Poll::Ready(Ok(()));
        }
        if allowed == orig_remaining {
            // Fast path: the whole destination buffer fits under the limit,
            // so read directly into it and count what arrived.
            let before_size = buf.filled().len();
            let poll = this.inner.as_mut().poll_read(cx, buf);
            if let Poll::Ready(Ok(())) = &poll {
                let n = buf.filled().len() - before_size;
                *this.read += n;
            }
            poll
        } else {
            // Slow path: the limit cuts this read short. Read into a bounded
            // temporary buffer and copy the filled part across. On Pending no
            // data is lost, since AsyncRead fills nothing when it returns Pending.
            // NOTE(review): this allocates per poll; a `ReadBuf::take`-based
            // approach could avoid the allocation — confirm before changing.
            let mut temp = vec![0u8; allowed];
            let mut temp_buf = ReadBuf::new(&mut temp);
            let poll = this.inner.as_mut().poll_read(cx, &mut temp_buf);
            if let Poll::Ready(Ok(())) = &poll {
                let n = temp_buf.filled().len();
                buf.put_slice(temp_buf.filled());
                *this.read += n;
            }
            poll
        }
    }
}
// The limiter is transparent for etag resolution and hash-reader detection;
// it has no compression index of its own (TryGetIndex default applies).
impl<R> EtagResolvable for LimitReader<R>
where
    R: EtagResolvable,
{
    fn try_resolve_etag(&mut self) -> Option<String> {
        self.inner.try_resolve_etag()
    }
}
impl<R> HashReaderDetector for LimitReader<R>
where
    R: HashReaderDetector,
{
    fn is_hash_reader(&self) -> bool {
        self.inner.is_hash_reader()
    }
    fn as_hash_reader_mut(&mut self) -> Option<&mut dyn HashReaderMut> {
        self.inner.as_hash_reader_mut()
    }
}
impl<R> TryGetIndex for LimitReader<R> where R: AsyncRead + Unpin + Send + Sync {}
#[cfg(test)]
mod tests {
    use std::io::Cursor;
    use super::*;
    use tokio::io::{AsyncReadExt, BufReader};
    // Limit equal to the data length: everything comes through.
    #[tokio::test]
    async fn test_limit_reader_exact() {
        let data = b"hello world";
        let reader = BufReader::new(&data[..]);
        let mut limit_reader = LimitReader::new(reader, data.len());
        let mut buf = Vec::new();
        let n = limit_reader.read_to_end(&mut buf).await.unwrap();
        assert_eq!(n, data.len());
        assert_eq!(&buf, data);
    }
    // Limit below the data length: output is truncated, no error.
    #[tokio::test]
    async fn test_limit_reader_less_than_data() {
        let data = b"hello world";
        let reader = BufReader::new(&data[..]);
        let mut limit_reader = LimitReader::new(reader, 5);
        let mut buf = Vec::new();
        let n = limit_reader.read_to_end(&mut buf).await.unwrap();
        assert_eq!(n, 5);
        assert_eq!(&buf, b"hello");
    }
    // Zero limit: immediate EOF.
    #[tokio::test]
    async fn test_limit_reader_zero() {
        let data = b"hello world";
        let reader = BufReader::new(&data[..]);
        let mut limit_reader = LimitReader::new(reader, 0);
        let mut buf = Vec::new();
        let n = limit_reader.read_to_end(&mut buf).await.unwrap();
        assert_eq!(n, 0);
        assert!(buf.is_empty());
    }
    // The limit applies across multiple reads, not per call.
    #[tokio::test]
    async fn test_limit_reader_multiple_reads() {
        let data = b"abcdefghij";
        let reader = BufReader::new(&data[..]);
        let mut limit_reader = LimitReader::new(reader, 7);
        let mut buf1 = [0u8; 3];
        let n1 = limit_reader.read(&mut buf1).await.unwrap();
        assert_eq!(n1, 3);
        assert_eq!(&buf1, b"abc");
        let mut buf2 = [0u8; 5];
        let n2 = limit_reader.read(&mut buf2).await.unwrap();
        assert_eq!(n2, 4);
        assert_eq!(&buf2[..n2], b"defg");
        let mut buf3 = [0u8; 2];
        let n3 = limit_reader.read(&mut buf3).await.unwrap();
        assert_eq!(n3, 0);
    }
    // Sanity check on a multi-megabyte payload: byte-for-byte pass-through.
    #[tokio::test]
    async fn test_limit_reader_large_file() {
        use rand::Rng;
        // Generate a 3MB random byte array for testing
        let size = 3 * 1024 * 1024;
        let mut data = vec![0u8; size];
        rand::rng().fill(&mut data[..]);
        let reader = Cursor::new(data.clone());
        let mut limit_reader = LimitReader::new(reader, size);
        // Read data into buffer
        let mut buf = Vec::new();
        let n = limit_reader.read_to_end(&mut buf).await.unwrap();
        assert_eq!(n, size);
        assert_eq!(buf.len(), size);
        assert_eq!(&buf, &data);
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/rio/src/lib.rs | crates/rio/src/lib.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Default encryption block size in bytes (1 MiB), aligned with the system
/// default read buffer size.
pub const DEFAULT_ENCRYPTION_BLOCK_SIZE: usize = 1024 * 1024;
mod limit_reader;
pub use limit_reader::LimitReader;
mod etag_reader;
pub use etag_reader::EtagReader;
mod compress_index;
mod compress_reader;
pub use compress_reader::{CompressReader, DecompressReader};
mod encrypt_reader;
pub use encrypt_reader::{DecryptReader, EncryptReader};
mod hardlimit_reader;
pub use hardlimit_reader::HardLimitReader;
mod hash_reader;
pub use hash_reader::*;
mod checksum;
pub use checksum::*;
mod errors;
pub use errors::*;
pub mod reader;
pub use reader::WarpReader;
mod writer;
pub use writer::*;
mod http_reader;
pub use http_reader::*;
pub use compress_index::TryGetIndex;
mod etag;
pub trait Reader: tokio::io::AsyncRead + Unpin + Send + Sync + EtagResolvable + HashReaderDetector + TryGetIndex {}
// Trait for types that can be recursively searched for etag capability
pub trait EtagResolvable {
    // True only for readers that themselves produce an etag (e.g. EtagReader).
    fn is_etag_reader(&self) -> bool {
        false
    }
    // The resolved etag, if one is available at this point in the stream.
    fn try_resolve_etag(&mut self) -> Option<String> {
        None
    }
}
/// Convenience wrapper: resolve the etag of any [`EtagResolvable`] reader.
pub fn resolve_etag_generic<R>(reader: &mut R) -> Option<String>
where
    R: EtagResolvable,
{
    reader.try_resolve_etag()
}
/// Trait to detect and manipulate HashReader instances
pub trait HashReaderDetector {
    /// Whether this value is (or wraps) a HashReader; defaults to `false`.
    fn is_hash_reader(&self) -> bool {
        false
    }
    /// Mutable access to the underlying HashReader state, if any.
    fn as_hash_reader_mut(&mut self) -> Option<&mut dyn HashReaderMut> {
        None
    }
}
// Marker impls: these concrete readers satisfy the composite `Reader` trait.
impl Reader for crate::HashReader {}
impl Reader for crate::HardLimitReader {}
impl Reader for crate::EtagReader {}
// Wrapper readers are `Reader` whenever the reader they wrap is.
impl<R> Reader for crate::LimitReader<R> where R: Reader {}
impl<R> Reader for crate::CompressReader<R> where R: Reader {}
impl<R> Reader for crate::EncryptReader<R> where R: Reader {}
impl<R> Reader for crate::DecryptReader<R> where R: Reader {}
// Forward BOTH trait methods through the box. Previously only
// `try_resolve_etag` was forwarded, so `is_etag_reader()` on a boxed reader
// always returned the default `false` even when the boxed value was an
// EtagReader — inconsistent with the HashReaderDetector impl below.
impl EtagResolvable for Box<dyn Reader> {
    fn is_etag_reader(&self) -> bool {
        self.as_ref().is_etag_reader()
    }

    fn try_resolve_etag(&mut self) -> Option<String> {
        self.as_mut().try_resolve_etag()
    }
}
// Forward hash-reader detection through the boxed trait object.
impl HashReaderDetector for Box<dyn Reader> {
    fn is_hash_reader(&self) -> bool {
        self.as_ref().is_hash_reader()
    }
    fn as_hash_reader_mut(&mut self) -> Option<&mut dyn HashReaderMut> {
        self.as_mut().as_hash_reader_mut()
    }
}
// Forward compression-index lookup through the box, and mark the boxed trait
// object itself as a full `Reader` so it can be wrapped like any other reader.
impl TryGetIndex for Box<dyn Reader> {
    fn try_get_index(&self) -> Option<&compress_index::Index> {
        self.as_ref().try_get_index()
    }
}
impl Reader for Box<dyn Reader> {}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/rio/src/etag_reader.rs | crates/rio/src/etag_reader.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::compress_index::{Index, TryGetIndex};
use crate::{EtagResolvable, HashReaderDetector, HashReaderMut, Reader};
use md5::{Digest, Md5};
use pin_project_lite::pin_project;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};
use tracing::error;
pin_project! {
    // Reader adapter that MD5-hashes every byte passed through it and, when an
    // expected checksum is supplied, verifies the stream against it at EOF.
    pub struct EtagReader {
        #[pin]
        pub inner: Box<dyn Reader>,
        // Running MD5 over all bytes read so far.
        pub md5: Md5,
        // Set once EOF has been observed by poll_read.
        pub finished: bool,
        // Expected lowercase-hex MD5; verified at EOF when present.
        pub checksum: Option<String>,
    }
}
impl EtagReader {
    /// Wrap `inner`, hashing every byte read through it. When `checksum` is
    /// `Some`, the stream is verified against it at EOF (see the `AsyncRead`
    /// impl) and a mismatch surfaces as an `InvalidData` I/O error.
    pub fn new(inner: Box<dyn Reader>, checksum: Option<String>) -> Self {
        Self {
            inner,
            md5: Md5::new(),
            finished: false,
            checksum,
        }
    }

    /// Lowercase-hex MD5 of all bytes hashed so far.
    ///
    /// Finalizes a clone of the internal hasher, so the running state is left
    /// intact and this can be called any number of times; once the stream has
    /// been fully read it yields the final etag. Receiver relaxed from
    /// `&mut self` to `&self` — nothing is mutated here, and existing callers
    /// holding `&mut` still compile.
    pub fn get_etag(&self) -> String {
        let etag = self.md5.clone().finalize().to_vec();
        hex_simd::encode_to_string(etag, hex_simd::AsciiCase::Lower)
    }
}
impl AsyncRead for EtagReader {
    // Pass the read through to `inner`, feeding every newly appended byte into
    // the MD5 state. A successful poll that appends zero bytes is EOF; at that
    // point the optional expected checksum is verified and a mismatch is
    // surfaced as an InvalidData error instead of a clean EOF.
    fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<std::io::Result<()>> {
        let mut this = self.project();
        // Remember how much was already filled so only this poll's bytes are hashed.
        let orig_filled = buf.filled().len();
        let poll = this.inner.as_mut().poll_read(cx, buf);
        if let Poll::Ready(Ok(())) = &poll {
            let filled = &buf.filled()[orig_filled..];
            if !filled.is_empty() {
                this.md5.update(filled);
            } else {
                // EOF
                *this.finished = true;
                if let Some(checksum) = this.checksum {
                    // Finalize on a clone so the digest state stays reusable.
                    let etag = this.md5.clone().finalize().to_vec();
                    let etag_hex = hex_simd::encode_to_string(etag, hex_simd::AsciiCase::Lower);
                    if *checksum != etag_hex {
                        error!("Checksum mismatch, expected={:?}, actual={:?}", checksum, etag_hex);
                        return Poll::Ready(Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "Checksum mismatch")));
                    }
                }
            }
        }
        poll
    }
}
impl EtagResolvable for EtagReader {
    fn is_etag_reader(&self) -> bool {
        true
    }

    // The etag comes from this reader itself — it is never delegated to the
    // wrapped inner reader.
    fn try_resolve_etag(&mut self) -> Option<String> {
        // A caller-supplied checksum always wins.
        if self.checksum.is_some() {
            return self.checksum.clone();
        }
        // Without one, the computed MD5 is only meaningful after EOF.
        if !self.finished {
            return None;
        }
        Some(self.get_etag())
    }
}
// EtagReader is not itself a hash reader; forward detection to `inner`.
impl HashReaderDetector for EtagReader {
    fn is_hash_reader(&self) -> bool {
        self.inner.is_hash_reader()
    }
    fn as_hash_reader_mut(&mut self) -> Option<&mut dyn HashReaderMut> {
        self.inner.as_hash_reader_mut()
    }
}
// Forward compression-index lookup to the wrapped reader.
impl TryGetIndex for EtagReader {
    fn try_get_index(&self) -> Option<&Index> {
        self.inner.try_get_index()
    }
}
#[cfg(test)]
mod tests {
    use crate::WarpReader;
    use super::*;
    use std::io::Cursor;
    use tokio::io::{AsyncReadExt, BufReader};
    // Etag of a fully read stream matches an independently computed MD5.
    #[tokio::test]
    async fn test_etag_reader_basic() {
        let data = b"hello world";
        let mut hasher = Md5::new();
        hasher.update(data);
        let hex = faster_hex::hex_string(hasher.finalize().as_slice());
        let expected = hex.to_string();
        let reader = BufReader::new(&data[..]);
        let reader = Box::new(WarpReader::new(reader));
        let mut etag_reader = EtagReader::new(reader, None);
        let mut buf = Vec::new();
        let n = etag_reader.read_to_end(&mut buf).await.unwrap();
        assert_eq!(n, data.len());
        assert_eq!(&buf, data);
        let etag = etag_reader.try_resolve_etag();
        assert_eq!(etag, Some(expected));
    }
    // An empty stream still yields the MD5 of zero bytes.
    #[tokio::test]
    async fn test_etag_reader_empty() {
        let data = b"";
        let mut hasher = Md5::new();
        hasher.update(data);
        let hex = faster_hex::hex_string(hasher.finalize().as_slice());
        let expected = hex.to_string();
        let reader = BufReader::new(&data[..]);
        let reader = Box::new(WarpReader::new(reader));
        let mut etag_reader = EtagReader::new(reader, None);
        let mut buf = Vec::new();
        let n = etag_reader.read_to_end(&mut buf).await.unwrap();
        assert_eq!(n, 0);
        assert!(buf.is_empty());
        let etag = etag_reader.try_resolve_etag();
        assert_eq!(etag, Some(expected));
    }
    // Resolving the etag repeatedly returns the same value each time.
    #[tokio::test]
    async fn test_etag_reader_multiple_get() {
        let data = b"abc123";
        let mut hasher = Md5::new();
        hasher.update(data);
        let hex = faster_hex::hex_string(hasher.finalize().as_slice());
        let expected = hex.to_string();
        let reader = BufReader::new(&data[..]);
        let reader = Box::new(WarpReader::new(reader));
        let mut etag_reader = EtagReader::new(reader, None);
        let mut buf = Vec::new();
        let _ = etag_reader.read_to_end(&mut buf).await.unwrap();
        // Call etag multiple times, should always return the same result
        let etag1 = { etag_reader.try_resolve_etag() };
        let etag2 = { etag_reader.try_resolve_etag() };
        assert_eq!(etag1, Some(expected.clone()));
        assert_eq!(etag2, Some(expected.clone()));
    }
    // Before EOF (and with no expected checksum) no etag is available.
    #[tokio::test]
    async fn test_etag_reader_not_finished() {
        let data = b"abc123";
        let reader = BufReader::new(&data[..]);
        let reader = Box::new(WarpReader::new(reader));
        let mut etag_reader = EtagReader::new(reader, None);
        // Do not read to end, etag should be None
        let mut buf = [0u8; 2];
        let _ = etag_reader.read(&mut buf).await.unwrap();
        assert_eq!(etag_reader.try_resolve_etag(), None);
    }
    // Hashing is correct across many internal read chunks.
    #[tokio::test]
    async fn test_etag_reader_large_data() {
        use rand::Rng;
        // Generate 3MB random data
        let size = 3 * 1024 * 1024;
        let mut data = vec![0u8; size];
        rand::rng().fill(&mut data[..]);
        let mut hasher = Md5::new();
        hasher.update(&data);
        let cloned_data = data.clone();
        let hex = faster_hex::hex_string(hasher.finalize().as_slice());
        let expected = hex.to_string();
        let reader = Cursor::new(data.clone());
        let reader = Box::new(WarpReader::new(reader));
        let mut etag_reader = EtagReader::new(reader, None);
        let mut buf = Vec::new();
        let n = etag_reader.read_to_end(&mut buf).await.unwrap();
        assert_eq!(n, size);
        assert_eq!(&buf, &cloned_data);
        let etag = etag_reader.try_resolve_etag();
        assert_eq!(etag, Some(expected));
    }
    // A matching expected checksum lets the stream read cleanly to EOF.
    #[tokio::test]
    async fn test_etag_reader_checksum_match() {
        let data = b"checksum test data";
        let mut hasher = Md5::new();
        hasher.update(data);
        let expected = hex_simd::encode_to_string(hasher.finalize(), hex_simd::AsciiCase::Lower);
        let reader = BufReader::new(&data[..]);
        let reader = Box::new(WarpReader::new(reader));
        let mut etag_reader = EtagReader::new(reader, Some(expected.clone()));
        let mut buf = Vec::new();
        let n = etag_reader.read_to_end(&mut buf).await.unwrap();
        assert_eq!(n, data.len());
        assert_eq!(&buf, data);
        // Verification passed, etag should equal expected
        assert_eq!(etag_reader.try_resolve_etag(), Some(expected));
    }
    // A wrong expected checksum turns EOF into an InvalidData error.
    #[tokio::test]
    async fn test_etag_reader_checksum_mismatch() {
        let data = b"checksum test data";
        let wrong_checksum = "deadbeefdeadbeefdeadbeefdeadbeef".to_string();
        let reader = BufReader::new(&data[..]);
        let reader = Box::new(WarpReader::new(reader));
        let mut etag_reader = EtagReader::new(reader, Some(wrong_checksum.clone()));
        let mut buf = Vec::new();
        // Verification failed, should return InvalidData error
        let err = etag_reader.read_to_end(&mut buf).await.unwrap_err();
        assert_eq!(err.kind(), std::io::ErrorKind::InvalidData);
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/rio/src/checksum.rs | crates/rio/src/checksum.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::errors::ChecksumMismatch;
use base64::{Engine as _, engine::general_purpose};
use bytes::Bytes;
use http::HeaderMap;
use sha1::Sha1;
use sha2::{Digest, Sha256};
use std::collections::HashMap;
use std::io::Write;
/// Size in bytes of a raw SHA-256 digest.
pub const SHA256_SIZE: usize = 32;
/// RustFS multipart checksum metadata key
pub const RUSTFS_MULTIPART_CHECKSUM: &str = "x-rustfs-multipart-checksum";
/// RustFS multipart checksum type metadata key
pub const RUSTFS_MULTIPART_CHECKSUM_TYPE: &str = "x-rustfs-multipart-checksum-type";
/// Checksum type as a bit-flag set: one base algorithm (SHA256 / SHA1 / CRC32 /
/// CRC32C / CRC64-NVME) optionally combined with modifier flags such as
/// TRAILING, MULTIPART, INCLUDES_MULTIPART and FULL_OBJECT.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct ChecksumType(pub u32);
impl ChecksumType {
    /// Checksum will be sent in trailing header
    pub const TRAILING: ChecksumType = ChecksumType(1 << 0);
    /// SHA256 checksum
    pub const SHA256: ChecksumType = ChecksumType(1 << 1);
    /// SHA1 checksum
    pub const SHA1: ChecksumType = ChecksumType(1 << 2);
    /// CRC32 checksum with IEEE table
    pub const CRC32: ChecksumType = ChecksumType(1 << 3);
    /// CRC32 checksum with Castagnoli table
    pub const CRC32C: ChecksumType = ChecksumType(1 << 4);
    /// Invalid checksum
    pub const INVALID: ChecksumType = ChecksumType(1 << 5);
    /// Multipart checksum
    pub const MULTIPART: ChecksumType = ChecksumType(1 << 6);
    /// Checksum includes multipart checksums
    pub const INCLUDES_MULTIPART: ChecksumType = ChecksumType(1 << 7);
    /// CRC64 with NVME polynomial
    pub const CRC64_NVME: ChecksumType = ChecksumType(1 << 8);
    /// Full object checksum
    pub const FULL_OBJECT: ChecksumType = ChecksumType(1 << 9);
    /// No checksum
    pub const NONE: ChecksumType = ChecksumType(0);
    // Mask covering only the algorithm bits, i.e. everything except modifier flags.
    const BASE_TYPE_MASK: u32 = Self::SHA256.0 | Self::SHA1.0 | Self::CRC32.0 | Self::CRC32C.0 | Self::CRC64_NVME.0;
    /// Check if this checksum type has all flags of the given type
    /// (`t == NONE` only matches an exactly-empty flag set).
    pub fn is(self, t: ChecksumType) -> bool {
        if t == Self::NONE {
            return self == Self::NONE;
        }
        (self.0 & t.0) == t.0
    }
    /// Merge another checksum type into this one
    pub fn merge(&mut self, other: ChecksumType) -> &mut Self {
        self.0 |= other.0;
        self
    }
    /// Get the base checksum type (without flags)
    pub fn base(self) -> ChecksumType {
        ChecksumType(self.0 & Self::BASE_TYPE_MASK)
    }
    /// Get the header key for this checksum type
    pub fn key(self) -> Option<&'static str> {
        match self.base() {
            Self::CRC32 => Some("x-amz-checksum-crc32"),
            Self::CRC32C => Some("x-amz-checksum-crc32c"),
            Self::SHA1 => Some("x-amz-checksum-sha1"),
            Self::SHA256 => Some("x-amz-checksum-sha256"),
            Self::CRC64_NVME => Some("x-amz-checksum-crc64nvme"),
            _ => None,
        }
    }
    /// Get the size of the raw (unencoded) checksum in bytes
    pub fn raw_byte_len(self) -> usize {
        match self.base() {
            Self::CRC32 | Self::CRC32C => 4,
            Self::SHA1 => 20,
            Self::SHA256 => SHA256_SIZE,
            Self::CRC64_NVME => 8,
            _ => 0,
        }
    }
    /// Check if the checksum type is set and valid
    /// (not INVALID, and exactly one base algorithm bit present).
    pub fn is_set(self) -> bool {
        !self.is(Self::INVALID) && !self.base().is(Self::NONE)
    }
    /// Check if this checksum type can be merged
    /// (only the CRC family supports combining per-part checksums).
    pub fn can_merge(self) -> bool {
        self.is(Self::CRC64_NVME) || self.is(Self::CRC32C) || self.is(Self::CRC32)
    }
    /// Create a hasher for this checksum type
    pub fn hasher(self) -> Option<Box<dyn ChecksumHasher>> {
        match self.base() {
            Self::CRC32 => Some(Box::new(Crc32IeeeHasher::new())),
            Self::CRC32C => Some(Box::new(Crc32CastagnoliHasher::new())),
            Self::SHA1 => Some(Box::new(Sha1Hasher::new())),
            Self::SHA256 => Some(Box::new(Sha256Hasher::new())),
            Self::CRC64_NVME => Some(Box::new(Crc64NvmeHasher::new())),
            _ => None,
        }
    }
    /// Check if checksum is trailing
    pub fn trailing(self) -> bool {
        self.is(Self::TRAILING)
    }
    /// Check if full object checksum was requested
    /// (CRC64-NVME is always treated as full-object).
    pub fn full_object_requested(self) -> bool {
        (self.0 & Self::FULL_OBJECT.0) == Self::FULL_OBJECT.0 || self.is(Self::CRC64_NVME)
    }
    /// Get object type string for x-amz-checksum-type header
    pub fn obj_type(self) -> &'static str {
        if self.full_object_requested() {
            "FULL_OBJECT"
        } else if self.is_set() {
            "COMPOSITE"
        } else {
            ""
        }
    }
    /// Parse from the `x-amz-checksum-algorithm` / `x-amz-checksum-type` headers.
    pub fn from_header(headers: &HeaderMap) -> Self {
        Self::from_string_with_obj_type(
            headers
                .get("x-amz-checksum-algorithm")
                .and_then(|v| v.to_str().ok())
                .unwrap_or(""),
            headers.get("x-amz-checksum-type").and_then(|v| v.to_str().ok()).unwrap_or(""),
        )
    }
    /// Create checksum type from string algorithm
    pub fn from_string(alg: &str) -> Self {
        Self::from_string_with_obj_type(alg, "")
    }
    /// Create checksum type from algorithm and object type
    /// (SHA1/SHA256 reject FULL_OBJECT; unknown values map to INVALID).
    pub fn from_string_with_obj_type(alg: &str, obj_type: &str) -> Self {
        let full = match obj_type {
            "FULL_OBJECT" => Self::FULL_OBJECT,
            "COMPOSITE" | "" => Self::NONE,
            _ => return Self::INVALID,
        };
        match alg.to_uppercase().as_str() {
            "CRC32" => ChecksumType(Self::CRC32.0 | full.0),
            "CRC32C" => ChecksumType(Self::CRC32C.0 | full.0),
            "SHA1" => {
                if full != Self::NONE {
                    return Self::INVALID;
                }
                Self::SHA1
            }
            "SHA256" => {
                if full != Self::NONE {
                    return Self::INVALID;
                }
                Self::SHA256
            }
            "CRC64NVME" => {
                // AWS seems to ignore full value and just assume it
                Self::CRC64_NVME
            }
            "" => {
                if full != Self::NONE {
                    return Self::INVALID;
                }
                Self::NONE
            }
            _ => Self::INVALID,
        }
    }
}
impl std::fmt::Display for ChecksumType {
    // Writes the algorithm name of the base type; modifier flags are ignored,
    // an empty base prints nothing, and anything else prints "invalid".
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self.base() {
            Self::CRC32 => "CRC32",
            Self::CRC32C => "CRC32C",
            Self::SHA1 => "SHA1",
            Self::SHA256 => "SHA256",
            Self::CRC64_NVME => "CRC64NVME",
            Self::NONE => "",
            _ => "invalid",
        };
        f.write_str(label)
    }
}
/// Base checksum types list
/// Scanned in order when searching request headers/trailers for a checksum.
pub const BASE_CHECKSUM_TYPES: &[ChecksumType] = &[
    ChecksumType::SHA256,
    ChecksumType::SHA1,
    ChecksumType::CRC32,
    ChecksumType::CRC64_NVME,
    ChecksumType::CRC32C,
];
/// Checksum structure containing type and encoded value
#[derive(Debug, Clone, PartialEq, Default)]
pub struct Checksum {
    /// Algorithm plus modifier flags.
    pub checksum_type: ChecksumType,
    /// Standard-base64 encoding of `raw` (without any `-parts` suffix).
    pub encoded: String,
    /// Raw digest bytes.
    pub raw: Vec<u8>,
    /// Expected part count for multipart checksums; 0 when not multipart.
    pub want_parts: i32,
}
impl Checksum {
    /// Create a new checksum from data
    pub fn new_from_data(checksum_type: ChecksumType, data: &[u8]) -> Option<Self> {
        if !checksum_type.is_set() {
            return None;
        }
        let mut hasher = checksum_type.hasher()?;
        hasher.write_all(data).ok()?;
        let raw = hasher.finalize();
        let encoded = general_purpose::STANDARD.encode(&raw);
        let checksum = Checksum {
            checksum_type,
            encoded,
            raw,
            want_parts: 0,
        };
        if checksum.valid() { Some(checksum) } else { None }
    }
    /// Create a new checksum from algorithm string and base64 value
    pub fn new_from_string(alg: &str, value: &str) -> Option<Self> {
        Self::new_with_type(ChecksumType::from_string(alg), value)
    }
    /// Create a new checksum with specific type and value
    /// A `"<b64>-<N>"` value is the multipart form: N becomes `want_parts`
    /// and the MULTIPART flag is added to the type.
    pub fn new_with_type(mut checksum_type: ChecksumType, value: &str) -> Option<Self> {
        if !checksum_type.is_set() {
            return None;
        }
        let mut want_parts = 0;
        let value_string;
        // Handle multipart format (value-parts)
        if value.contains('-') {
            let parts: Vec<&str> = value.split('-').collect();
            if parts.len() != 2 {
                return None;
            }
            value_string = parts[0].to_string();
            want_parts = parts[1].parse().ok()?;
            checksum_type = ChecksumType(checksum_type.0 | ChecksumType::MULTIPART.0);
        } else {
            value_string = value.to_string();
        }
        // let raw = base64_simd::URL_SAFE_NO_PAD.decode_to_vec(&value_string).ok()?;
        let raw = general_purpose::STANDARD.decode(&value_string).ok()?;
        let checksum = Checksum {
            checksum_type,
            encoded: value_string,
            raw,
            want_parts,
        };
        if checksum.valid() { Some(checksum) } else { None }
    }
    /// Check if checksum is valid
    /// An empty `encoded` is only acceptable for NONE or a trailing checksum
    /// (the value arrives later in the trailer); otherwise the raw length
    /// must match the algorithm's digest size.
    pub fn valid(&self) -> bool {
        if self.checksum_type == ChecksumType::INVALID {
            return false;
        }
        if self.encoded.is_empty() || self.checksum_type.trailing() {
            return self.checksum_type.is(ChecksumType::NONE) || self.checksum_type.trailing();
        }
        self.checksum_type.raw_byte_len() == self.raw.len()
    }
    /// Check if content matches this checksum
    pub fn matches(&self, content: &[u8], parts: i32) -> Result<(), ChecksumMismatch> {
        // Nothing to verify when no value was supplied.
        if self.encoded.is_empty() {
            return Ok(());
        }
        let mut hasher = self.checksum_type.hasher().ok_or_else(|| ChecksumMismatch {
            want: self.encoded.clone(),
            got: "no hasher available".to_string(),
        })?;
        hasher.write_all(content).map_err(|_| ChecksumMismatch {
            want: self.encoded.clone(),
            got: "write error".to_string(),
        })?;
        let sum = hasher.finalize();
        // Part-count mismatch is reported in the "<b64>-<N>" form.
        if self.want_parts > 0 && self.want_parts != parts {
            return Err(ChecksumMismatch {
                want: format!("{}-{}", self.encoded, self.want_parts),
                got: format!("{}-{}", general_purpose::STANDARD.encode(&sum), parts),
            });
        }
        if sum != self.raw {
            return Err(ChecksumMismatch {
                want: self.encoded.clone(),
                got: general_purpose::STANDARD.encode(&sum),
            });
        }
        Ok(())
    }
    /// Convert checksum to map representation
    pub fn as_map(&self) -> Option<HashMap<String, String>> {
        if !self.valid() {
            return None;
        }
        let mut map = HashMap::new();
        map.insert(self.checksum_type.to_string(), self.encoded.clone());
        Some(map)
    }
    pub fn to_bytes(&self, parts: &[u8]) -> Bytes {
        self.append_to(Vec::new(), parts).into()
    }
    /// Append checksum to byte buffer
    /// Wire format: varint(type flags) + raw digest, then for multipart a
    /// varint part count and (for INCLUDES_MULTIPART) the concatenated raw
    /// per-part digests.
    pub fn append_to(&self, mut buffer: Vec<u8>, parts: &[u8]) -> Vec<u8> {
        // Encode checksum type as varint
        let mut type_bytes = Vec::new();
        encode_varint(&mut type_bytes, self.checksum_type.0 as u64);
        buffer.extend_from_slice(&type_bytes);
        // Remove trailing flag when serializing
        // NOTE(review): this clone and the empty `if` below look like leftovers
        // (candidates for cleanup) — neither has any effect.
        let crc = self.raw.clone();
        if self.checksum_type.trailing() {
            // When serializing, we don't care if it was trailing
        }
        // A digest of the wrong length is dropped: only the type varint is kept.
        if crc.len() != self.checksum_type.raw_byte_len() {
            return buffer;
        }
        buffer.extend_from_slice(&crc);
        if self.checksum_type.is(ChecksumType::MULTIPART) {
            let mut checksums = 0;
            if self.want_parts > 0 && !self.checksum_type.is(ChecksumType::INCLUDES_MULTIPART) {
                checksums = self.want_parts;
            }
            // Ensure we don't divide by 0
            let raw_len = self.checksum_type.raw_byte_len();
            if raw_len == 0 || !parts.len().is_multiple_of(raw_len) {
                checksums = 0;
            } else if !parts.is_empty() {
                checksums = (parts.len() / raw_len) as i32;
            }
            let parts_to_append = if self.checksum_type.is(ChecksumType::INCLUDES_MULTIPART) {
                parts
            } else {
                &[]
            };
            let mut checksums_bytes = Vec::new();
            encode_varint(&mut checksums_bytes, checksums as u64);
            buffer.extend_from_slice(&checksums_bytes);
            if !parts_to_append.is_empty() {
                buffer.extend_from_slice(parts_to_append);
            }
        }
        buffer
    }
    /// Add a part checksum into the current checksum, as if the content of each was appended.
    /// The size of the content that produced the second checksum must be provided.
    /// Not all checksum types can be merged, use the can_merge method to check.
    /// Checksum types must match.
    pub fn add_part(&mut self, other: &Checksum, size: i64) -> Result<(), String> {
        if !other.checksum_type.can_merge() {
            return Err("checksum type cannot be merged".to_string());
        }
        // A zero-sized part contributes nothing to a CRC combination.
        if size == 0 {
            return Ok(());
        }
        if !self.checksum_type.is(other.checksum_type.base()) {
            return Err(format!(
                "checksum type does not match got {} and {}",
                self.checksum_type, other.checksum_type
            ));
        }
        // If never set, just add first checksum
        if self.raw.is_empty() {
            self.raw = other.raw.clone();
            self.encoded = other.encoded.clone();
            return Ok(());
        }
        if !self.valid() {
            return Err("invalid base checksum".to_string());
        }
        if !other.valid() {
            return Err("invalid part checksum".to_string());
        }
        // Combine the two CRCs as if the part's bytes were appended, using the
        // polynomial matching the algorithm (digests are big-endian on the wire).
        match self.checksum_type.base() {
            ChecksumType::CRC32 => {
                let crc1 = u32::from_be_bytes([self.raw[0], self.raw[1], self.raw[2], self.raw[3]]);
                let crc2 = u32::from_be_bytes([other.raw[0], other.raw[1], other.raw[2], other.raw[3]]);
                let combined = crc32_combine(0xEDB88320, crc1, crc2, size); // IEEE polynomial
                self.raw = combined.to_be_bytes().to_vec();
            }
            ChecksumType::CRC32C => {
                let crc1 = u32::from_be_bytes([self.raw[0], self.raw[1], self.raw[2], self.raw[3]]);
                let crc2 = u32::from_be_bytes([other.raw[0], other.raw[1], other.raw[2], other.raw[3]]);
                let combined = crc32_combine(0x82F63B78, crc1, crc2, size); // Castagnoli polynomial
                self.raw = combined.to_be_bytes().to_vec();
            }
            ChecksumType::CRC64_NVME => {
                let crc1 = u64::from_be_bytes([
                    self.raw[0],
                    self.raw[1],
                    self.raw[2],
                    self.raw[3],
                    self.raw[4],
                    self.raw[5],
                    self.raw[6],
                    self.raw[7],
                ]);
                let crc2 = u64::from_be_bytes([
                    other.raw[0],
                    other.raw[1],
                    other.raw[2],
                    other.raw[3],
                    other.raw[4],
                    other.raw[5],
                    other.raw[6],
                    other.raw[7],
                ]);
                let combined = crc64_combine(CRC64_NVME_POLYNOMIAL.reverse_bits(), crc1, crc2, size);
                self.raw = combined.to_be_bytes().to_vec();
            }
            _ => {
                return Err(format!("unknown checksum type: {}", self.checksum_type));
            }
        }
        // Keep the base64 form in sync with the merged raw digest.
        self.encoded = general_purpose::STANDARD.encode(&self.raw);
        Ok(())
    }
}
/// Get content checksum from headers
/// Trailing checksums (announced via `x-amz-trailer`) take precedence and are
/// returned with an empty value (the digest arrives later in the trailer);
/// otherwise the checksum is read directly from the `x-amz-checksum-*` headers.
pub fn get_content_checksum(headers: &HeaderMap) -> Result<Option<Checksum>, std::io::Error> {
    // Check for trailing checksums
    if let Some(trailer_header) = headers.get("x-amz-trailer") {
        let mut result = None;
        let trailer_str = trailer_header
            .to_str()
            .map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidData, "Invalid header value"))?;
        let trailing_headers: Vec<&str> = trailer_str.split(',').map(|s| s.trim()).collect();
        for header in trailing_headers {
            let mut duplicates = false;
            for &checksum_type in crate::checksum::BASE_CHECKSUM_TYPES {
                if let Some(key) = checksum_type.key()
                    && header.eq_ignore_ascii_case(key)
                {
                    // A second checksum trailer is an error.
                    duplicates = result.is_some();
                    result = Some(Checksum {
                        checksum_type: ChecksumType(checksum_type.0 | ChecksumType::TRAILING.0),
                        encoded: String::new(),
                        raw: Vec::new(),
                        want_parts: 0,
                    });
                }
            }
            if duplicates {
                return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "Invalid checksum"));
            }
        }
        if let Some(mut res) = result {
            // FULL_OBJECT is only valid for mergeable (CRC-family) algorithms.
            match headers.get("x-amz-checksum-type").and_then(|v| v.to_str().ok()) {
                Some("FULL_OBJECT") => {
                    if !res.checksum_type.can_merge() {
                        return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "Invalid checksum"));
                    }
                    res.checksum_type = ChecksumType(res.checksum_type.0 | ChecksumType::FULL_OBJECT.0);
                }
                Some("COMPOSITE") | Some("") | None => {}
                _ => return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "Invalid checksum")),
            }
            return Ok(Some(res));
        }
    }
    let (checksum_type, value) = get_content_checksum_direct(headers);
    if checksum_type == ChecksumType::NONE {
        if value.is_empty() {
            return Ok(None);
        }
        return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "Invalid checksum"));
    }
    let checksum = Checksum::new_with_type(checksum_type, &value);
    Ok(checksum)
}
/// Get content checksum type and value directly from headers
/// Prefers an explicit `x-amz-checksum-algorithm`; otherwise scans the
/// per-algorithm `x-amz-checksum-*` headers (more than one present = INVALID).
fn get_content_checksum_direct(headers: &HeaderMap) -> (ChecksumType, String) {
    let mut checksum_type = ChecksumType::NONE;
    if let Some(alg) = headers.get("x-amz-checksum-algorithm").and_then(|v| v.to_str().ok()) {
        checksum_type = ChecksumType::from_string_with_obj_type(
            alg,
            headers.get("x-amz-checksum-type").and_then(|s| s.to_str().ok()).unwrap_or(""),
        );
        // FULL_OBJECT only applies to mergeable (CRC-family) algorithms.
        if headers.get("x-amz-checksum-type").and_then(|v| v.to_str().ok()) == Some("FULL_OBJECT") {
            if !checksum_type.can_merge() {
                return (ChecksumType::INVALID, String::new());
            }
            checksum_type = ChecksumType(checksum_type.0 | ChecksumType::FULL_OBJECT.0);
        }
        if checksum_type.is_set()
            && let Some(key) = checksum_type.key()
        {
            if let Some(value) = headers.get(key).and_then(|v| v.to_str().ok()) {
                return (checksum_type, value.to_string());
            } else {
                // Algorithm announced but no matching value header.
                return (ChecksumType::NONE, String::new());
            }
        }
        return (checksum_type, String::new());
    }
    // Check individual checksum headers
    for &ct in crate::checksum::BASE_CHECKSUM_TYPES {
        if let Some(key) = ct.key()
            && let Some(value) = headers.get(key).and_then(|v| v.to_str().ok())
        {
            // If already set, invalid
            if checksum_type != ChecksumType::NONE {
                return (ChecksumType::INVALID, String::new());
            }
            checksum_type = ct;
            if headers.get("x-amz-checksum-type").and_then(|v| v.to_str().ok()) == Some("FULL_OBJECT") {
                if !checksum_type.can_merge() {
                    return (ChecksumType::INVALID, String::new());
                }
                checksum_type = ChecksumType(checksum_type.0 | ChecksumType::FULL_OBJECT.0);
            }
            return (checksum_type, value.to_string());
        }
    }
    (checksum_type, String::new())
}
/// Trait for checksum hashers
/// Streaming digest abstraction: feed bytes via `Write`, then read the digest.
pub trait ChecksumHasher: Write + Send + Sync {
    /// Digest of all bytes written so far (implementations below clone their
    /// state, leaving the hasher usable afterwards).
    fn finalize(&mut self) -> Vec<u8>;
    /// Reset the hasher to its initial state.
    fn reset(&mut self);
}
/// CRC32 IEEE hasher
/// Produces the 4-byte big-endian CRC32 (ISO-HDLC/IEEE polynomial).
pub struct Crc32IeeeHasher {
    hasher: crc_fast::Digest,
}
impl Default for Crc32IeeeHasher {
    fn default() -> Self {
        Self::new()
    }
}
impl Crc32IeeeHasher {
    pub fn new() -> Self {
        Self {
            hasher: crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32IsoHdlc),
        }
    }
}
impl Write for Crc32IeeeHasher {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.hasher.update(buf);
        Ok(buf.len())
    }
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}
impl ChecksumHasher for Crc32IeeeHasher {
    fn finalize(&mut self) -> Vec<u8> {
        // Finalize a clone so the running state stays usable.
        (self.hasher.clone().finalize() as u32).to_be_bytes().to_vec()
    }
    fn reset(&mut self) {
        self.hasher = crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32IsoHdlc);
    }
}
/// CRC32 Castagnoli hasher
/// Produces the 4-byte big-endian CRC32C (iSCSI/Castagnoli polynomial).
pub struct Crc32CastagnoliHasher {
    hasher: crc_fast::Digest,
}
impl Default for Crc32CastagnoliHasher {
    fn default() -> Self {
        Self::new()
    }
}
impl Crc32CastagnoliHasher {
    pub fn new() -> Self {
        Self {
            hasher: crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32Iscsi),
        }
    }
}
impl Write for Crc32CastagnoliHasher {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.hasher.update(buf);
        Ok(buf.len())
    }
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}
impl ChecksumHasher for Crc32CastagnoliHasher {
    fn finalize(&mut self) -> Vec<u8> {
        // Finalize a clone so the running state stays usable.
        (self.hasher.clone().finalize() as u32).to_be_bytes().to_vec()
    }
    fn reset(&mut self) {
        self.hasher = crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32Iscsi);
    }
}
/// SHA1 hasher
/// Produces the 20-byte SHA-1 digest.
pub struct Sha1Hasher {
    hasher: Sha1,
}
impl Default for Sha1Hasher {
    fn default() -> Self {
        Self::new()
    }
}
impl Sha1Hasher {
    pub fn new() -> Self {
        Self { hasher: Sha1::new() }
    }
}
impl Write for Sha1Hasher {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.hasher.update(buf);
        Ok(buf.len())
    }
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}
impl ChecksumHasher for Sha1Hasher {
    fn finalize(&mut self) -> Vec<u8> {
        // Finalize a clone so the running state stays usable.
        self.hasher.clone().finalize().to_vec()
    }
    fn reset(&mut self) {
        self.hasher = Sha1::new();
    }
}
/// SHA256 hasher
/// Produces the 32-byte SHA-256 digest (see `SHA256_SIZE`).
pub struct Sha256Hasher {
    hasher: Sha256,
}
impl Default for Sha256Hasher {
    fn default() -> Self {
        Self::new()
    }
}
impl Sha256Hasher {
    pub fn new() -> Self {
        Self { hasher: Sha256::new() }
    }
}
impl Write for Sha256Hasher {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.hasher.update(buf);
        Ok(buf.len())
    }
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}
impl ChecksumHasher for Sha256Hasher {
    fn finalize(&mut self) -> Vec<u8> {
        // Finalize a clone so the running state stays usable.
        self.hasher.clone().finalize().to_vec()
    }
    fn reset(&mut self) {
        self.hasher = Sha256::new();
    }
}
/// CRC64 NVME hasher
/// Produces the 8-byte big-endian CRC64 (NVMe polynomial).
pub struct Crc64NvmeHasher {
    hasher: crc_fast::Digest,
}
impl Default for Crc64NvmeHasher {
    fn default() -> Self {
        Self::new()
    }
}
impl Crc64NvmeHasher {
    pub fn new() -> Self {
        Self {
            hasher: crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc64Nvme),
        }
    }
}
impl Write for Crc64NvmeHasher {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.hasher.update(buf);
        Ok(buf.len())
    }
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}
impl ChecksumHasher for Crc64NvmeHasher {
    fn finalize(&mut self) -> Vec<u8> {
        // Finalize a clone so the running state stays usable.
        self.hasher.clone().finalize().to_be_bytes().to_vec()
    }
    fn reset(&mut self) {
        self.hasher = crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc64Nvme);
    }
}
/// Encode `value` as a base-128 (LEB128-style) varint, appending to `buf`:
/// low 7 bits first, continuation bit 0x80 set on every byte but the last.
fn encode_varint(buf: &mut Vec<u8>, mut value: u64) {
    loop {
        let low = (value & 0x7F) as u8;
        value >>= 7;
        if value == 0 {
            buf.push(low);
            break;
        }
        buf.push(low | 0x80);
    }
}
/// Decode varint from buffer
/// Returns the value and the number of bytes consumed, or `None` when the
/// input is truncated (a continuation bit on the final byte) or the encoding
/// exceeds 10 bytes (shift would pass 64 bits).
pub fn decode_varint(buf: &[u8]) -> Option<(u64, usize)> {
    let mut acc = 0u64;
    for (idx, &byte) in buf.iter().enumerate() {
        let shift = 7 * idx;
        if shift >= 64 {
            return None; // Overflow
        }
        acc |= u64::from(byte & 0x7F) << shift;
        if byte & 0x80 == 0 {
            return Some((acc, idx + 1));
        }
    }
    None // Incomplete varint
}
/// Read checksums from byte buffer
/// Parses the wire format written by `Checksum::append_to` (varint type,
/// raw digest, optional multipart part count and per-part digests) into a
/// name -> base64 map. When `part > 0`, only that part's checksum is reported
/// (if INCLUDES_MULTIPART data is present). Returns the map and whether any
/// multipart entry was seen.
pub fn read_checksums(mut buf: &[u8], part: i32) -> (HashMap<String, String>, bool) {
    let mut result = HashMap::new();
    let mut is_multipart = false;
    while !buf.is_empty() {
        let (checksum_type_val, n) = match decode_varint(buf) {
            Some((val, n)) => (val, n),
            None => break,
        };
        buf = &buf[n..];
        let checksum_type = ChecksumType(checksum_type_val as u32);
        let length = checksum_type.raw_byte_len();
        // Unknown algorithm or truncated digest: stop parsing.
        if length == 0 || buf.len() < length {
            break;
        }
        let checksum_bytes = &buf[..length];
        buf = &buf[length..];
        let mut checksum_str = general_purpose::STANDARD.encode(checksum_bytes);
        if checksum_type.is(ChecksumType::MULTIPART) {
            is_multipart = true;
            let (parts_count, n) = match decode_varint(buf) {
                Some((val, n)) => (val, n),
                None => break,
            };
            buf = &buf[n..];
            if !checksum_type.full_object_requested() {
                // Composite style: value carries a "-<parts>" suffix.
                checksum_str = format!("{checksum_str}-{parts_count}");
            } else if part <= 0 {
                result.insert("x-amz-checksum-type".to_string(), "FULL_OBJECT".to_string());
            }
            if part > 0 {
                checksum_str.clear();
            }
            if checksum_type.is(ChecksumType::INCLUDES_MULTIPART) {
                let want_len = parts_count as usize * length;
                if buf.len() < want_len {
                    break;
                }
                // Read part checksum
                if part > 0 && (part as u64) <= parts_count {
                    let offset = ((part - 1) as usize) * length;
                    let part_checksum = &buf[offset..offset + length];
                    checksum_str = general_purpose::STANDARD.encode(part_checksum);
                }
                buf = &buf[want_len..];
            }
        } else if part > 1 {
            // For non-multipart, checksum is part 1
            checksum_str.clear();
        }
        if !checksum_str.is_empty() {
            result.insert(checksum_type.to_string(), checksum_str);
        }
    }
    (result, is_multipart)
}
/// Read all part checksums from buffer
///
/// Walks the same record format as `read_checksums` and collects, for every
/// part, a map of checksum-type name -> base64 per-part checksum. The
/// result length is fixed by the first record's part count.
///
/// NOTE(review): a varint parts_count is read for *every* record here,
/// while `read_checksums` only encodes one for MULTIPART types — verify
/// that non-multipart records never appear in this buffer, otherwise
/// parsing misaligns.
pub fn read_part_checksums(mut buf: &[u8]) -> Vec<HashMap<String, String>> {
    let mut result = Vec::new();
    while !buf.is_empty() {
        let (checksum_type_val, n) = match decode_varint(buf) {
            Some((val, n)) => (val, n),
            None => break,
        };
        buf = &buf[n..];
        let checksum_type = ChecksumType(checksum_type_val as u32);
        // Unknown type (length 0) or short buffer: stop parsing.
        let length = checksum_type.raw_byte_len();
        if length == 0 || buf.len() < length {
            break;
        }
        // Skip main checksum
        buf = &buf[length..];
        let (parts_count, n) = match decode_varint(buf) {
            Some((val, n)) => (val, n),
            None => break,
        };
        buf = &buf[n..];
        if !checksum_type.is(ChecksumType::INCLUDES_MULTIPART) {
            // No per-part data follows for this record.
            continue;
        }
        // Size the result from the first record that has per-part data;
        // later records reuse the same slots (one map per part).
        if result.is_empty() {
            result.resize(parts_count as usize, HashMap::new());
        }
        for part_checksum in result.iter_mut() {
            // NOTE(review): breaking here leaves `buf` only partially
            // consumed for this record, so the outer loop would misparse any
            // following record — confirm truncated input is the only way in.
            if buf.len() < length {
                break;
            }
            let checksum_bytes = &buf[..length];
            buf = &buf[length..];
            let checksum_str = general_purpose::STANDARD.encode(checksum_bytes);
            part_checksum.insert(checksum_type.to_string(), checksum_str);
        }
    }
    result
}
/// CRC64 NVME polynomial constant
///
/// NOTE(review): verify this value — the CRC-64/NVME generator polynomial
/// is commonly quoted as 0xad93d23594c935a9 (normal form) or
/// 0x9a6c9329ac4bc9b5 (reflected form); confirm which form the combine
/// routine below expects and that the low bytes here are not a typo.
const CRC64_NVME_POLYNOMIAL: u64 = 0xad93d23594c93659;
/// GF(2) matrix multiplication
///
/// Multiplies the bit-vector `vec` by the matrix `mat` over GF(2): the
/// result is the XOR of every matrix row whose corresponding bit of `vec`
/// is set (row `i` pairs with bit `i`), mirroring zlib's
/// `gf2_matrix_times`.
///
/// Bug fix: the previous version advanced the row iterator inside the
/// `vec & 1` test *and* again at the bottom of the loop, so every set bit
/// consumed two rows and the wrong rows were XORed into the sum.
fn gf2_matrix_times(mat: &[u64], mut vec: u64) -> u64 {
    let mut sum = 0u64;
    for &row in mat {
        if vec == 0 {
            break;
        }
        if vec & 1 != 0 {
            sum ^= row;
        }
        vec >>= 1;
    }
    sum
}
/// Square a GF(2) matrix
///
/// Writes `mat * mat` (GF(2) matrix product) into `square`; row `i` of the
/// result is `mat` applied to row `i` of `mat`. Both slices must be the
/// same length.
fn gf2_matrix_square(square: &mut [u64], mat: &[u64]) {
    if square.len() != mat.len() {
        panic!("square matrix size mismatch");
    }
    for (dst, &row) in square.iter_mut().zip(mat.iter()) {
        *dst = gf2_matrix_times(mat, row);
    }
}
/// Combine two CRC32 values
///
/// Returns the combined CRC-32 hash value of the two passed CRC-32
/// hash values crc1 and crc2. poly represents the generator polynomial
/// and len2 specifies the byte length that the crc2 hash covers.
///
/// Port of zlib's `crc32_combine_`: builds "append 2^k zero bytes"
/// operators as GF(2) matrices and applies one per set bit of `len2`.
/// Cleanup: dropped a needless `.enumerate()` whose index was unused.
fn crc32_combine(poly: u32, crc1: u32, crc2: u32, len2: i64) -> u32 {
    // Degenerate case (also disallow negative lengths)
    if len2 <= 0 {
        return crc1;
    }
    let mut even = [0u64; 32]; // even-power-of-two zeros operator
    let mut odd = [0u64; 32]; // odd-power-of-two zeros operator
    // Put operator for one zero bit in odd
    odd[0] = poly as u64; // CRC-32 polynomial
    let mut row = 1u64;
    for odd_val in odd.iter_mut().skip(1) {
        *odd_val = row;
        row <<= 1;
    }
    // Put operator for two zero bits in even
    gf2_matrix_square(&mut even, &odd);
    // Put operator for four zero bits in odd
    gf2_matrix_square(&mut odd, &even);
    // Apply len2 zeros to crc1 (first square will put the operator for one
    // zero byte, eight zero bits, in even)
    let mut crc1n = crc1 as u64;
    let mut len2 = len2;
    loop {
        // Apply zeros operator for this bit of len2
        gf2_matrix_square(&mut even, &odd);
        if len2 & 1 != 0 {
            crc1n = gf2_matrix_times(&even, crc1n);
        }
        len2 >>= 1;
        // If no more bits set, then done
        if len2 == 0 {
            break;
        }
        // Another iteration of the loop with odd and even swapped
        gf2_matrix_square(&mut odd, &even);
        if len2 & 1 != 0 {
            crc1n = gf2_matrix_times(&odd, crc1n);
        }
        len2 >>= 1;
        // If no more bits set, then done
        if len2 == 0 {
            break;
        }
    }
    // Return combined crc
    crc1n ^= crc2 as u64;
    crc1n as u32
}
/// Combine two CRC64 values
fn crc64_combine(poly: u64, crc1: u64, crc2: u64, len2: i64) -> u64 {
// Degenerate case (also disallow negative lengths)
if len2 <= 0 {
return crc1;
}
let mut even = [0u64; 64]; // even-power-of-two zeros operator
let mut odd = [0u64; 64]; // odd-power-of-two zeros operator
// Put operator for one zero bit in odd
odd[0] = poly; // CRC-64 polynomial
let mut row = 1u64;
for (_i, odd_val) in odd.iter_mut().enumerate().skip(1) {
*odd_val = row;
row <<= 1;
}
// Put operator for two zero bits in even
gf2_matrix_square(&mut even, &odd);
// Put operator for four zero bits in odd
gf2_matrix_square(&mut odd, &even);
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/rio/src/http_reader.rs | crates/rio/src/http_reader.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{EtagResolvable, HashReaderDetector, HashReaderMut};
use bytes::Bytes;
use futures::{Stream, TryStreamExt as _};
use http::HeaderMap;
use pin_project_lite::pin_project;
use reqwest::{Certificate, Client, Identity, Method, RequestBuilder};
use std::error::Error as _;
use std::io::{self, Error};
use std::ops::Not as _;
use std::pin::Pin;
use std::sync::LazyLock;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::sync::mpsc;
use tokio_util::io::StreamReader;
use tracing::error;
/// Get the TLS path from the RUSTFS_TLS_PATH environment variable.
/// The value is read once and cached for the lifetime of the process; an
/// unset or empty variable yields `None`.
fn tls_path() -> Option<&'static std::path::PathBuf> {
    static TLS_PATH: LazyLock<Option<std::path::PathBuf>> = LazyLock::new(|| {
        match std::env::var("RUSTFS_TLS_PATH") {
            Ok(s) if !s.is_empty() => Some(std::path::PathBuf::from(s)),
            _ => None,
        }
    });
    TLS_PATH.as_ref()
}
/// Load CA root certificates from the RUSTFS_TLS_PATH directory.
/// The CA certificates should be in PEM format and stored in the file
/// specified by the RUSTFS_CA_CERT constant.
/// If the file does not exist or cannot be read, return the builder unchanged.
fn load_ca_roots_from_tls_path(builder: reqwest::ClientBuilder) -> reqwest::ClientBuilder {
    let ca_path = match tls_path() {
        Some(tp) => tp.join(rustfs_config::RUSTFS_CA_CERT),
        None => return builder,
    };
    if !ca_path.exists() {
        return builder;
    }
    let certs_der = match rustfs_utils::load_cert_bundle_der_bytes(ca_path.to_str().unwrap_or_default()) {
        Ok(ders) => ders,
        Err(_) => return builder,
    };
    // Register every parseable certificate as an additional trust root;
    // unparseable entries are skipped silently, matching best-effort intent.
    certs_der
        .into_iter()
        .filter_map(|der| Certificate::from_der(&der).ok())
        .fold(builder, |b, cert| b.add_root_certificate(cert))
}
/// Load optional mTLS identity from the RUSTFS_TLS_PATH directory.
/// The client certificate and private key should be in PEM format and stored
/// in the files specified by RUSTFS_CLIENT_CERT_FILENAME and
/// RUSTFS_CLIENT_KEY_FILENAME constants.
/// If the files do not exist or cannot be read, return None.
fn load_optional_mtls_identity_from_tls_path() -> Option<Identity> {
    let tp = tls_path()?;
    let cert = std::fs::read(tp.join(rustfs_config::RUSTFS_CLIENT_CERT_FILENAME)).ok()?;
    let key = std::fs::read(tp.join(rustfs_config::RUSTFS_CLIENT_KEY_FILENAME)).ok()?;
    // Concatenate cert + key into a single PEM bundle, ensuring the
    // certificate block ends with a newline before the key block starts.
    let mut pem = Vec::with_capacity(cert.len() + key.len() + 1);
    pem.extend_from_slice(&cert);
    if pem.last() != Some(&b'\n') {
        pem.push(b'\n');
    }
    pem.extend_from_slice(&key);
    match Identity::from_pem(&pem) {
        Ok(identity) => Some(identity),
        Err(e) => {
            error!("Failed to load mTLS identity from PEM: {e}");
            None
        }
    }
}
/// Return a clone of the process-wide `reqwest::Client`.
fn get_http_client() -> Client {
    // Reuse the HTTP connection pool in the global `reqwest::Client` instance
    // TODO: interact with load balancing?
    static CLIENT: LazyLock<Client> = LazyLock::new(|| {
        let base = Client::builder()
            .connect_timeout(std::time::Duration::from_secs(5))
            .tcp_keepalive(std::time::Duration::from_secs(10))
            .http2_keep_alive_interval(std::time::Duration::from_secs(5))
            .http2_keep_alive_timeout(std::time::Duration::from_secs(3))
            .http2_keep_alive_while_idle(true);
        // HTTPS root trust + optional mTLS identity from RUSTFS_TLS_PATH
        let with_roots = load_ca_roots_from_tls_path(base);
        let configured = match load_optional_mtls_identity_from_tls_path() {
            Some(id) => with_roots.identity(id),
            None => with_roots,
        };
        configured.build().expect("Failed to create global HTTP client")
    });
    CLIENT.clone()
}
/// Compile-time switch for verbose HTTP request/response logging.
static HTTP_DEBUG_LOG: bool = false;
/// Print a debug line to stdout when `HTTP_DEBUG_LOG` is enabled;
/// otherwise a no-op that the compiler can eliminate.
#[inline(always)]
fn http_debug_log(args: std::fmt::Arguments) {
    if HTTP_DEBUG_LOG {
        println!("{args}");
    }
}
/// `println!`-style convenience wrapper around `http_debug_log`.
macro_rules! http_log {
    ($($arg:tt)*) => {
        http_debug_log(format_args!($($arg)*));
    };
}
pin_project! {
    /// Streaming reader over the body of an HTTP response.
    ///
    /// Wraps the response byte stream in a `StreamReader` so callers can
    /// consume it through `AsyncRead`; the request URL, method, and headers
    /// are retained for introspection.
    pub struct HttpReader {
        url:String,
        method: Method,
        headers: HeaderMap,
        #[pin]
        inner: StreamReader<Pin<Box<dyn Stream<Item=std::io::Result<Bytes>>+Send+Sync>>, Bytes>,
    }
}
impl HttpReader {
    /// Create a new HttpReader from a URL. The request is performed immediately.
    ///
    /// Convenience wrapper around [`HttpReader::with_capacity`] with a zero
    /// buffer-size hint.
    pub async fn new(url: String, method: Method, headers: HeaderMap, body: Option<Vec<u8>>) -> io::Result<Self> {
        Self::with_capacity(url, method, headers, body, 0).await
    }
    /// Create a new HttpReader from a URL. The request is performed immediately.
    ///
    /// A HEAD request is issued first to verify the target is reachable and
    /// returns success; only then is the real request sent and its body
    /// exposed as an `AsyncRead` stream. `_read_buf_size` is currently
    /// unused.
    pub async fn with_capacity(
        url: String,
        method: Method,
        headers: HeaderMap,
        body: Option<Vec<u8>>,
        _read_buf_size: usize,
    ) -> io::Result<Self> {
        // First, check if the connection is available (HEAD)
        let client = get_http_client();
        let head_resp = client.head(&url).headers(headers.clone()).send().await;
        match head_resp {
            Ok(resp) => {
                http_log!("[HttpReader::new] HEAD status: {}", resp.status());
                if !resp.status().is_success() {
                    return Err(Error::other(format!("HEAD failed: url: {}, status {}", url, resp.status())));
                }
            }
            Err(e) => {
                http_log!("[HttpReader::new] HEAD error: {e}");
                // Prefer the underlying source error (e.g. connect failure)
                // over reqwest's wrapper message.
                return Err(Error::other(e.source().map(|s| s.to_string()).unwrap_or_else(|| e.to_string())));
            }
        }
        // Reuse the same pooled client for the actual request (previously a
        // second, redundant get_http_client() call).
        let mut request: RequestBuilder = client.request(method.clone(), url.clone()).headers(headers.clone());
        if let Some(body) = body {
            request = request.body(body);
        }
        let resp = request
            .send()
            .await
            .map_err(|e| Error::other(format!("HttpReader HTTP request error: {e}")))?;
        if resp.status().is_success().not() {
            return Err(Error::other(format!(
                "HttpReader HTTP request failed with non-200 status {}",
                resp.status()
            )));
        }
        // Surface transport errors as io::Error so AsyncRead callers see them.
        let stream = resp
            .bytes_stream()
            .map_err(|e| Error::other(format!("HttpReader stream error: {e}")));
        Ok(Self {
            inner: StreamReader::new(Box::pin(stream)),
            url,
            method,
            headers,
        })
    }
    /// Request URL this reader was created for.
    pub fn url(&self) -> &str {
        &self.url
    }
    /// HTTP method used for the body request.
    pub fn method(&self) -> &Method {
        &self.method
    }
    /// Headers sent with the request.
    pub fn headers(&self) -> &HeaderMap {
        &self.headers
    }
}
impl AsyncRead for HttpReader {
    /// Delegate reads to the wrapped response-body `StreamReader`.
    fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<std::io::Result<()>> {
        self.project().inner.poll_read(cx, buf)
    }
}
impl EtagResolvable for HttpReader {
    // An HTTP-backed reader carries no precomputed ETag of its own, so both
    // hooks report "nothing here".
    fn is_etag_reader(&self) -> bool {
        false
    }
    fn try_resolve_etag(&mut self) -> Option<String> {
        None
    }
}
impl HashReaderDetector for HttpReader {
    // HttpReader performs no hashing, so it never presents itself as a
    // HashReader.
    fn is_hash_reader(&self) -> bool {
        false
    }
    fn as_hash_reader_mut(&mut self) -> Option<&mut dyn HashReaderMut> {
        None
    }
}
/// Adapts an mpsc receiver of `Option<Bytes>` into a byte stream.
///
/// `Some(bytes)` items are forwarded as `Ok(bytes)`; a `None` item (the
/// writer's explicit shutdown marker) or a closed channel both terminate
/// the stream.
struct ReceiverStream {
    receiver: mpsc::Receiver<Option<Bytes>>,
}
impl Stream for ReceiverStream {
    type Item = Result<Bytes, std::io::Error>;
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match Pin::new(&mut self.receiver).poll_recv(cx) {
            Poll::Ready(Some(Some(bytes))) => Poll::Ready(Some(Ok(bytes))),
            // Explicit shutdown marker or sender dropped: end of stream.
            Poll::Ready(Some(None)) | Poll::Ready(None) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }
}
pin_project! {
    /// Streaming writer that forwards written bytes as an HTTP request body.
    ///
    /// Bytes written via `AsyncWrite` are passed over `sender` to a
    /// background task (`handle`) that streams them as the request body.
    /// `err_rx` delivers the first request failure back to the writer, and
    /// `finish` records that the end-of-body marker has been sent.
    pub struct HttpWriter {
        url:String,
        method: Method,
        headers: HeaderMap,
        err_rx: tokio::sync::oneshot::Receiver<std::io::Error>,
        sender: tokio::sync::mpsc::Sender<Option<Bytes>>,
        handle: tokio::task::JoinHandle<std::io::Result<()>>,
        finish:bool,
    }
}
impl HttpWriter {
    /// Create a new HttpWriter for the given URL. The HTTP request is performed in the background.
    ///
    /// An empty-body PUT is issued first as a writability probe; only when it
    /// succeeds is the streaming request spawned on a background task.
    /// NOTE(review): the probe always uses PUT even when `method` is
    /// something else — confirm that is intended for all callers.
    pub async fn new(url: String, method: Method, headers: HeaderMap) -> io::Result<Self> {
        let url_clone = url.clone();
        let method_clone = method.clone();
        let headers_clone = headers.clone();
        // First, try to write empty data to check if writable
        let client = get_http_client();
        let resp = client.put(&url).headers(headers.clone()).body(Vec::new()).send().await;
        match resp {
            Ok(resp) => {
                if !resp.status().is_success() {
                    return Err(Error::other(format!("Empty PUT failed: status {}", resp.status())));
                }
            }
            Err(e) => {
                return Err(Error::other(format!("Empty PUT failed: {e}")));
            }
        }
        // Channel carrying body chunks; `None` is the explicit end-of-body
        // marker consumed by ReceiverStream.
        let (sender, receiver) = tokio::sync::mpsc::channel::<Option<Bytes>>(8);
        // One-shot used by the background task to report the first failure
        // back to poll_write.
        let (err_tx, err_rx) = tokio::sync::oneshot::channel::<io::Error>();
        let handle = tokio::spawn(async move {
            // Stream chunks from the channel as the request body.
            let stream = ReceiverStream { receiver };
            let body = reqwest::Body::wrap_stream(stream);
            let client = get_http_client();
            let request = client
                .request(method_clone, url_clone.clone())
                .headers(headers_clone.clone())
                .body(body);
            // Hold the request until the shutdown signal is received
            let response = request.send().await;
            match response {
                Ok(resp) => {
                    if !resp.status().is_success() {
                        // Report via the one-shot AND the join handle so the
                        // failure is visible from either side.
                        let _ = err_tx.send(Error::other(format!(
                            "HttpWriter HTTP request failed with non-200 status {}",
                            resp.status()
                        )));
                        return Err(Error::other(format!("HTTP request failed with non-200 status {}", resp.status())));
                    }
                }
                Err(e) => {
                    let _ = err_tx.send(Error::other(format!("HTTP request failed: {e}")));
                    return Err(Error::other(format!("HTTP request failed: {e}")));
                }
            }
            Ok(())
        });
        Ok(Self {
            url,
            method,
            headers,
            err_rx,
            sender,
            handle,
            finish: false,
        })
    }
    /// Target URL of the streaming request.
    pub fn url(&self) -> &str {
        &self.url
    }
    /// HTTP method used for the streaming request.
    pub fn method(&self) -> &Method {
        &self.method
    }
    /// Headers sent with the streaming request.
    pub fn headers(&self) -> &HeaderMap {
        &self.headers
    }
}
impl AsyncWrite for HttpWriter {
    /// Forward `buf` to the background request task as one body chunk.
    ///
    /// NOTE(review): `try_send` fails immediately when the 8-slot channel is
    /// full, surfacing as a write error instead of `Poll::Pending`
    /// backpressure — confirm callers tolerate this.
    fn poll_write(mut self: Pin<&mut Self>, _cx: &mut Context<'_>, buf: &[u8]) -> Poll<io::Result<usize>> {
        // Surface any failure already reported by the background task.
        if let Ok(e) = Pin::new(&mut self.err_rx).try_recv() {
            return Poll::Ready(Err(e));
        }
        self.sender
            .try_send(Some(Bytes::copy_from_slice(buf)))
            .map_err(|e| Error::other(format!("HttpWriter send error: {e}")))?;
        Poll::Ready(Ok(buf.len()))
    }
    // Chunks are handed to the channel as soon as they are written, so there
    // is nothing to flush here.
    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        Poll::Ready(Ok(()))
    }
    /// Send the end-of-body marker (once) and then wait for the background
    /// request task to finish.
    fn poll_shutdown(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        if !self.finish {
            // `None` tells ReceiverStream to terminate the request body.
            self.sender
                .try_send(None)
                .map_err(|e| Error::other(format!("HttpWriter shutdown error: {e}")))?;
            self.finish = true;
        }
        // Wait for the HTTP request to complete
        use futures::FutureExt;
        match Pin::new(&mut self.get_mut().handle).poll_unpin(_cx) {
            Poll::Ready(Ok(_)) => {
                // Background task finished; its Ok/Err payload was already
                // surfaced through err_rx if it failed.
            }
            Poll::Ready(Err(e)) => {
                return Poll::Ready(Err(Error::other(format!("HTTP request failed: {e}"))));
            }
            Poll::Pending => {
                return Poll::Pending;
            }
        }
        Poll::Ready(Ok(()))
    }
}
// #[cfg(test)]
// mod tests {
// use super::*;
// use reqwest::Method;
// use std::vec;
// use tokio::io::{AsyncReadExt, AsyncWriteExt};
// #[tokio::test]
// async fn test_http_writer_err() {
// // Use a real local server for integration, or mockito for unit test
// // Here, we use the Go test server at 127.0.0.1:8081 (scripts/testfile.go)
// let url = "http://127.0.0.1:8081/testfile".to_string();
// let data = vec![42u8; 8];
// // Write
// // Add header X-Deny-Write = 1 to simulate non-writable situation
// let mut headers = HeaderMap::new();
// headers.insert("X-Deny-Write", "1".parse().unwrap());
// // Here we use PUT method
// let writer_result = HttpWriter::new(url.clone(), Method::PUT, headers).await;
// match writer_result {
// Ok(mut writer) => {
// // If creation succeeds, write should fail
// let write_result = writer.write_all(&data).await;
// assert!(write_result.is_err(), "write_all should fail when server denies write");
// if let Err(e) = write_result {
// println!("write_all error: {e}");
// }
// let shutdown_result = writer.shutdown().await;
// if let Err(e) = shutdown_result {
// println!("shutdown error: {e}");
// }
// }
// Err(e) => {
// // Direct construction failure is also acceptable
// println!("HttpWriter::new error: {e}");
// assert!(
// e.to_string().contains("Empty PUT failed") || e.to_string().contains("Forbidden"),
// "unexpected error: {e}"
// );
// return;
// }
// }
// // Should not reach here
// panic!("HttpWriter should not allow writing when server denies write");
// }
// #[tokio::test]
// async fn test_http_writer_and_reader_ok() {
// // Use local Go test server
// let url = "http://127.0.0.1:8081/testfile".to_string();
// let data = vec![99u8; 512 * 1024]; // 512KB of data
// // Write (without X-Deny-Write)
// let headers = HeaderMap::new();
// let mut writer = HttpWriter::new(url.clone(), Method::PUT, headers).await.unwrap();
// writer.write_all(&data).await.unwrap();
// writer.shutdown().await.unwrap();
// http_log!("Wrote {} bytes to {} (ok case)", data.len(), url);
// // Read back
// let mut reader = HttpReader::with_capacity(url.clone(), Method::GET, HeaderMap::new(), 8192)
// .await
// .unwrap();
// let mut buf = Vec::new();
// reader.read_to_end(&mut buf).await.unwrap();
// assert_eq!(buf, data);
// // println!("Read {} bytes from {} (ok case)", buf.len(), url);
// // tokio::time::sleep(std::time::Duration::from_secs(2)).await; // Wait for server to process
// // println!("[test_http_writer_and_reader_ok] completed successfully");
// }
// }
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/rio/src/writer.rs | crates/rio/src/writer.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io::Cursor;
use std::pin::Pin;
use tokio::io::AsyncWrite;
use crate::HttpWriter;
/// Unified sink over the writer backends used by rio.
pub enum Writer {
    /// In-memory buffer writer.
    Cursor(Cursor<Vec<u8>>),
    /// Writer streaming to a remote HTTP endpoint.
    Http(HttpWriter),
    /// Any other boxed async writer.
    Other(Box<dyn AsyncWrite + Unpin + Send + Sync>),
}
impl Writer {
    /// Create a Writer::Other from any AsyncWrite + Unpin + Send type.
    pub fn from_tokio_writer<W>(w: W) -> Self
    where
        W: AsyncWrite + Unpin + Send + Sync + 'static,
    {
        Writer::Other(Box::new(w))
    }
    /// Wrap an in-memory cursor.
    pub fn from_cursor(w: Cursor<Vec<u8>>) -> Self {
        Writer::Cursor(w)
    }
    /// Wrap an HTTP-backed writer.
    pub fn from_http(w: HttpWriter) -> Self {
        Writer::Http(w)
    }
    /// Consume the writer and return the cursor's buffer, if this is a
    /// cursor writer.
    pub fn into_cursor_inner(self) -> Option<Vec<u8>> {
        self.into_cursor().map(Cursor::into_inner)
    }
    /// Borrow the underlying cursor, if this is a cursor writer.
    pub fn as_cursor(&mut self) -> Option<&mut Cursor<Vec<u8>>> {
        if let Writer::Cursor(w) = self { Some(w) } else { None }
    }
    /// Borrow the underlying HTTP writer, if applicable.
    pub fn as_http(&mut self) -> Option<&mut HttpWriter> {
        if let Writer::Http(w) = self { Some(w) } else { None }
    }
    /// Consume and return the HTTP writer, if applicable.
    pub fn into_http(self) -> Option<HttpWriter> {
        if let Writer::Http(w) = self { Some(w) } else { None }
    }
    /// Consume and return the cursor, if applicable.
    pub fn into_cursor(self) -> Option<Cursor<Vec<u8>>> {
        if let Writer::Cursor(w) = self { Some(w) } else { None }
    }
}
impl AsyncWrite for Writer {
    /// Dispatch the write to whichever backend this enum currently wraps.
    fn poll_write(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &[u8],
    ) -> std::task::Poll<std::io::Result<usize>> {
        let this = self.get_mut();
        match this {
            Writer::Cursor(inner) => Pin::new(inner).poll_write(cx, buf),
            Writer::Http(inner) => Pin::new(inner).poll_write(cx, buf),
            Writer::Other(inner) => Pin::new(inner.as_mut()).poll_write(cx, buf),
        }
    }
    /// Dispatch the flush to the wrapped backend.
    fn poll_flush(self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> std::task::Poll<std::io::Result<()>> {
        let this = self.get_mut();
        match this {
            Writer::Cursor(inner) => Pin::new(inner).poll_flush(cx),
            Writer::Http(inner) => Pin::new(inner).poll_flush(cx),
            Writer::Other(inner) => Pin::new(inner.as_mut()).poll_flush(cx),
        }
    }
    /// Dispatch the shutdown to the wrapped backend.
    fn poll_shutdown(self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> std::task::Poll<std::io::Result<()>> {
        let this = self.get_mut();
        match this {
            Writer::Cursor(inner) => Pin::new(inner).poll_shutdown(cx),
            Writer::Http(inner) => Pin::new(inner).poll_shutdown(cx),
            Writer::Other(inner) => Pin::new(inner.as_mut()).poll_shutdown(cx),
        }
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/rio/src/etag.rs | crates/rio/src/etag.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*!
# AsyncRead Wrapper Types with ETag Support
This module demonstrates a pattern for handling wrapped AsyncRead types where:
- Reader types contain the actual ETag capability
- Wrapper types need to be recursively unwrapped
- The system can handle arbitrary nesting like `CompressReader<EncryptReader<EtagReader<R>>>`
## Key Components
### Trait-Based Approach
The `EtagResolvable` trait provides a clean way to handle recursive unwrapping:
- Reader types implement it by returning their ETag directly
- Wrapper types implement it by delegating to their inner type
## Usage Examples
```rust
use rustfs_rio::{CompressReader, EtagReader, resolve_etag_generic};
use rustfs_rio::WarpReader;
use rustfs_utils::compress::CompressionAlgorithm;
use tokio::io::BufReader;
use std::io::Cursor;
// Direct usage with trait-based approach
let data = b"test data";
let reader = BufReader::new(Cursor::new(&data[..]));
let reader = Box::new(WarpReader::new(reader));
let etag_reader = EtagReader::new(reader, Some("test_etag".to_string()));
let mut reader = CompressReader::new(etag_reader, CompressionAlgorithm::Gzip);
let etag = resolve_etag_generic(&mut reader);
```
*/
#[cfg(test)]
mod tests {
use crate::{CompressReader, EncryptReader, EtagReader, HashReader};
use crate::{WarpReader, resolve_etag_generic};
use md5::Md5;
use rustfs_utils::compress::CompressionAlgorithm;
use std::io::Cursor;
use tokio::io::BufReader;
#[test]
fn test_etag_reader_resolution() {
let data = b"test data";
let reader = BufReader::new(Cursor::new(&data[..]));
let reader = Box::new(WarpReader::new(reader));
let mut etag_reader = EtagReader::new(reader, Some("test_etag".to_string()));
// Test direct ETag resolution
assert_eq!(resolve_etag_generic(&mut etag_reader), Some("test_etag".to_string()));
}
#[test]
fn test_hash_reader_resolution() {
let data = b"test data";
let reader = BufReader::new(Cursor::new(&data[..]));
let reader = Box::new(WarpReader::new(reader));
let mut hash_reader =
HashReader::new(reader, data.len() as i64, data.len() as i64, Some("hash_etag".to_string()), None, false).unwrap();
// Test HashReader ETag resolution
assert_eq!(resolve_etag_generic(&mut hash_reader), Some("hash_etag".to_string()));
}
#[test]
fn test_compress_reader_delegation() {
let data = b"test data for compression";
let reader = BufReader::new(Cursor::new(&data[..]));
let reader = Box::new(WarpReader::new(reader));
let etag_reader = EtagReader::new(reader, Some("compress_etag".to_string()));
let mut compress_reader = CompressReader::new(etag_reader, CompressionAlgorithm::Gzip);
// Test that CompressReader delegates to inner EtagReader
assert_eq!(resolve_etag_generic(&mut compress_reader), Some("compress_etag".to_string()));
}
#[test]
fn test_encrypt_reader_delegation() {
let data = b"test data for encryption";
let reader = BufReader::new(Cursor::new(&data[..]));
let reader = Box::new(WarpReader::new(reader));
let etag_reader = EtagReader::new(reader, Some("encrypt_etag".to_string()));
let key = [0u8; 32];
let nonce = [0u8; 12];
let mut encrypt_reader = EncryptReader::new(etag_reader, key, nonce);
// Test that EncryptReader delegates to inner EtagReader
assert_eq!(resolve_etag_generic(&mut encrypt_reader), Some("encrypt_etag".to_string()));
}
#[tokio::test]
async fn test_complex_nesting() {
use md5::Digest;
use tokio::io::AsyncReadExt;
let data = b"test data for complex nesting";
let mut hasher = Md5::new();
hasher.update(data);
let etag = hasher.finalize();
let etag_hex = hex_simd::encode_to_string(etag, hex_simd::AsciiCase::Lower);
let reader = BufReader::new(Cursor::new(&data[..]));
let reader = Box::new(WarpReader::new(reader));
// Create a complex nested structure: CompressReader<EncryptReader<EtagReader<BufReader<Cursor>>>>
let etag_reader = EtagReader::new(reader, Some(etag_hex.clone()));
let key = [0u8; 32];
let nonce = [0u8; 12];
let encrypt_reader = EncryptReader::new(etag_reader, key, nonce);
let mut compress_reader = CompressReader::new(encrypt_reader, CompressionAlgorithm::Gzip);
compress_reader.read_to_end(&mut Vec::new()).await.unwrap();
// Test that nested structure can resolve ETag
assert_eq!(resolve_etag_generic(&mut compress_reader), Some(etag_hex));
}
#[test]
fn test_hash_reader_in_nested_structure() {
let data = b"test data for hash reader nesting";
let reader = BufReader::new(Cursor::new(&data[..]));
let reader = Box::new(WarpReader::new(reader));
// Create nested structure: CompressReader<HashReader<BufReader<Cursor>>>
let hash_reader = HashReader::new(
reader,
data.len() as i64,
data.len() as i64,
Some("hash_nested_etag".to_string()),
None,
false,
)
.unwrap();
let mut compress_reader = CompressReader::new(hash_reader, CompressionAlgorithm::Deflate);
// Test that nested HashReader can be resolved
assert_eq!(resolve_etag_generic(&mut compress_reader), Some("hash_nested_etag".to_string()));
}
#[tokio::test]
async fn test_comprehensive_etag_extraction() {
use md5::Digest;
use tokio::io::AsyncReadExt;
println!("🔍 Testing comprehensive ETag extraction with real reader types...");
// Test 1: Simple EtagReader
let data1 = b"simple test";
let mut hasher = Md5::new();
hasher.update(data1);
let etag = hasher.finalize();
let etag_hex = hex_simd::encode_to_string(etag, hex_simd::AsciiCase::Lower);
let reader1 = BufReader::new(Cursor::new(&data1[..]));
let reader1 = Box::new(WarpReader::new(reader1));
let mut etag_reader = EtagReader::new(reader1, Some(etag_hex.clone()));
etag_reader.read_to_end(&mut Vec::new()).await.unwrap();
assert_eq!(resolve_etag_generic(&mut etag_reader), Some(etag_hex.clone()));
// Test 2: HashReader with ETag
let data2 = b"hash test";
let mut hasher = Md5::new();
hasher.update(data2);
let etag = hasher.finalize();
let etag_hex = hex_simd::encode_to_string(etag, hex_simd::AsciiCase::Lower);
let reader2 = BufReader::new(Cursor::new(&data2[..]));
let reader2 = Box::new(WarpReader::new(reader2));
let mut hash_reader =
HashReader::new(reader2, data2.len() as i64, data2.len() as i64, Some(etag_hex.clone()), None, false).unwrap();
hash_reader.read_to_end(&mut Vec::new()).await.unwrap();
assert_eq!(resolve_etag_generic(&mut hash_reader), Some(etag_hex.clone()));
// Test 3: Single wrapper - CompressReader<EtagReader>
let data3 = b"compress test";
let mut hasher = Md5::new();
hasher.update(data3);
let etag = hasher.finalize();
let etag_hex = hex_simd::encode_to_string(etag, hex_simd::AsciiCase::Lower);
let reader3 = BufReader::new(Cursor::new(&data3[..]));
let reader3 = Box::new(WarpReader::new(reader3));
let etag_reader3 = EtagReader::new(reader3, Some(etag_hex.clone()));
let mut compress_reader = CompressReader::new(etag_reader3, CompressionAlgorithm::Zstd);
compress_reader.read_to_end(&mut Vec::new()).await.unwrap();
assert_eq!(resolve_etag_generic(&mut compress_reader), Some(etag_hex.clone()));
// Test 4: Double wrapper - CompressReader<EncryptReader<EtagReader>>
let data4 = b"double wrap test";
let mut hasher = Md5::new();
hasher.update(data4);
let etag = hasher.finalize();
let etag_hex = hex_simd::encode_to_string(etag, hex_simd::AsciiCase::Lower);
let reader4 = BufReader::new(Cursor::new(&data4[..]));
let reader4 = Box::new(WarpReader::new(reader4));
let etag_reader4 = EtagReader::new(reader4, Some(etag_hex.clone()));
let key = [1u8; 32];
let nonce = [1u8; 12];
let encrypt_reader4 = EncryptReader::new(etag_reader4, key, nonce);
let mut compress_reader4 = CompressReader::new(encrypt_reader4, CompressionAlgorithm::Gzip);
compress_reader4.read_to_end(&mut Vec::new()).await.unwrap();
assert_eq!(resolve_etag_generic(&mut compress_reader4), Some(etag_hex.clone()));
println!("✅ All ETag extraction methods work correctly!");
println!("✅ Trait-based approach handles recursive unwrapping!");
println!("✅ Complex nesting patterns with real reader types are supported!");
}
#[test]
fn test_real_world_scenario() {
    // Verifies that resolve_etag_generic() unwraps a deeply nested reader
    // stack and surfaces the ETag held by the innermost HashReader/EtagReader.
    println!("🔍 Testing real-world ETag extraction scenario with actual reader types...");
    // Simulate a real-world scenario where we have nested AsyncRead wrappers
    // and need to extract ETag information from deeply nested structures
    let data = b"Real world test data that might be compressed and encrypted";
    let base_reader = BufReader::new(Cursor::new(&data[..]));
    let base_reader = Box::new(WarpReader::new(base_reader));
    // Create a complex nested structure that might occur in practice:
    // CompressReader<EncryptReader<HashReader<BufReader<Cursor>>>>
    let hash_reader = HashReader::new(
        base_reader,
        data.len() as i64,
        data.len() as i64,
        // The ETag we expect to recover through all wrapper layers.
        Some("real_world_etag".to_string()),
        None,
        false,
    )
    .unwrap();
    let key = [42u8; 32];
    let nonce = [24u8; 12];
    let encrypt_reader = EncryptReader::new(hash_reader, key, nonce);
    let mut compress_reader = CompressReader::new(encrypt_reader, CompressionAlgorithm::Deflate);
    // Extract ETag using our generic system
    let extracted_etag = resolve_etag_generic(&mut compress_reader);
    println!("📋 Extracted ETag: {extracted_etag:?}");
    assert_eq!(extracted_etag, Some("real_world_etag".to_string()));
    // Test another complex nesting with EtagReader at the core
    let data2 = b"Another real world scenario";
    let base_reader2 = BufReader::new(Cursor::new(&data2[..]));
    let base_reader2 = Box::new(WarpReader::new(base_reader2));
    let etag_reader = EtagReader::new(base_reader2, Some("core_etag".to_string()));
    let key2 = [99u8; 32];
    let nonce2 = [88u8; 12];
    let encrypt_reader2 = EncryptReader::new(etag_reader, key2, nonce2);
    let mut compress_reader2 = CompressReader::new(encrypt_reader2, CompressionAlgorithm::Zstd);
    let trait_etag = resolve_etag_generic(&mut compress_reader2);
    println!("📋 Trait-based ETag: {trait_etag:?}");
    assert_eq!(trait_etag, Some("core_etag".to_string()));
    println!("✅ Real-world scenario test passed!");
    println!("   - Successfully extracted ETag from nested CompressReader<EncryptReader<HashReader<AsyncRead>>>");
    println!("   - Successfully extracted ETag from nested CompressReader<EncryptReader<EtagReader<AsyncRead>>>");
    println!("   - Trait-based approach works with real reader types");
    println!("   - System handles arbitrary nesting depths with actual implementations");
}
#[test]
fn test_no_etag_scenarios() {
    // Every wrapper combination whose core carries no ETag must resolve to None.
    println!("🔍 Testing scenarios where no ETag is available...");

    // A HashReader constructed without an etag resolves to None.
    let payload = b"no etag test";
    let inner = Box::new(WarpReader::new(BufReader::new(Cursor::new(&payload[..]))));
    let mut bare_hash_reader =
        HashReader::new(inner, payload.len() as i64, payload.len() as i64, None, None, false).unwrap();
    assert_eq!(resolve_etag_generic(&mut bare_hash_reader), None);

    // An EtagReader holding None resolves to None as well.
    let payload = b"no etag test 2";
    let inner = Box::new(WarpReader::new(BufReader::new(Cursor::new(&payload[..]))));
    let mut bare_etag_reader = EtagReader::new(inner, None);
    assert_eq!(resolve_etag_generic(&mut bare_etag_reader), None);

    // Wrapping an etag-less core in a CompressReader still yields None.
    let payload = b"nested no etag test";
    let inner = Box::new(WarpReader::new(BufReader::new(Cursor::new(&payload[..]))));
    let mut wrapped = CompressReader::new(EtagReader::new(inner, None), CompressionAlgorithm::Gzip);
    assert_eq!(resolve_etag_generic(&mut wrapped), None);

    println!("✅ No ETag scenarios handled correctly!");
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/rio/src/encrypt_reader.rs | crates/rio/src/encrypt_reader.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::HashReaderDetector;
use crate::HashReaderMut;
use crate::compress_index::{Index, TryGetIndex};
use crate::{EtagResolvable, Reader};
use aes_gcm::aead::Aead;
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
use pin_project_lite::pin_project;
use rustfs_utils::{put_uvarint, put_uvarint_len};
use std::io::Error;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};
use tracing::debug;
pin_project! {
    /// A reader wrapper that encrypts data on the fly using AES-256-GCM.
    /// This is a demonstration. For production, use a secure and audited crypto library.
    ///
    /// Output framing: each plaintext chunk (up to 8 KiB) becomes an 8-byte
    /// header, a uvarint plaintext length, and the GCM ciphertext; a header
    /// with type byte 0xFF terminates the stream.
    #[derive(Debug)]
    pub struct EncryptReader<R> {
        #[pin]
        pub inner: R,
        key: [u8; 32], // AES-256-GCM key
        nonce: [u8; 12], // 96-bit nonce for GCM
        // Encrypted frame bytes not yet handed to the caller.
        buffer: Vec<u8>,
        // Read position within `buffer`.
        buffer_pos: usize,
        // Set once the end-of-stream frame has been emitted.
        finished: bool,
    }
}
impl<R> EncryptReader<R>
where
    R: Reader,
{
    /// Wraps `inner` so that data read through it is encrypted with
    /// AES-256-GCM under `key`, using `nonce` for the GCM invocations.
    ///
    /// NOTE(review): poll_read reuses this same nonce for every chunk of the
    /// stream — confirm this matches the intended threat model.
    pub fn new(inner: R, key: [u8; 32], nonce: [u8; 12]) -> Self {
        Self {
            inner,
            key,
            nonce,
            buffer: Vec::new(),
            buffer_pos: 0,
            finished: false,
        }
    }
}
impl<R> AsyncRead for EncryptReader<R>
where
    R: AsyncRead + Unpin + Send + Sync,
{
    /// Reads up to 8 KiB of plaintext from `inner`, encrypts it, and serves
    /// the framed ciphertext through an internal buffer. On EOF an 8-byte
    /// end frame (type 0xFF) is emitted before signaling completion.
    fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<std::io::Result<()>> {
        let mut this = self.project();
        // Serve from buffer if any
        if *this.buffer_pos < this.buffer.len() {
            let to_copy = std::cmp::min(buf.remaining(), this.buffer.len() - *this.buffer_pos);
            buf.put_slice(&this.buffer[*this.buffer_pos..*this.buffer_pos + to_copy]);
            *this.buffer_pos += to_copy;
            if *this.buffer_pos == this.buffer.len() {
                this.buffer.clear();
                *this.buffer_pos = 0;
            }
            return Poll::Ready(Ok(()));
        }
        if *this.finished {
            return Poll::Ready(Ok(()));
        }
        // Read a fixed block size from inner
        let block_size = 8 * 1024;
        let mut temp = vec![0u8; block_size];
        let mut temp_buf = ReadBuf::new(&mut temp);
        match this.inner.as_mut().poll_read(cx, &mut temp_buf) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Ok(())) => {
                let n = temp_buf.filled().len();
                if n == 0 {
                    // EOF, write end header (type 0xFF, remaining bytes zero)
                    let mut header = [0u8; 8];
                    header[0] = 0xFF; // type: end
                    *this.buffer = header.to_vec();
                    *this.buffer_pos = 0;
                    *this.finished = true;
                    let to_copy = std::cmp::min(buf.remaining(), this.buffer.len());
                    buf.put_slice(&this.buffer[..to_copy]);
                    *this.buffer_pos += to_copy;
                    Poll::Ready(Ok(()))
                } else {
                    // Encrypt the chunk
                    let cipher = Aes256Gcm::new_from_slice(this.key).expect("key");
                    let nonce = Nonce::try_from(this.nonce.as_slice()).map_err(|_| Error::other("invalid nonce length"))?;
                    let plaintext = &temp_buf.filled()[..n];
                    let plaintext_len = plaintext.len();
                    // CRC is computed over the PLAINTEXT; the decrypt side
                    // verifies it against the decrypted bytes.
                    let crc = {
                        let mut hasher = crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32IsoHdlc);
                        hasher.update(plaintext);
                        hasher.finalize() as u32
                    };
                    let ciphertext = cipher
                        .encrypt(&nonce, plaintext)
                        .map_err(|e| Error::other(format!("encrypt error: {e}")))?;
                    let int_len = put_uvarint_len(plaintext_len as u64);
                    // Length counts the uvarint prefix + ciphertext + 4; the
                    // reader subtracts 4 to get the payload size (the CRC
                    // itself travels in the header, not the payload).
                    let clen = int_len + ciphertext.len() + 4;
                    // Header: 8 bytes
                    //   0: type (0 = encrypted, 0xFF = end)
                    //   1-3: length (little endian u24, see `clen` above)
                    //   4-7: CRC32 of the plaintext (little endian u32)
                    let mut header = [0u8; 8];
                    header[0] = 0x00; // 0 = encrypted
                    header[1] = (clen & 0xFF) as u8;
                    header[2] = ((clen >> 8) & 0xFF) as u8;
                    header[3] = ((clen >> 16) & 0xFF) as u8;
                    header[4] = (crc & 0xFF) as u8;
                    header[5] = ((crc >> 8) & 0xFF) as u8;
                    header[6] = ((crc >> 16) & 0xFF) as u8;
                    header[7] = ((crc >> 24) & 0xFF) as u8;
                    debug!(
                        "encrypt block header typ=0 len={} header={:?} plaintext_len={} ciphertext_len={}",
                        clen,
                        header,
                        plaintext_len,
                        ciphertext.len()
                    );
                    // Assemble frame: header + uvarint(plaintext_len) + ciphertext.
                    let mut out = Vec::with_capacity(8 + int_len + ciphertext.len());
                    out.extend_from_slice(&header);
                    let mut plaintext_len_buf = vec![0u8; int_len];
                    put_uvarint(&mut plaintext_len_buf, plaintext_len as u64);
                    out.extend_from_slice(&plaintext_len_buf);
                    out.extend_from_slice(&ciphertext);
                    *this.buffer = out;
                    *this.buffer_pos = 0;
                    let to_copy = std::cmp::min(buf.remaining(), this.buffer.len());
                    buf.put_slice(&this.buffer[..to_copy]);
                    *this.buffer_pos += to_copy;
                    Poll::Ready(Ok(()))
                }
            }
            Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
        }
    }
}
impl<R> EtagResolvable for EncryptReader<R>
where
    R: EtagResolvable,
{
    /// Delegates ETag resolution to the wrapped reader; encryption does not
    /// change which ETag the underlying source reports.
    fn try_resolve_etag(&mut self) -> Option<String> {
        self.inner.try_resolve_etag()
    }
}
impl<R> HashReaderDetector for EncryptReader<R>
where
    R: EtagResolvable + HashReaderDetector,
{
    /// Forwards HashReader detection to the wrapped reader.
    fn is_hash_reader(&self) -> bool {
        self.inner.is_hash_reader()
    }
    /// Forwards mutable HashReader access to the wrapped reader.
    fn as_hash_reader_mut(&mut self) -> Option<&mut dyn HashReaderMut> {
        self.inner.as_hash_reader_mut()
    }
}
impl<R> TryGetIndex for EncryptReader<R>
where
    R: TryGetIndex,
{
    /// Forwards compression-index access to the wrapped reader.
    fn try_get_index(&self) -> Option<&Index> {
        self.inner.try_get_index()
    }
}
pin_project! {
    /// A reader wrapper that decrypts data on the fly using AES-256-GCM.
    /// This is a demonstration. For production, use a secure and audited crypto library.
    ///
    /// Consumes the frame format produced by `EncryptReader`; in multipart
    /// mode a fresh per-part nonce is derived whenever a 0xFF segment
    /// terminator is seen.
    #[derive(Debug)]
    pub struct DecryptReader<R> {
        #[pin]
        pub inner: R,
        key: [u8; 32], // AES-256-GCM key
        base_nonce: [u8; 12], // Base nonce recorded in object metadata
        current_nonce: [u8; 12], // Active nonce for the current encrypted segment
        // When true, a 0xFF frame advances to the next part instead of ending the stream.
        multipart_mode: bool,
        // 1-based part counter used for per-part nonce derivation.
        current_part: usize,
        // Decrypted plaintext not yet handed to the caller.
        buffer: Vec<u8>,
        // Read position within `buffer`.
        buffer_pos: usize,
        // Set once the end of the stream has been reached.
        finished: bool,
        // For block framing
        header_buf: [u8; 8],
        // Number of header bytes accumulated so far.
        header_read: usize,
        header_done: bool,
        // In-flight ciphertext payload (uvarint prefix + GCM ciphertext).
        ciphertext_buf: Option<Vec<u8>>,
        ciphertext_read: usize,
        ciphertext_len: usize,
    }
}
impl<R> DecryptReader<R>
where
    R: Reader,
{
    /// Builds a decryptor for a single-segment stream; `nonce` is used as-is
    /// for every frame.
    pub fn new(inner: R, key: [u8; 32], nonce: [u8; 12]) -> Self {
        Self {
            inner,
            key,
            base_nonce: nonce,
            current_nonce: nonce,
            multipart_mode: false,
            current_part: 0,
            buffer: Vec::new(),
            buffer_pos: 0,
            finished: false,
            header_buf: [0u8; 8],
            header_read: 0,
            header_done: false,
            ciphertext_buf: None,
            ciphertext_read: 0,
            ciphertext_len: 0,
        }
    }

    /// Builds a decryptor for a multipart stream: the active nonce for part N
    /// is derived from `base_nonce`, starting at part 1.
    pub fn new_multipart(inner: R, key: [u8; 32], base_nonce: [u8; 12]) -> Self {
        debug!("decrypt_reader: initialized multipart mode");
        // Start from the single-segment configuration and switch it to
        // multipart with the part-1 nonce active.
        let first_part = 1;
        let mut reader = Self::new(inner, key, base_nonce);
        reader.multipart_mode = true;
        reader.current_part = first_part;
        reader.current_nonce = derive_part_nonce(&base_nonce, first_part);
        reader
    }
}
impl<R> AsyncRead for DecryptReader<R>
where
    R: AsyncRead + Unpin + Send + Sync,
{
    /// Decrypts the framed stream produced by [`EncryptReader`].
    ///
    /// Each frame is an 8-byte header (byte 0: type, bytes 1-3: LE payload
    /// length + 4, bytes 4-7: LE CRC32 of the plaintext), followed by a
    /// uvarint plaintext length and the AES-256-GCM ciphertext. A type byte
    /// of 0xFF ends a segment: in multipart mode the nonce is advanced for
    /// the next part, otherwise the stream is finished.
    ///
    /// Fixes over the previous version:
    /// - never returns `Poll::Pending` after the inner reader made progress
    ///   (that parked the task without a registered waker and could hang);
    /// - keeps the parsed header buffered until the whole frame is consumed,
    ///   so a `Pending` during the payload read no longer re-parses payload
    ///   bytes as a fresh header;
    /// - EOF in the middle of a header or payload is reported as
    ///   `UnexpectedEof` instead of silently truncating or pending forever;
    /// - the uvarint prefix probe is bounded by the payload size, so corrupt
    ///   short payloads error instead of panicking.
    fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<std::io::Result<()>> {
        let mut this = self.project();
        loop {
            // Serve buffered plaintext first.
            if *this.buffer_pos < this.buffer.len() {
                let to_copy = std::cmp::min(buf.remaining(), this.buffer.len() - *this.buffer_pos);
                buf.put_slice(&this.buffer[*this.buffer_pos..*this.buffer_pos + to_copy]);
                *this.buffer_pos += to_copy;
                if *this.buffer_pos == this.buffer.len() {
                    this.buffer.clear();
                    *this.buffer_pos = 0;
                }
                return Poll::Ready(Ok(()));
            }
            if *this.finished {
                return Poll::Ready(Ok(()));
            }
            // Accumulate the 8-byte frame header, polling until complete.
            while *this.header_read < 8 {
                let mut temp = [0u8; 8];
                let mut temp_buf = ReadBuf::new(&mut temp[0..8 - *this.header_read]);
                match this.inner.as_mut().poll_read(cx, &mut temp_buf) {
                    Poll::Pending => return Poll::Pending,
                    Poll::Ready(Ok(())) => {
                        let n = temp_buf.filled().len();
                        if n == 0 {
                            if *this.header_read > 0 {
                                // EOF in the middle of a header: truncated input.
                                return Poll::Ready(Err(Error::new(
                                    std::io::ErrorKind::UnexpectedEof,
                                    "unexpected EOF inside encrypted block header",
                                )));
                            }
                            // Clean EOF on a frame boundary.
                            *this.finished = true;
                            return Poll::Ready(Ok(()));
                        }
                        this.header_buf[*this.header_read..*this.header_read + n]
                            .copy_from_slice(&temp_buf.filled()[..n]);
                        *this.header_read += n;
                    }
                    Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
                }
            }
            let typ = this.header_buf[0];
            let len =
                (this.header_buf[1] as usize) | ((this.header_buf[2] as usize) << 8) | ((this.header_buf[3] as usize) << 16);
            let crc = (this.header_buf[4] as u32)
                | ((this.header_buf[5] as u32) << 8)
                | ((this.header_buf[6] as u32) << 16)
                | ((this.header_buf[7] as u32) << 24);
            // `header_read` stays at 8 until the whole frame is consumed; a
            // re-entry after `Pending` re-derives typ/len/crc from header_buf
            // instead of reading payload bytes as a new header.
            if typ == 0xFF {
                *this.header_read = 0;
                this.ciphertext_buf.take();
                *this.ciphertext_read = 0;
                *this.ciphertext_len = 0;
                if *this.multipart_mode {
                    debug!(
                        next_part = *this.current_part + 1,
                        "decrypt_reader: reached segment terminator, advancing to next part"
                    );
                    *this.current_part += 1;
                    *this.current_nonce = derive_part_nonce(this.base_nonce, *this.current_part);
                    continue;
                }
                *this.finished = true;
                continue;
            }
            tracing::debug!(typ = typ, len = len, "decrypt block header");
            if len == 0 {
                tracing::warn!("encountered zero-length encrypted block, treating as end of stream");
                *this.header_read = 0;
                *this.finished = true;
                this.ciphertext_buf.take();
                *this.ciphertext_read = 0;
                *this.ciphertext_len = 0;
                continue;
            }
            // The stored length includes 4 bytes for the CRC carried in the header.
            let Some(payload_len) = len.checked_sub(4) else {
                tracing::error!("invalid encrypted block length: typ={} len={} header={:?}", typ, len, this.header_buf);
                return Poll::Ready(Err(Error::other("Invalid encrypted block length")));
            };
            if this.ciphertext_buf.is_none() {
                *this.ciphertext_buf = Some(vec![0u8; payload_len]);
                *this.ciphertext_len = payload_len;
                *this.ciphertext_read = 0;
            }
            let ciphertext_buf = this.ciphertext_buf.as_mut().unwrap();
            while *this.ciphertext_read < *this.ciphertext_len {
                let mut temp_buf = ReadBuf::new(&mut ciphertext_buf[*this.ciphertext_read..]);
                match this.inner.as_mut().poll_read(cx, &mut temp_buf) {
                    Poll::Pending => return Poll::Pending,
                    Poll::Ready(Ok(())) => {
                        let n = temp_buf.filled().len();
                        if n == 0 {
                            // EOF before the payload completed: truncated input.
                            this.ciphertext_buf.take();
                            *this.ciphertext_read = 0;
                            *this.ciphertext_len = 0;
                            return Poll::Ready(Err(Error::new(
                                std::io::ErrorKind::UnexpectedEof,
                                "unexpected EOF inside encrypted block payload",
                            )));
                        }
                        *this.ciphertext_read += n;
                    }
                    Poll::Ready(Err(e)) => {
                        this.ciphertext_buf.take();
                        *this.ciphertext_read = 0;
                        *this.ciphertext_len = 0;
                        return Poll::Ready(Err(e));
                    }
                }
            }
            // Frame fully consumed; allow the next header to be read.
            *this.header_read = 0;
            // Decode the uvarint plaintext-length prefix. Bound the probe
            // window by the buffer size so short (corrupt) payloads cannot
            // cause an out-of-bounds slice.
            let prefix_len = ciphertext_buf.len().min(16);
            let (plaintext_len, uvarint_len) = rustfs_utils::uvarint(&ciphertext_buf[0..prefix_len]);
            let consumed = uvarint_len as usize;
            if consumed == 0 || consumed > ciphertext_buf.len() {
                this.ciphertext_buf.take();
                *this.ciphertext_read = 0;
                *this.ciphertext_len = 0;
                return Poll::Ready(Err(Error::other("invalid plaintext length prefix")));
            }
            let ciphertext = &ciphertext_buf[consumed..];
            let cipher = Aes256Gcm::new_from_slice(this.key).expect("key");
            let nonce = Nonce::try_from(this.current_nonce.as_slice()).map_err(|_| Error::other("invalid nonce length"))?;
            let plaintext = cipher
                .decrypt(&nonce, ciphertext)
                .map_err(|e| Error::other(format!("decrypt error: {e}")))?;
            debug!(
                part = *this.current_part,
                plaintext_len = plaintext.len(),
                "decrypt_reader: decrypted chunk"
            );
            if plaintext.len() != plaintext_len as usize {
                this.ciphertext_buf.take();
                *this.ciphertext_read = 0;
                *this.ciphertext_len = 0;
                return Poll::Ready(Err(Error::other("Plaintext length mismatch")));
            }
            // Verify the decrypted bytes against the CRC from the header.
            let actual_crc = {
                let mut hasher = crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32IsoHdlc);
                hasher.update(&plaintext);
                hasher.finalize() as u32
            };
            if actual_crc != crc {
                this.ciphertext_buf.take();
                *this.ciphertext_read = 0;
                *this.ciphertext_len = 0;
                return Poll::Ready(Err(Error::other("CRC32 mismatch")));
            }
            // Hand the plaintext to the caller via the internal buffer.
            *this.buffer = plaintext;
            *this.buffer_pos = 0;
            this.ciphertext_buf.take();
            *this.ciphertext_read = 0;
            *this.ciphertext_len = 0;
            let to_copy = std::cmp::min(buf.remaining(), this.buffer.len());
            buf.put_slice(&this.buffer[..to_copy]);
            *this.buffer_pos += to_copy;
            return Poll::Ready(Ok(()));
        }
    }
}
impl<R> EtagResolvable for DecryptReader<R>
where
    R: EtagResolvable,
{
    /// Delegates ETag resolution to the wrapped reader; decryption does not
    /// change which ETag the underlying source reports.
    fn try_resolve_etag(&mut self) -> Option<String> {
        self.inner.try_resolve_etag()
    }
}
impl<R> HashReaderDetector for DecryptReader<R>
where
    R: EtagResolvable + HashReaderDetector,
{
    /// Forwards HashReader detection to the wrapped reader.
    fn is_hash_reader(&self) -> bool {
        self.inner.is_hash_reader()
    }
    /// Forwards mutable HashReader access to the wrapped reader.
    fn as_hash_reader_mut(&mut self) -> Option<&mut dyn HashReaderMut> {
        self.inner.as_hash_reader_mut()
    }
}
impl<R> TryGetIndex for DecryptReader<R>
where
    R: TryGetIndex,
{
    /// Forwards compression-index access to the wrapped reader.
    fn try_get_index(&self) -> Option<&Index> {
        self.inner.try_get_index()
    }
}
/// Derives the per-part nonce from `base` by treating its final four bytes
/// as a big-endian counter and advancing it by `part_number` (with wrapping
/// arithmetic). The first eight bytes are carried over unchanged.
fn derive_part_nonce(base: &[u8; 12], part_number: usize) -> [u8; 12] {
    let mut nonce = *base;
    let counter = u32::from_be_bytes(nonce[8..12].try_into().expect("4-byte slice"));
    let advanced = counter.wrapping_add(part_number as u32);
    nonce[8..12].copy_from_slice(&advanced.to_be_bytes());
    nonce
}
#[cfg(test)]
mod tests {
    use std::io::Cursor;
    use crate::WarpReader;
    use super::*;
    use rand::RngCore;
    use tokio::io::{AsyncReadExt, BufReader};
    /// Round-trips a short message through EncryptReader then DecryptReader
    /// with a random key/nonce pair.
    #[tokio::test]
    async fn test_encrypt_decrypt_reader_aes256gcm() {
        let data = b"hello sse encrypt";
        let mut key = [0u8; 32];
        let mut nonce = [0u8; 12];
        rand::rng().fill_bytes(&mut key);
        rand::rng().fill_bytes(&mut nonce);
        let reader = BufReader::new(&data[..]);
        let encrypt_reader = EncryptReader::new(WarpReader::new(reader), key, nonce);
        // Encrypt
        let mut encrypt_reader = encrypt_reader;
        let mut encrypted = Vec::new();
        encrypt_reader.read_to_end(&mut encrypted).await.unwrap();
        // Decrypt using DecryptReader
        let reader = Cursor::new(encrypted.clone());
        let decrypt_reader = DecryptReader::new(WarpReader::new(reader), key, nonce);
        let mut decrypt_reader = decrypt_reader;
        let mut decrypted = Vec::new();
        decrypt_reader.read_to_end(&mut decrypted).await.unwrap();
        assert_eq!(&decrypted, data);
    }
    /// Same round trip, phrased as a DecryptReader-focused test: encrypt
    /// first, then verify decryption recovers the original bytes.
    #[tokio::test]
    async fn test_decrypt_reader_only() {
        // Encrypt some data first
        let data = b"test decrypt only";
        let mut key = [0u8; 32];
        let mut nonce = [0u8; 12];
        rand::rng().fill_bytes(&mut key);
        rand::rng().fill_bytes(&mut nonce);
        // Encrypt
        let reader = BufReader::new(&data[..]);
        let encrypt_reader = EncryptReader::new(WarpReader::new(reader), key, nonce);
        let mut encrypt_reader = encrypt_reader;
        let mut encrypted = Vec::new();
        encrypt_reader.read_to_end(&mut encrypted).await.unwrap();
        // Now test DecryptReader
        let reader = Cursor::new(encrypted.clone());
        let decrypt_reader = DecryptReader::new(WarpReader::new(reader), key, nonce);
        let mut decrypt_reader = decrypt_reader;
        let mut decrypted = Vec::new();
        decrypt_reader.read_to_end(&mut decrypted).await.unwrap();
        assert_eq!(&decrypted, data);
    }
    /// Round-trips 1 MiB of random data, exercising multiple 8 KiB frames.
    #[tokio::test]
    async fn test_encrypt_decrypt_reader_large() {
        use rand::Rng;
        let size = 1024 * 1024;
        let mut data = vec![0u8; size];
        rand::rng().fill(&mut data[..]);
        let mut key = [0u8; 32];
        let mut nonce = [0u8; 12];
        rand::rng().fill_bytes(&mut key);
        rand::rng().fill_bytes(&mut nonce);
        let reader = std::io::Cursor::new(data.clone());
        let encrypt_reader = EncryptReader::new(WarpReader::new(reader), key, nonce);
        let mut encrypt_reader = encrypt_reader;
        let mut encrypted = Vec::new();
        encrypt_reader.read_to_end(&mut encrypted).await.unwrap();
        let reader = std::io::Cursor::new(encrypted.clone());
        let decrypt_reader = DecryptReader::new(WarpReader::new(reader), key, nonce);
        let mut decrypt_reader = decrypt_reader;
        let mut decrypted = Vec::new();
        decrypt_reader.read_to_end(&mut decrypted).await.unwrap();
        assert_eq!(&decrypted, &data);
    }
    /// Encrypts two parts with per-part nonces, concatenates the ciphertext
    /// streams, and checks DecryptReader::new_multipart stitches them back
    /// into the original plaintext across the 0xFF segment terminators.
    #[tokio::test]
    async fn test_decrypt_reader_multipart_segments() {
        let mut key = [0u8; 32];
        let mut base_nonce = [0u8; 12];
        rand::rng().fill_bytes(&mut key);
        rand::rng().fill_bytes(&mut base_nonce);
        let part_one = vec![0xA5; 512 * 1024];
        let part_two = vec![0x5A; 256 * 1024];
        // Encrypts one part using the nonce derived for its part number,
        // mirroring how multipart uploads encrypt each part independently.
        async fn encrypt_part(data: &[u8], key: [u8; 32], base_nonce: [u8; 12], part_number: usize) -> Vec<u8> {
            let nonce = derive_part_nonce(&base_nonce, part_number);
            let reader = BufReader::new(Cursor::new(data.to_vec()));
            let mut encrypt_reader = EncryptReader::new(WarpReader::new(reader), key, nonce);
            let mut encrypted = Vec::new();
            encrypt_reader.read_to_end(&mut encrypted).await.unwrap();
            encrypted
        }
        let encrypted_one = encrypt_part(&part_one, key, base_nonce, 1).await;
        let encrypted_two = encrypt_part(&part_two, key, base_nonce, 2).await;
        let mut combined = Vec::with_capacity(encrypted_one.len() + encrypted_two.len());
        combined.extend_from_slice(&encrypted_one);
        combined.extend_from_slice(&encrypted_two);
        let reader = BufReader::new(Cursor::new(combined));
        let mut decrypt_reader = DecryptReader::new_multipart(WarpReader::new(reader), key, base_nonce);
        let mut decrypted = Vec::new();
        decrypt_reader.read_to_end(&mut decrypted).await.unwrap();
        let mut expected = Vec::with_capacity(part_one.len() + part_two.len());
        expected.extend_from_slice(&part_one);
        expected.extend_from_slice(&part_two);
        assert_eq!(decrypted, expected);
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/rio/src/compress_index.rs | crates/rio/src/compress_index.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use bytes::Bytes;
use serde::{Deserialize, Serialize};
use std::io::{self, Read, Seek, SeekFrom};
// Magic prefix/suffix framing a serialized index chunk.
const S2_INDEX_HEADER: &[u8] = b"s2idx\x00";
const S2_INDEX_TRAILER: &[u8] = b"\x00xdi2s";
// Hard cap on stored seek points; `Index::reduce` thins entries beyond this.
const MAX_INDEX_ENTRIES: usize = 1 << 16;
// Minimum uncompressed distance (1 MiB) between recorded seek points.
const MIN_INDEX_DIST: i64 = 1 << 20;
// const MIN_INDEX_DIST: i64 = 0;
/// Optional accessor for a compression seek [`Index`] carried by a reader.
pub trait TryGetIndex {
    /// Returns the index if the implementor tracks one; defaults to `None`.
    fn try_get_index(&self) -> Option<&Index> {
        None
    }
}
/// Seek index mapping uncompressed stream offsets to compressed offsets,
/// modeled after the S2 compression index format.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Index {
    /// Total uncompressed size, or -1 when unknown.
    pub total_uncompressed: i64,
    /// Total compressed size, or -1 when unknown.
    pub total_compressed: i64,
    /// Recorded seek points, ordered by offset.
    info: Vec<IndexInfo>,
    /// Estimated uncompressed bytes per block; used for delta encoding.
    est_block_uncomp: i64,
}
impl Default for Index {
    /// Equivalent to [`Index::new`]: empty index with unknown totals.
    fn default() -> Self {
        Self::new()
    }
}
/// A single seek point: the compressed-stream position corresponding to an
/// uncompressed-stream position.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IndexInfo {
    pub compressed_offset: i64,
    pub uncompressed_offset: i64,
}
#[allow(dead_code)]
impl Index {
    /// Creates an empty index; totals stay at -1 ("unknown") until entries
    /// are recorded or an index is loaded.
    pub fn new() -> Self {
        Self {
            total_uncompressed: -1,
            total_compressed: -1,
            info: Vec::new(),
            est_block_uncomp: 0,
        }
    }
    /// Clears all entries and primes the estimated uncompressed block size.
    #[allow(dead_code)]
    fn reset(&mut self, max_block: usize) {
        self.est_block_uncomp = max_block as i64;
        self.total_compressed = -1;
        self.total_uncompressed = -1;
        self.info.clear();
    }
    /// Number of recorded seek points.
    pub fn len(&self) -> usize {
        self.info.len()
    }
    /// Returns `true` when no seek points have been recorded.
    pub fn is_empty(&self) -> bool {
        self.info.is_empty()
    }
    /// Replaces `info` with `n` zeroed entries so `load` can write into them.
    ///
    /// Panics if `n` exceeds `MAX_INDEX_ENTRIES`; `load` validates first.
    fn alloc_infos(&mut self, n: usize) {
        if n > MAX_INDEX_ENTRIES {
            panic!("n > MAX_INDEX_ENTRIES");
        }
        // Fix: the vector must actually CONTAIN n entries — `load` assigns
        // through `self.info[idx]`, which panics on a merely-reserved
        // (empty) Vec as produced by Vec::with_capacity.
        self.info = vec![
            IndexInfo {
                compressed_offset: 0,
                uncompressed_offset: 0,
            };
            n
        ];
    }
    /// Records a seek point mapping `uncompressed_offset` to
    /// `compressed_offset`.
    ///
    /// Entries closer than `MIN_INDEX_DIST` (uncompressed) to the previous
    /// entry are skipped; both offsets must be monotonically non-decreasing.
    pub fn add(&mut self, compressed_offset: i64, uncompressed_offset: i64) -> io::Result<()> {
        if self.info.is_empty() {
            self.info.push(IndexInfo {
                compressed_offset,
                uncompressed_offset,
            });
            return Ok(());
        }
        let last_idx = self.info.len() - 1;
        let latest = &mut self.info[last_idx];
        if latest.uncompressed_offset == uncompressed_offset {
            // Same uncompressed position reported again: keep the newest mapping.
            latest.compressed_offset = compressed_offset;
            return Ok(());
        }
        if latest.uncompressed_offset > uncompressed_offset {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!(
                    "internal error: Earlier uncompressed received ({} > {})",
                    latest.uncompressed_offset, uncompressed_offset
                ),
            ));
        }
        if latest.compressed_offset > compressed_offset {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                // Fix: report the COMPRESSED offsets; this message previously
                // printed the uncompressed pair.
                format!(
                    "internal error: Earlier compressed received ({} > {})",
                    latest.compressed_offset, compressed_offset
                ),
            ));
        }
        if latest.uncompressed_offset + MIN_INDEX_DIST > uncompressed_offset {
            // Too close to the previous seek point; skip.
            // NOTE(review): totals are not advanced for skipped entries —
            // confirm callers finalize totals via the append_to arguments.
            return Ok(());
        }
        self.info.push(IndexInfo {
            compressed_offset,
            uncompressed_offset,
        });
        self.total_compressed = compressed_offset;
        self.total_uncompressed = uncompressed_offset;
        Ok(())
    }
    /// Returns the closest recorded seek point at or before `offset`
    /// (uncompressed), as `(compressed_offset, uncompressed_offset)`.
    /// A negative `offset` is interpreted relative to the total size.
    pub fn find(&self, offset: i64) -> io::Result<(i64, i64)> {
        if self.total_uncompressed < 0 {
            return Err(io::Error::other("corrupt index"));
        }
        let mut offset = offset;
        if offset < 0 {
            offset += self.total_uncompressed;
            if offset < 0 {
                return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "offset out of bounds"));
            }
        }
        if offset > self.total_uncompressed {
            return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "offset out of bounds"));
        }
        if self.info.is_empty() {
            return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "empty index"));
        }
        if self.info.len() > 200 {
            // The comparator never returns Equal, so binary_search always
            // yields Err(i) with i = first entry strictly past `offset`
            // (a partition point).
            let n = self
                .info
                .binary_search_by(|info| {
                    if info.uncompressed_offset > offset {
                        std::cmp::Ordering::Greater
                    } else {
                        std::cmp::Ordering::Less
                    }
                })
                .unwrap_or_else(|i| i);
            if n == 0 {
                return Ok((self.info[0].compressed_offset, self.info[0].uncompressed_offset));
            }
            return Ok((self.info[n - 1].compressed_offset, self.info[n - 1].uncompressed_offset));
        }
        // Small index: linear scan for the last entry not past `offset`.
        let mut compressed_off = 0;
        let mut uncompressed_off = 0;
        for info in &self.info {
            if info.uncompressed_offset > offset {
                break;
            }
            compressed_off = info.compressed_offset;
            uncompressed_off = info.uncompressed_offset;
        }
        Ok((compressed_off, uncompressed_off))
    }
    /// Thins out seek points when the entry count or density exceeds the
    /// configured limits, keeping every (remove_n + 1)-th entry.
    fn reduce(&mut self) {
        if self.info.len() < MAX_INDEX_ENTRIES && self.est_block_uncomp >= MIN_INDEX_DIST {
            return;
        }
        let mut remove_n = (self.info.len() + 1) / MAX_INDEX_ENTRIES;
        let src = self.info.clone();
        let mut j = 0;
        // Keep increasing the stride until the retained entries are spaced
        // at least MIN_INDEX_DIST apart (or the index becomes small enough).
        while self.est_block_uncomp * (remove_n as i64 + 1) < MIN_INDEX_DIST && self.info.len() / (remove_n + 1) > 1000 {
            remove_n += 1;
        }
        let mut idx = 0;
        while idx < src.len() {
            self.info[j] = src[idx].clone();
            j += 1;
            idx += remove_n + 1;
        }
        self.info.truncate(j);
        self.est_block_uncomp += self.est_block_uncomp * remove_n as i64;
    }
    /// Serializes the index into a standalone byte buffer.
    pub fn into_vec(mut self) -> Bytes {
        let mut b = Vec::new();
        self.append_to(&mut b, self.total_uncompressed, self.total_compressed);
        Bytes::from(b)
    }
    /// Serializes the index and appends it to `b` as an S2-style chunk.
    ///
    /// NOTE(review): the 24-bit chunk length written at the end overwrites
    /// bytes 1-3 of the magic emitted below, while `load` checks all four
    /// magic bytes before reading the length from those same positions —
    /// writer and reader framing disagree and cannot round-trip as written.
    /// Confirm the intended chunk layout against the consuming side.
    pub fn append_to(&mut self, b: &mut Vec<u8>, uncomp_total: i64, comp_total: i64) {
        self.reduce();
        let init_size = b.len();
        // Add skippable header
        b.extend_from_slice(&[0x50, 0x2A, 0x4D, 0x18]); // ChunkTypeIndex
        b.extend_from_slice(&[0, 0, 0]); // Placeholder for chunk length
        // Add header
        b.extend_from_slice(S2_INDEX_HEADER);
        // Add total sizes. Fix: a 64-bit varint needs up to 10 bytes; the
        // previous 8-byte scratch buffer panicked for values >= 2^56.
        // NOTE(review): write_varint emits unsigned varints, so negative
        // values (e.g. totals still at -1) do not round-trip via read_varint.
        let mut tmp = [0u8; 10];
        let n = write_varint(&mut tmp, uncomp_total);
        b.extend_from_slice(&tmp[..n]);
        let n = write_varint(&mut tmp, comp_total);
        b.extend_from_slice(&tmp[..n]);
        let n = write_varint(&mut tmp, self.est_block_uncomp);
        b.extend_from_slice(&tmp[..n]);
        let n = write_varint(&mut tmp, self.info.len() as i64);
        b.extend_from_slice(&tmp[..n]);
        // Check if we should add uncompressed offsets: they are omitted when
        // every entry is exactly est_block_uncomp past the previous one.
        let mut has_uncompressed = 0u8;
        for (idx, info) in self.info.iter().enumerate() {
            if idx == 0 {
                if info.uncompressed_offset != 0 {
                    has_uncompressed = 1;
                    break;
                }
                continue;
            }
            if info.uncompressed_offset != self.info[idx - 1].uncompressed_offset + self.est_block_uncomp {
                has_uncompressed = 1;
                break;
            }
        }
        b.push(has_uncompressed);
        // Add uncompressed offsets (as deltas from the predicted position) if needed
        if has_uncompressed == 1 {
            for (idx, info) in self.info.iter().enumerate() {
                let mut u_off = info.uncompressed_offset;
                if idx > 0 {
                    let prev = &self.info[idx - 1];
                    u_off -= prev.uncompressed_offset + self.est_block_uncomp;
                }
                let n = write_varint(&mut tmp, u_off);
                b.extend_from_slice(&tmp[..n]);
            }
        }
        // Add compressed offsets as deltas against a running prediction.
        // The delta is computed against the OLD predictor, which is then
        // advanced by half the delta; `load` must mirror this order.
        let mut c_predict = self.est_block_uncomp / 2;
        for (idx, info) in self.info.iter().enumerate() {
            let mut c_off = info.compressed_offset;
            if idx > 0 {
                let prev = &self.info[idx - 1];
                c_off -= prev.compressed_offset + c_predict;
                c_predict += c_off / 2;
            }
            let n = write_varint(&mut tmp, c_off);
            b.extend_from_slice(&tmp[..n]);
        }
        // Add total size and trailer
        let total_size = (b.len() - init_size + 4 + S2_INDEX_TRAILER.len()) as u32;
        b.extend_from_slice(&total_size.to_le_bytes());
        b.extend_from_slice(S2_INDEX_TRAILER);
        // Update chunk length
        let chunk_len = b.len() - init_size - 4;
        b[init_size + 1] = chunk_len as u8;
        b[init_size + 2] = (chunk_len >> 8) as u8;
        b[init_size + 3] = (chunk_len >> 16) as u8;
    }
    /// Parses a serialized index from `b`, returning the unread remainder.
    ///
    /// NOTE(review): the magic check below expects all four original magic
    /// bytes, but `append_to` overwrites bytes 1-3 with the chunk length —
    /// see the matching note there.
    pub fn load<'a>(&mut self, mut b: &'a [u8]) -> io::Result<&'a [u8]> {
        if b.len() <= 4 + S2_INDEX_HEADER.len() + S2_INDEX_TRAILER.len() {
            return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "buffer too small"));
        }
        if b[0] != 0x50 || b[1] != 0x2A || b[2] != 0x4D || b[3] != 0x18 {
            return Err(io::Error::other("invalid chunk type"));
        }
        let chunk_len = (b[1] as usize) | ((b[2] as usize) << 8) | ((b[3] as usize) << 16);
        b = &b[4..];
        if b.len() < chunk_len {
            return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "buffer too small"));
        }
        if !b.starts_with(S2_INDEX_HEADER) {
            return Err(io::Error::other("invalid header"));
        }
        b = &b[S2_INDEX_HEADER.len()..];
        // Read total uncompressed
        let (v, n) = read_varint(b)?;
        if v < 0 {
            return Err(io::Error::other("invalid uncompressed size"));
        }
        self.total_uncompressed = v;
        b = &b[n..];
        // Read total compressed
        let (v, n) = read_varint(b)?;
        if v < 0 {
            return Err(io::Error::other("invalid compressed size"));
        }
        self.total_compressed = v;
        b = &b[n..];
        // Read est block uncomp
        let (v, n) = read_varint(b)?;
        if v < 0 {
            return Err(io::Error::other("invalid block size"));
        }
        self.est_block_uncomp = v;
        b = &b[n..];
        // Read number of entries
        let (v, n) = read_varint(b)?;
        if v < 0 || v > MAX_INDEX_ENTRIES as i64 {
            return Err(io::Error::other("invalid number of entries"));
        }
        let entries = v as usize;
        b = &b[n..];
        self.alloc_infos(entries);
        if b.is_empty() {
            return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "buffer too small"));
        }
        let has_uncompressed = b[0];
        b = &b[1..];
        if has_uncompressed & 1 != has_uncompressed {
            return Err(io::Error::other("invalid uncompressed flag"));
        }
        // Read uncompressed offsets (deltas from the predicted position)
        for idx in 0..entries {
            let mut u_off = 0i64;
            if has_uncompressed != 0 {
                let (v, n) = read_varint(b)?;
                u_off = v;
                b = &b[n..];
            }
            if idx > 0 {
                let prev = self.info[idx - 1].uncompressed_offset;
                u_off += prev + self.est_block_uncomp;
                if u_off <= prev {
                    return Err(io::Error::other("invalid offset"));
                }
            }
            if u_off < 0 {
                return Err(io::Error::other("negative offset"));
            }
            self.info[idx].uncompressed_offset = u_off;
        }
        // Read compressed offsets
        let mut c_predict = self.est_block_uncomp / 2;
        for idx in 0..entries {
            let (v, n) = read_varint(b)?;
            let mut c_off = v;
            b = &b[n..];
            if idx > 0 {
                // Fix: reconstruct with the predictor that was in effect at
                // encode time, THEN advance it by half the delta. The
                // previous code updated c_predict before using it, which
                // inverted append_to's order and skewed every offset by
                // delta/2.
                let next_predict = c_predict + c_off / 2;
                let prev = self.info[idx - 1].compressed_offset;
                c_off += prev + c_predict;
                c_predict = next_predict;
                if c_off <= prev {
                    return Err(io::Error::other("invalid offset"));
                }
            }
            if c_off < 0 {
                return Err(io::Error::other("negative offset"));
            }
            self.info[idx].compressed_offset = c_off;
        }
        if b.len() < 4 + S2_INDEX_TRAILER.len() {
            return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "buffer too small"));
        }
        // Skip size
        b = &b[4..];
        // Check trailer
        if !b.starts_with(S2_INDEX_TRAILER) {
            return Err(io::Error::other("invalid trailer"));
        }
        Ok(&b[S2_INDEX_TRAILER.len()..])
    }
    /// Loads an index stored at the END of a seekable stream: reads the
    /// trailing size + trailer, then seeks back and parses the whole chunk.
    pub fn load_stream<R: Read + Seek>(&mut self, mut rs: R) -> io::Result<()> {
        // Go to end: the last 10 bytes are a u32 size plus the 6-byte trailer.
        rs.seek(SeekFrom::End(-10))?;
        let mut tmp = [0u8; 10];
        rs.read_exact(&mut tmp)?;
        // Check trailer
        if &tmp[4..4 + S2_INDEX_TRAILER.len()] != S2_INDEX_TRAILER {
            return Err(io::Error::other("invalid trailer"));
        }
        let sz = u32::from_le_bytes(tmp[..4].try_into().unwrap());
        if sz > 0x7fffffff {
            return Err(io::Error::other("size too large"));
        }
        rs.seek(SeekFrom::End(-(sz as i64)))?;
        let mut buf = vec![0u8; sz as usize];
        rs.read_exact(&mut buf)?;
        self.load(&buf)?;
        Ok(())
    }
    /// Renders the index as pretty-printed JSON (diagnostics/tooling).
    pub fn to_json(&self) -> serde_json::Result<Vec<u8>> {
        #[derive(Serialize)]
        struct Offset {
            compressed: i64,
            uncompressed: i64,
        }
        #[derive(Serialize)]
        struct IndexJson {
            total_uncompressed: i64,
            total_compressed: i64,
            offsets: Vec<Offset>,
            est_block_uncompressed: i64,
        }
        let json = IndexJson {
            total_uncompressed: self.total_uncompressed,
            total_compressed: self.total_compressed,
            offsets: self
                .info
                .iter()
                .map(|info| Offset {
                    compressed: info.compressed_offset,
                    uncompressed: info.uncompressed_offset,
                })
                .collect(),
            est_block_uncompressed: self.est_block_uncomp,
        };
        serde_json::to_vec_pretty(&json)
    }
}
// Helper functions for varint encoding/decoding
/// Encodes `v` as a base-128 varint (7 data bits per byte, low bits first)
/// into `buf`, returning the number of bytes written.
///
/// NOTE: values are treated as unsigned; a negative `v` fails the `>= 0x80`
/// test immediately and is emitted as a single truncated byte, so callers
/// must only pass non-negative values. `buf` must be large enough for the
/// encoding (up to 10 bytes for a large i64) or indexing panics.
fn write_varint(buf: &mut [u8], v: i64) -> usize {
    let mut remaining = v;
    let mut written = 0;
    loop {
        if remaining < 0x80 {
            buf[written] = remaining as u8;
            return written + 1;
        }
        buf[written] = (remaining as u8) | 0x80;
        remaining >>= 7;
        written += 1;
    }
}
/// Decodes a base-128 varint from the front of `buf`, returning the value
/// and the number of bytes consumed.
///
/// Returns `UnexpectedEof` if the buffer ends mid-varint, and `InvalidData`
/// if the continuation run exceeds what fits in 64 bits (the previous
/// version kept shifting and panicked with a shift overflow on such
/// malformed input).
fn read_varint(buf: &[u8]) -> io::Result<(i64, usize)> {
    let mut result = 0i64;
    let mut shift = 0u32;
    for (idx, &byte) in buf.iter().enumerate() {
        if shift >= 64 {
            // More than 10 bytes of continuation: cannot be a valid 64-bit varint.
            return Err(io::Error::new(io::ErrorKind::InvalidData, "varint too long"));
        }
        result |= ((byte & 0x7F) as i64) << shift;
        if byte < 0x80 {
            return Ok((result, idx + 1));
        }
        shift += 7;
    }
    Err(io::Error::new(io::ErrorKind::UnexpectedEof, "unexpected EOF"))
}
// Helper functions for index header manipulation
#[allow(dead_code)]
/// Strips the leading 4-byte size field and the trailer marker from a raw
/// index buffer, returning the payload that follows the trailer.
///
/// Returns `None` when the buffer is too short or the trailer marker is
/// not found where expected.
pub fn remove_index_headers(b: &[u8]) -> Option<&[u8]> {
    // Need at least the 4-byte size field plus the trailer bytes.
    let min_len = 4 + S2_INDEX_TRAILER.len();
    if b.len() < min_len {
        return None;
    }
    // Drop the size field, then require the trailer to follow immediately;
    // `strip_prefix` returns the remainder only on a match.
    let rest = &b[4..];
    rest.strip_prefix(S2_INDEX_TRAILER)
}
#[allow(dead_code)]
/// Rebuilds a full framed index buffer around a stripped payload:
/// chunk header + index header + payload + total size + trailer, with the
/// 24-bit chunk length patched into the header afterwards.
///
/// An empty input yields an empty buffer.
pub fn restore_index_headers(in_data: &[u8]) -> Vec<u8> {
    if in_data.is_empty() {
        return Vec::new();
    }
    let mut out = Vec::with_capacity(4 + S2_INDEX_HEADER.len() + in_data.len() + S2_INDEX_TRAILER.len() + 4);
    // Chunk type byte plus a 3-byte length placeholder (patched below).
    out.push(0x50);
    out.extend_from_slice(&[0x2A, 0x4D, 0x18]);
    out.extend_from_slice(S2_INDEX_HEADER);
    out.extend_from_slice(in_data);
    // Total size counts everything written so far plus the 4-byte size
    // field and the trailer that are appended next.
    let total_size = (out.len() + 4 + S2_INDEX_TRAILER.len()) as u32;
    out.extend_from_slice(&total_size.to_le_bytes());
    out.extend_from_slice(S2_INDEX_TRAILER);
    // Patch the 24-bit little-endian chunk length (everything after the
    // 4-byte chunk header) into bytes 1..4.
    let chunk_len = out.len() - 4;
    out[1] = chunk_len as u8;
    out[2] = (chunk_len >> 8) as u8;
    out[3] = (chunk_len >> 16) as u8;
    out
}
#[cfg(test)]
mod tests {
    use super::*;
    // A fresh index has unknown (-1) totals and no entries.
    #[test]
    fn test_index_new() {
        let index = Index::new();
        assert_eq!(index.total_uncompressed, -1);
        assert_eq!(index.total_compressed, -1);
        assert!(index.info.is_empty());
        assert_eq!(index.est_block_uncomp, 0);
    }
    #[test]
    fn test_index_add() -> io::Result<()> {
        let mut index = Index::new();
        // Test adding first index
        index.add(100, 1000)?;
        assert_eq!(index.info.len(), 1);
        assert_eq!(index.info[0].compressed_offset, 100);
        assert_eq!(index.info[0].uncompressed_offset, 1000);
        // Test adding index with same uncompressed offset
        // (the existing entry is replaced, not appended — len stays 1).
        index.add(200, 1000)?;
        assert_eq!(index.info.len(), 1);
        assert_eq!(index.info[0].compressed_offset, 200);
        assert_eq!(index.info[0].uncompressed_offset, 1000);
        // Test adding new index (ensure distance is large enough)
        index.add(300, 2000 + MIN_INDEX_DIST)?;
        assert_eq!(index.info.len(), 2);
        assert_eq!(index.info[1].compressed_offset, 300);
        assert_eq!(index.info[1].uncompressed_offset, 2000 + MIN_INDEX_DIST);
        Ok(())
    }
    // Offsets must be monotonically non-decreasing; regressions are rejected.
    #[test]
    fn test_index_add_errors() {
        let mut index = Index::new();
        // Add initial index
        index.add(100, 1000).unwrap();
        // Test adding smaller uncompressed offset
        let err = index.add(200, 500).unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
        // Test adding smaller compressed offset
        let err = index.add(50, 2000).unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
    }
    // `find` returns the entry at or just below the requested offset.
    #[test]
    fn test_index_find() -> io::Result<()> {
        let mut index = Index::new();
        index.total_uncompressed = 1000 + MIN_INDEX_DIST * 3;
        index.total_compressed = 5000;
        // Add some test data, ensure index spacing meets MIN_INDEX_DIST requirement
        index.add(100, 1000)?;
        index.add(300, 1000 + MIN_INDEX_DIST)?;
        index.add(500, 1000 + MIN_INDEX_DIST * 2)?;
        // Test finding existing offset
        let (comp, uncomp) = index.find(1500)?;
        assert_eq!(comp, 100);
        assert_eq!(uncomp, 1000);
        // Test finding boundary value
        let (comp, uncomp) = index.find(1000 + MIN_INDEX_DIST)?;
        assert_eq!(comp, 300);
        assert_eq!(uncomp, 1000 + MIN_INDEX_DIST);
        // Test finding last index
        let (comp, uncomp) = index.find(1000 + MIN_INDEX_DIST * 2)?;
        assert_eq!(comp, 500);
        assert_eq!(uncomp, 1000 + MIN_INDEX_DIST * 2);
        Ok(())
    }
    #[test]
    fn test_index_find_errors() {
        let mut index = Index::new();
        index.total_uncompressed = 10000;
        index.total_compressed = 5000;
        // Test uninitialized index (totals still -1) — reported as Other
        let uninit_index = Index::new();
        let err = uninit_index.find(1000).unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::Other);
        // Test offset out of range
        let err = index.find(15000).unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::UnexpectedEof);
        // Test negative offset
        let err = match index.find(-1000) {
            Ok(_) => panic!("should be error"),
            Err(e) => e,
        };
        assert_eq!(err.kind(), io::ErrorKind::UnexpectedEof);
    }
    // `reduce` must cap the entry count at MAX_INDEX_ENTRIES.
    #[test]
    fn test_index_reduce() {
        let mut index = Index::new();
        index.est_block_uncomp = MIN_INDEX_DIST;
        // Add entries exceeding maximum index count, ensure spacing meets MIN_INDEX_DIST requirement
        for i in 0..MAX_INDEX_ENTRIES + 100 {
            index.add(i as i64 * 100, i as i64 * MIN_INDEX_DIST).unwrap();
        }
        // Manually call reduce method
        index.reduce();
        // Verify index count has been correctly reduced
        assert!(index.info.len() <= MAX_INDEX_ENTRIES);
    }
    #[test]
    fn test_index_json() -> io::Result<()> {
        let mut index = Index::new();
        // Add some test data
        index.add(100, 1000)?;
        index.add(300, 2000 + MIN_INDEX_DIST)?;
        // Test JSON serialization
        let json = index.to_json().unwrap();
        let json_str = String::from_utf8(json).unwrap();
        println!("json_str: {json_str}");
        // Verify JSON content (pretty-printed, so keys are followed by ": ")
        assert!(json_str.contains("\"compressed\": 100"));
        assert!(json_str.contains("\"uncompressed\": 1000"));
        assert!(json_str.contains("\"est_block_uncompressed\": 0"));
        Ok(())
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/rio/src/hash_reader.rs | crates/rio/src/hash_reader.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! HashReader implementation with generic support
//!
//! This module provides a generic `HashReader<R>` that can wrap any type implementing
//! `AsyncRead + Unpin + Send + Sync + 'static + EtagResolvable`.
//!
//! ## Migration from the original Reader enum
//!
//! The original `HashReader::new` method that worked with the `Reader` enum
//! has been replaced with a generic approach. To preserve the original logic:
//!
//! ### Original logic (before generics):
//! ```ignore
//! // Original code would do:
//! // 1. Check if inner is already a HashReader
//! // 2. If size > 0, wrap with HardLimitReader
//! // 3. If !diskable_md5, wrap with EtagReader
//! // 4. Create HashReader with the wrapped reader
//!
//! let reader = HashReader::new(inner, size, actual_size, etag, diskable_md5)?;
//! ```
//!
//! ### New generic approach:
//! ```rust
//! use rustfs_rio::{HashReader, HardLimitReader, EtagReader};
//! use tokio::io::BufReader;
//! use std::io::Cursor;
//! use rustfs_rio::WarpReader;
//!
//! # tokio_test::block_on(async {
//! let data = b"hello world";
//! let reader = BufReader::new(Cursor::new(&data[..]));
//! let reader = Box::new(WarpReader::new(reader));
//! let size = data.len() as i64;
//! let actual_size = size;
//! let etag = None;
//! let diskable_md5 = false;
//!
//! // Method 1: Simple creation (recommended for most cases)
//! let hash_reader = HashReader::new(reader, size, actual_size, etag.clone(), None, diskable_md5).unwrap();
//!
//! // Method 2: With manual wrapping to recreate original logic
//! let reader2 = BufReader::new(Cursor::new(&data[..]));
//! let reader2 = Box::new(WarpReader::new(reader2));
//! let wrapped_reader: Box<dyn rustfs_rio::Reader> = if size > 0 {
//! if !diskable_md5 {
//! // Wrap with both HardLimitReader and EtagReader
//! let hard_limit = HardLimitReader::new(reader2, size);
//! Box::new(EtagReader::new(Box::new(hard_limit), etag.clone()))
//! } else {
//! // Only wrap with HardLimitReader
//! Box::new(HardLimitReader::new(reader2, size))
//! }
//! } else if !diskable_md5 {
//! // Only wrap with EtagReader
//! Box::new(EtagReader::new(reader2, etag.clone()))
//! } else {
//! // No wrapping needed
//! reader2
//! };
//! let hash_reader2 = HashReader::new(wrapped_reader, size, actual_size, etag.clone(), None, diskable_md5).unwrap();
//! # });
//! ```
//!
//! ## HashReader Detection
//!
//! The `HashReaderDetector` trait allows detection of existing HashReader instances:
//!
//! ```rust
//! use rustfs_rio::{HashReader, HashReaderDetector};
//! use tokio::io::BufReader;
//! use std::io::Cursor;
//! use rustfs_rio::WarpReader;
//!
//! # tokio_test::block_on(async {
//! let data = b"test";
//! let reader = BufReader::new(Cursor::new(&data[..]));
//! let hash_reader = HashReader::new(Box::new(WarpReader::new(reader)), 4, 4, None, None,false).unwrap();
//!
//! // Check if a type is a HashReader
//! assert!(hash_reader.is_hash_reader());
//!
//! // Use new for compatibility (though it's simpler to use new() directly)
//! let reader2 = BufReader::new(Cursor::new(&data[..]));
//! let result = HashReader::new(Box::new(WarpReader::new(reader2)), 4, 4, None, None, false);
//! assert!(result.is_ok());
//! # });
//! ```
use crate::Checksum;
use crate::ChecksumHasher;
use crate::ChecksumType;
use crate::Sha256Hasher;
use crate::compress_index::{Index, TryGetIndex};
use crate::get_content_checksum;
use crate::{EtagReader, EtagResolvable, HardLimitReader, HashReaderDetector, Reader, WarpReader};
use base64::Engine;
use base64::engine::general_purpose;
use http::HeaderMap;
use pin_project_lite::pin_project;
use s3s::TrailingHeaders;
use std::collections::HashMap;
use std::io::Cursor;
use std::io::Write;
use std::mem;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};
use tracing::error;
/// Trait for mutable operations on HashReader
pub trait HashReaderMut {
    /// Consumes the reader and returns the wrapped inner reader.
    fn into_inner(self) -> Box<dyn Reader>;
    /// Moves the inner reader out while leaving the wrapper in a valid
    /// (placeholder) state.
    fn take_inner(&mut self) -> Box<dyn Reader>;
    /// Number of bytes read through this reader so far.
    fn bytes_read(&self) -> u64;
    /// Expected MD5 checksum (hex), if one was supplied.
    fn checksum(&self) -> &Option<String>;
    fn set_checksum(&mut self, checksum: Option<String>);
    /// Declared stream size in bytes (negative when unknown).
    fn size(&self) -> i64;
    fn set_size(&mut self, size: i64);
    /// Actual payload size in bytes (negative when unknown).
    fn actual_size(&self) -> i64;
    fn set_actual_size(&mut self, actual_size: i64);
    /// Expected content checksum, if any.
    fn content_hash(&self) -> &Option<Checksum>;
    /// Expected SHA-256 digest (hex), if any.
    fn content_sha256(&self) -> &Option<String>;
    /// Trailing headers carrying trailing checksums, if any.
    fn get_trailer(&self) -> Option<&TrailingHeaders>;
    fn set_trailer(&mut self, trailer: Option<TrailingHeaders>);
}
pin_project! {
    // Reader wrapper that verifies MD5/SHA-256/content checksums while the
    // underlying stream is read; verification runs once at end-of-stream.
    pub struct HashReader {
        #[pin]
        pub inner: Box<dyn Reader>,
        // Declared stream size in bytes; negative when unknown.
        pub size: i64,
        // Expected MD5 checksum (hex), if supplied by the caller.
        checksum: Option<String>,
        // Actual (e.g. uncompressed) payload size; negative when unknown.
        pub actual_size: i64,
        // When true, MD5/ETag computation is skipped entirely.
        pub diskable_md5: bool,
        // Bytes read through this reader so far.
        bytes_read: u64,
        // Expected content checksum parsed from request headers.
        content_hash: Option<Checksum>,
        // Incremental hasher matching `content_hash`'s algorithm.
        content_hasher: Option<Box<dyn ChecksumHasher>>,
        // Expected SHA-256 digest (hex), if supplied.
        content_sha256: Option<String>,
        // Incremental SHA-256 state; present iff `content_sha256` is set.
        content_sha256_hasher: Option<Sha256Hasher>,
        // Set after end-of-stream verification so it runs only once.
        checksum_on_finish: bool,
        // Trailing headers used for trailing-checksum extraction.
        trailer_s3s: Option<TrailingHeaders>,
    }
}
impl HashReader {
    /// Creates a new `HashReader` wrapping `inner`.
    ///
    /// If `inner` is itself an unread `HashReader`, its parameters are
    /// validated against the arguments, merged, and its inner reader is
    /// re-wrapped so readers are never nested. Otherwise `inner` is
    /// wrapped with a `HardLimitReader` when `size > 0`, and with an
    /// `EtagReader` unless `diskable_md5` is set.
    ///
    /// # Errors
    /// Returns `InvalidData` when `inner` is a `HashReader` that has
    /// already been read from, or whose checksum/size conflicts with the
    /// supplied `md5hex`/`size`.
    pub fn new(
        mut inner: Box<dyn Reader>,
        size: i64,
        actual_size: i64,
        md5hex: Option<String>,
        sha256hex: Option<String>,
        diskable_md5: bool,
    ) -> std::io::Result<Self> {
        // Check if it's already a HashReader and update its parameters
        if let Some(existing_hash_reader) = inner.as_hash_reader_mut() {
            // Refuse to re-wrap a reader whose hash state is already partial.
            if existing_hash_reader.bytes_read() > 0 {
                return Err(std::io::Error::new(
                    std::io::ErrorKind::InvalidData,
                    "Cannot create HashReader from an already read HashReader",
                ));
            }
            // Both sides supplied an MD5: they must agree.
            if let Some(checksum) = existing_hash_reader.checksum()
                && let Some(ref md5) = md5hex
                && checksum != md5
            {
                return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "HashReader checksum mismatch"));
            }
            // Both sides declared a positive size: they must agree.
            if existing_hash_reader.size() > 0 && size > 0 && existing_hash_reader.size() != size {
                return Err(std::io::Error::new(
                    std::io::ErrorKind::InvalidData,
                    format!("HashReader size mismatch: expected {}, got {}", existing_hash_reader.size(), size),
                ));
            }
            existing_hash_reader.set_checksum(md5hex.clone());
            // Fill in sizes that the existing reader did not know yet.
            if existing_hash_reader.size() < 0 && size >= 0 {
                existing_hash_reader.set_size(size);
            }
            if existing_hash_reader.actual_size() <= 0 && actual_size >= 0 {
                existing_hash_reader.set_actual_size(actual_size);
            }
            // Carry the merged state into a fresh HashReader around the
            // existing reader's inner stream (counters reset to zero).
            let size = existing_hash_reader.size();
            let actual_size = existing_hash_reader.actual_size();
            let content_hash = existing_hash_reader.content_hash().clone();
            let content_hasher = existing_hash_reader
                .content_hash()
                .clone()
                .map(|hash| hash.checksum_type.hasher().unwrap());
            let content_sha256 = existing_hash_reader.content_sha256().clone();
            let content_sha256_hasher = existing_hash_reader.content_sha256().clone().map(|_| Sha256Hasher::new());
            let inner = existing_hash_reader.take_inner();
            return Ok(Self {
                inner,
                size,
                checksum: md5hex.clone(),
                actual_size,
                diskable_md5,
                bytes_read: 0,
                content_sha256,
                content_sha256_hasher,
                content_hash,
                content_hasher,
                checksum_on_finish: false,
                trailer_s3s: existing_hash_reader.get_trailer().cloned(),
            });
        }
        // Not an existing HashReader: apply the standard wrapping chain.
        if size > 0 {
            let hr = HardLimitReader::new(inner, size);
            inner = Box::new(hr);
            if !diskable_md5 && !inner.is_hash_reader() {
                let er = EtagReader::new(inner, md5hex.clone());
                inner = Box::new(er);
            }
        } else if !diskable_md5 {
            let er = EtagReader::new(inner, md5hex.clone());
            inner = Box::new(er);
        }
        Ok(Self {
            inner,
            size,
            checksum: md5hex,
            actual_size,
            diskable_md5,
            bytes_read: 0,
            content_hash: None,
            content_hasher: None,
            content_sha256: sha256hex.clone(),
            content_sha256_hasher: sha256hex.clone().map(|_| Sha256Hasher::new()),
            checksum_on_finish: false,
            trailer_s3s: None,
        })
    }
    /// Consumes the reader, returning the wrapped inner reader.
    pub fn into_inner(self) -> Box<dyn Reader> {
        self.inner
    }
    /// Update HashReader parameters
    ///
    /// Only fills in values that are currently unknown (negative size /
    /// non-positive actual size) and replaces the checksum when a new
    /// ETag is provided.
    pub fn update_params(&mut self, size: i64, actual_size: i64, etag: Option<String>) {
        if self.size < 0 && size >= 0 {
            self.size = size;
        }
        if self.actual_size <= 0 && actual_size > 0 {
            self.actual_size = actual_size;
        }
        if etag.is_some() {
            self.checksum = etag;
        }
    }
    /// Declared stream size in bytes (negative when unknown).
    pub fn size(&self) -> i64 {
        self.size
    }
    /// Actual payload size in bytes (negative when unknown).
    pub fn actual_size(&self) -> i64 {
        self.actual_size
    }
    /// Parses the content checksum from `headers` and installs it.
    ///
    /// Trailing checksums additionally capture `trailing_headers` so the
    /// expected value can be resolved at end-of-stream. When
    /// `ignore_value` is set, headers are only validated, and no hasher
    /// state is installed.
    pub fn add_checksum_from_s3s(
        &mut self,
        headers: &HeaderMap,
        trailing_headers: Option<TrailingHeaders>,
        ignore_value: bool,
    ) -> Result<(), std::io::Error> {
        let cs = get_content_checksum(headers)?;
        if ignore_value {
            return Ok(());
        }
        if let Some(checksum) = cs {
            if checksum.checksum_type.trailing() {
                self.trailer_s3s = trailing_headers.clone();
            }
            self.content_hash = Some(checksum.clone());
            return self.add_non_trailing_checksum(Some(checksum), ignore_value);
        }
        Ok(())
    }
    /// Like `add_checksum_from_s3s`, but never captures trailing headers.
    pub fn add_checksum_no_trailer(&mut self, header: &HeaderMap, ignore_value: bool) -> Result<(), std::io::Error> {
        let cs = get_content_checksum(header)?;
        if let Some(checksum) = cs {
            self.content_hash = Some(checksum.clone());
            return self.add_non_trailing_checksum(Some(checksum), ignore_value);
        }
        Ok(())
    }
    /// Installs `checksum` as the expected content hash and, unless
    /// `ignore_value` is set, creates the matching incremental hasher.
    ///
    /// # Errors
    /// Returns `InvalidData` when no hasher exists for the checksum type.
    pub fn add_non_trailing_checksum(&mut self, checksum: Option<Checksum>, ignore_value: bool) -> Result<(), std::io::Error> {
        if let Some(checksum) = checksum {
            self.content_hash = Some(checksum.clone());
            if ignore_value {
                return Ok(());
            }
            if let Some(hasher) = checksum.checksum_type.hasher() {
                self.content_hasher = Some(hasher);
            } else {
                return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "Invalid checksum type"));
            }
            tracing::debug!("add_non_trailing_checksum checksum={checksum:?}");
        }
        Ok(())
    }
    /// Returns the expected content checksum, but only when one is set,
    /// has a concrete type, and is valid; otherwise `None`.
    pub fn checksum(&self) -> Option<Checksum> {
        if self
            .content_hash
            .as_ref()
            .is_none_or(|v| !v.checksum_type.is_set() || !v.valid())
        {
            return None;
        }
        self.content_hash.clone()
    }
    /// The type of the expected content checksum, if any.
    pub fn content_crc_type(&self) -> Option<ChecksumType> {
        self.content_hash.as_ref().map(|v| v.checksum_type)
    }
    /// Returns a map of checksum-type name -> encoded checksum value.
    ///
    /// For trailing checksums the value is looked up in the captured
    /// trailing headers; otherwise the stored encoded value is used.
    /// Invalid or NONE-typed checksums yield an empty map.
    pub fn content_crc(&self) -> HashMap<String, String> {
        let mut map = HashMap::new();
        if let Some(checksum) = self.content_hash.as_ref() {
            if !checksum.valid() || checksum.checksum_type.is(ChecksumType::NONE) {
                return map;
            }
            if checksum.checksum_type.trailing() {
                if let Some(trailer) = self.trailer_s3s.as_ref()
                    && let Some(Some(checksum_str)) = trailer.read(|headers| {
                        checksum
                            .checksum_type
                            .key()
                            .and_then(|key| headers.get(key).and_then(|value| value.to_str().ok().map(|s| s.to_string())))
                    })
                {
                    map.insert(checksum.checksum_type.to_string(), checksum_str);
                }
                return map;
            }
            map.insert(checksum.checksum_type.to_string(), checksum.encoded.clone());
            return map;
        }
        map
    }
}
// Plain field accessors; used by `HashReader::new` when merging the state
// of an already-wrapped HashReader.
impl HashReaderMut for HashReader {
    fn into_inner(self) -> Box<dyn Reader> {
        self.inner
    }
    fn take_inner(&mut self) -> Box<dyn Reader> {
        // Replace inner with an empty reader to move it out safely while keeping self valid
        mem::replace(&mut self.inner, Box::new(WarpReader::new(Cursor::new(Vec::new()))))
    }
    fn bytes_read(&self) -> u64 {
        self.bytes_read
    }
    fn checksum(&self) -> &Option<String> {
        &self.checksum
    }
    fn set_checksum(&mut self, checksum: Option<String>) {
        self.checksum = checksum;
    }
    fn size(&self) -> i64 {
        self.size
    }
    fn set_size(&mut self, size: i64) {
        self.size = size;
    }
    fn actual_size(&self) -> i64 {
        self.actual_size
    }
    fn set_actual_size(&mut self, actual_size: i64) {
        self.actual_size = actual_size;
    }
    fn content_hash(&self) -> &Option<Checksum> {
        &self.content_hash
    }
    fn content_sha256(&self) -> &Option<String> {
        &self.content_sha256
    }
    fn get_trailer(&self) -> Option<&TrailingHeaders> {
        self.trailer_s3s.as_ref()
    }
    fn set_trailer(&mut self, trailer: Option<TrailingHeaders>) {
        self.trailer_s3s = trailer;
    }
}
impl AsyncRead for HashReader {
    // Feeds every chunk read from the inner reader into the active
    // hashers; when the inner reader reports end-of-stream (zero new
    // bytes), the accumulated digests are verified exactly once against
    // the expected values.
    fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<std::io::Result<()>> {
        let this = self.project();
        // Remember where the buffer ended so only newly read bytes are hashed.
        let before = buf.filled().len();
        match this.inner.poll_read(cx, buf) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Ok(())) => {
                let data = &buf.filled()[before..];
                let filled = data.len();
                *this.bytes_read += filled as u64;
                if filled > 0 {
                    // Update SHA256 hasher
                    if let Some(hasher) = this.content_sha256_hasher
                        && let Err(e) = hasher.write_all(data)
                    {
                        error!("SHA256 hasher write error, error={:?}", e);
                        return Poll::Ready(Err(std::io::Error::other(e)));
                    }
                    // Update content hasher
                    if let Some(hasher) = this.content_hasher
                        && let Err(e) = hasher.write_all(data)
                    {
                        return Poll::Ready(Err(std::io::Error::other(e)));
                    }
                }
                // Zero new bytes == end-of-stream: run verification once.
                if filled == 0 && !*this.checksum_on_finish {
                    // check SHA256
                    if let (Some(hasher), Some(expected_sha256)) = (this.content_sha256_hasher, this.content_sha256) {
                        let sha256 = hex_simd::encode_to_string(hasher.finalize(), hex_simd::AsciiCase::Lower);
                        if sha256 != *expected_sha256 {
                            error!("SHA256 mismatch, expected={:?}, actual={:?}", expected_sha256, sha256);
                            return Poll::Ready(Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "SHA256 mismatch")));
                        }
                    }
                    // check content hasher
                    if let (Some(hasher), Some(expected_content_hash)) = (this.content_hasher, this.content_hash) {
                        // Trailing checksums: the expected value only becomes
                        // available in the trailing headers, so resolve and
                        // decode it now before comparing.
                        if expected_content_hash.checksum_type.trailing()
                            && let Some(trailer) = this.trailer_s3s.as_ref()
                            && let Some(Some(checksum_str)) = trailer.read(|headers| {
                                expected_content_hash
                                    .checksum_type
                                    .key()
                                    .and_then(|key| headers.get(key).and_then(|value| value.to_str().ok().map(|s| s.to_string())))
                            })
                        {
                            expected_content_hash.encoded = checksum_str;
                            expected_content_hash.raw = general_purpose::STANDARD
                                .decode(&expected_content_hash.encoded)
                                .map_err(|_| std::io::Error::other("Invalid base64 checksum"))?;
                            if expected_content_hash.raw.is_empty() {
                                return Poll::Ready(Err(std::io::Error::other("Content hash mismatch")));
                            }
                        }
                        let content_hash = hasher.finalize();
                        if content_hash != expected_content_hash.raw {
                            let expected_hex = hex_simd::encode_to_string(&expected_content_hash.raw, hex_simd::AsciiCase::Lower);
                            let actual_hex = hex_simd::encode_to_string(content_hash, hex_simd::AsciiCase::Lower);
                            error!(
                                "Content hash mismatch, type={:?}, encoded={:?}, expected={:?}, actual={:?}",
                                expected_content_hash.checksum_type, expected_content_hash.encoded, expected_hex, actual_hex
                            );
                            // Use ChecksumMismatch error so that API layer can return BadDigest
                            let checksum_err = crate::errors::ChecksumMismatch {
                                want: expected_hex,
                                got: actual_hex,
                            };
                            return Poll::Ready(Err(std::io::Error::new(std::io::ErrorKind::InvalidData, checksum_err)));
                        }
                    }
                    // Mark verification done so repeated EOF polls skip it.
                    *this.checksum_on_finish = true;
                }
                Poll::Ready(Ok(()))
            }
            Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
        }
    }
}
impl EtagResolvable for HashReader {
    /// Resolve the ETag for this stream.
    ///
    /// Returns `None` when MD5/ETag computation is disabled; otherwise
    /// prefers the ETag computed by the wrapped reader and falls back to
    /// the checksum supplied at construction time.
    fn try_resolve_etag(&mut self) -> Option<String> {
        if self.diskable_md5 {
            return None;
        }
        self.inner.try_resolve_etag().or_else(|| self.checksum.clone())
    }
}
impl HashReaderDetector for HashReader {
    /// Always true: this type *is* a `HashReader`.
    fn is_hash_reader(&self) -> bool {
        true
    }
    /// Exposes mutable access for parameter merging in `HashReader::new`.
    fn as_hash_reader_mut(&mut self) -> Option<&mut dyn HashReaderMut> {
        Some(self)
    }
}
impl TryGetIndex for HashReader {
    /// Delegates compression-index lookup to the wrapped reader.
    fn try_get_index(&self) -> Option<&Index> {
        self.inner.try_get_index()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{DecryptReader, WarpReader, encrypt_reader};
    use std::io::Cursor;
    use tokio::io::{AsyncReadExt, BufReader};
    // Constructing a HashReader over plain and pre-wrapped readers must
    // preserve the declared sizes in every case.
    #[tokio::test]
    async fn test_hashreader_wrapping_logic() {
        let data = b"hello world";
        let size = data.len() as i64;
        let actual_size = size;
        let etag = None;
        // Test 1: Simple creation
        let reader1 = BufReader::new(Cursor::new(&data[..]));
        let reader1 = Box::new(WarpReader::new(reader1));
        let hash_reader1 = HashReader::new(reader1, size, actual_size, etag.clone(), None, false).unwrap();
        assert_eq!(hash_reader1.size(), size);
        assert_eq!(hash_reader1.actual_size(), actual_size);
        // Test 2: With HardLimitReader wrapping
        let reader2 = BufReader::new(Cursor::new(&data[..]));
        let reader2 = Box::new(WarpReader::new(reader2));
        let hard_limit = HardLimitReader::new(reader2, size);
        let hard_limit = Box::new(hard_limit);
        let hash_reader2 = HashReader::new(hard_limit, size, actual_size, etag.clone(), None, false).unwrap();
        assert_eq!(hash_reader2.size(), size);
        assert_eq!(hash_reader2.actual_size(), actual_size);
        // Test 3: With EtagReader wrapping
        let reader3 = BufReader::new(Cursor::new(&data[..]));
        let reader3 = Box::new(WarpReader::new(reader3));
        let etag_reader = EtagReader::new(reader3, etag.clone());
        let etag_reader = Box::new(etag_reader);
        let hash_reader3 = HashReader::new(etag_reader, size, actual_size, etag.clone(), None, false).unwrap();
        assert_eq!(hash_reader3.size(), size);
        assert_eq!(hash_reader3.actual_size(), actual_size);
    }
    // Reading through a HashReader yields the original bytes unchanged.
    #[tokio::test]
    async fn test_hashreader_etag_basic() {
        let data = b"hello hashreader";
        let reader = BufReader::new(Cursor::new(&data[..]));
        let reader = Box::new(WarpReader::new(reader));
        let mut hash_reader = HashReader::new(reader, data.len() as i64, data.len() as i64, None, None, false).unwrap();
        let mut buf = Vec::new();
        let _ = hash_reader.read_to_end(&mut buf).await.unwrap();
        // Since we removed EtagReader integration, etag might be None
        let _etag = hash_reader.try_resolve_etag();
        // Just check that we can call etag() without error
        assert_eq!(buf, data);
    }
    // With diskable_md5 set, no ETag is computed or returned.
    #[tokio::test]
    async fn test_hashreader_diskable_md5() {
        let data = b"no etag";
        let reader = BufReader::new(Cursor::new(&data[..]));
        let reader = Box::new(WarpReader::new(reader));
        let mut hash_reader = HashReader::new(reader, data.len() as i64, data.len() as i64, None, None, true).unwrap();
        let mut buf = Vec::new();
        let _ = hash_reader.read_to_end(&mut buf).await.unwrap();
        // Etag should be None when diskable_md5 is true
        let etag = hash_reader.try_resolve_etag();
        assert!(etag.is_none());
        assert_eq!(buf, data);
    }
    // Wrapping an unread HashReader in another HashReader merges state
    // instead of failing.
    #[tokio::test]
    async fn test_hashreader_new_logic() {
        let data = b"test data";
        let reader = BufReader::new(Cursor::new(&data[..]));
        let reader = Box::new(WarpReader::new(reader));
        // Create a HashReader first
        let hash_reader =
            HashReader::new(reader, data.len() as i64, data.len() as i64, Some("test_etag".to_string()), None, false).unwrap();
        let hash_reader = Box::new(WarpReader::new(hash_reader));
        // Now try to create another HashReader from the existing one using new
        let result = HashReader::new(
            hash_reader,
            data.len() as i64,
            data.len() as i64,
            Some("test_etag".to_string()),
            None,
            false,
        );
        assert!(result.is_ok());
        let final_reader = result.unwrap();
        assert_eq!(final_reader.checksum, Some("test_etag".to_string()));
        assert_eq!(final_reader.size(), data.len() as i64);
    }
    // End-to-end pipeline: HashReader -> compress -> encrypt -> decrypt ->
    // decompress must round-trip the original random payload.
    #[tokio::test]
    async fn test_for_wrapping_readers() {
        use crate::{CompressReader, DecompressReader};
        use md5::{Digest, Md5};
        use rand::Rng;
        use rand::RngCore;
        use rustfs_utils::compress::CompressionAlgorithm;
        // Generate 1MB random data
        let size = 1024 * 1024;
        let mut data = vec![0u8; size];
        rand::rng().fill(&mut data[..]);
        let mut hasher = Md5::new();
        hasher.update(&data);
        let hex = faster_hex::hex_string(hasher.finalize().as_slice());
        let expected = hex.to_string();
        println!("expected: {expected}");
        let reader = Cursor::new(data.clone());
        let reader = BufReader::new(reader);
        // Enable compression test
        let is_compress = true;
        let size = data.len() as i64;
        let actual_size = data.len() as i64;
        let reader = Box::new(WarpReader::new(reader));
        // Create HashReader
        let mut hr = HashReader::new(reader, size, actual_size, Some(expected.clone()), None, false).unwrap();
        // If compression is enabled, compress data first
        let compressed_data = if is_compress {
            let mut compressed_buf = Vec::new();
            let compress_reader = CompressReader::new(hr, CompressionAlgorithm::Gzip);
            let mut compress_reader = compress_reader;
            compress_reader.read_to_end(&mut compressed_buf).await.unwrap();
            println!("Original size: {}, Compressed size: {}", data.len(), compressed_buf.len());
            compressed_buf
        } else {
            // If not compressing, read original data directly
            let mut buf = Vec::new();
            hr.read_to_end(&mut buf).await.unwrap();
            buf
        };
        let mut key = [0u8; 32];
        let mut nonce = [0u8; 12];
        rand::rng().fill_bytes(&mut key);
        rand::rng().fill_bytes(&mut nonce);
        let is_encrypt = true;
        if is_encrypt {
            // Encrypt compressed data
            let encrypt_reader = encrypt_reader::EncryptReader::new(WarpReader::new(Cursor::new(compressed_data)), key, nonce);
            let mut encrypted_data = Vec::new();
            let mut encrypt_reader = encrypt_reader;
            encrypt_reader.read_to_end(&mut encrypted_data).await.unwrap();
            println!("Encrypted size: {}", encrypted_data.len());
            // Decrypt data
            let decrypt_reader = DecryptReader::new(WarpReader::new(Cursor::new(encrypted_data)), key, nonce);
            let mut decrypt_reader = decrypt_reader;
            let mut decrypted_data = Vec::new();
            decrypt_reader.read_to_end(&mut decrypted_data).await.unwrap();
            if is_compress {
                // If compression was used, decompress is needed
                let decompress_reader =
                    DecompressReader::new(WarpReader::new(Cursor::new(decrypted_data)), CompressionAlgorithm::Gzip);
                let mut decompress_reader = decompress_reader;
                let mut final_data = Vec::new();
                decompress_reader.read_to_end(&mut final_data).await.unwrap();
                println!("Final decompressed size: {}", final_data.len());
                assert_eq!(final_data.len() as i64, actual_size);
                assert_eq!(&final_data, &data);
            } else {
                // Without compression we can compare the decrypted bytes directly
                assert_eq!(decrypted_data.len() as i64, actual_size);
                assert_eq!(&decrypted_data, &data);
            }
            return;
        }
        // When encryption is disabled, only handle compression/decompression
        if is_compress {
            let decompress_reader =
                DecompressReader::new(WarpReader::new(Cursor::new(compressed_data)), CompressionAlgorithm::Gzip);
            let mut decompress_reader = decompress_reader;
            let mut decompressed = Vec::new();
            decompress_reader.read_to_end(&mut decompressed).await.unwrap();
            assert_eq!(decompressed.len() as i64, actual_size);
            assert_eq!(&decompressed, &data);
        } else {
            assert_eq!(compressed_data.len() as i64, actual_size);
            assert_eq!(&compressed_data, &data);
        }
        // Validate the etag (compression alters the payload, so this may require adjustments)
        println!("Test completed successfully with compression: {is_compress}, encryption: {is_encrypt}");
    }
    // Highly repetitive data must shrink under compression and round-trip
    // back to the original bytes.
    #[tokio::test]
    async fn test_compression_with_compressible_data() {
        use crate::{CompressReader, DecompressReader};
        use rustfs_utils::compress::CompressionAlgorithm;
        // Create highly compressible data (repeated pattern)
        let pattern = b"Hello, World! This is a test pattern that should compress well. ";
        let repeat_count = 16384; // 16K repetitions
        let mut data = Vec::new();
        for _ in 0..repeat_count {
            data.extend_from_slice(pattern);
        }
        println!("Original data size: {} bytes", data.len());
        let reader = BufReader::new(Cursor::new(data.clone()));
        let reader = Box::new(WarpReader::new(reader));
        let hash_reader = HashReader::new(reader, data.len() as i64, data.len() as i64, None, None, false).unwrap();
        // Test compression
        let compress_reader = CompressReader::new(hash_reader, CompressionAlgorithm::Gzip);
        let mut compressed_data = Vec::new();
        let mut compress_reader = compress_reader;
        compress_reader.read_to_end(&mut compressed_data).await.unwrap();
        println!("Compressed data size: {} bytes", compressed_data.len());
        println!("Compression ratio: {:.2}%", (compressed_data.len() as f64 / data.len() as f64) * 100.0);
        // Verify compression actually reduced size for this compressible data
        assert!(compressed_data.len() < data.len(), "Compression should reduce size for repetitive data");
        // Test decompression
        let decompress_reader = DecompressReader::new(Cursor::new(compressed_data), CompressionAlgorithm::Gzip);
        let mut decompressed_data = Vec::new();
        let mut decompress_reader = decompress_reader;
        decompress_reader.read_to_end(&mut decompressed_data).await.unwrap();
        // Verify decompressed data matches original
        assert_eq!(decompressed_data.len(), data.len());
        assert_eq!(&decompressed_data, &data);
        println!("Compression/decompression test passed successfully!");
    }
    // Each supported algorithm must round-trip the payload unchanged.
    #[tokio::test]
    async fn test_compression_algorithms() {
        use crate::{CompressReader, DecompressReader};
        use rustfs_utils::compress::CompressionAlgorithm;
        let data = b"This is test data for compression algorithm testing. ".repeat(1000);
        println!("Testing with {} bytes of data", data.len());
        let algorithms = vec![
            CompressionAlgorithm::Gzip,
            CompressionAlgorithm::Deflate,
            CompressionAlgorithm::Zstd,
        ];
        for algorithm in algorithms {
            println!("\nTesting algorithm: {algorithm:?}");
            let reader = BufReader::new(Cursor::new(data.clone()));
            let reader = Box::new(WarpReader::new(reader));
            let hash_reader = HashReader::new(reader, data.len() as i64, data.len() as i64, None, None, false).unwrap();
            // Compress
            let compress_reader = CompressReader::new(hash_reader, algorithm);
            let mut compressed_data = Vec::new();
            let mut compress_reader = compress_reader;
            compress_reader.read_to_end(&mut compressed_data).await.unwrap();
            println!(
                "  Compressed size: {} bytes (ratio: {:.2}%)",
                compressed_data.len(),
                (compressed_data.len() as f64 / data.len() as f64) * 100.0
            );
            // Decompress
            let decompress_reader = DecompressReader::new(Cursor::new(compressed_data), algorithm);
            let mut decompressed_data = Vec::new();
            let mut decompress_reader = decompress_reader;
            decompress_reader.read_to_end(&mut decompressed_data).await.unwrap();
            // Verify
            assert_eq!(decompressed_data.len(), data.len());
            assert_eq!(&decompressed_data, &data);
            println!("  ✓ Algorithm {algorithm:?} test passed");
        }
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.