repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/extension/src/lib.rs
packages/extension/src/lib.rs
//! This file exports functions into the vscode extension use dioxus_autofmt::{FormattedBlock, IndentOptions, IndentType}; use wasm_bindgen::prelude::*; #[wasm_bindgen] pub fn format_rsx(raw: String, use_tabs: bool, indent_size: usize) -> String { let block = dioxus_autofmt::fmt_block( &raw, 0, IndentOptions::new( if use_tabs { IndentType::Tabs } else { IndentType::Spaces }, indent_size, false, ), ); block.unwrap() } #[wasm_bindgen] pub fn format_selection( raw: String, use_tabs: bool, indent_size: usize, base_indent: usize, ) -> String { let block = dioxus_autofmt::fmt_block( &raw, base_indent, IndentOptions::new( if use_tabs { IndentType::Tabs } else { IndentType::Spaces }, indent_size, false, ), ); block.unwrap() } #[wasm_bindgen] pub struct FormatBlockInstance { new: String, _edits: Vec<FormattedBlock>, } #[wasm_bindgen] impl FormatBlockInstance { #[wasm_bindgen] pub fn formatted(&self) -> String { self.new.clone() } #[wasm_bindgen] pub fn length(&self) -> usize { self._edits.len() } } #[wasm_bindgen] pub fn format_file(contents: String, use_tabs: bool, indent_size: usize) -> FormatBlockInstance { // todo: use rustfmt for this instead let options = IndentOptions::new( if use_tabs { IndentType::Tabs } else { IndentType::Spaces }, indent_size, false, ); let Ok(Ok(_edits)) = syn::parse_file(&contents) .map(|file| dioxus_autofmt::try_fmt_file(&contents, &file, options)) else { return FormatBlockInstance { new: contents, _edits: Vec::new(), }; }; let out = dioxus_autofmt::apply_formats(&contents, _edits.clone()); FormatBlockInstance { new: out, _edits } } #[wasm_bindgen] pub fn translate_rsx(contents: String, _component: bool) -> String { // Ensure we're loading valid HTML let dom = html_parser::Dom::parse(&contents).unwrap(); let callbody = dioxus_rsx_rosetta::rsx_from_html(&dom); // Convert the HTML to RSX dioxus_autofmt::write_block_out(&callbody).unwrap() }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/native/src/prelude.rs
packages/native/src/prelude.rs
// RSX and component definition pub use dioxus_core; pub use dioxus_core::{ consume_context, provide_context, spawn, suspend, try_consume_context, use_drop, use_hook, AnyhowContext, Attribute, Callback, Component, Element, ErrorBoundary, ErrorContext, Event, EventHandler, Fragment, HasAttributes, IntoDynNode, RenderError, ScopeId, SuspenseBoundary, SuspenseContext, VNode, VirtualDom, }; #[allow(deprecated)] pub use dioxus_core_macro::{component, rsx, Props}; pub use dioxus_html as dioxus_elements; pub use dioxus_html::{ events::*, extensions::*, global_attributes, keyboard_types, svg_attributes, traits::*, GlobalAttributesExtension, SvgAttributesExtension, }; pub use dioxus_html::{Code, Key, Location, Modifiers}; // Assets pub use manganis::{self, *}; // Hooks, signals, stores pub use dioxus_hooks::*; pub use dioxus_signals::{self, *}; pub use dioxus_stores::{self, store, use_store, GlobalStore, ReadStore, Store, WriteStore}; // Document and History pub use dioxus_document::{self as document, Meta, Stylesheet, Title}; pub use dioxus_history::{history, History};
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/native/src/lib.rs
packages/native/src/lib.rs
#![cfg_attr(docsrs, feature(doc_cfg))]
//! A native renderer for Dioxus.
//!
//! ## Feature flags
//! - `default`: Enables the features listed below.
//! - `accessibility`: Enables [`accesskit`](https://docs.rs/accesskit/latest/accesskit/) accessibility support.
//! - `hot-reload`: Enables hot-reloading of Dioxus RSX.
//! - `menu`: Enables the [`muda`](https://docs.rs/muda/latest/muda/) menubar.
//! - `tracing`: Enables tracing support.

mod assets;
mod contexts;
mod dioxus_application;
mod dioxus_renderer;
mod link_handler;
#[cfg(feature = "prelude")]
pub mod prelude;

#[doc(inline)]
pub use dioxus_native_dom::*;

pub use anyrender_vello::{CustomPaintCtx, CustomPaintSource, DeviceHandle, TextureHandle};
use assets::DioxusNativeNetProvider;
pub use dioxus_application::{DioxusNativeApplication, DioxusNativeEvent};
pub use dioxus_renderer::{DioxusNativeWindowRenderer, Features, Limits};
#[cfg(not(all(target_os = "ios", target_abi = "sim")))]
pub use dioxus_renderer::use_wgpu;

use blitz_shell::{create_default_event_loop, BlitzShellEvent, Config, WindowConfig};
use dioxus_core::{ComponentFunction, Element, VirtualDom};
use link_handler::DioxusNativeNavigationProvider;
use std::any::Any;
use std::sync::Arc;
use winit::window::WindowAttributes;

/// Launch an interactive HTML/CSS renderer driven by the Dioxus virtualdom
pub fn launch(app: fn() -> Element) {
    launch_cfg(app, vec![], vec![])
}

/// Launch with root contexts and renderer configuration but no props.
/// Thin wrapper over [`launch_cfg_with_props`] with unit props.
pub fn launch_cfg(
    app: fn() -> Element,
    contexts: Vec<Box<dyn Fn() -> Box<dyn Any> + Send + Sync>>,
    cfg: Vec<Box<dyn Any>>,
) {
    launch_cfg_with_props(app, (), contexts, cfg)
}

/// Full launch entry point: builds the event loop, virtualdom, document, and
/// window, then runs the winit application until the event loop exits.
///
/// `configs` is a type-erased bag of optional settings; recognised types are
/// `Features`, `Limits`, `WindowAttributes`, and `Config` — anything else is
/// silently dropped.
// todo: props shouldn't have the clone bound - should try and match dioxus-desktop behavior
pub fn launch_cfg_with_props<P: Clone + 'static, M: 'static>(
    app: impl ComponentFunction<P, M>,
    props: P,
    contexts: Vec<Box<dyn Fn() -> Box<dyn Any> + Send + Sync>>,
    configs: Vec<Box<dyn Any>>,
) {
    // Macro to attempt to downcast a type out of a Box<dyn Any>
    macro_rules! try_read_config {
        ($input:ident, $store:ident, $kind:ty) => {
            // Try to downcast the Box<dyn Any> to type $kind
            match $input.downcast::<$kind>() {
                // If the type matches then write downcast value to variable $store
                // and skip to the next config entry.
                Ok(value) => {
                    $store = Some(*value);
                    continue;
                }
                // Else extract the original Box<dyn Any> value out of the error type
                // and return it so that we can try again with a different type.
                Err(cfg) => cfg,
            }
        };
    }

    // Read config values
    let mut features = None;
    let mut limits = None;
    let mut window_attributes = None;
    let mut _config = None;
    for mut cfg in configs {
        cfg = try_read_config!(cfg, features, Features);
        cfg = try_read_config!(cfg, limits, Limits);
        cfg = try_read_config!(cfg, window_attributes, WindowAttributes);
        cfg = try_read_config!(cfg, _config, Config);
        // Unrecognised config types are dropped here.
        let _ = cfg;
    }

    let event_loop = create_default_event_loop::<BlitzShellEvent>();

    // Turn on the runtime and enter it
    // (the guard keeps the tokio context active for the rest of this function)
    #[cfg(feature = "net")]
    let rt = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .unwrap();
    #[cfg(feature = "net")]
    let _guard = rt.enter();

    // Setup hot-reloading if enabled: devserver messages are forwarded into
    // the event loop as embedder events.
    #[cfg(all(
        feature = "hot-reload",
        debug_assertions,
        not(target_os = "android"),
        not(target_os = "ios")
    ))]
    {
        let proxy = event_loop.create_proxy();
        dioxus_devtools::connect(move |event| {
            let dxn_event = DioxusNativeEvent::DevserverEvent(event);
            let _ = proxy.send_event(BlitzShellEvent::embedder_event(dxn_event));
        })
    }

    // Spin up the virtualdom
    // We're going to need to hit it with a special waker
    // Note that we are delaying the initialization of window-specific contexts (net provider, document, etc)
    let mut vdom = VirtualDom::new_with_props(app, props);

    // Add contexts
    for context in contexts {
        vdom.insert_any_root_context(context());
    }

    let net_provider = Some(DioxusNativeNetProvider::shared(event_loop.create_proxy()));

    #[cfg(feature = "html")]
    let html_parser_provider = Some(Arc::new(blitz_html::HtmlProvider) as _);
    #[cfg(not(feature = "html"))]
    let html_parser_provider = None;

    let navigation_provider = Some(Arc::new(DioxusNativeNavigationProvider) as _);

    // Create document + window from the baked virtualdom
    let doc = DioxusDocument::new(
        vdom,
        DocumentConfig {
            net_provider,
            html_parser_provider,
            navigation_provider,
            ..Default::default()
        },
    );

    // GPU renderer everywhere except the iOS simulator, which uses the
    // plain constructor (no wgpu features/limits).
    #[cfg(not(all(target_os = "ios", target_abi = "sim")))]
    let renderer = DioxusNativeWindowRenderer::with_features_and_limits(features, limits);
    #[cfg(all(target_os = "ios", target_abi = "sim"))]
    let renderer = DioxusNativeWindowRenderer::new();

    let config = WindowConfig::with_attributes(
        Box::new(doc) as _,
        renderer.clone(),
        window_attributes.unwrap_or_default(),
    );

    // Create application
    let mut application = DioxusNativeApplication::new(event_loop.create_proxy(), config);

    // Run event loop (blocks until exit)
    event_loop.run_app(&mut application).unwrap();
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/native/src/link_handler.rs
packages/native/src/link_handler.rs
use blitz_traits::{
    navigation::{NavigationOptions, NavigationProvider},
    net::Method,
};

/// Navigation provider that hands plain GET navigations off to the user's
/// default browser (or mail client for `mailto:`).
pub(crate) struct DioxusNativeNavigationProvider;

impl NavigationProvider for DioxusNativeNavigationProvider {
    fn navigate_to(&self, options: NavigationOptions) {
        // Only GET navigations are delegated to the OS; anything else is ignored.
        if options.method != Method::GET {
            return;
        }

        // Only web and mail URLs are safe to hand to the OS opener.
        let scheme = options.url.scheme();
        if scheme != "http" && scheme != "https" && scheme != "mailto" {
            return;
        }

        if let Err(_err) = webbrowser::open(options.url.as_str()) {
            #[cfg(feature = "tracing")]
            tracing::error!("Failed to open URL: {}", _err);
        }
    }
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/native/src/contexts.rs
packages/native/src/contexts.rs
use blitz_shell::BlitzShellEvent;
use dioxus_document::{Document, NoOpDocument};
use winit::{event_loop::EventLoopProxy, window::WindowId};

use crate::DioxusNativeEvent;

/// `Document` implementation that forwards `<head>` mutations for a specific
/// window to the winit event loop as embedder events.
pub struct DioxusNativeDocument {
    pub(crate) proxy: EventLoopProxy<BlitzShellEvent>,
    pub(crate) window: WindowId,
}

impl DioxusNativeDocument {
    pub(crate) fn new(proxy: EventLoopProxy<BlitzShellEvent>, window: WindowId) -> Self {
        Self { proxy, window }
    }
}

impl Document for DioxusNativeDocument {
    fn eval(&self, _js: String) -> dioxus_document::Eval {
        // JS evaluation is not supported natively; delegate to the no-op impl.
        NoOpDocument.eval(_js)
    }

    fn create_head_element(
        &self,
        name: &str,
        attributes: &[(&str, String)],
        contents: Option<String>,
    ) {
        // Turn the borrowed attribute pairs into owned (key, value) pairs.
        let mut owned_attributes = Vec::with_capacity(attributes.len());
        for (key, value) in attributes {
            owned_attributes.push((key.to_string(), value.clone()));
        }

        let event = DioxusNativeEvent::CreateHeadElement {
            window: self.window,
            name: name.to_string(),
            attributes: owned_attributes,
            contents,
        };

        // Delivery is best-effort: a closed event loop simply drops the event.
        let _ = self
            .proxy
            .send_event(BlitzShellEvent::embedder_event(event));
    }

    fn set_title(&self, title: String) {
        self.create_head_element("title", &[], Some(title));
    }

    fn create_meta(&self, props: dioxus_document::MetaProps) {
        self.create_head_element("meta", &props.attributes(), None);
    }

    fn create_script(&self, props: dioxus_document::ScriptProps) {
        let attributes = props.attributes();
        self.create_head_element("script", &attributes, props.script_contents().ok());
    }

    fn create_style(&self, props: dioxus_document::StyleProps) {
        let attributes = props.attributes();
        self.create_head_element("style", &attributes, props.style_contents().ok());
    }

    fn create_link(&self, props: dioxus_document::LinkProps) {
        self.create_head_element("link", &props.attributes(), None);
    }

    fn create_head_component(&self) -> bool {
        true
    }
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/native/src/assets.rs
packages/native/src/assets.rs
use blitz_shell::BlitzShellNetCallback;
use std::sync::Arc;

use blitz_dom::net::Resource;
use blitz_shell::BlitzShellEvent;
use blitz_traits::net::{NetCallback, NetProvider};
use winit::event_loop::EventLoopProxy;

/// Network provider that serves `dioxus://` asset URLs from the local file
/// system and (when the `net` feature is enabled) delegates every other
/// request to a real network provider.
pub struct DioxusNativeNetProvider {
    // Callback used to deliver fetched bytes back to the shell event loop.
    callback: Arc<dyn NetCallback<Resource> + 'static>,
    // Real network provider used for non-`dioxus://` URLs.
    #[cfg(feature = "net")]
    inner_net_provider: Arc<dyn NetProvider<Resource> + 'static>,
}

impl DioxusNativeNetProvider {
    /// Construct a provider and return it pre-wrapped as an `Arc` trait object.
    pub fn shared(proxy: EventLoopProxy<BlitzShellEvent>) -> Arc<dyn NetProvider<Resource>> {
        Arc::new(Self::new(proxy)) as Arc<dyn NetProvider<Resource>>
    }

    /// Construct a provider whose fetch completions are posted to `proxy`.
    pub fn new(proxy: EventLoopProxy<BlitzShellEvent>) -> Self {
        let net_callback = BlitzShellNetCallback::shared(proxy);
        #[cfg(feature = "net")]
        let net_provider = blitz_net::Provider::shared(net_callback.clone());
        Self {
            callback: net_callback,
            #[cfg(feature = "net")]
            inner_net_provider: net_provider,
        }
    }
}

impl NetProvider<Resource> for DioxusNativeNetProvider {
    fn fetch(
        &self,
        doc_id: usize,
        request: blitz_traits::net::Request,
        handler: blitz_traits::net::BoxedHandler<Resource>,
    ) {
        // `dioxus://` URLs are bundled assets resolved from the file system;
        // everything else goes through the inner provider (if compiled in).
        if request.url.scheme() == "dioxus" {
            #[allow(clippy::single_match)] // cfg'd code has multiple branches in some configurations
            match dioxus_asset_resolver::native::serve_asset(request.url.path()) {
                Ok(res) => {
                    #[cfg(feature = "tracing")]
                    tracing::trace!("fetching asset from file system success {request:#?}");
                    handler.bytes(doc_id, res.into_body().into(), self.callback.clone())
                }
                // NOTE(review): resolution failures are logged (when tracing is
                // enabled) but otherwise dropped — the handler is never invoked.
                Err(_) => {
                    #[cfg(feature = "tracing")]
                    tracing::warn!("fetching asset from file system error {request:#?}");
                }
            }
        } else {
            #[cfg(feature = "net")]
            self.inner_net_provider.fetch(doc_id, request, handler);
            #[cfg(all(not(feature = "net"), feature = "tracing"))]
            tracing::warn!("net feature not enabled, cannot fetch {request:#?}");
        }
    }
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/native/src/dioxus_application.rs
packages/native/src/dioxus_application.rs
use blitz_shell::{BlitzApplication, View};
use dioxus_core::{provide_context, ScopeId};
use dioxus_history::{History, MemoryHistory};
use std::rc::Rc;
use winit::application::ApplicationHandler;
use winit::event::{StartCause, WindowEvent};
use winit::event_loop::{ActiveEventLoop, EventLoopProxy};
use winit::window::WindowId;

use crate::DioxusNativeWindowRenderer;
use crate::{contexts::DioxusNativeDocument, BlitzShellEvent, DioxusDocument, WindowConfig};

/// Dioxus-native specific event type
pub enum DioxusNativeEvent {
    /// A hotreload event, basically telling us to update our templates.
    #[cfg(all(
        feature = "hot-reload",
        debug_assertions,
        not(target_os = "android"),
        not(target_os = "ios")
    ))]
    DevserverEvent(dioxus_devtools::DevserverMsg),

    /// Create a new head element from the Link and Title elements
    ///
    /// todo(jon): these should probably be synchronous somehow
    CreateHeadElement {
        window: WindowId,
        name: String,
        attributes: Vec<(String, String)>,
        contents: Option<String>,
    },
}

/// Winit application handler that wraps the generic `BlitzApplication` and
/// adds Dioxus-specific window setup and event handling.
pub struct DioxusNativeApplication {
    // Window config held until `resumed`, when the window can actually be created.
    pending_window: Option<WindowConfig<DioxusNativeWindowRenderer>>,
    inner: BlitzApplication<DioxusNativeWindowRenderer>,
    proxy: EventLoopProxy<BlitzShellEvent>,
}

impl DioxusNativeApplication {
    /// Create the application with one pending window configuration.
    pub fn new(
        proxy: EventLoopProxy<BlitzShellEvent>,
        config: WindowConfig<DioxusNativeWindowRenderer>,
    ) -> Self {
        Self {
            pending_window: Some(config),
            inner: BlitzApplication::new(proxy.clone()),
            proxy,
        }
    }

    /// Register an additional window with the inner application.
    pub fn add_window(&mut self, window_config: WindowConfig<DioxusNativeWindowRenderer>) {
        self.inner.add_window(window_config);
    }

    /// Dispatch a `DioxusNativeEvent` (hot-reload or head-element creation).
    fn handle_blitz_shell_event(
        &mut self,
        event_loop: &ActiveEventLoop,
        event: &DioxusNativeEvent,
    ) {
        match event {
            #[cfg(all(
                feature = "hot-reload",
                debug_assertions,
                not(target_os = "android"),
                not(target_os = "ios")
            ))]
            DioxusNativeEvent::DevserverEvent(event) => match event {
                dioxus_devtools::DevserverMsg::HotReload(hotreload_message) => {
                    // Apply the hot-reload patch to every open window.
                    for window in self.inner.windows.values_mut() {
                        let doc = window.downcast_doc_mut::<DioxusDocument>();
                        // Apply changes to vdom
                        dioxus_devtools::apply_changes(&doc.vdom, hotreload_message);
                        // Reload changed assets
                        for asset_path in &hotreload_message.assets {
                            if let Some(url) = asset_path.to_str() {
                                doc.reload_resource_by_href(url);
                            }
                        }
                        window.poll();
                    }
                }
                dioxus_devtools::DevserverMsg::Shutdown => event_loop.exit(),
                dioxus_devtools::DevserverMsg::FullReloadStart => {}
                dioxus_devtools::DevserverMsg::FullReloadFailed => {}
                dioxus_devtools::DevserverMsg::FullReloadCommand => {}
                _ => {}
            },

            DioxusNativeEvent::CreateHeadElement {
                name,
                attributes,
                contents,
                window,
            } => {
                // Silently ignored if the target window no longer exists.
                if let Some(window) = self.inner.windows.get_mut(window) {
                    let doc = window.downcast_doc_mut::<DioxusDocument>();
                    doc.create_head_element(name, attributes, contents);
                    window.poll();
                }
            }

            // Suppress unused variable warning
            #[cfg(not(all(
                feature = "hot-reload",
                debug_assertions,
                not(target_os = "android"),
                not(target_os = "ios")
            )))]
            #[allow(unreachable_patterns)]
            _ => {
                let _ = event_loop;
                let _ = event;
            }
        }
    }
}

impl ApplicationHandler<BlitzShellEvent> for DioxusNativeApplication {
    fn resumed(&mut self, event_loop: &ActiveEventLoop) {
        #[cfg(feature = "tracing")]
        tracing::debug!("Injecting document provider into all windows");

        // First resume: materialise the pending window and wire up the
        // document, history, and renderer contexts at the root scope.
        if let Some(config) = self.pending_window.take() {
            let mut window = View::init(config, event_loop, &self.proxy);
            let renderer = window.renderer.clone();
            let window_id = window.window_id();
            let doc = window.downcast_doc_mut::<DioxusDocument>();

            doc.vdom.in_scope(ScopeId::ROOT, || {
                let shared: Rc<dyn dioxus_document::Document> =
                    Rc::new(DioxusNativeDocument::new(self.proxy.clone(), window_id));
                provide_context(shared);
            });

            // Add history
            let history_provider: Rc<dyn History> = Rc::new(MemoryHistory::default());
            doc.vdom
                .in_scope(ScopeId::ROOT, move || provide_context(history_provider));

            // Add renderer
            doc.vdom
                .in_scope(ScopeId::ROOT, move || provide_context(renderer));

            // Queue rebuild
            doc.initial_build();

            // And then request redraw
            window.request_redraw();

            // todo(jon): we should actually mess with the pending windows instead of passing along the contexts
            self.inner.windows.insert(window_id, window);
        }

        self.inner.resumed(event_loop);
    }

    fn suspended(&mut self, event_loop: &ActiveEventLoop) {
        self.inner.suspended(event_loop);
    }

    fn new_events(&mut self, event_loop: &ActiveEventLoop, cause: StartCause) {
        self.inner.new_events(event_loop, cause);
    }

    fn window_event(
        &mut self,
        event_loop: &ActiveEventLoop,
        window_id: WindowId,
        event: WindowEvent,
    ) {
        self.inner.window_event(event_loop, window_id, event);
    }

    fn user_event(&mut self, event_loop: &ActiveEventLoop, event: BlitzShellEvent) {
        match event {
            // Embedder events carry our own DioxusNativeEvent payloads.
            BlitzShellEvent::Embedder(event) => {
                if let Some(event) = event.downcast_ref::<DioxusNativeEvent>() {
                    self.handle_blitz_shell_event(event_loop, event);
                }
            }
            event => self.inner.user_event(event_loop, event),
        }
    }
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/native/src/dioxus_renderer.rs
packages/native/src/dioxus_renderer.rs
use std::cell::RefCell;
use std::rc::Rc;
use std::sync::Arc;

use anyrender::WindowRenderer;
pub use anyrender_vello::{
    wgpu::{Features, Limits},
    CustomPaintSource, VelloRendererOptions,
};

// GPU (wgpu) renderer everywhere except the iOS simulator, which falls back
// to the CPU implementation under the same `InnerRenderer` alias.
#[cfg(not(all(target_os = "ios", target_abi = "sim")))]
pub use anyrender_vello::VelloWindowRenderer as InnerRenderer;
#[cfg(all(target_os = "ios", target_abi = "sim"))]
pub use anyrender_vello_cpu::VelloCpuWindowRenderer as InnerRenderer;

/// Hook that registers a custom wgpu paint source with the window renderer
/// and returns its id, unregistering it again on component cleanup.
#[cfg(not(all(target_os = "ios", target_abi = "sim")))]
pub fn use_wgpu<T: CustomPaintSource>(create_source: impl FnOnce() -> T) -> u64 {
    use dioxus_core::{consume_context, use_hook_with_cleanup};
    let (_renderer, id) = use_hook_with_cleanup(
        || {
            // The renderer handle is expected to be provided as a root context.
            let renderer = consume_context::<DioxusNativeWindowRenderer>();
            let source = Box::new(create_source());
            let id = renderer.register_custom_paint_source(source);
            (renderer, id)
        },
        |(renderer, id)| {
            renderer.unregister_custom_paint_source(id);
        },
    );
    id
}

/// Cheaply-cloneable, shared handle around the platform window renderer.
#[derive(Clone)]
pub struct DioxusNativeWindowRenderer {
    inner: Rc<RefCell<InnerRenderer>>,
}

impl Default for DioxusNativeWindowRenderer {
    fn default() -> Self {
        Self::new()
    }
}

impl DioxusNativeWindowRenderer {
    /// Construct a renderer with default options.
    pub fn new() -> Self {
        let vello_renderer = InnerRenderer::new();
        Self::with_inner_renderer(vello_renderer)
    }

    /// Construct a renderer with explicit wgpu `Features`/`Limits` overrides.
    #[cfg(not(all(target_os = "ios", target_abi = "sim")))]
    pub fn with_features_and_limits(features: Option<Features>, limits: Option<Limits>) -> Self {
        let vello_renderer = InnerRenderer::with_options(VelloRendererOptions {
            features,
            limits,
            ..Default::default()
        });
        Self::with_inner_renderer(vello_renderer)
    }

    // Wrap an already-constructed inner renderer in the shared handle.
    fn with_inner_renderer(vello_renderer: InnerRenderer) -> Self {
        Self {
            inner: Rc::new(RefCell::new(vello_renderer)),
        }
    }
}

#[cfg(not(all(target_os = "ios", target_abi = "sim")))]
impl DioxusNativeWindowRenderer {
    /// Register `source` with the inner renderer; returns the handle id.
    pub fn register_custom_paint_source(&self, source: Box<dyn CustomPaintSource>) -> u64 {
        self.inner.borrow_mut().register_custom_paint_source(source)
    }

    /// Remove a previously registered paint source by id.
    pub fn unregister_custom_paint_source(&self, id: u64) {
        self.inner.borrow_mut().unregister_custom_paint_source(id)
    }
}

// Every trait method simply forwards to the wrapped inner renderer.
impl WindowRenderer for DioxusNativeWindowRenderer {
    type ScenePainter<'a>
        = <InnerRenderer as WindowRenderer>::ScenePainter<'a>
    where
        Self: 'a;

    fn resume(&mut self, window: Arc<dyn anyrender::WindowHandle>, width: u32, height: u32) {
        self.inner.borrow_mut().resume(window, width, height)
    }

    fn suspend(&mut self) {
        self.inner.borrow_mut().suspend()
    }

    fn is_active(&self) -> bool {
        self.inner.borrow().is_active()
    }

    fn set_size(&mut self, width: u32, height: u32) {
        self.inner.borrow_mut().set_size(width, height)
    }

    fn render<F: FnOnce(&mut Self::ScenePainter<'_>)>(&mut self, draw_fn: F) {
        self.inner.borrow_mut().render(draw_fn)
    }
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/native-dom/src/lib.rs
packages/native-dom/src/lib.rs
#![cfg_attr(docsrs, feature(doc_cfg))] //! Core headless native renderer for Dioxus. //! //! ## Feature flags //! - `default`: Enables the features listed below. //! - `accessibility`: Enables [`accesskit`](https://docs.rs/accesskit/latest/accesskit/) accessibility support. //! - `hot-reload`: Enables hot-reloading of Dioxus RSX. //! - `menu`: Enables the [`muda`](https://docs.rs/muda/latest/muda/) menubar. //! - `tracing`: Enables tracing support. mod dioxus_document; mod events; mod mutation_writer; pub use blitz_dom::DocumentConfig; pub use dioxus_document::DioxusDocument; use blitz_dom::{ns, LocalName, Namespace, QualName}; type NodeId = usize; pub(crate) fn qual_name(local_name: &str, namespace: Option<&str>) -> QualName { QualName { prefix: None, ns: namespace.map(Namespace::from).unwrap_or(ns!(html)), local: LocalName::from(local_name), } } // Syntax sugar to make tracing calls less noisy in function below macro_rules! trace { ($pattern:literal) => {{ #[cfg(feature = "tracing")] tracing::debug!($pattern); }}; ($pattern:literal, $item1:expr) => {{ #[cfg(feature = "tracing")] tracing::debug!($pattern, $item1); }}; ($pattern:literal, $item1:expr, $item2:expr) => {{ #[cfg(feature = "tracing")] tracing::debug!($pattern, $item1, $item2); }}; ($pattern:literal, $item1:expr, $item2:expr, $item3:expr) => {{ #[cfg(feature = "tracing")] tracing::debug!($pattern, $item1, $item2); }}; ($pattern:literal, $item1:expr, $item2:expr, $item3:expr, $item4:expr) => {{ #[cfg(feature = "tracing")] tracing::debug!($pattern, $item1, $item2, $item3, $item4); }}; } pub(crate) use trace;
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/native-dom/src/mutation_writer.rs
packages/native-dom/src/mutation_writer.rs
//! Integration between Dioxus and Blitz

use crate::{qual_name, trace, NodeId};
use blitz_dom::{BaseDocument, DocumentMutator};
use blitz_traits::events::DomEventKind;
use dioxus_core::{
    AttributeValue, ElementId, Template, TemplateAttribute, TemplateNode, WriteMutations,
};
use rustc_hash::FxHashMap;
use std::str::FromStr as _;

/// The state of the Dioxus integration with the RealDom
#[derive(Debug)]
pub struct DioxusState {
    /// Store of templates keyed by unique name
    pub(crate) templates: FxHashMap<Template, Vec<NodeId>>,
    /// Stack machine state for applying dioxus mutations
    pub(crate) stack: Vec<NodeId>,
    /// Mapping from vdom ElementId -> rdom NodeId
    pub(crate) node_id_mapping: Vec<Option<NodeId>>,
    /// Count of each handler type
    pub(crate) event_handler_counts: [u32; 32],
}

impl DioxusState {
    /// Initialize the DioxusState in the RealDom
    pub fn create(root_id: usize) -> Self {
        Self {
            templates: FxHashMap::default(),
            stack: vec![root_id],
            node_id_mapping: vec![Some(root_id)],
            event_handler_counts: [0; 32],
        }
    }

    /// Convert an ElementId to a NodeId
    ///
    /// Panics if the ElementId has no mapping (a broken invariant).
    pub fn element_to_node_id(&self, element_id: ElementId) -> NodeId {
        self.try_element_to_node_id(element_id).unwrap()
    }

    /// Attempt to convert an ElementId to a NodeId. This will return None if the ElementId is not in the RealDom.
    pub fn try_element_to_node_id(&self, element_id: ElementId) -> Option<NodeId> {
        self.node_id_mapping.get(element_id.0).copied().flatten()
    }

    // Resolve the anchor node for `id` and pop the top `m` nodes off the
    // stack in one call (the common shape of insert/replace mutations).
    pub(crate) fn anchor_and_nodes(&mut self, id: ElementId, m: usize) -> (usize, Vec<usize>) {
        let anchor_node_id = self.element_to_node_id(id);
        let new_nodes = self.m_stack_nodes(m);
        (anchor_node_id, new_nodes)
    }

    // Pop the top `m` node ids off the stack, preserving their order.
    pub(crate) fn m_stack_nodes(&mut self, m: usize) -> Vec<usize> {
        self.stack.split_off(self.stack.len() - m)
    }
}

/// A writer for mutations that can be used with the RealDom.
pub struct MutationWriter<'a> {
    /// The realdom associated with this writer
    pub docm: DocumentMutator<'a>,

    /// The state associated with this writer
    pub state: &'a mut DioxusState,
}

impl<'a> MutationWriter<'a> {
    /// Borrow a mutator from `doc` and pair it with the Dioxus stack state.
    pub fn new(doc: &'a mut BaseDocument, state: &'a mut DioxusState) -> Self {
        MutationWriter {
            docm: doc.mutate(),
            state,
        }
    }
}

impl MutationWriter<'_> {
    /// Update an ElementId -> NodeId mapping
    fn set_id_mapping(&mut self, node_id: NodeId, element_id: ElementId) {
        let element_id: usize = element_id.0;

        // Ensure node_id_mapping is large enough to contain element_id
        if self.state.node_id_mapping.len() <= element_id {
            self.state.node_id_mapping.resize(element_id + 1, None);
        }

        // Set the new mapping
        self.state.node_id_mapping[element_id] = Some(node_id);
    }

    /// Create a ElementId -> NodeId mapping and push the node to the stack
    fn map_new_node(&mut self, node_id: NodeId, element_id: ElementId) {
        self.set_id_mapping(node_id, element_id);
        self.state.stack.push(node_id);
    }

    /// Find a child in the document by child index path
    fn load_child(&self, path: &[u8]) -> NodeId {
        let top_of_stack_node_id = *self.state.stack.last().unwrap();
        self.docm.node_at_path(top_of_stack_node_id, path)
    }
}

impl WriteMutations for MutationWriter<'_> {
    fn assign_node_id(&mut self, path: &'static [u8], id: ElementId) {
        trace!("assign_node_id path:{:?} id:{}", path, id.0);

        // If there is an existing node already mapped to that ID and it has no parent, then drop it
        // TODO: more automated GC/ref-counted semantics for node lifetimes
        if let Some(node_id) = self.state.try_element_to_node_id(id) {
            self.docm.remove_node_if_unparented(node_id);
        }

        // Map the node at specified path
        self.set_id_mapping(self.load_child(path), id);
    }

    fn create_placeholder(&mut self, id: ElementId) {
        trace!("create_placeholder id:{}", id.0);
        // Placeholders are represented as comment nodes in the real DOM.
        let node_id = self.docm.create_comment_node();
        self.map_new_node(node_id, id);
    }

    fn create_text_node(&mut self, value: &str, id: ElementId) {
        trace!("create_text_node id:{} text:{}", id.0, value);
        let node_id = self.docm.create_text_node(value);
        self.map_new_node(node_id, id);
    }

    fn append_children(&mut self, id: ElementId, m: usize) {
        trace!("append_children id:{} m:{}", id.0, m);
        let (parent_id, child_node_ids) = self.state.anchor_and_nodes(id, m);
        self.docm.append_children(parent_id, &child_node_ids);
    }

    fn insert_nodes_after(&mut self, id: ElementId, m: usize) {
        trace!("insert_nodes_after id:{} m:{}", id.0, m);
        let (anchor_node_id, new_node_ids) = self.state.anchor_and_nodes(id, m);
        self.docm.insert_nodes_after(anchor_node_id, &new_node_ids);
    }

    fn insert_nodes_before(&mut self, id: ElementId, m: usize) {
        trace!("insert_nodes_before id:{} m:{}", id.0, m);
        let (anchor_node_id, new_node_ids) = self.state.anchor_and_nodes(id, m);
        self.docm.insert_nodes_before(anchor_node_id, &new_node_ids);
    }

    fn replace_node_with(&mut self, id: ElementId, m: usize) {
        trace!("replace_node_with id:{} m:{}", id.0, m);
        let (anchor_node_id, new_node_ids) = self.state.anchor_and_nodes(id, m);
        self.docm.replace_node_with(anchor_node_id, &new_node_ids);
    }

    fn replace_placeholder_with_nodes(&mut self, path: &'static [u8], m: usize) {
        trace!("replace_placeholder_with_nodes path:{:?} m:{}", path, m);

        // WARNING: DO NOT REORDER
        // The order of the following two lines is very important as "m_stack_nodes" mutates
        // the stack and then "load_child" reads from the top of the stack.
        let new_node_ids = self.state.m_stack_nodes(m);
        let anchor_node_id = self.load_child(path);

        self.docm.replace_node_with(anchor_node_id, &new_node_ids);
    }

    fn remove_node(&mut self, id: ElementId) {
        trace!("remove_node id:{}", id.0);
        let node_id = self.state.element_to_node_id(id);
        self.docm.remove_node(node_id);
    }

    fn push_root(&mut self, id: ElementId) {
        trace!("push_root id:{}", id.0);
        let node_id = self.state.element_to_node_id(id);
        self.state.stack.push(node_id);
    }

    fn set_node_text(&mut self, value: &str, id: ElementId) {
        trace!("set_node_text id:{} value:{}", id.0, value);
        let node_id = self.state.element_to_node_id(id);
        self.docm.set_node_text(node_id, value);
    }

    fn set_attribute(
        &mut self,
        local_name: &'static str,
        ns: Option<&'static str>,
        value: &AttributeValue,
        id: ElementId,
    ) {
        let node_id = self.state.element_to_node_id(id);

        // "Falsy" values are special-cased below so that e.g. `checked=false`
        // clears the attribute instead of setting it.
        fn is_falsy(val: &AttributeValue) -> bool {
            match val {
                AttributeValue::None => true,
                AttributeValue::Text(val) => val == "false",
                AttributeValue::Bool(val) => !val,
                AttributeValue::Int(val) => *val == 0,
                AttributeValue::Float(val) => *val == 0.0,
                _ => false,
            }
        }
        let falsy = is_falsy(value);

        // Non-text values are stringified before being written to the DOM.
        match value {
            AttributeValue::None => {
                set_attribute_inner(&mut self.docm, local_name, ns, None, falsy, node_id)
            }
            AttributeValue::Text(value) => {
                set_attribute_inner(&mut self.docm, local_name, ns, Some(value), falsy, node_id)
            }
            AttributeValue::Float(value) => {
                let value = value.to_string();
                set_attribute_inner(&mut self.docm, local_name, ns, Some(&value), falsy, node_id);
            }
            AttributeValue::Int(value) => {
                let value = value.to_string();
                set_attribute_inner(&mut self.docm, local_name, ns, Some(&value), falsy, node_id);
            }
            AttributeValue::Bool(value) => {
                let value = value.to_string();
                set_attribute_inner(&mut self.docm, local_name, ns, Some(&value), falsy, node_id);
            }
            _ => {
                // FIXME: support all attribute types
            }
        };
    }

    fn load_template(&mut self, template: Template, index: usize, id: ElementId) {
        // TODO: proper template node support
        // Instantiate the template's root nodes once, then deep-clone the
        // requested root on every subsequent load.
        let template_entry = self.state.templates.entry(template).or_insert_with(|| {
            let template_root_ids: Vec<NodeId> = template
                .roots
                .iter()
                .map(|root| create_template_node(&mut self.docm, root))
                .collect();

            template_root_ids
        });

        let template_node_id = template_entry[index];
        let clone_id = self.docm.deep_clone_node(template_node_id);
        trace!("load_template template_node_id:{template_node_id} clone_id:{clone_id}");
        self.map_new_node(clone_id, id);
    }

    fn create_event_listener(&mut self, name: &'static str, id: ElementId) {
        // We're going to actually set the listener here as a placeholder - in JS this would also be a placeholder
        // we might actually just want to attach the attribute to the root element (delegation)
        let value = AttributeValue::Text("<rust func>".into());
        self.set_attribute(name, None, &value, id);

        // Also set the data-dioxus-id attribute so we can find the element later
        let value = AttributeValue::Text(id.0.to_string());
        self.set_attribute("data-dioxus-id", None, &value, id);

        // node.add_event_listener(name);
        // Keep a per-kind count of live handlers.
        if let Ok(kind) = DomEventKind::from_str(name) {
            let idx = kind.discriminant() as usize;
            self.state.event_handler_counts[idx] += 1;
        }
    }

    fn remove_event_listener(&mut self, name: &'static str, _id: ElementId) {
        if let Ok(kind) = DomEventKind::from_str(name) {
            let idx = kind.discriminant() as usize;
            self.state.event_handler_counts[idx] -= 1;
        }
    }
}

// Recursively materialise a TemplateNode (and its subtree) in the real DOM,
// returning the id of the created node. Dynamic nodes become comment
// placeholders to be filled in later.
fn create_template_node(docm: &mut DocumentMutator<'_>, node: &TemplateNode) -> NodeId {
    match node {
        TemplateNode::Element {
            tag,
            namespace,
            attrs,
            children,
        } => {
            let name = qual_name(tag, *namespace);
            // let attrs = attrs.iter().filter_map(map_template_attr).collect();
            let node_id = docm.create_element(name, Vec::new());

            // Only static attributes exist at template-creation time.
            for attr in attrs.iter() {
                let TemplateAttribute::Static {
                    name,
                    value,
                    namespace,
                } = attr
                else {
                    continue;
                };
                let falsy = *value == "false";
                set_attribute_inner(docm, name, *namespace, Some(value), falsy, node_id);
            }

            let child_ids: Vec<NodeId> = children
                .iter()
                .map(|child| create_template_node(docm, child))
                .collect();
            docm.append_children(node_id, &child_ids);

            node_id
        }
        TemplateNode::Text { text } => docm.create_text_node(text),
        TemplateNode::Dynamic { .. } => docm.create_comment_node(),
    }
}

// Shared attribute-writing path: handles the overloaded `style` namespace,
// falsy `checked`, and `dangerous_inner_html` special cases.
fn set_attribute_inner(
    docm: &mut DocumentMutator<'_>,
    local_name: &'static str,
    ns: Option<&'static str>,
    value: Option<&str>,
    is_falsy: bool,
    node_id: usize,
) {
    trace!("set_attribute node_id:{node_id} ns: {ns:?} name:{local_name}, value:{value:?}");

    // Dioxus has overloaded the style namespace to accumulate style attributes without a `style` block
    // TODO: accumulate style attributes into a single style element.
    if ns == Some("style") {
        match value {
            Some(value) => docm.set_style_property(node_id, local_name, value),
            None => docm.remove_style_property(node_id, local_name),
        }
        return;
    }

    let name = qual_name(local_name, ns);

    // FIXME: more principled handling of special case attributes
    match value {
        None => docm.clear_attribute(node_id, name),
        Some(value) => {
            if local_name == "checked" && is_falsy {
                docm.clear_attribute(node_id, name);
            } else if local_name == "dangerous_inner_html" {
                docm.set_inner_html(node_id, value);
            } else {
                docm.set_attribute(node_id, name, value);
            }
        }
    }
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/native-dom/src/dioxus_document.rs
packages/native-dom/src/dioxus_document.rs
//! Integration between Dioxus and Blitz use crate::events::{BlitzKeyboardData, NativeClickData, NativeConverter, NativeFormData}; use crate::mutation_writer::{DioxusState, MutationWriter}; use crate::qual_name; use crate::NodeId; use blitz_dom::{ Attribute, BaseDocument, Document, DocumentConfig, EventDriver, EventHandler, Node, DEFAULT_CSS, }; use blitz_traits::events::{DomEvent, DomEventData, EventState, UiEvent}; use dioxus_core::{ElementId, Event, VirtualDom}; use dioxus_html::{set_event_converter, PlatformEventData}; use futures_util::task::noop_waker; use futures_util::{pin_mut, FutureExt}; use std::ops::{Deref, DerefMut}; use std::sync::LazyLock; use std::task::{Context as TaskContext, Waker}; use std::{any::Any, rc::Rc}; fn wrap_event_data<T: Any>(value: T) -> Rc<dyn Any> { Rc::new(PlatformEventData::new(Box::new(value))) } /// Get the value of the "dioxus-data-id" attribute parsed aa usize fn get_dioxus_id(node: &Node) -> Option<ElementId> { node.element_data()? .attrs .iter() .find(|attr| *attr.name.local == *"data-dioxus-id") .and_then(|attr| attr.value.parse::<usize>().ok()) .map(ElementId) } /// Integrates [`BaseDocument`] from [`blitz-dom`](blitz_dom) with [`VirtualDom`] from [`dioxus-core`](dioxus_core) /// /// ### Example /// /// ```rust /// use blitz_traits::shell::{Viewport, ColorScheme}; /// use dioxus_native_dom::{DioxusDocument, DocumentConfig}; /// use dioxus::prelude::*; /// /// // Example Dioxus app /// fn app() -> Element { /// rsx! { /// div { "Hello, world!" 
} /// } /// } /// /// fn main() { /// let vdom = VirtualDom::new(app); /// let mut doc = DioxusDocument::new(vdom, DocumentConfig { /// viewport: Some(Viewport::new(800, 600, 1.0, ColorScheme::Light)), /// ..Default::default() /// }); /// doc.initial_build(); /// } /// ``` /// /// You can just push events into the [`DioxusDocument`] with [`doc.handle_ui_event(..)`](Self::handle_ui_event) /// and then flush the changes with [`doc.poll(..)`](Self::poll) pub struct DioxusDocument { pub inner: BaseDocument, pub vdom: VirtualDom, pub vdom_state: DioxusState, #[allow(unused)] pub(crate) html_element_id: NodeId, #[allow(unused)] pub(crate) head_element_id: NodeId, #[allow(unused)] pub(crate) body_element_id: NodeId, #[allow(unused)] pub(crate) main_element_id: NodeId, } impl DioxusDocument { /// Create a new [`DioxusDocument`] from a [`VirtualDom`]. pub fn new(vdom: VirtualDom, mut config: DocumentConfig) -> Self { // Only really needs to happen once set_event_converter(Box::new(NativeConverter {})); config.base_url = Some( config .base_url .unwrap_or_else(|| String::from("dioxus://index.html")), ); let mut doc = BaseDocument::new(config); // Include default stylesheet doc.add_user_agent_stylesheet(DEFAULT_CSS); // Create some minimal HTML to render the app into. 
// HTML is equivalent to: // // <html> // <head></head> // <body> // <div id="main"></div> // </body> // </html> // // TODO: Support arbitrary "index.html" templates // Create the html element let mut mutr = doc.mutate(); let html_element_id = mutr.create_element(qual_name("html", None), vec![]); mutr.append_children(mutr.doc.root_node().id, &[html_element_id]); // Create the body element let head_element_id = mutr.create_element(qual_name("head", None), vec![]); mutr.append_children(html_element_id, &[head_element_id]); // Create the body element let body_element_id = mutr.create_element(qual_name("body", None), vec![]); mutr.append_children(html_element_id, &[body_element_id]); // Create another virtual element to hold the root <div id="main"></div> under the html element let main_attr = blitz_dom::Attribute { name: qual_name("id", None), value: "main".to_string(), }; let main_element_id = mutr.create_element(qual_name("main", None), vec![main_attr]); mutr.append_children(body_element_id, &[main_element_id]); drop(mutr); let vdom_state = DioxusState::create(main_element_id); Self { vdom, vdom_state, inner: doc, html_element_id, head_element_id, body_element_id, main_element_id, } } /// Run an initial build of the Dioxus vdom pub fn initial_build(&mut self) { let mut writer = MutationWriter::new(&mut self.inner, &mut self.vdom_state); self.vdom.rebuild(&mut writer); } /// Used to respond to a `CreateHeadElement` event generated by Dioxus. These /// events allow Dioxus to create elements in the `<head>` of the document. 
#[doc(hidden)] pub fn create_head_element( &mut self, name: &str, attributes: &[(String, String)], contents: &Option<String>, ) { let mut mutr = self.inner.mutate(); let attributes = attributes .iter() .map(|(name, value)| Attribute { name: qual_name(name, None), value: value.clone(), }) .collect(); let new_elem_id = mutr.create_element(qual_name(name, None), attributes); mutr.append_children(self.head_element_id, &[new_elem_id]); if let Some(contents) = contents { let text_node_id = mutr.create_text_node(contents); mutr.append_children(new_elem_id, &[text_node_id]); } } } // Implement DocumentLike and required traits for DioxusDocument impl Document for DioxusDocument { fn id(&self) -> usize { self.inner.id() } fn as_any_mut(&mut self) -> &mut dyn Any { self } fn poll(&mut self, cx: Option<TaskContext>) -> bool { { let fut = self.vdom.wait_for_work(); pin_mut!(fut); static NOOP_WAKER: LazyLock<Waker> = LazyLock::new(noop_waker); let mut cx = cx.unwrap_or_else(|| TaskContext::from_waker(&NOOP_WAKER)); match fut.poll_unpin(&mut cx) { std::task::Poll::Ready(_) => {} std::task::Poll::Pending => return false, } } let mut writer = MutationWriter::new(&mut self.inner, &mut self.vdom_state); self.vdom.render_immediate(&mut writer); true } fn handle_ui_event(&mut self, event: UiEvent) { let handler = DioxusEventHandler { vdom: &mut self.vdom, vdom_state: &mut self.vdom_state, }; let mut driver = EventDriver::new(self.inner.mutate(), handler); driver.handle_ui_event(event); } } impl Deref for DioxusDocument { type Target = BaseDocument; fn deref(&self) -> &BaseDocument { &self.inner } } impl DerefMut for DioxusDocument { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } impl From<DioxusDocument> for BaseDocument { fn from(doc: DioxusDocument) -> BaseDocument { doc.inner } } pub struct DioxusEventHandler<'v> { vdom: &'v mut VirtualDom, #[allow(dead_code, reason = "WIP")] vdom_state: &'v mut DioxusState, } impl EventHandler for DioxusEventHandler<'_> { fn 
handle_event( &mut self, chain: &[usize], event: &mut DomEvent, mutr: &mut blitz_dom::DocumentMutator<'_>, event_state: &mut EventState, ) { // As an optimisation we maintain a count of the total number event handlers of a given type // If this count is zero then we can skip handling that kind of event entirely. let event_kind_idx = event.data.discriminant() as usize; let event_kind_count = self.vdom_state.event_handler_counts[event_kind_idx]; if event_kind_count == 0 { return; } let event_data = match &event.data { DomEventData::MouseMove(mevent) | DomEventData::MouseDown(mevent) | DomEventData::MouseUp(mevent) | DomEventData::Click(mevent) => Some(wrap_event_data(NativeClickData(mevent.clone()))), DomEventData::KeyDown(kevent) | DomEventData::KeyUp(kevent) | DomEventData::KeyPress(kevent) => { Some(wrap_event_data(BlitzKeyboardData(kevent.clone()))) } DomEventData::Input(data) => Some(wrap_event_data(NativeFormData { value: data.value.clone(), values: vec![], })), // TODO: Implement IME handling DomEventData::Ime(_) => None, }; let Some(event_data) = event_data else { return; }; for &node_id in chain { // Get dioxus vdom id for node let dioxus_id = mutr.doc.get_node(node_id).and_then(get_dioxus_id); let Some(id) = dioxus_id else { continue; }; // Handle event in vdom let dx_event = Event::new(event_data.clone(), event.bubbles); self.vdom .runtime() .handle_event(event.name(), dx_event.clone(), id); // Update event state if !dx_event.default_action_enabled() { event_state.prevent_default(); } if !dx_event.propagates() { event_state.stop_propagation(); break; } } } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/native-dom/src/events.rs
packages/native-dom/src/events.rs
use blitz_traits::events::{BlitzKeyEvent, BlitzMouseButtonEvent, MouseEventButton}; use dioxus_html::{ geometry::{ClientPoint, ElementPoint, PagePoint, ScreenPoint}, input_data::{MouseButton, MouseButtonSet}, point_interaction::{ InteractionElementOffset, InteractionLocation, ModifiersInteraction, PointerInteraction, }, AnimationData, CancelData, ClipboardData, CompositionData, DragData, FocusData, FormData, FormValue, HasFileData, HasFocusData, HasFormData, HasKeyboardData, HasMouseData, HtmlEventConverter, ImageData, KeyboardData, MediaData, MountedData, MouseData, PlatformEventData, PointerData, ResizeData, ScrollData, SelectionData, ToggleData, TouchData, TransitionData, VisibleData, WheelData, }; use keyboard_types::{Code, Key, Location, Modifiers}; use std::any::Any; pub struct NativeConverter {} impl HtmlEventConverter for NativeConverter { fn convert_cancel_data(&self, _event: &PlatformEventData) -> CancelData { unimplemented!("todo: convert_cancel_data in dioxus-native. requires support in blitz") } fn convert_form_data(&self, event: &PlatformEventData) -> FormData { event.downcast::<NativeFormData>().unwrap().clone().into() } fn convert_mouse_data(&self, event: &PlatformEventData) -> MouseData { event.downcast::<NativeClickData>().unwrap().clone().into() } fn convert_keyboard_data(&self, event: &PlatformEventData) -> KeyboardData { event .downcast::<BlitzKeyboardData>() .unwrap() .clone() .into() } fn convert_focus_data(&self, _event: &PlatformEventData) -> FocusData { NativeFocusData {}.into() } fn convert_animation_data(&self, _event: &PlatformEventData) -> AnimationData { unimplemented!("todo: convert_animation_data in dioxus-native. requires support in blitz") } fn convert_clipboard_data(&self, _event: &PlatformEventData) -> ClipboardData { unimplemented!("todo: convert_clipboard_data in dioxus-native. 
requires support in blitz") } fn convert_composition_data(&self, _event: &PlatformEventData) -> CompositionData { unimplemented!("todo: convert_composition_data in dioxus-native. requires support in blitz") } fn convert_drag_data(&self, _event: &PlatformEventData) -> DragData { unimplemented!("todo: convert_drag_data in dioxus-native. requires support in blitz") } fn convert_image_data(&self, _event: &PlatformEventData) -> ImageData { unimplemented!("todo: convert_image_data in dioxus-native. requires support in blitz") } fn convert_media_data(&self, _event: &PlatformEventData) -> MediaData { unimplemented!("todo: convert_media_data in dioxus-native. requires support in blitz") } fn convert_mounted_data(&self, _event: &PlatformEventData) -> MountedData { unimplemented!("todo: convert_mounted_data in dioxus-native. requires support in blitz") } fn convert_pointer_data(&self, _event: &PlatformEventData) -> PointerData { unimplemented!("todo: convert_pointer_data in dioxus-native. requires support in blitz") } fn convert_scroll_data(&self, _event: &PlatformEventData) -> ScrollData { unimplemented!("todo: convert_scroll_data in dioxus-native. requires support in blitz") } fn convert_selection_data(&self, _event: &PlatformEventData) -> SelectionData { unimplemented!("todo: convert_selection_data in dioxus-native. requires support in blitz") } fn convert_toggle_data(&self, _event: &PlatformEventData) -> ToggleData { unimplemented!("todo: convert_toggle_data in dioxus-native. requires support in blitz") } fn convert_touch_data(&self, _event: &PlatformEventData) -> TouchData { unimplemented!("todo: convert_touch_data in dioxus-native. requires support in blitz") } fn convert_transition_data(&self, _event: &PlatformEventData) -> TransitionData { unimplemented!("todo: convert_transition_data in dioxus-native. requires support in blitz") } fn convert_wheel_data(&self, _event: &PlatformEventData) -> WheelData { unimplemented!("todo: convert_wheel_data in dioxus-native. 
requires support in blitz") } fn convert_resize_data(&self, _event: &PlatformEventData) -> ResizeData { unimplemented!("todo: convert_resize_data in dioxus-native. requires support in blitz") } fn convert_visible_data(&self, _event: &PlatformEventData) -> VisibleData { unimplemented!("todo: convert_visible_data in dioxus-native. requires support in blitz") } } #[derive(Clone, Debug)] pub struct NativeFormData { pub value: String, pub values: Vec<(String, FormValue)>, } impl HasFormData for NativeFormData { fn as_any(&self) -> &dyn std::any::Any { self as &dyn std::any::Any } fn value(&self) -> String { self.value.clone() } fn values(&self) -> Vec<(String, FormValue)> { self.values.clone() } fn valid(&self) -> bool { // todo: actually implement validation here. true } } impl HasFileData for NativeFormData { fn files(&self) -> Vec<dioxus_html::FileData> { vec![] } } #[derive(Clone, Debug)] pub(crate) struct BlitzKeyboardData(pub(crate) BlitzKeyEvent); impl ModifiersInteraction for BlitzKeyboardData { fn modifiers(&self) -> Modifiers { self.0.modifiers } } impl HasKeyboardData for BlitzKeyboardData { fn key(&self) -> Key { self.0.key.clone() } fn code(&self) -> Code { self.0.code } fn location(&self) -> Location { self.0.location } fn is_auto_repeating(&self) -> bool { self.0.is_auto_repeating } fn is_composing(&self) -> bool { self.0.is_composing } fn as_any(&self) -> &dyn std::any::Any { self as &dyn Any } } #[derive(Clone)] pub struct NativeClickData(pub(crate) BlitzMouseButtonEvent); impl InteractionLocation for NativeClickData { fn client_coordinates(&self) -> ClientPoint { ClientPoint::new(self.0.x as _, self.0.y as _) } // these require blitz to pass them along, or a dom rect fn screen_coordinates(&self) -> ScreenPoint { unimplemented!() } fn page_coordinates(&self) -> PagePoint { unimplemented!() } } impl InteractionElementOffset for NativeClickData { fn element_coordinates(&self) -> ElementPoint { unimplemented!() } } impl ModifiersInteraction for 
NativeClickData { fn modifiers(&self) -> Modifiers { self.0.mods } } impl PointerInteraction for NativeClickData { fn trigger_button(&self) -> Option<MouseButton> { Some(match self.0.button { MouseEventButton::Main => MouseButton::Primary, MouseEventButton::Auxiliary => MouseButton::Auxiliary, MouseEventButton::Secondary => MouseButton::Secondary, MouseEventButton::Fourth => MouseButton::Fourth, MouseEventButton::Fifth => MouseButton::Fifth, }) } fn held_buttons(&self) -> MouseButtonSet { dioxus_html::input_data::decode_mouse_button_set(self.0.buttons.bits() as u16) } } impl HasMouseData for NativeClickData { fn as_any(&self) -> &dyn std::any::Any { self as &dyn std::any::Any } } #[derive(Clone)] pub struct NativeFocusData {} impl HasFocusData for NativeFocusData { fn as_any(&self) -> &dyn std::any::Any { self as &dyn std::any::Any } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/logger/src/lib.rs
packages/logger/src/lib.rs
use tracing::{ subscriber::{set_global_default, SetGlobalDefaultError}, Level, }; pub use tracing; /// Attempt to initialize the subscriber if it doesn't already exist, with default settings. /// /// See [`crate::init`] for more info. /// /// If you're doing setup before your `dioxus::launch` function that requires lots of logging, then /// it might be worth calling this earlier than launch. /// /// `dioxus::launch` calls this for you automatically and won't replace any facade you've already set. /// /// # Example /// /// ```rust, ignore /// use dioxus::prelude::*; /// use tracing::info; /// /// fn main() { /// dioxus::logger::initialize_default(); /// /// info!("Doing some work before launching..."); /// /// dioxus::launch(App); /// } /// /// #[component] /// fn App() -> Element { /// info!("App rendered"); /// rsx! { /// p { "hi" } /// } /// } /// ``` pub fn initialize_default() { if tracing::dispatcher::has_been_set() { return; } if cfg!(debug_assertions) { _ = init(Level::DEBUG); } else { _ = init(Level::INFO); } } /// Initialize `dioxus-logger` with a specified max filter. /// /// Generally it is best to initialize the logger before launching your Dioxus app. /// Works on Web, Desktop, Fullstack, and Liveview. /// /// # Example /// /// ```rust, ignore /// use dioxus::prelude::*; /// use dioxus::logger::tracing::{Level, info}; /// /// fn main() { /// dioxus::logger::init(Level::INFO).expect("logger failed to init"); /// dioxus::launch(App); /// } /// /// #[component] /// fn App() -> Element { /// info!("App rendered"); /// rsx! { /// p { "hi" } /// } /// } /// ``` pub fn init(level: Level) -> Result<(), SetGlobalDefaultError> { /* The default logger is currently set to log in fmt mode (meaning print directly to stdout) Eventually we want to change the output mode to be `json` when running under `dx`. 
This would let use re-format the tracing spans to be better integrated with `dx` */ #[cfg(target_arch = "wasm32")] { use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::Registry; let layer_config = tracing_wasm::WASMLayerConfigBuilder::new() .set_max_level(level) .build(); let layer = tracing_wasm::WASMLayer::new(layer_config); let reg = Registry::default().with(layer); set_global_default(reg) } #[cfg(not(target_arch = "wasm32"))] { let sub = tracing_subscriber::FmtSubscriber::builder() .with_max_level(level) .with_env_filter( tracing_subscriber::EnvFilter::builder() .with_default_directive(level.into()) .from_env_lossy() .add_directive("hyper_util=warn".parse().unwrap()), // hyper has `debug!` sitting around in some places that are spammy ); if !dioxus_cli_config::is_cli_enabled() { return set_global_default(sub.finish()); } // todo(jon): this is a small hack to clean up logging when running under the CLI // eventually we want to emit everything as json and let the CLI manage the parsing + display set_global_default(sub.without_time().with_target(false).finish()) } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/devtools/src/lib.rs
packages/devtools/src/lib.rs
use dioxus_core::internal::HotReloadedTemplate; use dioxus_core::{ScopeId, VirtualDom}; use dioxus_signals::{GlobalKey, Signal, WritableExt}; pub use dioxus_devtools_types::*; pub use subsecond; use subsecond::PatchError; /// Applies template and literal changes to the VirtualDom /// /// Assets need to be handled by the renderer. pub fn apply_changes(dom: &VirtualDom, msg: &HotReloadMsg) { try_apply_changes(dom, msg).unwrap() } /// Applies template and literal changes to the VirtualDom, but doesn't panic if patching fails. /// /// Assets need to be handled by the renderer. pub fn try_apply_changes(dom: &VirtualDom, msg: &HotReloadMsg) -> Result<(), PatchError> { dom.runtime().in_scope(ScopeId::ROOT, || { // 1. Update signals... let ctx = dioxus_signals::get_global_context(); for template in &msg.templates { let value = template.template.clone(); let key = GlobalKey::File { file: template.key.file.as_str(), line: template.key.line as _, column: template.key.column as _, index: template.key.index as _, }; if let Some(mut signal) = ctx.get_signal_with_key(key.clone()) { signal.set(Some(value)); } } // 2. Attempt to hotpatch if let Some(jump_table) = msg.jump_table.as_ref().cloned() { if msg.for_build_id == Some(dioxus_cli_config::build_id()) { let our_pid = if cfg!(target_family = "wasm") { None } else { Some(std::process::id()) }; if msg.for_pid == our_pid { unsafe { subsecond::apply_patch(jump_table) }?; dom.runtime().force_all_dirty(); ctx.clear::<Signal<Option<HotReloadedTemplate>>>(); } } } Ok(()) }) } /// Connect to the devserver and handle its messages with a callback. /// /// This doesn't use any form of security or protocol, so it's not safe to expose to the internet. 
#[cfg(not(target_family = "wasm"))] pub fn connect(callback: impl FnMut(DevserverMsg) + Send + 'static) { let Some(endpoint) = dioxus_cli_config::devserver_ws_endpoint() else { return; }; connect_at(endpoint, callback); } /// Connect to the devserver and handle hot-patch messages only, implementing the subsecond hotpatch /// protocol. /// /// This is intended to be used by non-dioxus projects that want to use hotpatching. /// /// To handle the full devserver protocol, use `connect` instead. #[cfg(not(target_family = "wasm"))] pub fn connect_subsecond() { connect(|msg| { if let DevserverMsg::HotReload(hot_reload_msg) = msg { if let Some(jumptable) = hot_reload_msg.jump_table { if hot_reload_msg.for_pid == Some(std::process::id()) { unsafe { subsecond::apply_patch(jumptable).unwrap() }; } } } }); } #[cfg(not(target_family = "wasm"))] pub fn connect_at(endpoint: String, mut callback: impl FnMut(DevserverMsg) + Send + 'static) { std::thread::spawn(move || { let uri = format!( "{endpoint}?aslr_reference={}&build_id={}&pid={}", subsecond::aslr_reference(), dioxus_cli_config::build_id(), std::process::id() ); let (mut websocket, _req) = match tungstenite::connect(uri) { Ok((websocket, req)) => (websocket, req), Err(_) => return, }; while let Ok(msg) = websocket.read() { if let tungstenite::Message::Text(text) = msg { if let Ok(msg) = serde_json::from_str(&text) { callback(msg); } } } }); } /// Run this asynchronous future to completion. /// /// Whenever your code changes, the future is dropped and a new one is created using the new function. /// /// This is useful for using subsecond outside of dioxus, like with axum. To pass args to the underlying /// function, you can use the `serve_subsecond_with_args` function. 
///
/// ```rust, ignore
/// #[tokio::main]
/// async fn main() {
///     dioxus_devtools::serve_subsecond(router_main).await;
/// }
///
/// async fn router_main() {
///     use axum::{Router, routing::get};
///
///     let app = Router::new().route("/", get(test_route));
///
///     let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap();
///     println!("Server running on http://localhost:3000");
///
///     axum::serve(listener, app.clone()).await.unwrap()
/// }
///
/// async fn test_route() -> axum::response::Html<&'static str> {
///     "axum works!!!!!".into()
/// }
/// ```
#[cfg(feature = "serve")]
#[cfg(not(target_family = "wasm"))]
pub async fn serve_subsecond<O, F>(mut callback: impl FnMut() -> F)
where
    F: std::future::Future<Output = O> + 'static,
{
    // Delegate to the args-taking variant with a unit argument.
    serve_subsecond_with_args((), move |_args| callback()).await
}

/// Run this asynchronous future to completion.
///
/// Whenever your code changes, the future is dropped and a new one is created using the new function.
///
/// ```rust, ignore
/// #[tokio::main]
/// async fn main() {
///     let args = ("abc".to_string(),);
///     dioxus_devtools::serve_subsecond_with_args(args, router_main).await;
/// }
///
/// async fn router_main(args: (String,)) {
///     use axum::{Router, routing::get};
///
///     let app = Router::new().route("/", get(test_route));
///
///     let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap();
///     println!("Server running on http://localhost:3000 -> {}", args.0);
///
///     axum::serve(listener, app.clone()).await.unwrap()
/// }
///
/// async fn test_route() -> axum::response::Html<&'static str> {
///     "axum works!!!!!".into()
/// }
/// ```
#[cfg(feature = "serve")]
pub async fn serve_subsecond_with_args<A: Clone, O, F>(args: A, mut callback: impl FnMut(A) -> F)
where
    F: std::future::Future<Output = O> + 'static,
{
    // Channel used to signal "a patch was applied, restart the future".
    let (tx, mut rx) = futures_channel::mpsc::unbounded();

    // Listen for hot-patch messages on a background thread; apply the patch and then
    // notify the loop below.
    connect(move |msg| {
        if let DevserverMsg::HotReload(hot_reload_msg) = msg {
            if let Some(jumptable) = hot_reload_msg.jump_table {
                if hot_reload_msg.for_pid == Some(std::process::id()) {
                    unsafe { subsecond::apply_patch(jumptable).unwrap() };
                    tx.unbounded_send(()).unwrap();
                }
            }
        }
    });

    // Box the future so `HotFn` sees a concrete (non-generic) return type.
    let wrapped = move |args| -> std::pin::Pin<Box<dyn std::future::Future<Output = O>>> {
        Box::pin(callback(args))
    };

    // `HotFn::current` resolves to the latest patched version of `wrapped` on each call.
    let mut hotfn = subsecond::HotFn::current(wrapped);
    let mut cur_future = hotfn.call((args.clone(),));

    loop {
        use futures_util::StreamExt;
        let res = futures_util::future::select(cur_future, rx.next()).await;
        match res {
            // The future finished on its own: park until the next patch arrives,
            // then fall through and restart it.
            futures_util::future::Either::Left(_completed) => _ = rx.next().await,
            // A patch arrived: `select` already dropped the old future; restart below.
            futures_util::future::Either::Right(_reload) => {}
        }
        cur_future = hotfn.call((args.clone(),));
    }
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/dioxus/src/launch.rs
packages/dioxus/src/launch.rs
#![allow(clippy::new_without_default)]
#![allow(unused)]

use dioxus_config_macro::*;
use dioxus_core::{Element, LaunchConfig};
use std::any::Any;

use crate::prelude::*;

/// Launch your Dioxus application with the given root component, context and config.
/// The platform will be determined from cargo features.
///
/// For a builder API, see `LaunchBuilder` defined in the `dioxus` crate.
///
/// # Feature selection
///
/// - `web`: Enables the web platform.
/// - `desktop`: Enables the desktop platform.
/// - `mobile`: Enables the mobile (ios + android webview) platform.
/// - `server`: Enables the server (axum + server-side-rendering) platform.
/// - `liveview`: Enables the liveview (websocket) platform.
/// - `native`: Enables the native (wgpu + winit renderer) platform.
///
/// Currently `native` is its own platform that is not compatible with desktop or mobile since it
/// unifies both platforms into one. If "desktop" and "native" are enabled, then the native renderer
/// will be used.
///
/// # Feature priority
///
/// If multiple renderers are enabled, the order of priority goes:
///
/// 1. liveview
/// 2. server
/// 3. native
/// 4. desktop
/// 5. mobile
/// 6. web
///
/// However, we don't recommend enabling multiple renderers at the same time due to feature conflicts
/// and bloating of the binary size.
///
/// # Example
/// ```rust, no_run
/// use dioxus::prelude::*;
///
/// fn main() {
///     dioxus::launch(app);
/// }
///
/// fn app() -> Element {
///     rsx! {
///         div { "Hello, world!" }
///     }
/// }
/// ```
pub fn launch(app: fn() -> Element) {
    // `LaunchBuilder::new()` is marked deprecated when no renderer feature is enabled;
    // that warning is aimed at users, not this internal call site.
    #[allow(deprecated)]
    LaunchBuilder::new().launch(app)
}

/// A builder for a fullstack app.
#[must_use]
pub struct LaunchBuilder {
    // Which renderer `launch` will dispatch to.
    platform: KnownPlatform,
    // Context providers invoked on the launch thread to seed root-scope context.
    contexts: Vec<ContextFn>,
    // Platform-specific configuration objects, downcast by the chosen renderer.
    configs: Vec<Box<dyn Any>>,
}

pub type LaunchFn = fn(fn() -> Element, Vec<ContextFn>, Vec<Box<dyn Any>>);

/// A context function is a Send and Sync closure that returns a boxed trait object
pub type ContextFn = Box<dyn Fn() -> Box<dyn Any> + Send + Sync + 'static>;

// The renderer the builder will launch with; `Other` carries a third-party launch fn.
enum KnownPlatform {
    Web,
    Desktop,
    Mobile,
    Server,
    Liveview,
    Native,
    Other(LaunchFn),
}

#[allow(clippy::redundant_closure)] // clippy doesnt doesn't understand our coercion to fn
impl LaunchBuilder {
    /// Create a new builder for your application. This will create a launch configuration for the current platform based on the features enabled on the `dioxus` crate.
    // If you aren't using a third party renderer and this is not a docs.rs build, generate a warning about no renderer being enabled
    #[cfg_attr(
        all(not(any(
            docsrs,
            feature = "third-party-renderer",
            feature = "liveview",
            feature = "desktop",
            feature = "mobile",
            feature = "web",
            feature = "fullstack",
        ))),
        deprecated(
            note = "No renderer is enabled. You must enable a renderer feature on the dioxus crate before calling the launch function.\nAdd `web`, `desktop`, `mobile`, or `fullstack` to the `features` of dioxus field in your Cargo.toml.\n# Example\n```toml\n# ...\n[dependencies]\ndioxus = { version = \"0.5.0\", features = [\"web\"] }\n# ...\n```"
        )
    )]
    pub fn new() -> LaunchBuilder {
        // Pick the platform by feature flags; the ordering here defines the documented
        // feature priority (native > desktop > mobile > web > server > liveview as tested).
        let platform = if cfg!(feature = "native") {
            KnownPlatform::Native
        } else if cfg!(feature = "desktop") {
            KnownPlatform::Desktop
        } else if cfg!(feature = "mobile") {
            KnownPlatform::Mobile
        } else if cfg!(feature = "web") {
            KnownPlatform::Web
        } else if cfg!(feature = "server") {
            KnownPlatform::Server
        } else if cfg!(feature = "liveview") {
            KnownPlatform::Liveview
        } else {
            panic!("No platform feature enabled. Please enable one of the following features: liveview, desktop, mobile, web, tui, fullstack to use the launch API.")
        };

        LaunchBuilder {
            platform,
            contexts: Vec::new(),
            configs: Vec::new(),
        }
    }

    /// Launch your web application.
    #[cfg(feature = "web")]
    #[cfg_attr(docsrs, doc(cfg(feature = "web")))]
    pub fn web() -> LaunchBuilder {
        LaunchBuilder {
            platform: KnownPlatform::Web,
            contexts: Vec::new(),
            configs: Vec::new(),
        }
    }

    /// Launch your desktop application.
    #[cfg(feature = "desktop")]
    #[cfg_attr(docsrs, doc(cfg(feature = "desktop")))]
    pub fn desktop() -> LaunchBuilder {
        LaunchBuilder {
            platform: KnownPlatform::Desktop,
            contexts: Vec::new(),
            configs: Vec::new(),
        }
    }

    /// Launch your fullstack axum server.
    #[cfg(all(feature = "fullstack", feature = "server"))]
    #[cfg_attr(docsrs, doc(cfg(all(feature = "fullstack", feature = "server"))))]
    pub fn server() -> LaunchBuilder {
        LaunchBuilder {
            platform: KnownPlatform::Server,
            contexts: Vec::new(),
            configs: Vec::new(),
        }
    }

    /// Launch your mobile application.
    #[cfg(feature = "mobile")]
    #[cfg_attr(docsrs, doc(cfg(feature = "mobile")))]
    pub fn mobile() -> LaunchBuilder {
        LaunchBuilder {
            platform: KnownPlatform::Mobile,
            contexts: Vec::new(),
            configs: Vec::new(),
        }
    }

    /// Provide a custom launch function for your application.
    ///
    /// Useful for third party renderers to tap into the launch builder API without having to reimplement it.
    ///
    /// # Example
    /// ```rust, no_run
    /// use dioxus::prelude::*;
    /// use std::any::Any;
    ///
    /// #[derive(Default)]
    /// struct Config;
    ///
    /// fn my_custom_launcher(root: fn() -> Element, contexts: Vec<Box<dyn Fn() -> Box<dyn Any> + Send + Sync>>, cfg: Vec<Box<dyn Any>>) {
    ///     println!("launching with root: {:?}", root());
    ///     loop {
    ///         println!("running...");
    ///     }
    /// }
    ///
    /// fn app() -> Element {
    ///     rsx! {
    ///         div { "Hello, world!" }
    ///     }
    /// }
    ///
    /// dioxus::LaunchBuilder::custom(my_custom_launcher).launch(app);
    /// ```
    pub fn custom(launch_fn: LaunchFn) -> LaunchBuilder {
        LaunchBuilder {
            platform: KnownPlatform::Other(launch_fn),
            contexts: vec![],
            configs: Vec::new(),
        }
    }

    /// Inject state into the root component's context that is created on the thread that the app is launched on.
    ///
    /// # Example
    /// ```rust, no_run
    /// use dioxus::prelude::*;
    /// use std::any::Any;
    ///
    /// #[derive(Default)]
    /// struct MyState {
    ///     value: i32,
    /// }
    ///
    /// fn app() -> Element {
    ///     rsx! {
    ///         div { "Hello, world!" }
    ///     }
    /// }
    ///
    /// dioxus::LaunchBuilder::new()
    ///     .with_context_provider(|| Box::new(MyState { value: 42 }))
    ///     .launch(app);
    /// ```
    pub fn with_context_provider(
        mut self,
        state: impl Fn() -> Box<dyn Any> + Send + Sync + 'static,
    ) -> Self {
        self.contexts.push(Box::new(state));
        self
    }

    /// Inject state into the root component's context.
    ///
    /// # Example
    /// ```rust, no_run
    /// use dioxus::prelude::*;
    /// use std::any::Any;
    ///
    /// #[derive(Clone)]
    /// struct MyState {
    ///     value: i32,
    /// }
    ///
    /// fn app() -> Element {
    ///     rsx! {
    ///         div { "Hello, world!" }
    ///     }
    /// }
    ///
    /// dioxus::LaunchBuilder::new()
    ///     .with_context(MyState { value: 42 })
    ///     .launch(app);
    /// ```
    pub fn with_context(mut self, state: impl Any + Clone + Send + Sync + 'static) -> Self {
        // The provided value is cloned into a fresh box each time the provider runs.
        self.contexts
            .push(Box::new(move || Box::new(state.clone())));
        self
    }

    /// Provide a platform-specific config to the builder.
    ///
    /// # Example
    /// ```rust, no_run
    /// use dioxus::prelude::*;
    /// use dioxus_desktop::{Config, WindowBuilder};
    ///
    /// fn app() -> Element {
    ///     rsx! {
    ///         div { "Hello, world!" }
    ///     }
    /// }
    ///
    /// dioxus::LaunchBuilder::new()
    ///     .with_cfg(desktop! {
    ///         Config::new().with_window(
    ///             WindowBuilder::new()
    ///                 .with_title("My App")
    ///         )
    ///     })
    ///     .launch(app);
    /// ```
    pub fn with_cfg(mut self, config: impl LaunchConfig) -> Self {
        self.configs.push(Box::new(config));
        self
    }

    /// Launch your application.
    #[allow(clippy::diverging_sub_expression)]
    pub fn launch(self, app: fn() -> Element) {
        let Self {
            platform,
            contexts,
            configs,
        } = self;

        // Make sure to turn on the logger if the user specified the logger feaature
        #[cfg(feature = "logger")]
        dioxus_logger::initialize_default();

        // Set any flags if we're running under fullstack
        #[cfg(feature = "fullstack")]
        {
            use dioxus_fullstack::{get_server_url, set_server_url};

            // Make sure to set the server_fn endpoint if the user specified the fullstack feature
            // We only set this on native targets
            #[cfg(any(feature = "desktop", feature = "mobile", feature = "native"))]
            if get_server_url().is_empty() {
                let serverurl = format!(
                    "http://{}:{}",
                    std::env::var("DIOXUS_DEVSERVER_IP")
                        .unwrap_or_else(|_| "127.0.0.1".to_string()),
                    std::env::var("DIOXUS_DEVSERVER_PORT").unwrap_or_else(|_| "8080".to_string())
                )
                .leak();
                set_server_url(serverurl);
            }

            // If there is a base path set, call server functions from that base path
            #[cfg(feature = "web")]
            if let Some(base_path) = dioxus_cli_config::base_path() {
                let base_path = base_path.trim_matches('/');
                set_server_url(format!("{}/{}", get_server_url(), base_path).leak());
            }
        }

        // If native is specified, we override the webview launcher
        #[cfg(feature = "native")]
        if matches!(platform, KnownPlatform::Native) {
            return dioxus_native::launch_cfg(app, contexts, configs);
        }

        // Mobile and desktop share the same webview launcher.
        #[cfg(feature = "mobile")]
        if matches!(platform, KnownPlatform::Mobile) {
            return dioxus_desktop::launch::launch(app, contexts, configs);
        }

        #[cfg(feature = "desktop")]
        if matches!(platform, KnownPlatform::Desktop) {
            return dioxus_desktop::launch::launch(app, contexts, configs);
        }

        #[cfg(feature = "server")]
        if matches!(platform, KnownPlatform::Server) {
            return dioxus_server::launch_cfg(app, contexts, configs);
        }

        #[cfg(feature = "web")]
        if matches!(platform, KnownPlatform::Web) {
            return dioxus_web::launch::launch(app, contexts, configs);
        }

        #[cfg(feature = "liveview")]
        if matches!(platform, KnownPlatform::Liveview) {
            return dioxus_liveview::launch::launch(app, contexts, configs);
        }

        // If the platform is not one of the above, then we assume it's a custom platform
        if let KnownPlatform::Other(launch_fn) = platform {
            return launch_fn(app, contexts, configs);
        }

        // If we're here, then we have no platform feature enabled and third-party-renderer is enabled
        if cfg!(feature = "third-party-renderer") {
            panic!("No first party renderer feature enabled. It looks like you are trying to use a third party renderer. You will need to use the launch function from the third party renderer crate.");
        }

        panic!("No platform feature enabled. Please enable one of the following features: liveview, desktop, mobile, web, tui, fullstack to use the launch API.")
    }
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/dioxus/src/lib.rs
packages/dioxus/src/lib.rs
#![doc = include_str!("../README.md")]
//!
//! ## Dioxus Crate Features
//!
//! This crate has several features that can be enabled to change the active renderer and enable various integrations:
//!
//! - `signals`: (default) re-exports `dioxus-signals`
//! - `macro`: (default) re-exports `dioxus-macro`
//! - `html`: (default) exports `dioxus-html` as the default elements to use in rsx
//! - `hooks`: (default) re-exports `dioxus-hooks`
//! - `hot-reload`: (default) enables hot rsx reloading in all renderers that support it
//! - `router`: exports the [router](https://dioxuslabs.com/learn/0.7/essentials/router/) and enables any router features for the current platform
//! - `third-party-renderer`: Just disables warnings about no active platform when no renderers are enabled
//! - `logger`: Enable the default tracing subscriber for Dioxus apps
//!
//! Platform features (the current platform determines what platform the [`launch()`] function runs):
//!
//! - `fullstack`: enables the fullstack platform. This must be used in combination with the `web` feature for wasm builds and `server` feature for server builds
//! - `desktop`: enables the desktop platform
//! - `mobile`: enables the mobile platform
//! - `web`: enables the web platform. If the fullstack platform is enabled, this will set the fullstack platform to client mode
//! - `liveview`: enables the liveview platform
//! - `server`: enables the server variant of dioxus
#![doc(html_logo_url = "https://avatars.githubusercontent.com/u/79236386")]
#![doc(html_favicon_url = "https://avatars.githubusercontent.com/u/79236386")]
#![cfg_attr(docsrs, feature(doc_cfg))]

// ---- core re-exports ----

pub use dioxus_core;

#[doc(inline)]
pub use dioxus_core::{CapturedError, Ok, Result};

#[cfg(feature = "launch")]
#[cfg_attr(docsrs, doc(cfg(feature = "launch")))]
mod launch;

pub use dioxus_core as core;

#[cfg(feature = "launch")]
#[cfg_attr(docsrs, doc(cfg(feature = "launch")))]
pub use crate::launch::*;

// ---- reactivity: hooks, signals, stores ----

#[cfg(feature = "hooks")]
#[cfg_attr(docsrs, doc(cfg(feature = "hooks")))]
pub use dioxus_hooks as hooks;

#[cfg(feature = "signals")]
#[cfg_attr(docsrs, doc(cfg(feature = "signals")))]
pub use dioxus_signals as signals;

#[cfg(feature = "signals")]
#[cfg_attr(docsrs, doc(cfg(feature = "signals")))]
pub use dioxus_stores as stores;

// DOM event types (only populated when the html feature is on).
pub mod events {
    #[cfg(feature = "html")]
    #[cfg_attr(docsrs, doc(cfg(feature = "html")))]
    pub use dioxus_html::events::*;
}

// ---- document / html / macros / tooling ----

#[cfg(feature = "document")]
#[cfg_attr(docsrs, doc(cfg(feature = "document")))]
pub use dioxus_document as document;

#[cfg(feature = "document")]
#[cfg_attr(docsrs, doc(cfg(feature = "document")))]
pub use dioxus_history as history;

#[cfg(feature = "html")]
#[cfg_attr(docsrs, doc(cfg(feature = "html")))]
pub use dioxus_html as html;

#[cfg(feature = "macro")]
#[cfg_attr(docsrs, doc(cfg(feature = "macro")))]
pub use dioxus_core_macro as core_macro;

#[cfg(feature = "logger")]
#[cfg_attr(docsrs, doc(cfg(feature = "logger")))]
pub use dioxus_logger as logger;

#[cfg(feature = "cli-config")]
#[cfg_attr(docsrs, doc(cfg(feature = "cli-config")))]
pub use dioxus_cli_config as cli_config;

#[cfg(feature = "server")]
#[cfg_attr(docsrs, doc(cfg(feature = "server")))]
pub use dioxus_server as server;

#[cfg(feature = "server")]
pub use dioxus_server::serve;

#[cfg(feature = "devtools")]
#[cfg_attr(docsrs, doc(cfg(feature = "devtools")))]
pub use dioxus_devtools as devtools;

// ---- renderers / platforms ----

#[cfg(feature = "web")]
#[cfg_attr(docsrs, doc(cfg(feature = "web")))]
pub use dioxus_web as web;

#[cfg(feature = "router")]
#[cfg_attr(docsrs, doc(cfg(feature = "router")))]
pub use dioxus_router as router;

#[cfg(feature = "fullstack")]
#[cfg_attr(docsrs, doc(cfg(feature = "fullstack")))]
pub use dioxus_fullstack as fullstack;

#[cfg(feature = "desktop")]
#[cfg_attr(docsrs, doc(cfg(feature = "desktop")))]
pub use dioxus_desktop as desktop;

// Mobile reuses the desktop (webview) renderer under a different name.
#[cfg(feature = "mobile")]
#[cfg_attr(docsrs, doc(cfg(feature = "mobile")))]
pub use dioxus_desktop as mobile;

#[cfg(feature = "liveview")]
#[cfg_attr(docsrs, doc(cfg(feature = "liveview")))]
pub use dioxus_liveview as liveview;

#[cfg(feature = "ssr")]
#[cfg_attr(docsrs, doc(cfg(feature = "ssr")))]
pub use dioxus_ssr as ssr;

#[cfg(feature = "warnings")]
#[cfg_attr(docsrs, doc(cfg(feature = "warnings")))]
pub use warnings;

pub use dioxus_config_macros as config_macros;

#[cfg(feature = "wasm-split")]
#[cfg_attr(docsrs, doc(cfg(feature = "wasm-split")))]
pub use wasm_splitter as wasm_split;

pub use subsecond;

#[cfg(feature = "asset")]
#[cfg_attr(docsrs, doc(cfg(feature = "asset")))]
#[doc(inline)]
pub use dioxus_asset_resolver as asset_resolver;

// Everything a typical Dioxus app wants in scope via `use dioxus::prelude::*`.
pub mod prelude {
    #[cfg(feature = "document")]
    #[cfg_attr(docsrs, doc(cfg(feature = "document")))]
    #[doc(inline)]
    pub use dioxus_document::{self as document, Meta, Stylesheet, Title};

    #[cfg(feature = "document")]
    #[cfg_attr(docsrs, doc(cfg(feature = "document")))]
    #[doc(inline)]
    pub use dioxus_history::{history, History};

    #[cfg(feature = "launch")]
    #[cfg_attr(docsrs, doc(cfg(feature = "launch")))]
    #[doc(inline)]
    pub use crate::launch::*;

    #[cfg(feature = "hooks")]
    #[cfg_attr(docsrs, doc(cfg(feature = "hooks")))]
    #[doc(inline)]
    pub use crate::hooks::*;

    #[cfg(feature = "signals")]
    #[cfg_attr(docsrs, doc(cfg(feature = "signals")))]
    #[doc(inline)]
    pub use dioxus_signals::{self, *};

    #[cfg(feature = "signals")]
    pub use dioxus_stores::{self, store, use_store, GlobalStore, ReadStore, Store, WriteStore};

    #[cfg(feature = "macro")]
    #[cfg_attr(docsrs, doc(cfg(feature = "macro")))]
    #[allow(deprecated)]
    #[doc(inline)]
    pub use dioxus_core_macro::{component, rsx, Props};

    #[cfg(feature = "launch")]
    #[cfg_attr(docsrs, doc(cfg(feature = "launch")))]
    pub use dioxus_config_macro::*;

    // `dioxus_elements` is the name the rsx! macro expects for the element set.
    #[cfg(feature = "html")]
    #[cfg_attr(docsrs, doc(cfg(feature = "html")))]
    pub use dioxus_html as dioxus_elements;

    #[cfg(feature = "html")]
    #[cfg_attr(docsrs, doc(cfg(feature = "html")))]
    #[doc(inline)]
    pub use dioxus_elements::{Code, Key, Location, Modifiers};

    #[cfg(feature = "html")]
    #[cfg_attr(docsrs, doc(cfg(feature = "html")))]
    #[doc(no_inline)]
    pub use dioxus_elements::{
        events::*, extensions::*, global_attributes, keyboard_types, svg_attributes, traits::*,
        GlobalAttributesExtension, SvgAttributesExtension,
    };

    #[cfg(feature = "devtools")]
    #[cfg_attr(docsrs, doc(cfg(feature = "devtools")))]
    pub use dioxus_devtools;

    pub use dioxus_core;

    #[cfg(feature = "fullstack")]
    #[cfg_attr(docsrs, doc(cfg(feature = "fullstack")))]
    #[doc(inline)]
    pub use dioxus_fullstack::{
        self as dioxus_fullstack, delete, get, patch, post, put, server, use_loader,
        use_server_cached, use_server_future, HttpError, OrHttpError, ServerFnError,
        ServerFnResult, StatusCode,
    };

    #[cfg(feature = "server")]
    #[cfg_attr(docsrs, doc(cfg(feature = "server")))]
    #[doc(inline)]
    pub use dioxus_server::{self, serve, DioxusRouterExt, ServeConfig, ServerFunction};

    #[cfg(feature = "router")]
    #[cfg_attr(docsrs, doc(cfg(feature = "router")))]
    pub use dioxus_router;

    #[cfg(feature = "router")]
    #[cfg_attr(docsrs, doc(cfg(feature = "router")))]
    #[doc(inline)]
    pub use dioxus_router::{
        hooks::*, navigator, use_navigator, GoBackButton, GoForwardButton, Link,
        NavigationTarget, Outlet, Routable, Router,
    };

    #[cfg(feature = "asset")]
    #[cfg_attr(docsrs, doc(cfg(feature = "asset")))]
    #[doc(inline)]
    pub use manganis::{self, *};

    #[cfg(feature = "wasm-split")]
    #[cfg_attr(docsrs, doc(cfg(feature = "wasm-split")))]
    pub use wasm_splitter as wasm_split;

    #[doc(inline)]
    pub use dioxus_core::{
        consume_context, provide_context, spawn, suspend, try_consume_context, use_drop,
        use_hook, AnyhowContext, Attribute, Callback, Component, Element, ErrorBoundary,
        ErrorContext, Event, EventHandler, Fragment, HasAttributes, IntoDynNode, RenderError,
        Result, ScopeId, SuspenseBoundary, SuspenseContext, VNode, VirtualDom,
    };

    #[cfg(feature = "logger")]
    pub use dioxus_logger::tracing::{debug, error, info, trace, warn};
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/dioxus/benches/jsframework.rs
packages/dioxus/benches/jsframework.rs
#![allow(non_snake_case, non_upper_case_globals)]
//! This benchmark tests just the overhead of Dioxus itself.
//!
//! For the JS Framework Benchmark, both the framework and the browser is benchmarked together. Dioxus prepares changes
//! to be made, but the change application phase will be just as performant as the vanilla wasm_bindgen code. In essence,
//! we are measuring the overhead of Dioxus, not the performance of the "apply" phase.
//!
//!
//! Pre-templates (Mac M1):
//! - 3ms to create 1_000 rows
//! - 30ms to create 10_000 rows
//!
//! Post-templates
//! - 580us to create 1_000 rows
//! - 6.2ms to create 10_000 rows
//!
//! As pure "overhead", these are amazing good numbers, mostly slowed down by hitting the global allocator.
//! These numbers don't represent Dioxus with the heuristic engine installed, so I assume it'll be even faster.

use criterion::{criterion_group, criterion_main, Criterion};
use dioxus::prelude::*;
use dioxus_core::NoOpMutations;
use rand::prelude::*;

criterion_group!(mbenches, create_rows);
criterion_main!(mbenches);

// Benchmark: rebuild a 10k-row virtual dom, discarding mutations (NoOpMutations),
// so only Dioxus's diff/creation overhead is measured.
fn create_rows(c: &mut Criterion) {
    c.bench_function("create rows", |b| {
        let mut dom = VirtualDom::new(app);
        // Warm-up rebuild outside the timed loop.
        dom.rebuild(&mut dioxus_core::NoOpMutations);

        b.iter(|| {
            dom.rebuild(&mut NoOpMutations);
        })
    });
}

// Root component: a table of 10_000 rows with randomized labels.
fn app() -> Element {
    let mut rng = SmallRng::from_os_rng();

    rsx! (
        table {
            tbody {
                for f in 0..10_000_usize {
                    table_row { row_id: f, label: Label::new(&mut rng) }
                }
            }
        }
    )
}

#[derive(PartialEq, Props, Clone, Copy)]
struct RowProps {
    row_id: usize,
    label: Label,
}

// One benchmark table row, mirroring the js-framework-benchmark row markup.
fn table_row(props: RowProps) -> Element {
    let [adj, col, noun] = props.label.0;
    rsx! {
        tr {
            td { class:"col-md-1", "{props.row_id}" }
            td { class:"col-md-1", onclick: move |_| { /* run onselect */ },
                a { class: "lbl", "{adj}" "{col}" "{noun}" }
            }
            td { class: "col-md-1",
                a { class: "remove", onclick: move |_| {/* remove */},
                    span { class: "glyphicon glyphicon-remove remove", aria_hidden: "true" }
                }
            }
            td { class: "col-md-6" }
        }
    }
}

// A random "adjective colour noun" label, as used by the js-framework-benchmark.
#[derive(PartialEq, Clone, Copy)]
struct Label([&'static str; 3]);

impl Label {
    fn new(rng: &mut SmallRng) -> Self {
        Label([
            ADJECTIVES.choose(rng).unwrap(),
            COLOURS.choose(rng).unwrap(),
            NOUNS.choose(rng).unwrap(),
        ])
    }
}

static ADJECTIVES: &[&str] = &[
    "pretty",
    "large",
    "big",
    "small",
    "tall",
    "short",
    "long",
    "handsome",
    "plain",
    "quaint",
    "clean",
    "elegant",
    "easy",
    "angry",
    "crazy",
    "helpful",
    "mushy",
    "odd",
    "unsightly",
    "adorable",
    "important",
    "inexpensive",
    "cheap",
    "expensive",
    "fancy",
];

static COLOURS: &[&str] = &[
    "red", "yellow", "blue", "green", "pink", "brown", "purple", "brown", "white", "black",
    "orange",
];

static NOUNS: &[&str] = &[
    "table", "chair", "house", "bbq", "desk", "car", "pony", "cookie", "sandwich", "burger",
    "pizza", "mouse", "keyboard",
];
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-config/src/lib.rs
packages/cli-config/src/lib.rs
//! <div align="center">
//!   <img
//!     src="https://github.com/user-attachments/assets/6c7e227e-44ff-4e53-824a-67949051149c"
//!     alt="Build web, desktop, and mobile apps with a single codebase."
//!     width="100%"
//!     class="darkmode-image"
//!   >
//! </div>
//!
//! # Dioxus CLI configuration
//!
//! This crate exposes the various configuration options that the Dioxus CLI sets when running the
//! application during development.
//!
//! Note that these functions will return a different value when running under the CLI, so make sure
//! not to rely on them when running in a production environment.
//!
//! ## Constants
//!
//! The various constants here are the names of the environment variables that the CLI sets. We recommend
//! using the functions in this crate to access the values of these environment variables indirectly.
//!
//! The CLI uses this crate and the associated constants to *set* the environment variables, but as
//! a consumer of the CLI, you would want to read the values of these environment variables using
//! the provided functions.
//!
//! ## Example Usage
//!
//! We recommend using the functions here to access the values of the environment variables set by the CLI.
//! For example, you might use the [`fullstack_address_or_localhost`] function to get the address that
//! the CLI is requesting the application to be served on.
//!
//! ```rust, ignore
//! async fn launch_axum(app: axum::Router<()>) {
//!     // Read the PORT and ADDR environment variables set by the CLI
//!     let addr = dioxus_cli_config::fullstack_address_or_localhost();
//!
//!     // Bind to the address and serve the application
//!     let listener = tokio::net::TcpListener::bind(&addr).await.unwrap();
//!     axum::serve(listener, app.into_make_service())
//!         .await
//!         .unwrap();
//! }
//! ```
//!
//! ## Stability
//!
//! The *values* that these functions return are *not* guaranteed to be stable between patch releases
//! of Dioxus. At any time, we might change the values that the CLI sets or the way that they are read.
//!
//! We also don't guarantee the stability of the env var names themselves. If you want to rely on a
//! particular env var, use the defined constants in your code.

use std::{
    net::{IpAddr, Ipv4Addr, SocketAddr},
    path::PathBuf,
};

// Names of the environment variables the CLI sets; read them via the functions below.
pub const CLI_ENABLED_ENV: &str = "DIOXUS_CLI_ENABLED";
pub const SERVER_IP_ENV: &str = "IP";
pub const SERVER_PORT_ENV: &str = "PORT";
pub const DEVSERVER_IP_ENV: &str = "DIOXUS_DEVSERVER_IP";
pub const DEVSERVER_PORT_ENV: &str = "DIOXUS_DEVSERVER_PORT";
pub const ALWAYS_ON_TOP_ENV: &str = "DIOXUS_ALWAYS_ON_TOP";
pub const ASSET_ROOT_ENV: &str = "DIOXUS_ASSET_ROOT";
pub const APP_TITLE_ENV: &str = "DIOXUS_APP_TITLE";
pub const PRODUCT_NAME_ENV: &str = "DIOXUS_PRODUCT_NAME";

#[deprecated(since = "0.6.0", note = "The CLI currently does not set this.")]
#[doc(hidden)]
pub const OUT_DIR: &str = "DIOXUS_OUT_DIR";

pub const SESSION_CACHE_DIR: &str = "DIOXUS_SESSION_CACHE_DIR";
pub const BUILD_ID: &str = "DIOXUS_BUILD_ID";

/// Reads an environment variable at runtime in debug mode or at compile time in
/// release mode. When bundling in release mode, we will not be running under the
/// environment variables that the CLI sets, so we need to read them at compile time.
macro_rules! read_env_config {
    ($name:expr) => {{
        #[cfg(debug_assertions)]
        {
            // In debug mode, read the environment variable set by the CLI at runtime
            std::env::var($name).ok()
        }
        #[cfg(not(debug_assertions))]
        {
            // In release mode, read the environment variable set by the CLI at compile time
            // This means the value will still be available when running the application
            // standalone.

            // We don't always read the environment variable at compile time to avoid rebuilding
            // this crate when the environment variable changes.
            option_env!($name).map(ToString::to_string)
        }
    }};
}

/// Get the address of the devserver for use over a raw socket
///
/// This returns a [`SocketAddr`], meaning that you still need to connect to it using a socket with
/// the appropriate protocol and path.
///
/// For reference, the devserver typically lives on `127.0.0.1:8080` and serves the devserver websocket
/// on `127.0.0.1:8080/_dioxus`.
pub fn devserver_raw_addr() -> Option<SocketAddr> {
    let port = std::env::var(DEVSERVER_PORT_ENV).ok();

    if cfg!(target_os = "android") {
        // Since `adb reverse` is used for Android, the 127.0.0.1 will always be
        // the correct IP address.
        let port = port.unwrap_or("8080".to_string());
        return Some(format!("127.0.0.1:{}", port).parse().unwrap());
    }

    // On other targets both IP and port must be provided by the CLI.
    let port = port?;
    let ip = std::env::var(DEVSERVER_IP_ENV).ok()?;

    format!("{}:{}", ip, port).parse().ok()
}

/// Get the address of the devserver for use over a websocket
///
/// This is meant for internal use, though if you are building devtools around Dioxus, this would be
/// useful to connect as a "listener" to the devserver.
///
/// Unlike [`devserver_raw_addr`], this returns a string that can be used directly to connect to the
/// devserver over a websocket. IE `ws://127.0.0.1:8080/_dioxus`.
pub fn devserver_ws_endpoint() -> Option<String> {
    let addr = devserver_raw_addr()?;
    Some(format!("ws://{addr}/_dioxus"))
}

/// Get the IP that the server should be bound to.
///
/// This is set by the CLI and is used to bind the server to a specific address.
/// You can manually set the ip by setting the `IP` environment variable.
///
/// ```sh
/// IP=0.0.0.0 ./server
/// ```
pub fn server_ip() -> Option<IpAddr> {
    std::env::var(SERVER_IP_ENV)
        .ok()
        .and_then(|s| s.parse().ok())
}

/// Get the port that the server should listen on.
///
/// This is set by the CLI and is used to bind the server to a specific port.
/// You can manually set the port by setting the `PORT` environment variable.
/// /// ```sh /// PORT=8081 ./server /// ``` pub fn server_port() -> Option<u16> { std::env::var(SERVER_PORT_ENV) .ok() .and_then(|s| s.parse().ok()) } /// Get the full address that the server should listen on. /// /// This is a convenience function that combines the `server_ip` and `server_port` functions and then /// falls back to `localhost:8080` if the environment variables are not set. /// /// ## Example /// /// ```rust, ignore /// async fn launch_axum(app: axum::Router<()>) { /// // Read the PORT and ADDR environment variables set by the CLI /// let addr = dioxus_cli_config::fullstack_address_or_localhost(); /// /// // Bind to the address and serve the application /// let listener = tokio::net::TcpListener::bind(&addr).await.unwrap(); /// axum::serve(listener, app.into_make_service()) /// .await /// .unwrap(); /// } /// ``` /// /// ## Stability /// /// In the future, we might change the address from 127.0.0.1 to 0.0.0.0. pub fn fullstack_address_or_localhost() -> SocketAddr { let ip = server_ip().unwrap_or_else(|| IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); let port = server_port().unwrap_or(8080); SocketAddr::new(ip, port) } /// Get the title of the application, usually set by the Dioxus.toml. /// /// This is used to set the title of the desktop window if the app itself doesn't set it. pub fn app_title() -> Option<String> { read_env_config!("DIOXUS_APP_TITLE") } /// Check if the application should forced to "float" on top of other windows. /// /// The CLI sets this based on the `--always-on-top` flag and the settings system. pub fn always_on_top() -> Option<bool> { std::env::var(ALWAYS_ON_TOP_ENV) .ok() .and_then(|s| s.parse().ok()) } /// Check if the CLI is enabled when running the application. /// /// The CLI *always* sets this value to true when running the application. /// /// ## Note /// /// On Android and the Web, this *might* not be reliable since there isn't always a consistent way to /// pass off the CLI environment variables to the application. 
pub fn is_cli_enabled() -> bool { // todo: (jon) - on android and web we should fix this... std::env::var(CLI_ENABLED_ENV).is_ok() } /// Get the path where the application will be served from. /// /// This is used by the router to format the URLs. For example, an app with a base path of `dogapp` will /// be served at `http://localhost:8080/dogapp`. /// /// All assets will be served from this base path as well, ie `http://localhost:8080/dogapp/assets/logo.png`. #[allow(unreachable_code)] pub fn base_path() -> Option<String> { // This may trigger when compiling to the server if you depend on another crate that pulls in // the web feature. It might be better for the renderers to provide the current platform // as a global context #[cfg(all(feature = "web", target_arch = "wasm32"))] { return web_base_path(); } read_env_config!("DIOXUS_ASSET_ROOT") } #[cfg(feature = "web")] #[wasm_bindgen::prelude::wasm_bindgen(inline_js = r#" export function getMetaContents(meta_name) { const selector = document.querySelector(`meta[name="${meta_name}"]`); if (!selector) { return null; } return selector.content; } "#)] extern "C" { #[wasm_bindgen(js_name = getMetaContents)] pub fn get_meta_contents(selector: &str) -> Option<String>; } /// Get the path where the application is served from in the browser. /// /// This uses wasm_bindgen on the browser to extract the base path from a meta element. #[cfg(feature = "web")] pub fn web_base_path() -> Option<String> { // In debug mode, we get the base path from the meta element which can be hot reloaded and changed without recompiling #[cfg(debug_assertions)] { thread_local! 
{ static BASE_PATH: std::cell::OnceCell<Option<String>> = const { std::cell::OnceCell::new() }; } BASE_PATH.with(|f| f.get_or_init(|| get_meta_contents(ASSET_ROOT_ENV)).clone()) } // In release mode, we get the base path from the environment variable #[cfg(not(debug_assertions))] { option_env!("DIOXUS_ASSET_ROOT").map(ToString::to_string) } } /// Format a meta element for the base path to be used in the output HTML #[doc(hidden)] pub fn format_base_path_meta_element(base_path: &str) -> String { format!(r#"<meta name="{ASSET_ROOT_ENV}" content="{base_path}">"#,) } /// Get the path to the output directory where the application is being built. /// /// This might not return a valid path - we don't recommend relying on this. #[doc(hidden)] #[deprecated( since = "0.6.0", note = "The does not set the OUT_DIR environment variable." )] pub fn out_dir() -> Option<PathBuf> { #[allow(deprecated)] { std::env::var(OUT_DIR).ok().map(PathBuf::from) } } /// Get the directory where this app can write to for this session that's guaranteed to be stable /// between reloads of the same app. This is useful for emitting state like window position and size /// so the app can restore it when it's next opened. /// /// Note that this cache dir is really only useful for platforms that can access it. Web/Android /// don't have access to this directory, so it's not useful for them. /// /// This is designed with desktop executables in mind. pub fn session_cache_dir() -> Option<PathBuf> { if cfg!(target_os = "android") { return Some(android_session_cache_dir()); } std::env::var(SESSION_CACHE_DIR).ok().map(PathBuf::from) } /// The session cache directory for android pub fn android_session_cache_dir() -> PathBuf { PathBuf::from("/data/local/tmp/dx/") } /// The unique build id for this application, used to disambiguate between different builds of the same /// application. 
pub fn build_id() -> u64 { #[cfg(target_arch = "wasm32")] { 0 } #[cfg(not(target_arch = "wasm32"))] { std::env::var(BUILD_ID) .ok() .and_then(|s| s.parse().ok()) .unwrap_or(0) } } /// The product name of the bundled application. pub fn product_name() -> Option<String> { read_env_config!("DIOXUS_PRODUCT_NAME") }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-harnesses/harness-renderer-swap/src/main.rs
packages/cli-harnesses/harness-renderer-swap/src/main.rs
use dioxus::prelude::*; fn main() { dioxus::launch(|| rsx! { "hello world!" }) }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-harnesses/harness-simple-web/src/main.rs
packages/cli-harnesses/harness-simple-web/src/main.rs
fn main() { println!("Hello, world!"); }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-harnesses/harness-fullstack-desktop-with-default/src/main.rs
packages/cli-harnesses/harness-fullstack-desktop-with-default/src/main.rs
use dioxus::prelude::*; fn main() { dioxus::launch(|| rsx! { "hello world!" }) }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-harnesses/harness-simple-dedicated-server/src/main.rs
packages/cli-harnesses/harness-simple-dedicated-server/src/main.rs
fn main() { println!("Hello, world!"); }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-harnesses/harness-fullstack-multi-target/src/main.rs
packages/cli-harnesses/harness-fullstack-multi-target/src/main.rs
use dioxus::prelude::*; fn main() { dioxus::launch(|| rsx! { "hello world!" }) }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-harnesses/harness-fullstack-desktop-with-features/src/main.rs
packages/cli-harnesses/harness-fullstack-desktop-with-features/src/main.rs
use dioxus::prelude::*; fn main() { dioxus::launch(|| rsx! { "hello world!" }) }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-harnesses/harness-simple-desktop/src/main.rs
packages/cli-harnesses/harness-simple-desktop/src/main.rs
fn main() { println!("Hello, world!"); }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-harnesses/harness-default-to-non-default/src/main.rs
packages/cli-harnesses/harness-default-to-non-default/src/main.rs
use dioxus::prelude::*; fn main() { dioxus::launch(|| rsx! { "hello world!" }) }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-harnesses/harness-simple-fullstack/src/main.rs
packages/cli-harnesses/harness-simple-fullstack/src/main.rs
use dioxus::prelude::*; fn main() { dioxus::launch(|| rsx! { "hello world!" }) }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-harnesses/harness-simple-mobile/src/main.rs
packages/cli-harnesses/harness-simple-mobile/src/main.rs
fn main() { println!("Hello, world!"); }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-harnesses/harness-fullstack-desktop/src/main.rs
packages/cli-harnesses/harness-fullstack-desktop/src/main.rs
use dioxus::prelude::*; fn main() { dioxus::launch(|| rsx! { "hello world!" }) }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-harnesses/harness-fullstack-with-optional-tokio/src/main.rs
packages/cli-harnesses/harness-fullstack-with-optional-tokio/src/main.rs
use dioxus::prelude::*; fn main() { dioxus::launch(|| rsx! { "hello world!" }) }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-harnesses/harness-no-dioxus/src/main.rs
packages/cli-harnesses/harness-no-dioxus/src/main.rs
fn main() { println!("Hello, world!"); }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-harnesses/harness-simple-fullstack-with-default/src/main.rs
packages/cli-harnesses/harness-simple-fullstack-with-default/src/main.rs
use dioxus::prelude::*; fn main() { dioxus::launch(|| rsx! { "hello world!" }) }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-harnesses/harness-fullstack-multi-target-no-default/src/main.rs
packages/cli-harnesses/harness-fullstack-multi-target-no-default/src/main.rs
use dioxus::prelude::*; fn main() { dioxus::launch(|| rsx! { "hello world!" }) }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-harnesses/harness-simple-fullstack-native-with-default/src/main.rs
packages/cli-harnesses/harness-simple-fullstack-native-with-default/src/main.rs
use dioxus::prelude::*; fn main() { dioxus::launch(|| rsx! { "hello world!" }) }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli-harnesses/harness-simple-dedicated-client/src/main.rs
packages/cli-harnesses/harness-simple-dedicated-client/src/main.rs
fn main() { println!("Hello, world!"); }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/const-serialize/src/struct.rs
packages/const-serialize/src/struct.rs
use crate::*; /// Plain old data for a field. Stores the offset of the field in the struct and the layout of the field. #[derive(Debug, Copy, Clone)] pub struct StructFieldLayout { name: &'static str, offset: usize, layout: Layout, } impl StructFieldLayout { /// Create a new struct field layout pub const fn new(name: &'static str, offset: usize, layout: Layout) -> Self { Self { name, offset, layout, } } } /// Layout for a struct. The struct layout is just a list of fields with offsets #[derive(Debug, Copy, Clone)] pub struct StructLayout { pub(crate) size: usize, pub(crate) data: &'static [StructFieldLayout], } impl StructLayout { /// Create a new struct layout pub const fn new(size: usize, data: &'static [StructFieldLayout]) -> Self { Self { size, data } } } /// Serialize a struct that is stored at the pointer passed in pub(crate) const unsafe fn serialize_const_struct( ptr: *const (), to: ConstVec<u8>, layout: &StructLayout, ) -> ConstVec<u8> { let mut i = 0; let field_count = layout.data.len(); let mut to = write_map(to, field_count); while i < field_count { // Serialize the field at the offset pointer in the struct let StructFieldLayout { name, offset, layout, } = &layout.data[i]; to = write_map_key(to, name); let field = ptr.wrapping_byte_add(*offset as _); to = serialize_const_ptr(field, to, layout); i += 1; } to } /// Deserialize a struct type into the out buffer at the offset passed in. Returns a new version of the buffer with the data added. 
pub(crate) const fn deserialize_const_struct<'a>( from: &'a [u8], layout: &StructLayout, out: &mut [MaybeUninit<u8>], ) -> Option<&'a [u8]> { let Ok((map, from)) = take_map(from) else { return None; }; let mut i = 0; while i < layout.data.len() { // Deserialize the field at the offset pointer in the struct let StructFieldLayout { name, offset, layout, } = &layout.data[i]; let Ok(Some(from)) = map.find(name) else { return None; }; let Some((_, field_bytes)) = out.split_at_mut_checked(*offset) else { return None; }; if deserialize_const_ptr(from, layout, field_bytes).is_none() { return None; } i += 1; } Some(from) } macro_rules! impl_serialize_const_tuple { ($($generic:ident: $generic_number:expr),*) => { impl_serialize_const_tuple!(@impl ($($generic,)*) = $($generic: $generic_number),*); }; (@impl $inner:ty = $($generic:ident: $generic_number:expr),*) => { unsafe impl<$($generic: SerializeConst),*> SerializeConst for ($($generic,)*) { const MEMORY_LAYOUT: Layout = { Layout::Struct(StructLayout { size: std::mem::size_of::<($($generic,)*)>(), data: &[ $( StructFieldLayout::new(stringify!($generic_number), std::mem::offset_of!($inner, $generic_number), $generic::MEMORY_LAYOUT), )* ], }) }; } }; } impl_serialize_const_tuple!(T1: 0); impl_serialize_const_tuple!(T1: 0, T2: 1); impl_serialize_const_tuple!(T1: 0, T2: 1, T3: 2); impl_serialize_const_tuple!(T1: 0, T2: 1, T3: 2, T4: 3); impl_serialize_const_tuple!(T1: 0, T2: 1, T3: 2, T4: 3, T5: 4); impl_serialize_const_tuple!(T1: 0, T2: 1, T3: 2, T4: 3, T5: 4, T6: 5); impl_serialize_const_tuple!(T1: 0, T2: 1, T3: 2, T4: 3, T5: 4, T6: 5, T7: 6); impl_serialize_const_tuple!(T1: 0, T2: 1, T3: 2, T4: 3, T5: 4, T6: 5, T7: 6, T8: 7); impl_serialize_const_tuple!(T1: 0, T2: 1, T3: 2, T4: 3, T5: 4, T6: 5, T7: 6, T8: 7, T9: 8); impl_serialize_const_tuple!(T1: 0, T2: 1, T3: 2, T4: 3, T5: 4, T6: 5, T7: 6, T8: 7, T9: 8, T10: 9);
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/const-serialize/src/array.rs
packages/const-serialize/src/array.rs
use crate::*; /// The layout for a constant sized array. The array layout is just a length and an item layout. #[derive(Debug, Copy, Clone)] pub struct ArrayLayout { pub(crate) len: usize, pub(crate) item_layout: &'static Layout, } impl ArrayLayout { /// Create a new array layout pub const fn new(len: usize, item_layout: &'static Layout) -> Self { Self { len, item_layout } } } unsafe impl<const N: usize, T: SerializeConst> SerializeConst for [T; N] { const MEMORY_LAYOUT: Layout = Layout::Array(ArrayLayout { len: N, item_layout: &T::MEMORY_LAYOUT, }); } /// Serialize a constant sized array that is stored at the pointer passed in pub(crate) const unsafe fn serialize_const_array( ptr: *const (), mut to: ConstVec<u8>, layout: &ArrayLayout, ) -> ConstVec<u8> { let len = layout.len; let mut i = 0; to = write_array(to, len); while i < len { let field = ptr.wrapping_byte_offset((i * layout.item_layout.size()) as _); to = serialize_const_ptr(field, to, layout.item_layout); i += 1; } to } /// Deserialize an array type into the out buffer at the offset passed in. Returns a new version of the buffer with the data added. pub(crate) const fn deserialize_const_array<'a>( from: &'a [u8], layout: &ArrayLayout, mut out: &mut [MaybeUninit<u8>], ) -> Option<&'a [u8]> { let item_layout = layout.item_layout; let Ok((_, mut from)) = take_array(from) else { return None; }; let mut i = 0; while i < layout.len { let Some(new_from) = deserialize_const_ptr(from, item_layout, out) else { return None; }; let Some((_, item_out)) = out.split_at_mut_checked(item_layout.size()) else { return None; }; out = item_out; from = new_from; i += 1; } Some(from) }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/const-serialize/src/lib.rs
packages/const-serialize/src/lib.rs
#![doc = include_str!("../README.md")] #![warn(missing_docs)] use std::mem::MaybeUninit; mod const_buffers; pub use const_buffers::ConstReadBuffer; mod cbor; mod const_vec; mod r#enum; pub use r#enum::*; mod r#struct; pub use r#struct::*; mod primitive; pub use primitive::*; mod list; pub use list::*; mod array; pub use array::*; mod str; pub use str::*; pub use const_serialize_macro::SerializeConst; pub use const_vec::ConstVec; use crate::cbor::{ str_eq, take_array, take_bytes, take_map, take_number, take_str, write_array, write_bytes, write_map, write_map_key, write_number, }; /// The layout for a type. This layout defines a sequence of locations and reversed or not bytes. These bytes will be copied from during serialization and copied into during deserialization. #[derive(Debug, Copy, Clone)] pub enum Layout { /// An enum layout Enum(EnumLayout), /// A struct layout Struct(StructLayout), /// An array layout Array(ArrayLayout), /// A primitive layout Primitive(PrimitiveLayout), /// A dynamically sized list layout List(ListLayout), } impl Layout { /// The size of the type in bytes. pub const fn size(&self) -> usize { match self { Layout::Enum(layout) => layout.size, Layout::Struct(layout) => layout.size, Layout::Array(layout) => layout.len * layout.item_layout.size(), Layout::List(layout) => layout.size, Layout::Primitive(layout) => layout.size, } } } /// A trait for types that can be serialized and deserialized in const. /// /// # Safety /// The layout must accurately describe the memory layout of the type pub unsafe trait SerializeConst: Sized { /// The memory layout of the type. This type must have plain old data; no pointers or references. 
const MEMORY_LAYOUT: Layout; /// Assert that the memory layout of the type is the same as the size of the type const _ASSERT: () = assert!(Self::MEMORY_LAYOUT.size() == std::mem::size_of::<Self>()); } /// Serialize a pointer to a type that is stored at the pointer passed in const unsafe fn serialize_const_ptr( ptr: *const (), to: ConstVec<u8>, layout: &Layout, ) -> ConstVec<u8> { match layout { Layout::Enum(layout) => serialize_const_enum(ptr, to, layout), Layout::Struct(layout) => serialize_const_struct(ptr, to, layout), Layout::Array(layout) => serialize_const_array(ptr, to, layout), Layout::List(layout) => serialize_const_list(ptr, to, layout), Layout::Primitive(layout) => serialize_const_primitive(ptr, to, layout), } } /// Serialize a type into a buffer /// /// # Example /// /// ```rust /// use const_serialize::{ConstVec, SerializeConst, serialize_const}; /// /// #[derive(Clone, Copy, Debug, PartialEq, SerializeConst)] /// struct Struct { /// a: u32, /// b: u8, /// c: u32, /// } /// /// let mut buffer = ConstVec::new(); /// buffer = serialize_const(&Struct { /// a: 0x11111111, /// b: 0x22, /// c: 0x33333333, /// }, buffer); /// assert_eq!(buffer.as_ref(), &[0xa3, 0x61, 0x61, 0x1a, 0x11, 0x11, 0x11, 0x11, 0x61, 0x62, 0x18, 0x22, 0x61, 0x63, 0x1a, 0x33, 0x33, 0x33, 0x33]); /// ``` #[must_use = "The data is serialized into the returned buffer"] pub const fn serialize_const<T: SerializeConst>(data: &T, to: ConstVec<u8>) -> ConstVec<u8> { let ptr = data as *const T as *const (); // SAFETY: The pointer is valid and the layout is correct unsafe { serialize_const_ptr(ptr, to, &T::MEMORY_LAYOUT) } } /// Deserialize a type into the out buffer at the offset passed in. Returns a new version of the buffer with the data added. 
const fn deserialize_const_ptr<'a>( from: &'a [u8], layout: &Layout, out: &mut [MaybeUninit<u8>], ) -> Option<&'a [u8]> { match layout { Layout::Enum(layout) => deserialize_const_enum(from, layout, out), Layout::Struct(layout) => deserialize_const_struct(from, layout, out), Layout::Array(layout) => deserialize_const_array(from, layout, out), Layout::List(layout) => deserialize_const_list(from, layout, out), Layout::Primitive(layout) => deserialize_const_primitive(from, layout, out), } } /// Deserialize a type into the output buffer. Accepts `(type, ConstVec<u8>)` as input and returns `Option<(&'a [u8], Instance of type)>` /// /// # Example /// ```rust /// # use const_serialize::{deserialize_const, serialize_const, ConstVec, SerializeConst}; /// #[derive(Clone, Copy, Debug, PartialEq, SerializeConst)] /// struct Struct { /// a: u32, /// b: u8, /// c: u32, /// d: u32, /// } /// /// let mut buffer = ConstVec::new(); /// buffer = serialize_const(&Struct { /// a: 0x11111111, /// b: 0x22, /// c: 0x33333333, /// d: 0x44444444, /// }, buffer); /// let buf = buffer.as_ref(); /// assert_eq!(deserialize_const!(Struct, buf).unwrap().1, Struct { /// a: 0x11111111, /// b: 0x22, /// c: 0x33333333, /// d: 0x44444444, /// }); /// ``` #[macro_export] macro_rules! deserialize_const { ($type:ty, $buffer:expr) => { unsafe { const __SIZE: usize = std::mem::size_of::<$type>(); $crate::deserialize_const_raw::<__SIZE, $type>($buffer) } }; } /// Deserialize a buffer into a type. This will return None if the buffer doesn't have enough data to fill the type. 
/// # Safety /// N must be `std::mem::size_of::<T>()` #[must_use = "The data is deserialized from the input buffer"] pub const unsafe fn deserialize_const_raw<const N: usize, T: SerializeConst>( from: &[u8], ) -> Option<(&[u8], T)> { // Create uninitized memory with the size of the type let mut out = [MaybeUninit::uninit(); N]; // Fill in the bytes into the buffer for the type let Some(from) = deserialize_const_ptr(from, &T::MEMORY_LAYOUT, &mut out) else { return None; }; // Now that the memory is filled in, transmute it into the type Some((from, unsafe { std::mem::transmute_copy::<[MaybeUninit<u8>; N], T>(&out) })) } /// Check if the serialized representation of two items are the same pub const fn serialize_eq<T: SerializeConst>(first: &T, second: &T) -> bool { let first_serialized = ConstVec::<u8>::new(); let first_serialized = serialize_const(first, first_serialized); let second_serialized = ConstVec::<u8>::new(); let second_serialized = serialize_const(second, second_serialized); let first_buf = first_serialized.as_ref(); let second_buf = second_serialized.as_ref(); if first_buf.len() != second_buf.len() { return false; } let mut i = 0; while i < first_buf.len() { if first_buf[i] != second_buf[i] { return false; } i += 1; } true }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/const-serialize/src/list.rs
packages/const-serialize/src/list.rs
use crate::*; /// The layout for a dynamically sized list. The list layout is just a length and an item layout. #[derive(Debug, Copy, Clone)] pub struct ListLayout { /// The size of the struct backing the list pub(crate) size: usize, /// The byte offset of the length field len_offset: usize, /// The layout of the length field len_layout: PrimitiveLayout, /// The byte offset of the data field data_offset: usize, /// The layout of the data field data_layout: ArrayLayout, } impl ListLayout { /// Create a new list layout pub const fn new( size: usize, len_offset: usize, len_layout: PrimitiveLayout, data_offset: usize, data_layout: ArrayLayout, ) -> Self { Self { size, len_offset, len_layout, data_offset, data_layout, } } } /// Serialize a dynamically sized list that is stored at the pointer passed in pub(crate) const unsafe fn serialize_const_list( ptr: *const (), mut to: ConstVec<u8>, layout: &ListLayout, ) -> ConstVec<u8> { // Read the length of the list let len_ptr = ptr.wrapping_byte_offset(layout.len_offset as _); let len = layout.len_layout.read(len_ptr as *const u8) as usize; let data_ptr = ptr.wrapping_byte_offset(layout.data_offset as _); let item_layout = layout.data_layout.item_layout; // If the item size is 1, deserialize as bytes directly if item_layout.size() == 1 { let slice = std::slice::from_raw_parts(data_ptr as *const u8, len); to = write_bytes(to, slice); } // Otherwise, deserialize as a list of items else { let mut i = 0; to = write_array(to, len); while i < len { let item = data_ptr.wrapping_byte_offset((i * item_layout.size()) as _); to = serialize_const_ptr(item, to, item_layout); i += 1; } } to } /// Deserialize a list type into the out buffer at the offset passed in. Returns a new version of the buffer with the data added. 
pub(crate) const fn deserialize_const_list<'a>( from: &'a [u8], layout: &ListLayout, out: &mut [MaybeUninit<u8>], ) -> Option<&'a [u8]> { let Some((_, len_out)) = out.split_at_mut_checked(layout.len_offset) else { return None; }; // If the list items are only one byte, serialize as bytes directly let item_layout = layout.data_layout.item_layout; if item_layout.size() == 1 { let Ok((bytes, new_from)) = take_bytes(from) else { return None; }; // Write out the length of the list layout.len_layout.write(bytes.len() as u32, len_out); let Some((_, data_out)) = out.split_at_mut_checked(layout.data_offset) else { return None; }; let mut offset = 0; while offset < bytes.len() { data_out[offset] = MaybeUninit::new(bytes[offset]); offset += 1; } Some(new_from) } // Otherwise, serialize as an list of objects else { let Ok((len, mut from)) = take_array(from) else { return None; }; // Write out the length of the list layout.len_layout.write(len as u32, len_out); let Some((_, mut data_out)) = out.split_at_mut_checked(layout.data_offset) else { return None; }; let mut i = 0; while i < len { let Some(new_from) = deserialize_const_ptr(from, item_layout, data_out) else { return None; }; let Some((_, item_out)) = data_out.split_at_mut_checked(item_layout.size()) else { return None; }; data_out = item_out; from = new_from; i += 1; } Some(from) } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/const-serialize/src/cbor.rs
packages/const-serialize/src/cbor.rs
//! Const serialization utilities for the CBOR data format. //! //! ## Overview of the format //! //! Const serialize only supports a subset of the CBOR format, specifically the major types: //! - UnsignedInteger //! - NegativeInteger //! - Bytes //! - String //! - Array //! //! Each item in CBOR starts with a leading byte, which determines the type of the item and additional information. //! The additional information is encoded in the lower 5 bits of the leading byte and generally indicates either a //! small number or how many of the next bytes are part of the first number. //! //! Resources: //! The spec: <https://www.rfc-editor.org/rfc/rfc8949.html> //! A playground to check examples against: <https://cbor.me/> use crate::ConstVec; /// Each item in CBOR starts with a leading byte, which determines the type of the item and additional information. /// /// The first 3 bits of the leading byte are the major type, which indicates the type of the item. #[repr(u8)] #[derive(PartialEq)] enum MajorType { /// An unsigned integer in the range 0..2^64. The value of the number is encoded in the remaining bits of the leading byte and any additional bytes. UnsignedInteger = 0, /// An unsigned integer in the range -2^64..-1. The value of the number is encoded in the remaining bits of the leading byte and any additional bytes NegativeInteger = 1, /// A byte sequence. The number of bytes in the sequence is encoded in the remaining bits of the leading byte and any additional bytes. Bytes = 2, /// A text sequence. The number of bytes in the sequence is encoded in the remaining bits of the leading byte and any additional bytes. Text = 3, /// A dynamically sized array of non-uniform data items. The number of items in the array is encoded in the remaining bits of the leading byte and any additional bytes. Array = 4, /// A map of pairs of data items. The first item in each pair is the key and the second item is the value. 
The number of items in the array is encoded in the remaining bits of the leading byte and any additional bytes. Map = 5, /// Tagged values - not supported Tagged = 6, /// Floating point values - not supported Float = 7, } impl MajorType { /// The bitmask for the major type in the leading byte const MASK: u8 = 0b0001_1111; const fn from_byte(byte: u8) -> Self { match byte >> 5 { 0 => MajorType::UnsignedInteger, 1 => MajorType::NegativeInteger, 2 => MajorType::Bytes, 3 => MajorType::Text, 4 => MajorType::Array, 5 => MajorType::Map, 6 => MajorType::Tagged, 7 => MajorType::Float, _ => panic!("Invalid major type"), } } } /// Get the length of the item in bytes without deserialization. const fn item_length(bytes: &[u8]) -> Result<usize, ()> { let [head, rest @ ..] = bytes else { return Err(()); }; let major = MajorType::from_byte(*head); let additional_information = *head & MajorType::MASK; let length_of_item = match major { // The length of the number is the total of: // - The length of the number (which may be 0 if the number is encoded in additional information) MajorType::UnsignedInteger | MajorType::NegativeInteger => { get_length_of_number(additional_information) as usize } // The length of the text or bytes is the total of: // - The length of the number that denotes the length of the text or bytes // - The length of the text or bytes themselves MajorType::Text | MajorType::Bytes => { let length_of_number = get_length_of_number(additional_information); let Ok((length_of_bytes, _)) = grab_u64_with_byte_length(rest, length_of_number, additional_information) else { return Err(()); }; length_of_number as usize + length_of_bytes as usize } // The length of the map is the total of: // - The length of the number that denotes the number of items // - The length of the pairs of items themselves MajorType::Array | MajorType::Map => { let length_of_number = get_length_of_number(additional_information); let Ok((length_of_items, _)) = grab_u64_with_byte_length(rest, 
length_of_number, additional_information) else { return Err(()); }; let mut total_length = length_of_number as usize; let mut items_left = length_of_items * if let MajorType::Map = major { 2 } else { 1 }; while items_left > 0 { let Some((_, after)) = rest.split_at_checked(total_length) else { return Err(()); }; let Ok(item_length) = item_length(after) else { return Err(()); }; total_length += item_length; items_left -= 1; } total_length } _ => return Err(()), }; let length_of_head = 1; Ok(length_of_head + length_of_item) } /// Read a number from the buffer, returning the number and the remaining bytes. pub(crate) const fn take_number(bytes: &[u8]) -> Result<(i64, &[u8]), ()> { let [head, rest @ ..] = bytes else { return Err(()); }; let major = MajorType::from_byte(*head); let additional_information = *head & MajorType::MASK; match major { MajorType::UnsignedInteger => { let Ok((number, rest)) = grab_u64(rest, additional_information) else { return Err(()); }; Ok((number as i64, rest)) } MajorType::NegativeInteger => { let Ok((number, rest)) = grab_u64(rest, additional_information) else { return Err(()); }; Ok((-(1 + number as i64), rest)) } _ => Err(()), } } /// Write a number to the buffer pub(crate) const fn write_number<const MAX_SIZE: usize>( vec: ConstVec<u8, MAX_SIZE>, number: i64, ) -> ConstVec<u8, MAX_SIZE> { match number { 0.. => write_major_type_and_u64(vec, MajorType::UnsignedInteger, number as u64), ..0 => write_major_type_and_u64(vec, MajorType::NegativeInteger, (-(number + 1)) as u64), } } /// Write the major type along with a number to the buffer. The first byte /// contains both the major type and the additional information which contains /// either the number itself or the number of extra bytes the number occupies. 
const fn write_major_type_and_u64<const MAX_SIZE: usize>( vec: ConstVec<u8, MAX_SIZE>, major: MajorType, number: u64, ) -> ConstVec<u8, MAX_SIZE> { let major = (major as u8) << 5; match number { // For numbers less than 24, store the number in the lower bits // of the first byte 0..24 => { let additional_information = number as u8; let byte = major | additional_information; vec.push(byte) } // For larger numbers, store the number of extra bytes the number occupies 24.. => { let log2_additional_bytes = log2_bytes_for_number(number); let additional_bytes = 1 << log2_additional_bytes; let additional_information = log2_additional_bytes + 24; let byte = major | additional_information; let mut vec = vec.push(byte); let mut byte = 0; while byte < additional_bytes { vec = vec.push((number >> ((additional_bytes - byte - 1) * 8)) as u8); byte += 1; } vec } } } /// Find the number of bytes required to store a number and return the log2 of the number of bytes. /// This is the number stored in the additional information field if the number is more than 24. const fn log2_bytes_for_number(number: u64) -> u8 { let required_bytes = ((64 - number.leading_zeros()).div_ceil(8)) as u8; #[allow(clippy::match_overlapping_arm)] match required_bytes { ..=1 => 0, ..=2 => 1, ..=4 => 2, _ => 3, } } /// Take bytes from a slice and return the bytes and the remaining slice. pub(crate) const fn take_bytes(bytes: &[u8]) -> Result<(&[u8], &[u8]), ()> { let [head, rest @ ..] = bytes else { return Err(()); }; let major = MajorType::from_byte(*head); let additional_information = *head & MajorType::MASK; if let MajorType::Bytes = major { take_bytes_from(rest, additional_information) } else { Err(()) } } /// Write bytes to a buffer and return the new buffer. 
pub(crate) const fn write_bytes<const MAX_SIZE: usize>( vec: ConstVec<u8, MAX_SIZE>, bytes: &[u8], ) -> ConstVec<u8, MAX_SIZE> { let vec = write_major_type_and_u64(vec, MajorType::Bytes, bytes.len() as u64); vec.extend(bytes) } /// Take a string from a buffer and return the string and the remaining buffer. pub(crate) const fn take_str(bytes: &[u8]) -> Result<(&str, &[u8]), ()> { let [head, rest @ ..] = bytes else { return Err(()); }; let major = MajorType::from_byte(*head); let additional_information = *head & MajorType::MASK; if let MajorType::Text = major { let Ok((bytes, rest)) = take_bytes_from(rest, additional_information) else { return Err(()); }; let Ok(string) = std::str::from_utf8(bytes) else { return Err(()); }; Ok((string, rest)) } else { Err(()) } } /// Write a string to a buffer and return the new buffer. pub(crate) const fn write_str<const MAX_SIZE: usize>( vec: ConstVec<u8, MAX_SIZE>, string: &str, ) -> ConstVec<u8, MAX_SIZE> { let vec = write_major_type_and_u64(vec, MajorType::Text, string.len() as u64); vec.extend(string.as_bytes()) } /// Take the length and header of an array from a buffer and return the length and the remaining buffer. /// You must loop over the elements of the array and parse them outside of this method. pub(crate) const fn take_array(bytes: &[u8]) -> Result<(usize, &[u8]), ()> { let [head, rest @ ..] = bytes else { return Err(()); }; let major = MajorType::from_byte(*head); let additional_information = *head & MajorType::MASK; if let MajorType::Array = major { let Ok((length, rest)) = take_len_from(rest, additional_information) else { return Err(()); }; Ok((length as usize, rest)) } else { Err(()) } } /// Write the header and length of an array. pub(crate) const fn write_array<const MAX_SIZE: usize>( vec: ConstVec<u8, MAX_SIZE>, len: usize, ) -> ConstVec<u8, MAX_SIZE> { write_major_type_and_u64(vec, MajorType::Array, len as u64) } /// Write the header and length of a map. 
pub(crate) const fn write_map<const MAX_SIZE: usize>( vec: ConstVec<u8, MAX_SIZE>, len: usize, ) -> ConstVec<u8, MAX_SIZE> { // We write 2 * len as the length of the map because each key-value pair is a separate entry. write_major_type_and_u64(vec, MajorType::Map, len as u64) } /// Write the key of a map entry. pub(crate) const fn write_map_key<const MAX_SIZE: usize>( value: ConstVec<u8, MAX_SIZE>, key: &str, ) -> ConstVec<u8, MAX_SIZE> { write_str(value, key) } /// Take a map from the byte slice and return the map reference and the remaining bytes. pub(crate) const fn take_map<'a>(bytes: &'a [u8]) -> Result<(MapRef<'a>, &'a [u8]), ()> { let [head, rest @ ..] = bytes else { return Err(()); }; let major = MajorType::from_byte(*head); let additional_information = *head & MajorType::MASK; if let MajorType::Map = major { let Ok((length, rest)) = take_len_from(rest, additional_information) else { return Err(()); }; let mut after_map = rest; let mut items_left = length * 2; while items_left > 0 { // Skip the value let Ok(len) = item_length(after_map) else { return Err(()); }; let Some((_, rest)) = after_map.split_at_checked(len) else { return Err(()); }; after_map = rest; items_left -= 1; } Ok((MapRef::new(rest, length as usize), after_map)) } else { Err(()) } } /// A reference to a CBOR map. pub(crate) struct MapRef<'a> { /// The bytes of the map. pub(crate) bytes: &'a [u8], /// The length of the map. pub(crate) len: usize, } impl<'a> MapRef<'a> { /// Create a new map reference. const fn new(bytes: &'a [u8], len: usize) -> Self { Self { bytes, len } } /// Find a key in the map and return the buffer associated with it. 
pub(crate) const fn find(&self, key: &str) -> Result<Option<&[u8]>, ()> { let mut bytes = self.bytes; let mut items_left = self.len; while items_left > 0 { let Ok((str, rest)) = take_str(bytes) else { return Err(()); }; if str_eq(key, str) { return Ok(Some(rest)); } // Skip the value associated with the key we don't care about let Ok(len) = item_length(rest) else { return Err(()); }; let Some((_, rest)) = rest.split_at_checked(len) else { return Err(()); }; bytes = rest; items_left -= 1; } Ok(None) } } /// Compare two strings for equality at compile time. pub(crate) const fn str_eq(a: &str, b: &str) -> bool { let a_bytes = a.as_bytes(); let b_bytes = b.as_bytes(); let a_len = a_bytes.len(); let b_len = b_bytes.len(); if a_len != b_len { return false; } let mut index = 0; while index < a_len { if a_bytes[index] != b_bytes[index] { return false; } index += 1; } true } /// Take the length from the additional information byte and return it along with the remaining bytes. const fn take_len_from(rest: &[u8], additional_information: u8) -> Result<(u64, &[u8]), ()> { match additional_information { // If additional_information < 24, the argument's value is the value of the additional information. 0..24 => Ok((additional_information as u64, rest)), // If additional_information is between 24 and 28, the argument's value is held in the n following bytes. 24..28 => { let Ok((number, rest)) = grab_u64(rest, additional_information) else { return Err(()); }; Ok((number, rest)) } _ => Err(()), } } /// Take a list of bytes from the byte slice and the additional information byte /// and return the bytes and the remaining bytes. 
pub(crate) const fn take_bytes_from( rest: &[u8], additional_information: u8, ) -> Result<(&[u8], &[u8]), ()> { let Ok((number, rest)) = grab_u64(rest, additional_information) else { return Err(()); }; let Some((bytes, rest)) = rest.split_at_checked(number as usize) else { return Err(()); }; Ok((bytes, rest)) } /// Find the length of the number based on the additional information byte. const fn get_length_of_number(additional_information: u8) -> u8 { match additional_information { 0..24 => 0, 24..28 => 1 << (additional_information - 24), _ => 0, } } /// Read a u64 from the byte slice and the additional information byte. const fn grab_u64(rest: &[u8], additional_information: u8) -> Result<(u64, &[u8]), ()> { grab_u64_with_byte_length( rest, get_length_of_number(additional_information), additional_information, ) } /// Read a u64 from the byte slice and the additional information byte along with the byte length. const fn grab_u64_with_byte_length( mut rest: &[u8], byte_length: u8, additional_information: u8, ) -> Result<(u64, &[u8]), ()> { match byte_length { 0 => Ok((additional_information as u64, rest)), n => { let mut value = 0; let mut count = 0; while count < n { let [next, remaining @ ..] 
= rest else { return Err(()); }; value = (value << 8) | *next as u64; rest = remaining; count += 1; } Ok((value, rest)) } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_byte() { for byte in 0..=255 { let bytes = if byte < 24 { [byte, 0] } else { [24, byte] }; let (item, _) = take_number(&bytes).unwrap(); assert_eq!(item, byte as _); } for byte in 1..=255 { let bytes = if byte < 24 { [(byte - 1) | 0b0010_0000, 0] } else { [0b0010_0000 | 24, byte - 1] }; let (item, _) = take_number(&bytes).unwrap(); assert_eq!(item, -(byte as i64)); } } #[test] fn test_byte_roundtrip() { for byte in 0..=255 { let vec = write_number(ConstVec::new(), byte as _); println!("{vec:?}"); let (item, _) = take_number(vec.as_ref()).unwrap(); assert_eq!(item, byte as _); } for byte in 0..=255 { let vec = write_number(ConstVec::new(), -(byte as i64)); let (item, _) = take_number(vec.as_ref()).unwrap(); assert_eq!(item, -(byte as i64)); } } #[test] fn test_number_roundtrip() { for _ in 0..100 { let value = rand::random::<i64>(); let vec = write_number(ConstVec::new(), value); let (item, _) = take_number(vec.as_ref()).unwrap(); assert_eq!(item, value); } } #[test] fn test_bytes_roundtrip() { for _ in 0..100 { let len = (rand::random::<u8>() % 100) as usize; let bytes = rand::random::<[u8; 100]>(); let vec = write_bytes(ConstVec::new(), &bytes[..len]); let (item, _) = take_bytes(vec.as_ref()).unwrap(); assert_eq!(item, &bytes[..len]); } } #[test] fn test_array_roundtrip() { for _ in 0..100 { let len = (rand::random::<u8>() % 100) as usize; let mut vec = write_array(ConstVec::new(), len); for i in 0..len { vec = write_number(vec, i as _); } let (len, mut remaining) = take_array(vec.as_ref()).unwrap(); for i in 0..len { let (item, rest) = take_number(remaining).unwrap(); remaining = rest; assert_eq!(item, i as i64); } } } #[test] fn test_map_roundtrip() { use rand::prelude::SliceRandom; for _ in 0..100 { let len = (rand::random::<u8>() % 10) as usize; let mut vec = 
write_map(ConstVec::new(), len); let mut random_order_indexes = (0..len).collect::<Vec<_>>(); random_order_indexes.shuffle(&mut rand::rng()); for &i in &random_order_indexes { vec = write_map_key(vec, &i.to_string()); vec = write_number(vec, i as _); } println!("len: {}", len); println!("Map: {:?}", vec); let (map, remaining) = take_map(vec.as_ref()).unwrap(); println!("remaining: {:?}", remaining); assert!(remaining.is_empty()); for i in 0..len { let key = i.to_string(); let key_location = map .find(&key) .expect("encoding is valid") .expect("key exists"); let (value, _) = take_number(key_location).unwrap(); assert_eq!(value, i as i64); } } } #[test] fn test_item_length_str() { #[rustfmt::skip] let input = [ /* text(1) */ 0x61, /* "1" */ 0x31, /* text(1) */ 0x61, /* "1" */ 0x31, ]; let Ok(length) = item_length(&input) else { panic!("Failed to calculate length"); }; assert_eq!(length, 2); } #[test] fn test_item_length_map() { #[rustfmt::skip] let input = [ /* map(1) */ 0xA1, /* text(1) */ 0x61, /* "A" */ 0x41, /* map(2) */ 0xA2, /* text(3) */ 0x63, /* "one" */ 0x6F, 0x6E, 0x65, /* unsigned(286331153) */ 0x1A, 0x11, 0x11, 0x11, 0x11, /* text(3) */ 0x63, /* "two" */ 0x74, 0x77, 0x6F, /* unsigned(34) */ 0x18, 0x22, ]; let Ok(length) = item_length(&input) else { panic!("Failed to calculate length"); }; assert_eq!(length, input.len()); } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/const-serialize/src/primitive.rs
packages/const-serialize/src/primitive.rs
use crate::*; use std::mem::MaybeUninit; /// The layout for a primitive type. The bytes will be reversed if the target is big endian. #[derive(Debug, Copy, Clone)] pub struct PrimitiveLayout { pub(crate) size: usize, } impl PrimitiveLayout { /// Create a new primitive layout pub const fn new(size: usize) -> Self { Self { size } } /// Read the value from the given pointer /// /// # Safety /// The pointer must be valid for reads of `self.size` bytes. pub const unsafe fn read(self, byte_ptr: *const u8) -> u32 { let mut value = 0; let mut offset = 0; while offset < self.size { // If the bytes are reversed, walk backwards from the end of the number when pushing bytes let byte = if cfg!(target_endian = "big") { unsafe { byte_ptr .wrapping_byte_add((self.size - offset - 1) as _) .read() } } else { unsafe { byte_ptr.wrapping_byte_add(offset as _).read() } }; value |= (byte as u32) << (offset * 8); offset += 1; } value } /// Write the value to the given buffer pub const fn write(self, value: u32, out: &mut [MaybeUninit<u8>]) { let bytes = value.to_ne_bytes(); let mut offset = 0; while offset < self.size { out[offset] = MaybeUninit::new(bytes[offset]); offset += 1; } } } macro_rules! 
impl_serialize_const { ($type:ty) => { unsafe impl SerializeConst for $type { const MEMORY_LAYOUT: Layout = Layout::Primitive(PrimitiveLayout { size: std::mem::size_of::<$type>(), }); } }; } impl_serialize_const!(u8); impl_serialize_const!(u16); impl_serialize_const!(u32); impl_serialize_const!(u64); impl_serialize_const!(i8); impl_serialize_const!(i16); impl_serialize_const!(i32); impl_serialize_const!(i64); impl_serialize_const!(bool); impl_serialize_const!(f32); impl_serialize_const!(f64); /// Serialize a primitive type that is stored at the pointer passed in pub(crate) const unsafe fn serialize_const_primitive( ptr: *const (), to: ConstVec<u8>, layout: &PrimitiveLayout, ) -> ConstVec<u8> { let ptr = ptr as *const u8; let mut offset = 0; let mut i64_bytes = [0u8; 8]; while offset < layout.size { // If the bytes are reversed, walk backwards from the end of the number when pushing bytes let byte = unsafe { if cfg!(any(target_endian = "big", feature = "test-big-endian")) { ptr.wrapping_byte_offset((layout.size - offset - 1) as _) .read() } else { ptr.wrapping_byte_offset(offset as _).read() } }; i64_bytes[offset] = byte; offset += 1; } let number = i64::from_ne_bytes(i64_bytes); write_number(to, number) } /// Deserialize a primitive type into the out buffer at the offset passed in. Returns a new version of the buffer with the data added. pub(crate) const fn deserialize_const_primitive<'a>( from: &'a [u8], layout: &PrimitiveLayout, out: &mut [MaybeUninit<u8>], ) -> Option<&'a [u8]> { let mut offset = 0; let Ok((number, from)) = take_number(from) else { return None; }; let bytes = number.to_le_bytes(); while offset < layout.size { // If the bytes are reversed, walk backwards from the end of the number when filling in bytes let byte = bytes[offset]; if cfg!(any(target_endian = "big", feature = "test-big-endian")) { out[layout.size - offset - 1] = MaybeUninit::new(byte); } else { out[offset] = MaybeUninit::new(byte); } offset += 1; } Some(from) }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/const-serialize/src/const_vec.rs
packages/const-serialize/src/const_vec.rs
#![allow(dead_code)] use std::{fmt::Debug, hash::Hash, mem::MaybeUninit}; use crate::ConstReadBuffer; const DEFAULT_MAX_SIZE: usize = 2usize.pow(10); /// [`ConstVec`] is a version of [`Vec`] that is usable in const contexts. It has /// a fixed maximum size, but it can can grow and shrink within that size limit /// as needed. /// /// # Example /// ```rust /// # use const_serialize::ConstVec; /// const EMPTY: ConstVec<u8> = ConstVec::new(); /// // Methods that mutate the vector will return a new vector /// const ONE: ConstVec<u8> = EMPTY.push(1); /// const TWO: ConstVec<u8> = ONE.push(2); /// const THREE: ConstVec<u8> = TWO.push(3); /// const FOUR: ConstVec<u8> = THREE.push(4); /// // If a value is also returned, that will be placed in a tuple in the return value /// // along with the new vector /// const POPPED: (ConstVec<u8>, Option<u8>) = FOUR.pop(); /// assert_eq!(POPPED.0, THREE); /// assert_eq!(POPPED.1.unwrap(), 4); /// ``` pub struct ConstVec<T, const MAX_SIZE: usize = DEFAULT_MAX_SIZE> { memory: [MaybeUninit<T>; MAX_SIZE], len: u32, } impl<T: Clone, const MAX_SIZE: usize> Clone for ConstVec<T, MAX_SIZE> { fn clone(&self) -> Self { let mut cloned = Self::new_with_max_size(); for i in 0..self.len as usize { cloned = cloned.push(self.get(i).unwrap().clone()); } cloned } } impl<T: Copy, const MAX_SIZE: usize> Copy for ConstVec<T, MAX_SIZE> {} impl<T: PartialEq, const MAX_SIZE: usize> PartialEq for ConstVec<T, MAX_SIZE> { fn eq(&self, other: &Self) -> bool { self.as_ref() == other.as_ref() } } impl<T: Hash, const MAX_SIZE: usize> Hash for ConstVec<T, MAX_SIZE> { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.as_ref().hash(state) } } impl<T, const MAX_SIZE: usize> Default for ConstVec<T, MAX_SIZE> { fn default() -> Self { Self::new_with_max_size() } } impl<T: Debug, const MAX_SIZE: usize> Debug for ConstVec<T, MAX_SIZE> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ConstVec") .field("len", &self.len) 
.field("memory", &self.as_ref()) .finish() } } impl<T> ConstVec<T> { /// Create a new empty [`ConstVec`] pub const fn new() -> Self { Self::new_with_max_size() } } impl<T, const MAX_SIZE: usize> ConstVec<T, MAX_SIZE> { /// Create a new empty [`ConstVec`] with a custom maximum size /// /// # Example /// ```rust /// # use const_serialize::ConstVec; /// const EMPTY: ConstVec<u8, 10> = ConstVec::new_with_max_size(); /// ``` pub const fn new_with_max_size() -> Self { Self { memory: [const { MaybeUninit::uninit() }; MAX_SIZE], len: 0, } } /// Push a value onto the end of the [`ConstVec`] /// /// # Example /// ```rust /// # use const_serialize::ConstVec; /// const EMPTY: ConstVec<u8> = ConstVec::new(); /// const ONE: ConstVec<u8> = EMPTY.push(1); /// assert_eq!(ONE.as_ref(), &[1]); /// ``` pub const fn push(mut self, value: T) -> Self { self.memory[self.len as usize] = MaybeUninit::new(value); self.len += 1; self } /// Extend the [`ConstVec`] with the contents of a slice /// /// # Example /// ```rust /// # use const_serialize::ConstVec; /// const EMPTY: ConstVec<u8> = ConstVec::new(); /// const ONE: ConstVec<u8> = EMPTY.extend(&[1, 2, 3]); /// assert_eq!(ONE.as_ref(), &[1, 2, 3]); /// ``` pub const fn extend(mut self, other: &[T]) -> Self where T: Copy, { let mut i = 0; while i < other.len() { self = self.push(other[i]); i += 1; } self } /// Get a reference to the value at the given index /// /// # Example /// ```rust /// # use const_serialize::ConstVec; /// const EMPTY: ConstVec<u8> = ConstVec::new(); /// const ONE: ConstVec<u8> = EMPTY.push(1); /// assert_eq!(ONE.get(0), Some(&1)); /// ``` pub const fn get(&self, index: usize) -> Option<&T> { if index < self.len as usize { Some(unsafe { &*self.memory[index].as_ptr() }) } else { None } } /// Get the length of the [`ConstVec`] /// /// # Example /// ```rust /// # use const_serialize::ConstVec; /// const EMPTY: ConstVec<u8> = ConstVec::new(); /// const ONE: ConstVec<u8> = EMPTY.push(1); /// assert_eq!(ONE.len(), 1); /// ``` 
pub const fn len(&self) -> usize { self.len as usize } /// Check if the [`ConstVec`] is empty /// /// # Example /// ```rust /// # use const_serialize::ConstVec; /// const EMPTY: ConstVec<u8> = ConstVec::new(); /// assert!(EMPTY.is_empty()); /// const ONE: ConstVec<u8> = EMPTY.push(1); /// assert!(!ONE.is_empty()); /// ``` pub const fn is_empty(&self) -> bool { self.len == 0 } /// Get a reference to the underlying slice /// /// # Example /// ```rust /// # use const_serialize::ConstVec; /// const EMPTY: ConstVec<u8> = ConstVec::new(); /// const ONE: ConstVec<u8> = EMPTY.push(1); /// assert_eq!(ONE.as_ref(), &[1]); /// ``` pub const fn as_ref(&self) -> &[T] { unsafe { &*(self.memory.split_at(self.len as usize).0 as *const [MaybeUninit<T>] as *const [T]) } } /// Swap the values at the given indices /// /// # Example /// ```rust /// # use const_serialize::ConstVec; /// const EMPTY: ConstVec<u8> = ConstVec::new(); /// const ONE: ConstVec<u8> = EMPTY.push(1); /// const TWO: ConstVec<u8> = ONE.push(2); /// const THREE: ConstVec<u8> = TWO.swap(0, 1); /// assert_eq!(THREE.as_ref(), &[2, 1]); /// ``` pub const fn swap(mut self, first: usize, second: usize) -> Self where T: Copy, { assert!(first < self.len as usize); assert!(second < self.len as usize); let temp = self.memory[first]; self.memory[first] = self.memory[second]; self.memory[second] = temp; self } /// Pop a value off the end of the [`ConstVec`] /// /// # Example /// ```rust /// # use const_serialize::ConstVec; /// const EMPTY: ConstVec<u8> = ConstVec::new(); /// const ONE: ConstVec<u8> = EMPTY.push(1); /// const TWO: ConstVec<u8> = ONE.push(2); /// const THREE: ConstVec<u8> = TWO.push(3); /// const POPPED: (ConstVec<u8>, Option<u8>) = THREE.pop(); /// assert_eq!(POPPED.0, TWO); /// assert_eq!(POPPED.1.unwrap(), 3); /// ``` pub const fn pop(mut self) -> (Self, Option<T>) where T: Copy, { let value = if self.len > 0 { self.len -= 1; let last = self.len as usize; let last_value = unsafe { 
self.memory[last].assume_init() }; Some(last_value) } else { None }; (self, value) } /// Remove the value at the given index /// /// # Example /// ```rust /// # use const_serialize::ConstVec; /// const EMPTY: ConstVec<u8> = ConstVec::new(); /// const ONE: ConstVec<u8> = EMPTY.push(1); /// const TWO: ConstVec<u8> = ONE.push(2); /// const THREE: ConstVec<u8> = TWO.push(3); /// const REMOVED: (ConstVec<u8>, Option<u8>) = THREE.remove(1); /// assert_eq!(REMOVED.0.as_ref(), &[1, 3]); /// assert_eq!(REMOVED.1.unwrap(), 2); /// ``` pub const fn remove(mut self, index: usize) -> (Self, Option<T>) where T: Copy, { let value = if index < self.len as usize { let value = unsafe { self.memory[index].assume_init() }; let mut swap_index = index; while swap_index + 1 < self.len as usize { self.memory[swap_index] = self.memory[swap_index + 1]; swap_index += 1; } self.len -= 1; Some(value) } else { None }; (self, value) } /// Set the value at the given index /// /// # Example /// ```rust /// # use const_serialize::ConstVec; /// const EMPTY: ConstVec<u8> = ConstVec::new(); /// const ONE: ConstVec<u8> = EMPTY.push(1); /// const TWO: ConstVec<u8> = ONE.set(0, 2); /// assert_eq!(TWO.as_ref(), &[2]); /// ``` pub const fn set(mut self, index: usize, value: T) -> Self { if index >= self.len as usize { panic!("Out of bounds") } self.memory[index] = MaybeUninit::new(value); self } pub(crate) const fn into_parts(self) -> ([MaybeUninit<T>; MAX_SIZE], usize) { (self.memory, self.len as usize) } /// Split the [`ConstVec`] into two at the given index /// /// # Example /// ```rust /// # use const_serialize::ConstVec; /// const EMPTY: ConstVec<u8> = ConstVec::new(); /// const ONE: ConstVec<u8> = EMPTY.push(1); /// const TWO: ConstVec<u8> = ONE.push(2); /// const THREE: ConstVec<u8> = TWO.push(3); /// const SPLIT: (ConstVec<u8>, ConstVec<u8>) = THREE.split_at(1); /// assert_eq!(SPLIT.0.as_ref(), &[1]); /// assert_eq!(SPLIT.1.as_ref(), &[2, 3]); /// ``` pub const fn split_at(&self, index: usize) -> 
(Self, Self) where T: Copy, { assert!(index <= self.len as usize); let slice = self.as_ref(); let (left, right) = slice.split_at(index); let mut left_vec = Self::new_with_max_size(); let mut i = 0; while i < left.len() { left_vec = left_vec.push(left[i]); i += 1; } let mut right_vec = Self::new_with_max_size(); i = 0; while i < right.len() { right_vec = right_vec.push(right[i]); i += 1; } (left_vec, right_vec) } } impl<const MAX_SIZE: usize> ConstVec<u8, MAX_SIZE> { /// Convert the [`ConstVec`] into a [`ConstReadBuffer`] /// /// # Example /// ```rust /// # use const_serialize::{ConstVec, ConstReadBuffer}; /// const EMPTY: ConstVec<u8> = ConstVec::new(); /// const ONE: ConstVec<u8> = EMPTY.push(1); /// const TWO: ConstVec<u8> = ONE.push(2); /// const READ: ConstReadBuffer = TWO.read(); /// ``` pub const fn read(&self) -> ConstReadBuffer<'_> { ConstReadBuffer::new(self.as_ref()) } } #[test] fn test_const_vec() { const VEC: ConstVec<u32> = { let mut vec = ConstVec::new(); vec = vec.push(1234); vec = vec.push(5678); vec }; assert_eq!(VEC.as_ref(), &[1234, 5678]); let vec = VEC; let (vec, value) = vec.pop(); assert_eq!(value, Some(5678)); let (vec, value) = vec.pop(); assert_eq!(value, Some(1234)); let (vec, value) = vec.pop(); assert_eq!(value, None); assert_eq!(vec.as_ref(), &[]); } #[test] fn test_const_vec_len() { const VEC: ConstVec<u32> = { let mut vec = ConstVec::new(); vec = vec.push(1234); vec = vec.push(5678); vec }; assert_eq!(VEC.len(), 2); } #[test] fn test_const_vec_get() { const VEC: ConstVec<u32> = { let mut vec = ConstVec::new(); vec = vec.push(1234); vec = vec.push(5678); vec }; assert_eq!(VEC.get(0), Some(&1234)); assert_eq!(VEC.get(1), Some(&5678)); assert_eq!(VEC.get(2), None); } #[test] fn test_const_vec_swap() { const VEC: ConstVec<u32> = { let mut vec = ConstVec::new(); vec = vec.push(1234); vec = vec.push(5678); vec }; let mut vec = VEC; assert_eq!(vec.as_ref(), &[1234, 5678]); vec = vec.swap(0, 1); assert_eq!(vec.as_ref(), &[5678, 1234]); vec = 
vec.swap(0, 1); assert_eq!(vec.as_ref(), &[1234, 5678]); } #[test] fn test_const_vec_remove() { const VEC: ConstVec<u32> = { let mut vec = ConstVec::new(); vec = vec.push(1234); vec = vec.push(5678); vec }; let vec = VEC; println!("{:?}", vec); assert_eq!(vec.as_ref(), &[1234, 5678]); let (vec, value) = vec.remove(0); assert_eq!(value, Some(1234)); assert_eq!(vec.as_ref(), &[5678]); let (vec, value) = vec.remove(0); assert_eq!(value, Some(5678)); assert_eq!(vec.as_ref(), &[]); } #[test] fn test_const_vec_extend() { const VEC: ConstVec<u32> = { let mut vec = ConstVec::new(); vec = vec.push(1234); vec = vec.push(5678); vec = vec.extend(&[91011, 1213]); vec }; let vec = VEC; println!("{:?}", vec); assert_eq!(vec.as_ref(), &[1234, 5678, 91011, 1213]); }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/const-serialize/src/const_buffers.rs
packages/const-serialize/src/const_buffers.rs
/// A buffer that can be read from at compile time. This is very similar to [Cursor](std::io::Cursor) but is /// designed to be used in const contexts. #[derive(Debug, Clone, Copy, PartialEq)] pub struct ConstReadBuffer<'a> { location: usize, memory: &'a [u8], } impl<'a> ConstReadBuffer<'a> { /// Create a new buffer from a byte slice pub const fn new(memory: &'a [u8]) -> Self { Self { location: 0, memory, } } /// Get the next byte from the buffer. Returns `None` if the buffer is empty. /// This will return the new version of the buffer with the first byte removed. pub const fn get(mut self) -> Option<(Self, u8)> { if self.location >= self.memory.len() { return None; } let value = self.memory[self.location]; self.location += 1; Some((self, value)) } /// Get a reference to the underlying byte slice pub const fn as_ref(&self) -> &[u8] { self.memory } /// Get a slice of the buffer from the current location to the end of the buffer pub const fn remaining(&self) -> &[u8] { self.memory.split_at(self.location).1 } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/const-serialize/src/enum.rs
packages/const-serialize/src/enum.rs
use crate::*; /// Serialize an enum that is stored at the pointer passed in pub(crate) const unsafe fn serialize_const_enum( ptr: *const (), mut to: ConstVec<u8>, layout: &EnumLayout, ) -> ConstVec<u8> { let byte_ptr = ptr as *const u8; let discriminant = layout.discriminant.read(byte_ptr); let mut i = 0; while i < layout.variants.len() { // If the variant is the discriminated one, serialize it let EnumVariant { tag, name, data, .. } = &layout.variants[i]; if discriminant == *tag { to = write_map(to, 1); to = write_map_key(to, name); let data_ptr = ptr.wrapping_byte_offset(layout.variants_offset as _); to = serialize_const_struct(data_ptr, to, data); break; } i += 1; } to } /// The layout for an enum. The enum layout is just a discriminate size and a tag layout. #[derive(Debug, Copy, Clone)] pub struct EnumLayout { pub(crate) size: usize, discriminant: PrimitiveLayout, variants_offset: usize, variants: &'static [EnumVariant], } impl EnumLayout { /// Create a new enum layout pub const fn new( size: usize, discriminant: PrimitiveLayout, variants: &'static [EnumVariant], ) -> Self { let mut max_align = 1; let mut i = 0; while i < variants.len() { let EnumVariant { align, .. } = &variants[i]; if *align > max_align { max_align = *align; } i += 1; } let variants_offset_raw = discriminant.size; let padding = (max_align - (variants_offset_raw % max_align)) % max_align; let variants_offset = variants_offset_raw + padding; assert!(variants_offset % max_align == 0); Self { size, discriminant, variants_offset, variants, } } } /// The layout for an enum variant. The enum variant layout is just a struct layout with a tag and alignment. 
#[derive(Debug, Copy, Clone)] pub struct EnumVariant { name: &'static str, // Note: tags may not be sequential tag: u32, data: StructLayout, align: usize, } impl EnumVariant { /// Create a new enum variant layout pub const fn new(name: &'static str, tag: u32, data: StructLayout, align: usize) -> Self { Self { name, tag, data, align, } } } /// Deserialize an enum type into the out buffer at the offset passed in. Returns a new version of the buffer with the data added. pub(crate) const fn deserialize_const_enum<'a>( from: &'a [u8], layout: &EnumLayout, out: &mut [MaybeUninit<u8>], ) -> Option<&'a [u8]> { // First, deserialize the map let Ok((map, remaining)) = take_map(from) else { return None; }; // Then get the only field which is the tag let Ok((deserilized_name, from)) = take_str(map.bytes) else { return None; }; // Then, deserialize the variant let mut i = 0; let mut matched_variant = false; while i < layout.variants.len() { // If the variant is the discriminated one, deserialize it let EnumVariant { name, data, tag, .. } = &layout.variants[i]; if str_eq(deserilized_name, name) { layout.discriminant.write(*tag, out); let Some((_, out)) = out.split_at_mut_checked(layout.variants_offset) else { return None; }; if deserialize_const_struct(from, data, out).is_none() { return None; } matched_variant = true; break; } i += 1; } if !matched_variant { return None; } Some(remaining) }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/const-serialize/src/str.rs
packages/const-serialize/src/str.rs
use crate::*; use std::{char, fmt::Debug, hash::Hash, mem::MaybeUninit}; const MAX_STR_SIZE: usize = 256; /// A string that is stored in a constant sized buffer that can be serialized and deserialized at compile time #[derive(Clone, Copy)] pub struct ConstStr { bytes: [MaybeUninit<u8>; MAX_STR_SIZE], len: u32, } impl Debug for ConstStr { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ConstStr") .field("str", &self.as_str()) .finish() } } #[cfg(feature = "serde")] mod serde_bytes { use serde::{Deserialize, Serialize, Serializer}; use crate::ConstStr; impl Serialize for ConstStr { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { serializer.serialize_str(self.as_str()) } } impl<'de> Deserialize<'de> for ConstStr { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { let s = String::deserialize(deserializer)?; Ok(ConstStr::new(&s)) } } } unsafe impl SerializeConst for ConstStr { const MEMORY_LAYOUT: Layout = Layout::List(ListLayout::new( std::mem::size_of::<Self>(), std::mem::offset_of!(Self, len), PrimitiveLayout { size: std::mem::size_of::<u32>(), }, std::mem::offset_of!(Self, bytes), ArrayLayout { len: MAX_STR_SIZE, item_layout: &Layout::Primitive(PrimitiveLayout { size: std::mem::size_of::<u8>(), }), }, )); } #[cfg(feature = "const-serialize-07")] unsafe impl const_serialize_07::SerializeConst for ConstStr { const MEMORY_LAYOUT: const_serialize_07::Layout = const_serialize_07::Layout::Struct(const_serialize_07::StructLayout::new( std::mem::size_of::<Self>(), &[ const_serialize_07::StructFieldLayout::new( std::mem::offset_of!(Self, bytes), const_serialize_07::Layout::List(const_serialize_07::ListLayout::new( MAX_STR_SIZE, &const_serialize_07::Layout::Primitive( const_serialize_07::PrimitiveLayout::new(std::mem::size_of::<u8>()), ), )), ), const_serialize_07::StructFieldLayout::new( std::mem::offset_of!(Self, len), 
const_serialize_07::Layout::Primitive( const_serialize_07::PrimitiveLayout::new(std::mem::size_of::<u32>()), ), ), ], )); } impl ConstStr { /// Create a new constant string pub const fn new(s: &str) -> Self { let str_bytes = s.as_bytes(); // This is serialized as a constant sized array in const-serialize-07 which requires all memory to be initialized let mut bytes = if cfg!(feature = "const-serialize-07") { [MaybeUninit::new(0); MAX_STR_SIZE] } else { [MaybeUninit::uninit(); MAX_STR_SIZE] }; let mut i = 0; while i < str_bytes.len() { bytes[i] = MaybeUninit::new(str_bytes[i]); i += 1; } Self { bytes, len: str_bytes.len() as u32, } } /// Get the bytes of the initialized portion of the string const fn bytes(&self) -> &[u8] { // Safety: All bytes up to the pointer are initialized unsafe { &*(self.bytes.split_at(self.len as usize).0 as *const [MaybeUninit<u8>] as *const [u8]) } } /// Get a reference to the string pub const fn as_str(&self) -> &str { let str_bytes = self.bytes(); match std::str::from_utf8(str_bytes) { Ok(s) => s, Err(_) => panic!( "Invalid utf8; ConstStr should only ever be constructed from valid utf8 strings" ), } } /// Get the length of the string pub const fn len(&self) -> usize { self.len as usize } /// Check if the string is empty pub const fn is_empty(&self) -> bool { self.len == 0 } /// Push a character onto the string pub const fn push(self, byte: char) -> Self { assert!(byte.is_ascii(), "Only ASCII bytes are supported"); let (bytes, len) = char_to_bytes(byte); let (str, _) = bytes.split_at(len); let Ok(str) = std::str::from_utf8(str) else { panic!("Invalid utf8; char_to_bytes should always return valid utf8 bytes") }; self.push_str(str) } /// Push a str onto the string pub const fn push_str(self, str: &str) -> Self { let Self { mut bytes, len } = self; assert!( str.len() + len as usize <= MAX_STR_SIZE, "String is too long" ); let str_bytes = str.as_bytes(); let new_len = len as usize + str_bytes.len(); let mut i = 0; while i < str_bytes.len() { 
bytes[len as usize + i] = MaybeUninit::new(str_bytes[i]); i += 1; } Self { bytes, len: new_len as u32, } } /// Split the string at a byte index. The byte index must be a char boundary pub const fn split_at(self, index: usize) -> (Self, Self) { let (left, right) = self.bytes().split_at(index); let left = match std::str::from_utf8(left) { Ok(s) => s, Err(_) => { panic!("Invalid utf8; you cannot split at a byte that is not a char boundary") } }; let right = match std::str::from_utf8(right) { Ok(s) => s, Err(_) => { panic!("Invalid utf8; you cannot split at a byte that is not a char boundary") } }; (Self::new(left), Self::new(right)) } /// Split the string at the last occurrence of a character pub const fn rsplit_once(&self, char: char) -> Option<(Self, Self)> { let str = self.as_str(); let mut index = str.len() - 1; // First find the bytes we are searching for let (char_bytes, len) = char_to_bytes(char); let (char_bytes, _) = char_bytes.split_at(len); let bytes = str.as_bytes(); // Then walk backwards from the end of the string loop { let byte = bytes[index]; // Look for char boundaries in the string and check if the bytes match if let Some(char_boundary_len) = utf8_char_boundary_to_char_len(byte) { // Split up the string into three sections: [before_char, in_char, after_char] let (before_char, after_index) = bytes.split_at(index); let (in_char, after_char) = after_index.split_at(char_boundary_len as usize); if in_char.len() != char_boundary_len as usize { panic!("in_char.len() should always be equal to char_boundary_len as usize") } // Check if the bytes for the current char and the target char match let mut in_char_eq = true; let mut i = 0; let min_len = if in_char.len() < char_bytes.len() { in_char.len() } else { char_bytes.len() }; while i < min_len { in_char_eq &= in_char[i] == char_bytes[i]; i += 1; } // If they do, convert the bytes to strings and return the split strings if in_char_eq { let Ok(before_char_str) = std::str::from_utf8(before_char) else { 
panic!("Invalid utf8; utf8_char_boundary_to_char_len should only return Some when the byte is a character boundary") }; let Ok(after_char_str) = std::str::from_utf8(after_char) else { panic!("Invalid utf8; utf8_char_boundary_to_char_len should only return Some when the byte is a character boundary") }; return Some((Self::new(before_char_str), Self::new(after_char_str))); } } match index.checked_sub(1) { Some(new_index) => index = new_index, None => return None, } } } /// Split the string at the first occurrence of a character pub const fn split_once(&self, char: char) -> Option<(Self, Self)> { let str = self.as_str(); let mut index = 0; // First find the bytes we are searching for let (char_bytes, len) = char_to_bytes(char); let (char_bytes, _) = char_bytes.split_at(len); let bytes = str.as_bytes(); // Then walk forwards from the start of the string while index < bytes.len() { let byte = bytes[index]; // Look for char boundaries in the string and check if the bytes match if let Some(char_boundary_len) = utf8_char_boundary_to_char_len(byte) { // Split up the string into three sections: [before_char, in_char, after_char] let (before_char, after_index) = bytes.split_at(index); let (in_char, after_char) = after_index.split_at(char_boundary_len as usize); if in_char.len() != char_boundary_len as usize { panic!("in_char.len() should always be equal to char_boundary_len as usize") } // Check if the bytes for the current char and the target char match let mut in_char_eq = true; let mut i = 0; let min_len = if in_char.len() < char_bytes.len() { in_char.len() } else { char_bytes.len() }; while i < min_len { in_char_eq &= in_char[i] == char_bytes[i]; i += 1; } // If they do, convert the bytes to strings and return the split strings if in_char_eq { let Ok(before_char_str) = std::str::from_utf8(before_char) else { panic!("Invalid utf8; utf8_char_boundary_to_char_len should only return Some when the byte is a character boundary") }; let Ok(after_char_str) = 
std::str::from_utf8(after_char) else { panic!("Invalid utf8; utf8_char_boundary_to_char_len should only return Some when the byte is a character boundary") }; return Some((Self::new(before_char_str), Self::new(after_char_str))); } } index += 1 } None } } impl PartialEq for ConstStr { fn eq(&self, other: &Self) -> bool { self.as_str() == other.as_str() } } impl Eq for ConstStr {} impl PartialOrd for ConstStr { fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { Some(self.cmp(other)) } } impl Ord for ConstStr { fn cmp(&self, other: &Self) -> std::cmp::Ordering { self.as_str().cmp(other.as_str()) } } impl Hash for ConstStr { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.as_str().hash(state); } } #[test] fn test_rsplit_once() { let str = ConstStr::new("hello world"); assert_eq!( str.rsplit_once(' '), Some((ConstStr::new("hello"), ConstStr::new("world"))) ); let unicode_str = ConstStr::new("hi😀hello😀world😀world"); assert_eq!( unicode_str.rsplit_once('😀'), Some((ConstStr::new("hi😀hello😀world"), ConstStr::new("world"))) ); assert_eq!(unicode_str.rsplit_once('❌'), None); for _ in 0..100 { let random_str: String = (0..rand::random::<u8>() % 50) .map(|_| rand::random::<char>()) .collect(); let konst = ConstStr::new(&random_str); let mut seen_chars = std::collections::HashSet::new(); for char in random_str.chars().rev() { let (char_bytes, len) = char_to_bytes(char); let char_bytes = &char_bytes[..len]; assert_eq!(char_bytes, char.to_string().as_bytes()); if seen_chars.contains(&char) { continue; } seen_chars.insert(char); let (correct_left, correct_right) = random_str.rsplit_once(char).unwrap(); let (left, right) = konst.rsplit_once(char).unwrap(); println!("splitting {random_str:?} at {char:?}"); assert_eq!(left.as_str(), correct_left); assert_eq!(right.as_str(), correct_right); } } } const CONTINUED_CHAR_MASK: u8 = 0b10000000; const BYTE_CHAR_BOUNDARIES: [u8; 4] = [0b00000000, 0b11000000, 0b11100000, 0b11110000]; // Const version of 
https://doc.rust-lang.org/src/core/char/methods.rs.html#1765-1797 const fn char_to_bytes(char: char) -> ([u8; 4], usize) { let code = char as u32; let len = char.len_utf8(); let mut bytes = [0; 4]; match len { 1 => { bytes[0] = code as u8; } 2 => { bytes[0] = ((code >> 6) & 0x1F) as u8 | BYTE_CHAR_BOUNDARIES[1]; bytes[1] = (code & 0x3F) as u8 | CONTINUED_CHAR_MASK; } 3 => { bytes[0] = ((code >> 12) & 0x0F) as u8 | BYTE_CHAR_BOUNDARIES[2]; bytes[1] = ((code >> 6) & 0x3F) as u8 | CONTINUED_CHAR_MASK; bytes[2] = (code & 0x3F) as u8 | CONTINUED_CHAR_MASK; } 4 => { bytes[0] = ((code >> 18) & 0x07) as u8 | BYTE_CHAR_BOUNDARIES[3]; bytes[1] = ((code >> 12) & 0x3F) as u8 | CONTINUED_CHAR_MASK; bytes[2] = ((code >> 6) & 0x3F) as u8 | CONTINUED_CHAR_MASK; bytes[3] = (code & 0x3F) as u8 | CONTINUED_CHAR_MASK; } _ => panic!( "encode_utf8: need more than 4 bytes to encode the unicode character, but the buffer has 4 bytes" ), }; (bytes, len) } #[test] fn fuzz_char_to_bytes() { use std::char; for _ in 0..100 { let char = rand::random::<char>(); let (bytes, len) = char_to_bytes(char); let str = std::str::from_utf8(&bytes[..len]).unwrap(); assert_eq!(char.to_string(), str); } } const fn utf8_char_boundary_to_char_len(byte: u8) -> Option<u8> { match byte { 0b00000000..=0b01111111 => Some(1), 0b11000000..=0b11011111 => Some(2), 0b11100000..=0b11101111 => Some(3), 0b11110000..=0b11111111 => Some(4), _ => None, } } #[test] fn fuzz_utf8_byte_to_char_len() { for _ in 0..100 { let random_string: String = (0..rand::random::<u8>()) .map(|_| rand::random::<char>()) .collect(); let bytes = random_string.as_bytes(); let chars: std::collections::HashMap<_, _> = random_string.char_indices().collect(); for (i, byte) in bytes.iter().enumerate() { match utf8_char_boundary_to_char_len(*byte) { Some(char_len) => { let char = chars .get(&i) .unwrap_or_else(|| panic!("{byte:b} is not a character boundary")); assert_eq!(char.len_utf8(), char_len as usize); } None => { assert!(!chars.contains_key(&i), 
"{byte:b} is a character boundary"); } } } } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/const-serialize/tests/eq.rs
packages/const-serialize/tests/eq.rs
use const_serialize::{serialize_eq, SerializeConst}; #[derive(Clone, Copy, Debug, PartialEq, SerializeConst)] struct Struct { a: u32, b: u8, c: u32, d: Enum, } #[derive(Clone, Copy, Debug, PartialEq, SerializeConst)] #[repr(C, u8)] enum Enum { A { one: u32, two: u16 }, B { one: u8, two: u16 } = 15, } #[test] fn const_eq() { const { let data = [ Struct { a: 0x11111111, b: 0x22, c: 0x33333333, d: Enum::A { one: 0x44444444, two: 0x5555, }, }, Struct { a: 123, b: 9, c: 38, d: Enum::B { one: 0x44, two: 0x555, }, }, Struct { a: 9, b: 123, c: 39, d: Enum::B { one: 0x46, two: 0x555, }, }, ]; let mut other = data; other[2].a += 1; if serialize_eq(&data, &other) { panic!("data should be different"); } if !serialize_eq(&data, &data) { panic!("data should be the same"); } } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/const-serialize/tests/lists.rs
packages/const-serialize/tests/lists.rs
use const_serialize::{deserialize_const, serialize_const, ConstVec}; #[test] fn test_serialize_const_layout_list() { let mut buf = ConstVec::new(); buf = serialize_const(&[1u8, 2, 3] as &[u8; 3], buf); println!("{:?}", buf.as_ref()); let buf = buf.as_ref(); assert_eq!(deserialize_const!([u8; 3], buf).unwrap().1, [1, 2, 3]) } #[test] fn test_serialize_const_layout_nested_lists() { let mut buf = ConstVec::new(); buf = serialize_const( &[[1u8, 2, 3], [4u8, 5, 6], [7u8, 8, 9]] as &[[u8; 3]; 3], buf, ); println!("{:?}", buf.as_ref()); let buf = buf.as_ref(); assert_eq!( deserialize_const!([[u8; 3]; 3], buf).unwrap().1, [[1, 2, 3], [4, 5, 6], [7, 8, 9]] ); } #[test] fn test_serialize_list_too_little_data() { let mut buf = ConstVec::new(); buf = buf.push(1); let buf = buf.as_ref(); assert_eq!(deserialize_const!([u64; 10], buf), None); }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/const-serialize/tests/structs.rs
packages/const-serialize/tests/structs.rs
use const_serialize::{deserialize_const, serialize_const, ConstVec, SerializeConst};
use std::mem::MaybeUninit;

// Sanity-check the byte-level layout assumption the deserializer relies on:
// writing each field's little-endian bytes at its `offset_of!` offset and
// transmuting the buffer must reconstruct the struct.
#[test]
fn test_transmute_bytes_to_struct() {
    struct MyStruct {
        a: u32,
        b: u8,
        c: u32,
        d: u32,
    }
    const SIZE: usize = std::mem::size_of::<MyStruct>();
    let mut out = [MaybeUninit::uninit(); SIZE];
    let first_align = std::mem::offset_of!(MyStruct, a);
    let second_align = std::mem::offset_of!(MyStruct, b);
    let third_align = std::mem::offset_of!(MyStruct, c);
    let fourth_align = std::mem::offset_of!(MyStruct, d);
    for (i, byte) in 1234u32.to_le_bytes().iter().enumerate() {
        out[i + first_align] = MaybeUninit::new(*byte);
    }
    for (i, byte) in 12u8.to_le_bytes().iter().enumerate() {
        out[i + second_align] = MaybeUninit::new(*byte);
    }
    for (i, byte) in 13u32.to_le_bytes().iter().enumerate() {
        out[i + third_align] = MaybeUninit::new(*byte);
    }
    for (i, byte) in 14u32.to_le_bytes().iter().enumerate() {
        out[i + fourth_align] = MaybeUninit::new(*byte);
    }
    let out = unsafe { std::mem::transmute_copy::<[MaybeUninit<u8>; SIZE], MyStruct>(&out) };
    assert_eq!(out.a, 1234);
    assert_eq!(out.b, 12);
    assert_eq!(out.c, 13);
    assert_eq!(out.d, 14);
}

// Round-trip arrays of nested structs, both at compile time (const asserts)
// and at runtime.
#[test]
fn test_serialize_const_layout_struct_list() {
    #[derive(Clone, Copy, Debug, PartialEq, SerializeConst)]
    struct Struct {
        a: u32,
        b: u8,
        c: u32,
        d: u32,
    }

    impl Struct {
        // Field-wise equality usable in const contexts (PartialEq is not const).
        #[allow(dead_code)]
        const fn equal(&self, other: &Struct) -> bool {
            self.a == other.a && self.b == other.b && self.c == other.c && self.d == other.d
        }
    }

    #[derive(Clone, Copy, Debug, PartialEq, SerializeConst)]
    struct OtherStruct {
        a: u32,
        b: u8,
        c: Struct,
        d: u32,
    }

    impl OtherStruct {
        #[allow(dead_code)]
        const fn equal(&self, other: &OtherStruct) -> bool {
            self.a == other.a && self.b == other.b && self.c.equal(&other.c) && self.d == other.d
        }
    }

    const INNER_DATA: Struct = Struct {
        a: 0x11111111,
        b: 0x22,
        c: 0x33333333,
        d: 0x44444444,
    };
    const DATA: [OtherStruct; 3] = [
        OtherStruct {
            a: 0x11111111,
            b: 0x22,
            c: INNER_DATA,
            d: 0x44444444,
        },
        OtherStruct {
            a: 0x111111,
            b: 0x23,
            c: INNER_DATA,
            d: 0x44444444,
        },
        OtherStruct {
            a: 0x11111111,
            b: 0x11,
            c: INNER_DATA,
            d: 0x44441144,
        },
    ];

    // Compile-time round-trip of the array of structs.
    const _ASSERT: () = {
        let mut buf = ConstVec::new();
        buf = serialize_const(&DATA, buf);
        let buf = buf.as_ref();
        let [first, second, third] = match deserialize_const!([OtherStruct; 3], buf) {
            Some((_, data)) => data,
            None => panic!("data mismatch"),
        };
        if !(first.equal(&DATA[0]) && second.equal(&DATA[1]) && third.equal(&DATA[2])) {
            panic!("data mismatch");
        }
    };

    // Compile-time round-trip of a doubly nested array.
    const _ASSERT_2: () = {
        let mut buf = ConstVec::new();
        const DATA_AGAIN: [[OtherStruct; 3]; 3] = [DATA, DATA, DATA];
        buf = serialize_const(&DATA_AGAIN, buf);
        let buf = buf.as_ref();
        let [first, second, third] = match deserialize_const!([[OtherStruct; 3]; 3], buf) {
            Some((_, data)) => data,
            None => panic!("data mismatch"),
        };
        if !(first[0].equal(&DATA[0]) && first[1].equal(&DATA[1]) && first[2].equal(&DATA[2])) {
            panic!("data mismatch");
        }
        if !(second[0].equal(&DATA[0]) && second[1].equal(&DATA[1]) && second[2].equal(&DATA[2])) {
            panic!("data mismatch");
        }
        if !(third[0].equal(&DATA[0]) && third[1].equal(&DATA[1]) && third[2].equal(&DATA[2])) {
            panic!("data mismatch");
        }
    };

    // Runtime round-trip of the same data.
    let mut buf = ConstVec::new();
    buf = serialize_const(&DATA, buf);
    println!("{:?}", buf.as_ref());
    let buf = buf.as_ref();
    let (_, data2) = deserialize_const!([OtherStruct; 3], buf).unwrap();
    assert_eq!(DATA, data2);
}

// Round-trip a tuple struct containing a nested named struct.
#[test]
fn test_serialize_const_layout_struct() {
    #[derive(Debug, PartialEq, SerializeConst)]
    struct Struct {
        a: u32,
        b: u8,
        c: u32,
        d: u32,
    }

    #[derive(Debug, PartialEq, SerializeConst)]
    struct OtherStruct(u32, u8, Struct, u32);

    println!("{:?}", OtherStruct::MEMORY_LAYOUT);

    let data = Struct {
        a: 0x11111111,
        b: 0x22,
        c: 0x33333333,
        d: 0x44444444,
    };
    let data = OtherStruct(0x11111111, 0x22, data, 0x44444444);
    let mut buf = ConstVec::new();
    buf = serialize_const(&data, buf);
    println!("{:?}", buf.as_ref());
    let buf = buf.as_ref();
    let (_, data2) = deserialize_const!(OtherStruct, buf).unwrap();
    assert_eq!(data, data2);
}

// Schema evolution: data serialized with an extra/reordered field must still
// deserialize into the older struct (fields are matched by name, not order).
#[test]
fn test_adding_struct_field_non_breaking() {
    #[derive(Debug, PartialEq, SerializeConst)]
    struct Initial {
        a: u32,
        b: u8,
    }

    #[derive(Debug, PartialEq, SerializeConst)]
    struct New {
        c: u32,
        b: u8,
        a: u32,
    }

    let data = New {
        a: 0x11111111,
        b: 0x22,
        c: 0x33333333,
    };
    let mut buf = ConstVec::new();
    buf = serialize_const(&data, buf);
    let buf = buf.as_ref();
    // The new struct should be able to deserialize into the initial struct
    let (_, data2) = deserialize_const!(Initial, buf).unwrap();
    assert_eq!(
        Initial {
            a: data.a,
            b: data.b,
        },
        data2
    );
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/const-serialize/tests/primitive.rs
packages/const-serialize/tests/primitive.rs
use const_serialize::{deserialize_const, serialize_const, ConstVec}; #[test] fn test_serialize_const_layout_primitive() { let mut buf = ConstVec::new(); buf = serialize_const(&1234u32, buf); let buf = buf.as_ref(); assert_eq!(deserialize_const!(u32, buf).unwrap().1, 1234u32); let mut buf = ConstVec::new(); buf = serialize_const(&1234u64, buf); let buf = buf.as_ref(); assert_eq!(deserialize_const!(u64, buf).unwrap().1, 1234u64); let mut buf = ConstVec::new(); buf = serialize_const(&1234i32, buf); let buf = buf.as_ref(); assert_eq!(deserialize_const!(i32, buf).unwrap().1, 1234i32); let mut buf = ConstVec::new(); buf = serialize_const(&1234i64, buf); let buf = buf.as_ref(); assert_eq!(deserialize_const!(i64, buf).unwrap().1, 1234i64); let mut buf = ConstVec::new(); buf = serialize_const(&true, buf); assert_eq!(buf.as_ref(), [1u8]); let buf = buf.as_ref(); assert!(deserialize_const!(bool, buf).unwrap().1); let mut buf = ConstVec::new(); buf = serialize_const(&0.631f32, buf); let buf = buf.as_ref(); assert_eq!(deserialize_const!(f32, buf).unwrap().1, 0.631); } #[test] fn test_serialize_primitive_too_little_data() { let mut buf = ConstVec::new(); buf = buf.push(1); buf = buf.push(1); buf = buf.push(1); let buf = buf.as_ref(); assert_eq!(deserialize_const!([u64; 10], buf), None); }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/const-serialize/tests/tuples.rs
packages/const-serialize/tests/tuples.rs
use const_serialize::{deserialize_const, serialize_const, ConstVec}; #[test] fn test_serialize_const_layout_tuple() { let mut buf = ConstVec::new(); buf = serialize_const(&(1234u32, 5678u16), buf); let buf = buf.as_ref(); assert_eq!( deserialize_const!((u32, u16), buf).unwrap().1, (1234u32, 5678u16) ); let mut buf = ConstVec::new(); buf = serialize_const(&(1234f64, 5678u16, 90u8), buf); let buf = buf.as_ref(); assert_eq!( deserialize_const!((f64, u16, u8), buf).unwrap().1, (1234f64, 5678u16, 90u8) ); let mut buf = ConstVec::new(); buf = serialize_const(&(1234u32, 5678u16, 90u8, 1000000f64), buf); let buf = buf.as_ref(); assert_eq!( deserialize_const!((u32, u16, u8, f64), buf).unwrap().1, (1234u32, 5678u16, 90u8, 1000000f64) ); }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/const-serialize/tests/enum.rs
packages/const-serialize/tests/enum.rs
use const_serialize::{deserialize_const, serialize_const, ConstVec, SerializeConst};
use std::mem::MaybeUninit;

// Sanity-check the `#[repr(C, u8)]` layout assumption: writing the tag byte at
// offset 0 and the variant payload after the alignment padding, then
// transmuting, must reconstruct the enum value.
#[test]
fn test_transmute_bytes_to_enum() {
    #[derive(Clone, Copy, Debug, PartialEq)]
    #[repr(C, u8)]
    enum Enum<T> {
        A { one: u32, two: u16 },
        B { one: u8, two: T } = 15,
    }

    #[repr(C)]
    #[derive(Debug, PartialEq)]
    struct A {
        one: u32,
        two: u16,
    }

    #[repr(C)]
    #[derive(Debug, PartialEq)]
    struct B<T> {
        one: u8,
        two: T,
    }

    const SIZE: usize = std::mem::size_of::<Enum<u16>>();
    let mut out = [MaybeUninit::uninit(); SIZE];
    let discriminate_size = std::mem::size_of::<u8>();
    let tag_align = 0;
    // The payload must be aligned to the most-aligned variant.
    let union_alignment = std::mem::align_of::<A>().max(std::mem::align_of::<B<u16>>());
    let data_align = (discriminate_size / union_alignment) + union_alignment;
    let a_one_align = std::mem::offset_of!(A, one);
    let a_two_align = std::mem::offset_of!(A, two);
    let b_one_align = std::mem::offset_of!(B<u16>, one);
    let b_two_align = std::mem::offset_of!(B<u16>, two);

    // Variant A (tag 0): write both fields little-endian, then the tag.
    let one = 1234u32;
    let two = 5678u16;
    let first = Enum::A { one, two };
    for (i, byte) in one.to_le_bytes().iter().enumerate() {
        out[data_align + i + a_one_align] = MaybeUninit::new(*byte);
    }
    for (i, byte) in two.to_le_bytes().iter().enumerate() {
        out[data_align + i + a_two_align] = MaybeUninit::new(*byte);
    }
    out[tag_align] = MaybeUninit::new(0);
    let out = unsafe { std::mem::transmute_copy::<[MaybeUninit<u8>; SIZE], Enum<u16>>(&out) };
    assert_eq!(out, first);

    // Variant B with its explicit discriminant 15.
    let mut out = [MaybeUninit::uninit(); SIZE];
    let one = 123u8;
    let two = 58u16;
    let second = Enum::B { one, two };
    for (i, byte) in one.to_le_bytes().iter().enumerate() {
        out[data_align + i + b_one_align] = MaybeUninit::new(*byte);
    }
    for (i, byte) in two.to_le_bytes().iter().enumerate() {
        out[data_align + i + b_two_align] = MaybeUninit::new(*byte);
    }
    out[tag_align] = MaybeUninit::new(15);
    let out = unsafe { std::mem::transmute_copy::<[MaybeUninit<u8>; SIZE], Enum<u16>>(&out) };
    assert_eq!(out, second);
}

// Round-trip both variants of a simple two-variant enum.
#[test]
fn test_serialize_enum() {
    #[derive(Clone, Copy, Debug, PartialEq, SerializeConst)]
    #[repr(C, u8)]
    enum Enum {
        A { one: u32, two: u16 },
        B { one: u8, two: u16 } = 15,
    }

    println!("{:#?}", Enum::MEMORY_LAYOUT);

    let data = Enum::A {
        one: 0x11111111,
        two: 0x22,
    };
    let mut buf = ConstVec::new();
    buf = serialize_const(&data, buf);
    println!("{:?}", buf.as_ref());
    let buf = buf.as_ref();
    assert_eq!(deserialize_const!(Enum, buf).unwrap().1, data);

    let data = Enum::B {
        one: 0x11,
        two: 0x2233,
    };
    let mut buf = ConstVec::new();
    buf = serialize_const(&data, buf);
    println!("{:?}", buf.as_ref());
    let buf = buf.as_ref();
    assert_eq!(deserialize_const!(Enum, buf).unwrap().1, data);
}

// Round-trip arrays mixing a unit variant with a payload-carrying variant,
// in every order combination.
#[test]
fn test_serialize_list_of_lopsided_enums() {
    #[derive(Clone, Copy, Debug, PartialEq, SerializeConst)]
    #[repr(C, u8)]
    enum Enum {
        A,
        B { one: u8, two: u16 } = 15,
    }

    println!("{:#?}", Enum::MEMORY_LAYOUT);

    let data = [Enum::A, Enum::A];
    let mut buf = ConstVec::new();
    buf = serialize_const(&data, buf);
    println!("{:?}", buf.as_ref());
    let buf = buf.as_ref();
    assert_eq!(deserialize_const!([Enum; 2], buf).unwrap().1, data);

    let data = [
        Enum::B {
            one: 0x11,
            two: 0x2233,
        },
        Enum::B {
            one: 0x12,
            two: 0x2244,
        },
    ];
    let mut buf = ConstVec::new();
    buf = serialize_const(&data, buf);
    println!("{:?}", buf.as_ref());
    let buf = buf.as_ref();
    assert_eq!(deserialize_const!([Enum; 2], buf).unwrap().1, data);

    let data = [
        Enum::A,
        Enum::B {
            one: 0x11,
            two: 0x2233,
        },
    ];
    let mut buf = ConstVec::new();
    buf = serialize_const(&data, buf);
    println!("{:?}", buf.as_ref());
    let buf = buf.as_ref();
    assert_eq!(deserialize_const!([Enum; 2], buf).unwrap().1, data);

    let data = [
        Enum::B {
            one: 0x11,
            two: 0x2233,
        },
        Enum::A,
    ];
    let mut buf = ConstVec::new();
    buf = serialize_const(&data, buf);
    println!("{:?}", buf.as_ref());
    let buf = buf.as_ref();
    assert_eq!(deserialize_const!([Enum; 2], buf).unwrap().1, data);
}

// Round-trip a field-less `#[repr(u8)]` enum.
#[test]
fn test_serialize_u8_enum() {
    #[derive(Clone, Copy, Debug, PartialEq, SerializeConst)]
    #[repr(u8)]
    enum Enum {
        A,
        B,
    }

    println!("{:#?}", Enum::MEMORY_LAYOUT);

    let data = Enum::A;
    let mut buf = ConstVec::new();
    buf = serialize_const(&data, buf);
    println!("{:?}", buf.as_ref());
    let buf = buf.as_ref();
    assert_eq!(deserialize_const!(Enum, buf).unwrap().1, data);

    let data = Enum::B;
    let mut buf = ConstVec::new();
    buf = serialize_const(&data, buf);
    println!("{:?}", buf.as_ref());
    let buf = buf.as_ref();
    assert_eq!(deserialize_const!(Enum, buf).unwrap().1, data);
}

// Corrupting a byte of the serialized form must make deserialization return
// None instead of panicking or producing garbage.
#[test]
fn test_serialize_corrupted_enum() {
    #[derive(Clone, Copy, Debug, PartialEq, SerializeConst)]
    #[repr(C, u8)]
    enum Enum {
        A { one: u32, two: u16 },
    }

    let data = Enum::A {
        one: 0x11111111,
        two: 0x22,
    };
    let mut buf = ConstVec::new();
    buf = serialize_const(&data, buf);
    // Overwrite the first byte of the encoded value.
    buf = buf.set(0, 2);
    println!("{:?}", buf.as_ref());
    let buf = buf.as_ref();
    assert_eq!(deserialize_const!(Enum, buf), None);
}

// Round-trip an enum whose variant payload contains another enum, covering
// every inner variant.
#[test]
fn test_serialize_nested_enum() {
    #[derive(Clone, Copy, Debug, PartialEq, SerializeConst)]
    #[repr(C, u8)]
    enum Enum {
        A { one: u32, two: u16 },
        B { one: u8, two: InnerEnum } = 15,
    }

    #[derive(Clone, Copy, Debug, PartialEq, SerializeConst)]
    #[repr(C, u16)]
    enum InnerEnum {
        A(u8),
        B { one: u64, two: f64 } = 1000,
        C { one: u32, two: u16 },
    }

    let data = Enum::A {
        one: 0x11111111,
        two: 0x22,
    };
    let mut buf = ConstVec::new();
    buf = serialize_const(&data, buf);
    println!("{:?}", buf.as_ref());
    let buf = buf.as_ref();
    assert_eq!(deserialize_const!(Enum, buf).unwrap().1, data);

    let data = Enum::B {
        one: 0x11,
        two: InnerEnum::A(0x22),
    };
    let mut buf = ConstVec::new();
    buf = serialize_const(&data, buf);
    println!("{:?}", buf.as_ref());
    let buf = buf.as_ref();
    assert_eq!(deserialize_const!(Enum, buf).unwrap().1, data);

    let data = Enum::B {
        one: 0x11,
        two: InnerEnum::B {
            one: 0x2233,
            two: 0.123456789,
        },
    };
    let mut buf = ConstVec::new();
    buf = serialize_const(&data, buf);
    println!("{:?}", buf.as_ref());
    let buf = buf.as_ref();
    assert_eq!(deserialize_const!(Enum, buf).unwrap().1, data);

    let data = Enum::B {
        one: 0x11,
        two: InnerEnum::C {
            one: 0x2233,
            two: 56789,
        },
    };
    let mut buf = ConstVec::new();
    buf = serialize_const(&data, buf);
    println!("{:?}", buf.as_ref());
    let buf = buf.as_ref();
    assert_eq!(deserialize_const!(Enum, buf).unwrap().1, data);
}

// Schema evolution: adding/reordering fields inside a variant must not break
// deserialization into the older enum (fields are matched by name).
#[test]
fn test_adding_enum_field_non_breaking() {
    #[derive(Debug, PartialEq, SerializeConst)]
    #[repr(C, u8)]
    enum Initial {
        A { a: u32, b: u8 },
    }

    #[derive(Debug, PartialEq, SerializeConst)]
    #[repr(C, u8)]
    enum New {
        A { b: u8, a: u32, c: u32 },
    }

    let data = New::A {
        a: 0x11111111,
        b: 0x22,
        c: 0x33333333,
    };
    let mut buf = ConstVec::new();
    buf = serialize_const(&data, buf);
    let buf = buf.as_ref();
    // The new struct should be able to deserialize into the initial struct
    let (_, data2) = deserialize_const!(Initial, buf).unwrap();
    assert_eq!(
        Initial::A {
            a: 0x11111111,
            b: 0x22,
        },
        data2
    );
}

// Schema evolution: adding a new variant (and reordering) must not break
// deserialization of an existing variant (variants are matched by name).
#[test]
fn test_adding_enum_variant_non_breaking() {
    #[derive(Debug, PartialEq, SerializeConst)]
    #[repr(C, u8)]
    enum Initial {
        A { a: u32, b: u8 },
    }

    #[derive(Debug, PartialEq, SerializeConst)]
    #[repr(C, u8)]
    enum New {
        #[allow(unused)]
        B {
            d: u32,
            e: u8,
        },
        A {
            c: u32,
            b: u8,
            a: u32,
        },
    }

    let data = New::A {
        a: 0x11111111,
        b: 0x22,
        c: 0x33333333,
    };
    let mut buf = ConstVec::new();
    buf = serialize_const(&data, buf);
    let buf = buf.as_ref();
    // The new struct should be able to deserialize into the initial struct
    let (_, data2) = deserialize_const!(Initial, buf).unwrap();
    assert_eq!(
        Initial::A {
            a: 0x11111111,
            b: 0x22,
        },
        data2
    );
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/const-serialize/tests/str.rs
packages/const-serialize/tests/str.rs
use const_serialize::{deserialize_const, serialize_const, ConstStr, ConstVec}; #[test] fn test_serialize_const_layout_str() { let mut buf = ConstVec::new(); let str = ConstStr::new("hello"); buf = serialize_const(&str, buf); println!("{:?}", buf.as_ref()); let buf = buf.as_ref(); assert!(buf.len() < 10); let str = deserialize_const!(ConstStr, buf).unwrap().1; assert_eq!(str.as_str(), "hello"); } #[test] fn test_serialize_const_layout_nested_str() { let mut buf = ConstVec::new(); let str = ConstStr::new("hello"); buf = serialize_const(&[str, str, str] as &[ConstStr; 3], buf); println!("{:?}", buf.as_ref()); assert!(buf.len() < 30); let buf = buf.as_ref(); assert_eq!( deserialize_const!([ConstStr; 3], buf).unwrap().1, [ ConstStr::new("hello"), ConstStr::new("hello"), ConstStr::new("hello") ] ); } #[test] fn test_serialize_str_too_little_data() { let mut buf = ConstVec::new(); buf = buf.push(1); let buf = buf.as_ref(); assert_eq!(deserialize_const!(ConstStr, buf), None); }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/lib.rs
packages/fullstack/src/lib.rs
// #![warn(missing_docs)] #![allow(clippy::manual_async_fn)] #![allow(clippy::needless_return)] pub use client::{get_server_url, set_server_url}; pub use dioxus_fullstack_core::*; #[doc(inline)] pub use dioxus_fullstack_macro::*; pub use axum_core; pub use headers; pub use http; pub use reqwest; pub use serde; /// Re-export commonly used items from axum, http, and hyper for convenience. pub use axum::{body, extract, response, routing}; #[doc(hidden)] pub use const_format; #[doc(hidden)] pub use const_str; #[doc(hidden)] pub use xxhash_rust; #[cfg(feature = "server")] pub use {axum, axum_extra::TypedHeader, inventory}; #[cfg(feature = "server")] pub(crate) mod spawn; #[cfg(feature = "server")] pub(crate) use spawn::*; pub mod magic; pub use magic::*; pub mod request; pub use request::*; pub use http::StatusCode; pub mod encoding; pub use encoding::*; pub mod lazy; pub use lazy::*; pub use http::{HeaderMap, HeaderValue, Method}; mod client; pub use client::*; pub use axum::extract::Json; pub use axum::response::{NoContent, Redirect}; pub use crate::request::{FromResponse, FromResponseParts}; pub use payloads::*; pub mod payloads { use crate::{ClientRequest, ClientResponse, ClientResult, IntoRequest}; use crate::{FromResponse, FromResponseParts}; use axum::extract::FromRequest; use axum::response::{IntoResponse, IntoResponseParts, ResponseParts}; use bytes::Bytes; use dioxus_fullstack_core::ServerFnError; use futures::Stream; use headers::Header; use http::{header::InvalidHeaderValue, HeaderValue}; use serde::{de::DeserializeOwned, Serialize}; use std::future::Future; mod axum_types; pub mod cbor; pub use cbor::*; pub mod form; pub use form::*; pub mod multipart; pub use multipart::*; #[cfg(feature = "postcard")] pub mod postcard; #[cfg(feature = "postcard")] pub use postcard::*; #[cfg(feature = "msgpack")] pub mod msgpack; #[cfg(feature = "msgpack")] pub use msgpack::*; pub mod text; pub use text::*; pub mod sse; pub use sse::*; pub mod stream; pub use stream::*; pub 
mod files; pub use files::*; pub mod header; pub use header::*; pub mod query; pub use query::*; #[cfg(feature = "ws")] pub mod websocket; #[cfg(feature = "ws")] pub use websocket::*; }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/spawn.rs
packages/fullstack/src/spawn.rs
use std::future::Future; /// Spawn a task in the background. If wasm is enabled, this will use the single threaded tokio runtime pub(crate) fn spawn_platform<Fut>( f: impl FnOnce() -> Fut + Send + 'static, ) -> tokio::task::JoinHandle<Fut::Output> where Fut: Future + 'static, Fut::Output: Send + 'static, { #[cfg(not(target_arch = "wasm32"))] { use tokio_util::task::LocalPoolHandle; static TASK_POOL: std::sync::OnceLock<LocalPoolHandle> = std::sync::OnceLock::new(); let pool = TASK_POOL.get_or_init(|| { LocalPoolHandle::new( std::thread::available_parallelism() .map(usize::from) .unwrap_or(1), ) }); pool.spawn_pinned(f) } #[cfg(target_arch = "wasm32")] { tokio::task::spawn_local(f()) } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/magic.rs
packages/fullstack/src/magic.rs
//! ServerFn request magical 🧙 decoders and encoders. //! //! The Dioxus Server Function implementation brings a lot of *magic* to the types of endpoints we can handle. //! Our ultimate goal is to handle *all* endpoints, even axum endpoints, with the macro. //! //! Unfortunately, some axum traits like `FromRequest` overlap with some of the default magic we want //! to provide, like allowing DeserializedOwned groups. //! //! Our ultimate goal - to accept all axum handlers - is feasible but not fully implemented. //! //! Broadly, we support the following categories of handlers arguments: //! - Handlers with a single argument that implements `FromRequest` + `IntoRequest` //! - Handlers with multiple arguments that implement *all* `DeserializeOwned` (and thus can be deserialized from a JSON body) //! //! The handler error return types we support are: //! - `Result<T, E> where E: From<ServerFnError> + Serialize + DeserializeOwned` (basically any custom `thiserror` impl) //! - `Result<T, anyhow::Error>` where we transport the error as a string and/or through ServerFnError //! //! The handler return types we support are: //! - `T where T: FromResponse` //! - `T where T: DeserializeOwned` //! //! Note that FromResponse and IntoRequest are *custom* traits defined in this crate. The intention //! is to provide "inverse" traits of the axum traits, allowing types to flow seamlessly between client and server. //! //! These are unfortunately in conflict with the serialization traits. Types like `Bytes` implement both //! IntoResponse and Serialize, so what should you use? //! //! This module implements auto-deref specialization to allow tiering of the above cases. //! //! This is sadly quite "magical", but it works. Because the FromResponse traits are defined in this crate, //! they are sealed against types that implement Deserialize/Serialize, meaning you cannot implement //! FromResponse for a type that implements Serialize. //! //! 
This module is broken up into several parts, attempting to match how the server macro generates code: //! - ReqwestEncoder: encodes a set of arguments into a reqwest request use crate::{ CantEncode, ClientRequest, ClientResponse, EncodeIsVerified, FromResponse, HttpError, IntoRequest, ServerFnError, }; use axum::response::IntoResponse; use axum_core::extract::{FromRequest, Request}; use bytes::Bytes; use dioxus_fullstack_core::RequestError; use http::StatusCode; use send_wrapper::SendWrapper; use serde::Serialize; use serde::{de::DeserializeOwned, Deserialize}; use std::fmt::Display; use std::{marker::PhantomData, prelude::rust_2024::Future}; #[doc(hidden)] pub struct ServerFnEncoder<In, Out>(PhantomData<fn() -> (In, Out)>); impl<In, Out> ServerFnEncoder<In, Out> { #[doc(hidden)] pub fn new() -> Self { ServerFnEncoder(PhantomData) } } #[doc(hidden)] pub struct ServerFnDecoder<Out>(PhantomData<fn() -> Out>); impl<Out> ServerFnDecoder<Out> { #[doc(hidden)] pub fn new() -> Self { ServerFnDecoder(PhantomData) } } /// A response structure for a regular REST API, with a success and error case where the status is /// encoded in the body and all fields are serializable. This lets you call fetch().await.json() /// and get a strongly typed result. /// /// Eventually we want to support JsonRPC which requires a different format. /// /// We use the `___status` field to avoid conflicts with user-defined fields. Hopefully no one uses this field name! #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] pub enum RestEndpointPayload<T, E> { #[serde(rename = "success")] Success(T), #[serde(rename = "error")] Error(ErrorPayload<E>), } /// The error payload structure for REST API errors. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] pub struct ErrorPayload<E> { message: String, code: u16, #[serde(skip_serializing_if = "Option::is_none")] data: Option<E>, } /// Convert a `RequestError` into a `ServerFnError`. 
/// /// This is a separate function to avoid bringing in `reqwest` into fullstack-core. pub fn reqwest_response_to_serverfn_err(err: reqwest::Error) -> ServerFnError { ServerFnError::Request(reqwest_error_to_request_error(err)) } pub fn reqwest_error_to_request_error(err: reqwest::Error) -> RequestError { let message = err.to_string(); if err.is_timeout() { RequestError::Timeout(message) } else if err.is_request() { RequestError::Request(message) } else if err.is_body() { RequestError::Body(message) } else if err.is_decode() { RequestError::Decode(message) } else if err.is_redirect() { RequestError::Redirect(message) } else if let Some(status) = err.status() { RequestError::Status(message, status.as_u16()) } else { #[cfg(not(target_arch = "wasm32"))] { if err.is_connect() { RequestError::Connect(message) } else { RequestError::Request(message) } } #[cfg(target_arch = "wasm32")] { RequestError::Request(message) } } } pub use req_to::*; pub mod req_to { use super::*; pub trait EncodeRequest<In, Out, R> { type VerifyEncode; fn fetch_client( &self, ctx: ClientRequest, data: In, map: fn(In) -> Out, ) -> impl Future<Output = Result<R, RequestError>> + 'static; fn verify_can_serialize(&self) -> Self::VerifyEncode; } /// Using the deserialize path impl<T, O> EncodeRequest<T, O, ClientResponse> for &&&&&&&&&&ServerFnEncoder<T, O> where T: DeserializeOwned + Serialize + 'static, { type VerifyEncode = EncodeIsVerified; fn fetch_client( &self, ctx: ClientRequest, data: T, _map: fn(T) -> O, ) -> impl Future<Output = Result<ClientResponse, RequestError>> + 'static { async move { ctx.send_json(&data).await } } fn verify_can_serialize(&self) -> Self::VerifyEncode { EncodeIsVerified } } /// When we use the FromRequest path, we don't need to deserialize the input type on the client, impl<T, O, R> EncodeRequest<T, O, R> for &&&&&&&&&ServerFnEncoder<T, O> where T: 'static, O: IntoRequest<R>, { type VerifyEncode = EncodeIsVerified; fn fetch_client( &self, ctx: ClientRequest, data: T, 
map: fn(T) -> O, ) -> impl Future<Output = Result<R, RequestError>> + 'static { O::into_request(map(data), ctx) } fn verify_can_serialize(&self) -> Self::VerifyEncode { EncodeIsVerified } } /// The fall-through case that emits a `CantEncode` type which fails to compile when checked by the macro impl<T, O> EncodeRequest<T, O, ClientResponse> for &ServerFnEncoder<T, O> where T: 'static, { type VerifyEncode = CantEncode; #[allow(clippy::manual_async_fn)] fn fetch_client( &self, _ctx: ClientRequest, _data: T, _map: fn(T) -> O, ) -> impl Future<Output = Result<ClientResponse, RequestError>> + 'static { async move { unimplemented!() } } fn verify_can_serialize(&self) -> Self::VerifyEncode { CantEncode } } } pub use decode_ok::*; mod decode_ok { use crate::{CantDecode, DecodeIsVerified}; use super::*; /// Convert the reqwest response into the desired type, in place. /// The point here is to prefer FromResponse types *first* and then DeserializeOwned types second. /// /// This is because FromResponse types are more specialized and can handle things like websockets and files. /// DeserializeOwned types are more general and can handle things like JSON responses. 
pub trait RequestDecodeResult<T, R> { type VerifyDecode; fn decode_client_response( &self, res: Result<R, RequestError>, ) -> impl Future<Output = Result<Result<T, ServerFnError>, RequestError>> + Send; fn verify_can_deserialize(&self) -> Self::VerifyDecode; } impl<T: FromResponse<R>, E, R> RequestDecodeResult<T, R> for &&&ServerFnDecoder<Result<T, E>> { type VerifyDecode = DecodeIsVerified; fn decode_client_response( &self, res: Result<R, RequestError>, ) -> impl Future<Output = Result<Result<T, ServerFnError>, RequestError>> + Send { SendWrapper::new(async move { match res { Err(err) => Err(err), Ok(res) => Ok(T::from_response(res).await), } }) } fn verify_can_deserialize(&self) -> Self::VerifyDecode { DecodeIsVerified } } impl<T: DeserializeOwned, E> RequestDecodeResult<T, ClientResponse> for &&ServerFnDecoder<Result<T, E>> { type VerifyDecode = DecodeIsVerified; fn decode_client_response( &self, res: Result<ClientResponse, RequestError>, ) -> impl Future<Output = Result<Result<T, ServerFnError>, RequestError>> + Send { SendWrapper::new(async move { match res { Err(err) => Err(err), Ok(res) => { let status = res.status(); let bytes = res.bytes().await.unwrap(); let as_bytes = if bytes.is_empty() { b"null".as_slice() } else { &bytes }; let res = if status.is_success() { serde_json::from_slice::<T>(as_bytes) .map(RestEndpointPayload::Success) .map_err(|e| ServerFnError::Deserialization(e.to_string())) } else { match serde_json::from_slice::<ErrorPayload<serde_json::Value>>( as_bytes, ) { Ok(res) => Ok(RestEndpointPayload::Error(ErrorPayload { message: res.message, code: res.code, data: res.data, })), Err(err) => { if let Ok(text) = String::from_utf8(as_bytes.to_vec()) { Ok(RestEndpointPayload::Error(ErrorPayload { message: format!("HTTP {}: {}", status.as_u16(), text), code: status.as_u16(), data: None, })) } else { Err(ServerFnError::Deserialization(err.to_string())) } } } }; match res { Ok(RestEndpointPayload::Success(t)) => Ok(Ok(t)), 
Ok(RestEndpointPayload::Error(err)) => { Ok(Err(ServerFnError::ServerError { message: err.message, details: err.data, code: err.code, })) } Err(e) => Ok(Err(e)), } } } }) } fn verify_can_deserialize(&self) -> Self::VerifyDecode { DecodeIsVerified } } impl<T, R, E> RequestDecodeResult<T, R> for &ServerFnDecoder<Result<T, E>> { type VerifyDecode = CantDecode; fn decode_client_response( &self, _res: Result<R, RequestError>, ) -> impl Future<Output = Result<Result<T, ServerFnError>, RequestError>> + Send { async move { unimplemented!() } } fn verify_can_deserialize(&self) -> Self::VerifyDecode { CantDecode } } pub trait RequestDecodeErr<T, E> { fn decode_client_err( &self, res: Result<Result<T, ServerFnError>, RequestError>, ) -> impl Future<Output = Result<T, E>> + Send; } impl<T, E> RequestDecodeErr<T, E> for &&&ServerFnDecoder<Result<T, E>> where E: From<ServerFnError> + DeserializeOwned + Serialize, { fn decode_client_err( &self, res: Result<Result<T, ServerFnError>, RequestError>, ) -> impl Future<Output = Result<T, E>> + Send { SendWrapper::new(async move { match res { Ok(Ok(res)) => Ok(res), Ok(Err(e)) => match e { ServerFnError::ServerError { details, message, code, } => { // If there are "details", then we try to deserialize them into the error type. // If there aren't, we just create a generic ServerFnError::ServerError with the message. match details { Some(details) => match serde_json::from_value::<E>(details) { Ok(res) => Err(res), Err(err) => Err(E::from(ServerFnError::Deserialization( err.to_string(), ))), }, None => Err(E::from(ServerFnError::ServerError { message, details: None, code, })), } } err => Err(err.into()), }, // todo: implement proper through-error conversion, instead of just ServerFnError::Request // we should expand these cases. Err(err) => Err(ServerFnError::from(err).into()), } }) } } /// Here we convert to ServerFnError and then into the anyhow::Error, letting the user downcast /// from the ServerFnError if they want to. 
/// /// This loses any actual type information, but is the most flexible for users. impl<T> RequestDecodeErr<T, anyhow::Error> for &&ServerFnDecoder<Result<T, anyhow::Error>> { fn decode_client_err( &self, res: Result<Result<T, ServerFnError>, RequestError>, ) -> impl Future<Output = Result<T, anyhow::Error>> + Send { SendWrapper::new(async move { match res { Ok(Ok(res)) => Ok(res), Ok(Err(e)) => Err(anyhow::Error::from(e)), Err(err) => Err(anyhow::Error::from(err)), } }) } } /// This converts to statuscode, which can be useful but loses a lot of information. impl<T> RequestDecodeErr<T, StatusCode> for &ServerFnDecoder<Result<T, StatusCode>> { fn decode_client_err( &self, res: Result<Result<T, ServerFnError>, RequestError>, ) -> impl Future<Output = Result<T, StatusCode>> + Send { SendWrapper::new(async move { match res { Ok(Ok(res)) => Ok(res), // We do a best-effort conversion from ServerFnError to StatusCode. Ok(Err(e)) => match e { ServerFnError::Request(error) => { Err(StatusCode::from_u16(error.status_code().unwrap_or(500)) .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR)) } ServerFnError::ServerError { message: _message, details: _details, code, } => { Err(StatusCode::from_u16(code) .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR)) } ServerFnError::Registration(_) | ServerFnError::MiddlewareError(_) => { Err(StatusCode::INTERNAL_SERVER_ERROR) } ServerFnError::Deserialization(_) | ServerFnError::Serialization(_) | ServerFnError::Args(_) | ServerFnError::MissingArg(_) | ServerFnError::StreamError(_) => Err(StatusCode::UNPROCESSABLE_ENTITY), ServerFnError::UnsupportedRequestMethod(_) => { Err(StatusCode::METHOD_NOT_ALLOWED) } ServerFnError::Response(_) => Err(StatusCode::INTERNAL_SERVER_ERROR), }, // The reqwest error case, we try to convert the reqwest error into a status code. 
Err(reqwest_err) => { let code = reqwest_err .status() .unwrap_or(StatusCode::SERVICE_UNAVAILABLE); Err(code) } } }) } } impl<T> RequestDecodeErr<T, HttpError> for &ServerFnDecoder<Result<T, HttpError>> { fn decode_client_err( &self, res: Result<Result<T, ServerFnError>, RequestError>, ) -> impl Future<Output = Result<T, HttpError>> + Send { SendWrapper::new(async move { match res { Ok(Ok(res)) => Ok(res), Ok(Err(res)) => match res { ServerFnError::ServerError { message, code, .. } => Err(HttpError { status: StatusCode::from_u16(code) .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR), message: Some(message), }), _ => HttpError::internal_server_error("Internal Server Error"), }, Err(err) => Err(HttpError::new( err.status().unwrap_or(StatusCode::INTERNAL_SERVER_ERROR), err.to_string(), )), } }) } } } pub use req_from::*; pub mod req_from { use super::*; use axum::{extract::FromRequestParts, response::Response}; use dioxus_fullstack_core::FullstackContext; pub trait ExtractRequest<In, Out, H, M = ()> { fn extract_axum( &self, state: FullstackContext, request: Request, map: fn(In) -> Out, ) -> impl Future<Output = Result<(Out, H), Response>> + 'static; } /// When you're extracting entirely on the server, we need to reject client-consuning request bodies /// This sits above priority in the combined headers on server / body on client case. 
impl<In, M, H> ExtractRequest<In, (), H, M> for &&&&&&&&&&&ServerFnEncoder<In, ()> where H: FromRequest<FullstackContext, M> + 'static, { fn extract_axum( &self, state: FullstackContext, request: Request, _map: fn(In) -> (), ) -> impl Future<Output = Result<((), H), Response>> + 'static { async move { H::from_request(request, &state) .await .map_err(|e| e.into_response()) .map(|out| ((), out)) } } } // One-arg case impl<In, Out, H> ExtractRequest<In, Out, H> for &&&&&&&&&&ServerFnEncoder<In, Out> where In: DeserializeOwned + 'static, Out: 'static, H: FromRequestParts<FullstackContext>, { fn extract_axum( &self, _state: FullstackContext, request: Request, map: fn(In) -> Out, ) -> impl Future<Output = Result<(Out, H), Response>> + 'static { async move { let (mut parts, body) = request.into_parts(); let headers = H::from_request_parts(&mut parts, &_state) .await .map_err(|e| e.into_response())?; let request = Request::from_parts(parts, body); let bytes = Bytes::from_request(request, &()).await.unwrap(); let as_str = String::from_utf8_lossy(&bytes); let bytes = if as_str.is_empty() { "{}".as_bytes() } else { &bytes }; let out = serde_json::from_slice::<In>(bytes) .map(map) .map_err(|e| ServerFnError::from(e).into_response())?; Ok((out, headers)) } } } /// We skip the BodySerialize wrapper and just go for the output type directly. 
impl<In, Out, M, H> ExtractRequest<In, Out, H, M> for &&&&&&&&&ServerFnEncoder<In, Out> where Out: FromRequest<FullstackContext, M> + 'static, H: FromRequestParts<FullstackContext>, { fn extract_axum( &self, state: FullstackContext, request: Request, _map: fn(In) -> Out, ) -> impl Future<Output = Result<(Out, H), Response>> + 'static { async move { let (mut parts, body) = request.into_parts(); let headers = H::from_request_parts(&mut parts, &state) .await .map_err(|e| e.into_response())?; let request = Request::from_parts(parts, body); let res = Out::from_request(request, &state) .await .map_err(|e| e.into_response()); res.map(|out| (out, headers)) } } } } pub use resp::*; mod resp { use crate::HttpError; use super::*; use axum::response::Response; use dioxus_core::CapturedError; use http::HeaderValue; /// A trait for converting the result of the Server Function into an Axum response. /// /// This is to work around the issue where we want to return both Deserialize types and FromResponse types. /// Stuff like websockets /// /// We currently have an `Input` type even though it's not useful since we might want to support regular axum endpoints later. 
/// For now, it's just Result<T, E> where T is either DeserializeOwned or FromResponse pub trait MakeAxumResponse<T, E, R> { fn make_axum_response(self, result: Result<T, E>) -> Result<Response, E>; } // Higher priority impl for special types like websocket/file responses that generate their own responses // The FromResponse impl helps narrow types to those usable on the client impl<T, E, R> MakeAxumResponse<T, E, R> for &&&&ServerFnDecoder<Result<T, E>> where T: FromResponse<R> + IntoResponse, { fn make_axum_response(self, result: Result<T, E>) -> Result<Response, E> { result.map(|v| v.into_response()) } } // Lower priority impl for regular serializable types // We try to match the encoding from the incoming request, otherwise default to JSON impl<T, E> MakeAxumResponse<T, E, ()> for &&&ServerFnDecoder<Result<T, E>> where T: DeserializeOwned + Serialize, { fn make_axum_response(self, result: Result<T, E>) -> Result<Response, E> { match result { Ok(res) => { let body = serde_json::to_string(&res).unwrap(); let mut resp = Response::new(body.into()); resp.headers_mut().insert( http::header::CONTENT_TYPE, HeaderValue::from_static("application/json"), ); *resp.status_mut() = StatusCode::OK; Ok(resp) } Err(err) => Err(err), } } } #[allow(clippy::result_large_err)] pub trait MakeAxumError<E> { fn make_axum_error(self, result: Result<Response, E>) -> Response; } /// Get the status code from the error type if possible. pub trait AsStatusCode { fn as_status_code(&self) -> StatusCode; } impl AsStatusCode for ServerFnError { fn as_status_code(&self) -> StatusCode { match self { Self::ServerError { code, .. 
} => {
                StatusCode::from_u16(*code).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR)
            }
            _ => StatusCode::INTERNAL_SERVER_ERROR,
        }
    }
}

// NOTE(review): the impls below differ only in the number of leading `&` on
// `ServerFnDecoder` — this appears to be autoref-based specialization so the
// most specific error type wins at the macro call site; confirm against the
// generated server-fn glue.

/// Errors that know their own status code and are serializable: serialize the
/// full error into the JSON payload and use its status code for the response.
impl<T, E> MakeAxumError<E> for &&&ServerFnDecoder<Result<T, E>>
where
    E: AsStatusCode + From<ServerFnError> + Serialize + DeserializeOwned + Display,
{
    fn make_axum_error(self, result: Result<Response, E>) -> Response {
        match result {
            Ok(res) => res,
            Err(err) => {
                let status_code = err.as_status_code();
                // The typed error rides along in `data` so the client can
                // deserialize it back into `E`.
                let err = ErrorPayload {
                    code: status_code.as_u16(),
                    message: err.to_string(),
                    data: Some(err),
                };
                let body = serde_json::to_string(&err).unwrap();
                let mut resp = Response::new(body.into());
                resp.headers_mut().insert(
                    http::header::CONTENT_TYPE,
                    HeaderValue::from_static("application/json"),
                );
                *resp.status_mut() = status_code;
                resp
            }
        }
    }
}

/// `CapturedError` responses: unwrap to the inner error when we hold the only
/// reference, otherwise downcast by reference to build the richest payload we can.
impl<T> MakeAxumError<CapturedError> for &&ServerFnDecoder<Result<T, CapturedError>> {
    fn make_axum_error(self, result: Result<Response, CapturedError>) -> Response {
        match result {
            Ok(res) => res,
            // Optimize the case where we have sole ownership of the error:
            // take it out and delegate to the owned `anyhow::Error` path.
            Err(errr) if errr._strong_count() == 1 => {
                let err = errr.into_inner().unwrap();
                <&&ServerFnDecoder<Result<T, anyhow::Error>> as MakeAxumError<anyhow::Error>>::make_axum_error(
                    &&ServerFnDecoder::new(),
                    Err(err),
                )
            }
            Err(errr) => {
                // The `WithHttpError` trait emits ServerFnErrors so we can downcast them here
                // to create richer responses.
                let payload = match errr.downcast_ref::<ServerFnError>() {
                    // A structured server error: forward its message/code/details.
                    Some(ServerFnError::ServerError {
                        message,
                        code,
                        details,
                    }) => ErrorPayload {
                        message: message.clone(),
                        code: *code,
                        data: details.clone(),
                    },
                    // Any other ServerFnError variant: stringify with a generic 500.
                    Some(other) => ErrorPayload {
                        message: other.to_string(),
                        code: 500,
                        data: None,
                    },
                    None => match errr.downcast_ref::<HttpError>() {
                        // An HttpError: use its status and optional message.
                        Some(http_err) => ErrorPayload {
                            message: http_err
                                .message
                                .clone()
                                .unwrap_or_else(|| http_err.status.to_string()),
                            code: http_err.status.as_u16(),
                            data: None,
                        },
                        // Completely opaque error: stringify it.
                        None => ErrorPayload {
                            code: 500,
                            message: errr.to_string(),
                            data: None,
                        },
                    },
                };

                let body = serde_json::to_string(&payload).unwrap();
                let mut resp = Response::new(body.into());
                resp.headers_mut().insert(
                    http::header::CONTENT_TYPE,
                    HeaderValue::from_static("application/json"),
                );
                // NOTE(review): the HTTP status is always 500 here even when the
                // payload carries a more specific `code` — confirm this is the
                // intended transport behavior.
                *resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
                resp
            }
        }
    }
}

/// Owned `anyhow::Error` responses: same downcast ladder as the
/// `CapturedError` impl, but consuming `downcast` instead of `downcast_ref`
/// so fields can be moved out without cloning.
impl<T> MakeAxumError<anyhow::Error> for &&ServerFnDecoder<Result<T, anyhow::Error>> {
    fn make_axum_error(self, result: Result<Response, anyhow::Error>) -> Response {
        match result {
            Ok(res) => res,
            Err(errr) => {
                // The `WithHttpError` trait emits ServerFnErrors so we can downcast them here
                // to create richer responses.
                let payload = match errr.downcast::<ServerFnError>() {
                    Ok(ServerFnError::ServerError {
                        message,
                        code,
                        details,
                    }) => ErrorPayload {
                        message,
                        code,
                        data: details,
                    },
                    Ok(other) => ErrorPayload {
                        message: other.to_string(),
                        code: 500,
                        data: None,
                    },
                    Err(err) => match err.downcast::<HttpError>() {
                        Ok(http_err) => ErrorPayload {
                            message: http_err
                                .message
                                .unwrap_or_else(|| http_err.status.to_string()),
                            code: http_err.status.as_u16(),
                            data: None,
                        },
                        Err(err) => ErrorPayload {
                            code: 500,
                            message: err.to_string(),
                            data: None,
                        },
                    },
                };

                let body = serde_json::to_string(&payload).unwrap();
                let mut resp = Response::new(body.into());
                resp.headers_mut().insert(
                    http::header::CONTENT_TYPE,
                    HeaderValue::from_static("application/json"),
                );
                *resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
                resp
            }
        }
    }
}

/// Bare `StatusCode` errors: no payload data, just the code and its canonical
/// reason phrase serialized as JSON, with the response status set to match.
impl<T> MakeAxumError<StatusCode> for &&ServerFnDecoder<Result<T, StatusCode>> {
    fn make_axum_error(self, result: Result<Response, StatusCode>) -> Response {
        match result {
            Ok(resp) => resp,
            Err(status) => {
                let body = serde_json::to_string(&ErrorPayload::<()> {
                    code: status.as_u16(),
                    message: status.to_string(),
                    data: None,
                })
                .unwrap();
                let mut resp = Response::new(body.into());
                resp.headers_mut().insert(
                    http::header::CONTENT_TYPE,
                    HeaderValue::from_static("application/json"),
                );
                *resp.status_mut() = status;
                resp
            }
        }
    }
}

/// `HttpError` responses (impl continues past this chunk).
impl<T> MakeAxumError<HttpError> for &ServerFnDecoder<Result<T, HttpError>> {
    fn make_axum_error(self, result: Result<Response, HttpError>) -> Response {
        match result {
            Ok(resp) => resp,
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
true
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/client.rs
packages/fullstack/src/client.rs
#![allow(unreachable_code)]
//! Client-side HTTP plumbing for server functions: builds requests with
//! `reqwest` on native targets and `gloo-net`/Fetch on wasm, and abstracts the
//! response behind the `ClientResponseDriver` trait.

use crate::{reqwest_error_to_request_error, StreamingError};
use bytes::Bytes;
use dioxus_fullstack_core::RequestError;
use futures::Stream;
use futures::{TryFutureExt, TryStreamExt};
use headers::{ContentType, Header};
use http::{response::Parts, Extensions, HeaderMap, HeaderName, HeaderValue, Method, StatusCode};
use send_wrapper::SendWrapper;
use serde::{de::DeserializeOwned, Serialize};
use std::sync::{LazyLock, Mutex, OnceLock};
use std::{fmt::Display, pin::Pin, prelude::rust_2024::Future};
use url::Url;

/// Shared reqwest client, lazily created on first request (see `new_reqwest_client`).
pub static GLOBAL_REQUEST_CLIENT: OnceLock<reqwest::Client> = OnceLock::new();

/// Convenience alias for the result of sending a [`ClientRequest`].
pub type ClientResult = Result<ClientResponse, RequestError>;

/// An outgoing request being assembled before it is sent with one of the
/// `send_*` methods.
pub struct ClientRequest {
    // Absolute URL: server base (from `get_server_url`) + path + query.
    pub url: Url,
    // Headers to attach; seeded from the global `get_request_headers()`.
    pub headers: HeaderMap,
    pub method: Method,
    // Arbitrary typed state carried through to the resulting `ClientResponse`.
    pub extensions: Extensions,
}

impl ClientRequest {
    /// Create a new ClientRequest with the given method, url path, and query parameters.
    pub fn new(method: http::Method, path: String, params: &impl Serialize) -> Self {
        Self::fetch_inner(method, path, serde_qs::to_string(params).unwrap())
    }

    // Shrink monomorphization bloat by moving this to its own function
    fn fetch_inner(method: http::Method, path: String, query: String) -> ClientRequest {
        // On wasm, this doesn't matter since we always use relative URLs when making requests anyways
        let mut server_url = get_server_url();
        if server_url.is_empty() {
            server_url = "http://this.is.not.a.real.url:9000";
        }

        let url = format!(
            "{server_url}{path}{params}",
            params = if query.is_empty() {
                "".to_string()
            } else {
                format!("?{}", query)
            }
        )
        .parse()
        .unwrap();

        let headers = get_request_headers();

        ClientRequest {
            method,
            url,
            headers,
            extensions: Extensions::new(),
        }
    }

    /// Get the HTTP method of this Request.
    pub fn method(&self) -> &Method {
        &self.method
    }

    /// Get the full URL of this Request.
    pub fn url(&self) -> &Url {
        &self.url
    }

    /// Extend the query parameters of this request with the given serializable struct.
    ///
    /// This will use `serde_qs` to serialize the struct into query parameters. `serde_qs` has various
    /// restrictions - make sure to read its documentation!
    pub fn extend_query(mut self, query: &impl Serialize) -> Self {
        let old_query = self.url.query().unwrap_or("");
        let new_query = serde_qs::to_string(query).unwrap();
        // Preserve any existing query string and append with `&`.
        let combined_query = format!(
            "{}{}{}",
            old_query,
            if old_query.is_empty() { "" } else { "&" },
            new_query
        );
        self.url.set_query(Some(&combined_query));
        self
    }

    /// Add a `Header` to this Request.
    pub fn header(
        mut self,
        name: impl TryInto<HeaderName, Error = impl Display>,
        value: impl TryInto<HeaderValue, Error = impl Display>,
    ) -> Result<Self, RequestError> {
        self.headers.append(
            name.try_into()
                .map_err(|d| RequestError::Builder(d.to_string()))?,
            value
                .try_into()
                .map_err(|d| RequestError::Builder(d.to_string()))?,
        );
        Ok(self)
    }

    /// Add a `Header` to this Request.
    pub fn typed_header<H: Header>(mut self, header: H) -> Self {
        // A typed header may encode to multiple raw header values; append them all.
        let mut headers = vec![];
        header.encode(&mut headers);
        for header in headers {
            self.headers.append(H::name(), header);
        }
        self
    }

    /// Creates a new reqwest client with cookies set
    pub fn new_reqwest_client() -> reqwest::Client {
        #[allow(unused_mut)]
        let mut client = reqwest::Client::builder();

        // Cookie support only exists off-wasm; the browser manages cookies itself.
        #[cfg(not(target_arch = "wasm32"))]
        {
            use std::sync::Arc;
            use std::sync::LazyLock;
            // One process-wide cookie jar so server-fn calls share sessions.
            static COOKIES: LazyLock<Arc<reqwest::cookie::Jar>> =
                LazyLock::new(|| Arc::new(reqwest::cookie::Jar::default()));
            client = client.cookie_store(true).cookie_provider(COOKIES.clone());
        }

        client.build().unwrap()
    }

    /// Creates a new reqwest request builder with the method, url, and headers set from this ClientRequest
    ///
    /// Using this method attaches the `X-Request-Client: dioxus` header to the request.
    pub fn new_reqwest_request(&self) -> reqwest::RequestBuilder {
        let client = GLOBAL_REQUEST_CLIENT.get_or_init(Self::new_reqwest_client);
        let mut req = client
            .request(self.method.clone(), self.url.clone())
            .header("X-Request-Client", "dioxus");
        for (key, value) in self.headers.iter() {
            req = req.header(key, value);
        }
        req
    }

    /// Creates a new gloo-net request builder with the method, relative URL, and headers
    /// set from this ClientRequest.
    ///
    /// Using this method attaches the `X-Request-Client: dioxus` header to the request.
    #[cfg(feature = "web")]
    pub fn new_gloo_request(&self) -> gloo_net::http::RequestBuilder {
        // Browsers resolve relative URLs against the page origin, so only
        // path + query are used here (the fake native base URL is dropped).
        let mut builder = gloo_net::http::RequestBuilder::new(
            format!(
                "{path}{query_string}",
                path = self.url.path(),
                query_string = self
                    .url
                    .query()
                    .map(|query| format!("?{query}"))
                    .unwrap_or_default()
            )
            .as_str(),
        )
        .header("X-Request-Client", "dioxus")
        .method(self.method.clone());
        for (key, value) in self.headers.iter() {
            // gloo only accepts string header values; skip (and log) non-UTF8 ones.
            let value = match value.to_str() {
                Ok(v) => v,
                Err(er) => {
                    tracing::error!("Error converting header {key} value: {}", er);
                    continue;
                }
            };
            builder = builder.header(key.as_str(), value);
        }
        builder
    }

    /// Sends the request with multipart/form-data body constructed from the given FormData.
    #[cfg(not(target_arch = "wasm32"))]
    pub async fn send_multipart(
        self,
        form: &dioxus_html::FormData,
    ) -> Result<ClientResponse, RequestError> {
        let mut outgoing = reqwest::multipart::Form::new();

        for (key, value) in form.values() {
            match value {
                dioxus_html::FormValue::Text(text) => {
                    outgoing = outgoing.text(key.to_string(), text.to_string());
                }
                dioxus_html::FormValue::File(Some(file_data)) => {
                    outgoing = outgoing
                        .file(key.to_string(), file_data.path())
                        .await
                        .map_err(|e| {
                            RequestError::Builder(format!(
                                "Failed to add file to multipart form: {e}",
                            ))
                        })?;
                }
                dioxus_html::FormValue::File(None) => {
                    // No file was selected for this input, so we skip it.
                    // NOTE(review): despite the comment, an empty part IS still
                    // appended here — confirm whether the server relies on the
                    // field being present.
                    outgoing =
                        outgoing.part(key.to_string(), reqwest::multipart::Part::bytes(b""));
                }
            }
        }

        let res = self
            .new_reqwest_request()
            .multipart(outgoing)
            .send()
            .await
            .map_err(reqwest_error_to_request_error)?;

        Ok(ClientResponse {
            response: Box::new(res),
            extensions: self.extensions,
        })
    }

    /// Sends the request as an HTML-form submission (urlencoded body, or query
    /// parameters for GET/HEAD).
    pub async fn send_form(self, data: &impl Serialize) -> Result<ClientResponse, RequestError> {
        // For GET and HEAD requests, we encode the form data as query parameters.
        // For other request methods, we encode the form data as the request body.
        if matches!(*self.method(), Method::GET | Method::HEAD) {
            return self.extend_query(data).send_empty_body().await;
        }

        let body =
            serde_urlencoded::to_string(data).map_err(|err| RequestError::Body(err.to_string()))?;

        self.typed_header(ContentType::form_url_encoded())
            .send_raw_bytes(body)
            .await
    }

    /// Sends the request with an empty body.
    pub async fn send_empty_body(self) -> Result<ClientResponse, RequestError> {
        #[cfg(feature = "web")]
        if cfg!(target_arch = "wasm32") {
            return self.send_js_value(wasm_bindgen::JsValue::UNDEFINED).await;
        }

        #[cfg(not(target_arch = "wasm32"))]
        {
            let res = self
                .new_reqwest_request()
                .send()
                .await
                .map_err(reqwest_error_to_request_error)?;
            return Ok(ClientResponse {
                response: Box::new(res),
                extensions: self.extensions,
            });
        }

        // Only reachable when neither cfg branch applies (hence the crate-level
        // `allow(unreachable_code)`).
        unimplemented!()
    }

    /// Sends the request with the given bytes as the body, without setting a
    /// content type.
    pub async fn send_raw_bytes(
        self,
        bytes: impl Into<Bytes>,
    ) -> Result<ClientResponse, RequestError> {
        #[cfg(feature = "web")]
        if cfg!(target_arch = "wasm32") {
            let bytes = bytes.into();
            let uint_8_array = js_sys::Uint8Array::from(&bytes[..]);
            return self.send_js_value(uint_8_array.into()).await;
        }

        #[cfg(not(target_arch = "wasm32"))]
        {
            let res = self
                .new_reqwest_request()
                .body(bytes.into())
                .send()
                .await
                .map_err(reqwest_error_to_request_error)?;
            return Ok(ClientResponse {
                response: Box::new(res),
                extensions: self.extensions,
            });
        }

        unimplemented!()
    }

    /// Sends text data with the `text/plain; charset=utf-8` content type.
    pub async fn send_text(
        self,
        text: impl Into<String> + Into<Bytes>,
    ) -> Result<ClientResponse, RequestError> {
        self.typed_header(ContentType::text_utf8())
            .send_raw_bytes(text)
            .await
    }

    /// Sends JSON data with the `application/json` content type.
    pub async fn send_json(self, json: &impl Serialize) -> Result<ClientResponse, RequestError> {
        let bytes =
            serde_json::to_vec(json).map_err(|e| RequestError::Serialization(e.to_string()))?;
        // Trivial payloads (`{}`/`null`) are sent as an empty body instead.
        if bytes.is_empty() || bytes == b"{}" || bytes == b"null" {
            return self.send_empty_body().await;
        }
        self.typed_header(ContentType::json())
            .send_raw_bytes(bytes)
            .await
    }

    /// Sends the request with a streaming body. On native this streams chunks
    /// as they arrive; on the web the whole stream is buffered first.
    pub async fn send_body_stream(
        self,
        stream: impl Stream<Item = Result<Bytes, StreamingError>> + Send + 'static,
    ) -> Result<ClientResponse, RequestError> {
        #[cfg(not(target_arch = "wasm32"))]
        {
            let res = self
                .new_reqwest_request()
                .body(reqwest::Body::wrap_stream(stream))
                .send()
                .await
                .map_err(reqwest_error_to_request_error)?;
            return Ok(ClientResponse {
                response: Box::new(res),
                extensions: self.extensions,
            });
        }

        // On the web, we have to buffer the entire stream into a Blob before sending it,
        // since the Fetch API doesn't support streaming request bodies on browsers yet.
        #[cfg(feature = "web")]
        {
            use wasm_bindgen::JsValue;
            let stream: Vec<Bytes> = stream.try_collect().await.map_err(|e| {
                RequestError::Request(format!("Error collecting stream for request body: {}", e))
            })?;
            // Pre-size the array to the total byte length, then copy each chunk in.
            let uint_8_array =
                js_sys::Uint8Array::new_with_length(stream.iter().map(|b| b.len() as u32).sum());
            let mut offset = 0;
            for chunk in stream {
                uint_8_array.set(&js_sys::Uint8Array::from(&chunk[..]), offset);
                offset += chunk.len() as u32;
            }
            return self.send_js_value(JsValue::from(uint_8_array)).await;
        }

        unimplemented!()
    }

    /// Sends the request through the browser Fetch API with the given JS value
    /// as the body, wrapping the result in a [`ClientResponse`].
    #[cfg(feature = "web")]
    pub async fn send_js_value(
        self,
        value: wasm_bindgen::JsValue,
    ) -> Result<ClientResponse, RequestError> {
        use std::str::FromStr;

        let inner = self
            .new_gloo_request()
            .body(value)
            .map_err(|e| RequestError::Request(e.to_string()))?
            .send()
            .await
            .map_err(|e| RequestError::Request(e.to_string()))?;

        let status = inner.status();
        let url = inner
            .url()
            .parse()
            .map_err(|e| RequestError::Request(format!("Error parsing response URL: {}", e)))?;
        // Convert the browser header map into an `http::HeaderMap`, dropping
        // entries that don't parse.
        let headers = {
            let mut map = HeaderMap::new();
            for (key, value) in inner.headers().entries() {
                if let Ok(header_value) = http::HeaderValue::from_str(&value) {
                    let header = HeaderName::from_str(&key).unwrap();
                    map.append(header, header_value);
                }
            }
            map
        };
        let content_length = headers
            .get(http::header::CONTENT_LENGTH)
            .and_then(|val| val.to_str().ok())
            .and_then(|s| s.parse::<u64>().ok());
        let status = http::StatusCode::from_u16(status).unwrap_or(http::StatusCode::OK);

        Ok(ClientResponse {
            extensions: self.extensions,
            response: Box::new(browser::WrappedGlooResponse {
                inner,
                headers,
                status,
                url,
                content_length,
            }),
        })
    }
}

// SAFETY: On wasm reqwest not being send/sync gets annoying, but it's not
// relevant since wasm is single-threaded.
unsafe impl Send for ClientRequest {}
unsafe impl Sync for ClientRequest {}

/// A wrapper type over the platform's HTTP response type.
///
/// This abstracts over the inner `reqwest::Response` type and provides the original request
/// and a way to store state associated with the response.
///
/// On the web, it uses `web_sys::Response` instead of `reqwest::Response` to avoid pulling in
/// the entire `reqwest` crate and to support native browser APIs.
pub struct ClientResponse {
    // The platform-specific response, type-erased behind the driver trait.
    pub(crate) response: Box<dyn ClientResponseDriver>,
    // State carried over from the originating `ClientRequest`.
    pub(crate) extensions: Extensions,
}

impl ClientResponse {
    /// The HTTP status code of the response.
    pub fn status(&self) -> StatusCode {
        self.response.status()
    }

    /// The response headers.
    pub fn headers(&self) -> &HeaderMap {
        self.response.headers()
    }

    /// The final URL of the response.
    pub fn url(&self) -> &Url {
        self.response.url()
    }

    /// The content length reported by the response, if any.
    pub fn content_length(&self) -> Option<u64> {
        self.response.content_length()
    }

    /// Consume the response and buffer the full body into memory.
    pub async fn bytes(self) -> Result<Bytes, RequestError> {
        self.response.bytes().await
    }

    /// Consume the response and stream the body chunk by chunk.
    pub fn bytes_stream(
        self,
    ) -> impl futures_util::Stream<Item = Result<Bytes, StreamingError>> + 'static + Unpin + Send
    {
        self.response.bytes_stream()
    }

    /// State carried over from the originating request.
    pub fn extensions(&self) -> &Extensions {
        &self.extensions
    }

    /// Mutable access to the carried-over state.
    pub fn extensions_mut(&mut self) -> &mut Extensions {
        &mut self.extensions
    }

    /// Consume the response and deserialize the body as JSON.
    pub async fn json<T: DeserializeOwned>(self) -> Result<T, RequestError> {
        serde_json::from_slice(&self.bytes().await?)
            .map_err(|e| RequestError::Decode(e.to_string()))
    }

    /// Consume the response and decode the body as text.
    pub async fn text(self) -> Result<String, RequestError> {
        self.response.text().await
    }

    /// Creates the `http::response::Parts` from this response.
    pub fn make_parts(&self) -> Parts {
        let mut response = http::response::Response::builder().status(self.response.status());
        response = response.version(self.response.version());
        for (key, value) in self.response.headers().iter() {
            response = response.header(key, value);
        }
        let (parts, _) = response.body(()).unwrap().into_parts();
        parts
    }

    /// Consumes the response, returning the head and a stream of the body.
    pub fn into_parts(self) -> (Parts, impl Stream<Item = Result<Bytes, StreamingError>>) {
        (self.make_parts(), self.bytes_stream())
    }
}

/// Set the root server URL that all server function paths are relative to for the client.
///
/// If this is not set, it defaults to the origin.
pub fn set_server_url(url: &'static str) {
    ROOT_URL.set(url).unwrap();
}

/// Returns the root server URL for all server functions.
pub fn get_server_url() -> &'static str {
    ROOT_URL.get().copied().unwrap_or("")
}

static ROOT_URL: OnceLock<&'static str> = OnceLock::new();

/// Delete the extra request headers for all server functions.
pub fn clear_request_headers() {
    REQUEST_HEADERS.lock().unwrap().clear();
}

/// Set the extra request headers for all server functions.
pub fn set_request_headers(headers: HeaderMap) {
    *REQUEST_HEADERS.lock().unwrap() = headers;
}

/// Returns the extra request headers for all server functions.
pub fn get_request_headers() -> HeaderMap {
    REQUEST_HEADERS.lock().unwrap().clone()
}

static REQUEST_HEADERS: LazyLock<Mutex<HeaderMap>> = LazyLock::new(|| Mutex::new(HeaderMap::new()));

/// Platform abstraction implemented by each concrete response type
/// (`reqwest::Response` natively, `WrappedGlooResponse` on the web).
pub trait ClientResponseDriver {
    fn status(&self) -> StatusCode;
    fn headers(&self) -> &HeaderMap;
    fn url(&self) -> &Url;
    // Default to HTTP/2 for drivers that cannot report a version.
    fn version(&self) -> http::Version {
        http::Version::HTTP_2
    }
    fn content_length(&self) -> Option<u64>;
    fn bytes(self: Box<Self>) -> Pin<Box<dyn Future<Output = Result<Bytes, RequestError>> + Send>>;
    fn bytes_stream(
        self: Box<Self>,
    ) -> Pin<Box<dyn Stream<Item = Result<Bytes, StreamingError>> + 'static + Unpin + Send>>;
    fn text(self: Box<Self>) -> Pin<Box<dyn Future<Output = Result<String, RequestError>> + Send>>;
}

/// Driver implementation backed by `reqwest::Response`.
mod native {
    use futures::Stream;

    use super::*;

    impl ClientResponseDriver for reqwest::Response {
        fn status(&self) -> http::StatusCode {
            reqwest::Response::status(self)
        }

        fn version(&self) -> http::Version {
            // reqwest does not expose `version()` on wasm; report HTTP/2.
            #[cfg(target_arch = "wasm32")]
            {
                return http::Version::HTTP_2;
            }

            reqwest::Response::version(self)
        }

        fn headers(&self) -> &http::HeaderMap {
            reqwest::Response::headers(self)
        }

        fn url(&self) -> &url::Url {
            reqwest::Response::url(self)
        }

        fn content_length(&self) -> Option<u64> {
            reqwest::Response::content_length(self)
        }

        fn bytes(
            self: Box<Self>,
        ) -> Pin<Box<dyn Future<Output = Result<Bytes, RequestError>> + Send>> {
            // SendWrapper makes the future `Send` on wasm, where it will only
            // ever be polled on the single browser thread.
            Box::pin(SendWrapper::new(async move {
                reqwest::Response::bytes(*self)
                    .map_err(reqwest_error_to_request_error)
                    .await
            }))
        }

        fn bytes_stream(
            self: Box<Self>,
        ) -> Pin<Box<dyn Stream<Item = Result<Bytes, StreamingError>> + 'static + Unpin + Send>>
        {
            Box::pin(SendWrapper::new(
                reqwest::Response::bytes_stream(*self).map_err(|_| StreamingError::Failed),
            ))
        }

        fn text(
            self: Box<Self>,
        ) -> Pin<Box<dyn Future<Output = Result<String, RequestError>> + Send>> {
            Box::pin(SendWrapper::new(async move {
                reqwest::Response::text(*self)
                    .map_err(reqwest_error_to_request_error)
                    .await
            }))
        }
    }
}

/// Driver implementation backed by the browser Fetch API via `gloo-net`.
#[cfg(feature = "web")]
mod browser {
    use crate::{ClientResponseDriver, StreamingError};
    use bytes::Bytes;
    use dioxus_fullstack_core::RequestError;
    use futures::{Stream, StreamExt};
    use http::{HeaderMap, StatusCode};
    use js_sys::Uint8Array;
    use send_wrapper::SendWrapper;
    use std::{pin::Pin, prelude::rust_2024::Future};
    use wasm_bindgen::JsCast;

    /// A gloo response plus the pieces of its head that were eagerly converted
    /// to `http` types when the response was received (see `send_js_value`).
    pub(crate) struct WrappedGlooResponse {
        pub(crate) inner: gloo_net::http::Response,
        pub(crate) headers: HeaderMap,
        pub(crate) status: StatusCode,
        pub(crate) url: url::Url,
        pub(crate) content_length: Option<u64>,
    }

    impl ClientResponseDriver for WrappedGlooResponse {
        fn status(&self) -> StatusCode {
            self.status
        }

        fn headers(&self) -> &HeaderMap {
            &self.headers
        }

        fn url(&self) -> &url::Url {
            &self.url
        }

        fn content_length(&self) -> Option<u64> {
            self.content_length
        }

        fn bytes(
            self: Box<Self>,
        ) -> Pin<Box<dyn Future<Output = Result<Bytes, RequestError>> + Send>> {
            Box::pin(SendWrapper::new(async move {
                let bytes = self
                    .inner
                    .binary()
                    .await
                    .map_err(|e| RequestError::Request(e.to_string()))?;
                Ok(bytes.into())
            }))
        }

        fn bytes_stream(
            self: Box<Self>,
        ) -> Pin<Box<dyn Stream<Item = Result<Bytes, StreamingError>> + 'static + Unpin + Send>>
        {
            // A response may legitimately have no body; surface that as a
            // single `Failed` item rather than an empty stream.
            let body = match self.inner.body() {
                Some(body) => body,
                None => {
                    return Box::pin(SendWrapper::new(futures::stream::iter([Err(
                        StreamingError::Failed,
                    )])));
                }
            };

            // Each chunk from the browser ReadableStream is a JsValue that
            // must be downcast to a Uint8Array before copying out.
            Box::pin(SendWrapper::new(
                wasm_streams::ReadableStream::from_raw(body)
                    .into_stream()
                    .map(|chunk| {
                        let array = chunk
                            .map_err(|_| StreamingError::Failed)?
                            .dyn_into::<Uint8Array>()
                            .map_err(|_| StreamingError::Failed)?;
                        Ok(array.to_vec().into())
                    }),
            ))
        }

        fn text(
            self: Box<Self>,
        ) -> Pin<Box<dyn Future<Output = Result<String, RequestError>> + Send>> {
            Box::pin(SendWrapper::new(async move {
                self.inner
                    .text()
                    .await
                    .map_err(|e| RequestError::Request(e.to_string()))
            }))
        }
    }
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/lazy.rs
packages/fullstack/src/lazy.rs
#![allow(clippy::needless_return)]

use dioxus_core::CapturedError;
use std::{hint::black_box, prelude::rust_2024::Future, sync::atomic::AtomicBool};

/// `Lazy` is a thread-safe, lazily-initialized global variable.
///
/// Unlike other async once-cell implementations, accessing the value of a `Lazy` instance is synchronous
/// and done on `deref`.
///
/// This is done by offloading the async initialization to a blocking thread during the first access,
/// and then using the initialized value for all subsequent accesses.
///
/// It uses `std::sync::OnceLock` internally to ensure that the value is only initialized once.
pub struct Lazy<T> {
    // The once-initialized value itself.
    value: std::sync::OnceLock<T>,
    // Set by the first thread that begins initialization so later threads wait
    // on `value` instead of racing the constructor.
    started_initialization: AtomicBool,
    // Type-erased entry point that runs the async constructor to completion;
    // `None` when the value must be supplied via `set`.
    constructor: Option<fn() -> Result<T, CapturedError>>,
    _phantom: std::marker::PhantomData<T>,
}

impl<T: Send + Sync + 'static> Lazy<T> {
    /// Create a new `Lazy` instance.
    ///
    /// This internally calls `std::sync::OnceLock::new()` under the hood.
    #[allow(clippy::self_named_constructors)]
    pub const fn lazy() -> Self {
        Self {
            _phantom: std::marker::PhantomData,
            constructor: None,
            started_initialization: AtomicBool::new(false),
            value: std::sync::OnceLock::new(),
        }
    }

    /// Create a `Lazy` that initializes itself on first access by driving the
    /// async `constructor` to completion (see `blocking_initialize`).
    ///
    /// Panics if `constructor` is not a zero-sized type, since the stored
    /// function pointer re-materializes it from thin air later.
    pub const fn new<F, G, E>(constructor: F) -> Self
    where
        F: Fn() -> G + Copy,
        G: Future<Output = Result<T, E>> + Send + 'static,
        E: Into<CapturedError>,
    {
        if std::mem::size_of::<F>() != 0 {
            panic!("The constructor function must be a zero-sized type (ZST). Consider using a function pointer or a closure without captured variables.");
        }

        // Prevent the constructor from being optimized out
        black_box(constructor);

        Self {
            _phantom: std::marker::PhantomData,
            value: std::sync::OnceLock::new(),
            started_initialization: AtomicBool::new(false),
            constructor: Some(blocking_initialize::<T, F, G, E>),
        }
    }

    /// Set the value of the `Lazy` instance.
    ///
    /// This should only be called once during the server setup phase, typically inside `dioxus::serve`.
/// Future calls to this method will return an error containing the provided value. pub fn set(&self, pool: T) -> Result<(), CapturedError> { let res = self.value.set(pool); if res.is_err() { return Err(anyhow::anyhow!("Lazy value is already initialized.").into()); } Ok(()) } pub fn try_set(&self, pool: T) -> Result<(), T> { self.value.set(pool) } /// Initialize the value of the `Lazy` instance if it hasn't been initialized yet. pub fn initialize(&self) -> Result<(), CapturedError> { if let Some(constructor) = self.constructor { // If we're already initializing this value, wait on the receiver. if self .started_initialization .swap(true, std::sync::atomic::Ordering::SeqCst) { self.value.wait(); return Ok(()); } // Otherwise, we need to initialize the value self.set(constructor().unwrap())?; } Ok(()) } /// Get a reference to the value of the `Lazy` instance. This will block the current thread if the /// value is not yet initialized. pub fn get(&self) -> &T { if self.constructor.is_none() { return self.value.get().expect("Lazy value is not initialized. Make sure to call `initialize` before dereferencing."); }; if self.value.get().is_none() { self.initialize().expect("Failed to initialize lazy value"); } self.value.get().unwrap() } } impl<T: Send + Sync + 'static> Default for Lazy<T> { fn default() -> Self { Self::lazy() } } impl<T: Send + Sync + 'static> std::ops::Deref for Lazy<T> { type Target = T; fn deref(&self) -> &Self::Target { self.get() } } impl<T: std::fmt::Debug + Send + Sync + 'static> std::fmt::Debug for Lazy<T> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Lazy").field("value", self.get()).finish() } } /// This is a small hack that allows us to staple the async initialization into a blocking context. /// /// We call the `rust-call` method of the zero-sized constructor function. This is safe because we're /// not actually dereferencing any unsafe data, just calling its vtable entry to get the future. 
fn blocking_initialize<T, F, G, E>() -> Result<T, CapturedError>
where
    T: Send + Sync + 'static,
    F: Fn() -> G + Copy,
    G: Future<Output = Result<T, E>> + Send + 'static,
    E: Into<CapturedError>,
{
    // Guard the zeroed-closure trick below: a non-ZST closure would contain
    // real captured data that `zeroed` cannot fabricate.
    assert_eq!(std::mem::size_of::<F>(), 0, "The constructor function must be a zero-sized type (ZST). Consider using a function pointer or a closure without captured variables.");

    #[cfg(feature = "server")]
    {
        // SAFETY: F is asserted to be zero-sized above, so a zeroed F carries
        // no state — calling it only invokes its (stateless) call code.
        let ptr: F = unsafe { std::mem::zeroed() };
        let fut = ptr();

        // Run the async constructor to completion on a dedicated thread with
        // its own single-threaded tokio runtime, then join synchronously.
        return std::thread::spawn(move || {
            tokio::runtime::Builder::new_current_thread()
                .enable_all()
                .build()
                .unwrap()
                .block_on(fut)
                .map_err(|e| e.into())
        })
        .join()
        .unwrap();
    }

    // todo: technically we can support constructors in wasm with the same tricks inventory uses with `__wasm_call_ctors`
    // the host would need to decide when to cal the ctors and when to block them.
    #[cfg(not(feature = "server"))]
    unimplemented!("Lazy initialization is only supported with tokio and threads enabled.")
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/request.rs
packages/fullstack/src/request.rs
use dioxus_fullstack_core::{RequestError, ServerFnError};
#[cfg(feature = "server")]
use headers::Header;
use http::response::Parts;
use std::{future::Future, pin::Pin};

use crate::{ClientRequest, ClientResponse};

/// The `IntoRequest` trait allows types to be used as the body of a request to a HTTP endpoint or server function.
///
/// `IntoRequest` allows for types to handle the calling of `ClientRequest::send` where the result is then
/// passed to `FromResponse` to decode the response.
///
/// You can think of the `IntoRequest` and `FromResponse` traits as "inverse" traits of the axum
/// `FromRequest` and `IntoResponse` traits. Just like a type can be decoded from a request via `FromRequest`,
/// a type can be encoded into a request via `IntoRequest`.
///
/// ## Generic State
///
/// `IntoRequest` is generic over the response type `R` which defaults to `ClientResponse`. The default
/// `ClientResponse` is the base response type that internally wraps `reqwest::Response`.
///
/// However, some responses might need state from the initial request to properly decode the response.
/// Most state can be extended via the `.extension()` method on `ClientRequest`. In some cases, like
/// websockets, the response needs to retain an initial connection from the request. Here, you can use
/// the `R` generic to specify a concrete response type. The resulting type that implements `FromResponse`
/// must also be generic over the same `R` type.
pub trait IntoRequest<R = ClientResponse>: Sized {
    fn into_request(
        self,
        req: ClientRequest,
    ) -> impl Future<Output = Result<R, RequestError>> + 'static;
}

// A 1-tuple delegates straight to its single element.
impl<A, R> IntoRequest<R> for (A,)
where
    A: IntoRequest<R> + 'static + Send,
{
    fn into_request(
        self,
        req: ClientRequest,
    ) -> impl Future<Output = Result<R, RequestError>> + 'static {
        A::into_request(self.0, req)
    }
}

/// The inverse of `IntoRequest`: decode `Self` from the response `R`.
pub trait FromResponse<R = ClientResponse>: Sized {
    fn from_response(res: R) -> impl Future<Output = Result<Self, ServerFnError>>;
}

// Anything decodable from just the response head is decodable from the whole
// response (the body is dropped).
impl<A> FromResponse for A
where
    A: FromResponseParts,
{
    fn from_response(res: ClientResponse) -> impl Future<Output = Result<Self, ServerFnError>> {
        async move {
            let (parts, _body) = res.into_parts();
            let mut parts = parts;
            A::from_response_parts(&mut parts)
        }
    }
}

// Tuple impls: all leading elements decode from the head, and only the LAST
// element may consume the response itself.
impl<A, B> FromResponse for (A, B)
where
    A: FromResponseParts,
    B: FromResponse,
{
    fn from_response(res: ClientResponse) -> impl Future<Output = Result<Self, ServerFnError>> {
        async move {
            let mut parts = res.make_parts();
            let a = A::from_response_parts(&mut parts)?;
            let b = B::from_response(res).await?;
            Ok((a, b))
        }
    }
}

impl<A, B, C> FromResponse for (A, B, C)
where
    A: FromResponseParts,
    B: FromResponseParts,
    C: FromResponse,
{
    fn from_response(res: ClientResponse) -> impl Future<Output = Result<Self, ServerFnError>> {
        async move {
            let mut parts = res.make_parts();
            let a = A::from_response_parts(&mut parts)?;
            let b = B::from_response_parts(&mut parts)?;
            let c = C::from_response(res).await?;
            Ok((a, b, c))
        }
    }
}

/// Decode `Self` from only the response head (status/headers/etc.), without
/// consuming the body.
pub trait FromResponseParts
where
    Self: Sized,
{
    fn from_response_parts(parts: &mut Parts) -> Result<Self, ServerFnError>;
}

#[cfg(feature = "server")]
impl<T: Header> FromResponseParts for axum_extra::TypedHeader<T> {
    fn from_response_parts(parts: &mut Parts) -> Result<Self, ServerFnError> {
        use headers::HeaderMapExt;
        let t = parts
            .headers
            .typed_get::<T>()
            .ok_or_else(|| ServerFnError::Serialization("Invalid header value".into()))?;
        Ok(axum_extra::TypedHeader(t))
    }
}

/* todo: make the serverfns return ServerFnRequest which lets us control the future better */
/// A boxed, in-flight server-function call. Does nothing until awaited.
#[pin_project::pin_project]
#[must_use = "Requests do nothing unless you `.await` them"]
pub struct ServerFnRequest<Output> {
    _phantom: std::marker::PhantomData<Output>,
    #[pin]
    fut: Pin<Box<dyn Future<Output = Output> + Send>>,
}

impl<O> ServerFnRequest<O> {
    /// Wrap a future in a `ServerFnRequest`.
    pub fn new(res: impl Future<Output = O> + Send + 'static) -> Self {
        ServerFnRequest {
            _phantom: std::marker::PhantomData,
            fut: Box::pin(res),
        }
    }
}

// Only `Result`-typed requests are awaitable, matching the server-fn contract.
impl<T, E> std::future::Future for ServerFnRequest<Result<T, E>> {
    type Output = Result<T, E>;

    fn poll(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Self::Output> {
        self.project().fut.poll(cx)
    }
}

#[doc(hidden)]
#[diagnostic::on_unimplemented(
    message = "The return type of a server function must be `Result<T, E>`",
    note = "`T` is either `impl IntoResponse` *or* `impl Serialize`",
    note = "`E` is either `From<ServerFnError> + Serialize`, `dioxus::CapturedError` or `StatusCode`."
)]
pub trait AssertIsResult {}
impl<T, E> AssertIsResult for Result<T, E> {}

#[doc(hidden)]
pub fn assert_is_result<T: AssertIsResult>() {}

#[diagnostic::on_unimplemented(message = r#"❌ Invalid Arguments to ServerFn ❌

The arguments to the server function must be either:
- a single `impl FromRequest + IntoRequest` argument
- or multiple `DeserializeOwned` arguments.

Did you forget to implement `IntoRequest` or `Deserialize` for one of the arguments?

`IntoRequest` is a trait that allows payloads to be sent to the server function.

> See https://dioxuslabs.com/learn/0.7/essentials/fullstack/server_functions for more details.
"#)]
pub trait AssertCanEncode {}
pub struct CantEncode;
pub struct EncodeIsVerified;
impl AssertCanEncode for EncodeIsVerified {}

#[diagnostic::on_unimplemented(message = r#"❌ Invalid return type from ServerFn ❌

The arguments to the server function must be either:
- a single `impl FromResponse` return type
- a single `impl Serialize + DeserializedOwned` return type

Did you forget to implement `FromResponse` or `DeserializeOwned` for one of the arguments?

`FromResponse` is a trait that allows payloads to be decoded from the server function response.

> See https://dioxuslabs.com/learn/0.7/essentials/fullstack/server_functions for more details.
"#)]
pub trait AssertCanDecode {}
pub struct CantDecode;
pub struct DecodeIsVerified;
impl AssertCanDecode for DecodeIsVerified {}

#[doc(hidden)]
pub fn assert_can_encode(_t: impl AssertCanEncode) {}
#[doc(hidden)]
pub fn assert_can_decode(_t: impl AssertCanDecode) {}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/encoding.rs
packages/fullstack/src/encoding.rs
use bytes::Bytes; use serde::{de::DeserializeOwned, Serialize}; /// A trait for encoding and decoding data. /// /// This takes an owned self to make it easier for zero-copy encodings. pub trait Encoding: 'static { fn content_type() -> &'static str; fn stream_content_type() -> &'static str; fn to_bytes(data: impl Serialize) -> Option<Bytes> { let mut buf = Vec::new(); Self::encode(data, &mut buf)?; Some(buf.into()) } fn encode(data: impl Serialize, buf: &mut Vec<u8>) -> Option<usize>; fn decode<O: DeserializeOwned>(bytes: Bytes) -> Option<O>; } pub struct JsonEncoding; impl Encoding for JsonEncoding { fn content_type() -> &'static str { "application/json" } fn stream_content_type() -> &'static str { "application/stream+json" } fn encode(data: impl Serialize, mut buf: &mut Vec<u8>) -> Option<usize> { let len = buf.len(); serde_json::to_writer(&mut buf, &data).ok()?; Some(buf.len() - len) } fn decode<O: DeserializeOwned>(bytes: Bytes) -> Option<O> { serde_json::from_slice(&bytes).ok() } } pub struct CborEncoding; impl Encoding for CborEncoding { fn content_type() -> &'static str { "application/cbor" } fn stream_content_type() -> &'static str { "application/stream+cbor" } fn decode<O: DeserializeOwned>(bytes: Bytes) -> Option<O> { ciborium::de::from_reader(bytes.as_ref()).ok() } fn encode(data: impl Serialize, mut buf: &mut Vec<u8>) -> Option<usize> { let len = buf.len(); ciborium::into_writer(&data, &mut buf).ok()?; Some(buf.len() - len) } } #[cfg(feature = "postcard")] pub struct PostcardEncoding; #[cfg(feature = "postcard")] impl Encoding for PostcardEncoding { fn content_type() -> &'static str { "application/postcard" } fn stream_content_type() -> &'static str { "application/stream+postcard" } fn encode(data: impl Serialize, mut buf: &mut Vec<u8>) -> Option<usize> { let len = buf.len(); postcard::to_io(&data, &mut buf).ok()?; Some(buf.len() - len) } fn decode<O: DeserializeOwned>(bytes: Bytes) -> Option<O> { postcard::from_bytes(bytes.as_ref()).ok() } } 
/// MessagePack encoding backed by `rmp_serde`, available behind the `msgpack` feature.
#[cfg(feature = "msgpack")]
pub struct MsgPackEncoding;

#[cfg(feature = "msgpack")]
impl Encoding for MsgPackEncoding {
    fn content_type() -> &'static str {
        "application/msgpack"
    }
    fn stream_content_type() -> &'static str {
        "application/stream+msgpack"
    }
    fn encode(data: impl Serialize, buf: &mut Vec<u8>) -> Option<usize> {
        // Record the starting length so we can report how many bytes were appended.
        let len = buf.len();
        rmp_serde::encode::write(buf, &data).ok()?;
        Some(buf.len() - len)
    }
    fn decode<O: DeserializeOwned>(bytes: Bytes) -> Option<O> {
        rmp_serde::from_slice(&bytes).ok()
    }
}

// todo: ... add rkyv support
// pub struct RkyvEncoding;
// impl Encoding for RkyvEncoding {
//     fn content_type() -> &'static str {
//         "application/rkyv"
//     }

//     fn stream_content_type() -> &'static str {
//         "application/stream+rkyv"
//     }

//     fn to_bytes(data: impl Serialize) -> Option<Bytes> {
//         let mut buf = rkyv::ser::Serializer::new(rkyv::ser::AllocSerializer::new());
//         rkyv::ser::Serializer::serialize(&mut buf, &data).ok()?;
//         Some(Bytes::from(buf.into_inner()))
//     }

//     fn from_bytes<O: DeserializeOwned>(bytes: Bytes) -> Option<O> {
//         let archived = unsafe { rkyv::archived_root::<O>(&bytes) };
//         rkyv::Deserialize::deserialize(archived).ok()
//     }
// }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/payloads/stream.rs
packages/fullstack/src/payloads/stream.rs
#![allow(clippy::type_complexity)]

use crate::{
    CborEncoding, ClientRequest, ClientResponse, Encoding, FromResponse, IntoRequest,
    JsonEncoding, ServerFnError,
};
use axum::extract::{FromRequest, Request};
use axum_core::response::IntoResponse;
use bytes::{Buf as _, Bytes};
use dioxus_fullstack_core::{HttpError, RequestError};
use futures::{Stream, StreamExt};
#[cfg(feature = "server")]
use futures_channel::mpsc::UnboundedSender;
use headers::{ContentType, Header};
use send_wrapper::SendWrapper;
use serde::{de::DeserializeOwned, Serialize};
use std::{future::Future, marker::PhantomData, pin::Pin};

/// A stream of text data.
///
/// # Chunking
///
/// Note that strings sent by the server might not arrive in the same chunking as they were sent.
///
/// This is because the underlying transport layer (HTTP/2 or HTTP/3) may choose to split or combine
/// chunks for efficiency.
///
/// If you need to preserve individual string boundaries, consider using `ChunkedTextStream` or another
/// encoding that preserves chunk boundaries.
pub type TextStream = Streaming<String>;

/// A stream of binary data.
///
/// # Chunking
///
/// Note that bytes sent by the server might not arrive in the same chunking as they were sent.
/// This is because the underlying transport layer (HTTP/2 or HTTP/3) may choose to split or combine
/// chunks for efficiency.
///
/// If you need to preserve individual byte boundaries, consider using `ChunkedByteStream` or another
/// encoding that preserves chunk boundaries.
pub type ByteStream = Streaming<Bytes>;

/// A stream of JSON-encoded data.
///
/// # Chunking
///
/// Normally, it's not possible to stream JSON over HTTP because browsers are free to re-chunk
/// data as they see fit. However, this implementation manually frames each JSON as if it were an unmasked
/// websocket message.
///
/// If you need to send a stream of JSON data without framing, consider using TextStream instead and
/// manually handling JSON buffering.
pub type JsonStream<T> = Streaming<T, JsonEncoding>;

/// A stream of Cbor-encoded data.
///
/// # Chunking
///
/// Normally, it's not possible to stream JSON over HTTP because browsers are free to re-chunk
/// data as they see fit. However, this implementation manually frames each item as if it were an unmasked
/// websocket message.
pub type CborStream<T> = Streaming<T, CborEncoding>;

/// A stream of manually chunked binary data.
///
/// This encoding preserves chunk boundaries by framing each chunk with its length, using Websocket
/// Framing.
pub type ChunkedByteStream = Streaming<Bytes, CborEncoding>;

/// A stream of manually chunked text data.
///
/// This encoding preserves chunk boundaries by framing each chunk with its length, using Websocket
/// Framing.
pub type ChunkedTextStream = Streaming<String, CborEncoding>;

/// A streaming payload.
///
/// ## Frames and Chunking
///
/// The streaming payload sends and receives data in discrete chunks or "frames". The size is converted
/// to hex and sent before each chunk, followed by a CRLF, the chunk data, and another CRLF.
///
/// This mimics actual HTTP chunked transfer encoding, but allows us to define our own framing
/// protocol on top of it.
///
/// Arbitrary bytes can be encoded between these frames, but the frames do come with some overhead.
///
/// ## Browser Support for Streaming Input
///
/// Browser fetch requests do not currently support full request duplexing, which
/// means that they do not begin handling responses until the full request has been sent.
///
/// This means that if you use a streaming input encoding, the input stream needs to
/// end before the output will begin.
///
/// Streaming requests are only allowed over HTTP2 or HTTP3.
///
/// Also note that not all browsers support streaming bodies to servers.
pub struct Streaming<T = String, E = ()> {
    // The underlying item stream; every payload variant is erased to this one
    // boxed trait-object form.
    stream: Pin<Box<dyn Stream<Item = Result<T, StreamingError>> + Send>>,
    // Zero-sized marker selecting the wire encoding `E`.
    encoding: PhantomData<E>,
}

/// Errors that a streaming payload can surface to the consumer.
#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq, Hash)]
pub enum StreamingError {
    /// The streaming request was interrupted and could not be completed.
    #[error("The streaming request was interrupted")]
    Interrupted,

    /// The stream failed to decode a chunk - possibly due to invalid data or version mismatch.
    #[error("The stream failed to decode a chunk")]
    Decoding,

    /// The stream failed to connect or encountered an error.
    #[error("The streaming request failed")]
    Failed,
}

impl<T: 'static + Send, E> Streaming<T, E> {
    /// Creates a new stream from the given stream.
    pub fn new(value: impl Stream<Item = T> + Send + 'static) -> Self {
        // Box and pin the incoming stream and store as a trait object
        Self {
            stream: Box::pin(value.map(|item| Ok(item)))
                as Pin<Box<dyn Stream<Item = Result<T, StreamingError>> + Send>>,
            encoding: PhantomData,
        }
    }

    /// Spawns a new task that produces items for the stream.
    ///
    /// The callback is provided an `UnboundedSender` that can be used to send items to the stream.
    #[cfg(feature = "server")]
    pub fn spawn<F>(callback: impl FnOnce(UnboundedSender<T>) -> F + Send + 'static) -> Self
    where
        F: Future<Output = ()> + 'static,
        T: Send,
    {
        let (tx, rx) = futures_channel::mpsc::unbounded();
        crate::spawn_platform(move || callback(tx));
        Self::new(rx)
    }

    /// Returns the next item in the stream, or `None` if the stream has ended.
    pub async fn next(&mut self) -> Option<Result<T, StreamingError>> {
        self.stream.as_mut().next().await
    }

    /// Consumes the wrapper, returning the inner stream.
    pub fn into_inner(self) -> impl Stream<Item = Result<T, StreamingError>> + Send {
        self.stream
    }

    /// Wraps an already-decoded item stream in a `Streaming` payload.
    ///
    /// Despite the name, the input items are `Result<T, _>`, not raw bytes; the
    /// byte-level framing is handled by the callers that build this stream.
    fn from_bytes(stream: impl Stream<Item = Result<T, StreamingError>> + Send + 'static) -> Self {
        Self {
            stream: Box::pin(stream),
            encoding: PhantomData,
        }
    }
}

// Any stream of string-convertible items can become a `TextStream`.
impl<S, U> From<S> for TextStream
where
    S: Stream<Item = U> + Send + 'static,
    U: Into<String>,
{
    fn from(value: S) -> Self {
        Self::new(value.map(|data| data.into()))
    }
}

// Fallible byte streams map their error (whatever it is) to `StreamingError::Failed`.
impl<S, E> From<S> for ByteStream
where
    S: Stream<Item = Result<Bytes, E>> + Send + 'static,
{
    fn from(value: S) -> Self {
        Self {
            stream: Box::pin(value.map(|data| data.map_err(|_| StreamingError::Failed))),
            encoding: PhantomData,
        }
    }
}

// Generic infallible conversion for encoded streams.
impl<T, S, U, E> From<S> for Streaming<T, E>
where
    S: Stream<Item = U> + Send + 'static,
    U: Into<T>,
    T: 'static + Send,
    E: Encoding,
{
    fn from(value: S) -> Self {
        Self::from_bytes(value.map(|data| Ok(data.into())))
    }
}

// Plain text streams are served unframed as `text/plain`.
impl IntoResponse for Streaming<String> {
    fn into_response(self) -> axum_core::response::Response {
        axum::response::Response::builder()
            .header("Content-Type", "text/plain; charset=utf-8")
            .body(axum::body::Body::from_stream(self.stream))
            .unwrap()
    }
}

// Raw byte streams are served unframed as `application/octet-stream`.
impl IntoResponse for Streaming<Bytes> {
    fn into_response(self) -> axum_core::response::Response {
        axum::response::Response::builder()
            .header("Content-Type", "application/octet-stream")
            .body(axum::body::Body::from_stream(self.stream))
            .unwrap()
    }
}

// Encoded streams frame each item via `encode_stream_frame` before sending.
impl<T: DeserializeOwned + Serialize + 'static, E: Encoding> IntoResponse for Streaming<T, E> {
    fn into_response(self) -> axum_core::response::Response {
        let res = self.stream.map(|r| match r {
            Ok(res) => match encode_stream_frame::<T, E>(res) {
                Some(bytes) => Ok(bytes),
                None => Err(StreamingError::Failed),
            },
            Err(_err) => Err(StreamingError::Failed),
        });

        axum::response::Response::builder()
            .header("Content-Type", E::stream_content_type())
            .body(axum::body::Body::from_stream(res))
            .unwrap()
    }
}

impl FromResponse for Streaming<String> {
    fn from_response(res: ClientResponse) -> impl Future<Output = Result<Self, ServerFnError>> {
        // SendWrapper: the browser response stream is not `Send`, but the
        // `Streaming` trait object requires it.
        SendWrapper::new(async move {
            let client_stream =
                Box::pin(res.bytes_stream().map(|byte| match byte {
                    // Each transport chunk must independently be valid UTF-8.
                    Ok(bytes) => match String::from_utf8(bytes.to_vec()) {
                        Ok(string) => Ok(string),
                        Err(_) => Err(StreamingError::Decoding),
                    },
                    Err(_) => Err(StreamingError::Failed),
                }));

            Ok(Self {
                stream: client_stream,
                encoding: PhantomData,
            })
        })
    }
}

impl FromResponse for Streaming<Bytes> {
    fn from_response(res: ClientResponse) -> impl Future<Output = Result<Self, ServerFnError>> {
        async move {
            // Bytes pass through untouched; transport errors become `Failed`.
            let client_stream = Box::pin(SendWrapper::new(res.bytes_stream().map(
                |byte| match byte {
                    Ok(bytes) => Ok(bytes),
                    Err(_) => Err(StreamingError::Failed),
                },
            )));

            Ok(Self {
                stream: client_stream,
                encoding: PhantomData,
            })
        }
    }
}

// Encoded responses are un-framed and decoded item-by-item on the client.
impl<T: DeserializeOwned + Serialize + 'static + Send, E: Encoding> FromResponse
    for Streaming<T, E>
{
    fn from_response(res: ClientResponse) -> impl Future<Output = Result<Self, ServerFnError>> {
        SendWrapper::new(async move {
            Ok(Self {
                stream: byte_stream_to_client_stream::<E, _, _, _>(res.bytes_stream()),
                encoding: PhantomData,
            })
        })
    }
}

// Server-side extractor: accepts only `text/plain` bodies and converts each
// chunk to UTF-8 text.
impl<S> FromRequest<S> for Streaming<String> {
    type Rejection = ServerFnError;

    fn from_request(
        req: Request,
        _state: &S,
    ) -> impl Future<Output = Result<Self, Self::Rejection>> + Send {
        async move {
            let (parts, body) = req.into_parts();
            let content_type = parts
                .headers
                .get("content-type")
                .and_then(|v| v.to_str().ok())
                .unwrap_or("");

            if !content_type.starts_with("text/plain") {
                HttpError::bad_request("Invalid content type")?;
            }

            let stream = body.into_data_stream();
            Ok(Self {
                stream: Box::pin(stream.map(|byte| match byte {
                    Ok(bytes) => match String::from_utf8(bytes.to_vec()) {
                        Ok(string) => Ok(string),
                        Err(_) => Err(StreamingError::Decoding),
                    },
                    Err(_) => Err(StreamingError::Failed),
                })),
                encoding: PhantomData,
            })
        }
    }
}

// Server-side extractor: accepts only `application/octet-stream` bodies.
impl<S> FromRequest<S> for ByteStream {
    type Rejection = ServerFnError;

    fn from_request(
        req: Request,
        _state: &S,
    ) -> impl Future<Output = Result<Self, Self::Rejection>> + Send {
        async move {
            let (parts, body) = req.into_parts();
            let content_type = parts
                .headers
                .get("content-type")
                .and_then(|v| v.to_str().ok())
                .unwrap_or("");

            if !content_type.starts_with("application/octet-stream") {
                HttpError::bad_request("Invalid content type")?;
            }

            let stream = body.into_data_stream();
            Ok(Self {
                stream: Box::pin(stream.map(|byte| match byte {
                    Ok(bytes) => Ok(bytes),
                    Err(_) => Err(StreamingError::Failed),
                })),
                encoding: PhantomData,
            })
        }
    }
}

// Server-side extractor for encoded streams: validates the encoding-specific
// content type, then un-frames and decodes the body.
impl<T: DeserializeOwned + Serialize + 'static + Send, E: Encoding, S> FromRequest<S>
    for Streaming<T, E>
{
    type Rejection = ServerFnError;

    fn from_request(
        req: Request,
        _state: &S,
    ) -> impl Future<Output = Result<Self, Self::Rejection>> + Send {
        async move {
            let (parts, body) = req.into_parts();
            let content_type = parts
                .headers
                .get("content-type")
                .and_then(|v| v.to_str().ok())
                .unwrap_or("");

            if !content_type.starts_with(E::stream_content_type()) {
                HttpError::bad_request("Invalid content type")?;
            }

            let stream = body.into_data_stream();
            Ok(Self {
                stream: byte_stream_to_client_stream::<E, _, _, _>(stream),
                encoding: PhantomData,
            })
        }
    }
}

// Client-side: send a text stream as an unframed `text/plain` request body.
impl IntoRequest for Streaming<String> {
    fn into_request(
        self,
        builder: ClientRequest,
    ) -> impl Future<Output = Result<ClientResponse, RequestError>> + 'static {
        async move {
            builder
                .header("Content-Type", "text/plain; charset=utf-8")?
                .send_body_stream(self.stream.map(|e| e.map(Bytes::from)))
                .await
        }
    }
}

// Client-side: send a byte stream as an unframed octet-stream request body.
impl IntoRequest for ByteStream {
    fn into_request(
        self,
        builder: ClientRequest,
    ) -> impl Future<Output = Result<ClientResponse, RequestError>> + 'static {
        async move {
            builder
                .header(ContentType::name(), "application/octet-stream")?
                .send_body_stream(self.stream)
                .await
        }
    }
}

// Client-side: frame and encode each item before sending the request body.
impl<T: DeserializeOwned + Serialize + 'static + Send, E: Encoding> IntoRequest
    for Streaming<T, E>
{
    fn into_request(
        self,
        builder: ClientRequest,
    ) -> impl Future<Output = Result<ClientResponse, RequestError>> + 'static {
        async move {
            builder
                .header("Content-Type", E::stream_content_type())?
                .send_body_stream(self.stream.map(|r| {
                    r.and_then(|item| {
                        encode_stream_frame::<T, E>(item).ok_or(StreamingError::Failed)
                    })
                }))
                .await
        }
    }
}

// Debug for the default (unencoded) stream: no useful fields to show.
impl<T> std::fmt::Debug for Streaming<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_tuple("Streaming").finish()
    }
}

// Debug for encoded streams: show the encoding's type name.
impl<T, E: Encoding> std::fmt::Debug for Streaming<T, E> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Streaming")
            .field("encoding", &std::any::type_name::<E>())
            .finish()
    }
}

/// This function encodes a single frame of a streaming payload using the specified encoding.
///
/// The resulting `Bytes` object is encoded as a websocket frame, so you can send it over a streaming
/// HTTP response or even a websocket connection.
///
/// Note that the packet is not masked, as it is assumed to be sent over a trusted connection.
pub fn encode_stream_frame<T: Serialize, E: Encoding>(data: T) -> Option<Bytes> {
    // We use full advantage of `BytesMut` here, writing a maximally full frame and then shrinking it
    // down to size at the end.
    //
    // Also note we don't do any masking over this data since it's not going over an untrusted
    // network like a websocket would.
    //
    // We allocate 10 extra bytes to account for framing overhead, which we'll shrink after
    // (10 = 2 header bytes + up to 8 bytes of extended payload length).
    let mut bytes = vec![0u8; 10];
    E::encode(data, &mut bytes)?;
    let len = (bytes.len() - 10) as u64;
    let opcode = 0x82; // FIN + binary opcode

    // Write the header directly into the allocated space, right-aligned so the
    // header ends exactly where the payload begins (offset 10).
    let offset = if len <= 125 {
        // 7-bit length: 2-byte header at indices 8..10.
        bytes[8] = opcode;
        bytes[9] = len as u8;
        8
    } else if len <= u16::MAX as u64 {
        // 16-bit extended length: 4-byte header at indices 6..10.
        bytes[6] = opcode;
        bytes[7] = 126;
        let len_bytes = (len as u16).to_be_bytes();
        bytes[8] = len_bytes[0];
        bytes[9] = len_bytes[1];
        6
    } else {
        // 64-bit extended length: full 10-byte header.
        bytes[0] = opcode;
        bytes[1] = 127;
        bytes[2..10].copy_from_slice(&len.to_be_bytes());
        0
    };

    // Shrink down to the actual used size - is zero copy!
    Some(Bytes::from(bytes).slice(offset..))
}

/// Converts a raw transport byte stream into a stream of decoded items,
/// decoding as many complete frames as each chunk contains.
///
/// NOTE(review): frames are decoded per transport chunk; a frame that is split
/// across two chunks cannot be reassembled here and surfaces as a `Decoding`
/// error — confirm the transport preserves frame boundaries.
fn byte_stream_to_client_stream<E, T, S, E1>(
    stream: S,
) -> Pin<Box<dyn Stream<Item = Result<T, StreamingError>> + Send>>
where
    S: Stream<Item = Result<Bytes, E1>> + 'static + Send,
    E: Encoding,
    T: DeserializeOwned + 'static,
{
    Box::pin(stream.flat_map(|bytes| {
        // Per-chunk decode state driven by `std::iter::from_fn` below.
        enum DecodeIteratorState {
            // No more items to yield from this chunk.
            Empty,
            // The transport itself reported an error for this chunk.
            Failed,
            // At least one frame already decoded from these bytes; further
            // decode failures just end the chunk.
            Checked(Bytes),
            // Nothing decoded yet; a failure on the very first frame is a
            // decoding error.
            UnChecked(Bytes),
        }
        let mut state = match bytes {
            Ok(bytes) => DecodeIteratorState::UnChecked(bytes),
            Err(_) => DecodeIteratorState::Failed,
        };
        futures::stream::iter(std::iter::from_fn(move || {
            match std::mem::replace(&mut state, DecodeIteratorState::Empty) {
                DecodeIteratorState::Empty => None,
                DecodeIteratorState::Failed => Some(Err(StreamingError::Failed)),
                DecodeIteratorState::Checked(mut bytes) => {
                    let r = decode_stream_frame_multi::<T, E>(&mut bytes);
                    if r.is_some() {
                        state = DecodeIteratorState::Checked(bytes)
                    }
                    r
                }
                DecodeIteratorState::UnChecked(mut bytes) => {
                    let r = decode_stream_frame_multi::<T, E>(&mut bytes);
                    if r.is_some() {
                        state = DecodeIteratorState::Checked(bytes);
                        r
                    } else {
                        Some(Err(StreamingError::Decoding))
                    }
                }
            }
        }))
    }))
}

/// Decode a websocket-framed streaming payload produced by [`encode_stream_frame`].
///
/// This function returns `None` if the frame is invalid or cannot be decoded.
///
/// It cannot handle masked frames, as those are not produced by our encoding function.
pub fn decode_stream_frame<T, E>(mut frame: Bytes) -> Option<T>
where
    E: Encoding,
    T: DeserializeOwned,
{
    decode_stream_frame_multi::<T, E>(&mut frame).and_then(|r| r.ok())
}

/// Decode one value and advance the bytes pointer
///
/// If the frame is empty return None.
///
/// Otherwise, if the initial opcode is not the one expected for binary stream
/// or the frame is not large enough return error StreamingError::Decoding
fn decode_stream_frame_multi<T, E>(frame: &mut Bytes) -> Option<Result<T, StreamingError>>
where
    E: Encoding,
    T: DeserializeOwned,
{
    let (offset, payload_len) = match offset_payload_len(frame)? {
        Ok(r) => r,
        Err(e) => return Some(Err(e)),
    };
    let r = E::decode(frame.slice(offset..offset + payload_len));
    // Advance past the consumed frame so the caller can decode the next one.
    frame.advance(offset + payload_len);
    r.map(|r| Ok(r))
}

/// Compute (offset,len) for decoding data
///
/// Returns `None` for an empty frame (end of chunk), `Some(Err(Decoding))`
/// for malformed headers or truncated payloads.
fn offset_payload_len(frame: &Bytes) -> Option<Result<(usize, usize), StreamingError>> {
    let data = frame.as_ref();
    if data.is_empty() {
        return None;
    }
    if data.len() < 2 {
        return Some(Err(StreamingError::Decoding));
    }

    let first = data[0];
    let second = data[1];

    // Require FIN with binary opcode and no RSV bits
    let fin = first & 0x80 != 0;
    let opcode = first & 0x0F;
    let rsv = first & 0x70;
    if !fin || opcode != 0x02 || rsv != 0 {
        return Some(Err(StreamingError::Decoding));
    }

    // Mask bit must be zero for our framing
    if second & 0x80 != 0 {
        return Some(Err(StreamingError::Decoding));
    }

    let mut offset = 2usize;
    let mut payload_len = (second & 0x7F) as usize;

    // 126 and 127 are escape values selecting 16-bit / 64-bit extended lengths.
    if payload_len == 126 {
        if data.len() < offset + 2 {
            return Some(Err(StreamingError::Decoding));
        }
        payload_len = u16::from_be_bytes([data[offset], data[offset + 1]]) as usize;
        offset += 2;
    } else if payload_len == 127 {
        if data.len() < offset + 8 {
            return Some(Err(StreamingError::Decoding));
        }
        let mut len_bytes = [0u8; 8];
        len_bytes.copy_from_slice(&data[offset..offset + 8]);
        let len_u64 = u64::from_be_bytes(len_bytes);
        if len_u64 > usize::MAX as u64 {
            return Some(Err(StreamingError::Decoding));
        }
        payload_len = len_u64 as usize;
        offset += 8;
    }

    // The whole payload must be present in this chunk.
    if data.len() < offset + payload_len {
        return Some(Err(StreamingError::Decoding));
    }

    Some(Ok((offset, payload_len)))
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/payloads/axum_types.rs
packages/fullstack/src/payloads/axum_types.rs
use super::*;
use crate::{ClientResponse, FromResponse};
pub use axum::extract::Json;
use axum::response::{Html, NoContent, Redirect};
use dioxus_fullstack_core::{RequestError, ServerFnError};
use futures::StreamExt;
use http::StatusCode;
use std::future::Future;

// Decode an HTML response body into any string-convertible wrapper.
impl<T: From<String>> FromResponse for Html<T> {
    fn from_response(res: ClientResponse) -> impl Future<Output = Result<Self, ServerFnError>> {
        async move {
            let content = res.text().await?;
            Ok(Html(content.into()))
        }
    }
}

// Send a `Json<T>` payload as the JSON request body.
impl<T> IntoRequest for Json<T>
where
    T: Serialize + 'static + DeserializeOwned,
{
    fn into_request(self, request: ClientRequest) -> impl Future<Output = ClientResult> + 'static {
        async move { request.send_json(&self.0).await }
    }
}

// Decode a JSON response body into `Json<T>`.
impl<T: DeserializeOwned> FromResponse for Json<T> {
    fn from_response(res: ClientResponse) -> impl Future<Output = Result<Self, ServerFnError>> {
        async move {
            let data = res.json::<T>().await?;
            Ok(Json(data))
        }
    }
}

// Reconstruct an axum `Redirect` from a redirect response's status code and
// `Location` header.
impl FromResponse for Redirect {
    fn from_response(res: ClientResponse) -> impl Future<Output = Result<Self, ServerFnError>> {
        async move {
            let location = res
                .headers()
                .get(http::header::LOCATION)
                .ok_or_else(|| RequestError::Redirect("Missing Location header".into()))?
                .to_str()
                .map_err(|_| RequestError::Redirect("Invalid Location header".into()))?;

            // Map the three redirect status codes this type can represent.
            match res.status() {
                StatusCode::SEE_OTHER => Ok(Redirect::to(location)),
                StatusCode::TEMPORARY_REDIRECT => Ok(Redirect::temporary(location)),
                StatusCode::PERMANENT_REDIRECT => Ok(Redirect::permanent(location)),
                _ => Err(RequestError::Redirect("Not a redirect status code".into()).into()),
            }
        }
    }
}

// `NoContent` succeeds only on a 204; any other status becomes an error that
// carries the response body text.
impl FromResponse for NoContent {
    fn from_response(res: ClientResponse) -> impl Future<Output = Result<Self, ServerFnError>> {
        async move {
            let status = res.status();
            if status == StatusCode::NO_CONTENT {
                Ok(NoContent)
            } else {
                let body = res.text().await.unwrap_or_else(|_| "".into());
                Err(RequestError::Status(body, status.into()).into())
            }
        }
    }
}

/// Implementation of `FromResponse` for `axum::response::Response`.
///
/// This allows converting a `ClientResponse` (from a client-side HTTP request)
/// into an `axum::Response` for server-side handling. The response's status,
/// headers, and body are transferred from the client response to the axum response.
impl FromResponse for axum::response::Response {
    fn from_response(res: ClientResponse) -> impl Future<Output = Result<Self, ServerFnError>> {
        async move {
            let parts = res.make_parts();
            // Stream the body through rather than buffering it.
            let body = axum::body::Body::from_stream(res.bytes_stream());
            let response = axum::response::Response::from_parts(parts, body);
            Ok(response)
        }
    }
}

/// Implementation of `IntoRequest` for `axum::extract::Request`.
///
/// This allows converting an `axum::Request` (from server-side extraction)
/// into a `ClientRequest` that can be sent as an HTTP request. The request's
/// headers and body are transferred from the axum request to the client request.
impl IntoRequest for axum::extract::Request {
    fn into_request(
        self,
        mut request: ClientRequest,
    ) -> impl Future<Output = Result<ClientResponse, RequestError>> + 'static {
        async move {
            let (parts, body) = self.into_parts();
            // Copy every header across before streaming the body.
            for (key, value) in &parts.headers {
                request = request.header(key, value)?;
            }
            request
                .send_body_stream(
                    body.into_data_stream()
                        .map(|res| res.map_err(|_| StreamingError::Failed)),
                )
                .await
        }
    }
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/payloads/msgpack.rs
packages/fullstack/src/payloads/msgpack.rs
#![forbid(unsafe_code)]

use axum::{
    body::{Body, Bytes},
    extract::{FromRequest, Request},
    http::{header::HeaderValue, StatusCode},
    response::{IntoResponse, Response},
};
use axum::{extract::rejection::BytesRejection, http, BoxError};
use derive_more::{Deref, DerefMut, From};
use serde::{de::DeserializeOwned, Serialize};

/// MessagePack Extractor / Response.
///
/// When used as an extractor, it can deserialize request bodies into some type that
/// implements [`serde::Deserialize`]. If the request body cannot be parsed, or value of the
/// `Content-Type` header does not match any of the `application/msgpack`, `application/x-msgpack`
/// or `application/*+msgpack` it will reject the request and return a `400 Bad Request` response.
///
/// When used as a response, it can serialize any type that implements [`serde::Serialize`] to
/// `MsgPack`, and will automatically set `Content-Type: application/msgpack` header.
#[derive(Debug, Clone, Copy, Default, Deref, DerefMut, From)]
pub struct MsgPack<T>(pub T);

impl<T, S> FromRequest<S> for MsgPack<T>
where
    T: DeserializeOwned,
    S: Send + Sync,
{
    type Rejection = MsgPackRejection;

    async fn from_request(req: Request, state: &S) -> Result<Self, Self::Rejection> {
        // Reject early on a wrong content type before buffering the body.
        if !message_pack_content_type(&req) {
            return Err(MsgPackRejection::MissingMsgPackContentType);
        }

        let bytes = Bytes::from_request(req, state).await?;
        let value = rmp_serde::from_slice(&bytes)
            .map_err(|e| MsgPackRejection::InvalidMsgPackBody(e.into()))?;
        Ok(MsgPack(value))
    }
}

impl<T> IntoResponse for MsgPack<T>
where
    T: Serialize,
{
    fn into_response(self) -> Response {
        // `to_vec_named` serializes struct fields by name (maps), unlike
        // `MsgPackRaw` below which uses positional arrays.
        let bytes = match rmp_serde::encode::to_vec_named(&self.0) {
            Ok(res) => res,
            Err(err) => {
                // Serialization failure becomes a plain-text 500.
                return Response::builder()
                    .status(StatusCode::INTERNAL_SERVER_ERROR)
                    .header("Content-Type", "text/plain")
                    .body(Body::new(err.to_string()))
                    .unwrap();
            }
        };

        let mut res = bytes.into_response();
        res.headers_mut().insert(
            "Content-Type",
            HeaderValue::from_static("application/msgpack"),
        );
        res
    }
}

/// MessagePack Extractor / Response.
///
/// When used as an extractor, it can deserialize request bodies into some type that
/// implements [`serde::Deserialize`]. If the request body cannot be parsed, or value of the
/// `Content-Type` header does not match any of the `application/msgpack`, `application/x-msgpack`
/// or `application/*+msgpack` it will reject the request and return a `400 Bad Request` response.
#[derive(Debug, Clone, Copy, Default, Deref, DerefMut, From)]
pub struct MsgPackRaw<T>(pub T);

impl<T, S> FromRequest<S> for MsgPackRaw<T>
where
    T: DeserializeOwned,
    S: Send + Sync,
{
    type Rejection = MsgPackRejection;

    async fn from_request(req: Request, state: &S) -> Result<Self, Self::Rejection> {
        if !message_pack_content_type(&req) {
            return Err(MsgPackRejection::MissingMsgPackContentType);
        }

        let bytes = Bytes::from_request(req, state).await?;
        let value = rmp_serde::from_slice(&bytes)
            .map_err(|e| MsgPackRejection::InvalidMsgPackBody(e.into()))?;
        Ok(MsgPackRaw(value))
    }
}

impl<T> IntoResponse for MsgPackRaw<T>
where
    T: Serialize,
{
    fn into_response(self) -> Response {
        // Positional (array) encoding, the rmp_serde default.
        let bytes = match rmp_serde::encode::to_vec(&self.0) {
            Ok(res) => res,
            Err(err) => {
                return Response::builder()
                    .status(StatusCode::INTERNAL_SERVER_ERROR)
                    .header("Content-Type", "text/plain")
                    .body(Body::new(err.to_string()))
                    .unwrap();
            }
        };

        let mut res = bytes.into_response();
        res.headers_mut().insert(
            "Content-Type",
            HeaderValue::from_static("application/msgpack"),
        );
        res
    }
}

/// Returns true when the request's `Content-Type` is a MessagePack type.
///
/// NOTE(review): matching is exact, so a header with parameters (e.g.
/// `application/msgpack; charset=utf-8`) is rejected — confirm that is intended.
fn message_pack_content_type<B>(req: &Request<B>) -> bool {
    let Some(content_type) = req.headers().get("Content-Type") else {
        return false;
    };
    let Ok(content_type) = content_type.to_str() else {
        return false;
    };
    match content_type {
        "application/msgpack" => true,
        "application/x-msgpack" => true,
        ct if ct.starts_with("application/") && ct.ends_with("+msgpack") => true,
        _ => false,
    }
}

/// Rejections produced by the [`MsgPack`] / [`MsgPackRaw`] extractors.
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum MsgPackRejection {
    #[error("Failed to parse the request body as MsgPack: {0}")]
    InvalidMsgPackBody(BoxError),
    #[error("Expected request with `Content-Type: application/msgpack`")]
    MissingMsgPackContentType,
    #[error("Cannot have two request body extractors for a single handler")]
    BodyAlreadyExtracted,
    #[error(transparent)]
    BytesRejection(#[from] BytesRejection),
}

impl IntoResponse for MsgPackRejection {
    fn into_response(self) -> Response {
        match self {
            Self::InvalidMsgPackBody(inner) => {
                let mut res = Response::new(Body::from(format!(
                    "Failed to parse the request body as MsgPack: {}",
                    inner
                )));
                *res.status_mut() = http::StatusCode::BAD_REQUEST;
                res
            }
            Self::MissingMsgPackContentType => {
                let mut res = Response::new(Body::from(
                    "Expected request with `Content-Type: application/msgpack`",
                ));
                *res.status_mut() = http::StatusCode::BAD_REQUEST;
                res
            }
            Self::BodyAlreadyExtracted => {
                let mut res = Response::new(Body::from(
                    "Cannot have two request body extractors for a single handler",
                ));
                *res.status_mut() = http::StatusCode::INTERNAL_SERVER_ERROR;
                res
            }
            // Body-buffering failures already know how to render themselves.
            Self::BytesRejection(inner) => inner.into_response(),
        }
    }
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/payloads/websocket.rs
packages/fullstack/src/payloads/websocket.rs
#![allow(unreachable_code)]
#![allow(unused_imports)]

//! This module implements WebSocket support for Dioxus Fullstack applications.
//!
//! WebSockets provide a full-duplex communication channel over a single, long-lived connection.
//!
//! This makes them ideal for real-time applications where the server and the client need to communicate
//! frequently and with low latency. Unlike Server-Sent Events (SSE), WebSockets allow the direct
//! transport of binary data, enabling things like video and audio streaming as well as more efficient
//! zero-copy serialization formats.
//!
//! This module implements a variety of types:
//! - `Websocket<In, Out, E>`: Represents a WebSocket connection that can send messages of type `In` and receive messages of type `Out`, using the encoding `E`.
//! - `UseWebsocket<In, Out, E>`: A hook that provides a reactive interface to a WebSocket connection.
//! - `WebSocketOptions`: Configuration options for establishing a WebSocket connection.
//! - `TypedWebsocket<In, Out, E>`: A typed wrapper around an Axum WebSocket connection for server-side use.
//! - `WebsocketState`: An enum representing the state of the WebSocket connection.
//! - plus a variety of error types and traits for encoding/decoding messages.
//!
//! Dioxus Fullstack websockets are typed in both directions, letting the happy path (`.send()` and `.recv()`)
//! automatically serialize and deserialize messages for you.

use crate::{ClientRequest, Encoding, FromResponse, IntoRequest, JsonEncoding, ServerFnError};
use axum::{
    extract::{FromRequest, Request},
    http::StatusCode,
};
use axum_core::response::{IntoResponse, Response};
use bytes::Bytes;
use dioxus_core::{use_hook, CapturedError, Result};
use dioxus_fullstack_core::{HttpError, RequestError};
use dioxus_hooks::{use_resource, Resource, UseWaker};
use dioxus_hooks::{use_signal, use_waker};
use dioxus_signals::{ReadSignal, ReadableExt, ReadableOptionExt, Signal, WritableExt};
use futures::StreamExt;
use futures::{
    stream::{SplitSink, SplitStream},
    SinkExt, TryFutureExt,
};
use serde::{de::DeserializeOwned, Serialize};
use std::{marker::PhantomData, prelude::rust_2024::Future};

#[cfg(feature = "web")]
use {
    futures_util::lock::Mutex,
    gloo_net::websocket::{futures::WebSocket as WsWebsocket, Message as WsMessage},
};

/// A hook that provides a reactive interface to a WebSocket connection.
///
/// WebSockets provide a full-duplex communication channel over a single, long-lived connection.
///
/// This makes them ideal for real-time applications where the server and the client need to communicate
/// frequently and with low latency. Unlike Server-Sent Events (SSE), WebSockets allow the direct
/// transport of binary data, enabling things like video and audio streaming as well as more efficient
/// zero-copy serialization formats.
///
/// This hook takes a function that returns a future which resolves to a `Websocket<In, Out, E>` -
/// usually a server function.
pub fn use_websocket<
    In: 'static,
    Out: 'static,
    E: Into<CapturedError> + 'static,
    F: Future<Output = Result<Websocket<In, Out, Enc>, E>> + 'static,
    Enc: Encoding,
>(
    mut connect_to_websocket: impl FnMut() -> F + 'static,
) -> UseWebsocket<In, Out, Enc> {
    let mut waker = use_waker();
    // Connection status signal, plus a read-only view handed out to consumers.
    let mut status = use_signal(|| WebsocketState::Connecting);
    let status_read = use_hook(|| ReadSignal::new(status));

    let connection = use_resource(move || {
        // Create the future eagerly (outside the async block) so the closure
        // only captures the future, not the `FnMut` callback.
        let connection = connect_to_websocket().map_err(|e| e.into());
        async move {
            let connection = connection.await;

            // Update the status based on the result of the connection attempt
            match connection.as_ref() {
                Ok(_) => status.set(WebsocketState::Open),
                Err(_) => status.set(WebsocketState::FailedToConnect),
            }

            // Wake up the `.recv()` calls waiting for the connection to be established
            waker.wake(());

            connection
        }
    });

    UseWebsocket {
        connection,
        waker,
        status,
        status_read,
    }
}

/// The return type of the `use_websocket` hook.
///
/// See the `use_websocket` documentation for more details.
///
/// This handle provides methods to send and receive messages, check the connection status,
/// and wait for the connection to be established.
pub struct UseWebsocket<In, Out, Enc = JsonEncoding>
where
    In: 'static,
    Out: 'static,
    Enc: 'static,
{
    // The in-flight (or completed) connection attempt.
    connection: Resource<Result<Websocket<In, Out, Enc>, CapturedError>>,
    // Notifies waiters when the connection attempt resolves.
    waker: UseWaker<()>,
    status: Signal<WebsocketState>,
    status_read: ReadSignal<WebsocketState>,
}

impl<In, Out, E> UseWebsocket<In, Out, E> {
    /// Wait for the connection to be established. This guarantees that subsequent calls to methods like
    /// `.try_recv()` will not fail due to the connection not being ready.
    pub async fn connect(&self) -> WebsocketState {
        // Wait for the connection to be established
        while !self.connection.finished() {
            _ = self.waker.wait().await;
        }
        self.status.cloned()
    }

    /// Returns true if the WebSocket is currently connecting.
/// /// This can be useful to present a loading state to the user while the connection is being established. pub fn connecting(&self) -> bool { matches!(self.status.cloned(), WebsocketState::Connecting) } /// Returns true if the Websocket is closed due to an error. pub fn is_err(&self) -> bool { matches!(self.status.cloned(), WebsocketState::FailedToConnect) } /// Returns true if the WebSocket is currently shut down and cannot be used to send or receive messages. pub fn is_closed(&self) -> bool { matches!( self.status.cloned(), WebsocketState::Closed | WebsocketState::FailedToConnect ) } /// Get the current status of the WebSocket connection. pub fn status(&self) -> ReadSignal<WebsocketState> { self.status_read } /// Send a raw message over the WebSocket connection /// /// To send a message with a particular type, see the `.send()` method instead. pub async fn send_raw(&self, msg: Message) -> Result<(), WebsocketError> { self.connect().await; self.connection .as_ref() .as_deref() .ok_or_else(WebsocketError::closed_away)? .as_ref() .map_err(|_| WebsocketError::AlreadyClosed)? .send_raw(msg) .await } /// Receive a raw message from the WebSocket connection /// /// To receive a message with a particular type, see the `.recv()` method instead. pub async fn recv_raw(&mut self) -> Result<Message, WebsocketError> { self.connect().await; let result = self .connection .as_ref() .as_deref() .ok_or_else(WebsocketError::closed_away)? .as_ref() .map_err(|_| WebsocketError::AlreadyClosed)? .recv_raw() .await; if let Err(WebsocketError::ConnectionClosed { .. }) = result.as_ref() { self.received_shutdown(); } result } pub async fn send(&self, msg: In) -> Result<(), WebsocketError> where In: Serialize, E: Encoding, { self.send_raw(Message::Binary( E::to_bytes(&msg).ok_or_else(WebsocketError::serialization)?, )) .await } /// Receive the next message from the WebSocket connection, deserialized into the `Out` type. 
/// /// If the connection is still opening, this will wait until the connection is established. /// If the connection fails to open or is killed while waiting, an error will be returned. /// /// This method returns an error if the connection is closed since we assume closed connections /// are a "failure". pub async fn recv(&mut self) -> Result<Out, WebsocketError> where Out: DeserializeOwned, E: Encoding, { self.connect().await; let result = self .connection .as_ref() .as_deref() .ok_or_else(WebsocketError::closed_away)? .as_ref() .map_err(|_| WebsocketError::AlreadyClosed)? .recv() .await; if let Err(WebsocketError::ConnectionClosed { .. }) = result.as_ref() { self.received_shutdown(); } result } /// Set the WebSocket connection. /// /// This method takes a `Result<Websocket<In, Out, E>, Err>`, allowing you to drive the connection /// into an errored state manually. pub fn set<Err: Into<CapturedError>>(&mut self, socket: Result<Websocket<In, Out, E>, Err>) { match socket { Ok(_) => self.status.set(WebsocketState::Open), Err(_) => self.status.set(WebsocketState::FailedToConnect), } self.connection.set(Some(socket.map_err(|e| e.into()))); self.waker.wake(()); } /// Mark the WebSocket as closed. This is called internally when the connection is closed. fn received_shutdown(&self) { let mut _self = *self; _self.status.set(WebsocketState::Closed); _self.waker.wake(()); } } impl<In, Out, E> Copy for UseWebsocket<In, Out, E> {} impl<In, Out, E> Clone for UseWebsocket<In, Out, E> { fn clone(&self) -> Self { *self } } #[derive(Debug, Clone, PartialEq, Copy)] pub enum WebsocketState { /// The WebSocket is connecting. Connecting, /// The WebSocket is open and ready to send and receive messages. Open, /// The WebSocket is closing. Closing, /// The WebSocket is closed and cannot be used to send or receive messages. Closed, /// The WebSocket failed to connect FailedToConnect, } /// A WebSocket connection that can send and receive messages of type `In` and `Out`. 
pub struct Websocket<In = String, Out = String, E = JsonEncoding> { protocol: Option<String>, #[allow(clippy::type_complexity)] _in: std::marker::PhantomData<fn() -> (In, Out, E)>, #[cfg(not(target_arch = "wasm32"))] native: Option<native::SplitSocket>, #[cfg(feature = "web")] web: Option<WebsysSocket>, response: Option<axum::response::Response>, } impl<I, O, E> Websocket<I, O, E> { pub async fn recv(&self) -> Result<O, WebsocketError> where O: DeserializeOwned, E: Encoding, { loop { let msg = self.recv_raw().await?; match msg { Message::Text(text) => { let e: O = E::decode(text.into()).ok_or_else(WebsocketError::deserialization)?; return Ok(e); } Message::Binary(bytes) => { let e: O = E::decode(bytes).ok_or_else(WebsocketError::deserialization)?; return Ok(e); } Message::Close { code, reason } => { return Err(WebsocketError::ConnectionClosed { code, description: reason, }); } // todo - are we supposed to response to pings? Message::Ping(_bytes) => continue, Message::Pong(_bytes) => continue, } } } /// Send a typed message over the WebSocket connection. /// /// This method serializes the message using the specified encoding `E` before sending it. /// The message will always be sent as a binary message, even if the encoding is valid UTF-8 /// like JSON. pub async fn send(&self, msg: I) -> Result<(), WebsocketError> where I: Serialize, E: Encoding, { let bytes = E::to_bytes(&msg).ok_or_else(WebsocketError::serialization)?; self.send_raw(Message::Binary(bytes)).await } /// Send a raw message over the WebSocket connection. /// /// This method allows sending text, binary, ping, pong, and close messages directly. pub async fn send_raw(&self, message: Message) -> Result<(), WebsocketError> { #[cfg(feature = "web")] if cfg!(target_arch = "wasm32") { let mut sender = self .web .as_ref() .ok_or_else(|| WebsocketError::Uninitialized)? 
.sender .lock() .await; match message { Message::Text(s) => { sender.send(gloo_net::websocket::Message::Text(s)).await?; } Message::Binary(bytes) => { sender .send(gloo_net::websocket::Message::Bytes(bytes.into())) .await?; } Message::Close { .. } => { sender.close().await?; } Message::Ping(_bytes) => return Ok(()), Message::Pong(_bytes) => return Ok(()), } return Ok(()); } #[cfg(not(target_arch = "wasm32"))] { let mut sender = self .native .as_ref() .ok_or_else(|| WebsocketError::Uninitialized)? .sender .lock() .await; sender .send(message.into()) .await .map_err(WebsocketError::from)?; } Ok(()) } /// Receive a raw message from the WebSocket connection. pub async fn recv_raw(&self) -> Result<Message, WebsocketError> { #[cfg(feature = "web")] if cfg!(target_arch = "wasm32") { let mut conn = self.web.as_ref().unwrap().receiver.lock().await; return match conn.next().await { Some(Ok(WsMessage::Text(text))) => Ok(Message::Text(text)), Some(Ok(WsMessage::Bytes(items))) => Ok(Message::Binary(items.into())), Some(Err(e)) => Err(WebsocketError::from(e)), None => Err(WebsocketError::closed_away()), }; } #[cfg(not(target_arch = "wasm32"))] { use tungstenite::Message as TMessage; let mut conn = self.native.as_ref().unwrap().receiver.lock().await; return match conn.next().await { Some(Ok(res)) => match res { TMessage::Text(utf8_bytes) => Ok(Message::Text(utf8_bytes.to_string())), TMessage::Binary(bytes) => Ok(Message::Binary(bytes)), TMessage::Close(Some(cf)) => Ok(Message::Close { code: cf.code.into(), reason: cf.reason.to_string(), }), TMessage::Close(None) => Ok(Message::Close { code: CloseCode::Away, reason: "Away".to_string(), }), TMessage::Ping(bytes) => Ok(Message::Ping(bytes)), TMessage::Pong(bytes) => Ok(Message::Pong(bytes)), TMessage::Frame(_frame) => Err(WebsocketError::Unexpected), }, Some(Err(e)) => Err(WebsocketError::from(e)), None => Err(WebsocketError::closed_away()), }; } unimplemented!("Non web wasm32 clients are not supported yet") } pub fn protocol(&self) 
-> Option<&str> { self.protocol.as_deref() } } // no two websockets are ever equal impl<I, O, E> PartialEq for Websocket<I, O, E> { fn eq(&self, _other: &Self) -> bool { false } } // Create a new WebSocket connection that uses the provided function to handle incoming messages impl<In, Out, E> IntoResponse for Websocket<In, Out, E> { fn into_response(self) -> Response { let Some(response) = self.response else { return HttpError::new( StatusCode::INTERNAL_SERVER_ERROR, "WebSocket response not initialized", ) .into_response(); }; response.into_response() } } impl<I, O, E> FromResponse<UpgradingWebsocket> for Websocket<I, O, E> { fn from_response(res: UpgradingWebsocket) -> impl Future<Output = Result<Self, ServerFnError>> { async move { #[cfg(not(target_arch = "wasm32"))] let native = res.native; #[cfg(feature = "web")] let web = res.web.map(|f| { let (sender, receiver) = f.split(); WebsysSocket { sender: Mutex::new(sender), receiver: Mutex::new(receiver), } }); Ok(Websocket { protocol: res.protocol, #[cfg(not(target_arch = "wasm32"))] native, #[cfg(feature = "web")] web, response: None, _in: PhantomData, }) } } } pub struct WebSocketOptions { protocols: Vec<String>, automatic_reconnect: bool, #[cfg(feature = "server")] upgrade: Option<axum::extract::ws::WebSocketUpgrade>, #[cfg(feature = "server")] on_failed_upgrade: Option<Box<dyn FnOnce(axum::Error) + Send + 'static>>, } impl WebSocketOptions { pub fn new() -> Self { Self { protocols: Vec::new(), automatic_reconnect: false, #[cfg(feature = "server")] upgrade: None, #[cfg(feature = "server")] on_failed_upgrade: None, } } /// Automatically reconnect if the connection is lost. This uses an exponential backoff strategy. 
pub fn with_automatic_reconnect(mut self) -> Self { self.automatic_reconnect = true; self } #[cfg(feature = "server")] pub fn on_failed_upgrade( mut self, callback: impl FnOnce(axum::Error) + Send + 'static, ) -> Self { self.on_failed_upgrade = Some(Box::new(callback)); self } #[cfg(feature = "server")] pub fn on_upgrade<F, Fut, In, Out, Enc>(mut self, callback: F) -> Websocket<In, Out, Enc> where F: FnOnce(TypedWebsocket<In, Out, Enc>) -> Fut + Send + 'static, Fut: Future<Output = ()> + 'static, { let on_failed_upgrade = self.on_failed_upgrade.take(); let response = self .upgrade .unwrap() .on_failed_upgrade(|e| { if let Some(callback) = on_failed_upgrade { callback(e); } }) .on_upgrade(|socket| { let res = crate::spawn_platform(move || { callback(TypedWebsocket { _in: PhantomData, _out: PhantomData, _enc: PhantomData, inner: socket, }) }); async move { let _ = res.await; } }); Websocket { // the protocol is none here since it won't be accessible until after the upgrade protocol: None, response: Some(response), _in: PhantomData, #[cfg(not(target_arch = "wasm32"))] native: None, #[cfg(feature = "web")] web: None, } } } impl Default for WebSocketOptions { fn default() -> Self { Self::new() } } impl IntoRequest<UpgradingWebsocket> for WebSocketOptions { fn into_request( self, request: ClientRequest, ) -> impl Future<Output = std::result::Result<UpgradingWebsocket, RequestError>> + 'static { async move { #[cfg(feature = "web")] if cfg!(target_arch = "wasm32") { let url_path = request.url().path(); let url_query = request.url().query(); let url_fragment = request.url().fragment(); let path_and_query = format!( "{}{}{}", url_path, url_query.map_or("".to_string(), |q| format!("?{q}")), url_fragment.map_or("".to_string(), |f| format!("#{f}")) ); let socket = gloo_net::websocket::futures::WebSocket::open_with_protocols( // ! very important we use the path here and not the full url on web. // for as long as serverfns are meant to target the same origin, this is fine. 
&path_and_query, &self .protocols .iter() .map(String::as_str) .collect::<Vec<_>>(), ) .unwrap(); return Ok(UpgradingWebsocket { protocol: Some(socket.protocol()), web: Some(socket), #[cfg(not(target_arch = "wasm32"))] native: None, }); } #[cfg(not(target_arch = "wasm32"))] { let response = native::send_request(request, &self.protocols) .await .unwrap(); let (inner, protocol) = response .into_stream_and_protocol(self.protocols, None) .await .unwrap(); return Ok(UpgradingWebsocket { protocol, native: Some(inner), #[cfg(feature = "web")] web: None, }); } unimplemented!("Non web wasm32 clients are not supported yet") } } } impl<S: Send> FromRequest<S> for WebSocketOptions { type Rejection = axum::response::Response; fn from_request( _req: Request, _: &S, ) -> impl Future<Output = Result<Self, Self::Rejection>> + Send { #[cfg(not(feature = "server"))] return async move { Err(StatusCode::NOT_IMPLEMENTED.into_response()) }; #[cfg(feature = "server")] async move { let ws = match axum::extract::ws::WebSocketUpgrade::from_request(_req, &()).await { Ok(ws) => ws, Err(rejection) => return Err(rejection.into_response()), }; Ok(WebSocketOptions { protocols: vec![], automatic_reconnect: false, upgrade: Some(ws), on_failed_upgrade: None, }) } } } #[doc(hidden)] pub struct UpgradingWebsocket { protocol: Option<String>, #[cfg(feature = "web")] web: Option<gloo_net::websocket::futures::WebSocket>, #[cfg(not(target_arch = "wasm32"))] native: Option<native::SplitSocket>, } unsafe impl Send for UpgradingWebsocket {} unsafe impl Sync for UpgradingWebsocket {} #[cfg(feature = "server")] pub struct TypedWebsocket<In, Out, E = JsonEncoding> { _in: std::marker::PhantomData<fn() -> In>, _out: std::marker::PhantomData<fn() -> Out>, _enc: std::marker::PhantomData<fn() -> E>, inner: axum::extract::ws::WebSocket, } #[cfg(feature = "server")] impl<In: DeserializeOwned, Out: Serialize, E: Encoding> TypedWebsocket<In, Out, E> { /// Receive an incoming message from the client. 
/// /// Returns `None` if the stream has closed. pub async fn recv(&mut self) -> Result<In, WebsocketError> { use axum::extract::ws::Message as AxumMessage; loop { let Some(res) = self.inner.next().await else { return Err(WebsocketError::closed_away()); }; match res { Ok(res) => match res { AxumMessage::Text(utf8_bytes) => { let e: In = E::decode(utf8_bytes.into()) .ok_or_else(WebsocketError::deserialization)?; return Ok(e); } AxumMessage::Binary(bytes) => { let e: In = E::decode(bytes).ok_or_else(WebsocketError::deserialization)?; return Ok(e); } AxumMessage::Close(Some(close_frame)) => { return Err(WebsocketError::ConnectionClosed { code: close_frame.code.into(), description: close_frame.reason.to_string(), }); } AxumMessage::Close(None) => return Err(WebsocketError::AlreadyClosed), AxumMessage::Ping(_bytes) => continue, AxumMessage::Pong(_bytes) => continue, }, Err(_res) => return Err(WebsocketError::closed_away()), } } } /// Send an outgoing message. pub async fn send(&mut self, msg: Out) -> Result<(), WebsocketError> { use axum::extract::ws::Message; let to_bytes = E::to_bytes(&msg).ok_or_else(|| { WebsocketError::Serialization(anyhow::anyhow!("Failed to serialize message").into()) })?; self.inner .send(Message::Binary(to_bytes)) .await .map_err(|_err| WebsocketError::AlreadyClosed) } /// Receive another message. /// /// Returns `None` if the stream has closed. pub async fn recv_raw(&mut self) -> Result<Message, WebsocketError> { use axum::extract::ws::Message as AxumMessage; let message = self .inner .next() .await .ok_or_else(WebsocketError::closed_away)? 
.map_err(|_| WebsocketError::AlreadyClosed)?; Ok(match message { AxumMessage::Text(utf8_bytes) => Message::Text(utf8_bytes.to_string()), AxumMessage::Binary(bytes) => Message::Binary(bytes), AxumMessage::Ping(bytes) => Message::Ping(bytes), AxumMessage::Pong(bytes) => Message::Pong(bytes), AxumMessage::Close(close_frame) => Message::Close { code: close_frame .clone() .map_or(CloseCode::Away, |cf| cf.code.into()), reason: close_frame.map_or("Away".to_string(), |cf| cf.reason.to_string()), }, }) } /// Send a message. pub async fn send_raw(&mut self, msg: Message) -> Result<(), WebsocketError> { let real = match msg { Message::Text(text) => axum::extract::ws::Message::Text(text.into()), Message::Binary(bytes) => axum::extract::ws::Message::Binary(bytes), Message::Ping(bytes) => axum::extract::ws::Message::Ping(bytes), Message::Pong(bytes) => axum::extract::ws::Message::Pong(bytes), Message::Close { code, reason } => { axum::extract::ws::Message::Close(Some(axum::extract::ws::CloseFrame { code: code.into(), reason: reason.into(), })) } }; self.inner .send(real) .await .map_err(|_err| WebsocketError::AlreadyClosed) } /// Return the selected WebSocket subprotocol, if one has been chosen. pub fn protocol(&self) -> Option<&http::HeaderValue> { self.inner.protocol() } /// Get a mutable reference to the underlying Axum WebSocket. 
pub fn socket(&mut self) -> &mut axum::extract::ws::WebSocket { &mut self.inner } } #[derive(thiserror::Error, Debug)] pub enum WebsocketError { #[error("Connection closed")] ConnectionClosed { code: CloseCode, description: String, }, #[error("WebSocket already closed")] AlreadyClosed, #[error("WebSocket capacity reached")] Capacity, #[error("An unexpected internal error occurred")] Unexpected, #[error("WebSocket is not initialized on this platform")] Uninitialized, #[cfg(not(target_arch = "wasm32"))] #[error("websocket upgrade failed")] Handshake(#[from] native::HandshakeError), #[error("reqwest error")] Reqwest(#[from] reqwest::Error), #[cfg(not(target_arch = "wasm32"))] #[error("tungstenite error")] Tungstenite(#[from] tungstenite::Error), /// Error during serialization/deserialization. #[error("error during serialization/deserialization")] Deserialization(Box<dyn std::error::Error + Send + Sync>), /// Error during serialization/deserialization. #[error("error during serialization/deserialization")] Serialization(Box<dyn std::error::Error + Send + Sync>), /// Error during serialization/deserialization. #[error("serde_json error")] Json(#[from] serde_json::Error), /// Error during serialization/deserialization. 
#[error("ciborium error")] Cbor(#[from] ciborium::de::Error<std::io::Error>), } #[cfg(feature = "web")] impl From<gloo_net::websocket::WebSocketError> for WebsocketError { fn from(value: gloo_net::websocket::WebSocketError) -> Self { use gloo_net::websocket::WebSocketError; match value { WebSocketError::ConnectionError => WebsocketError::AlreadyClosed, WebSocketError::ConnectionClose(close_event) => WebsocketError::ConnectionClosed { code: close_event.code.into(), description: close_event.reason, }, WebSocketError::MessageSendError(_js_error) => WebsocketError::Unexpected, _ => WebsocketError::Unexpected, } } } impl WebsocketError { pub fn closed_away() -> Self { Self::ConnectionClosed { code: CloseCode::Normal, description: "Connection closed normally".into(), } } pub fn deserialization() -> Self { Self::Deserialization(anyhow::anyhow!("Failed to deserialize message").into()) } pub fn serialization() -> Self { Self::Serialization(anyhow::anyhow!("Failed to serialize message").into()) } } #[cfg(feature = "web")] struct WebsysSocket { sender: Mutex<SplitSink<WsWebsocket, WsMessage>>, receiver: Mutex<SplitStream<WsWebsocket>>, } /// A `WebSocket` message, which can be a text string or binary data. #[derive(Clone, Debug)] pub enum Message { /// A text `WebSocket` message. // note: we can't use `tungstenite::Utf8String` here, since we don't have tungstenite on wasm. Text(String), /// A binary `WebSocket` message. Binary(Bytes), /// A ping message with the specified payload. /// /// The payload here must have a length less than 125 bytes. /// /// # WASM /// /// This variant is ignored for WASM targets. Ping(Bytes), /// A pong message with the specified payload. /// /// The payload here must have a length less than 125 bytes. /// /// # WASM /// /// This variant is ignored for WASM targets. Pong(Bytes), /// A close message. /// /// Sending this will not close the connection, though the remote peer will likely close the connection after receiving this. 
Close { code: CloseCode, reason: String }, } impl From<String> for Message { #[inline] fn from(value: String) -> Self { Self::Text(value) } } impl From<&str> for Message { #[inline] fn from(value: &str) -> Self { Self::from(value.to_owned()) } } impl From<Bytes> for Message { #[inline] fn from(value: Bytes) -> Self { Self::Binary(value) } } impl From<Vec<u8>> for Message { #[inline] fn from(value: Vec<u8>) -> Self { Self::from(Bytes::from(value)) } } impl From<&[u8]> for Message { #[inline] fn from(value: &[u8]) -> Self { Self::from(Bytes::copy_from_slice(value)) } } /// Status code used to indicate why an endpoint is closing the `WebSocket` /// connection.[1] /// /// [1]: https://datatracker.ietf.org/doc/html/rfc6455 #[derive(Debug, Default, Eq, PartialEq, Clone, Copy)] #[non_exhaustive] pub enum CloseCode { /// Indicates a normal closure, meaning that the purpose for /// which the connection was established has been fulfilled. #[default] Normal, /// Indicates that an endpoint is "going away", such as a server /// going down or a browser having navigated away from a page. Away, /// Indicates that an endpoint is terminating the connection due /// to a protocol error. Protocol, /// Indicates that an endpoint is terminating the connection
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
true
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/payloads/multipart.rs
packages/fullstack/src/payloads/multipart.rs
#![allow(unreachable_code)] use crate::{ClientRequest, ClientResponse, IntoRequest}; use axum::{ extract::{FromRequest, Request}, response::{IntoResponse, Response}, }; use dioxus_fullstack_core::RequestError; use dioxus_html::{FormData, FormEvent}; use std::{prelude::rust_2024::Future, rc::Rc}; #[cfg(feature = "server")] use axum::extract::multipart::{Field, MultipartError}; /// A streaming multipart form data handler. /// /// This type makes it easy to send and receive multipart form data in a streaming fashion by directly /// leveraging the corresponding `dioxus_html::FormData` and `axum::extract::Multipart` types. /// /// On the client, you can create a `MultipartFormData` instance by using `.into()` on a `FormData` instance. /// This is typically done by using the `FormEvent`'s `.data()` method. /// /// On the server, you can extract a `MultipartFormData` instance by using it as an extractor in your handler function. /// This gives you access to axum's `Multipart` extractor, which allows you to handle the various fields /// and files in the multipart form data. /// /// ## Axum Usage /// /// Extractor that parses `multipart/form-data` requests (commonly used with file uploads). /// /// ⚠️ Since extracting multipart form data from the request requires consuming the body, the /// `Multipart` extractor must be *last* if there are multiple extractors in a handler. /// See ["the order of extractors"][order-of-extractors] /// /// [order-of-extractors]: mod@crate::extract#the-order-of-extractors /// /// # Large Files /// /// For security reasons, by default, `Multipart` limits the request body size to 2MB. /// See [`DefaultBodyLimit`][default-body-limit] for how to configure this limit. 
/// /// [default-body-limit]: crate::extract::DefaultBodyLimit pub struct MultipartFormData<T = ()> { #[cfg(feature = "server")] form: Option<axum::extract::Multipart>, _client: Option<Rc<FormData>>, _phantom: std::marker::PhantomData<T>, } impl MultipartFormData { #[cfg(feature = "server")] pub async fn next_field(&mut self) -> Result<Option<Field<'_>>, MultipartError> { if let Some(form) = &mut self.form { form.next_field().await } else { Ok(None) } } } impl<S> IntoRequest for MultipartFormData<S> { fn into_request( self, _req: ClientRequest, ) -> impl Future<Output = Result<ClientResponse, RequestError>> + 'static { async move { // On the web, it's just easier to convert the form data into a blob and then send that // blob as the body of the request. This handles setting the correct headers, wiring // up file uploads as streams, and encoding the request. #[cfg(feature = "web")] if cfg!(target_arch = "wasm32") { let data = self._client.clone().ok_or_else(|| { RequestError::Builder("Failed to get FormData from event".into()) })?; fn get_form_data(data: Rc<FormData>) -> Option<wasm_bindgen::JsValue> { use wasm_bindgen::JsCast; let event: &web_sys::Event = data.downcast()?; let target = event.target()?; let form: &web_sys::HtmlFormElement = target.dyn_ref()?; let data: web_sys::FormData = web_sys::FormData::new_with_form(form).ok()?; Some(data.into()) } let js_form_data = get_form_data(data).ok_or_else(|| { RequestError::Builder("Failed to get FormData from event".into()) })?; return _req.send_js_value(js_form_data).await; } // On non-web platforms, we actually need to read the values out of the FormData // and construct a multipart form body manually. 
#[cfg(not(target_arch = "wasm32"))] { let data = self._client.clone().ok_or_else(|| { RequestError::Builder("Failed to get FormData from event".into()) })?; return _req.send_multipart(&data).await; } unimplemented!("Non web wasm32 clients are not supported yet") } } } impl<S: Send + Sync + 'static, D> FromRequest<S> for MultipartFormData<D> { type Rejection = Response; #[doc = " Perform the extraction."] fn from_request( req: Request, state: &S, ) -> impl Future<Output = Result<Self, Self::Rejection>> + Send { #[cfg(feature = "server")] return async move { let form = axum::extract::multipart::Multipart::from_request(req, state) .await .map_err(|err| err.into_response())?; Ok(MultipartFormData { form: Some(form), _client: None, _phantom: std::marker::PhantomData, }) }; #[cfg(not(feature = "server"))] async { use dioxus_fullstack_core::HttpError; let _ = req; let _ = state; Err(HttpError::new( http::StatusCode::INTERNAL_SERVER_ERROR, "MultipartFormData extractor is not supported on non-server builds", ) .into_response()) } } } impl<T> From<Rc<FormData>> for MultipartFormData<T> { fn from(_value: Rc<FormData>) -> Self { MultipartFormData { #[cfg(feature = "server")] form: None, _client: Some(_value), _phantom: std::marker::PhantomData, } } } impl<T> From<FormEvent> for MultipartFormData<T> { fn from(event: FormEvent) -> Self { let data = event.data(); MultipartFormData { #[cfg(feature = "server")] form: None, _client: Some(data), _phantom: std::marker::PhantomData, } } } unsafe impl Send for MultipartFormData {} unsafe impl Sync for MultipartFormData {}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/payloads/form.rs
packages/fullstack/src/payloads/form.rs
use super::*; pub use axum::extract::Form; impl<T> IntoRequest for Form<T> where T: Serialize + 'static + DeserializeOwned, { fn into_request(self, req: ClientRequest) -> impl Future<Output = ClientResult> + 'static { async move { req.send_form(&self.0).await } } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/payloads/redirect.rs
packages/fullstack/src/payloads/redirect.rs
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/payloads/text.rs
packages/fullstack/src/payloads/text.rs
use crate::{ClientResponse, FromResponse}; use axum_core::response::{IntoResponse, Response}; use dioxus_fullstack_core::ServerFnError; use send_wrapper::SendWrapper; use std::future::Future; /// A simple text response type. /// /// The `T` parameter can be anything that converts to and from `String`, such as `Rc<str>` or `String`. /// /// Unlike `Json` or plain `String`, this uses the `text/plain` content type. The `text/plain` header /// will be set on the request. pub struct Text<T>(pub T); impl<T> Text<T> { /// Create a new text response. pub fn new(text: T) -> Self { Self(text) } } impl<T: Into<String>> IntoResponse for Text<T> { fn into_response(self) -> Response { Response::builder() .header("Content-Type", "text/plain; charset=utf-8") .body(axum_core::body::Body::from(self.0.into())) .unwrap() } } impl<T: From<String>> FromResponse for Text<T> { fn from_response(res: ClientResponse) -> impl Future<Output = Result<Self, ServerFnError>> { SendWrapper::new(async move { let text = res.text().await?; Ok(Text::new(text.into())) }) } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/payloads/header.rs
packages/fullstack/src/payloads/header.rs
use super::*; pub use headers::Cookie; pub use headers::SetCookie; #[derive(Clone, Debug)] pub struct SetHeader<Data> { data: Option<Data>, } impl<T: Header> SetHeader<T> { pub fn new( value: impl TryInto<HeaderValue, Error = InvalidHeaderValue>, ) -> Result<Self, headers::Error> { let values = value.try_into().map_err(|_| headers::Error::invalid())?; let res = T::decode(&mut std::iter::once(&values))?; Ok(Self { data: Some(res) }) } } impl<T: Header> IntoResponseParts for SetHeader<T> { type Error = (); fn into_response_parts(self, res: ResponseParts) -> Result<ResponseParts, Self::Error> { let data = self.data.expect("SetHeader must have data to set"); let mut headers = vec![]; data.encode(&mut headers); Ok(axum::response::AppendHeaders( headers.into_iter().map(|value| (T::name().clone(), value)), ) .into_response_parts(res) .unwrap()) } } impl<T: Header> FromResponseParts for SetHeader<T> { fn from_response_parts(parts: &mut axum::http::response::Parts) -> Result<Self, ServerFnError> { let Some(header) = parts.headers.remove(T::name()) else { return Ok(SetHeader { data: None }); }; let data = T::decode(&mut std::iter::once(&header)) .map_err(|_| ServerFnError::Deserialization("Failed to decode header".into()))?; Ok(SetHeader { data: Some(data) }) } } impl<T: Header> IntoResponse for SetHeader<T> { fn into_response(self) -> axum::response::Response { let mut values = vec![]; self.data.unwrap().encode(&mut values); let mut response = axum::response::Response::builder(); for value in values { response = response.header(T::name(), value); } response.body(axum_core::body::Body::empty()).unwrap() } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/payloads/cbor.rs
packages/fullstack/src/payloads/cbor.rs
use axum::{ body::Bytes, extract::{rejection::BytesRejection, FromRequest, Request}, http::{header, HeaderMap, HeaderValue, StatusCode}, response::{IntoResponse, Response}, }; use serde::{de::DeserializeOwned, Serialize}; /// CBOR Extractor / Response. /// /// When used as an extractor, it can deserialize request bodies into some type that /// implements [`serde::Deserialize`]. The request will be rejected (and a [`CborRejection`] will /// be returned) if: /// /// - The request doesn't have a `Content-Type: application/cbor` (or similar) header. /// - The body doesn't contain syntactically valid CBOR. /// - The body contains syntactically valid CBOR but it couldn't be deserialized into the target type. /// - Buffering the request body fails. /// /// ⚠️ Since parsing CBOR requires consuming the request body, the `Cbor` extractor must be /// *last* if there are multiple extractors in a handler. /// See ["the order of extractors"][order-of-extractors] /// /// [order-of-extractors]: mod@crate::extract#the-order-of-extractors #[must_use] pub struct Cbor<T>(pub T); /// Check if the request has a valid CBOR content type header. /// /// This function validates that the `Content-Type` header is set to `application/cbor` /// or a compatible CBOR media type (including subtypes with `+cbor` suffix). fn is_valid_cbor_content_type(headers: &HeaderMap) -> bool { let Some(content_type) = headers.get(header::CONTENT_TYPE) else { return false; }; let Ok(content_type) = content_type.to_str() else { return false; }; let Ok(mime) = content_type.parse::<mime::Mime>() else { return false; }; let is_cbor_content_type = mime.type_() == "application" && (mime.subtype() == "cbor" || mime.suffix().is_some_and(|name| name == "cbor")); is_cbor_content_type } impl<S, T> FromRequest<S> for Cbor<T> where S: Send + Sync, T: DeserializeOwned, { type Rejection = CborRejection; /// Extract a CBOR payload from the request body. 
/// /// This implementation validates the content type and deserializes the CBOR data. /// Returns a `CborRejection` if validation or deserialization fails. async fn from_request(req: Request, state: &S) -> Result<Self, Self::Rejection> { if !is_valid_cbor_content_type(req.headers()) { return Err(CborRejection::MissingCborContentType); } let bytes = Bytes::from_request(req, state).await?; let value = ciborium::from_reader(&bytes as &[u8]).map_err(|_| CborRejection::FailedToParseCbor)?; Ok(Cbor(value)) } } impl<T> IntoResponse for Cbor<T> where T: Serialize, { /// Convert the CBOR payload into an HTTP response. /// /// This serializes the inner value to CBOR format and sets the appropriate /// `Content-Type: application/cbor` header. Returns a 500 Internal Server Error /// if serialization fails. fn into_response(self) -> Response { let mut buf = Vec::new(); match ciborium::into_writer(&self.0, &mut buf) { Err(_) => ( StatusCode::INTERNAL_SERVER_ERROR, [( header::CONTENT_TYPE, HeaderValue::from_static(mime::TEXT_PLAIN_UTF_8.as_ref()), )], "Failed to serialize to CBOR".to_string(), ) .into_response(), Ok(()) => ( [( header::CONTENT_TYPE, HeaderValue::from_static("application/cbor"), )], buf, ) .into_response(), } } } impl<T> From<T> for Cbor<T> { /// Create a `Cbor<T>` from the inner value. /// /// This is a convenience constructor that wraps any value in the `Cbor` struct. fn from(inner: T) -> Self { Self(inner) } } impl<T> Cbor<T> where T: DeserializeOwned, { /// Construct a `Cbor<T>` from a byte slice. /// /// This method attempts to deserialize the provided bytes as CBOR data. /// Returns a `CborRejection` if deserialization fails. pub fn from_bytes(bytes: &[u8]) -> Result<Self, CborRejection> { ciborium::de::from_reader(bytes) .map(Cbor) .map_err(|_| CborRejection::FailedToParseCbor) } } /// Rejection type for CBOR extraction failures. /// /// This enum represents the various ways that CBOR extraction can fail. 
/// It implements `IntoResponse` to provide appropriate HTTP responses for each error type. #[derive(thiserror::Error, Debug)] pub enum CborRejection { /// The request is missing the required `Content-Type: application/cbor` header. #[error("Expected request with `Content-Type: application/cbor`")] MissingCborContentType, /// Failed to parse the request body as valid CBOR. #[error("Invalid CBOR data")] FailedToParseCbor, /// Failed to read the request body bytes. #[error(transparent)] BytesRejection(#[from] BytesRejection), } impl IntoResponse for CborRejection { fn into_response(self) -> Response { use CborRejection::*; match self { MissingCborContentType => { (StatusCode::UNSUPPORTED_MEDIA_TYPE, self.to_string()).into_response() } FailedToParseCbor => (StatusCode::BAD_REQUEST, self.to_string()).into_response(), BytesRejection(rejection) => rejection.into_response(), } } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/payloads/postcard.rs
packages/fullstack/src/payloads/postcard.rs
use axum::{ body::Bytes, extract::{rejection::BytesRejection, FromRequest}, http::{header, HeaderMap, StatusCode}, response::{IntoResponse, Response}, }; use postcard::{from_bytes, to_allocvec}; use serde::{de::DeserializeOwned, Serialize}; use std::future::Future; /// Postcard Extractor / Response. /// /// When used as an extractor, it can deserialize request bodies into some type that /// implements [`serde::Deserialize`]. The request will be rejected (and a [`PostcardRejection`] will /// be returned) if: /// /// - The request doesn't have a `Content-Type: application/postcard` (or similar) header. /// - The body doesn't contain syntactically valid Postcard. /// - The body contains syntactically valid Postcard but it couldn't be deserialized into the target type. /// - Buffering the request body fails. /// /// ⚠️ Since parsing Postcard requires consuming the request body, the `Postcard` extractor must be /// *last* if there are multiple extractors in a handler. /// See ["the order of extractors"][order-of-extractors] /// /// [order-of-extractors]: mod@crate::extract#the-order-of-extractors pub struct Postcard<T>(pub T); #[derive(thiserror::Error, Debug)] pub enum PostcardRejection { #[error("Expected request with `Content-Type: application/postcard`")] MissingPostcardContentType, #[error(transparent)] PostcardError(#[from] postcard::Error), #[error(transparent)] Bytes(#[from] BytesRejection), } impl IntoResponse for PostcardRejection { fn into_response(self) -> Response { use PostcardRejection::*; // its often easiest to implement `IntoResponse` by calling other implementations match self { MissingPostcardContentType => { (StatusCode::UNSUPPORTED_MEDIA_TYPE, self.to_string()).into_response() } PostcardError(err) => (StatusCode::BAD_REQUEST, err.to_string()).into_response(), _ => (StatusCode::INTERNAL_SERVER_ERROR, self.to_string()).into_response(), } } } impl<T, S> FromRequest<S> for Postcard<T> where T: DeserializeOwned, S: Send + Sync, { type Rejection = 
PostcardRejection; fn from_request( req: axum::extract::Request, state: &S, ) -> impl Future<Output = Result<Self, Self::Rejection>> + Send { async move { if postcard_content_type(req.headers()) { let bytes = Bytes::from_request(req, state).await?; let value = match from_bytes(&bytes) { Ok(value) => value, Err(err) => return Err(PostcardRejection::PostcardError(err)), }; Ok(Postcard(value)) } else { Err(PostcardRejection::MissingPostcardContentType) } } } } fn postcard_content_type(headers: &HeaderMap) -> bool { let content_type = if let Some(content_type) = headers.get(header::CONTENT_TYPE) { content_type } else { return false; }; let content_type = if let Ok(content_type) = content_type.to_str() { content_type } else { return false; }; let mime = if let Ok(mime) = content_type.parse::<mime::Mime>() { mime } else { return false; }; let is_postcard_content_type = mime.type_() == "application" && (mime.subtype() == "postcard" || mime.suffix().is_some_and(|name| name == "postcard")); is_postcard_content_type } impl<T> IntoResponse for Postcard<T> where T: Serialize, { fn into_response(self) -> Response { // TODO: maybe use 128 bytes cause serde is doing something like that match to_allocvec(&self.0) { Ok(value) => ([(header::CONTENT_TYPE, "application/postcard")], value).into_response(), Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(), } } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/payloads/files.rs
packages/fullstack/src/payloads/files.rs
use super::*; use axum_core::extract::Request; use dioxus_fullstack_core::RequestError; use dioxus_html::FileData; #[cfg(feature = "server")] use std::path::Path; use std::{ pin::Pin, task::{Context, Poll}, }; /// A payload for uploading files using streams. /// /// The `FileUpload` struct allows you to upload files by streaming their data. It can be constructed /// from a stream of bytes and can be sent as part of an HTTP request. This is particularly useful for /// handling large files without loading them entirely into memory. /// /// On the web, this uses the `ReadableStream` API to stream file data. pub struct FileStream { data: Option<FileData>, name: String, size: Option<u64>, content_type: Option<String>, #[cfg(feature = "server")] server_body: Option<axum_core::body::BodyDataStream>, // For downloaded files... #[allow(clippy::type_complexity)] client_body: Option<Pin<Box<dyn Stream<Item = Result<Bytes, StreamingError>> + Send>>>, } impl FileStream { /// Get the name of the file. pub fn file_name(&self) -> &str { &self.name } /// Get the size of the file, if known. pub fn size(&self) -> Option<u64> { self.size } /// Get the content type of the file, if available. pub fn content_type(&self) -> Option<&str> { self.content_type.as_deref() } /// Return the underlying body stream, assuming the `FileStream` was created by a server request. #[cfg(feature = "server")] pub fn body_mut(&mut self) -> Option<&mut axum_core::body::BodyDataStream> { self.server_body.as_mut() } /// Create a new `FileStream` from a file path. This is only available on the server. 
#[cfg(feature = "server")] pub async fn from_path(file: impl AsRef<Path>) -> Result<Self, std::io::Error> { Self::from_path_buf(file.as_ref()).await } #[cfg(feature = "server")] async fn from_path_buf(file: &Path) -> Result<Self, std::io::Error> { let metadata = file.metadata()?; let contents = tokio::fs::File::open(&file).await?; let mime = dioxus_asset_resolver::native::get_mime_from_ext( file.extension().and_then(|s| s.to_str()), ); let size = metadata.len(); let name = file .file_name() .and_then(|s| s.to_str()) .unwrap_or("file") .to_string(); // Convert the tokio file into an async byte stream let reader_stream = tokio_util::io::ReaderStream::new(contents); // Attempt to construct a BodyDataStream from the reader stream. // Many axum_core versions provide a `from_stream` or similar constructor. let body = axum_core::body::Body::from_stream(reader_stream).into_data_stream(); Ok(Self { data: None, name, size: Some(size), content_type: Some(mime.to_string()), #[cfg(feature = "server")] server_body: Some(body), client_body: None, }) } /// Create a new `FileStream` from raw components. /// /// This is meant to be used on the server where a file might not even exist but you still want /// to stream it to the client as a download. 
#[cfg(feature = "server")] pub fn from_raw( name: String, size: Option<u64>, content_type: String, body: axum_core::body::BodyDataStream, ) -> Self { Self { data: None, name, size, content_type: Some(content_type), #[cfg(feature = "server")] server_body: Some(body), client_body: None, } } } impl IntoRequest for FileStream { #[allow(unreachable_code)] fn into_request( self, #[allow(unused_mut)] mut builder: ClientRequest, ) -> impl Future<Output = ClientResult> + 'static { async move { let Some(file_data) = self.data else { return Err(RequestError::Request( "FileStream has no data to send".into(), )); }; #[cfg(feature = "web")] if cfg!(target_arch = "wasm32") { use js_sys::escape; use wasm_bindgen::JsCast; let as_file = file_data.inner().downcast_ref::<web_sys::File>().unwrap(); let as_blob = as_file.dyn_ref::<web_sys::Blob>().unwrap(); let content_type = as_blob.type_(); let content_length = as_blob.size().to_string(); let name = as_file.name(); // Set both Content-Length and X-Content-Size for compatibility with server extraction. // In browsers, content-length is often overwritten, so we set X-Content-Size as well // for better compatibility with dioxus-based clients. return builder .header("Content-Type", content_type)? .header("Content-Length", content_length.clone())? .header("X-Content-Size", content_length)? .header( "Content-Disposition", format!("attachment; filename=\"{}\"", escape(&name)), )? .send_js_value(as_blob.clone().into()) .await; } #[cfg(not(target_arch = "wasm32"))] { use std::ascii::escape_default; use futures::TryStreamExt; let content_type = self .content_type .unwrap_or_else(|| "application/octet-stream".to_string()); let content_length = self.size.map(|s| s.to_string()); let name = self.name; let stream = file_data.byte_stream().map_err(|_| StreamingError::Failed); // Ascii escape the filename to avoid issues with special characters. 
let mut chars = vec![]; for byte in name.chars() { chars.extend(escape_default(byte as u8)); } let filename = String::from_utf8(chars).map_err(|_| { RequestError::Request( "Failed to escape filename for Content-Disposition".into(), ) }); if let Some(length) = content_length { builder = builder.header("Content-Length", length)?; } if let Ok(filename) = filename { builder = builder.header( "Content-Disposition", format!("attachment; filename=\"{}\"", filename), )?; } return builder .header("Content-Type", content_type)? .send_body_stream(stream) .await; } unimplemented!("FileStream::into_request is only implemented for web targets"); } } } impl<S> FromRequest<S> for FileStream { type Rejection = ServerFnError; fn from_request( req: Request, _: &S, ) -> impl Future<Output = Result<Self, Self::Rejection>> + Send { async move { tracing::info!("Extracting FileUpload from request: {:?}", req); let disposition = req.headers().get("Content-Disposition"); let filename = match disposition.map(|s| s.to_str()) { Some(Ok(dis)) => { let content = content_disposition::parse_content_disposition(dis); content .filename_full() .unwrap_or_else(|| "file".to_string()) } _ => "file".to_string(), }; // Content-length is unreliable, so we use `X-Content-Size` as an indicator. // For stream requests with known bodies, the browser will still set Content-Length to 0, unfortunately. 
let size = req .headers() .get("X-Content-Size") .and_then(|s| s.to_str().ok()) .and_then(|s| s.parse::<u64>().ok()); let content_type = req .headers() .get("Content-Type") .and_then(|s| s.to_str().ok()) .map(|s| s.to_string()); Ok(FileStream { data: None, name: filename, content_type, size, client_body: None, #[cfg(feature = "server")] server_body: Some(req.into_body().into_data_stream()), }) } } } impl FromResponse for FileStream { fn from_response(res: ClientResponse) -> impl Future<Output = Result<Self, ServerFnError>> { async move { // Check status code first - don't try to stream error responses as files if !res.status().is_success() { let status_code = res.status().as_u16(); let canonical_reason = res .status() .canonical_reason() .unwrap_or("Unknown error") .to_string(); let bytes = res.bytes().await.unwrap_or_default(); let message = String::from_utf8(bytes.to_vec()).unwrap_or(canonical_reason); return Err(ServerFnError::ServerError { message, code: status_code, details: None, }); } // Extract filename from Content-Disposition header if present. let name = res .headers() .get("Content-Disposition") .and_then(|h| h.to_str().ok()) .and_then(|dis| { let cd = content_disposition::parse_content_disposition(dis); cd.filename().map(|(name, _)| name.to_string()) }) .unwrap_or_else(|| "file".to_string()); // Extract content type header let content_type = res .headers() .get("Content-Type") .and_then(|h| h.to_str().ok()) .map(|s| s.to_string()); // Prefer the response's known content length but fall back to X-Content-Size header. 
let size = res.content_length().or_else(|| { res.headers() .get("X-Content-Size") .and_then(|h| h.to_str().ok()) .and_then(|s| s.parse::<u64>().ok()) }); Ok(Self { data: None, name, size, content_type, client_body: Some(Box::pin(res.bytes_stream())), #[cfg(feature = "server")] server_body: None, }) } } } #[cfg(feature = "server")] impl IntoResponse for FileStream { fn into_response(self) -> axum::response::Response { use axum::body::Body; let Some(body) = self.server_body else { use dioxus_fullstack_core::HttpError; return HttpError::new(http::StatusCode::BAD_REQUEST, "FileStream has no body") .into_response(); }; let mut res = axum::response::Response::new(Body::from_stream(body)); // Set relevant headers if available if let Some(content_type) = &self.content_type { res.headers_mut() .insert("Content-Type", content_type.parse().unwrap()); } if let Some(size) = self.size { res.headers_mut() .insert("Content-Length", size.to_string().parse().unwrap()); } res.headers_mut().insert( "Content-Disposition", format!("attachment; filename=\"{}\"", self.name) .parse() .unwrap(), ); res } } impl From<FileData> for FileStream { fn from(value: FileData) -> Self { Self { name: value.name().to_string(), content_type: value.content_type().map(|s| s.to_string()), size: Some(value.size()), data: Some(value), client_body: None, #[cfg(feature = "server")] server_body: None, } } } impl Stream for FileStream { type Item = Result<Bytes, StreamingError>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { // For server-side builds, poll the server_body stream if it exists. #[cfg(feature = "server")] if let Some(body) = self.server_body.as_mut() { return Pin::new(body) .poll_next(cx) .map_err(|_| StreamingError::Failed); } // For client-side builds, poll the client_body stream if it exists. if let Some(body) = self.client_body.as_mut() { return body.as_mut().poll_next(cx); } // Otherwise, the stream is exhausted. 
Poll::Ready(None) } } impl std::fmt::Debug for FileStream { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("FileStream") .field("name", &self.name) .field("size", &self.size) .field("content_type", &self.content_type) .finish() } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/payloads/query.rs
packages/fullstack/src/payloads/query.rs
use std::ops::Deref; use crate::ServerFnError; use axum::extract::FromRequestParts; use http::request::Parts; use serde::de::DeserializeOwned; /// An extractor that deserializes query parameters into the given type `T`. /// /// This uses `serde_qs` under the hood to support complex query parameter structures. #[derive(Debug, Clone, Copy, Default)] pub struct Query<T>(pub T); impl<T, S> FromRequestParts<S> for Query<T> where T: DeserializeOwned, S: Send + Sync, { type Rejection = ServerFnError; async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result<Self, Self::Rejection> { let inner: T = serde_qs::from_str(parts.uri.query().unwrap_or_default()) .map_err(|e| ServerFnError::Deserialization(e.to_string()))?; Ok(Self(inner)) } } impl<T> Deref for Query<T> { type Target = T; fn deref(&self) -> &Self::Target { &self.0 } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/src/payloads/sse.rs
packages/fullstack/src/payloads/sse.rs
use crate::{ClientResponse, FromResponse, RequestError, ServerFnError}; #[cfg(feature = "server")] use axum::{ response::sse::{Event, KeepAlive}, BoxError, }; use futures::io::AsyncBufReadExt; use futures::Stream; use futures::{StreamExt, TryStreamExt}; use http::{header::CONTENT_TYPE, HeaderValue, StatusCode}; use serde::de::DeserializeOwned; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; /// A stream of Server-Sent Events (SSE) that can be used to receive events from the server. /// /// This type implements `Stream` for asynchronous iteration over events. /// Events are automatically deserialized from JSON to the specified type `T`. #[allow(clippy::type_complexity)] pub struct ServerEvents<T> { _marker: std::marker::PhantomData<fn() -> T>, // The receiving end from the server client: Option<Pin<Box<dyn Stream<Item = Result<ServerSentEvent, ServerFnError>>>>>, #[cfg(feature = "server")] keep_alive: Option<KeepAlive>, // The actual SSE response to send to the client #[cfg(feature = "server")] sse: Option<axum::response::Sse<Pin<Box<dyn Stream<Item = Result<Event, BoxError>> + Send>>>>, } impl<T: DeserializeOwned> ServerEvents<T> { /// Receives the next event from the stream, deserializing it to `T`. /// /// Returns `None` if the stream has ended. pub async fn recv(&mut self) -> Option<Result<T, ServerFnError>> { let event = self.next_event().await?; match event { Ok(event) => { let data: Result<T, ServerFnError> = serde_json::from_str(&event.data).map_err(|err| { ServerFnError::Serialization(format!( "failed to deserialize event data: {}", err )) }); Some(data) } Err(err) => Some(Err(err)), } } } impl<T> ServerEvents<T> { /// Receives the next raw event from the stream. /// /// Returns `None` if the stream has ended. 
pub async fn next_event(&mut self) -> Option<Result<ServerSentEvent, ServerFnError>> { self.client.as_mut()?.next().await } } impl<T: DeserializeOwned> Stream for ServerEvents<T> { type Item = Result<T, ServerFnError>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { let Some(client) = self.client.as_mut() else { return Poll::Ready(None); }; match client.as_mut().poll_next(cx) { Poll::Ready(Some(Ok(event))) => { let data = serde_json::from_str(&event.data).map_err(|err| { ServerFnError::Serialization(format!( "failed to deserialize event data: {}", err )) }); Poll::Ready(Some(data)) } Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err(err))), Poll::Ready(None) => Poll::Ready(None), Poll::Pending => Poll::Pending, } } } impl<T> FromResponse for ServerEvents<T> { async fn from_response(res: ClientResponse) -> Result<Self, ServerFnError> { let status = res.status(); if status != StatusCode::OK { return Err(ServerFnError::Request(RequestError::Status( format!("Expected status 200 OK, got {}", status), status.as_u16(), ))); } let content_type = res.headers().get(CONTENT_TYPE); if content_type != Some(&HeaderValue::from_static(mime::TEXT_EVENT_STREAM.as_ref())) { return Err(ServerFnError::Request(RequestError::Request(format!( "Expected content type 'text/event-stream', got {:?}", content_type )))); } let mut stream = res .bytes_stream() .map(|result| result.map_err(std::io::Error::other)) .into_async_read(); let mut line_buffer = String::new(); let mut event_buffer = EventBuffer::new(); let stream: Pin<Box<dyn Stream<Item = Result<ServerSentEvent, ServerFnError>>>> = Box::pin( async_stream::try_stream! { loop { line_buffer.clear(); if stream.read_line(&mut line_buffer).await.map_err(|err| ServerFnError::StreamError(err.to_string()))? 
== 0 { break; } let line = if let Some(line) = line_buffer.strip_suffix('\n') { line } else { &line_buffer }; // dispatch if line.is_empty() { if let Some(event) = event_buffer.produce_event() { yield event; } continue; } // Parse line to split field name and value, applying proper trimming. let (field, value) = line.split_once(':').unwrap_or((line, "")); let value = value.strip_prefix(' ').unwrap_or(value); // Handle fields - these are the in SSE speci. match field { "event" => event_buffer.set_event_type(value), "data" => event_buffer.push_data(value), "id" => event_buffer.set_id(value), "retry" => { if let Ok(millis) = value.parse() { event_buffer.set_retry(Duration::from_millis(millis)); } } _ => {} } } }, ); Ok(Self { _marker: std::marker::PhantomData, client: Some(stream), #[cfg(feature = "server")] keep_alive: None, #[cfg(feature = "server")] sse: None, }) } } /// Server-Sent Event representation. #[derive(Debug, Clone, Eq, PartialEq)] pub struct ServerSentEvent { /// A string identifying the type of event described. pub event_type: String, /// The data field for the message. pub data: String, /// Last event ID value. pub last_event_id: Option<String>, /// Reconnection time. pub retry: Option<Duration>, } /// Internal buffer used to accumulate lines of an SSE (Server-Sent Events) stream. struct EventBuffer { event_type: String, data: String, last_event_id: Option<String>, retry: Option<Duration>, } impl EventBuffer { /// Creates fresh new [`EventBuffer`]. #[allow(clippy::new_without_default)] fn new() -> Self { Self { event_type: String::new(), data: String::new(), last_event_id: None, retry: None, } } /// Produces a [`Event`], if current state allow it. /// /// Reset the internal state to process further data. 
fn produce_event(&mut self) -> Option<ServerSentEvent> { let event = if self.data.is_empty() { None } else { Some(ServerSentEvent { event_type: if self.event_type.is_empty() { "message".to_string() } else { self.event_type.clone() }, data: self.data.to_string(), last_event_id: self.last_event_id.clone(), retry: self.retry, }) }; self.event_type.clear(); self.data.clear(); event } /// Set the [`Event`]'s type. Override previous value. fn set_event_type(&mut self, event_type: &str) { self.event_type.clear(); self.event_type.push_str(event_type); } /// Extends internal data with given data. fn push_data(&mut self, data: &str) { if !self.data.is_empty() { self.data.push('\n'); } self.data.push_str(data); } fn set_id(&mut self, id: &str) { self.last_event_id = Some(id.to_string()); } fn set_retry(&mut self, retry: Duration) { self.retry = Some(retry); } } #[cfg(feature = "server")] pub use server_impl::*; #[cfg(feature = "server")] mod server_impl { use super::*; use crate::spawn_platform; use axum::response::sse::Sse; use axum_core::response::IntoResponse; use futures::Future; use futures::SinkExt; use futures::{Sink, TryStream}; use serde::Serialize; impl<T: 'static> ServerEvents<T> { /// Create a `ServerEvents` from a function that is given a sender to send events to the client. /// /// By default, we send a comment every 15 seconds to keep the connection alive. 
pub fn new<F, R>(f: impl FnOnce(SseTx<T>) -> F + Send + 'static) -> Self where F: Future<Output = R> + 'static, R: 'static + Send, { let (tx, mut rx) = futures_channel::mpsc::unbounded(); let tx = SseTx { sender: tx, _marker: std::marker::PhantomData, }; // Spawn the user function in the background spawn_platform(move || f(tx)); // Create the stream of events, mapping the incoming events to `Ok` // If the user function ends, the stream will end and the connection will be closed let stream = futures::stream::poll_fn(move |cx| match rx.poll_next_unpin(cx) { std::task::Poll::Ready(Some(event)) => std::task::Poll::Ready(Some( Ok(event) as Result<axum::response::sse::Event, BoxError> )), std::task::Poll::Ready(None) => std::task::Poll::Ready(None), std::task::Poll::Pending => std::task::Poll::Pending, }); let sse = Sse::new(stream.boxed()); Self { _marker: std::marker::PhantomData, client: None, keep_alive: Some(KeepAlive::new().interval(Duration::from_secs(15))), sse: Some(sse), } } /// Create a `ServerEvents` from a `TryStream` of events. pub fn from_stream<S>(stream: S) -> Self where S: TryStream<Ok = T, Error = BoxError> + Send + 'static, T: Serialize, { let stream = stream.map_ok(|event| { axum::response::sse::Event::default() .json_data(event) .expect("Failed to serialize SSE event") }); let sse = axum::response::Sse::new(stream.boxed()); Self { _marker: std::marker::PhantomData, client: None, keep_alive: Some(KeepAlive::new().interval(Duration::from_secs(15))), sse: Some(sse), } } /// Set the keep-alive configuration for the SSE connection. /// /// A `None` value will disable the default `KeepAlive` of 15 seconds. pub fn with_keep_alive(mut self, keep_alive: Option<KeepAlive>) -> Self { self.keep_alive = keep_alive; self } /// Create a `ServerEvents` from an existing Axum `Sse` response. 
#[allow(clippy::type_complexity)] pub fn from_sse( sse: Sse<Pin<Box<dyn Stream<Item = Result<Event, BoxError>> + Send>>>, ) -> Self { Self { _marker: std::marker::PhantomData, client: None, keep_alive: None, sse: Some(sse), } } } impl<T> IntoResponse for ServerEvents<T> { fn into_response(self) -> axum_core::response::Response { let sse = self .sse .expect("SSE should be initialized before using it as a response"); if let Some(keep_alive) = self.keep_alive { sse.keep_alive(keep_alive).into_response() } else { sse.into_response() } } } /// A transmitter for sending events to the SSE stream. pub struct SseTx<T> { sender: futures_channel::mpsc::UnboundedSender<axum::response::sse::Event>, _marker: std::marker::PhantomData<fn() -> T>, } impl<T: Serialize> SseTx<T> { /// Sends an event to the SSE stream. pub async fn send(&mut self, event: T) -> anyhow::Result<()> { let event = axum::response::sse::Event::default().json_data(event)?; self.sender.unbounded_send(event)?; Ok(()) } } impl<T> std::ops::Deref for SseTx<T> { type Target = futures_channel::mpsc::UnboundedSender<axum::response::sse::Event>; fn deref(&self) -> &Self::Target { &self.sender } } impl<T> std::ops::DerefMut for SseTx<T> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.sender } } impl<T: Serialize> Sink<T> for SseTx<T> { type Error = anyhow::Error; fn poll_ready( mut self: Pin<&mut Self>, _cx: &mut Context<'_>, ) -> Poll<Result<(), Self::Error>> { self.sender.poll_ready_unpin(_cx).map_err(|e| e.into()) } fn start_send(mut self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> { let event = axum::response::sse::Event::default().json_data(item)?; self.sender.start_send(event).map_err(|e| e.into()) } fn poll_flush( mut self: Pin<&mut Self>, _cx: &mut Context<'_>, ) -> Poll<Result<(), Self::Error>> { self.sender.poll_flush_unpin(_cx).map_err(|e| e.into()) } fn poll_close( mut self: Pin<&mut Self>, _cx: &mut Context<'_>, ) -> Poll<Result<(), Self::Error>> { 
self.sender.poll_close_unpin(_cx).map_err(|e| e.into()) } } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack/tests/compile-test.rs
packages/fullstack/tests/compile-test.rs
#![allow(clippy::manual_async_fn)] #![allow(unused_variables)] use anyhow::Result; use axum::extract::FromRequest; use axum::response::IntoResponse; use axum::{response::Html, Json}; use bytes::Bytes; use dioxus::prelude::*; use dioxus_fullstack::{get, FileStream, ServerFnError, Text, TextStream, Websocket}; use futures::StreamExt; use http::HeaderMap; use http::StatusCode; use http_body_util::BodyExt; use serde::{Deserialize, Serialize}; use std::future::Future; fn main() {} mod simple_extractors { use super::*; /// We can extract the state and return anything thats IntoResponse #[get("/home")] async fn one() -> Result<String> { Ok("hello home".to_string()) } /// We can extract the path arg and return anything thats IntoResponse #[get("/home/{id}")] async fn two(id: String) -> Result<String> { Ok(format!("hello home {}", id)) } /// We can do basically nothing #[get("/")] async fn three() -> Result<()> { Ok(()) } /// We can do basically nothing, with args #[get("/{one}/{two}?a&b&c")] async fn four(one: String, two: String, a: String, b: String, c: String) -> Result<()> { Ok(()) } /// We can return anything that implements IntoResponse #[get("/hello")] async fn five() -> Result<Html<String>> { Ok(Html("<h1>Hello!</h1>".to_string())) } /// We can return anything that implements IntoResponse #[get("/hello")] async fn six() -> Result<Json<String>> { Ok(Json("Hello!".to_string())) } /// We can return our own custom `Text<T>` type for sending plain text #[get("/hello")] async fn six_2() -> Result<Text<String>> { Ok(Text("Hello!".to_string())) } /// We can return our own custom TextStream type for sending plain text streams #[get("/hello")] async fn six_3() -> Result<TextStream> { Ok(TextStream::new(futures::stream::iter(vec![ "Hello 1".to_string(), "Hello 2".to_string(), "Hello 3".to_string(), ]))) } /// We can return a Result with anything that implements IntoResponse #[get("/hello")] async fn seven() -> Result<Bytes> { Ok(Bytes::from_static(b"Hello!")) } /// We can 
return a Result with anything that implements IntoResponse #[get("/hello")] async fn eight() -> Result<Bytes, StatusCode> { Ok(Bytes::from_static(b"Hello!")) } /// We can use the anyhow error type #[get("/hello")] async fn nine() -> Result<Bytes> { Ok(Bytes::from_static(b"Hello!")) } /// We can use the ServerFnError error type #[get("/hello")] async fn ten() -> Result<Bytes, ServerFnError> { Ok(Bytes::from_static(b"Hello!")) } /// We can use the ServerFnError error type #[get("/hello")] async fn elevent() -> Result<Bytes> { Ok(Bytes::from_static(b"Hello!")) } /// We can use multiple args that are Deserialize #[get("/hello")] async fn twelve(a: i32, b: i32, c: i32) -> Result<Bytes> { Ok(format!("Hello! {} {} {}", a, b, c).into()) } // How should we handle generics? Doesn't make a lot of sense with distributed registration? // I think we should just not support them for now. Reworking it will be a big change though. // // /// We can use generics // #[get("/hello")] // async fn thirteen<S: Serialize + DeserializeOwned>(a: S) -> Result<Bytes> { // Ok(format!("Hello! {}", serde_json::to_string(&a)?).into()) // } // /// We can use impl-style generics // #[get("/hello")] // async fn fourteen(a: impl Serialize + DeserializeOwned) -> Result<Bytes> { // Ok(format!("Hello! 
{}", serde_json::to_string(&a)?).into()) // } } mod custom_serialize { use super::*; #[derive(Serialize, Deserialize)] struct YourObject { id: i32, amount: Option<i32>, offset: Option<i32>, } /// Directly return the object, and it will be serialized to JSON #[get("/item/{id}?amount&offset")] async fn get_item1( id: i32, amount: Option<i32>, offset: Option<i32>, ) -> Result<Json<YourObject>> { Ok(Json(YourObject { id, amount, offset })) } #[get("/item/{id}?amount&offset")] async fn get_item2( id: i32, amount: Option<i32>, offset: Option<i32>, ) -> Result<Json<YourObject>> { Ok(Json(YourObject { id, amount, offset })) } #[get("/item/{id}?amount&offset")] async fn get_item3(id: i32, amount: Option<i32>, offset: Option<i32>) -> Result<YourObject> { Ok(YourObject { id, amount, offset }) } #[get("/item/{id}?amount&offset")] async fn get_item4( id: i32, amount: Option<i32>, offset: Option<i32>, ) -> Result<YourObject, StatusCode> { Ok(YourObject { id, amount, offset }) } } mod custom_types { use axum::response::Response; // use axum_core::response::Response; use dioxus_fullstack::{ ClientRequest, ClientResponse, FromResponse, IntoRequest, RequestError, WebSocketOptions, }; use super::*; /// We can extract the path arg and return anything thats IntoResponse #[get("/upload/image/")] async fn streaming_file(body: FileStream) -> Result<Json<i32>> { unimplemented!() } /// We can extract the path arg and return anything thats IntoResponse #[get("/upload/image/?name&size&ftype")] async fn streaming_file_args( name: String, size: usize, ftype: String, body: FileStream, ) -> Result<Json<i32>> { unimplemented!() } #[get("/")] async fn ws_endpoint(options: WebSocketOptions) -> Result<Websocket<String, String>> { unimplemented!() } struct MyCustomPayload {} impl FromResponse for MyCustomPayload { fn from_response(res: ClientResponse) -> impl Future<Output = Result<Self, ServerFnError>> { async move { Ok(MyCustomPayload {}) } } } impl IntoResponse for MyCustomPayload { fn 
into_response(self) -> Response { unimplemented!() } } impl<T> FromRequest<T> for MyCustomPayload { type Rejection = ServerFnError; #[allow(clippy::manual_async_fn)] fn from_request( _req: axum::extract::Request, _state: &T, ) -> impl Future<Output = Result<Self, Self::Rejection>> + Send { async move { Ok(MyCustomPayload {}) } } } impl IntoRequest for MyCustomPayload { fn into_request( self, request_builder: ClientRequest, ) -> impl Future<Output = Result<ClientResponse, RequestError>> + 'static { async move { unimplemented!() } } } #[get("/myendpoint")] async fn my_custom_handler1(payload: MyCustomPayload) -> Result<MyCustomPayload> { Ok(payload) } #[get("/myendpoint2")] async fn my_custom_handler2(payload: MyCustomPayload) -> Result<MyCustomPayload, StatusCode> { Ok(payload) } } mod overlap { use super::*; #[derive(Serialize, Deserialize)] struct MyCustomPayload {} impl IntoResponse for MyCustomPayload { fn into_response(self) -> axum::response::Response { unimplemented!() } } impl<T> FromRequest<T> for MyCustomPayload { type Rejection = ServerFnError; #[allow(clippy::manual_async_fn)] fn from_request( _req: axum::extract::Request, _state: &T, ) -> impl Future<Output = Result<Self, Self::Rejection>> + Send { async move { Ok(MyCustomPayload {}) } } } /// When we have overlapping serialize + IntoResponse impls, the autoref logic will only pick Serialize /// if IntoResponse is not available. Otherwise, IntoResponse is preferred. 
#[get("/myendpoint")] async fn my_custom_handler3(payload: MyCustomPayload) -> Result<MyCustomPayload, StatusCode> { Ok(payload) } /// Same, but with the anyhow::Error path #[get("/myendpoint")] async fn my_custom_handler4(payload: MyCustomPayload) -> Result<MyCustomPayload> { Ok(payload) } } mod http_ext { use dioxus::Result; use super::*; /// Extract requests directly for full control #[get("/myendpoint")] async fn my_custom_handler1(request: axum::extract::Request) -> Result<()> { let mut data = request.into_data_stream(); while let Some(chunk) = data.next().await { let _ = chunk.unwrap(); } Ok(()) } } mod input_types { use super::*; #[derive(Serialize, Deserialize)] struct CustomPayload { name: String, age: u32, } /// We can take `()` as input #[post("/")] async fn zero(a: (), b: (), c: ()) -> Result<()> { Ok(()) } /// We can take `()` as input in serde types #[post("/")] async fn zero_1(a: Json<()>) -> Result<()> { Ok(()) } /// We can take regular axum extractors as input #[post("/")] async fn one(data: Json<CustomPayload>) -> Result<()> { Ok(()) } /// We can take Deserialize types as input, and they will be deserialized from JSON #[post("/")] async fn two(name: String, age: u32) -> Result<()> { Ok(()) } /// We can take Deserialize types as input, with custom server extensions #[post("/", headers: HeaderMap)] async fn three(name: String) -> Result<()> { Ok(()) } /// We can take a regular axum-like mix with extractors and Deserialize types #[post("/", headers: HeaderMap)] async fn four(data: Json<CustomPayload>) -> Result<()> { Ok(()) } /// We can even accept string in the final position. 
#[post("/")] async fn five(age: u32, name: String) -> Result<()> { Ok(()) } } mod handlers { use super::*; #[get("/handlers/get")] async fn handle_get() -> Result<String> { Ok("handled get".to_string()) } #[post("/handlers/post")] async fn handle_post() -> Result<String> { Ok("handled post".to_string()) } #[put("/handlers/put")] async fn handle_put() -> Result<String> { Ok("handled put".to_string()) } #[patch("/handlers/patch")] async fn handle_patch() -> Result<String> { Ok("handled patch".to_string()) } #[delete("/handlers/delete")] async fn handle_delete() -> Result<String> { Ok("handled delete".to_string()) } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/core-macro/src/lib.rs
packages/core-macro/src/lib.rs
#![doc = include_str!("../README.md")] #![doc(html_logo_url = "https://avatars.githubusercontent.com/u/79236386")] #![doc(html_favicon_url = "https://avatars.githubusercontent.com/u/79236386")] use component::{ComponentBody, ComponentMacroOptions}; use proc_macro::TokenStream; use quote::ToTokens; use syn::parse_macro_input; mod component; mod props; mod utils; use dioxus_rsx as rsx; #[doc = include_str!("../docs/props.md")] #[proc_macro_derive(Props, attributes(props))] pub fn derive_props(input: TokenStream) -> TokenStream { let input = parse_macro_input!(input as syn::DeriveInput); match props::impl_my_derive(&input) { Ok(output) => output.into(), Err(error) => error.to_compile_error().into(), } } #[doc = include_str!("../docs/rsx.md")] #[proc_macro] pub fn rsx(tokens: TokenStream) -> TokenStream { match syn::parse::<rsx::CallBody>(tokens) { Err(err) => err.to_compile_error().into(), Ok(body) => body.into_token_stream().into(), } } #[doc = include_str!("../docs/component.md")] #[proc_macro_attribute] pub fn component(_args: TokenStream, input: TokenStream) -> TokenStream { parse_macro_input!(input as ComponentBody) .with_options(parse_macro_input!(_args as ComponentMacroOptions)) .into_token_stream() .into() }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/core-macro/src/utils.rs
packages/core-macro/src/utils.rs
use quote::ToTokens; use syn::parse::{Parse, ParseStream}; use syn::spanned::Spanned; use syn::{parse_quote, Expr, Lit, Meta, Token, Type}; /// Attempts to convert the given literal to a string. /// Converts ints and floats to their base 10 counterparts. /// /// Returns `None` if the literal is [`Lit::Verbatim`] or if the literal is [`Lit::ByteStr`] /// and the byte string could not be converted to UTF-8. pub fn lit_to_string(lit: Lit) -> Option<String> { match lit { Lit::Str(l) => Some(l.value()), Lit::ByteStr(l) => String::from_utf8(l.value()).ok(), Lit::Byte(l) => Some(String::from(l.value() as char)), Lit::Char(l) => Some(l.value().to_string()), Lit::Int(l) => Some(l.base10_digits().to_string()), Lit::Float(l) => Some(l.base10_digits().to_string()), Lit::Bool(l) => Some(l.value().to_string()), Lit::Verbatim(_) => None, _ => None, } } pub fn format_type_string(ty: &Type) -> String { let ty_unformatted = ty.into_token_stream().to_string(); let ty_unformatted = ty_unformatted.trim(); // simply remove all whitespace let ty_formatted = ty_unformatted.replace(' ', ""); ty_formatted.to_string() } /// Represents the `#[deprecated]` attribute. /// /// You can use the [`DeprecatedAttribute::from_meta`] function to try to parse an attribute to this struct. #[derive(Default)] pub struct DeprecatedAttribute { pub since: Option<String>, pub note: Option<String>, } impl DeprecatedAttribute { /// Returns `None` if the given attribute was not a valid form of the `#[deprecated]` attribute. 
pub fn from_meta(meta: &Meta) -> syn::Result<Self> { if meta.path() != &parse_quote!(deprecated) { return Err(syn::Error::new( meta.span(), "attribute path is not `deprecated`", )); } match &meta { Meta::Path(_) => Ok(Self::default()), Meta::NameValue(name_value) => { let Expr::Lit(expr_lit) = &name_value.value else { return Err(syn::Error::new( name_value.span(), "literal in `deprecated` value must be a string", )); }; Ok(Self { since: None, note: lit_to_string(expr_lit.lit.clone()).map(|s| s.trim().to_string()), }) } Meta::List(list) => { let parsed = list.parse_args::<DeprecatedAttributeArgsParser>()?; Ok(Self { since: parsed.since.map(|s| s.trim().to_string()), note: parsed.note.map(|s| s.trim().to_string()), }) } } } } mod kw { use syn::custom_keyword; custom_keyword!(since); custom_keyword!(note); } struct DeprecatedAttributeArgsParser { since: Option<String>, note: Option<String>, } impl Parse for DeprecatedAttributeArgsParser { fn parse(input: ParseStream) -> syn::Result<Self> { let mut since: Option<String> = None; let mut note: Option<String> = None; if input.peek(kw::since) { input.parse::<kw::since>()?; input.parse::<Token![=]>()?; since = lit_to_string(input.parse()?); } if input.peek(Token![,]) && input.peek2(kw::note) { input.parse::<Token![,]>()?; input.parse::<kw::note>()?; input.parse::<Token![=]>()?; note = lit_to_string(input.parse()?); } Ok(Self { since, note }) } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/core-macro/src/component.rs
packages/core-macro/src/component.rs
use proc_macro2::TokenStream; use quote::{format_ident, quote, ToTokens, TokenStreamExt}; use syn::parse::{Parse, ParseStream}; use syn::punctuated::Punctuated; use syn::spanned::Spanned; use syn::*; pub struct ComponentBody { pub item_fn: ItemFn, pub options: ComponentMacroOptions, } impl Parse for ComponentBody { fn parse(input: ParseStream) -> Result<Self> { let item_fn: ItemFn = input.parse()?; validate_component_fn(&item_fn)?; Ok(Self { item_fn, options: ComponentMacroOptions::default(), }) } } impl ComponentBody { pub fn with_options(mut self, options: ComponentMacroOptions) -> Self { self.options = options; self } } impl ToTokens for ComponentBody { fn to_tokens(&self, tokens: &mut TokenStream) { // https://github.com/DioxusLabs/dioxus/issues/1938 // If there's only one input and the input is `props: Props`, we don't need to generate a props struct // Just attach the non_snake_case attribute to the function // eventually we'll dump this metadata into devtooling that lets us find all these components // // Components can also use the struct pattern to "inline" their props. // Freya uses this a bunch (because it's clean), // e.g. `fn Navbar(NavbarProps { title }: NavbarProps)` was previously being incorrectly parsed if self.is_explicit_props_ident() || self.has_struct_parameter_pattern() { let comp_fn = &self.item_fn; tokens.append_all(allow_camel_case_for_fn_ident(comp_fn).into_token_stream()); return; } let comp_fn = self.comp_fn(); // If there's no props declared, we simply omit the props argument // This is basically so you can annotate the App component with #[component] and still be compatible with the // launch signatures that take fn() -> Element let props_struct = match self.item_fn.sig.inputs.is_empty() { // No props declared, so we don't need to generate a props struct true => quote! 
{}, // Props declared, so we generate a props struct and then also attach the doc attributes to it false => { let doc = format!("Properties for the [`{}`] component.", &comp_fn.sig.ident); let (props_struct, props_impls) = self.props_struct(); quote! { #[doc = #doc] #[allow(missing_docs)] #props_struct #(#props_impls)* } } }; let completion_hints = self.completion_hints(); tokens.append_all(quote! { #props_struct #comp_fn #completion_hints }); } } impl ComponentBody { // build a new item fn, transforming the original item fn fn comp_fn(&self) -> ItemFn { let ComponentBody { item_fn, .. } = self; let ItemFn { attrs, vis, sig, block, } = item_fn; let Signature { inputs, ident: fn_ident, generics, output: fn_output, .. } = sig; let Generics { where_clause, .. } = generics; let (_, impl_generics, _) = generics.split_for_impl(); // We generate a struct with the same name as the component but called `Props` let struct_ident = Ident::new(&format!("{fn_ident}Props"), fn_ident.span()); // We pull in the field names from the original function signature, but need to strip off the mutability let struct_field_names = inputs.iter().map(rebind_mutability); let props_docs = self.props_docs(inputs.iter().collect()); let inlined_props_argument = if inputs.is_empty() { quote! {} } else { quote! { #struct_ident { #(#struct_field_names),* }: #struct_ident #impl_generics } }; // Defer to the lazy_body if we're using lazy let body: TokenStream = if self.options.lazy { self.lazy_body( &struct_ident, generics, &impl_generics, fn_output, where_clause, &inlined_props_argument, block, ) } else { quote! { #block } }; // We need a props type to exist even if the inputs are empty with lazy components let emit_props = if self.options.lazy { if inputs.is_empty() { quote! {props: ()} } else { quote!(props: #struct_ident #impl_generics) } } else { inlined_props_argument }; // The extra nest is for the snake case warning to kick back in parse_quote! 
{ #(#attrs)* #(#props_docs)* #[allow(non_snake_case)] #vis fn #fn_ident #generics (#emit_props) #fn_output #where_clause { { #body } } } } /// Generate the body of the lazy component /// /// This extracts the body into a new component that is wrapped in a lazy loader #[allow(clippy::too_many_arguments)] fn lazy_body( &self, struct_ident: &Ident, generics: &Generics, impl_generics: &TypeGenerics, fn_output: &ReturnType, where_clause: &Option<WhereClause>, inlined_props_argument: &TokenStream, block: &Block, ) -> TokenStream { let fn_ident = &self.item_fn.sig.ident; let inputs = &self.item_fn.sig.inputs; let lazy_name = format_ident!("Lazy{fn_ident}"); let out_ty = match &self.item_fn.sig.output { ReturnType::Default => quote! { () }, ReturnType::Type(_, ty) => quote! { #ty }, }; let props_ty = if inputs.is_empty() { quote! { () } } else { quote! { #struct_ident #impl_generics } }; let anon_props = if inputs.is_empty() { quote! { props: () } } else { quote! { #inlined_props_argument} }; quote! { fn #lazy_name #generics (#anon_props) #fn_output #where_clause { #block } dioxus::config_macros::maybe_wasm_split! { if wasm_split { { static __MODULE: wasm_split::LazyLoader<#props_ty, #out_ty> = wasm_split::lazy_loader!(extern "lazy" fn #lazy_name(props: #props_ty,) -> #out_ty); use_resource(|| async move { __MODULE.load().await }).suspend()?; __MODULE.call(props).unwrap() } } else { { #lazy_name(props) } } } } } /// Build an associated struct for the props of the component /// /// This will expand to the typed-builder implementation that we have vendored in this crate. /// TODO: don't vendor typed-builder and instead transform the tokens we give it before expansion. /// TODO: cache these tokens since this codegen is rather expensive (lots of tokens) /// /// We try our best to transfer over any declared doc attributes from the original function signature onto the /// props struct fields. fn props_struct(&self) -> (ItemStruct, Vec<ItemImpl>) { let ItemFn { vis, sig, .. 
} = &self.item_fn; let Signature { inputs, ident, generics, .. } = sig; let generic_arguments = if !generics.params.is_empty() { let generic_arguments = generics .params .iter() .map(make_prop_struct_generics) .collect::<Punctuated<_, Token![,]>>(); quote! { <#generic_arguments> } } else { quote! {} }; let where_clause = &generics.where_clause; let struct_fields = inputs.iter().map(move |f| make_prop_struct_field(f, vis)); let struct_field_idents = inputs .iter() .map(make_prop_struct_field_idents) .collect::<Vec<_>>(); let struct_ident = Ident::new(&format!("{ident}Props"), ident.span()); let item_struct = parse_quote! { #[derive(Props)] #[allow(non_camel_case_types)] #vis struct #struct_ident #generics #where_clause { #(#struct_fields),* } }; let item_impl_clone = parse_quote! { impl #generics ::core::clone::Clone for #struct_ident #generic_arguments #where_clause { #[inline] fn clone(&self) -> Self { Self { #(#struct_field_idents: ::core::clone::Clone::clone(&self.#struct_field_idents)),* } } } }; let item_impl_partial_eq = parse_quote! { impl #generics ::core::cmp::PartialEq for #struct_ident #generic_arguments #where_clause { #[inline] fn eq(&self, other: &Self) -> bool { #( self.#struct_field_idents == other.#struct_field_idents && )* true } } }; (item_struct, vec![item_impl_clone, item_impl_partial_eq]) } /// Convert a list of function arguments into a list of doc attributes for the props struct /// /// This lets us generate set of attributes that we can apply to the props struct to give it a nice docstring. 
fn props_docs(&self, inputs: Vec<&FnArg>) -> Vec<Attribute> { let fn_ident = &self.item_fn.sig.ident; if inputs.is_empty() { return Vec::new(); } let arg_docs = inputs .iter() .filter_map(|f| build_doc_fields(f)) .collect::<Vec<_>>(); let mut props_docs = Vec::with_capacity(5); let props_def_link = fn_ident.to_string() + "Props"; let header = format!("# Props\n*For details, see the [props struct definition]({props_def_link}).*"); props_docs.push(parse_quote! { #[doc = #header] }); for arg in arg_docs { let DocField { arg_name, arg_type, deprecation, input_arg_doc, } = arg; let arg_name = strip_pat_mutability(arg_name).to_token_stream().to_string(); let arg_type = crate::utils::format_type_string(arg_type); let input_arg_doc = keep_up_to_n_consecutive_chars(input_arg_doc.trim(), 2, '\n') .replace("\n\n", "</p><p>"); let prop_def_link = format!("{props_def_link}::{arg_name}"); let mut arg_doc = format!("- [`{arg_name}`]({prop_def_link}) : `{arg_type}`"); if let Some(deprecation) = deprecation { arg_doc.push_str("<p>👎 Deprecated"); if let Some(since) = deprecation.since { arg_doc.push_str(&format!(" since {since}")); } if let Some(note) = deprecation.note { let note = keep_up_to_n_consecutive_chars(&note, 1, '\n').replace('\n', " "); let note = keep_up_to_n_consecutive_chars(&note, 1, '\t').replace('\t', " "); arg_doc.push_str(&format!(": {note}")); } arg_doc.push_str("</p>"); if !input_arg_doc.is_empty() { arg_doc.push_str("<hr/>"); } } if !input_arg_doc.is_empty() { arg_doc.push_str(&format!("<p>{input_arg_doc}</p>")); } props_docs.push(parse_quote! { #[doc = #arg_doc] }); } props_docs } fn is_explicit_props_ident(&self) -> bool { if let Some(FnArg::Typed(PatType { pat, .. })) = self.item_fn.sig.inputs.first() { if let Pat::Ident(ident) = pat.as_ref() { return ident.ident == "props"; } } false } fn has_struct_parameter_pattern(&self) -> bool { if let Some(FnArg::Typed(PatType { pat, .. 
})) = self.item_fn.sig.inputs.first() { if matches!(pat.as_ref(), Pat::Struct(_)) { return true; } } false } // We generate an extra enum to help us autocomplete the braces after the component. // This is a bit of a hack, but it's the only way to get the braces to autocomplete. fn completion_hints(&self) -> TokenStream { let comp_fn = &self.item_fn.sig.ident; let completions_mod = Ident::new(&format!("{}_completions", comp_fn), comp_fn.span()); let vis = &self.item_fn.vis; quote! { #[allow(non_snake_case)] #[doc(hidden)] mod #completions_mod { #[doc(hidden)] #[allow(non_camel_case_types)] /// This enum is generated to help autocomplete the braces after the component. It does nothing pub enum Component { #comp_fn {} } } #[allow(unused)] #vis use #completions_mod::Component::#comp_fn; } } } struct DocField<'a> { arg_name: &'a Pat, arg_type: &'a Type, deprecation: Option<crate::utils::DeprecatedAttribute>, input_arg_doc: String, } fn build_doc_fields(f: &FnArg) -> Option<DocField<'_>> { let FnArg::Typed(pt) = f else { unreachable!() }; let arg_doc = pt .attrs .iter() .filter_map(|attr| { // TODO: Error reporting // Check if the path of the attribute is "doc" if !is_attr_doc(attr) { return None; }; let Meta::NameValue(meta_name_value) = &attr.meta else { return None; }; let Expr::Lit(doc_lit) = &meta_name_value.value else { return None; }; let Lit::Str(doc_lit_str) = &doc_lit.lit else { return None; }; Some(doc_lit_str.value()) }) .fold(String::new(), |mut doc, next_doc_line| { doc.push('\n'); doc.push_str(&next_doc_line); doc }); Some(DocField { arg_name: &pt.pat, arg_type: &pt.ty, deprecation: pt.attrs.iter().find_map(|attr| { if !attr.path().is_ident("deprecated") { return None; } let res = crate::utils::DeprecatedAttribute::from_meta(&attr.meta); match res { Err(e) => panic!("{}", e.to_string()), Ok(v) => Some(v), } }), input_arg_doc: arg_doc, }) } fn validate_component_fn(item_fn: &ItemFn) -> Result<()> { // Do some validation.... // 1. 
Ensure the component returns *something* if item_fn.sig.output == ReturnType::Default { return Err(Error::new( item_fn.sig.output.span(), "Must return a <dioxus_core::Element>".to_string(), )); } // 2. make sure there's no lifetimes on the component - we don't know how to handle those if item_fn.sig.generics.lifetimes().count() > 0 { return Err(Error::new( item_fn.sig.generics.span(), "Lifetimes are not supported in components".to_string(), )); } // 3. we can't handle async components if item_fn.sig.asyncness.is_some() { return Err(Error::new( item_fn.sig.asyncness.span(), "Async components are not supported".to_string(), )); } // 4. we can't handle const components if item_fn.sig.constness.is_some() { return Err(Error::new( item_fn.sig.constness.span(), "Const components are not supported".to_string(), )); } // 5. no receiver parameters if item_fn .sig .inputs .iter() .any(|f| matches!(f, FnArg::Receiver(_))) { return Err(Error::new( item_fn.sig.inputs.span(), "Receiver parameters are not supported".to_string(), )); } Ok(()) } /// Convert a function arg with a given visibility (provided by the function) and then generate a field for the /// associated props struct. fn make_prop_struct_field(f: &FnArg, vis: &Visibility) -> TokenStream { // There's no receivers (&self) allowed in the component body let FnArg::Typed(pt) = f else { unreachable!() }; let arg_pat = match pt.pat.as_ref() { // rip off mutability // todo: we actually don't want any of the extra bits of the field pattern Pat::Ident(f) => { let mut f = f.clone(); f.mutability = None; quote! { #f } } a => quote! { #a }, }; let PatType { attrs, ty, colon_token, .. } = pt; quote! 
{ #(#attrs)* #vis #arg_pat #colon_token #ty } } /// Get ident from a function arg fn make_prop_struct_field_idents(f: &FnArg) -> &Ident { // There's no receivers (&self) allowed in the component body let FnArg::Typed(pt) = f else { unreachable!() }; match pt.pat.as_ref() { // rip off mutability // todo: we actually don't want any of the extra bits of the field pattern Pat::Ident(f) => &f.ident, _ => unreachable!(), } } fn make_prop_struct_generics(generics: &GenericParam) -> TokenStream { match generics { GenericParam::Type(ty) => { let ident = &ty.ident; quote! { #ident } } GenericParam::Lifetime(lifetime) => { let lifetime = &lifetime.lifetime; quote! { #lifetime } } GenericParam::Const(c) => { let ident = &c.ident; quote! { #ident } } } } fn rebind_mutability(f: &FnArg) -> TokenStream { // There's no receivers (&self) allowed in the component body let FnArg::Typed(pt) = f else { unreachable!() }; let immutable = strip_pat_mutability(&pt.pat); quote!(mut #immutable) } fn strip_pat_mutability(pat: &Pat) -> Pat { let mut pat = pat.clone(); // rip off mutability, but still write it out eventually if let Pat::Ident(ref mut pat_ident) = &mut pat { pat_ident.mutability = None; } pat } /// Checks if the attribute is a `#[doc]` attribute. fn is_attr_doc(attr: &Attribute) -> bool { attr.path() == &parse_quote!(doc) } fn keep_up_to_n_consecutive_chars( input: &str, n_of_consecutive_chars_allowed: usize, target_char: char, ) -> String { let mut output = String::new(); let mut prev_char: Option<char> = None; let mut consecutive_count = 0; for c in input.chars() { match prev_char { Some(prev) if c == target_char && prev == target_char => { if consecutive_count < n_of_consecutive_chars_allowed { output.push(c); consecutive_count += 1; } } _ => { output.push(c); prev_char = Some(c); consecutive_count = 1; } } } output } /// Takes a function and returns a clone of it where an `UpperCamelCase` identifier is allowed by the compiler. 
fn allow_camel_case_for_fn_ident(item_fn: &ItemFn) -> ItemFn { let mut clone = item_fn.clone(); let block = &item_fn.block; clone.attrs.push(parse_quote! { #[allow(non_snake_case)] }); clone.block = parse_quote! { { #block } }; clone } #[derive(Default)] pub struct ComponentMacroOptions { pub lazy: bool, } impl Parse for ComponentMacroOptions { fn parse(input: ParseStream) -> Result<Self> { let mut lazy_load = false; while !input.is_empty() { let ident = input.parse::<Ident>()?; let ident_name = ident.to_string(); if ident_name == "lazy" { lazy_load = true; } else if ident_name == "no_case_check" { // we used to have this? } else { return Err(Error::new( ident.span(), "Unknown option for component macro", )); } if input.peek(Token![,]) { input.parse::<Token![,]>()?; } } Ok(Self { lazy: lazy_load }) } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/core-macro/src/props/mod.rs
packages/core-macro/src/props/mod.rs
//! This code mostly comes from idanarye/rust-typed-builder //! //! However, it has been adopted to fit the Dioxus Props builder pattern. //! //! For Dioxus, we make a few changes: //! - [x] Automatically implement [`Into<Option>`] on the setters (IE the strip setter option) //! - [x] Automatically implement a default of none for optional fields (those explicitly wrapped with [`Option<T>`]) use proc_macro2::TokenStream; use syn::punctuated::Punctuated; use syn::spanned::Spanned; use syn::{parse::Error, PathArguments}; use quote::quote; use syn::{parse_quote, GenericArgument, Ident, PathSegment, Type}; pub fn impl_my_derive(ast: &syn::DeriveInput) -> Result<TokenStream, Error> { let data = match &ast.data { syn::Data::Struct(data) => match &data.fields { syn::Fields::Named(fields) => { let struct_info = struct_info::StructInfo::new(ast, fields.named.iter())?; let builder_creation = struct_info.builder_creation_impl()?; let conversion_helper = struct_info.conversion_helper_impl()?; let fields = struct_info .included_fields() .map(|f| struct_info.field_impl(f)) .collect::<Result<Vec<_>, _>>()?; let extends = struct_info .extend_fields() .map(|f| struct_info.extends_impl(f)) .collect::<Result<Vec<_>, _>>()?; let fields = quote!(#(#fields)*).into_iter(); let required_fields = struct_info .included_fields() .filter(|f| { f.builder_attr.default.is_none() && f.builder_attr.extends.is_empty() }) .map(|f| struct_info.required_field_impl(f)) .collect::<Result<Vec<_>, _>>()?; let build_method = struct_info.build_method_impl(); quote! 
{ #builder_creation #conversion_helper #( #fields )* #( #extends )* #( #required_fields )* #build_method } } syn::Fields::Unnamed(_) => { return Err(Error::new( ast.span(), "Props is not supported for tuple structs", )) } syn::Fields::Unit => { return Err(Error::new( ast.span(), "Props is not supported for unit structs", )) } }, syn::Data::Enum(_) => { return Err(Error::new(ast.span(), "Props is not supported for enums")) } syn::Data::Union(_) => { return Err(Error::new(ast.span(), "Props is not supported for unions")) } }; Ok(data) } mod util { use quote::ToTokens; pub fn path_to_single_string(path: &syn::Path) -> Option<String> { if path.leading_colon.is_some() { return None; } let mut it = path.segments.iter(); let segment = it.next()?; if it.next().is_some() { // Multipart path return None; } if segment.arguments != syn::PathArguments::None { return None; } Some(segment.ident.to_string()) } pub fn expr_to_single_string(expr: &syn::Expr) -> Option<String> { if let syn::Expr::Path(path) = expr { path_to_single_string(&path.path) } else { None } } pub fn ident_to_type(ident: syn::Ident) -> syn::Type { let mut path = syn::Path { leading_colon: None, segments: Default::default(), }; path.segments.push(syn::PathSegment { ident, arguments: Default::default(), }); syn::Type::Path(syn::TypePath { qself: None, path }) } pub fn empty_type() -> syn::Type { syn::TypeTuple { paren_token: Default::default(), elems: Default::default(), } .into() } pub fn type_tuple(elems: impl Iterator<Item = syn::Type>) -> syn::TypeTuple { let mut result = syn::TypeTuple { paren_token: Default::default(), elems: elems.collect(), }; if !result.elems.empty_or_trailing() { result.elems.push_punct(Default::default()); } result } pub fn empty_type_tuple() -> syn::TypeTuple { syn::TypeTuple { paren_token: Default::default(), elems: Default::default(), } } pub fn make_punctuated_single<T, P: Default>(value: T) -> syn::punctuated::Punctuated<T, P> { let mut punctuated = 
syn::punctuated::Punctuated::new(); punctuated.push(value); punctuated } pub fn modify_types_generics_hack<F>( ty_generics: &syn::TypeGenerics, mut mutator: F, ) -> syn::AngleBracketedGenericArguments where F: FnMut(&mut syn::punctuated::Punctuated<syn::GenericArgument, syn::token::Comma>), { let mut abga: syn::AngleBracketedGenericArguments = syn::parse(ty_generics.clone().into_token_stream().into()).unwrap_or_else(|_| { syn::AngleBracketedGenericArguments { colon2_token: None, lt_token: Default::default(), args: Default::default(), gt_token: Default::default(), } }); mutator(&mut abga.args); abga } pub fn strip_raw_ident_prefix(mut name: String) -> String { if name.starts_with("r#") { name.replace_range(0..2, ""); } name } } mod field_info { use crate::props::{looks_like_store_type, looks_like_write_type, type_from_inside_option}; use proc_macro2::TokenStream; use quote::{format_ident, quote}; use syn::spanned::Spanned; use syn::{parse::Error, punctuated::Punctuated}; use syn::{parse_quote, Expr, Path}; use super::util::{ expr_to_single_string, ident_to_type, path_to_single_string, strip_raw_ident_prefix, }; #[derive(Debug)] pub struct FieldInfo<'a> { pub ordinal: usize, pub name: &'a syn::Ident, pub generic_ident: syn::Ident, pub ty: &'a syn::Type, pub builder_attr: FieldBuilderAttr, } impl FieldInfo<'_> { pub fn new( ordinal: usize, field: &syn::Field, field_defaults: FieldBuilderAttr, ) -> Result<FieldInfo<'_>, Error> { if let Some(ref name) = field.ident { let mut builder_attr = field_defaults.with(&field.attrs)?; let strip_option_auto = builder_attr.strip_option || !builder_attr.ignore_option && type_from_inside_option(&field.ty).is_some(); // children field is automatically defaulted to an empty VNode unless it is marked as optional (in which case it defaults to None) if name == "children" && !strip_option_auto { builder_attr.default = Some(syn::parse(quote!(dioxus_core::VNode::empty()).into()).unwrap()); } // String fields automatically use impl Display if 
field.ty == parse_quote!(::std::string::String) || field.ty == parse_quote!(std::string::String) || field.ty == parse_quote!(string::String) || field.ty == parse_quote!(String) { builder_attr.from_displayable = true; // ToString is both more general and provides a more useful error message than From<String>. If the user tries to use `#[into]`, use ToString instead. if builder_attr.auto_into { builder_attr.auto_to_string = true; } builder_attr.auto_into = false; } // Write and Store fields automatically use impl Into if looks_like_write_type(&field.ty) || looks_like_store_type(&field.ty) { builder_attr.auto_into = true; } // If this is a child field or extends, default to Default::default() if a default isn't set if !builder_attr.extends.is_empty() { builder_attr.default.get_or_insert_with(|| { syn::parse(quote!(::core::default::Default::default()).into()).unwrap() }); } // auto detect optional if !builder_attr.strip_option && strip_option_auto { builder_attr.strip_option = true; // only change the default if it isn't manually set above builder_attr.default.get_or_insert_with(|| { syn::parse(quote!(::core::default::Default::default()).into()).unwrap() }); } Ok(FieldInfo { ordinal, name, generic_ident: syn::Ident::new( &format!("__{}", strip_raw_ident_prefix(name.to_string())), name.span(), ), ty: &field.ty, builder_attr, }) } else { Err(Error::new(field.span(), "Nameless field in struct")) } } pub fn generic_ty_param(&self) -> syn::GenericParam { syn::GenericParam::Type(self.generic_ident.clone().into()) } pub fn type_ident(&self) -> syn::Type { ident_to_type(self.generic_ident.clone()) } pub fn tuplized_type_ty_param(&self) -> syn::Type { let mut types = syn::punctuated::Punctuated::default(); types.push(self.ty.clone()); types.push_punct(Default::default()); syn::TypeTuple { paren_token: Default::default(), elems: types, } .into() } pub fn extends_vec_ident(&self) -> Option<syn::Ident> { (!self.builder_attr.extends.is_empty()).then(|| { let ident = &self.name; 
format_ident!("__{ident}_attributes") }) } } #[derive(Debug, Default, Clone)] pub struct FieldBuilderAttr { pub default: Option<syn::Expr>, pub docs: Vec<syn::Attribute>, pub skip: bool, pub auto_into: bool, pub auto_to_string: bool, pub from_displayable: bool, pub strip_option: bool, pub ignore_option: bool, pub extends: Vec<Path>, } impl FieldBuilderAttr { pub fn with(mut self, attrs: &[syn::Attribute]) -> Result<Self, Error> { let mut skip_tokens = None; for attr in attrs { if attr.path().is_ident("doc") { self.docs.push(attr.clone()); continue; } if path_to_single_string(attr.path()).as_deref() != Some("props") { continue; } match &attr.meta { syn::Meta::List(list) => { if list.tokens.is_empty() { continue; } } _ => { continue; } } let as_expr = attr.parse_args_with( Punctuated::<Expr, syn::Token![,]>::parse_separated_nonempty, )?; for expr in as_expr.into_iter() { self.apply_meta(expr)?; } // Stash its span for later (we don’t yet know if it’ll be an error) if self.skip && skip_tokens.is_none() { skip_tokens = Some(attr.meta.clone()); } } if self.skip && self.default.is_none() { return Err(Error::new_spanned( skip_tokens.unwrap(), "#[props(skip)] must be accompanied by default or default_code", )); } Ok(self) } pub fn apply_meta(&mut self, expr: syn::Expr) -> Result<(), Error> { match expr { // #[props(default = "...")] syn::Expr::Assign(assign) => { let name = expr_to_single_string(&assign.left) .ok_or_else(|| Error::new_spanned(&assign.left, "Expected identifier"))?; match name.as_str() { "extends" => { if let syn::Expr::Path(path) = *assign.right { self.extends.push(path.path); Ok(()) } else { Err(Error::new_spanned( assign.right, "Expected simple identifier", )) } } "default" => { self.default = Some(*assign.right); Ok(()) } "default_code" => { if let syn::Expr::Lit(syn::ExprLit { lit: syn::Lit::Str(code), .. 
}) = *assign.right { use std::str::FromStr; let tokenized_code = TokenStream::from_str(&code.value())?; self.default = Some( syn::parse(tokenized_code.into()) .map_err(|e| Error::new_spanned(code, format!("{e}")))?, ); } else { return Err(Error::new_spanned(assign.right, "Expected string")); } Ok(()) } _ => Err(Error::new_spanned( &assign, format!("Unknown parameter {name:?}"), )), } } // #[props(default)] syn::Expr::Path(path) => { let name = path_to_single_string(&path.path) .ok_or_else(|| Error::new_spanned(&path, "Expected identifier"))?; match name.as_str() { "default" => { self.default = Some( syn::parse(quote!(::core::default::Default::default()).into()) .unwrap(), ); Ok(()) } "optional" => { self.default = Some( syn::parse(quote!(::core::default::Default::default()).into()) .unwrap(), ); self.strip_option = true; Ok(()) } "extend" => { self.extends.push(path.path); Ok(()) } _ => { macro_rules! handle_fields { ( $( $flag:expr, $field:ident, $already:expr; )* ) => { match name.as_str() { $( $flag => { if self.$field { Err(Error::new(path.span(), concat!("Illegal setting - field is already ", $already))) } else { self.$field = true; Ok(()) } } )* _ => Err(Error::new_spanned( &path, format!("Unknown setter parameter {:?}", name), )) } } } handle_fields!( "skip", skip, "skipped"; "into", auto_into, "calling into() on the argument"; "displayable", from_displayable, "calling to_string() on the argument"; "strip_option", strip_option, "putting the argument in Some(...)"; ) } } } syn::Expr::Unary(syn::ExprUnary { op: syn::UnOp::Not(_), expr, .. 
}) => { if let syn::Expr::Path(path) = *expr { let name = path_to_single_string(&path.path) .ok_or_else(|| Error::new_spanned(&path, "Expected identifier"))?; match name.as_str() { "default" => { self.default = None; Ok(()) } "skip" => { self.skip = false; Ok(()) } "auto_into" => { self.auto_into = false; Ok(()) } "displayable" => { self.from_displayable = false; Ok(()) } "optional" => { self.strip_option = false; self.ignore_option = true; Ok(()) } _ => Err(Error::new_spanned(path, "Unknown setting".to_owned())), } } else { Err(Error::new_spanned( expr, "Expected simple identifier".to_owned(), )) } } _ => Err(Error::new_spanned(expr, "Expected (<...>=<...>)")), } } } } fn type_from_inside_option(ty: &Type) -> Option<&Type> { let Type::Path(type_path) = ty else { return None; }; if type_path.qself.is_some() { return None; } let path = &type_path.path; let seg = path.segments.last()?; // If the segment is a supported optional type, provide the inner type. // Return the inner type if the pattern is `Option<T>` or `ReadSignal<Option<T>>`` if seg.ident == "ReadSignal" || seg.ident == "ReadOnlySignal" { // Get the inner type. E.g. the `u16` in `ReadSignal<u16>` or `Option` in `ReadSignal<Option<bool>>` let inner_type = extract_inner_type_from_segment(seg)?; let Type::Path(inner_path) = inner_type else { // If it isn't a path, the inner type isn't option return None; }; // If we're entering an `Option`, we must get the innermost type let inner_seg = inner_path.path.segments.last()?; if inner_seg.ident == "Option" { // Get the innermost type. let innermost_type = extract_inner_type_from_segment(inner_seg)?; return Some(innermost_type); } } else if seg.ident == "Option" { // Grab the inner time. E.g. Option<u16> let inner_type = extract_inner_type_from_segment(seg)?; return Some(inner_type); } None } // Extract the inner type from a path segment. 
fn extract_inner_type_from_segment(segment: &PathSegment) -> Option<&Type> { let PathArguments::AngleBracketed(generic_args) = &segment.arguments else { return None; }; let GenericArgument::Type(final_type) = generic_args.args.first()? else { return None; }; Some(final_type) } mod struct_info { use convert_case::{Case, Casing}; use proc_macro2::TokenStream; use quote::{quote, ToTokens}; use syn::parse::Error; use syn::punctuated::Punctuated; use syn::spanned::Spanned; use syn::{parse_quote, Expr, Ident}; use crate::props::strip_option; use super::field_info::{FieldBuilderAttr, FieldInfo}; use super::util::{ empty_type, empty_type_tuple, expr_to_single_string, make_punctuated_single, modify_types_generics_hack, path_to_single_string, strip_raw_ident_prefix, type_tuple, }; use super::{child_owned_type, looks_like_callback_type, looks_like_signal_type}; #[derive(Debug)] pub struct StructInfo<'a> { pub vis: &'a syn::Visibility, pub name: &'a syn::Ident, pub generics: &'a syn::Generics, pub fields: Vec<FieldInfo<'a>>, pub builder_attr: TypeBuilderAttr, pub builder_name: syn::Ident, pub conversion_helper_trait_name: syn::Ident, #[allow(unused)] pub core: syn::Ident, } impl<'a> StructInfo<'a> { pub fn included_fields(&self) -> impl Iterator<Item = &FieldInfo<'a>> { self.fields.iter().filter(|f| !f.builder_attr.skip) } pub fn extend_fields(&self) -> impl Iterator<Item = &FieldInfo<'a>> { self.fields .iter() .filter(|f| !f.builder_attr.extends.is_empty()) } pub fn new( ast: &'a syn::DeriveInput, fields: impl Iterator<Item = &'a syn::Field>, ) -> Result<StructInfo<'a>, Error> { let builder_attr = TypeBuilderAttr::new(&ast.attrs)?; let builder_name = strip_raw_ident_prefix(format!("{}Builder", ast.ident)); Ok(StructInfo { vis: &ast.vis, name: &ast.ident, generics: &ast.generics, fields: fields .enumerate() .map(|(i, f)| FieldInfo::new(i, f, builder_attr.field_defaults.clone())) .collect::<Result<_, _>>()?, builder_attr, builder_name: syn::Ident::new(&builder_name, 
ast.ident.span()), conversion_helper_trait_name: syn::Ident::new( &format!("{builder_name}_Optional"), ast.ident.span(), ), core: syn::Ident::new(&format!("{builder_name}_core"), ast.ident.span()), }) } fn modify_generics<F: FnMut(&mut syn::Generics)>(&self, mut mutator: F) -> syn::Generics { let mut generics = self.generics.clone(); mutator(&mut generics); generics } /// Checks if the props have any fields that should be owned by the child. For example, when converting T to `ReadSignal<T>`, the new signal should be owned by the child fn has_child_owned_fields(&self) -> bool { self.fields.iter().any(|f| child_owned_type(f.ty)) } fn memoize_impl(&self) -> Result<TokenStream, Error> { // First check if there are any ReadSignal fields, if there are not, we can just use the partialEq impl let signal_fields: Vec<_> = self .included_fields() .filter(|f| looks_like_signal_type(f.ty)) .map(|f| { let name = f.name; quote!(#name) }) .collect(); let move_signal_fields = quote! { trait NonPartialEq: Sized { fn compare(&self, other: &Self) -> bool; } impl<T> NonPartialEq for &&T { fn compare(&self, other: &Self) -> bool { false } } trait CanPartialEq: PartialEq { fn compare(&self, other: &Self) -> bool; } impl<T: PartialEq> CanPartialEq for T { fn compare(&self, other: &Self) -> bool { self == other } } // If they are equal, we don't need to rerun the component we can just update the existing signals #( // Try to memo the signal let field_eq = { let old_value: &_ = &*#signal_fields.peek(); let new_value: &_ = &*new.#signal_fields.peek(); (&old_value).compare(&&new_value) }; // Make the old fields point to the new fields #signal_fields.point_to(new.#signal_fields).unwrap(); if !field_eq { // If the fields are not equal, mark the signal as dirty to rerun any subscribers (#signal_fields).mark_dirty(); } // Move the old value back self.#signal_fields = #signal_fields; )* }; let event_handlers_fields: Vec<_> = self .included_fields() .filter(|f| looks_like_callback_type(f.ty)) 
.collect(); let regular_fields: Vec<_> = self .included_fields() .filter(|f| !looks_like_signal_type(f.ty) && !looks_like_callback_type(f.ty)) .map(|f| { let name = f.name; quote!(#name) }) .collect(); let move_event_handlers: TokenStream = event_handlers_fields.iter().map(|field| { // If this is an optional event handler, we need to check if it's None before we try to update it let optional = strip_option(field.ty).is_some(); let name = field.name; if optional { quote! { // If the event handler is None, we don't need to update it if let (Some(old_handler), Some(new_handler)) = (self.#name.as_mut(), new.#name.as_ref()) { old_handler.__point_to(new_handler); } } } else { quote! { // Update the event handlers self.#name.__point_to(&new.#name); } } }).collect(); // If there are signals, we automatically try to memoize the signals if !signal_fields.is_empty() { Ok(quote! { // First check if the fields are equal. This will compare the signal fields by pointer let exactly_equal = self == new; if exactly_equal { // If they are return early, they can be memoized without any changes return true; } // If they are not, move the signal fields into self and check if they are equal now that the signal fields are equal #( let mut #signal_fields = self.#signal_fields; self.#signal_fields = new.#signal_fields; )* // Then check if the fields are equal now that we know the signal fields are equal // NOTE: we don't compare other fields individually because we want to let users opt-out of memoization for certain fields by implementing PartialEq themselves let non_signal_fields_equal = self == new; // If they are not equal, we need to move over all the fields that are not event handlers or signals to self if !non_signal_fields_equal { let new_clone = new.clone(); #( self.#regular_fields = new_clone.#regular_fields; )* } // Move any signal and event fields into their old container. 
// We update signals and event handlers in place so that they are always up to date even if they were moved into a future in a previous render #move_signal_fields #move_event_handlers non_signal_fields_equal }) } else { Ok(quote! { let equal = self == new; // Move any signal and event fields into their old container. #move_event_handlers // If they are not equal, we need to move over all the fields that are not event handlers to self if !equal { let new_clone = new.clone(); #( self.#regular_fields = new_clone.#regular_fields; )* } equal }) } } pub fn builder_creation_impl(&self) -> Result<TokenStream, Error> { let StructInfo { ref vis, ref name, ref builder_name, .. } = *self; let generics = self.generics.clone(); let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); let (_, b_initial_generics, _) = self.generics.split_for_impl(); let all_fields_param = syn::GenericParam::Type( syn::Ident::new("TypedBuilderFields", proc_macro2::Span::call_site()).into(), ); let b_generics = self.modify_generics(|g| { g.params.insert(0, all_fields_param.clone()); }); let empties_tuple = type_tuple(self.included_fields().map(|_| empty_type())); let generics_with_empty = modify_types_generics_hack(&b_initial_generics, |args| { args.insert(0, syn::GenericArgument::Type(empties_tuple.clone().into())); }); let phantom_generics = self.generics.params.iter().filter_map(|param| match param { syn::GenericParam::Lifetime(lifetime) => { let lifetime = &lifetime.lifetime; Some(quote!(::core::marker::PhantomData<&#lifetime ()>)) } syn::GenericParam::Type(ty) => { let ty = &ty.ident; Some(quote!(::core::marker::PhantomData<#ty>)) } syn::GenericParam::Const(_cnst) => None, }); let builder_method_doc = match self.builder_attr.builder_method_doc { Some(ref doc) => quote!(#doc), None => { let doc = format!( " Create a builder for building `{name}`. On the builder, call {setters} to set the values of the fields. Finally, call `.build()` to create the instance of `{name}`. 
", name = self.name, setters = { let mut result = String::new(); let mut is_first = true; for field in self.included_fields() { use std::fmt::Write; if is_first { is_first = false; } else { write!(&mut result, ", ").unwrap();
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
true
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/core-macro/tests/values_memoize_in_place.rs
packages/core-macro/tests/values_memoize_in_place.rs
use dioxus::{ core::{generation, needs_update}, prelude::*, }; use dioxus_core::ElementId; use std::{any::Any, rc::Rc}; #[tokio::test] async fn values_memoize_in_place() { thread_local! { static DROP_COUNT: std::cell::RefCell<usize> = const { std::cell::RefCell::new(0) }; } struct CountsDrop; impl Drop for CountsDrop { fn drop(&mut self) { DROP_COUNT.with(|c| *c.borrow_mut() += 1); } } fn app() -> Element { let mut count = use_signal(|| 0); let x = CountsDrop; use_hook(|| { spawn(async move { for _ in 0..15 { tokio::time::sleep(std::time::Duration::from_millis(1)).await; count += 1; } }); }); rsx! { TakesEventHandler { click: move |num| { // Force the closure to own the drop counter let _ = &x; println!("num is {num}"); }, number: count() / 2 } TakesSignal { sig: count(), number: count() / 2 } } } set_event_converter(Box::new(dioxus::html::SerializedHtmlEventConverter)); let mut dom = VirtualDom::new(app); let mutations = dom.rebuild_to_vec(); println!("{:#?}", mutations); dom.mark_dirty(ScopeId::APP); for _ in 0..40 { let event = Event::new( Rc::new(PlatformEventData::new(Box::<SerializedMouseData>::default())) as Rc<dyn Any>, true, ); dom.runtime().handle_event("click", event, ElementId(1)); tokio::select! { _ = tokio::time::sleep(std::time::Duration::from_millis(20)) => {}, _ = dom.wait_for_work() => {} } dom.render_immediate(&mut dioxus_core::NoOpMutations); } dom.render_immediate(&mut dioxus_core::NoOpMutations); // As we rerun the app, the drop count should be 15 one for each render of the app component let drop_count = DROP_COUNT.with(|c| *c.borrow()); assert_eq!(drop_count, 16); } // We move over event handlers in place. Make sure we do that in a way that doesn't destroy the original event handler #[test] fn cloning_event_handler_components_work() { fn app() -> Element { let rsx_with_event_handler_component = rsx! { TakesEventHandler { click: move |evt| { println!("Clicked {evt:?}!"); }, number: 0 } }; rsx! 
{ {rsx_with_event_handler_component.clone()} {rsx_with_event_handler_component.clone()} {rsx_with_event_handler_component.clone()} {rsx_with_event_handler_component} } } set_event_converter(Box::new(dioxus::html::SerializedHtmlEventConverter)); let mut dom = VirtualDom::new(app); let mutations = dom.rebuild_to_vec(); println!("{:#?}", mutations); dom.mark_dirty(ScopeId::APP); for _ in 0..20 { let event = Event::new( Rc::new(PlatformEventData::new(Box::<SerializedMouseData>::default())) as Rc<dyn Any>, true, ); dom.runtime().handle_event("click", event, ElementId(1)); dom.render_immediate(&mut dioxus_core::NoOpMutations); } dom.render_immediate(&mut dioxus_core::NoOpMutations); } #[component] fn TakesEventHandler(click: EventHandler<usize>, number: usize) -> Element { let first_render_click = use_hook(move || click); if generation() > 0 { // Make sure the event handler is memoized in place and never gets dropped first_render_click(number); } rsx! { button { onclick: move |_| click(number), "{number}" } } } #[component] fn TakesSignal(sig: ReadSignal<usize>, number: usize) -> Element { let first_render_sig = use_hook(move || sig); if generation() > 0 { // Make sure the signal is memoized in place and never gets dropped println!("{first_render_sig}"); } rsx! 
{ button { "{number}" } } } // Regression test for https://github.com/DioxusLabs/dioxus/issues/2582 #[test] fn spreads_memorize_in_place() { use dioxus_core::Properties; #[derive(Props, Clone, PartialEq)] struct CompProps { #[props(extends = GlobalAttributes)] attributes: Vec<Attribute>, } let mut props = CompProps::builder().build(); assert!(!props.memoize(&CompProps::builder().all("123").build())); assert_eq!( props.attributes, vec![Attribute::new("all", "123", Some("style"), false)] ); assert!(!props.memoize(&CompProps::builder().width("123").build())); assert_eq!( props.attributes, vec![Attribute::new("width", "123", Some("style"), false)] ); assert!(!props.memoize(&CompProps::builder().build())); assert_eq!(props.attributes, vec![]); assert!(props.memoize(&CompProps::builder().build())); assert_eq!(props.attributes, vec![]); } // Regression test for https://github.com/DioxusLabs/dioxus/issues/2331 #[test] fn cloning_read_signal_components_work() { fn app() -> Element { if generation() < 5 { println!("Generating new props"); needs_update(); } let read_signal_rsx = rsx! { TakesReadSignalNonClone { sig: NonCloneable(generation() as i32) } TakesReadSignalNum { sig: generation() as i32 } }; rsx! { {read_signal_rsx.clone()} {read_signal_rsx} } } struct NonCloneable<T>(T); #[component] fn TakesReadSignalNum(sig: ReadSignal<i32>) -> Element { rsx! {} } #[component] fn TakesReadSignalNonClone(sig: ReadSignal<NonCloneable<i32>>) -> Element { rsx! {} } set_event_converter(Box::new(dioxus::html::SerializedHtmlEventConverter)); let mut dom = VirtualDom::new(app); let mutations = dom.rebuild_to_vec(); println!("{:#?}", mutations); dom.mark_dirty(ScopeId::APP); for _ in 0..20 { let event = Event::new( Rc::new(PlatformEventData::new(Box::<SerializedMouseData>::default())) as Rc<dyn Any>, true, ); dom.runtime().handle_event("click", event, ElementId(1)); dom.render_immediate(&mut dioxus_core::NoOpMutations); } dom.render_immediate(&mut dioxus_core::NoOpMutations); }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/core-macro/tests/generics.rs
packages/core-macro/tests/generics.rs
use std::fmt::Display; use dioxus::prelude::*; // This test just checks that props compile with generics // It will not actually run any code #[test] #[allow(unused)] #[allow(non_snake_case)] fn generic_props_compile() { fn app() -> Element { rsx! { TakesClone { value: "hello world" } TakesCloneManual { value: "hello world" } TakesCloneManualWhere { value: "hello world" } GenericFnWhereClause { value: "hello world" } } } #[component] fn TakesClone<T: Clone + PartialEq + 'static>(value: T) -> Element { rsx! {} } #[component] fn TakesCloneArc<T: PartialEq + 'static>(value: std::sync::Arc<T>) -> Element { rsx! {} } struct MyBox<T>(std::marker::PhantomData<T>); impl<T: Display> Clone for MyBox<T> { fn clone(&self) -> Self { MyBox(std::marker::PhantomData) } } impl<T: Display> PartialEq for MyBox<T> { fn eq(&self, _: &Self) -> bool { true } } #[component] #[allow(clippy::multiple_bound_locations)] fn TakesCloneMyBox<T: 'static>(value: MyBox<T>) -> Element where T: Display, { rsx! {} } #[derive(Props, Clone, PartialEq)] struct TakesCloneManualProps<T: Clone + PartialEq + 'static> { value: T, } fn TakesCloneManual<T: Clone + PartialEq>(props: TakesCloneManualProps<T>) -> Element { rsx! {} } #[derive(Props, Clone, PartialEq)] struct TakesCloneManualWhereProps<T> where T: Clone + PartialEq + 'static, { value: T, } fn TakesCloneManualWhere<T: Clone + PartialEq>( props: TakesCloneManualWhereProps<T>, ) -> Element { rsx! {} } #[derive(Props, Clone, PartialEq)] struct TakesCloneManualWhereWithOwnerProps<T> where T: Clone + PartialEq + 'static, { value: EventHandler<T>, } fn TakesCloneManualWhereWithOwner<T: Clone + PartialEq>( props: TakesCloneManualWhereWithOwnerProps<T>, ) -> Element { rsx! {} } #[component] fn GenericFnWhereClause<T>(value: T) -> Element where T: Clone + PartialEq + Display + 'static, { rsx! { p { "{value}" } } } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/core-macro/tests/rsx.rs
packages/core-macro/tests/rsx.rs
#[test] fn rsx() { let t = trybuild::TestCases::new(); t.compile_fail("tests/rsx/trailing-comma-0.rs"); } /// This test ensures that automatic `into` conversion occurs for default values. /// /// These are compile-time tests. /// See https://github.com/DioxusLabs/dioxus/issues/2373 #[cfg(test)] mod test_default_into { use dioxus::prelude::*; #[derive(Props, Clone, PartialEq)] struct MyCoolProps { // Test different into configurations #[props(into, default = true)] pub val_into_w_default_val: u16, #[props(into, default)] pub val_into_w_default: u16, #[props(default = true.into())] pub val_default: u16, // Test different into configurations with strings #[props(into, default = "abc")] pub str_into_w_default_val: String, #[props(into, default)] pub str_into_w_default: String, #[props(default = "abc".into())] pub str_default: String, // Test options #[props(into, default = Some("abc".into()))] pub opt_into_w_default_val: Option<String>, #[props(into, default)] pub opt_into_w_default: Option<String>, #[props(default = Some("abc".into()))] pub opt_default: Option<String>, pub opt_element: Option<Element>, // Test no default #[props(into)] pub some_data: bool, pub some_other_data: bool, // Test default values for signals #[props(default)] read_only_w_default: ReadSignal<bool>, #[props(default = true)] read_only_w_default_val: ReadSignal<bool>, #[props(default = ReadSignal::new(Signal::new(true)))] read_only_w_default_val_explicit: ReadSignal<bool>, // Test default values for callbacks/event handlers #[props(default)] callback_w_default: Callback, #[props(default = move |_| {})] callback_w_default_val_closure: Callback, #[props(default = { fn test(_: ()) {} test })] callback_w_default_val_expr_fn: Callback, #[props(default = Callback::new(move |_: ()| {}))] callback_w_default_val_explicit: Callback, #[props(default)] event_handler_w_default: EventHandler<KeyboardEvent>, #[props(default = move |_| {})] event_handler_w_default_val_closure: EventHandler<KeyboardEvent>, 
#[props(default = { fn test(_: KeyboardEvent) {} test })] event_handler_w_default_val_expr_fn: EventHandler<KeyboardEvent>, #[props(default = EventHandler::new(move |_: KeyboardEvent| {}))] event_handler_w_default_val_explicit: EventHandler<KeyboardEvent>, } } /// This test ensures that read-only signals that contain an option (`Signal<Option<u16>>`) /// are correctly created as default when not provided. /// /// These are compile-time tests. /// See https://github.com/DioxusLabs/dioxus/issues/2648 #[cfg(test)] #[allow(unused)] mod test_optional_signals { use dioxus::prelude::*; // Test if test components fail to compile. #[component] fn UsesComponents() -> Element { rsx! { PropsStruct { regular_read_signal: ReadSignal::new(Signal::new(1234)), } PropsStruct { optional_read_signal: 1234, regular_read_signal: 123u16, } PropParams {} PropParams { opt_read_sig: 1234 } DoubleOption {} DoubleOption { optional: Some(1234) } } } // Test props as struct param. #[derive(Props, Clone, PartialEq)] struct MyTestProps { pub optional_read_signal: ReadSignal<Option<u16>>, pub regular_read_signal: ReadSignal<u16>, } #[component] fn PropsStruct(props: MyTestProps) -> Element { rsx! { "hi" } } // Test props as params. #[component] fn PropParams(opt_read_sig: ReadSignal<Option<u16>>) -> Element { rsx! { "hi!" } } #[component] fn DoubleOption(optional: Option<Option<u16>>) -> Element { rsx! { "hi!" } } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/core-macro/tests/event_handler.rs
packages/core-macro/tests/event_handler.rs
use dioxus::prelude::*; // This test just checks that event handlers compile without explicit type annotations // It will not actually run any code #[test] #[allow(unused)] fn event_handlers_compile() { fn app() -> Element { let mut todos = use_signal(String::new); rsx! { input { // Normal event handlers work without explicit type annotations oninput: move |evt| todos.set(evt.value()), } button { // async event handlers work without explicit type annotations onclick: |event| async move { println!("{event:?}"); }, } // New! You can now use async closures for custom event handlers! // This shouldn't require an explicit type annotation TakesEventHandler { onclick: |event| async move { println!("{event:?}"); } } // Or you can accept a callback that returns a value // This shouldn't require an explicit type annotation TakesEventHandlerWithArg { double: move |value| (value * 2) as i32 } } } #[component] fn TakesEventHandler(onclick: EventHandler<MouseEvent>) -> Element { rsx! { button { // You can pass in EventHandlers directly to events onclick: onclick, "Click!" } button { // Or use the shorthand syntax onclick, "Click!" } // You should also be able to forward event handlers to other components with the shorthand syntax TakesEventHandler { onclick } } } #[component] fn TakesEventHandlerWithArg(double: Callback<u32, i32>) -> Element { let mut count = use_signal(|| 2); rsx! { button { // Callbacks let you easily inject custom logic into your components onclick: move |_| count.set(double(count()) as u32), "{count}" } } } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/core-macro/tests/rsx/trailing-comma-0.rs
packages/core-macro/tests/rsx/trailing-comma-0.rs
// Given an `rsx!` invocation with a missing trailing comma, // ensure the stderr output has an informative span. use dioxus::prelude::*; fn main() { rsx! { p { class: "foo bar" "Hello world" } }; }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack-server/src/config.rs
packages/fullstack-server/src/config.rs
//! Configuration for how to serve a Dioxus application #![allow(non_snake_case)] use dioxus_core::LaunchConfig; use std::any::Any; use std::sync::Arc; use crate::{IncrementalRendererConfig, IndexHtml}; #[allow(unused)] pub(crate) type ContextProviders = Arc<Vec<Box<dyn Fn() -> Box<dyn Any> + Send + Sync + 'static>>>; /// A ServeConfig is used to configure how to serve a Dioxus application. It contains information about how to serve static assets, and what content to render with [`dioxus_ssr`]. #[derive(Clone)] pub struct ServeConfig { pub(crate) index: IndexHtml, pub(crate) incremental: Option<IncrementalRendererConfig>, pub(crate) context_providers: Vec<Arc<dyn Fn() -> Box<dyn Any> + Send + Sync + 'static>>, pub(crate) streaming_mode: StreamingMode, } /// The streaming mode to use while rendering the page #[derive(Clone, Copy, Default, PartialEq)] pub enum StreamingMode { /// Streaming is disabled; all server futures should be resolved before hydrating the page on the client #[default] Disabled, /// Out of order streaming is enabled; server futures are resolved out of order and streamed to the client /// as they resolve OutOfOrder, } impl LaunchConfig for ServeConfig {} impl Default for ServeConfig { fn default() -> Self { Self::new() } } impl ServeConfig { /// Create a new ServeConfig with incremental static generation disabled and the default index.html settings. pub fn builder() -> Self { Self::new() } /// Create a new ServeConfig with incremental static generation disabled and the default index.html settings /// /// This will automatically use the `index.html` file in the `/public` directory if it exists. /// The `/public` folder is meant located next to the current executable. If no `index.html` file is found, /// a default index.html will be used, which will not include any JavaScript or WASM initialization code. /// /// To provide an alternate `index.html`, you can use `with_index_html` method instead. 
pub fn new() -> Self { let index = if let Some(public_path) = crate::public_path() { let index_html_path = public_path.join("index.html"); if index_html_path.exists() { let index_html = std::fs::read_to_string(index_html_path) .expect("Failed to read index.html from public directory"); IndexHtml::new(&index_html, "main") .expect("Failed to parse index.html from public directory") } else { tracing::warn!("No index.html found in public directory, using default index.html"); IndexHtml::ssr_only() } } else { tracing::warn!( "Cannot identify public directory, using default index.html. If you need client-side scripts (like JS + WASM), please provide an explicit public directory." ); IndexHtml::ssr_only() }; Self { index, incremental: None, context_providers: Default::default(), streaming_mode: StreamingMode::default(), } } /// Create a new ServeConfig with the given parsed `IndexHtml` structure. /// /// You can create the `IndexHtml` structure by using `IndexHtml::new` method, or manually from /// a string or file. pub fn with_index_html(index: IndexHtml) -> Self { Self { index, incremental: Default::default(), context_providers: Default::default(), streaming_mode: Default::default(), } } /// Enable incremental static generation. Incremental static generation caches the /// rendered html in memory and/or the file system. It can be used to improve performance of heavy routes. /// /// ```rust, no_run /// # fn app() -> Element { unimplemented!() } /// use dioxus::prelude::*; /// /// // Finally, launch the app with the config /// LaunchBuilder::new() /// // Only set the server config if the server feature is enabled /// .with_cfg(server_only!(dioxus_server::ServeConfig::default().incremental(dioxus_server::IncrementalRendererConfig::default()))) /// .launch(app); /// ``` pub fn incremental(mut self, cfg: IncrementalRendererConfig) -> Self { self.incremental = Some(cfg); self } /// Provide context to the root and server functions. 
You can use this context while rendering with [`consume_context`](dioxus_core::consume_context). /// /// /// The context providers passed into this method will be called when the context type is requested which may happen many times in the lifecycle of the application. /// /// /// Context will be forwarded from the LaunchBuilder if it is provided. /// /// ```rust, no_run /// #![allow(non_snake_case)] /// use dioxus::prelude::*; /// use std::sync::Arc; /// use std::any::Any; /// /// fn main() { /// // Hydrate the application on the client /// #[cfg(not(feature = "server"))] /// dioxus::launch(app); /// /// #[cfg(feature = "server")] /// dioxus_server::serve(|| async move { /// use dioxus_server::{axum, ServeConfig, DioxusRouterExt}; /// /// let config = ServeConfig::default() /// // You can provide context to your whole app on the server (including server functions) with the `context_provider` method on the launch builder /// .context_providers(Arc::new(vec![Box::new(|| Box::new(1234u32) as Box<dyn Any>) as Box<dyn Fn() -> Box<dyn Any> + Send + Sync>])); /// /// Ok( /// axum::Router::new() /// .serve_dioxus_application(config, app) /// ) /// }) /// } /// /// #[server] /// async fn read_context() -> ServerFnResult<u32> { /// Ok(123) /// } /// /// fn app() -> Element { /// let future = use_resource(read_context); /// rsx! { /// h1 { "{future:?}" } /// } /// } /// ``` pub fn context_providers(mut self, state: ContextProviders) -> Self { // This API should probably accept Vec<Box<dyn Fn() -> Box<dyn Any> + Send + Sync + 'static>> instead of Arc so we can // continue adding to the context list after calling this method. Changing the type is a breaking change so it cannot // be done until 0.7 is released. 
let context_providers = (0..state.len()).map(|i| { let state = state.clone(); Arc::new(move || state[i]()) as Arc<dyn Fn() -> Box<dyn std::any::Any> + Send + Sync> }); self.context_providers.extend(context_providers); self } /// Provide context to the root and server functions. You can use this context /// while rendering with [`consume_context`](dioxus_core::consume_context). /// /// /// The context providers passed into this method will be called when the context type is requested which may happen many times in the lifecycle of the application. /// /// /// Context will be forwarded from the LaunchBuilder if it is provided. /// /// ```rust, no_run /// #![allow(non_snake_case)] /// use dioxus::prelude::*; /// /// fn main() { /// #[cfg(not(feature = "server"))] /// // Hydrate the application on the client /// dioxus::launch(app); /// /// #[cfg(feature = "server")] /// dioxus_server::serve(|| async move { /// use dioxus_server::{axum, ServeConfig, DioxusRouterExt}; /// /// let config = ServeConfig::default() /// // You can provide context to your whole app on the server (including server functions) with the `context_provider` method on the launch builder /// .context_provider(|| 1234u32); /// /// Ok( /// axum::Router::new() /// .serve_dioxus_application(config, app) /// ) /// }); /// } /// /// #[server] /// async fn read_context() -> ServerFnResult<u32> { /// Ok(123) /// } /// /// fn app() -> Element { /// let future = use_resource(read_context); /// rsx! { /// h1 { "{future:?}" } /// } /// } /// ``` pub fn context_provider<C: 'static>( mut self, state: impl Fn() -> C + Send + Sync + 'static, ) -> Self { self.context_providers .push(Arc::new(move || Box::new(state()))); self } /// Provide context to the root and server functions. You can use this context while rendering with [`consume_context`](dioxus_core::consume_context). /// /// Context will be forwarded from the LaunchBuilder if it is provided. 
/// /// ```rust, no_run /// #![allow(non_snake_case)] /// use dioxus::prelude::*; /// /// fn main() { /// // Hydrate the application on the client /// #[cfg(not(feature = "server"))] /// dioxus::launch(app); /// /// // Run a custom server with axum on the server /// #[cfg(feature = "server")] /// dioxus_server::serve(|| async move { /// use dioxus_server::{axum, ServeConfig, DioxusRouterExt}; /// /// let config = ServeConfig::default() /// // You can provide context to your whole app on the server (including server functions) with the `context_provider` method on the launch builder /// .context(1234u32); /// /// Ok( /// axum::Router::new() /// .serve_dioxus_application(config, app) /// ) /// }); /// } /// /// #[server] /// async fn read_context() -> ServerFnResult<u32> { /// Ok(123) /// } /// /// fn app() -> Element { /// let future = use_resource(read_context); /// rsx! { /// h1 { "{future:?}" } /// } /// } /// ``` pub fn context(mut self, state: impl Any + Clone + Send + Sync + 'static) -> Self { self.context_providers .push(Arc::new(move || Box::new(state.clone()))); self } /// Set the streaming mode for the server. By default, streaming is disabled. /// /// ```rust, no_run /// # use dioxus::prelude::*; /// # fn app() -> Element { unimplemented!() } /// dioxus::LaunchBuilder::new() /// .with_context(server_only! { /// dioxus::server::ServeConfig::builder().streaming_mode(dioxus::server::StreamingMode::OutOfOrder) /// }) /// .launch(app); /// ``` pub fn streaming_mode(mut self, mode: StreamingMode) -> Self { self.streaming_mode = mode; self } /// Enable out of order streaming. This will cause server futures to be resolved out of order and streamed to the client as they resolve. /// /// It is equivalent to calling `streaming_mode(StreamingMode::OutOfOrder)` /// /// ```rust, no_run /// # use dioxus::prelude::*; /// # fn app() -> Element { unimplemented!() } /// dioxus::LaunchBuilder::new() /// .with_context(server_only! 
{ /// dioxus::server::ServeConfig::builder().enable_out_of_order_streaming() /// }) /// .launch(app); /// ``` pub fn enable_out_of_order_streaming(mut self) -> Self { self.streaming_mode = StreamingMode::OutOfOrder; self } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack-server/src/launch.rs
packages/fullstack-server/src/launch.rs
//! A launch function that creates an axum router for the LaunchBuilder use crate::{server::DioxusRouterExt, FullstackState, ServeConfig}; use anyhow::Context; use axum::{ body::Body, extract::{Request, State}, routing::IntoMakeService, Router, }; use dioxus_cli_config::base_path; use dioxus_core::{ComponentFunction, Element}; use dioxus_devtools::{DevserverMsg, HotReloadMsg}; use futures_util::{stream::FusedStream, StreamExt}; use hyper::body::Incoming; use hyper_util::server::conn::auto::Builder as HyperBuilder; use hyper_util::{ rt::{TokioExecutor, TokioIo}, service::TowerToHyperService, }; use std::{any::Any, net::SocketAddr, prelude::rust_2024::Future}; use std::{pin::Pin, sync::Arc}; use subsecond::HotFn; use tokio_util::either::Either; use tower::{Service, ServiceExt as _}; #[cfg(not(target_arch = "wasm32"))] use { dioxus_core::{RenderError, VNode}, tokio::net::TcpListener, }; type ContextList = Vec<Box<dyn Fn() -> Box<dyn Any> + Send + Sync>>; type BaseComp = fn() -> Element; /// Launch a fullstack app with the given root component. pub fn launch(root: BaseComp) -> ! { launch_cfg(root, vec![], vec![]) } /// Launch a fullstack app with the given root component, contexts, and config. #[allow(unused)] pub fn launch_cfg(root: BaseComp, contexts: ContextList, platform_config: Vec<Box<dyn Any>>) -> ! 
{ #[cfg(not(target_arch = "wasm32"))] tokio::runtime::Runtime::new() .unwrap() .block_on(async move { serve_server(root, contexts, platform_config).await }); unreachable!("Launching a fullstack app should never return") } #[cfg(not(target_arch = "wasm32"))] async fn serve_server( original_root: fn() -> Result<VNode, RenderError>, contexts: Vec<Box<dyn Fn() -> Box<dyn Any> + Send + Sync>>, platform_config: Vec<Box<dyn Any>>, ) { let mut cfg = platform_config .into_iter() .find_map(|cfg| cfg.downcast::<ServeConfig>().ok().map(|b| *b)) .unwrap_or_else(ServeConfig::new); // Extend the config's context providers with the context providers from the launch builder for ctx in contexts { let arced = Arc::new(ctx) as Arc<dyn Fn() -> Box<dyn Any> + Send + Sync>; cfg.context_providers.push(arced); } let cb = move || { let cfg = cfg.clone(); Box::pin(async move { Ok(apply_base_path( Router::new().serve_dioxus_application(cfg.clone(), original_root), original_root, cfg.clone(), base_path().map(|s| s.to_string()), )) }) as _ }; serve_router(cb, dioxus_cli_config::fullstack_address_or_localhost()).await; } /// Create a router that serves the dioxus application at the appropriate base path. /// /// This method automatically setups up: /// - Static asset serving /// - Mapping of base paths /// - Automatic registration of server functions /// - Handler to render the dioxus application /// - WebSocket handling for live reload and devtools /// - Hot-reloading /// - Async Runtime /// - Logging pub fn router(app: fn() -> Element) -> Router { let cfg = ServeConfig::new(); apply_base_path( Router::new().serve_dioxus_application(cfg.clone(), app), app, cfg, base_path().map(|s| s.to_string()), ) } /// Serve a fullstack dioxus application with a custom axum router. /// /// This function sets up an async runtime, enables the default dioxus logger, runs the provided initializer, /// and then starts an axum server with the returned router. 
/// /// The axum router will be bound to the address specified by the `IP` and `PORT` environment variables, /// defaulting to `127.0.0.1:8080` if not set. /// /// This function uses axum to block on serving the application, and will not return. pub fn serve<F>(mut serve_it: impl FnMut() -> F) -> ! where F: Future<Output = Result<Router, anyhow::Error>> + 'static, { let cb = move || Box::pin(serve_it()) as _; block_on( async move { serve_router(cb, dioxus_cli_config::fullstack_address_or_localhost()).await }, ); unreachable!("Serving a fullstack app should never return") } /// Serve a fullstack dioxus application with a custom axum router. /// /// This function enables the dioxus logger and then serves the axum server with hot-reloading support. /// /// To enable hot-reloading of the router, the provided `serve_callback` should return a new `Router` /// each time it is called. pub async fn serve_router( mut serve_callback: impl FnMut() -> Pin<Box<dyn Future<Output = Result<Router, anyhow::Error>>>>, addr: SocketAddr, ) { dioxus_logger::initialize_default(); let listener = TcpListener::bind(addr) .await .with_context(|| format!("Failed to bind to address {addr}")) .unwrap(); // If we're not in debug mode, just serve the app normally if !cfg!(debug_assertions) { axum::serve(listener, serve_callback().await.unwrap()) .await .unwrap(); return; } // Wire up the devtools connection. The sender only sends messages in dev. let (devtools_tx, mut devtools_rx) = futures_channel::mpsc::unbounded(); dioxus_devtools::connect(move |msg| _ = devtools_tx.unbounded_send(msg)); let mut hot_serve_callback = HotFn::current(serve_callback); let mut make_service = hot_serve_callback .call(()) .await .map(|router| router.into_make_service()) .unwrap(); let (shutdown_tx, _) = tokio::sync::broadcast::channel(1); let our_build_id = Some(dioxus_cli_config::build_id()); // Manually loop on accepting connections so we can also respond to devtools messages loop { let res = tokio::select! 
{ res = listener.accept() => Either::Left(res), Some(msg) = devtools_rx.next(), if !devtools_rx.is_terminated() => Either::Right(msg), else => continue }; match res { Either::Left(Ok((tcp_stream, _remote_addr))) => { let mut make_service = make_service.clone(); let mut shutdown_rx = shutdown_tx.subscribe(); tokio::task::spawn(async move { let tcp_stream = TokioIo::new(tcp_stream); std::future::poll_fn(|cx| { <IntoMakeService<Router> as tower::Service<Request>>::poll_ready( &mut make_service, cx, ) }) .await .expect("Infallible"); // upgrades needed for websockets let builder = HyperBuilder::new(TokioExecutor::new()); let connection = builder.serve_connection_with_upgrades( tcp_stream, TowerToHyperService::new( make_service .call(()) .await .unwrap() .map_request(|req: Request<Incoming>| req.map(Body::new)), ), ); tokio::select! { res = connection => { if let Err(_err) = res { // This error only appears when the client doesn't send a request and // terminate the connection. // // If client sends one request then terminate connection whenever, it doesn't // appear. } } _res = shutdown_rx.recv() => {} } }); } // Handle just hot-patches for now. // We don't do RSX hot-reload since usually the client handles that once the page is loaded. // // todo(jon): I *believe* SSR is resilient to RSX changes, but we should verify that... Either::Right(DevserverMsg::HotReload(HotReloadMsg { jump_table: Some(table), for_build_id, .. 
})) if for_build_id == our_build_id => { // Apply the hot-reload patch to the dioxus devtools first unsafe { dioxus_devtools::subsecond::apply_patch(table).unwrap() }; // Now recreate the router // We panic here because we don't want their app to continue in a maybe-corrupted state make_service = hot_serve_callback .call(()) .await .expect("Failed to create new router after hot-patch!") .into_make_service(); // Make sure to wipe out the renderer state so we don't have stale elements crate::document::reset_renderer(); _ = shutdown_tx.send(()); } // Explicitly don't handle RSX hot-reloads on the server // The client will handle that once the page is loaded. If we handled it here, _ => {} } } } fn block_on<T>(app_future: impl Future<Output = T>) { if let Ok(handle) = tokio::runtime::Handle::try_current() { handle.block_on(app_future); } else { tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .unwrap() .block_on(app_future); } } fn apply_base_path<M: 'static>( mut router: Router, root: impl ComponentFunction<(), M> + Send + Sync, cfg: ServeConfig, base_path: Option<String>, ) -> Router { if let Some(base_path) = base_path { let base_path = base_path.trim_matches('/'); // If there is a base path, nest the router under it and serve the root route manually // Nesting a route in axum only serves /base_path or /base_path/ not both router = Router::new().nest(&format!("/{base_path}/"), router).route( &format!("/{base_path}"), axum::routing::method_routing::get( |state: State<FullstackState>, mut request: Request<Body>| async move { // The root of the base path always looks like the root from dioxus fullstack *request.uri_mut() = "/".parse().unwrap(); FullstackState::render_handler(state, request).await }, ) .with_state(FullstackState::new(cfg, root)), ) } router }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack-server/src/lib.rs
packages/fullstack-server/src/lib.rs
#![doc = include_str!("../README.md")] #![doc(html_logo_url = "https://avatars.githubusercontent.com/u/79236386")] #![doc(html_favicon_url = "https://avatars.githubusercontent.com/u/79236386")] // #![warn(missing_docs)] #![cfg_attr(docsrs, feature(doc_cfg))] #![forbid(unexpected_cfgs)] // re-exported to make it possible to implement a custom Client without adding a separate // dependency on `bytes` pub use bytes::Bytes; pub use dioxus_fullstack_core::{ServerFnError, ServerFnResult}; pub use axum; pub use config::ServeConfig; pub use config::*; pub use document::ServerDocument; pub use http; pub use inventory; pub use server::*; pub mod redirect; #[cfg(not(target_arch = "wasm32"))] mod launch; #[cfg(not(target_arch = "wasm32"))] pub use launch::{launch, launch_cfg}; /// Implementations of the server side of the server function call. pub mod server; /// Types and traits for HTTP responses. // pub mod response; pub mod config; pub(crate) mod document; pub(crate) mod ssr; pub(crate) mod streaming; pub use launch::router; pub use launch::serve; pub mod serverfn; pub use serverfn::*; pub mod isrg; pub use isrg::*; mod index_html; pub(crate) use index_html::IndexHtml;
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack-server/src/document.rs
packages/fullstack-server/src/document.rs
//! On the server, we collect any elements that should be rendered into the head in the first frame of SSR. //! After the first frame, we have already sent down the head, so we can't modify it in place. The web client //! will hydrate the head with the correct contents once it loads. use std::cell::RefCell; use dioxus_core::Element; use dioxus_core_macro::rsx; use dioxus_document::{ Document, Eval, LinkProps, MetaProps, NoOpDocument, ScriptProps, StyleProps, }; use dioxus_html as dioxus_elements; use dioxus_ssr::Renderer; use parking_lot::RwLock; use std::sync::LazyLock; static RENDERER: LazyLock<RwLock<Renderer>> = LazyLock::new(|| RwLock::new(Renderer::new())); /// Reset the static renderer to a fresh state, clearing its cache. pub(crate) fn reset_renderer() { RENDERER.write().clear(); } #[derive(Default)] struct ServerDocumentInner { streaming: bool, title: Option<String>, meta: Vec<Element>, link: Vec<Element>, script: Vec<Element>, } /// A Document provider that collects all contents injected into the head for SSR rendering. #[derive(Default)] pub struct ServerDocument(RefCell<ServerDocumentInner>); impl ServerDocument { pub(crate) fn title(&self) -> Option<String> { let myself = self.0.borrow(); myself.title.as_ref().map(|title| { RENDERER .write() .render_element(rsx! { title { "{title}" } }) }) } pub(crate) fn render(&self, to: &mut impl std::fmt::Write) -> std::fmt::Result { let myself = self.0.borrow(); let element = rsx! { {myself.meta.iter().map(|m| rsx! { {m} })} {myself.link.iter().map(|l| rsx! { {l} })} {myself.script.iter().map(|s| rsx! { {s} })} }; RENDERER.write().render_element_to(to, element)?; Ok(()) } pub(crate) fn start_streaming(&self) { self.0.borrow_mut().streaming = true; } pub(crate) fn warn_if_streaming(&self) { if self.0.borrow().streaming { tracing::warn!("Attempted to insert content into the head after the initial streaming frame. 
Inserting content into the head only works during the initial render of SSR outside before resolving any suspense boundaries."); } } /// Write the head element into the serialized context for hydration /// We write true if the head element was written to the DOM during server side rendering #[track_caller] pub(crate) fn serialize_for_hydration(&self) { // We only serialize the head elements if the web document feature is enabled #[cfg(feature = "document")] { dioxus_fullstack_core::head_element_hydration_entry() .insert(&!self.0.borrow().streaming, std::panic::Location::caller()); } } } impl Document for ServerDocument { fn eval(&self, js: String) -> Eval { NoOpDocument.eval(js) } fn set_title(&self, title: String) { self.warn_if_streaming(); self.0.borrow_mut().title = Some(title); } fn create_meta(&self, props: MetaProps) { self.0.borrow_mut().meta.push(rsx! { meta { name: props.name, charset: props.charset, http_equiv: props.http_equiv, content: props.content, property: props.property, "data": props.data, ..props.additional_attributes, } }); } fn create_script(&self, props: ScriptProps) { let children = props.script_contents().ok(); self.0.borrow_mut().script.push(rsx! { script { src: props.src, defer: props.defer, crossorigin: props.crossorigin, fetchpriority: props.fetchpriority, integrity: props.integrity, nomodule: props.nomodule, nonce: props.nonce, referrerpolicy: props.referrerpolicy, r#type: props.r#type, ..props.additional_attributes, {children} } }); } fn create_style(&self, props: StyleProps) { let contents = props.style_contents().ok(); self.0.borrow_mut().script.push(rsx! { style { media: props.media, nonce: props.nonce, title: props.title, ..props.additional_attributes, {contents} } }) } fn create_link(&self, props: LinkProps) { self.0.borrow_mut().link.push(rsx! 
{ link { rel: props.rel, media: props.media, title: props.title, disabled: props.disabled, r#as: props.r#as, sizes: props.sizes, href: props.href, crossorigin: props.crossorigin, referrerpolicy: props.referrerpolicy, fetchpriority: props.fetchpriority, hreflang: props.hreflang, integrity: props.integrity, r#type: props.r#type, blocking: props.blocking, ..props.additional_attributes, } }) } fn create_head_component(&self) -> bool { self.warn_if_streaming(); self.serialize_for_hydration(); true } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack-server/src/redirect.rs
packages/fullstack-server/src/redirect.rs
use std::sync::OnceLock; /// A custom header that can be set with any value to indicate /// that the server function client should redirect to a new route. /// /// This is useful because it allows returning a value from the request, /// while also indicating that a redirect should follow. This cannot be /// done with an HTTP `3xx` status code, because the browser will follow /// that redirect rather than returning the desired data. pub const REDIRECT_HEADER: &str = "serverfnredirect"; /// A function that will be called if a server function returns a `3xx` status /// or the [`REDIRECT_HEADER`]. pub type RedirectHook = Box<dyn Fn(&str) + Send + Sync>; // allowed: not in a public API, and pretty straightforward #[allow(clippy::type_complexity)] pub(crate) static REDIRECT_HOOK: OnceLock<RedirectHook> = OnceLock::new(); /// Sets a function that will be called if a server function returns a `3xx` status /// or the [`REDIRECT_HEADER`]. Returns `Err(_)` if the hook has already been set. pub fn set_redirect_hook(hook: impl Fn(&str) + Send + Sync + 'static) -> Result<(), RedirectHook> { REDIRECT_HOOK.set(Box::new(hook)) } /// Calls the hook that has been set by [`set_redirect_hook`] to redirect to `loc`. pub fn call_redirect_hook(loc: &str) { if let Some(hook) = REDIRECT_HOOK.get() { hook(loc) } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack-server/src/serverfn.rs
packages/fullstack-server/src/serverfn.rs
use crate::FullstackState; use axum::{ body::Body, extract::{Request, State}, response::Response, routing::MethodRouter, }; use dioxus_fullstack_core::FullstackContext; use http::{Method, StatusCode}; use std::{pin::Pin, prelude::rust_2024::Future}; /// A function endpoint that can be called from the client. #[derive(Clone)] pub struct ServerFunction { path: &'static str, method: Method, handler: fn() -> MethodRouter<FullstackState>, } impl ServerFunction { /// Create a new server function object from a MethodRouter pub const fn new( method: Method, path: &'static str, handler: fn() -> MethodRouter<FullstackState>, ) -> Self { Self { path, method, handler, } } /// The path of the server function. pub fn path(&self) -> &'static str { self.path } /// The HTTP method the server function expects. pub fn method(&self) -> Method { self.method.clone() } /// Collect all globally registered server functions pub fn collect() -> Vec<&'static ServerFunction> { inventory::iter::<ServerFunction>().collect() } /// Create a `MethodRouter` for this server function that can be mounted on an `axum::Router`. /// /// This runs the handler inside the required `FullstackContext` scope and populates /// `FullstackContext` so that the handler can use those features. /// /// It also runs the server function inside a tokio `LocalPool` to allow !Send futures. pub fn method_router(&self) -> MethodRouter<FullstackState> { (self.handler)() } /// Creates a new `MethodRouter` for the given method and !Send handler. /// /// This is used internally by the `ServerFunction` to create the method router that this /// server function uses. 
#[allow(clippy::type_complexity)] pub fn make_handler( method: Method, handler: fn(State<FullstackContext>, Request) -> Pin<Box<dyn Future<Output = Response>>>, ) -> MethodRouter<FullstackState> { axum::routing::method_routing::on( method .try_into() .expect("MethodFilter only supports standard HTTP methods"), move |state: State<FullstackState>, request: Request| async move { use tracing::Instrument; let current_span = tracing::Span::current(); // Allow !Send futures by running in the render handlers pinned local pool let result = state.rt.spawn_pinned(move || async move { use dioxus_fullstack_core::FullstackContext; use http::header::{ACCEPT, LOCATION, REFERER}; use http::StatusCode; // todo: we're copying the parts here, but it'd be ideal if we didn't. // We can probably just pass the URI in so the matching logic can work and then // in the server function, do all extraction via FullstackContext. This ensures // calls to `.remove()` work as expected. let (parts, body) = request.into_parts(); let server_context = FullstackContext::new(parts.clone()); let request = axum::extract::Request::from_parts(parts, body); // store Accepts and Referrer in case we need them for redirect (below) let referrer = request.headers().get(REFERER).cloned(); let accepts_html = request .headers() .get(ACCEPT) .and_then(|v| v.to_str().ok()) .map(|v| v.contains("text/html")) .unwrap_or(false); server_context .clone() .scope(async move { // Run the next middleware / handler inside the server context let mut response = handler(State(server_context), request) .instrument(current_span) .await; let server_context = FullstackContext::current().expect( "Server context should be available inside the server context scope", ); // Get the extra response headers set during the handler and add them to the response let headers = server_context.take_response_headers(); if let Some(headers) = headers { response.headers_mut().extend(headers); } // it it accepts text/html (i.e., is a plain form post) and 
doesn't already have a // Location set, then redirect to Referer if accepts_html { if let Some(referrer) = referrer { let has_location = response.headers().get(LOCATION).is_some(); if !has_location { *response.status_mut() = StatusCode::FOUND; response.headers_mut().insert(LOCATION, referrer); } } } response }) .await }).await; match result { Ok(response) => response, Err(err) => Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) .body(Body::new(if cfg!(debug_assertions) { format!("Server function panicked: {}", err) } else { "Internal Server Error".to_string() })) .unwrap(), } }, ) } } impl inventory::Collect for ServerFunction { #[inline] fn registry() -> &'static inventory::Registry { static REGISTRY: inventory::Registry = inventory::Registry::new(); &REGISTRY } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack-server/src/server.rs
packages/fullstack-server/src/server.rs
use crate::{ ssr::{SSRError, SsrRendererPool}, ServeConfig, ServerFunction, }; use axum::{ body::Body, extract::State, http::{Request, StatusCode}, response::{IntoResponse, Response}, routing::*, }; use dioxus_core::{ComponentFunction, VirtualDom}; use http::header::*; use std::path::{Path, PathBuf}; use std::sync::Arc; use tokio_util::task::LocalPoolHandle; use tower::util::MapResponse; use tower::ServiceExt; use tower_http::services::fs::ServeFileSystemResponseBody; /// A extension trait with utilities for integrating Dioxus with your Axum router. pub trait DioxusRouterExt { /// Serves the static WASM for your Dioxus application (except the generated index.html). /// /// # Example /// ```rust, no_run /// # #![allow(non_snake_case)] /// # use dioxus::prelude::*; /// use dioxus_server::DioxusRouterExt; /// /// #[tokio::main] /// async fn main() -> anyhow::Result<()> { /// let addr = dioxus::cli_config::fullstack_address_or_localhost(); /// let router = axum::Router::new() /// // Server side render the application, serve static assets, and register server functions /// .serve_static_assets() /// // Server render the application /// // ... /// .with_state(dioxus_server::FullstackState::headless()); /// let listener = tokio::net::TcpListener::bind(addr).await?; /// axum::serve(listener, router).await?; /// Ok(()) /// } /// ``` fn serve_static_assets(self) -> Router<FullstackState>; /// Serves the Dioxus application. This will serve a complete server side rendered application. /// This will serve static assets, server render the application, register server functions, and integrate with hot reloading. 
/// /// # Example /// ```rust, no_run /// # #![allow(non_snake_case)] /// # use dioxus::prelude::*; /// use dioxus_server::{DioxusRouterExt, ServeConfig}; /// /// #[tokio::main] /// async fn main() { /// let addr = dioxus::cli_config::fullstack_address_or_localhost(); /// let router = axum::Router::new() /// // Server side render the application, serve static assets, and register server functions /// .serve_dioxus_application(ServeConfig::new(), app); /// let listener = tokio::net::TcpListener::bind(addr).await.unwrap(); /// axum::serve(listener, router).await.unwrap(); /// } /// /// fn app() -> Element { /// rsx! { "Hello World" } /// } /// ``` fn serve_dioxus_application<M: 'static>( self, cfg: ServeConfig, app: impl ComponentFunction<(), M> + Send + Sync, ) -> Router<()>; /// Registers server functions with the default handler. /// /// # Example /// ```rust, no_run /// # use dioxus::prelude::*; /// # use dioxus_server::DioxusRouterExt; /// #[tokio::main] /// async fn main() { /// let addr = dioxus::cli_config::fullstack_address_or_localhost(); /// let router = axum::Router::new() /// // Register server functions routes with the default handler /// .register_server_functions() /// .with_state(dioxus_server::FullstackState::headless()); /// let listener = tokio::net::TcpListener::bind(addr).await.unwrap(); /// axum::serve(listener, router).await.unwrap(); /// } /// ``` #[allow(dead_code)] fn register_server_functions(self) -> Router<FullstackState>; /// Serves a Dioxus application without static assets. /// Sets up server function routes and rendering endpoints only. /// /// Useful for WebAssembly environments or when static assets /// are served by another system. /// /// # Example /// ```rust, no_run /// # use dioxus::prelude::*; /// # use dioxus_server::{DioxusRouterExt, ServeConfig}; /// #[tokio::main] /// async fn main() { /// let router = axum::Router::new() /// .serve_api_application(ServeConfig::new(), app) /// .into_make_service(); /// // ... 
/// } /// /// fn app() -> Element { /// rsx! { "Hello World" } /// } /// ``` fn serve_api_application<M: 'static>( self, cfg: ServeConfig, app: impl ComponentFunction<(), M> + Send + Sync, ) -> Router<()> where Self: Sized; } #[cfg(not(target_arch = "wasm32"))] impl DioxusRouterExt for Router<FullstackState> { fn register_server_functions(mut self) -> Router<FullstackState> { use std::collections::HashSet; let mut seen = HashSet::new(); for func in ServerFunction::collect() { if seen.insert(format!("{} {}", func.method(), func.path())) { tracing::info!( "Registering server function: {} {}", func.method(), func.path() ); self = self.route(func.path(), func.method_router()) } } self } fn serve_static_assets(self) -> Router<FullstackState> { let Some(public_path) = public_path() else { return self; }; // Serve all files in public folder except index.html serve_dir_cached(self, &public_path, &public_path) } fn serve_api_application<M: 'static>( self, cfg: ServeConfig, app: impl ComponentFunction<(), M> + Send + Sync, ) -> Router<()> { self.register_server_functions() .fallback(get(FullstackState::render_handler)) .with_state(FullstackState::new(cfg, app)) } fn serve_dioxus_application<M: 'static>( self, cfg: ServeConfig, app: impl ComponentFunction<(), M> + Send + Sync, ) -> Router<()> { self.register_server_functions() .serve_static_assets() .fallback(get(FullstackState::render_handler)) .with_state(FullstackState::new(cfg, app)) } } /// SSR renderer handler for Axum with added context injection. /// /// # Example /// ```rust,no_run /// #![allow(non_snake_case)] /// use std::sync::{Arc, Mutex}; /// /// use axum::routing::get; /// use dioxus::prelude::*; /// use dioxus_server::{FullstackState, render_handler, ServeConfig}; /// /// fn app() -> Element { /// rsx! { /// "hello!" 
/// } /// } /// /// #[tokio::main] /// async fn main() { /// let addr = dioxus::cli_config::fullstack_address_or_localhost(); /// let router = axum::Router::new() /// // Register server functions, etc. /// // Note you can use `register_server_functions_with_context` /// // to inject the context into server functions running outside /// // of an SSR render context. /// .fallback(get(render_handler)) /// .with_state(FullstackState::new(ServeConfig::new(), app)); /// /// let listener = tokio::net::TcpListener::bind(addr).await.unwrap(); /// axum::serve(listener, router).await.unwrap(); /// } /// ``` pub async fn render_handler( State(state): State<FullstackState>, request: Request<Body>, ) -> impl IntoResponse { FullstackState::render_handler(State(state), request).await } /// State used by [`FullstackState::render_handler`] to render a dioxus component with axum #[derive(Clone)] pub struct FullstackState { config: ServeConfig, build_virtual_dom: Arc<dyn Fn() -> VirtualDom + Send + Sync>, renderers: Arc<SsrRendererPool>, pub(crate) rt: LocalPoolHandle, } impl FullstackState { /// Create a headless [`FullstackState`] without a root component. /// /// This won't render pages, but can still be used to register server functions and serve static assets. 
pub fn headless() -> Self { let rt = LocalPoolHandle::new( std::thread::available_parallelism() .map(usize::from) .unwrap_or(1), ); Self { renderers: Arc::new(SsrRendererPool::new(4, None)), build_virtual_dom: Arc::new(|| { panic!("No root component provided for headless FullstackState") }), config: ServeConfig::new(), rt, } } /// Create a new [`FullstackState`] pub fn new<M: 'static>( config: ServeConfig, root: impl ComponentFunction<(), M> + Send + Sync + 'static, ) -> Self { let rt = LocalPoolHandle::new( std::thread::available_parallelism() .map(usize::from) .unwrap_or(1), ); Self { renderers: Arc::new(SsrRendererPool::new(4, config.incremental.clone())), build_virtual_dom: Arc::new(move || VirtualDom::new_with_props(root.clone(), ())), config, rt, } } /// Create a new [`FullstackState`] with a custom [`VirtualDom`] factory. This method can be /// used to pass context into the root component of your application. pub fn new_with_virtual_dom_factory( config: ServeConfig, build_virtual_dom: impl Fn() -> VirtualDom + Send + Sync + 'static, ) -> Self { let rt = LocalPoolHandle::new( std::thread::available_parallelism() .map(usize::from) .unwrap_or(1), ); Self { renderers: Arc::new(SsrRendererPool::new(4, config.incremental.clone())), config, build_virtual_dom: Arc::new(build_virtual_dom), rt, } } /// Set the [`ServeConfig`] for this [`FullstackState`] pub fn with_config(mut self, config: ServeConfig) -> Self { self.config = config; self } /// SSR renderer handler for Axum with added context injection. /// /// # Example /// ```rust,no_run /// #![allow(non_snake_case)] /// use std::sync::{Arc, Mutex}; /// /// use axum::routing::get; /// use dioxus::prelude::*; /// use dioxus_server::{FullstackState, render_handler, ServeConfig}; /// /// fn app() -> Element { /// rsx! { /// "hello!" 
/// } /// } /// /// #[tokio::main] /// async fn main() { /// let addr = dioxus::cli_config::fullstack_address_or_localhost(); /// let router = axum::Router::new() /// // Register server functions, etc. /// // Note you can use `register_server_functions_with_context` /// // to inject the context into server functions running outside /// // of an SSR render context. /// .fallback(get(render_handler)) /// .with_state(FullstackState::new(ServeConfig::new(), app)) /// .into_make_service(); /// /// let listener = tokio::net::TcpListener::bind(addr).await.unwrap(); /// axum::serve(listener, router).await.unwrap(); /// } /// ``` pub async fn render_handler(State(state): State<Self>, request: Request<Body>) -> Response { let (parts, _) = request.into_parts(); let response = state .renderers .clone() .render_to(parts, &state.config, &state.rt, { let build_virtual_dom = state.build_virtual_dom.clone(); let context_providers = state.config.context_providers.clone(); move || { let mut vdom = build_virtual_dom(); for state in context_providers.as_slice() { vdom.insert_any_root_context(state()); } vdom } }) .await; match response { Ok((status, headers, freshness, rx)) => { let mut response = Response::builder() .status(status.status) .header(CONTENT_TYPE, "text/html; charset=utf-8") .body(Body::from_stream(rx)) .unwrap(); // Write our freshness header freshness.write(response.headers_mut()); // write the other headers set by the user for (key, value) in headers.into_iter() { if let Some(key) = key { response.headers_mut().insert(key, value); } } response } Err(SSRError::Incremental(e)) => { tracing::error!("Failed to render page: {}", e); Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) .body(e.to_string()) .unwrap() .into_response() } Err(SSRError::HttpError { status, message }) => Response::builder() .status(status) .body(Body::from(message.unwrap_or_else(|| { status .canonical_reason() .unwrap_or("An unknown error occurred") .to_string() }))) .unwrap(), } } } /// 
Get the path to the public assets directory to serve static files from pub(crate) fn public_path() -> Option<PathBuf> { if let Ok(path) = std::env::var("DIOXUS_PUBLIC_PATH") { return Some(PathBuf::from(path)); } // The CLI always bundles static assets into the exe/public directory Some( std::env::current_exe() .ok()? .parent() .unwrap() .join("public"), ) } fn serve_dir_cached<S>(mut router: Router<S>, public_path: &Path, directory: &Path) -> Router<S> where S: Send + Sync + Clone + 'static, { use tower_http::services::ServeFile; let dir = std::fs::read_dir(directory) .unwrap_or_else(|e| panic!("Couldn't read public directory at {:?}: {}", &directory, e)); for entry in dir.flatten() { let path = entry.path(); // Don't serve the index.html file. The SSR handler will generate it. if path == public_path.join("index.html") { continue; } let route = format!( "/{}", path.strip_prefix(public_path) .unwrap() .iter() .map(|segment| segment.to_string_lossy()) .collect::<Vec<_>>() .join("/") ); if path.is_dir() { router = serve_dir_cached(router, public_path, &path); } else { let serve_file = ServeFile::new(&path).precompressed_br(); // All cached assets are served at the root of the asset directory. If we know an asset // is hashed for cache busting, we can cache the response on the client side forever. If // the asset changes, the hash in the path will also change and the client will refetch it. 
if file_name_looks_immutable(&route) { router = router.nest_service(&route, cache_response_forever(serve_file)) } else { router = router.nest_service(&route, serve_file) } } } router } type MappedAxumService<S> = MapResponse< S, fn(Response<ServeFileSystemResponseBody>) -> Response<ServeFileSystemResponseBody>, >; fn cache_response_forever<S>(service: S) -> MappedAxumService<S> where S: ServiceExt<Request<Body>, Response = Response<ServeFileSystemResponseBody>>, { service.map_response(|mut response: Response<ServeFileSystemResponseBody>| { response.headers_mut().insert( CACHE_CONTROL, HeaderValue::from_static("public, max-age=31536000, immutable"), ); response }) } fn file_name_looks_immutable(file_name: &str) -> bool { // Check if the file name looks like a hash (e.g., "main-dxh12345678.js") file_name.rsplit_once("-dxh").is_some_and(|(_, hash)| { hash.chars() .take_while(|c| *c != '.') .all(|c| c.is_ascii_hexdigit()) }) } #[test] fn test_file_name_looks_immutable() { assert!(file_name_looks_immutable("main-dxh12345678.js")); assert!(file_name_looks_immutable("style-dxhabcdef.css")); assert!(!file_name_looks_immutable("index.html")); assert!(!file_name_looks_immutable("script.js")); assert!(!file_name_looks_immutable("main-dxh1234wyz.js")); assert!(!file_name_looks_immutable("main-dxh12345678-invalid.js")); }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack-server/src/ssr.rs
packages/fullstack-server/src/ssr.rs
//! A shared pool of renderers for efficient server side rendering. use crate::isrg::{ CachedRender, IncrementalRenderer, IncrementalRendererConfig, IncrementalRendererError, RenderFreshness, }; use crate::streaming::{Mount, StreamingRenderer}; use crate::{document::ServerDocument, ServeConfig}; use dioxus_cli_config::base_path; use dioxus_core::{ consume_context, has_context, try_consume_context, DynamicNode, ErrorContext, Runtime, ScopeId, SuspenseContext, TemplateNode, VNode, VirtualDom, }; use dioxus_fullstack_core::{history::provide_fullstack_history_context, HttpError, ServerFnError}; use dioxus_fullstack_core::{FullstackContext, StreamingStatus}; use dioxus_fullstack_core::{HydrationContext, SerializedHydrationData}; use dioxus_router::ParseRouteError; use dioxus_ssr::Renderer; use futures_channel::mpsc::Sender; use futures_util::{Stream, StreamExt}; use http::{request::Parts, HeaderMap, StatusCode}; use std::{ collections::HashMap, fmt::Write, iter::Peekable, rc::Rc, sync::{Arc, RwLock}, }; use tokio_util::task::LocalPoolHandle; use crate::StreamingMode; /// Errors that can occur during server side rendering before the initial chunk is sent down pub enum SSRError { /// An error from the incremental renderer. 
This should result in a 500 code Incremental(IncrementalRendererError), HttpError { status: StatusCode, message: Option<String>, }, } /// A suspense boundary that is pending with a placeholder in the client struct PendingSuspenseBoundary { mount: Mount, children: Vec<ScopeId>, } pub(crate) struct SsrRendererPool { renderers: RwLock<Vec<Renderer>>, incremental_cache: Option<RwLock<IncrementalRenderer>>, } impl SsrRendererPool { pub(crate) fn new(initial_size: usize, incremental: Option<IncrementalRendererConfig>) -> Self { let renderers = RwLock::new((0..initial_size).map(|_| Self::pre_renderer()).collect()); Self { renderers, incremental_cache: incremental.map(|cache| RwLock::new(cache.build())), } } /// Look for a cached route in the incremental cache and send it into the render channel if it exists fn check_cached_route( &self, route: &str, render_into: &mut Sender<Result<String, IncrementalRendererError>>, ) -> Option<RenderFreshness> { let incremental = self.incremental_cache.as_ref()?; if let Ok(mut incremental) = incremental.write() { match incremental.get(route) { Ok(Some(cached_render)) => { let CachedRender { freshness, response, .. } = cached_render; _ = render_into.start_send( String::from_utf8(response.to_vec()) .map_err(|err| IncrementalRendererError::Other(err.into())), ); return Some(freshness); } Err(e) => { tracing::error!("Failed to get route \"{route}\" from incremental cache: {e}"); } _ => {} } } None } /// Render a virtual dom into a stream. 
This method will return immediately and continue streaming the result in the background /// The streaming is canceled when the stream the function returns is dropped pub(crate) async fn render_to( self: Arc<Self>, parts: Parts, cfg: &ServeConfig, rt: &LocalPoolHandle, virtual_dom_factory: impl FnOnce() -> VirtualDom + Send + Sync + 'static, ) -> Result< ( HttpError, HeaderMap, RenderFreshness, impl Stream<Item = Result<String, IncrementalRendererError>>, ), SSRError, > { struct ReceiverWithDrop { receiver: futures_channel::mpsc::Receiver<Result<String, IncrementalRendererError>>, cancel_task: Option<tokio::task::JoinHandle<()>>, } impl Stream for ReceiverWithDrop { type Item = Result<String, IncrementalRendererError>; fn poll_next( mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll<Option<Self::Item>> { self.receiver.poll_next_unpin(cx) } } // When we drop the stream, we need to cancel the task that is feeding values to the stream impl Drop for ReceiverWithDrop { fn drop(&mut self) { if let Some(cancel_task) = self.cancel_task.take() { cancel_task.abort(); } } } let route = parts .uri .path_and_query() .ok_or_else(|| SSRError::HttpError { status: StatusCode::BAD_REQUEST, message: None, })? 
.to_string(); let (mut into, rx) = futures_channel::mpsc::channel::<Result<String, IncrementalRendererError>>(1000); let (initial_result_tx, initial_result_rx) = futures_channel::oneshot::channel(); // before we even spawn anything, we can check synchronously if we have the route cached if let Some(freshness) = self.check_cached_route(&route, &mut into) { return Ok(( HttpError { status: StatusCode::OK, message: None, }, HeaderMap::new(), freshness, ReceiverWithDrop { receiver: rx, cancel_task: None, }, )); } let mut renderer = self .renderers .write() .unwrap() .pop() .unwrap_or_else(Self::pre_renderer); let myself = self.clone(); let streaming_mode = cfg.streaming_mode; let cfg = cfg.clone(); let create_render_future = move || async move { let mut virtual_dom = virtual_dom_factory(); let document = Rc::new(ServerDocument::default()); virtual_dom.provide_root_context(document.clone()); // If there is a base path, trim the base path from the route and add the base path formatting to the // history provider let history = if let Some(base_path) = base_path() { let base_path = base_path.trim_matches('/'); let base_path = format!("/{base_path}"); let route = route.strip_prefix(&base_path).unwrap_or(&route); dioxus_history::MemoryHistory::with_initial_path(route).with_prefix(base_path) } else { dioxus_history::MemoryHistory::with_initial_path(&route) }; // Provide the document and streaming context to the root of the app let streaming_context = virtual_dom.in_scope(ScopeId::ROOT, || FullstackContext::new(parts)); virtual_dom.provide_root_context(document.clone() as Rc<dyn dioxus_document::Document>); virtual_dom.provide_root_context(streaming_context.clone()); virtual_dom.in_scope(ScopeId::ROOT, || { // Wrap the memory history in a fullstack history provider to provide the initial route for hydration provide_fullstack_history_context(history); // Provide a hydration compatible error boundary that serializes errors for the client 
dioxus_core::provide_create_error_boundary( dioxus_fullstack_core::init_error_boundary, ); }); // rebuild the virtual dom virtual_dom.rebuild_in_place(); // If streaming is disabled, wait for the virtual dom to finish all suspense work // before rendering anything if streaming_mode == StreamingMode::Disabled { virtual_dom.wait_for_suspense().await; } else { // Otherwise, just wait for the streaming context to signal the initial chunk is ready loop { // Check if the router has finished and set the streaming context to finished let streaming_context_finished = virtual_dom .in_scope(ScopeId::ROOT, || streaming_context.streaming_state()) == StreamingStatus::InitialChunkCommitted; // Or if this app isn't using the router and has finished suspense let suspense_finished = !virtual_dom.suspended_tasks_remaining(); if streaming_context_finished || suspense_finished { break; } // Wait for new async work that runs during suspense (mainly use_server_futures) virtual_dom.wait_for_suspense_work().await; // Do that async work virtual_dom.render_suspense_immediate().await; } } // check if there are any errors from the root error boundary let error = virtual_dom.in_scope(ScopeId::ROOT_ERROR_BOUNDARY, || { consume_context::<ErrorContext>().error() }); if let Some(error) = error { let mut status_code = None; let mut out_message = None; // If the errors include an `HttpError` or `StatusCode` or `ServerFnError`, we need // to try and return the appropriate status code if let Some(error) = error.downcast_ref::<HttpError>() { status_code = Some(error.status); out_message = error.message.clone(); } if let Some(error) = error.downcast_ref::<StatusCode>() { status_code = Some(*error); } // todo - the user is allowed to return anything that impls `From<ServerFnError>` // we need to eventually be able to downcast that and get the status code from it if let Some(ServerFnError::ServerError { message, code, .. 
}) = error.downcast_ref() { status_code = Some( (*code) .try_into() .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR), ); out_message = Some(message.clone()); } // If there was an error while routing, return the error with a 404 status // Return a routing error if any of the errors were a routing error if let Some(routing_error) = error.downcast_ref::<ParseRouteError>().cloned() { status_code = Some(StatusCode::NOT_FOUND); out_message = Some(routing_error.to_string()); } // If we captured anything that produces a status code, we should return that status code. if let Some(status_code) = status_code { _ = initial_result_tx.send(Err(SSRError::HttpError { status: status_code, message: out_message, })); return; } _ = initial_result_tx.send(Err(SSRError::Incremental( IncrementalRendererError::Other(error), ))); return; } // Check the FullstackContext in case the user set the statuscode manually or via a layout. let http_status = streaming_context.current_http_status(); let headers = streaming_context .take_response_headers() .unwrap_or_default(); // Now that we handled any errors from rendering, we can send the initial ok result _ = initial_result_tx.send(Ok((http_status, headers))); // Wait long enough to assemble the `<head>` of the document before starting to stream let mut pre_body = String::new(); if let Err(err) = Self::render_head(&cfg, &mut pre_body, &virtual_dom) { _ = into.start_send(Err(err)); return; } let stream = Arc::new(StreamingRenderer::new(pre_body, into)); let scope_to_mount_mapping = Arc::new(RwLock::new(HashMap::new())); renderer.pre_render = true; { let scope_to_mount_mapping = scope_to_mount_mapping.clone(); let stream = stream.clone(); renderer.set_render_components(Self::streaming_render_component_callback( stream, scope_to_mount_mapping, )); } macro_rules! 
throw_error { ($e:expr) => { stream.close_with_error($e); return; }; } // Render the initial frame with loading placeholders let mut initial_frame = renderer.render(&virtual_dom); // Along with the initial frame, we render the html after the main element, but before the body tag closes. This should include the script that starts loading the wasm bundle. if let Err(err) = Self::render_after_main(&cfg, &mut initial_frame, &virtual_dom) { throw_error!(err); } stream.render(initial_frame); // After the initial render, we need to resolve suspense while virtual_dom.suspended_tasks_remaining() { virtual_dom.wait_for_suspense_work().await; let resolved_suspense_nodes = virtual_dom.render_suspense_immediate().await; // Just rerender the resolved nodes for scope in resolved_suspense_nodes { let pending_suspense_boundary = { let mut lock = scope_to_mount_mapping.write().unwrap(); lock.remove(&scope) }; // If the suspense boundary was immediately removed, it may not have a mount. We can just skip resolving it if let Some(pending_suspense_boundary) = pending_suspense_boundary { let mut resolved_chunk = String::new(); // After we replace the placeholder in the dom with javascript, we need to send down the resolved data so that the client can hydrate the node let render_suspense = |into: &mut String| { renderer.reset_hydration(); renderer.render_scope(into, &virtual_dom, scope) }; let resolved_data = Self::serialize_server_data(&virtual_dom, scope); if let Err(err) = stream.replace_placeholder( pending_suspense_boundary.mount, render_suspense, resolved_data, &mut resolved_chunk, ) { throw_error!(IncrementalRendererError::RenderError(err)); } stream.render(resolved_chunk); // Freeze the suspense boundary to prevent future reruns of any child nodes of the suspense boundary if let Some(suspense) = SuspenseContext::downcast_suspense_boundary_from_scope( &virtual_dom.runtime(), scope, ) { suspense.freeze(); // Go to every child suspense boundary and add an error boundary. 
Since we cannot rerun any nodes above the child suspense boundary, // we need to capture the errors and send them to the client as it resolves virtual_dom.in_runtime(|| { for &suspense_scope in pending_suspense_boundary.children.iter() { Self::start_capturing_errors(suspense_scope); } }); } } } } // After suspense is done, we render the html after the body let mut post_streaming = String::new(); if let Err(err) = Self::render_after_body(&cfg, &mut post_streaming) { throw_error!(err); } // If incremental rendering is enabled, add the new render to the cache without the streaming bits if let Some(incremental) = &self.incremental_cache { let mut cached_render = String::new(); if let Err(err) = Self::render_head(&cfg, &mut cached_render, &virtual_dom) { throw_error!(err); } renderer.reset_hydration(); if let Err(err) = renderer.render_to(&mut cached_render, &virtual_dom) { throw_error!(IncrementalRendererError::RenderError(err)); } if let Err(err) = Self::render_after_main(&cfg, &mut cached_render, &virtual_dom) { throw_error!(err); } cached_render.push_str(&post_streaming); if let Ok(mut incremental) = incremental.write() { let _ = incremental.cache(route, cached_render); } } stream.render(post_streaming); renderer.reset_render_components(); myself.renderers.write().unwrap().push(renderer); }; // Spawn the render future onto the local pool let join_handle = rt.spawn_pinned(create_render_future); // Wait for the initial result which determines the status code let (status, headers) = initial_result_rx .await .map_err(|err| SSRError::Incremental(IncrementalRendererError::Other(err.into())))??; Ok(( status, headers, RenderFreshness::now(None), ReceiverWithDrop { receiver: rx, cancel_task: Some(join_handle), }, )) } fn pre_renderer() -> Renderer { let mut renderer = Renderer::default(); renderer.pre_render = true; renderer } /// Create the streaming render component callback. It will keep track of what scopes are mounted to what pending /// suspense boundaries in the DOM. 
///
/// This mapping is used to replace the DOM mount with the resolved contents once the suspense boundary is finished.
fn streaming_render_component_callback(
    stream: Arc<StreamingRenderer<IncrementalRendererError>>,
    scope_to_mount_mapping: Arc<RwLock<HashMap<ScopeId, PendingSuspenseBoundary>>>,
) -> impl Fn(&mut Renderer, &mut dyn Write, &VirtualDom, ScopeId) -> std::fmt::Result
       + Send
       + Sync
       + 'static {
    // We use a stack to keep track of what suspense boundaries we are nested in to add children to the correct boundary
    // The stack starts with the root scope because the root is a suspense boundary
    let pending_suspense_boundaries_stack = RwLock::new(vec![]);
    move |renderer, to, vdom, scope| {
        // A scope gets placeholder treatment only if it is a suspense boundary that still has
        // suspended tasks; resolved boundaries render inline like any other scope.
        let is_suspense_boundary =
            SuspenseContext::downcast_suspense_boundary_from_scope(&vdom.runtime(), scope)
                .filter(|s| s.has_suspended_tasks())
                .is_some();
        if is_suspense_boundary {
            // Render the placeholder; while the closure runs, this scope sits on top of the
            // nesting stack so any nested boundaries attach themselves to it as children.
            let mount = stream.render_placeholder(
                |to| {
                    {
                        pending_suspense_boundaries_stack
                            .write()
                            .unwrap()
                            .push(scope);
                    }
                    let out = renderer.render_scope(to, vdom, scope);
                    {
                        pending_suspense_boundaries_stack.write().unwrap().pop();
                    }
                    out
                },
                &mut *to,
            )?;
            // Add the suspense boundary to the list of pending suspense boundaries
            // We will replace the mount with the resolved contents later once the suspense boundary is resolved
            let mut scope_to_mount_mapping_write = scope_to_mount_mapping.write().unwrap();
            scope_to_mount_mapping_write.insert(
                scope,
                PendingSuspenseBoundary {
                    mount,
                    children: vec![],
                },
            );
            // Add the scope to the list of children of the parent suspense boundary
            let pending_suspense_boundaries_stack =
                pending_suspense_boundaries_stack.read().unwrap();
            // If there is a parent suspense boundary, add the scope to the list of children
            // This suspense boundary will start capturing errors when the parent is resolved
            if let Some(parent) = pending_suspense_boundaries_stack.last() {
                let parent = scope_to_mount_mapping_write.get_mut(parent).unwrap();
                parent.children.push(scope);
            }
            // Otherwise this is a root suspense boundary, so we need to start capturing errors immediately
            else {
                vdom.in_runtime(|| {
                    Self::start_capturing_errors(scope);
                });
            }
        } else {
            renderer.render_scope(to, vdom, scope)?
        }
        Ok(())
    }
}

/// Start capturing errors at a suspense boundary. If the parent suspense boundary is frozen, we need to capture the errors in the suspense boundary
/// and send them to the client to continue bubbling up
fn start_capturing_errors(suspense_scope: ScopeId) {
    // Add an error boundary to the scope. We serialize the suspense error boundary separately so we can use
    // the normal in memory ErrorContext here
    Runtime::current().in_scope(suspense_scope, || {
        dioxus_core::provide_context(ErrorContext::new(None))
    });
}

/// Collect all hydration data reachable from `scope` and serialize it for the client.
fn serialize_server_data(virtual_dom: &VirtualDom, scope: ScopeId) -> SerializedHydrationData {
    // After we replace the placeholder in the dom with javascript, we need to send down the resolved data so that the client can hydrate the node
    // Extract any data we serialized for hydration (from server futures)
    let html_data = Self::extract_from_suspense_boundary(virtual_dom, scope);

    // serialize the server state into a base64 string
    html_data.serialized()
}

/// Walks through the suspense boundary in a depth first order and extracts the data from the context API.
/// We use depth first order instead of relying on the order the hooks are called in because during suspense on the server, the order that futures are run in may be non deterministic.
pub(crate) fn extract_from_suspense_boundary(
    vdom: &VirtualDom,
    scope: ScopeId,
) -> HydrationContext {
    let data = HydrationContext::default();
    // Errors first, then the depth-first walk of serializable server context.
    Self::serialize_errors(&data, vdom, scope);
    Self::take_from_scope(&data, vdom, scope);
    data
}

/// Get the errors from the suspense boundary
fn serialize_errors(context: &HydrationContext, vdom: &VirtualDom, scope: ScopeId) {
    // If there is an error boundary on the suspense boundary, grab the error from the context API
    // and throw it on the client so that it bubbles up to the nearest error boundary
    let error = vdom.in_scope(scope, || {
        try_consume_context::<ErrorContext>().and_then(|error_context| error_context.error())
    });
    context
        .error_entry()
        .insert(&error, std::panic::Location::caller());
}

/// Merge any serializable server context stored on `scope` (and, recursively, its children)
/// into `context`.
fn take_from_scope(context: &HydrationContext, vdom: &VirtualDom, scope: ScopeId) {
    vdom.in_scope(scope, || {
        // Grab any serializable server context from this scope
        let other: Option<HydrationContext> = has_context();
        if let Some(other) = other {
            context.extend(&other);
        }
    });

    // then continue to any children
    if let Some(scope) = vdom.get_scope(scope) {
        // If this is a suspense boundary, move into the children first (even if they are suspended) because that will be run first on the client
        if let Some(suspense_boundary) =
            SuspenseContext::downcast_suspense_boundary_from_scope(&vdom.runtime(), scope.id())
        {
            if let Some(node) = suspense_boundary.suspended_nodes() {
                Self::take_from_vnode(context, vdom, &node);
            }
        }
        if let Some(node) = scope.try_root_node() {
            Self::take_from_vnode(context, vdom, node);
        }
    }
}

/// Walk one vnode's template, visiting its dynamic nodes in the same order dioxus core does.
fn take_from_vnode(context: &HydrationContext, vdom: &VirtualDom, vnode: &VNode) {
    let template = &vnode.template;
    let mut dynamic_nodes_iter = template.node_paths.iter().copied().enumerate().peekable();
    for (root_idx, node) in template.roots.iter().enumerate() {
        match node {
            TemplateNode::Element { .. } => {
                // dioxus core runs nodes in an odd order to not mess up template order. We need to match
                // that order here
                let (start, end) =
                    match Self::collect_dyn_node_range(&mut dynamic_nodes_iter, root_idx as u8) {
                        Some((a, b)) => (a, b),
                        None => continue,
                    };
                let reversed_iter = (start..=end).rev();

                for dynamic_node_id in reversed_iter {
                    let dynamic_node = &vnode.dynamic_nodes[dynamic_node_id];
                    Self::take_from_dynamic_node(
                        context,
                        vdom,
                        vnode,
                        dynamic_node,
                        dynamic_node_id,
                    );
                }
            }
            TemplateNode::Dynamic { id } => {
                // Take a dynamic node off the depth first iterator
                _ = dynamic_nodes_iter.next().unwrap();
                let dynamic_node = &vnode.dynamic_nodes[*id];
                Self::take_from_dynamic_node(context, vdom, vnode, dynamic_node, *id);
            }
            _ => {}
        }
    }
}

/// Recurse into a single dynamic node: components descend into their mounted scope,
/// fragments into each child vnode; other dynamic node kinds carry no context.
fn take_from_dynamic_node(
    context: &HydrationContext,
    vdom: &VirtualDom,
    vnode: &VNode,
    dyn_node: &DynamicNode,
    dynamic_node_index: usize,
) {
    match dyn_node {
        DynamicNode::Component(comp) => {
            if let Some(scope) = comp.mounted_scope(dynamic_node_index, vnode, vdom) {
                Self::take_from_scope(context, vdom, scope.id());
            }
        }
        DynamicNode::Fragment(nodes) => {
            for node in nodes {
                Self::take_from_vnode(context, vdom, node);
            }
        }
        _ => {}
    }
}

// This should have the same behavior as the collect_dyn_node_range method in core
// Find the index of the first and last dynamic node under a root index
fn collect_dyn_node_range(
    dynamic_nodes: &mut Peekable<impl Iterator<Item = (usize, &'static [u8])>>,
    root_idx: u8,
) -> Option<(usize, usize)> {
    // A path's first byte is the root index it lives under; bail if the next dynamic node
    // belongs to a different root.
    let start = match dynamic_nodes.peek() {
        Some((idx, [first, ..])) if *first == root_idx => *idx,
        _ => return None,
    };

    let mut end = start;

    while let Some((idx, p)) =
        dynamic_nodes.next_if(|(_, p)| matches!(p, [idx, ..] if *idx == root_idx))
    {
        // Single-element paths are the root itself, not a nested dynamic node.
        if p.len() == 1 {
            continue;
        }

        end = idx;
    }

    Some((start, end))
}

/// Render any content before the head of the page.
pub fn render_head<R: std::fmt::Write>(
    cfg: &ServeConfig,
    to: &mut R,
    virtual_dom: &VirtualDom,
) -> Result<(), IncrementalRendererError> {
    // Prefer a title provided by the app's document provider; otherwise fall back to the
    // title from the index.html template.
    let title = {
        let document: Option<Rc<ServerDocument>> =
            virtual_dom.in_scope(ScopeId::ROOT, dioxus_core::try_consume_context);
        // Collect any head content from the document provider and inject that into the head
        document.and_then(|document| document.title())
    };

    to.write_str(&cfg.index.head_before_title)?;
    if let Some(title) = title {
        to.write_str(&title)?;
    } else {
        to.write_str(&cfg.index.title)?;
    }
    to.write_str(&cfg.index.head_after_title)?;

    let document = virtual_dom.in_scope(ScopeId::ROOT, try_consume_context::<Rc<ServerDocument>>);
    if let Some(document) = document {
        // Collect any head content from the document provider and inject that into the head
        document.render(to)?;

        // Enable a warning when inserting contents into the head during streaming
        document.start_streaming();
    }

    Self::render_before_body(cfg, to)?;

    Ok(())
}

/// Render any content before the body of the page.
fn render_before_body<R: std::fmt::Write>(
    cfg: &ServeConfig,
    to: &mut R,
) -> Result<(), IncrementalRendererError> {
    to.write_str(&cfg.index.close_head)?;

    // // #[cfg(feature = "document")]
    // {
    // Inject the script that wires up out-of-order streaming hydration on the client.
    use dioxus_interpreter_js::INITIALIZE_STREAMING_JS;
    write!(to, "<script>{INITIALIZE_STREAMING_JS}</script>")?;
    // }

    Ok(())
}

/// Render all content after the main element of the page.
pub fn render_after_main<R: std::fmt::Write>(
    cfg: &ServeConfig,
    to: &mut R,
    virtual_dom: &VirtualDom,
) -> Result<(), IncrementalRendererError> {
    // Collect the initial server data from the root node. For most apps, no use_server_futures will be resolved initially, so this will be full on `None`s.
    // Sending down those Nones are still important to tell the client not to run the use_server_futures that are already running on the backend
    let resolved_data = SsrRendererPool::serialize_server_data(virtual_dom, ScopeId::ROOT);
    // We always send down the data required to hydrate components on the client
    let raw_data = resolved_data.data;
    write!(
        to,
        r#"<script>window.initial_dioxus_hydration_data="{raw_data}";"#,
    )?;
    #[cfg(debug_assertions)]
    {
        // In debug mode, we also send down the type names and locations of the serialized data
        let debug_types = &resolved_data.debug_types;
        let debug_locations = &resolved_data.debug_locations;
        write!(
            to,
            r#"window.initial_dioxus_hydration_debug_types={debug_types};"#,
        )?;
        write!(
            to,
            r#"window.initial_dioxus_hydration_debug_locations={debug_locations};"#,
        )?;
    }
    write!(to, r#"</script>"#,)?;
    to.write_str(&cfg.index.post_main)?;

    Ok(())
}

/// Render all content after the body of the page.
pub fn render_after_body<R: std::fmt::Write>(
    cfg: &ServeConfig,
    to: &mut R,
) -> Result<(), IncrementalRendererError> {
    to.write_str(&cfg.index.after_closing_body_tag)?;

    Ok(())
}
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack-server/src/streaming.rs
packages/fullstack-server/src/streaming.rs
//! There are two common ways to render suspense:
//! 1. Stream the HTML in order - this will work even if javascript is disabled, but if there is something slow at the top of your page, and fast at the bottom, nothing will render until the slow part is done
//! 2. Render placeholders and stream the HTML out of order - this will only work if javascript is enabled. This lets you render any parts of your page that resolve quickly, and then render the rest of the page as it becomes available
//!
//! Dioxus currently uses the second out of order streaming approach which requires javascript. The rendering structure is as follows:
//! ```html
//! // Initial content is sent down with placeholders
//! <div>
//!     Header
//!     <div class="flex flex-col">
//!         // If we reach a suspense placeholder that may be replaced later, we insert a template node with a unique id to replace later
//!         <div>Loading user info...</div>
//!     </div>
//!     Footer
//! </div>
//! // After the initial render is done, we insert divs that are hidden with new content.
//! // We use divs instead of templates for better SEO
//! <script>
//!     // Code to hook up hydration replacement
//! </script>
//! <div hidden id="ds-1-r">
//!     <div>Final HTML</div>
//! </div>
//! <script>
//!     window.dx_hydrate(2, "suspenseboundarydata");
//! </script>
//! ```

use dioxus_fullstack_core::SerializedHydrationData;
use futures_channel::mpsc::Sender;
use std::{
    fmt::{Display, Write},
    sync::{Arc, RwLock},
};

/// Sections are identified by a unique id based on the suspense path. We only track the path of suspense boundaries because the client may render different components than the server.
#[derive(Clone, Debug, Default)]
struct MountPath {
    // The path of the parent boundary, or `None` for the root.
    parent: Option<Arc<MountPath>>,
    // Index of this placeholder among its siblings.
    id: usize,
}

impl MountPath {
    /// Create the path for the first child of this boundary.
    fn child(&self) -> Self {
        Self {
            parent: Some(Arc::new(self.clone())),
            id: 0,
        }
    }
}

impl Display for MountPath {
    // Renders as a comma-separated path from root to leaf, e.g. "0,2,1".
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if let Some(parent) = &self.parent {
            write!(f, "{},", parent)?;
        }
        write!(f, "{}", self.id)
    }
}

/// Streams rendered HTML chunks into a channel, tracking the current suspense path so
/// placeholders can be replaced out of order later.
pub(crate) struct StreamingRenderer<E = std::convert::Infallible> {
    // Outgoing chunks; each message is either HTML or a terminal error.
    channel: RwLock<Sender<Result<String, E>>>,
    // The path of the boundary currently being rendered.
    current_path: RwLock<MountPath>,
}

impl<E> StreamingRenderer<E> {
    /// Create a new streaming renderer with the given head that renders into a channel
    pub(crate) fn new(
        before_body: impl Display,
        mut render_into: Sender<Result<String, E>>,
    ) -> Self {
        let start_html = before_body.to_string();
        _ = render_into.start_send(Ok(start_html));

        Self {
            channel: render_into.into(),
            current_path: Default::default(),
        }
    }

    /// Render a new chunk of html that will never change
    pub(crate) fn render(&self, html: impl Display) {
        _ = self
            .channel
            .write()
            .unwrap()
            .start_send(Ok(html.to_string()));
    }

    /// Render a new chunk of html that may change
    pub(crate) fn render_placeholder<W: Write + ?Sized>(
        &self,
        html: impl FnOnce(&mut W) -> std::fmt::Result,
        into: &mut W,
    ) -> Result<Mount, std::fmt::Error> {
        let id = self.current_path.read().unwrap().clone();
        // Increment the id for the next placeholder
        self.current_path.write().unwrap().id += 1;
        // While we are inside the placeholder, set the suspense path to the suspense boundary that we are rendering
        let old_path = std::mem::replace(&mut *self.current_path.write().unwrap(), id.child());
        html(into)?;
        // Restore the old path
        *self.current_path.write().unwrap() = old_path;
        Ok(Mount { id })
    }

    /// Replace a placeholder that was rendered previously
    pub(crate) fn replace_placeholder<W: Write + ?Sized>(
        &self,
        id: Mount,
        html: impl FnOnce(&mut W) -> std::fmt::Result,
        resolved_data: SerializedHydrationData,
        into: &mut W,
    ) -> std::fmt::Result {
        // Then replace the suspense placeholder with the new content
        write!(into, r#"<div id="ds-{id}-r" hidden>"#)?;
        // While we are inside the placeholder, set the suspense path to the suspense boundary that we are rendering
        let old_path = std::mem::replace(&mut *self.current_path.write().unwrap(), id.id.child());
        html(into)?;
        // Restore the old path
        *self.current_path.write().unwrap() = old_path;
        // dx_hydrate accepts 2-4 arguments. The first two are required, the rest are optional
        // The arguments are:
        // 1. The id of the nodes we are hydrating under
        // 2. The serialized data required to hydrate those components
        // 3. (in debug mode) The type names of the serialized data
        // 4. (in debug mode) The locations of the serialized data
        write!(
            into,
            r#"</div><script>window.dx_hydrate([{id}], "{}""#,
            resolved_data.data
        )?;
        #[cfg(debug_assertions)]
        {
            // In debug mode, we also send down the type names and locations of the serialized data
            let debug_types = &resolved_data.debug_types;
            let debug_locations = &resolved_data.debug_locations;
            write!(into, r#", {debug_types}, {debug_locations}"#,)?;
        }
        write!(into, r#")</script>"#)?;
        Ok(())
    }

    /// Close the stream with an error
    pub(crate) fn close_with_error(&self, error: E) {
        _ = self.channel.write().unwrap().start_send(Err(error));
    }
}

/// A mounted placeholder in the dom that may change in the future
#[derive(Clone, Debug)]
pub(crate) struct Mount {
    id: MountPath,
}

impl Display for Mount {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.id)
    }
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack-server/src/index_html.rs
packages/fullstack-server/src/index_html.rs
use anyhow::Context;
use std::path::Path;

/// An `IndexHtml` represents the contents of an `index.html` file used to serve a web application.
///
/// This defines the static portion of your web application, typically generated by a tool like `dx`
/// in conjunction with wasm-bindgen.
///
/// This structure expects a well-formed HTML document split into several sections:
/// - `head_before_title`: The portion of the `<head>` section before the `<title>` tag.
/// - `title`: The `<title>` tag and its contents.
/// - `head_after_title`: The portion of the `<head>` section after the `<title>` tag.
/// - `close_head`: The closing `</head>` tag and any content following it.
/// - `post_main`: The content following the main application container (e.g., `<div id="main">`).
/// - `after_closing_body_tag`: The content following the closing `</body>` tag.
///
/// These fields are not explicitly exposed as part of the API, but are critical for dioxus-fullstack
/// to properly inject server-rendered content and client-side bootstrapping scripts into the HTML.
///
/// The simplest HTML document that satisfies this structure is:
///
/// ```html
/// <html>
///     <head> </head>
///     <body>
///         <div id="main"></div>
///     </body>
/// </html>
/// ```
#[derive(Clone, Debug)]
pub struct IndexHtml {
    pub(crate) head_before_title: String,
    pub(crate) head_after_title: String,
    pub(crate) title: String,
    pub(crate) close_head: String,
    pub(crate) post_main: String,
    pub(crate) after_closing_body_tag: String,
}

impl IndexHtml {
    /// Create a new `IndexHtml` from the raw contents of an `index.html` file.
    ///
    /// This function will parse the `index.html` and split it into sections for easier manipulation.
    /// The `root_id` parameter specifies the id of the main application container (e.g., "main")
    /// which your app will render into.
    pub fn new(contents: &str, root_id: &str) -> Result<IndexHtml, anyhow::Error> {
        // Split the document around the `id="root_id"` attribute of the app container.
        let (pre_main, post_main) = contents.split_once(&format!("id=\"{root_id}\""))
            .with_context(|| format!("Failed to find id=\"{root_id}\" in index.html. The id is used to inject the application into the page."))?;

        // Everything up to (and including) the `>` closing the container's open tag stays on
        // the "pre" side.
        let post_main = post_main.split_once('>').with_context(|| {
            format!("Failed to find closing > after id=\"{root_id}\" in index.html.")
        })?;

        let (pre_main, post_main) = (
            pre_main.to_string() + &format!("id=\"{root_id}\"") + post_main.0 + ">",
            post_main.1.to_string(),
        );

        let (head, close_head) = pre_main.split_once("</head>").with_context(|| {
            format!("Failed to find closing </head> tag after id=\"{root_id}\" in index.html.")
        })?;
        let (head, close_head) = (head.to_string(), "</head>".to_string() + close_head);

        let (post_main, after_closing_body_tag) =
            post_main.split_once("</body>").with_context(|| {
                format!("Failed to find closing </body> tag after id=\"{root_id}\" in index.html.")
            })?;

        // Strip out the head if it exists
        let mut head_before_title = String::new();
        let mut head_after_title = head;
        let mut title = String::new();
        if let Some((new_head_before_title, new_title)) = head_after_title.split_once("<title>") {
            let (new_title, new_head_after_title) = new_title
                .split_once("</title>")
                .context("Failed to find closing </title> tag after <title> in index.html.")?;
            title = format!("<title>{new_title}</title>");
            head_before_title = new_head_before_title.to_string();
            head_after_title = new_head_after_title.to_string();
        }

        Ok(IndexHtml {
            head_before_title,
            head_after_title,
            title,
            close_head,
            post_main: post_main.to_string(),
            after_closing_body_tag: "</body>".to_string() + after_closing_body_tag,
        })
    }

    /// Load an `IndexHtml` from a file at the given path.
    ///
    /// This function reads the contents of the file and parses it into an `IndexHtml`.
    /// The `id` parameter specifies the id of the main application container (e.g., "main").
    pub fn from_file(path: &Path, id: &str) -> Result<IndexHtml, anyhow::Error> {
        let contents = std::fs::read_to_string(path)
            .with_context(|| format!("Failed to read index.html from {}", path.display()))?;
        IndexHtml::new(&contents, id)
    }

    /// Create a default `IndexHtml` suitable for server-side rendering (SSR) only applications.
    /// This will not include any customizations to the HTML nor any JavaScript to bootstrap a client-side app.
    pub fn ssr_only() -> Self {
        const DEFAULT: &str = r#"<!DOCTYPE html>
<html>
<head>
</head>
<body>
<div id="main"></div>
</body>
</html>"#;
        Self::new(DEFAULT, "main").expect("Failed to load default index.html")
    }
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack-server/src/isrg/config.rs
packages/fullstack-server/src/isrg/config.rs
#![allow(non_snake_case)]

#[cfg(not(target_arch = "wasm32"))]
use crate::isrg::fs_cache::PathMapFn;
use crate::isrg::memory_cache::InMemoryCache;
use crate::IncrementalRenderer;
use std::{
    path::{Path, PathBuf},
    time::Duration,
};

/// A configuration for the incremental renderer.
#[derive(Clone)]
pub struct IncrementalRendererConfig {
    // Root directory for the on-disk render cache.
    static_dir: PathBuf,
    // Maximum number of entries held by the in-memory LRU cache; 0 disables it.
    memory_cache_limit: usize,
    // Cached renders older than this are invalidated on access; `None` means never.
    invalidate_after: Option<Duration>,
    clear_cache: bool,
    pre_render: bool,
    #[cfg(not(target_arch = "wasm32"))]
    map_path: Option<PathMapFn>,
}

impl Default for IncrementalRendererConfig {
    fn default() -> Self {
        Self::new()
    }
}

impl IncrementalRendererConfig {
    /// Create a new incremental renderer configuration.
    pub fn new() -> Self {
        Self {
            static_dir: PathBuf::from("./static"),
            memory_cache_limit: 10000,
            invalidate_after: None,
            clear_cache: false,
            pre_render: false,
            #[cfg(not(target_arch = "wasm32"))]
            map_path: None,
        }
    }

    /// Clear the cache on startup (default: false)
    pub fn clear_cache(mut self, clear_cache: bool) -> Self {
        self.clear_cache = clear_cache;
        self
    }

    /// Set a mapping from the route to the file path. This will override the default mapping configured with `static_dir`.
    /// The function should return the path to the folder to store the index.html file in.
    #[cfg(not(target_arch = "wasm32"))]
    pub fn map_path<F: Fn(&str) -> PathBuf + Send + Sync + 'static>(mut self, map_path: F) -> Self {
        self.map_path = Some(std::sync::Arc::new(map_path));
        self
    }

    /// Set the static directory.
    pub fn static_dir<P: AsRef<Path>>(mut self, static_dir: P) -> Self {
        self.static_dir = static_dir.as_ref().to_path_buf();
        self
    }

    /// Set the memory cache limit.
    pub const fn memory_cache_limit(mut self, memory_cache_limit: usize) -> Self {
        self.memory_cache_limit = memory_cache_limit;
        self
    }

    /// Set the invalidation time.
    pub fn invalidate_after(mut self, invalidate_after: Duration) -> Self {
        self.invalidate_after = Some(invalidate_after);
        self
    }

    /// Set whether to include hydration ids in the pre-rendered html.
    pub fn pre_render(mut self, pre_render: bool) -> Self {
        self.pre_render = pre_render;
        self
    }

    /// Build the incremental renderer.
    // NOTE(review): `pre_render` is stored on the config but not consumed by `build` —
    // presumably read by other code paths; confirm it is not dead configuration.
    pub fn build(self) -> IncrementalRenderer {
        let mut renderer = IncrementalRenderer {
            #[cfg(not(target_arch = "wasm32"))]
            file_system_cache: super::fs_cache::FileSystemCache::new(
                self.static_dir.clone(),
                self.map_path,
                self.invalidate_after,
            ),
            memory_cache: InMemoryCache::new(self.memory_cache_limit, self.invalidate_after),
            invalidate_after: self.invalidate_after,
        };
        if self.clear_cache {
            renderer.invalidate_all();
        }
        renderer
    }
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack-server/src/isrg/freshness.rs
packages/fullstack-server/src/isrg/freshness.rs
use std::time::Duration;

use chrono::{DateTime, Utc};

/// Information about the freshness of a rendered response
#[derive(Debug, Clone, Copy)]
pub struct RenderFreshness {
    /// The age of the rendered response
    age: u64,
    /// The maximum age of the rendered response
    max_age: Option<u64>,
    /// The time the response was rendered
    timestamp: DateTime<Utc>,
}

impl RenderFreshness {
    /// Create new freshness information
    pub(crate) fn new(age: u64, max_age: u64, timestamp: DateTime<Utc>) -> Self {
        Self {
            age,
            max_age: Some(max_age),
            timestamp,
        }
    }

    /// Create new freshness information with only the age
    pub(crate) fn new_age(age: u64, timestamp: DateTime<Utc>) -> Self {
        Self {
            age,
            max_age: None,
            timestamp,
        }
    }

    /// Create new freshness information from a timestamp
    pub(crate) fn created_at(timestamp: DateTime<Utc>, max_age: Option<Duration>) -> Self {
        Self {
            // `unsigned_abs` makes the age the absolute number of seconds between the
            // timestamp and now, regardless of subtraction order.
            age: timestamp
                .signed_duration_since(Utc::now())
                .num_seconds()
                .unsigned_abs(),
            max_age: max_age.map(|d| d.as_secs()),
            timestamp,
        }
    }

    /// Create new freshness information at the current time
    pub fn now(max_age: Option<Duration>) -> Self {
        Self {
            age: 0,
            max_age: max_age.map(|d| d.as_secs()),
            timestamp: Utc::now(),
        }
    }

    /// Get the age of the rendered response in seconds
    pub fn age(&self) -> u64 {
        self.age
    }

    /// Get the maximum age of the rendered response in seconds
    pub fn max_age(&self) -> Option<u64> {
        self.max_age
    }

    /// Get the time the response was rendered
    pub fn timestamp(&self) -> DateTime<Utc> {
        self.timestamp
    }

    /// Write the freshness to the response headers.
    pub fn write(&self, headers: &mut http::HeaderMap<http::HeaderValue>) {
        let age = self.age();
        headers.insert(http::header::AGE, age.into());
        if let Some(max_age) = self.max_age() {
            headers.insert(
                http::header::CACHE_CONTROL,
                http::HeaderValue::from_str(&format!("max-age={}", max_age)).unwrap(),
            );
        }
    }
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack-server/src/isrg/mod.rs
packages/fullstack-server/src/isrg/mod.rs
//! Incremental file based incremental rendering

#![allow(non_snake_case)]

mod config;
mod freshness;
#[cfg(not(target_arch = "wasm32"))]
mod fs_cache;
mod memory_cache;

use std::time::Duration;

use chrono::Utc;
pub use config::*;
pub use freshness::*;

use self::memory_cache::InMemoryCache;

/// A render that was cached from a previous render.
pub struct CachedRender<'a> {
    /// The route that was rendered
    pub route: String,
    /// The freshness information for the rendered response
    pub freshness: RenderFreshness,
    /// The rendered response
    pub response: &'a [u8],
}

/// An incremental renderer.
pub struct IncrementalRenderer {
    pub(crate) memory_cache: InMemoryCache,
    #[cfg(not(target_arch = "wasm32"))]
    pub(crate) file_system_cache: fs_cache::FileSystemCache,
    // Shared max-age used to build freshness info for newly cached renders.
    invalidate_after: Option<Duration>,
}

impl IncrementalRenderer {
    /// Create a new incremental renderer builder.
    pub fn builder() -> IncrementalRendererConfig {
        IncrementalRendererConfig::new()
    }

    /// Remove a route from the cache.
    pub fn invalidate(&mut self, route: &str) {
        self.memory_cache.invalidate(route);
        #[cfg(not(target_arch = "wasm32"))]
        self.file_system_cache.invalidate(route);
    }

    /// Remove all routes from the cache.
    pub fn invalidate_all(&mut self) {
        self.memory_cache.clear();
        #[cfg(not(target_arch = "wasm32"))]
        self.file_system_cache.clear();
    }

    /// Cache a rendered response.
    ///
    /// ```rust
    /// # use dioxus_server::IncrementalRenderer;
    /// # let mut renderer = IncrementalRenderer::builder().build();
    /// let route = "/index".to_string();
    /// let response = b"<html><body>Hello world</body></html>";
    /// renderer.cache(route, response).unwrap();
    /// ```
    pub fn cache(
        &mut self,
        route: String,
        html: impl Into<Vec<u8>>,
    ) -> Result<RenderFreshness, IncrementalRendererError> {
        let timestamp = Utc::now();
        let html = html.into();
        // Write-through: persist to disk first (native only), then populate the memory cache.
        #[cfg(not(target_arch = "wasm32"))]
        self.file_system_cache
            .put(route.clone(), timestamp, html.clone())?;
        self.memory_cache.put(route, timestamp, html);
        Ok(RenderFreshness::created_at(
            timestamp,
            self.invalidate_after,
        ))
    }

    /// Try to get a cached response for a route.
    ///
    /// ```rust
    /// # use dioxus_server::IncrementalRenderer;
    /// # let mut renderer = IncrementalRenderer::builder().build();
    /// # let route = "/index".to_string();
    /// # let response = b"<html><body>Hello world</body></html>";
    /// # renderer.cache(route, response).unwrap();
    /// let route = "/index";
    /// let response = renderer.get(route).unwrap();
    /// assert_eq!(response.unwrap().response, b"<html><body>Hello world</body></html>");
    /// ```
    ///
    /// If the route is not cached, `None` is returned.
    ///
    /// ```rust
    /// # use dioxus_server::IncrementalRenderer;
    /// # let mut renderer = IncrementalRenderer::builder().build();
    /// let route = "/index";
    /// let response = renderer.get(route).unwrap();
    /// assert!(response.is_none());
    /// ```
    pub fn get<'a>(
        &'a mut self,
        route: &str,
    ) -> Result<Option<CachedRender<'a>>, IncrementalRendererError> {
        let Self {
            memory_cache,
            #[cfg(not(target_arch = "wasm32"))]
            file_system_cache,
            ..
        } = self;

        #[allow(unused)]
        enum FsGetError {
            NotPresent,
            Error(IncrementalRendererError),
        }

        // The borrow checker prevents us from simply using a match/if and returning early. Instead we need to use the more complex closure API
        // non lexical lifetimes will make this possible (it works with polonius)
        let or_insert = || {
            // check the file cache
            #[cfg(not(target_arch = "wasm32"))]
            return match file_system_cache.get(route) {
                Ok(Some((freshness, bytes))) => Ok((freshness.timestamp(), bytes)),
                Ok(None) => Err(FsGetError::NotPresent),
                Err(e) => Err(FsGetError::Error(e)),
            };

            #[allow(unreachable_code)]
            Err(FsGetError::NotPresent)
        };

        match memory_cache.try_get_or_insert(route, or_insert) {
            Ok(Some((freshness, bytes))) => Ok(Some(CachedRender {
                route: route.to_string(),
                freshness,
                response: bytes,
            })),
            Err(FsGetError::NotPresent) | Ok(None) => Ok(None),
            Err(FsGetError::Error(e)) => Err(e),
        }
    }
}

/// An error that can occur while rendering a route or retrieving a cached route.
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum IncrementalRendererError {
    /// A formatting error occurred while rendering a route.
    #[error("RenderError: {0}")]
    RenderError(#[from] std::fmt::Error),
    /// An IO error occurred while rendering a route.
    #[error("IoError: {0}")]
    IoError(#[from] std::io::Error),
    /// An error occurred while rendering a route.
    #[error("Unknown error: {0}")]
    Other(#[from] dioxus_core::CapturedError),
}
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack-server/src/isrg/memory_cache.rs
packages/fullstack-server/src/isrg/memory_cache.rs
//! Incremental file based incremental rendering #![allow(non_snake_case)] use chrono::offset::Utc; use chrono::DateTime; use rustc_hash::FxHasher; use std::{hash::BuildHasherDefault, num::NonZeroUsize}; use super::freshness::RenderFreshness; pub(crate) struct InMemoryCache { #[allow(clippy::type_complexity)] lru: Option<lru::LruCache<String, (DateTime<Utc>, Vec<u8>), BuildHasherDefault<FxHasher>>>, invalidate_after: Option<std::time::Duration>, } impl InMemoryCache { pub fn new(memory_cache_limit: usize, invalidate_after: Option<std::time::Duration>) -> Self { Self { lru: NonZeroUsize::new(memory_cache_limit) .map(|limit| lru::LruCache::with_hasher(limit, Default::default())), invalidate_after, } } pub fn clear(&mut self) { if let Some(cache) = &mut self.lru { cache.clear(); } } pub fn put(&mut self, route: String, timestamp: DateTime<Utc>, data: Vec<u8>) { if let Some(cache) = &mut self.lru { cache.put(route, (timestamp, data)); } } pub fn invalidate(&mut self, route: &str) { if let Some(cache) = &mut self.lru { cache.pop(route); } } pub fn try_get_or_insert<'a, F: FnOnce() -> Result<(DateTime<Utc>, Vec<u8>), E>, E>( &'a mut self, route: &str, or_insert: F, ) -> Result<Option<(RenderFreshness, &'a [u8])>, E> { if let Some(memory_cache) = self.lru.as_mut() { let (timestamp, _) = memory_cache.try_get_or_insert(route.to_string(), or_insert)?; let now = Utc::now(); let elapsed = timestamp.signed_duration_since(now); let age = elapsed.num_seconds(); // The cache entry is out of date, so we need to remove it. if let Some(invalidate_after) = self.invalidate_after { // If we can't convert to a std duration, the duration is negative and hasn't elapsed yet. if let Ok(std_elapsed) = elapsed.to_std() { if std_elapsed > invalidate_after { tracing::trace!("memory cache out of date"); memory_cache.pop(route); return Ok(None); } } } // We need to reborrow because we may have invalidated the lifetime if the route was removed. // We know it wasn't because we returned... 
but rust doesn't understand that. let (timestamp, cache_hit) = memory_cache.get(route).unwrap(); return match self.invalidate_after { Some(invalidate_after) => { tracing::trace!("memory cache hit"); let max_age = invalidate_after.as_secs(); let freshness = RenderFreshness::new(age as u64, max_age, *timestamp); Ok(Some((freshness, cache_hit))) } None => { tracing::trace!("memory cache hit"); let freshness = RenderFreshness::new_age(age as u64, *timestamp); Ok(Some((freshness, cache_hit))) } }; } Ok(None) } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/fullstack-server/src/isrg/fs_cache.rs
packages/fullstack-server/src/isrg/fs_cache.rs
#![allow(non_snake_case)] use chrono::{DateTime, Utc}; use super::{IncrementalRendererError, RenderFreshness}; use std::{path::PathBuf, sync::Arc, time::SystemTime}; pub(crate) type PathMapFn = Arc<dyn Fn(&str) -> PathBuf + Send + Sync>; pub(crate) struct FileSystemCache { static_dir: PathBuf, map_path: PathMapFn, invalidate_after: Option<std::time::Duration>, } impl FileSystemCache { pub fn new( static_dir: PathBuf, map_path: Option<PathMapFn>, invalidate_after: Option<std::time::Duration>, ) -> Self { Self { static_dir: static_dir.clone(), map_path: map_path.unwrap_or_else(move || { Arc::new(move |route: &str| { let (before_query, _) = route.split_once('?').unwrap_or((route, "")); let mut path = static_dir.clone(); for segment in before_query.split('/') { path.push(segment); } path }) }), invalidate_after, } } pub fn put( &mut self, route: String, timestamp: DateTime<Utc>, data: Vec<u8>, ) -> Result<(), IncrementalRendererError> { use std::io::Write; let file_path = self.route_as_path(&route, timestamp); if let Some(parent) = file_path.parent() { if !parent.exists() { std::fs::create_dir_all(parent)?; } } let file = std::fs::File::create(file_path)?; let mut file = std::io::BufWriter::new(file); file.write_all(&data)?; Ok(()) } pub fn clear(&mut self) { // clear the static directory of index.html files contained within folders for entry in std::fs::read_dir(&self.static_dir) .into_iter() .flatten() .flatten() { if entry.file_type().map(|ft| ft.is_dir()).unwrap_or(false) { for entry in walkdir::WalkDir::new(entry.path()).into_iter().flatten() { if entry.file_type().is_file() { if let Some(fnmae) = entry.file_name().to_str() { if fnmae.ends_with(".html") { if let Err(err) = std::fs::remove_file(entry.path()) { tracing::error!("Failed to remove file: {}", err); } } } } } } } } pub fn invalidate(&mut self, route: &str) { let file_path = self.find_file(route).unwrap().full_path; if let Err(err) = std::fs::remove_file(file_path) { tracing::error!("Failed to remove 
file: {}", err); } } pub fn get( &self, route: &str, ) -> Result<Option<(RenderFreshness, Vec<u8>)>, IncrementalRendererError> { if let Some(file_path) = self.find_file(route) { if let Some(freshness) = file_path.freshness(self.invalidate_after) { if let Ok(file) = std::fs::File::open(file_path.full_path) { let mut file = std::io::BufReader::new(file); let mut cache_hit = Vec::new(); std::io::copy(&mut file, &mut cache_hit)?; tracing::trace!("file cache hit {:?}", route); return Ok(Some((freshness, cache_hit))); } } } Ok(None) } fn find_file(&self, route: &str) -> Option<ValidCachedPath> { let mut file_path = (self.map_path)(route); if let Some(deadline) = self.invalidate_after { // find the first file that matches the route and is a html file file_path.push("index"); if let Ok(dir) = std::fs::read_dir(file_path) { for entry in dir.flatten() { if let Some(cached_path) = ValidCachedPath::try_from_path(entry.path()) { if let Ok(elapsed) = cached_path.timestamp.elapsed() { if elapsed < deadline { // The timestamp is valid, return the file return Some(cached_path); } } // if the timestamp is invalid or passed, delete the file if let Err(err) = std::fs::remove_file(entry.path()) { tracing::error!("Failed to remove file: {}", err); } } } None } else { None } } else { file_path.push("index.html"); file_path.exists().then_some({ ValidCachedPath { full_path: file_path, timestamp: SystemTime::now(), } }) } } fn route_as_path(&self, route: &str, timestamp: DateTime<Utc>) -> PathBuf { let mut file_path = (self.map_path)(route); if self.track_timestamps() { file_path.push("index"); file_path.push(timestamp_to_string(timestamp)); } else { file_path.push("index"); } file_path.set_extension("html"); file_path } fn track_timestamps(&self) -> bool { self.invalidate_after.is_some() } } pub(crate) struct ValidCachedPath { pub(crate) full_path: PathBuf, pub(crate) timestamp: std::time::SystemTime, } impl ValidCachedPath { pub fn try_from_path(value: PathBuf) -> Option<Self> { if 
value.extension() != Some(std::ffi::OsStr::new("html")) { return None; } let timestamp = decode_timestamp(value.file_stem()?.to_str()?)?; let full_path = value; Some(Self { full_path, timestamp, }) } pub fn freshness(&self, max_age: Option<std::time::Duration>) -> Option<RenderFreshness> { let age = self.timestamp.elapsed().ok()?.as_secs(); let max_age = max_age.map(|max_age| max_age.as_secs()); Some(RenderFreshness::new(age, max_age?, self.timestamp.into())) } } fn decode_timestamp(timestamp: &str) -> Option<std::time::SystemTime> { let timestamp = u64::from_str_radix(timestamp, 16).ok()?; Some(std::time::UNIX_EPOCH + std::time::Duration::from_secs(timestamp)) } pub(crate) fn timestamp_to_string(timestamp: DateTime<Utc>) -> String { let timestamp = timestamp .signed_duration_since(DateTime::<Utc>::from(std::time::UNIX_EPOCH)) .num_seconds(); format!("{:x}", timestamp) }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/rsx-rosetta/src/lib.rs
packages/rsx-rosetta/src/lib.rs
#![doc = include_str!("../README.md")] #![doc(html_logo_url = "https://avatars.githubusercontent.com/u/79236386")] #![doc(html_favicon_url = "https://avatars.githubusercontent.com/u/79236386")] use convert_case::{Case, Casing}; use dioxus_html::{map_html_attribute_to_rsx, map_html_element_to_rsx}; use dioxus_rsx::{ Attribute, AttributeName, AttributeValue, BodyNode, CallBody, Component, Element, ElementName, HotLiteral, TemplateBody, TextNode, }; pub use html_parser::{Dom, Node}; use htmlentity::entity::ICodedDataTrait; use proc_macro2::{Ident, Span}; use syn::{punctuated::Punctuated, LitStr}; /// Convert an HTML DOM tree into an RSX CallBody pub fn rsx_from_html(dom: &Dom) -> CallBody { let nodes = dom .children .iter() .filter_map(rsx_node_from_html) .collect::<Vec<_>>(); let template = TemplateBody::new(nodes); CallBody::new(template) } /// Convert an HTML Node into an RSX BodyNode /// /// If the node is a comment, it will be ignored since RSX doesn't support comments pub fn rsx_node_from_html(node: &Node) -> Option<BodyNode> { use AttributeName::*; use AttributeValue::*; match node { Node::Text(text) => Some(BodyNode::Text(TextNode::from_text( &htmlentity::entity::decode(text.as_bytes()) .to_string() .ok()?, ))), Node::Element(el) => { let el_name = if let Some(name) = map_html_element_to_rsx(&el.name) { ElementName::Ident(Ident::new(name, Span::call_site())) } else { // if we don't recognize it and it has a dash, we assume it's a web component if el.name.contains('-') { ElementName::Custom(LitStr::new(&el.name, Span::call_site())) } else { // otherwise, it might be an element that isn't supported yet ElementName::Ident(Ident::new(&el.name.to_case(Case::Snake), Span::call_site())) } }; let mut attributes: Vec<_> = el .attributes .iter() .map(|(name, value)| { // xlink attributes are deprecated and technically we can't handle them. 
// todo(jon): apply the namespaces to the attributes let (_namespace, name) = name.split_once(':').unwrap_or(("", name)); let value = HotLiteral::from_raw_text(value.as_deref().unwrap_or("false")); let attr = if let Some(name) = map_html_attribute_to_rsx(name) { let name = if let Some(name) = name.strip_prefix("r#") { Ident::new_raw(name, Span::call_site()) } else { Ident::new(name, Span::call_site()) }; BuiltIn(name) } else { // If we don't recognize the attribute, we assume it's a custom attribute Custom(LitStr::new(name, Span::call_site())) }; Attribute::from_raw(attr, AttrLiteral(value)) }) .collect(); let class = el.classes.join(" "); if !class.is_empty() { attributes.push(Attribute::from_raw( BuiltIn(Ident::new("class", Span::call_site())), AttrLiteral(HotLiteral::from_raw_text(&class)), )); } if let Some(id) = &el.id { attributes.push(Attribute::from_raw( BuiltIn(Ident::new("id", Span::call_site())), AttrLiteral(HotLiteral::from_raw_text(id)), )); } // the html-parser crate we use uses a HashMap for attributes. This leads to a // non-deterministic order of attributes. 
// Sort them here attributes.sort_by(|a, b| a.name.to_string().cmp(&b.name.to_string())); let children = el.children.iter().filter_map(rsx_node_from_html).collect(); Some(BodyNode::Element(Element { name: el_name, children, raw_attributes: attributes, merged_attributes: Default::default(), diagnostics: Default::default(), spreads: Default::default(), brace: Default::default(), })) } // We ignore comments Node::Comment(_) => None, } } /// Pull out all the svgs from the body and replace them with components of the same name pub fn collect_svgs(children: &mut [BodyNode], out: &mut Vec<BodyNode>) { for child in children { match child { BodyNode::Component(comp) => collect_svgs(&mut comp.children.roots, out), BodyNode::Element(el) if el.name == "svg" => { // we want to replace this instance with a component let mut segments = Punctuated::new(); segments.push(Ident::new("icons", Span::call_site()).into()); let new_name: Ident = Ident::new(&format!("icon_{}", out.len()), Span::call_site()); segments.push(new_name.clone().into()); // Replace this instance with a component let mut new_comp = BodyNode::Component(Component { name: syn::Path { leading_colon: None, segments, }, generics: None, spreads: Default::default(), diagnostics: Default::default(), fields: vec![], children: TemplateBody::new(vec![]), brace: Some(Default::default()), dyn_idx: Default::default(), component_literal_dyn_idx: vec![], }); std::mem::swap(child, &mut new_comp); // And push the original svg into the svg list out.push(new_comp); } BodyNode::Element(el) => collect_svgs(&mut el.children, out), _ => {} } } }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false
DioxusLabs/dioxus
https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/rsx-rosetta/tests/h-tags.rs
packages/rsx-rosetta/tests/h-tags.rs
use html_parser::Dom; #[test] fn h_tags_translate() { let html = r#" <div> <h1>hello world!</h1> <h2>hello world!</h2> <h3>hello world!</h3> <h4>hello world!</h4> <h5>hello world!</h5> <h6>hello world!</h6> </div> "# .trim(); let dom = Dom::parse(html).unwrap(); let body = dioxus_rsx_rosetta::rsx_from_html(&dom); let out = dioxus_autofmt::write_block_out(&body).unwrap(); let expected = r#" div { h1 { "hello world!" } h2 { "hello world!" } h3 { "hello world!" } h4 { "hello world!" } h5 { "hello world!" } h6 { "hello world!" } }"#; pretty_assertions::assert_eq!(&out, &expected); }
rust
Apache-2.0
ec8f31dece5c75371177bf080bab46dff54ffd0e
2026-01-04T15:32:28.012891Z
false