repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/src/hooks/use_navigator.rs | packages/router/src/hooks/use_navigator.rs | use dioxus_core::{try_consume_context, use_hook};
use crate::{Navigator, RouterContext};
/// A hook that provides access to the navigator to change the router history.
///
/// > The Routable macro will define a version of this hook with an explicit type.
///
/// ```rust
/// # use dioxus::prelude::*;
/// #[derive(Clone, Routable)]
/// enum Route {
/// #[route("/")]
/// Index {},
/// #[route("/:id")]
/// Dynamic { id: usize },
/// }
///
/// #[component]
/// fn App() -> Element {
/// rsx! {
/// Router::<Route> {}
/// }
/// }
///
/// #[component]
/// fn Index() -> Element {
/// let navigator = use_navigator();
///
/// rsx! {
/// button {
/// onclick: move |_| { navigator.push(Route::Dynamic { id: 1234 }); },
/// "Go to /1234"
/// }
/// }
/// }
///
/// #[component]
/// fn Dynamic(id: usize) -> Element {
/// rsx! {
/// p {
/// "Current ID: {id}"
/// }
/// }
/// }
///
/// # let mut vdom = VirtualDom::new(App);
/// # vdom.rebuild_in_place();
/// ```
#[must_use]
pub fn use_navigator() -> Navigator {
    // Resolve the router context once, on the first render of this component,
    // and cache the resulting navigator for all subsequent renders.
    use_hook(|| match try_consume_context::<RouterContext>() {
        Some(router) => Navigator(router),
        None => panic!("Must be called in a descendant of a Router component"),
    })
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/src/components/link.rs | packages/router/src/components/link.rs | #![allow(clippy::type_complexity)]
use std::fmt::Debug;
use dioxus_core::{Attribute, Element, EventHandler, VNode};
use dioxus_core_macro::{rsx, Props};
use dioxus_html::{
self as dioxus_elements, ModifiersInteraction, MountedEvent, MouseEvent, PointerInteraction,
};
use tracing::error;
use crate::navigation::NavigationTarget;
use crate::utils::use_router_internal::use_router_internal;
/// The properties for a [`Link`].
#[derive(Props, Clone, PartialEq)]
pub struct LinkProps {
/// The class attribute for the `a` tag.
pub class: Option<String>,
/// A class to apply to the generate HTML anchor tag if the `target` route is active.
pub active_class: Option<String>,
/// The children to render within the generated HTML anchor tag.
pub children: Element,
/// When [`true`], the `target` route will be opened in a new tab.
///
/// This does not change whether the [`Link`] is active or not.
#[props(default)]
pub new_tab: bool,
/// The onclick event handler.
pub onclick: Option<EventHandler<MouseEvent>>,
/// The onmounted event handler.
/// Fired when the `<a>` element is mounted.
pub onmounted: Option<EventHandler<MountedEvent>>,
#[props(default)]
/// Whether the default behavior should be executed if an `onclick` handler is provided.
///
/// 1. When `onclick` is [`None`] (default if not specified), `onclick_only` has no effect.
/// 2. If `onclick_only` is [`false`] (default if not specified), the provided `onclick` handler
/// will be executed after the links regular functionality.
/// 3. If `onclick_only` is [`true`], only the provided `onclick` handler will be executed.
pub onclick_only: bool,
/// The rel attribute for the generated HTML anchor tag.
///
/// For external `a`s, this defaults to `noopener noreferrer`.
pub rel: Option<String>,
/// The navigation target. Roughly equivalent to the href attribute of an HTML anchor tag.
#[props(into)]
pub to: NavigationTarget,
#[props(extends = GlobalAttributes)]
attributes: Vec<Attribute>,
}
impl Debug for LinkProps {
    /// Format the props for diagnostics.
    ///
    /// Previously omitted `class` and `onmounted`; both are now reported.
    /// `onclick`/`onmounted` are opaque callbacks, so only their presence is shown.
    /// The `to` target is still left out, which `finish_non_exhaustive` makes explicit.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("LinkProps")
            .field("class", &self.class)
            .field("active_class", &self.active_class)
            .field("children", &self.children)
            .field("attributes", &self.attributes)
            .field("new_tab", &self.new_tab)
            .field("onclick", &self.onclick.as_ref().map(|_| "onclick is set"))
            .field(
                "onmounted",
                &self.onmounted.as_ref().map(|_| "onmounted is set"),
            )
            .field("onclick_only", &self.onclick_only)
            .field("rel", &self.rel)
            .finish_non_exhaustive()
    }
}
/// A link to navigate to another route.
///
/// Only works as descendant of a [`super::Router`] component, otherwise it will be inactive.
///
/// Unlike a regular HTML anchor, a [`Link`] allows the router to handle the navigation and doesn't
/// cause the browser to load a new page.
///
/// However, in the background a [`Link`] still generates an anchor, which you can use for styling
/// as normal.
///
/// # External targets
/// When the [`Link`]s target is an [`NavigationTarget::External`] target, that is used as the `href` directly. This
/// means that a [`Link`] can always navigate to an [`NavigationTarget::External`] target, even if the [`dioxus_history::History`] does not support it.
///
/// # Panic
/// - When the [`Link`] is not nested within a [`super::Router`], but
/// only in debug builds.
///
/// # Example
/// ```rust
/// # use dioxus::prelude::*;
///
/// #[derive(Clone, Routable)]
/// enum Route {
/// #[route("/")]
/// Index {},
/// }
///
/// #[component]
/// fn App() -> Element {
/// rsx! {
/// Router::<Route> {}
/// }
/// }
///
/// #[component]
/// fn Index() -> Element {
/// rsx! {
/// Link {
/// active_class: "active",
/// class: "link_class",
/// id: "link_id",
/// new_tab: true,
/// rel: "link_rel",
/// to: Route::Index {},
///
/// "A fully configured link"
/// }
/// }
/// }
/// #
/// # let mut vdom = VirtualDom::new(App);
/// # vdom.rebuild_in_place();
/// # assert_eq!(
/// # dioxus_ssr::render(&vdom),
/// # r#"<a href="/" class="link_class active" rel="link_rel" target="_blank" aria-current="page" id="link_id">A fully configured link</a>"#
/// # );
/// ```
#[doc(alias = "<a>")]
#[allow(non_snake_case)]
pub fn Link(props: LinkProps) -> Element {
    let LinkProps {
        active_class,
        children,
        attributes,
        new_tab,
        onclick,
        onclick_only,
        rel,
        to,
        class,
        ..
    } = props;
    // hook up to router; without one the link cannot resolve routes at all
    let router = match use_router_internal() {
        Some(r) => r,
        #[allow(unreachable_code)]
        None => {
            let msg = "`Link` must have access to a parent router";
            error!("{msg}, will be inactive");
            // Fail fast in development; in release builds render nothing instead.
            #[cfg(debug_assertions)]
            panic!("{}", msg);
            return VNode::empty();
        }
    };
    let current_url = router.full_route_string();
    // The unprefixed href, used for the active-route comparison below.
    let href = match &to {
        NavigationTarget::Internal(url) => url.clone(),
        NavigationTarget::External(route) => route.clone(),
    };
    // Add the history's prefix to internal hrefs for use in the rsx
    let full_href = match &to {
        NavigationTarget::Internal(url) => router.prefix().unwrap_or_default() + url,
        NavigationTarget::External(route) => route.clone(),
    };
    // Compose `class` plus, when this link points at the active route, `active_class`.
    let mut class_ = String::new();
    if let Some(c) = class {
        class_.push_str(&c);
    }
    if let Some(c) = active_class {
        if href == current_url {
            if !class_.is_empty() {
                class_.push(' ');
            }
            class_.push_str(&c);
        }
    }
    let class = if class_.is_empty() {
        None
    } else {
        Some(class_)
    };
    // Accessibility: flag the anchor that points at the currently displayed page.
    let aria_current = (href == current_url).then_some("page");
    let tag_target = new_tab.then_some("_blank");
    let is_external = matches!(to, NavigationTarget::External(_));
    // Client-side routing only applies to internal, same-tab navigation.
    let is_router_nav = !is_external && !new_tab;
    // External links default to a safe rel unless the caller supplied one.
    let rel = rel.or_else(|| is_external.then_some("noopener noreferrer".to_string()));
    let do_default = onclick.is_none() || !onclick_only;
    let action = move |event: MouseEvent| {
        // Only handle events without modifiers
        if !event.modifiers().is_empty() {
            return;
        }
        // Only handle left clicks
        if event.trigger_button() != Some(dioxus_elements::input_data::MouseButton::Primary) {
            return;
        }
        // If we need to open in a new tab, let the browser handle it
        if new_tab {
            return;
        }
        // todo(jon): this is extra hacky for no reason - we should fix prevent default on Links
        if do_default && is_external {
            return;
        }
        event.prevent_default();
        if do_default && is_router_nav {
            router.push_any(to.clone());
        }
        // The user's handler (if any) runs after the router navigation.
        if let Some(handler) = onclick {
            handler.call(event);
        }
    };
    let onmounted = move |event| {
        if let Some(handler) = props.onmounted {
            handler.call(event);
        }
    };
    // In liveview, we need to prevent the default action if the user clicks on the link with modifiers
    // in javascript. The prevent_default method is not available in the liveview renderer because
    // event handlers are handled over a websocket.
    let liveview_prevent_default = {
        // If the event is a click with the left mouse button and no modifiers, prevent the default action
        // and navigate to the href with client side routing
        router.include_prevent_default().then_some(
            "if (event.button === 0 && !event.ctrlKey && !event.metaKey && !event.shiftKey && !event.altKey) { event.preventDefault() }"
        )
    };
    rsx! {
        a {
            onclick: action,
            "onclick": liveview_prevent_default,
            href: full_href,
            onmounted: onmounted,
            class,
            rel,
            target: tag_target,
            aria_current,
            ..attributes,
            {children}
        }
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/src/components/router.rs | packages/router/src/components/router.rs | use crate::{provide_router_context, routable::Routable, router_cfg::RouterConfig, Outlet};
use dioxus_core::{provide_context, use_hook, Callback, Element};
use dioxus_core_macro::{rsx, Props};
/// The props for [`Router`].
#[derive(Props)]
pub struct RouterProps<R: Clone + 'static> {
    /// A callback that builds the [`RouterConfig`] for this router on first render.
    #[props(default, into)]
    config: Callback<(), RouterConfig<R>>,
}
impl<T: Clone> Clone for RouterProps<T> {
    /// `RouterProps` is `Copy` (its only field is a cheap `Callback` handle),
    /// so cloning is a plain field-wise copy.
    fn clone(&self) -> Self {
        Self {
            config: self.config,
        }
    }
}
impl<T: Clone> Copy for RouterProps<T> {}
impl<R: Clone + 'static> Default for RouterProps<R> {
    /// Fall back to the default [`RouterConfig`] when no `config` prop is supplied.
    fn default() -> Self {
        let config = Callback::new(|_| RouterConfig::default());
        Self { config }
    }
}
impl<R: Clone> PartialEq for RouterProps<R> {
    fn eq(&self, _: &Self) -> bool {
        // prevent the router from re-rendering when the initial url or config changes:
        // always reporting equality makes prop memoization treat the props as unchanged
        true
    }
}
/// A component that renders the current route.
pub fn Router<R: Routable + Clone>(props: RouterProps<R>) -> Element {
    use crate::{outlet::OutletContext, RouterContext};
    // Create the router context exactly once and expose it to all descendants.
    use_hook(|| {
        provide_router_context(RouterContext::new(props.config.call(())));
    });
    // With the streaming feature, commit the initial chunk once all suspense
    // under this router has resolved.
    #[cfg(feature = "streaming")]
    dioxus_hooks::use_after_suspense_resolved(|| {
        dioxus_fullstack_core::commit_initial_chunk();
    });
    // Root of the outlet hierarchy for route type `R`.
    use_hook(|| {
        provide_context(OutletContext::<R>::new());
    });
    rsx! { Outlet::<R> {} }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/src/components/default_errors.rs | packages/router/src/components/default_errors.rs | use dioxus_core::Element;
use dioxus_core_macro::rsx;
use dioxus_html as dioxus_elements;
#[allow(deprecated)]
use crate::hooks::use_router;
/// The default component to render when an external navigation fails.
#[allow(non_snake_case)]
pub fn FailureExternalNavigation() -> Element {
    #[allow(deprecated)]
    let router = use_router();
    rsx! {
        h1 { "External Navigation Failure!" }
        p {
            "The application tried to programmatically navigate to an external page. This "
            "operation has failed. Click the link below to complete the navigation manually."
        }
        // Clearing the routing error dismisses this screen and restores the previous view.
        a { onclick: move |_| { router.clear_error() }, "Click here to go back" }
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/src/components/child_router.rs | packages/router/src/components/child_router.rs | /// Components that allow the macro to add child routers. This component provides a context
/// to the child router that maps child routes to root routes and vice versa.
use crate::{Outlet, OutletContext, Routable};
use dioxus_core::{provide_context, try_consume_context, use_hook, Element};
use dioxus_core_macro::{component, rsx, Props};
/// Maps a child route into the root router and vice versa
// NOTE: Currently child routers only support simple static prefixes, but this
// API could be expanded to support dynamic prefixes as well
pub(crate) struct ChildRouteMapping<R> {
    /// Renders a child route as the absolute route string the root router understands.
    format_route_as_root_route: fn(R) -> String,
    /// Attempts to parse an absolute root route into this child router's route type.
    parse_route_from_root_route: fn(&str) -> Option<R>,
}
impl<R: Routable> ChildRouteMapping<R> {
    /// Convert a child route into the absolute route string of the root router.
    pub(crate) fn format_route_as_root_route(&self, route: R) -> String {
        (self.format_route_as_root_route)(route)
    }
    /// Try to parse an absolute root route string into a child route.
    pub(crate) fn parse_route_from_root_route(&self, route: &str) -> Option<R> {
        (self.parse_route_from_root_route)(route)
    }
}
/// Get the formatter that handles adding and stripping the prefix from a child route
pub(crate) fn consume_child_route_mapping<R: Routable>() -> Option<ChildRouteMapping<R>> {
    // Spell the context type out explicitly rather than relying on inference.
    try_consume_context::<ChildRouteMapping<R>>()
}
// Implemented by hand (instead of `#[derive(Clone)]`) so `R` itself does not
// need to be `Clone`: the fields are plain function pointers, which are `Copy`.
impl<R> Clone for ChildRouteMapping<R> {
    fn clone(&self) -> Self {
        let Self {
            format_route_as_root_route,
            parse_route_from_root_route,
        } = self;
        Self {
            format_route_as_root_route: *format_route_as_root_route,
            parse_route_from_root_route: *parse_route_from_root_route,
        }
    }
}
/// Props for the [`ChildRouter`] component.
#[derive(Props, Clone)]
pub struct ChildRouterProps<R: Routable> {
    /// The child route to render
    route: R,
    /// Take a parent route and return a child route, or [`None`] if the route is not part of the child
    parse_route_from_root_route: fn(&str) -> Option<R>,
    /// Take a child route and return a parent route
    format_route_as_root_route: fn(R) -> String,
}
impl<R: Routable> PartialEq for ChildRouterProps<R> {
    fn eq(&self, _: &Self) -> bool {
        // Never equal: treat the props as always changed so the child router
        // re-renders whenever its parent renders.
        false
    }
}
/// A component that provides a [`History`](dioxus_history::History) to a child router. The `#[child]` attribute on the router macro will insert this automatically.
#[component]
#[allow(missing_docs)]
pub fn ChildRouter<R: Routable>(props: ChildRouterProps<R>) -> Element {
    use_hook(|| {
        // Expose the root<->child route mapping to descendants and start a
        // fresh outlet hierarchy for the child route type `R`. Runs only once.
        provide_context(ChildRouteMapping {
            format_route_as_root_route: props.format_route_as_root_route,
            parse_route_from_root_route: props.parse_route_from_root_route,
        });
        provide_context(OutletContext::<R>::new());
    });
    rsx! { Outlet::<R> {} }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/src/components/outlet.rs | packages/router/src/components/outlet.rs | use crate::{outlet::OutletContext, *};
use dioxus_core::Element;
/// An outlet for the current content.
///
/// Only works as descendant of a [`super::Router`] component, otherwise it will be inactive.
///
/// The [`Outlet`] is aware of how many [`Outlet`]s it is nested within. It will render the content
/// of the active route that is __exactly as deep__.
///
/// # Panic
/// - When the [`Outlet`] is not nested within a [`super::Router`] component,
/// but only in debug builds.
///
/// # Example
/// ```rust
/// # use dioxus::prelude::*;
/// #[derive(Clone, Routable)]
/// #[rustfmt::skip]
/// enum Route {
/// #[nest("/wrap")]
/// #[layout(Wrapper)] // Every layout component must have one Outlet
/// #[route("/")]
/// Child {},
/// #[end_layout]
/// #[end_nest]
/// #[route("/")]
/// Index {},
/// }
///
/// #[component]
/// fn Index() -> Element {
/// rsx! {
/// div {
/// "Index"
/// }
/// }
/// }
///
/// #[component]
/// fn Wrapper() -> Element {
/// rsx! {
/// h1 { "App" }
/// Outlet::<Route> {} // The content of child routes will be rendered here
/// }
/// }
///
/// #[component]
/// fn Child() -> Element {
/// rsx! {
/// p {
/// "Child"
/// }
/// }
/// }
///
/// # #[component]
/// # fn App() -> Element {
/// # rsx! {
/// # dioxus_router::components::HistoryProvider {
/// # history: move |_| std::rc::Rc::new(dioxus_history::MemoryHistory::with_initial_path(Route::Child {}.to_string())) as std::rc::Rc<dyn dioxus_history::History>,
/// # Router::<Route> {}
/// # }
/// # }
/// # }
/// #
/// # let mut vdom = VirtualDom::new(App);
/// # vdom.rebuild_in_place();
/// # assert_eq!(dioxus_ssr::render(&vdom), "<h1>App</h1><p>Child</p>");
/// ```
pub fn Outlet<R: Routable + Clone>() -> Element {
    // The outlet context tracks how deeply this outlet is nested and renders
    // the piece of the active route that matches exactly that depth.
    OutletContext::<R>::render()
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/src/components/history_buttons.rs | packages/router/src/components/history_buttons.rs | use dioxus_core::{Element, VNode};
use dioxus_core_macro::{rsx, Props};
use dioxus_html as dioxus_elements;
use tracing::error;
use crate::utils::use_router_internal::use_router_internal;
/// The properties for a [`GoBackButton`] or a [`GoForwardButton`].
///
/// Shared by both buttons since they differ only in the direction of travel.
#[derive(Debug, Props, Clone, PartialEq)]
pub struct HistoryButtonProps {
    /// The children to render within the generated HTML button tag.
    pub children: Element,
}
/// A button to go back through the navigation history. Similar to a browsers back button.
///
/// Only works as descendant of a [`super::Router`] component, otherwise it will be inactive.
///
/// The button will disable itself if it is known that no prior history is available.
///
/// # Panic
/// - When the [`GoBackButton`] is not nested within a [`super::Router`] component,
/// but only in debug builds.
///
/// # Example
/// ```rust
/// # use dioxus::prelude::*;
/// #[derive(Clone, Routable)]
/// enum Route {
/// #[route("/")]
/// Index {},
/// }
///
/// #[component]
/// fn App() -> Element {
/// rsx! {
/// Router::<Route> {}
/// }
/// }
///
/// #[component]
/// fn Index() -> Element {
/// rsx! {
/// GoBackButton {
/// "go back"
/// }
/// }
/// }
/// #
/// # let mut vdom = VirtualDom::new(App);
/// # vdom.rebuild_in_place();
/// # assert_eq!(
/// # dioxus_ssr::render(&vdom),
/// # r#"<button disabled="true">go back</button>"#
/// # );
/// ```
pub fn GoBackButton(props: HistoryButtonProps) -> Element {
    let HistoryButtonProps { children } = props;
    // hook up to router
    let router = match use_router_internal() {
        Some(r) => r,
        #[allow(unreachable_code)]
        None => {
            let msg = "`GoBackButton` must have access to a parent router";
            error!("{msg}, will be inactive");
            // Fail fast during development; in release builds render nothing instead.
            #[cfg(debug_assertions)]
            panic!("{}", msg);
            return VNode::empty();
        }
    };
    // Disable the button when the history reports nothing to go back to.
    let disabled = !router.can_go_back();
    rsx! {
        button {
            disabled: "{disabled}",
            onclick: move |evt| {
                // Stop the native button default before mutating history.
                evt.prevent_default();
                router.go_back()
            },
            {children}
        }
    }
}
/// A button to go forward through the navigation history. Similar to a browsers forward button.
///
/// Only works as descendant of a [`super::Router`] component, otherwise it will be inactive.
///
/// The button will disable itself if it is known that no later history is available.
///
/// # Panic
/// - When the [`GoForwardButton`] is not nested within a [`super::Router`] component,
/// but only in debug builds.
///
/// # Example
/// ```rust
/// # use dioxus::prelude::*;
/// #[derive(Clone, Routable)]
/// enum Route {
/// #[route("/")]
/// Index {},
/// }
///
/// #[component]
/// fn App() -> Element {
/// rsx! {
/// Router::<Route> {}
/// }
/// }
///
/// #[component]
/// fn Index() -> Element {
/// rsx! {
/// GoForwardButton {
/// "go forward"
/// }
/// }
/// }
/// #
/// # let mut vdom = VirtualDom::new(App);
/// # vdom.rebuild_in_place();
/// # assert_eq!(
/// # dioxus_ssr::render(&vdom),
/// # r#"<button disabled="true">go forward</button>"#
/// # );
/// ```
pub fn GoForwardButton(props: HistoryButtonProps) -> Element {
    let HistoryButtonProps { children } = props;
    // hook up to router
    let router = match use_router_internal() {
        Some(r) => r,
        #[allow(unreachable_code)]
        None => {
            let msg = "`GoForwardButton` must have access to a parent router";
            error!("{msg}, will be inactive");
            // Fail fast during development; in release builds render nothing instead.
            #[cfg(debug_assertions)]
            panic!("{}", msg);
            return VNode::empty();
        }
    };
    // Disable the button when the history reports nothing to go forward to.
    let disabled = !router.can_go_forward();
    rsx! {
        button {
            disabled: "{disabled}",
            onclick: move |evt| {
                // Stop the native button default before mutating history.
                evt.prevent_default();
                router.go_forward()
            },
            {children}
        }
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/src/components/history_provider.rs | packages/router/src/components/history_provider.rs | use dioxus_core::{use_hook, Callback, Element};
use dioxus_core_macro::{component, Props};
use dioxus_history::{provide_history_context, History};
use std::rc::Rc;
/// A component that provides a [`History`] for all child [`Router`] components. Renderers generally provide a default history automatically.
#[component]
#[allow(missing_docs)]
pub fn HistoryProvider(
    /// The history to provide to child components.
    history: Callback<(), Rc<dyn History>>,
    /// The children to render within the history provider.
    children: Element,
) -> Element {
    // Build and provide the history exactly once, on first render.
    use_hook(|| provide_history_context(history.call(())));
    children
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/src/utils/use_router_internal.rs | packages/router/src/utils/use_router_internal.rs | use crate::RouterContext;
use dioxus_core::{try_consume_context, use_hook};
/// A private hook to subscribe to the router.
///
/// Used to reduce redundancy within other components/hooks. Safe to call multiple times for a
/// single component, but not recommended. Multiple subscriptions will be discarded.
///
/// # Return values
/// - [`None`], when the current component isn't a descendant of a [`crate::Router`] component.
/// - Otherwise [`Some`].
pub(crate) fn use_router_internal() -> Option<RouterContext> {
use_hook(try_consume_context)
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/src/contexts/router.rs | packages/router/src/contexts/router.rs | use std::{
collections::HashSet,
error::Error,
fmt::Display,
sync::{Arc, Mutex},
};
use dioxus_core::{provide_context, Element, ReactiveContext, ScopeId};
use dioxus_history::history;
use dioxus_signals::{CopyValue, ReadableExt, Signal, WritableExt};
use crate::{
components::child_router::consume_child_route_mapping, navigation::NavigationTarget,
routable::Routable, router_cfg::RouterConfig, SiteMapSegment,
};
/// An error that is thrown when the router fails to parse a route
#[derive(Debug, Clone)]
pub struct ParseRouteError {
    // Human-readable description of why parsing failed.
    message: String,
}
impl Error for ParseRouteError {}
impl Display for ParseRouteError {
    /// Display the stored parse-failure message verbatim.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.message)
    }
}
/// This context is set in the root of the virtual dom if there is a router present.
///
/// The signal holds `None` until the first router registers itself via
/// `provide_router_context`.
#[derive(Clone, Copy)]
struct RootRouterContext(Signal<Option<RouterContext>>);
/// Try to get the router that was created closest to the root of the virtual dom. This may be called outside of the router.
///
/// This will return `None` if there is no router present or the router has not been created yet.
pub fn root_router() -> Option<RouterContext> {
    let rt = dioxus_core::Runtime::current();
    if let Some(ctx) = rt.consume_context::<RootRouterContext>(ScopeId::ROOT) {
        ctx.0.cloned()
    } else {
        // No marker context exists yet: install an empty slot at the root scope
        // so a later `provide_router_context` call can fill it in.
        rt.provide_context(
            ScopeId::ROOT,
            RootRouterContext(Signal::new_in_scope(None, ScopeId::ROOT)),
        );
        None
    }
}
// Registers `ctx` for this scope's descendants; the first router created
// anywhere in the app additionally registers itself at the root scope so
// `root_router()` can find it from anywhere.
pub(crate) fn provide_router_context(ctx: RouterContext) {
    if root_router().is_none() {
        dioxus_core::Runtime::current().provide_context(
            ScopeId::ROOT,
            RootRouterContext(Signal::new_in_scope(Some(ctx), ScopeId::ROOT)),
        );
    }
    // Always expose the router to this scope's descendants.
    provide_context(ctx);
}
/// An error that can occur when navigating.
///
/// Carries the external URL that the history implementation failed to open.
#[derive(Debug, Clone)]
pub struct ExternalNavigationFailure(pub String);
/// A function the router will call after every routing update.
pub(crate) type RoutingCallback<R> =
    Arc<dyn Fn(GenericRouterContext<R>) -> Option<NavigationTarget<R>>>;
/// Type-erased form of [`RoutingCallback`] as stored inside the router context.
pub(crate) type AnyRoutingCallback = Arc<dyn Fn(RouterContext) -> Option<NavigationTarget>>;
// Mutable state shared by every copy of a `RouterContext`.
struct RouterContextInner {
    /// An external-navigation failure that has not been cleared yet.
    unresolved_error: Option<ExternalNavigationFailure>,
    /// Reactive contexts marked dirty whenever the route or error state changes.
    subscribers: Arc<Mutex<HashSet<ReactiveContext>>>,
    /// Type-erased user callback run after every routing update.
    routing_callback: Option<AnyRoutingCallback>,
    /// Component rendered when an external navigation fails.
    failure_external_navigation: fn() -> Element,
    /// Returns whether a route string parses as an internal route.
    internal_route: fn(&str) -> bool,
    /// The static site map of the route enum.
    site_map: &'static [SiteMapSegment],
}
impl RouterContextInner {
    /// Mark every subscribed reactive context dirty so it re-renders.
    fn update_subscribers(&self) {
        let subscribers = self.subscribers.lock().unwrap();
        for &rc in subscribers.iter() {
            rc.mark_dirty();
        }
    }

    /// Subscribe the currently-running reactive context (if any) to routing updates.
    fn subscribe_to_current_context(&self) {
        if let Some(rc) = ReactiveContext::current() {
            rc.subscribe(self.subscribers.clone());
        }
    }

    /// Ask the history implementation to perform an external navigation.
    /// On failure, records the error, notifies subscribers, and returns it.
    fn external(&mut self, external: String) -> Option<ExternalNavigationFailure> {
        if history().external(external.clone()) {
            None
        } else {
            let failure = ExternalNavigationFailure(external);
            self.unresolved_error = Some(failure.clone());
            self.update_subscribers();
            Some(failure)
        }
    }
}
/// A collection of router data that manages all routing functionality.
#[derive(Clone, Copy)]
pub struct RouterContext {
    // All mutable state lives behind a `CopyValue`, keeping the context itself `Copy`.
    inner: CopyValue<RouterContextInner>,
}
impl RouterContext {
    /// Build a new router context for route type `R` from the given config.
    ///
    /// Registers a history updater, stores the state at the root scope, and
    /// synchronizes the initial route with the history implementation.
    pub(crate) fn new<R: Routable + 'static>(cfg: RouterConfig<R>) -> Self {
        let subscribers = Arc::new(Mutex::new(HashSet::new()));
        // If this is a child router, remember how to translate between the
        // child's route type and the root router's absolute routes.
        let mapping = consume_child_route_mapping();
        let myself = RouterContextInner {
            unresolved_error: None,
            subscribers: subscribers.clone(),
            // Wrap the typed `on_update` callback in a type-erased one that
            // converts the returned route back into an absolute route string.
            routing_callback: cfg.on_update.map(|update| {
                Arc::new(move |ctx| {
                    let ctx = GenericRouterContext {
                        inner: ctx,
                        _marker: std::marker::PhantomData,
                    };
                    update(ctx).map(|t| match t {
                        NavigationTarget::Internal(r) => match mapping.as_ref() {
                            Some(mapping) => {
                                NavigationTarget::Internal(mapping.format_route_as_root_route(r))
                            }
                            None => NavigationTarget::Internal(r.to_string()),
                        },
                        NavigationTarget::External(s) => NavigationTarget::External(s),
                    })
                }) as Arc<dyn Fn(RouterContext) -> Option<NavigationTarget>>
            }),
            failure_external_navigation: cfg.failure_external_navigation,
            internal_route: |route| R::from_str(route).is_ok(),
            site_map: R::SITE_MAP,
        };
        let history = history();
        // set the updater: any history change (e.g. browser back/forward)
        // marks every subscriber dirty so it re-renders.
        history.updater(Arc::new(move || {
            for &rc in subscribers.lock().unwrap().iter() {
                rc.mark_dirty();
            }
        }));
        let myself = Self {
            inner: CopyValue::new_in_scope(myself, ScopeId::ROOT),
        };
        // If the current route is different from the one in the browser, replace the current route
        let current_route: R = myself.current();
        if current_route.to_string() != history.current_route() {
            myself.replace(current_route);
        }
        myself
    }
    /// Check if the router is running in a liveview context
    /// We do some slightly weird things for liveview because of the network boundary
    pub(crate) fn include_prevent_default(&self) -> bool {
        history().include_prevent_default()
    }
    /// Check whether there is a previous page to navigate back to.
    #[must_use]
    pub fn can_go_back(&self) -> bool {
        history().can_go_back()
    }
    /// Check whether there is a future page to navigate forward to.
    #[must_use]
    pub fn can_go_forward(&self) -> bool {
        history().can_go_forward()
    }
    /// Go back to the previous location.
    ///
    /// Will fail silently if there is no previous location to go to.
    pub fn go_back(&self) {
        history().go_back();
        self.change_route();
    }
    /// Go forward to the next location.
    ///
    /// Will fail silently if there is no next location to go to.
    pub fn go_forward(&self) {
        history().go_forward();
        self.change_route();
    }
    /// Push a pre-erased navigation target; used by components such as `Link`.
    pub(crate) fn push_any(&self, target: NavigationTarget) -> Option<ExternalNavigationFailure> {
        {
            // Scope the write guard so it is released before `change_route`.
            let mut write = self.inner.write_unchecked();
            match target {
                NavigationTarget::Internal(p) => history().push(p),
                NavigationTarget::External(e) => return write.external(e),
            }
        }
        self.change_route()
    }
    /// Push a new location.
    ///
    /// The previous location will be available to go back to.
    pub fn push(&self, target: impl Into<NavigationTarget>) -> Option<ExternalNavigationFailure> {
        let target = target.into();
        {
            // Scope the write guard so it is released before `change_route`.
            let mut write = self.inner.write_unchecked();
            match target {
                NavigationTarget::Internal(p) => {
                    let history = history();
                    history.push(p)
                }
                NavigationTarget::External(e) => return write.external(e),
            }
        }
        self.change_route()
    }
    /// Replace the current location.
    ///
    /// The previous location will **not** be available to go back to.
    pub fn replace(
        &self,
        target: impl Into<NavigationTarget>,
    ) -> Option<ExternalNavigationFailure> {
        let target = target.into();
        {
            // Scope the write guard so it is released before `change_route`.
            let mut state = self.inner.write_unchecked();
            match target {
                NavigationTarget::Internal(p) => {
                    let history = history();
                    history.replace(p)
                }
                NavigationTarget::External(e) => return state.external(e),
            }
        }
        self.change_route()
    }
    /// The route that is currently active.
    ///
    /// On a parse failure, throws a [`ParseRouteError`] into the error
    /// boundary and falls back to parsing "/" as `R`.
    pub fn current<R: Routable>(&self) -> R {
        let absolute_route = self.full_route_string();
        // If this is a child route, map the absolute route to the child route before parsing
        let mapping = consume_child_route_mapping::<R>();
        let route = match mapping.as_ref() {
            Some(mapping) => mapping
                .parse_route_from_root_route(&absolute_route)
                .ok_or_else(|| "Failed to parse route".to_string()),
            None => {
                R::from_str(&absolute_route).map_err(|err| format!("Failed to parse route {err}"))
            }
        };
        match route {
            Ok(route) => route,
            Err(err) => {
                dioxus_core::throw_error(ParseRouteError { message: err });
                // Last resort: panics only if "/" is not parseable as `R` either.
                "/".parse().unwrap_or_else(|err| panic!("{err}"))
            }
        }
    }
    /// The full route that is currently active. If this is called from inside a child router, this will always return the parent's view of the route.
    ///
    /// Also subscribes the calling reactive context to future route changes.
    pub fn full_route_string(&self) -> String {
        let inner = self.inner.read();
        inner.subscribe_to_current_context();
        let history = history();
        history.current_route()
    }
    /// The prefix that is currently active.
    pub fn prefix(&self) -> Option<String> {
        let history = history();
        history.current_prefix()
    }
    /// Clear any unresolved errors
    pub fn clear_error(&self) {
        let mut write_inner = self.inner.write_unchecked();
        write_inner.unresolved_error = None;
        write_inner.update_subscribers();
    }
    /// Get the site map of the router.
    pub fn site_map(&self) -> &'static [SiteMapSegment] {
        self.inner.read().site_map
    }
    // Render the external-navigation failure UI if there is an unresolved
    // error, subscribing the caller to error-state changes.
    pub(crate) fn render_error(&self) -> Option<Element> {
        let inner_write = self.inner.write_unchecked();
        inner_write.subscribe_to_current_context();
        inner_write
            .unresolved_error
            .as_ref()
            .map(|_| (inner_write.failure_external_navigation)())
    }
    // Run the user's routing callback (if any) after a route change, applying
    // any redirect it returns, then notify all subscribers.
    fn change_route(&self) -> Option<ExternalNavigationFailure> {
        let self_read = self.inner.read();
        if let Some(callback) = &self_read.routing_callback {
            let myself = *self;
            let callback = callback.clone();
            // Release the read borrow before invoking the callback, which may
            // re-borrow this context.
            drop(self_read);
            if let Some(new) = callback(myself) {
                let mut self_write = self.inner.write_unchecked();
                match new {
                    NavigationTarget::Internal(p) => {
                        let history = history();
                        history.replace(p)
                    }
                    NavigationTarget::External(e) => return self_write.external(e),
                }
            }
        }
        self.inner.read().update_subscribers();
        None
    }
    // Returns `true` if the given route string parses as an internal route.
    pub(crate) fn internal_route(&self, route: &str) -> bool {
        (self.inner.read().internal_route)(route)
    }
}
/// This context is set to the RouterConfig on_update method
///
/// A thin typed wrapper around [`RouterContext`]; `_marker` only pins the
/// route type `R` without storing a value of it.
pub struct GenericRouterContext<R> {
    inner: RouterContext,
    _marker: std::marker::PhantomData<R>,
}
impl<R> GenericRouterContext<R>
where
    R: Routable,
{
    // Every method below forwards to the type-erased `RouterContext`, adding
    // the concrete route type `R` back at the API boundary.

    /// Check whether there is a previous page to navigate back to.
    #[must_use]
    pub fn can_go_back(&self) -> bool {
        self.inner.can_go_back()
    }
    /// Check whether there is a future page to navigate forward to.
    #[must_use]
    pub fn can_go_forward(&self) -> bool {
        self.inner.can_go_forward()
    }
    /// Go back to the previous location.
    ///
    /// Will fail silently if there is no previous location to go to.
    pub fn go_back(&self) {
        self.inner.go_back();
    }
    /// Go forward to the next location.
    ///
    /// Will fail silently if there is no next location to go to.
    pub fn go_forward(&self) {
        self.inner.go_forward();
    }
    /// Push a new location.
    ///
    /// The previous location will be available to go back to.
    pub fn push(
        &self,
        target: impl Into<NavigationTarget<R>>,
    ) -> Option<ExternalNavigationFailure> {
        self.inner.push(target.into())
    }
    /// Replace the current location.
    ///
    /// The previous location will **not** be available to go back to.
    pub fn replace(
        &self,
        target: impl Into<NavigationTarget<R>>,
    ) -> Option<ExternalNavigationFailure> {
        self.inner.replace(target.into())
    }
    /// The route that is currently active.
    pub fn current(&self) -> R
    where
        R: Clone,
    {
        self.inner.current()
    }
    /// The prefix that is currently active.
    pub fn prefix(&self) -> Option<String> {
        self.inner.prefix()
    }
    /// Clear any unresolved errors
    pub fn clear_error(&self) {
        self.inner.clear_error()
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/src/contexts/navigator.rs | packages/router/src/contexts/navigator.rs | use crate::{ExternalNavigationFailure, NavigationTarget, RouterContext};
/// Acquire the navigator without subscribing to updates.
///
/// Can be called anywhere in the application provided a Router has been initialized.
///
/// ## Panics
///
/// Panics if there is no router present.
pub fn navigator() -> Navigator {
Navigator(
dioxus_core::try_consume_context::<RouterContext>()
.expect("A router must be present to use navigator"),
)
}
/// A view into the navigation state of a router.
#[derive(Clone, Copy)]
pub struct Navigator(pub(crate) RouterContext);
impl Navigator {
/// Check whether there is a previous page to navigate back to.
#[must_use]
pub fn can_go_back(&self) -> bool {
self.0.can_go_back()
}
/// Check whether there is a future page to navigate forward to.
#[must_use]
pub fn can_go_forward(&self) -> bool {
self.0.can_go_forward()
}
/// Go back to the previous location.
///
/// Will fail silently if there is no previous location to go to.
pub fn go_back(&self) {
self.0.go_back();
}
/// Go back to the next location.
///
/// Will fail silently if there is no next location to go to.
pub fn go_forward(&self) {
self.0.go_forward();
}
/// Push a new location.
///
/// The previous location will be available to go back to.
pub fn push(&self, target: impl Into<NavigationTarget>) -> Option<ExternalNavigationFailure> {
self.0.push(target)
}
/// Replace the current location.
///
/// The previous location will **not** be available to go back to.
pub fn replace(
&self,
target: impl Into<NavigationTarget>,
) -> Option<ExternalNavigationFailure> {
self.0.replace(target)
}
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/src/contexts/outlet.rs | packages/router/src/contexts/outlet.rs | use dioxus_core::{provide_context, try_consume_context, use_hook, Element, VNode};
use crate::{routable::Routable, utils::use_router_internal::use_router_internal};
/// A context that manages nested routing levels for outlet components.
///
/// The outlet context keeps track of the current nesting level of routes and helps
/// manage the hierarchical structure of nested routes in the application.
///
/// # Type Parameters
///
/// * `R` - The routable type that implements the routing logic
#[derive(Clone, Default)]
pub struct OutletContext<R> {
current_level: usize,
_marker: std::marker::PhantomData<R>,
}
impl<R> OutletContext<R> {
/// Creates a new outlet context starting at level 0
pub fn new() -> Self {
Self {
current_level: 0,
_marker: std::marker::PhantomData,
}
}
/// Creates a new outlet context for the next nesting level
pub fn next(&self) -> Self {
Self {
current_level: self.current_level + 1,
_marker: std::marker::PhantomData,
}
}
/// Returns the current nesting level of this outlet
pub fn level(&self) -> usize {
self.current_level
}
pub(crate) fn render() -> Element
where
R: Routable + Clone,
{
let router = use_router_internal().expect("Outlet must be inside of a router");
let outlet: OutletContext<R> = use_outlet_context();
let current_level = outlet.level();
provide_context(outlet.next());
if let Some(error) = router.render_error() {
return if current_level == 0 {
error
} else {
VNode::empty()
};
}
router.current::<R>().render(current_level)
}
}
/// Returns the current outlet context from the component hierarchy.
///
/// This hook retrieves the outlet context from the current component scope. If no context is found,
/// it creates a new context with a default level of 0.
///
/// # Type Parameters
///
/// * `R` - The routable type that implements the routing logic
///
/// # Returns
///
/// Returns an [`OutletContext<R>`] containing the current nesting level information.
///
/// # Examples
///
/// ```rust, no_run
/// # use dioxus::prelude::*;
/// # use dioxus_router::use_outlet_context;
///
/// # #[derive(Routable,Clone,PartialEq)]
/// # enum MyRouter {
/// # #[route("/")]
/// # MyView
/// # }
///
/// # #[component]
/// # fn MyView() -> Element {
/// # rsx!{ div { "My Text" } }
/// # }
///
/// let outlet_ctx = use_outlet_context::<MyRouter>();
/// println!("Current nesting level: {}", outlet_ctx.level());
/// ```
pub fn use_outlet_context<R: Clone + 'static>() -> OutletContext<R> {
use_hook(|| try_consume_context().unwrap_or_else(OutletContext::new))
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/tests/parsing.rs | packages/router/tests/parsing.rs | use dioxus::prelude::*;
use std::{
fmt::{self, Display},
str::FromStr,
};
#[component]
fn Root() -> Element {
unimplemented!()
}
#[component]
fn Test() -> Element {
unimplemented!()
}
#[component]
fn Dynamic(id: usize) -> Element {
unimplemented!()
}
// Make sure trailing '/'s work correctly
#[test]
fn trailing_slashes_parse() {
#[derive(Routable, Clone, Copy, PartialEq, Debug)]
enum Route {
#[route("/")]
Root {},
#[route("/test/")]
Test {},
#[route("/:id/test/")]
Dynamic { id: usize },
}
assert_eq!(Route::from_str("/").unwrap(), Route::Root {});
assert_eq!(Route::from_str("/test/").unwrap(), Route::Test {});
assert_eq!(Route::from_str("/test").unwrap(), Route::Test {});
assert_eq!(
Route::from_str("/123/test/").unwrap(),
Route::Dynamic { id: 123 }
);
assert_eq!(
Route::from_str("/123/test").unwrap(),
Route::Dynamic { id: 123 }
);
}
#[test]
fn without_trailing_slashes_parse() {
#[derive(Routable, Clone, Copy, PartialEq, Debug)]
enum RouteWithoutTrailingSlash {
#[route("/")]
Root {},
#[route("/test")]
Test {},
#[route("/:id/test")]
Dynamic { id: usize },
}
assert_eq!(
RouteWithoutTrailingSlash::from_str("/").unwrap(),
RouteWithoutTrailingSlash::Root {}
);
assert_eq!(
RouteWithoutTrailingSlash::from_str("/test/").unwrap(),
RouteWithoutTrailingSlash::Test {}
);
assert_eq!(
RouteWithoutTrailingSlash::from_str("/test").unwrap(),
RouteWithoutTrailingSlash::Test {}
);
assert_eq!(
RouteWithoutTrailingSlash::from_str("/123/test/").unwrap(),
RouteWithoutTrailingSlash::Dynamic { id: 123 }
);
assert_eq!(
RouteWithoutTrailingSlash::from_str("/123/test").unwrap(),
RouteWithoutTrailingSlash::Dynamic { id: 123 }
);
}
// Regression test for https://github.com/DioxusLabs/dioxus/issues/2984
#[test]
fn query_segments_parse() {
#[derive(Debug, Clone, PartialEq)]
enum Query {
Id(u64),
}
impl From<&str> for Query {
fn from(_: &str) -> Self {
// e.g. split query on `&` and split pairs on `=`
Query::Id(10)
}
}
impl Display for Query {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "id=10")
}
}
#[component]
fn Index(query: Query) -> Element {
rsx! {
h1 { "Index" }
}
}
#[derive(Debug, Clone, PartialEq, Routable)]
enum Route {
#[route("/?:..query")]
Index { query: Query },
}
let route = Route::Index {
query: Query::Id(10),
};
assert_eq!(route.to_string(), "/?id=10");
let parsed_route = "/?id=10".parse::<Route>().unwrap();
assert_eq!(parsed_route, route);
}
#[test]
fn optional_query_segments_parse() {
#[derive(Debug, Clone, PartialEq, Routable)]
enum Route {
#[route("/?:query&:other")]
Index { query: Option<u64>, other: u64 },
}
#[component]
fn Index(query: Option<u64>, other: u64) -> Element {
rsx! {
h1 { "Index" }
}
}
let route = Route::Index {
query: Some(10),
other: 20,
};
assert_eq!(route.to_string(), "/?query=10&other=20");
let parsed_route = "/?query=10&other=20".parse::<Route>().unwrap();
assert_eq!(parsed_route, route);
let route_without_query = Route::Index {
query: None,
other: 20,
};
assert_eq!(route_without_query.to_string(), "/?other=20");
let parsed_route_without_query = "/?other=20".parse::<Route>().unwrap();
assert_eq!(parsed_route_without_query, route_without_query);
let route_without_query_and_other = Route::Index {
query: None,
other: 0,
};
assert_eq!(route_without_query_and_other.to_string(), "/?other=0");
let parsed_route_without_query_and_other = "/".parse::<Route>().unwrap();
assert_eq!(
parsed_route_without_query_and_other,
route_without_query_and_other
);
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/tests/parent.rs | packages/router/tests/parent.rs | #![allow(unused)]
use std::rc::Rc;
use dioxus::prelude::*;
#[derive(Routable, Clone, PartialEq, Debug)]
#[rustfmt::skip]
enum Route {
#[route("/")]
RootIndex {},
#[nest("/fixed")]
#[layout(Fixed)]
#[route("/")]
FixedIndex {},
#[route("/fixed")]
FixedFixed {},
#[end_layout]
#[end_nest]
#[nest("/:id")]
#[layout(Parameter)]
#[route("/")]
ParameterIndex { id: u8 },
#[route("/fixed")]
ParameterFixed { id: u8 },
#[end_layout]
#[end_nest]
#[nest("/hash")]
#[route("/")]
HashIndex {},
#[nest("/:id")]
#[route("/?:query")]
HashId { id: u8, query: String },
#[layout(Parameter)]
#[route("/path/?:query#:hash")]
HashQuery { id: u8, query: String, hash: String },
}
#[test]
fn get_parent() {
assert_eq!(Route::RootIndex {}.parent(), None);
assert_eq!(Route::FixedIndex {}.parent(), Some(Route::RootIndex {}));
assert_eq!(Route::FixedFixed {}.parent(), Some(Route::FixedIndex {}));
assert_eq!(
Route::ParameterIndex { id: 0 }.parent(),
Some(Route::RootIndex {})
);
assert_eq!(
Route::ParameterFixed { id: 0 }.parent(),
Some(Route::ParameterIndex { id: 0 })
);
assert_eq!(
Route::HashQuery {
id: 0,
query: "query".into(),
hash: "hash".into()
}
.parent(),
Some(Route::HashId {
id: 0,
query: "".into()
})
);
assert_eq!(
Route::HashId {
id: 0,
query: "query".into()
}
.parent(),
Some(Route::HashIndex {})
);
assert_eq!(Route::HashIndex {}.parent(), Some(Route::RootIndex {}));
}
#[test]
fn is_child() {
assert!(!Route::RootIndex {}.is_child_of(&Route::RootIndex {}));
assert!(Route::FixedIndex {}.is_child_of(&Route::RootIndex {}));
assert!(!Route::FixedIndex {}.is_child_of(&Route::FixedIndex {}));
assert!(Route::FixedFixed {}.is_child_of(&Route::FixedIndex {}));
assert!(!Route::FixedFixed {}.is_child_of(&Route::FixedFixed {}));
assert!(Route::ParameterIndex { id: 0 }.is_child_of(&Route::RootIndex {}));
assert!(!Route::ParameterIndex { id: 0 }.is_child_of(&Route::ParameterIndex { id: 0 }));
assert!(Route::ParameterFixed { id: 0 }.is_child_of(&Route::ParameterIndex { id: 0 }));
assert!(!Route::ParameterFixed { id: 0 }.is_child_of(&Route::ParameterFixed { id: 0 }));
assert!(Route::HashQuery {
id: 0,
query: "query".into(),
hash: "hash".into()
}
.is_child_of(&Route::HashId {
id: 0,
query: "query".into()
}));
assert!(!Route::HashQuery {
id: 0,
query: "query".into(),
hash: "hash".into()
}
.is_child_of(&Route::HashQuery {
id: 0,
query: "query".into(),
hash: "hash".into()
}));
assert!(Route::HashId {
id: 0,
query: "query".into()
}
.is_child_of(&Route::HashIndex {}));
assert!(!Route::HashId {
id: 0,
query: "query".into()
}
.is_child_of(&Route::HashId {
id: 0,
query: "query".into()
}));
assert!(Route::HashIndex {}.is_child_of(&Route::RootIndex {}));
assert!(!Route::HashIndex {}.is_child_of(&Route::HashIndex {}));
}
#[component]
fn RootIndex() -> Element {
rsx! { h2 { "Root Index" } }
}
#[component]
fn Fixed() -> Element {
rsx! {
h2 { "Fixed" }
Outlet::<Route> { }
}
}
#[component]
fn FixedIndex() -> Element {
rsx! { h3 { "Fixed - Index" } }
}
#[component]
fn FixedFixed() -> Element {
rsx! { h3 { "Fixed - Fixed"} }
}
#[component]
fn Parameter(id: u8) -> Element {
rsx! {
h2 { "Parameter {id}" }
Outlet::<Route> { }
}
}
#[component]
fn ParameterIndex(id: u8) -> Element {
rsx! { h3 { "Parameter - Index" } }
}
#[component]
fn ParameterFixed(id: u8) -> Element {
rsx! { h3 { "Parameter - Fixed" } }
}
#[component]
fn HashQuery(id: u8, query: String, hash: String) -> Element {
rsx! {
h2 { "Hash Query" }
h3 { "id: {id}" }
h3 { "query: {query}" }
h3 { "hash: {hash}" }
}
}
#[component]
fn HashIndex() -> Element {
rsx! { h3 { "Hash Index" } }
}
#[component]
fn HashId(id: u8, query: String) -> Element {
rsx! {
h3 { "Hash Id {id}" }
h3 { "query: {query}" }
}
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/tests/site_map.rs | packages/router/tests/site_map.rs | use dioxus::prelude::*;
#[test]
fn with_class() {
#[derive(Routable, Clone, PartialEq, Debug)]
enum ChildRoute {
#[route("/")]
ChildRoot {},
#[route("/:not_static")]
NotStatic { not_static: String },
}
#[derive(Routable, Clone, PartialEq, Debug)]
enum Route {
#[route("/")]
Root {},
#[route("/test")]
Test {},
#[child("/child")]
Nested { child: ChildRoute },
}
#[component]
fn Test() -> Element {
unimplemented!()
}
#[component]
fn Root() -> Element {
unimplemented!()
}
#[component]
fn ChildRoot() -> Element {
unimplemented!()
}
#[component]
fn NotStatic(not_static: String) -> Element {
unimplemented!()
}
assert_eq!(
Route::static_routes(),
vec![
Route::Root {},
Route::Test {},
Route::Nested {
child: ChildRoute::ChildRoot {}
},
],
);
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/tests/via_ssr/link.rs | packages/router/tests/via_ssr/link.rs | use dioxus::prelude::*;
use dioxus_history::{History, MemoryHistory};
use dioxus_router::components::HistoryProvider;
use std::rc::Rc;
fn prepare<R: Routable>() -> String {
prepare_at::<R>("/")
}
fn prepare_at<R: Routable>(at: impl ToString) -> String {
prepare_at_with_base_path::<R>(at, "")
}
fn prepare_at_with_base_path<R: Routable>(at: impl ToString, base_path: impl ToString) -> String {
let mut vdom = VirtualDom::new_with_props(
App,
AppProps::<R> {
at: at.to_string(),
base_path: base_path.to_string(),
phantom: std::marker::PhantomData,
},
);
vdom.rebuild_in_place();
return dioxus_ssr::render(&vdom);
#[derive(Props)]
struct AppProps<R: Routable> {
at: String,
base_path: String,
phantom: std::marker::PhantomData<R>,
}
impl<R: Routable> Clone for AppProps<R> {
fn clone(&self) -> Self {
Self {
at: self.at.clone(),
base_path: self.base_path.clone(),
phantom: std::marker::PhantomData,
}
}
}
impl<R: Routable> PartialEq for AppProps<R> {
fn eq(&self, _other: &Self) -> bool {
false
}
}
#[allow(non_snake_case)]
fn App<R: Routable>(props: AppProps<R>) -> Element {
rsx! {
h1 { "App" }
HistoryProvider {
history: move |_| Rc::new(MemoryHistory::with_initial_path(props.at.clone()).with_prefix(props.base_path.clone())) as Rc<dyn History>,
Router::<R> {}
}
}
}
}
#[test]
fn href_internal() {
#[derive(Routable, Clone)]
enum Route {
#[route("/")]
Root {},
#[route("/test")]
Test {},
}
#[component]
fn Test() -> Element {
unimplemented!()
}
#[component]
fn Root() -> Element {
rsx! {
Link {
to: Route::Test {},
"Link"
}
}
}
let expected = format!("<h1>App</h1><a {href}>Link</a>", href = r#"href="/test""#,);
assert_eq!(prepare::<Route>(), expected);
// The base path should be added to the front of internal links
let base_path = "/deeply/nested/path";
let expected = format!(
"<h1>App</h1><a {href}>Link</a>",
href = r#"href="/deeply/nested/path/test""#,
);
assert_eq!(prepare_at_with_base_path::<Route>("/", base_path), expected);
}
#[test]
fn href_external() {
#[derive(Routable, Clone)]
enum Route {
#[route("/")]
Root {},
#[route("/test")]
Test {},
}
#[component]
fn Test() -> Element {
unimplemented!()
}
#[component]
fn Root() -> Element {
rsx! {
Link {
to: "https://dioxuslabs.com/",
"Link"
}
}
}
let expected = format!(
"<h1>App</h1><a {href} {rel}>Link</a>",
href = r#"href="https://dioxuslabs.com/""#,
rel = r#"rel="noopener noreferrer""#,
);
assert_eq!(prepare::<Route>(), expected);
// The base path should not effect external links
assert_eq!(
prepare_at_with_base_path::<Route>("/", "/deeply/nested/path"),
expected
);
}
#[test]
fn with_class() {
#[derive(Routable, Clone)]
enum Route {
#[route("/")]
Root {},
#[route("/test")]
Test {},
}
#[component]
fn Test() -> Element {
unimplemented!()
}
#[component]
fn Root() -> Element {
rsx! {
Link {
to: Route::Test {},
class: "test_class",
"Link"
}
}
}
let expected = format!(
"<h1>App</h1><a {href} {class}>Link</a>",
href = r#"href="/test""#,
class = r#"class="test_class""#,
);
assert_eq!(prepare::<Route>(), expected);
}
#[test]
fn with_active_class_active() {
#[derive(Routable, Clone)]
enum Route {
#[route("/")]
Root {},
}
#[component]
fn Root() -> Element {
rsx! {
Link {
to: Route::Root {},
active_class: "active_class".to_string(),
class: "test_class",
"Link"
}
}
}
let expected = format!(
"<h1>App</h1><a {href} {class} {aria}>Link</a>",
href = r#"href="/""#,
class = r#"class="test_class active_class""#,
aria = r#"aria-current="page""#,
);
assert_eq!(prepare::<Route>(), expected);
}
#[test]
fn with_active_class_inactive() {
#[derive(Routable, Clone)]
enum Route {
#[route("/")]
Root {},
#[route("/test")]
Test {},
}
#[component]
fn Test() -> Element {
unimplemented!()
}
#[component]
fn Root() -> Element {
rsx! {
Link {
to: Route::Test {},
active_class: "active_class".to_string(),
class: "test_class",
"Link"
}
}
}
let expected = format!(
"<h1>App</h1><a {href} {class}>Link</a>",
href = r#"href="/test""#,
class = r#"class="test_class""#,
);
assert_eq!(prepare::<Route>(), expected);
}
#[test]
fn with_id() {
#[derive(Routable, Clone)]
enum Route {
#[route("/")]
Root {},
#[route("/test")]
Test {},
}
#[component]
fn Test() -> Element {
unimplemented!()
}
#[component]
fn Root() -> Element {
rsx! {
Link {
to: Route::Test {},
id: "test_id",
"Link"
}
}
}
let expected = format!(
"<h1>App</h1><a {href} {id}>Link</a>",
href = r#"href="/test""#,
id = r#"id="test_id""#,
);
assert_eq!(prepare::<Route>(), expected);
}
#[test]
fn with_new_tab() {
#[derive(Routable, Clone)]
enum Route {
#[route("/")]
Root {},
#[route("/test")]
Test {},
}
#[component]
fn Test() -> Element {
unimplemented!()
}
#[component]
fn Root() -> Element {
rsx! {
Link {
to: Route::Test {},
new_tab: true,
"Link"
}
}
}
let expected = format!(
"<h1>App</h1><a {href} {target}>Link</a>",
href = r#"href="/test""#,
target = r#"target="_blank""#
);
assert_eq!(prepare::<Route>(), expected);
}
#[test]
fn with_new_tab_external() {
#[derive(Routable, Clone)]
enum Route {
#[route("/")]
Root {},
}
#[component]
fn Root() -> Element {
rsx! {
Link {
to: "https://dioxuslabs.com/",
new_tab: true,
"Link"
}
}
}
let expected = format!(
"<h1>App</h1><a {href} {rel} {target}>Link</a>",
href = r#"href="https://dioxuslabs.com/""#,
rel = r#"rel="noopener noreferrer""#,
target = r#"target="_blank""#
);
assert_eq!(prepare::<Route>(), expected);
}
#[test]
fn with_rel() {
#[derive(Routable, Clone)]
enum Route {
#[route("/")]
Root {},
#[route("/test")]
Test {},
}
#[component]
fn Test() -> Element {
unimplemented!()
}
#[component]
fn Root() -> Element {
rsx! {
Link {
to: Route::Test {},
rel: "test_rel".to_string(),
"Link"
}
}
}
let expected = format!(
"<h1>App</h1><a {href} {rel}>Link</a>",
href = r#"href="/test""#,
rel = r#"rel="test_rel""#,
);
assert_eq!(prepare::<Route>(), expected);
}
#[test]
fn with_child_route() {
#[derive(Routable, Clone, PartialEq, Debug)]
enum ChildRoute {
#[route("/")]
ChildRoot {},
#[route("/:not_static")]
NotStatic { not_static: String },
}
#[derive(Routable, Clone, PartialEq, Debug)]
enum Route {
#[route("/")]
Root {},
#[route("/test")]
Test {},
#[child("/child")]
Nested { child: ChildRoute },
}
#[component]
fn Test() -> Element {
unimplemented!()
}
#[component]
fn Root() -> Element {
rsx! {
Link {
to: Route::Test {},
"Parent Link"
}
Link {
to: Route::Nested { child: ChildRoute::NotStatic { not_static: "this-is-a-child-route".to_string() } },
"Child Link"
}
}
}
#[component]
fn ChildRoot() -> Element {
rsx! {
Link {
to: Route::Test {},
"Parent Link"
}
Link {
to: ChildRoute::NotStatic { not_static: "this-is-a-child-route".to_string() },
"Child Link 1"
}
Link {
to: Route::Nested { child: ChildRoute::NotStatic { not_static: "this-is-a-child-route".to_string() } },
"Child Link 2"
}
}
}
#[component]
fn NotStatic(not_static: String) -> Element {
unimplemented!()
}
assert_eq!(
prepare_at::<Route>("/"),
"<h1>App</h1><a href=\"/test\">Parent Link</a><a href=\"/child/this-is-a-child-route\">Child Link</a>"
);
assert_eq!(
prepare_at::<Route>("/child"),
"<h1>App</h1><a href=\"/test\">Parent Link</a><a href=\"/child/this-is-a-child-route\">Child Link 1</a><a href=\"/child/this-is-a-child-route\">Child Link 2</a>"
);
}
#[test]
fn with_hash_segment() {
#[derive(Routable, Clone)]
enum Route {
#[route("/#:data")]
Root { data: String },
}
#[component]
fn Root(data: String) -> Element {
rsx! {
Link {
to: Route::Root { data: "test".to_string() },
"Link"
}
Link {
to: Route::Root { data: "".to_string() },
"Empty"
}
}
}
assert_eq!(
prepare_at::<Route>("/#test"),
"<h1>App</h1><a href=\"/#test\" aria-current=\"page\">Link</a><a href=\"/\">Empty</a>"
);
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/tests/via_ssr/child_outlet.rs | packages/router/tests/via_ssr/child_outlet.rs | #![allow(unused)]
use std::rc::Rc;
use dioxus::prelude::*;
use dioxus_history::{History, MemoryHistory};
use dioxus_router::components::HistoryProvider;
fn prepare(path: impl Into<String>) -> VirtualDom {
let mut vdom = VirtualDom::new_with_props(
App,
AppProps {
path: path.into().parse().unwrap(),
},
);
vdom.rebuild_in_place();
return vdom;
#[derive(Routable, Clone, PartialEq)]
#[rustfmt::skip]
enum Route {
#[layout(Layout)]
#[child("/")]
Child { child: ChildRoute },
}
#[derive(Routable, Clone, PartialEq)]
#[rustfmt::skip]
enum ChildRoute{
#[layout(ChildLayout)]
#[route("/")]
RootIndex {}
}
#[component]
fn App(path: Route) -> Element {
rsx! {
h1 { "App" }
HistoryProvider {
history: move |_| Rc::new(MemoryHistory::with_initial_path(path.clone())) as Rc<dyn History>,
Router::<Route> {}
}
}
}
#[component]
fn RootIndex() -> Element {
rsx! { h2 { "Root Index" } }
}
#[component]
fn Layout() -> Element {
rsx! {
h2 { "parent layout" }
Outlet::<Route> { }
}
}
#[component]
fn ChildLayout() -> Element {
rsx! {
h2 { "child layout" }
Outlet::<ChildRoute> { }
}
}
}
#[test]
fn root_index() {
let vdom = prepare("/");
let html = dioxus_ssr::render(&vdom);
assert_eq!(
html,
"<h1>App</h1><h2>parent layout</h2><h2>child layout</h2><h2>Root Index</h2>"
);
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/tests/via_ssr/redirect.rs | packages/router/tests/via_ssr/redirect.rs | use dioxus::prelude::*;
use dioxus_history::{History, MemoryHistory};
use dioxus_router::components::HistoryProvider;
use std::{rc::Rc, str::FromStr};
// Tests for regressions of <https://github.com/DioxusLabs/dioxus/issues/2549>
#[test]
fn redirects_apply_in_order() {
let path = Route::from_str("/").unwrap();
assert_eq!(
path,
Route::Home {
lang: "en".to_string()
}
);
let mut vdom = VirtualDom::new_with_props(App, AppProps { path });
vdom.rebuild_in_place();
let as_string = dioxus_ssr::render(&vdom);
assert_eq!(as_string, "en");
}
#[derive(Clone, Routable, Debug, PartialEq)]
enum Route {
// The redirect should try to parse first because it is placed first in the enum
#[redirect("/", || Route::Home { lang: "en".to_string() })]
#[route("/?:lang")]
Home { lang: String },
}
#[component]
fn Home(lang: String) -> Element {
rsx! { "{lang}" }
}
#[component]
fn App(path: Route) -> Element {
rsx! {
HistoryProvider {
history: move |_| Rc::new(MemoryHistory::with_initial_path(path.clone())) as Rc<dyn History>,
Router::<Route> {}
}
}
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/tests/via_ssr/navigation.rs | packages/router/tests/via_ssr/navigation.rs | use dioxus::prelude::*;
use dioxus_core::NoOpMutations;
use std::sync::atomic::AtomicUsize;
// Regression test for <https://github.com/DioxusLabs/dioxus/issues/3235>
#[test]
fn layout_retains_state_after_navigation() {
let mut vdom = VirtualDom::new(app);
vdom.rebuild_in_place();
vdom.render_immediate(&mut NoOpMutations);
let as_string = dioxus_ssr::render(&vdom);
assert_eq!(as_string, "Other");
}
fn app() -> Element {
rsx! {
Router::<Route> {}
}
}
// Turn off rustfmt since we're doing layouts and routes in the same enum
#[derive(Routable, Clone, Debug, PartialEq)]
#[rustfmt::skip]
enum Route {
// Wrap Home in a Navbar Layout
#[layout(NavBar)]
// The default route is always "/" unless otherwise specified
#[route("/")]
Home {},
#[route("/other")]
Other {},
}
#[component]
fn NavBar() -> Element {
static NAVBARS_CREATED: AtomicUsize = AtomicUsize::new(0);
use_hook(|| {
let navbars_created = NAVBARS_CREATED.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
println!("creating navbar #{navbars_created}");
if navbars_created > 0 {
panic!("layouts should not be recreated when switching between two routes under the nav bar");
}
});
// Queue an effect to navigate to the other route after rebuild_in_place
use_effect(|| {
router().push(Route::Other {});
});
rsx! {
Outlet::<Route> {}
}
}
#[component]
fn Home() -> Element {
rsx! {
"Home!"
}
}
#[component]
fn Other() -> Element {
rsx! {
"Other"
}
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/tests/via_ssr/outlet.rs | packages/router/tests/via_ssr/outlet.rs | #![allow(unused)]
use std::rc::Rc;
use dioxus::prelude::*;
use dioxus_history::{History, MemoryHistory};
use dioxus_router::components::HistoryProvider;
fn prepare(path: impl Into<String>) -> VirtualDom {
let mut vdom = VirtualDom::new_with_props(
App,
AppProps {
path: path.into().parse().unwrap(),
},
);
vdom.rebuild_in_place();
return vdom;
#[derive(Routable, Clone, PartialEq)]
#[rustfmt::skip]
enum Route {
#[route("/")]
RootIndex {},
#[nest("/fixed")]
#[layout(Fixed)]
#[route("/")]
FixedIndex {},
#[route("/fixed")]
FixedFixed {},
#[end_layout]
#[end_nest]
#[nest("/:id")]
#[layout(Parameter)]
#[route("/")]
ParameterIndex { id: u8 },
#[route("/fixed")]
ParameterFixed { id: u8 },
}
#[component]
fn App(path: Route) -> Element {
rsx! {
h1 { "App" }
HistoryProvider {
history: move |_| Rc::new(MemoryHistory::with_initial_path(path.clone())) as Rc<dyn History>,
Router::<Route> {}
}
}
}
#[component]
fn RootIndex() -> Element {
rsx! { h2 { "Root Index" } }
}
#[component]
fn Fixed() -> Element {
rsx! {
h2 { "Fixed" }
Outlet::<Route> { }
}
}
#[component]
fn FixedIndex() -> Element {
rsx! { h3 { "Fixed - Index" } }
}
#[component]
fn FixedFixed() -> Element {
rsx! { h3 { "Fixed - Fixed"} }
}
#[component]
fn Parameter(id: u8) -> Element {
rsx! {
h2 { "Parameter {id}" }
Outlet::<Route> { }
}
}
#[component]
fn ParameterIndex(id: u8) -> Element {
rsx! { h3 { "Parameter - Index" } }
}
#[component]
fn ParameterFixed(id: u8) -> Element {
rsx! { h3 { "Parameter - Fixed" } }
}
}
#[test]
fn root_index() {
let vdom = prepare("/");
let html = dioxus_ssr::render(&vdom);
assert_eq!(html, "<h1>App</h1><h2>Root Index</h2>");
}
#[test]
fn fixed() {
let vdom = prepare("/fixed");
let html = dioxus_ssr::render(&vdom);
assert_eq!(html, "<h1>App</h1><h2>Fixed</h2><h3>Fixed - Index</h3>");
}
#[test]
fn fixed_fixed() {
let vdom = prepare("/fixed/fixed");
let html = dioxus_ssr::render(&vdom);
assert_eq!(html, "<h1>App</h1><h2>Fixed</h2><h3>Fixed - Fixed</h3>");
}
#[test]
fn parameter() {
let vdom = prepare("/18");
let html = dioxus_ssr::render(&vdom);
assert_eq!(
html,
"<h1>App</h1><h2>Parameter 18</h2><h3>Parameter - Index</h3>"
);
}
#[test]
fn parameter_fixed() {
let vdom = prepare("/18/fixed");
let html = dioxus_ssr::render(&vdom);
assert_eq!(
html,
"<h1>App</h1><h2>Parameter 18</h2><h3>Parameter - Fixed</h3>"
);
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/tests/via_ssr/main.rs | packages/router/tests/via_ssr/main.rs | mod child_outlet;
mod link;
mod navigation;
mod outlet;
mod redirect;
mod without_index;
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/router/tests/via_ssr/without_index.rs | packages/router/tests/via_ssr/without_index.rs | use std::rc::Rc;
use dioxus::prelude::*;
use dioxus_history::{History, MemoryHistory};
use dioxus_router::components::HistoryProvider;
// Tests for regressions of <https://github.com/DioxusLabs/dioxus/issues/2468>
#[test]
fn router_without_index_route_parses() {
let mut vdom = VirtualDom::new_with_props(
App,
AppProps {
path: Route::Test {},
},
);
vdom.rebuild_in_place();
let as_string = dioxus_ssr::render(&vdom);
assert_eq!(as_string, "<div>router with no index route renders</div>")
}
#[derive(Routable, Clone, Copy, PartialEq, Debug)]
enum Route {
#[route("/test")]
Test {},
}
#[component]
fn Test() -> Element {
rsx! {
div {
"router with no index route renders"
}
}
}
#[component]
fn App(path: Route) -> Element {
rsx! {
HistoryProvider {
history: move |_| Rc::new(MemoryHistory::with_initial_path(path)) as Rc<dyn History>,
Router::<Route> {}
}
}
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/build.rs | packages/cli/build.rs | fn main() {
built::write_built_file().expect("Failed to acquire build-time information");
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/wasm_bindgen.rs | packages/cli/src/wasm_bindgen.rs | use crate::{CliSettings, Result, Workspace};
use anyhow::{anyhow, Context};
use flate2::read::GzDecoder;
use std::path::{Path, PathBuf};
use tar::Archive;
use tempfile::TempDir;
use tokio::process::Command;
pub(crate) struct WasmBindgen {
version: String,
input_path: PathBuf,
out_dir: PathBuf,
out_name: String,
target: String,
debug: bool,
keep_debug: bool,
demangle: bool,
remove_name_section: bool,
remove_producers_section: bool,
keep_lld_exports: bool,
}
impl WasmBindgen {
    /// Create a new builder pinned to a specific wasm-bindgen-cli version.
    ///
    /// Paths and names default to empty and must be filled in with the
    /// chainable setters before calling [`Self::run`].
    pub(crate) fn new(version: &str) -> Self {
        Self {
            version: version.to_string(),
            input_path: PathBuf::new(),
            out_dir: PathBuf::new(),
            out_name: String::new(),
            target: String::new(),
            debug: true,
            keep_debug: true,
            demangle: true,
            remove_name_section: false,
            remove_producers_section: false,
            keep_lld_exports: false,
        }
    }

    /// Set the input `.wasm` module to process.
    pub(crate) fn input_path(self, input_path: &Path) -> Self {
        Self {
            input_path: input_path.to_path_buf(),
            ..self
        }
    }

    /// Set the output directory for the generated artifacts.
    pub(crate) fn out_dir(self, out_dir: &Path) -> Self {
        Self {
            out_dir: out_dir.to_path_buf(),
            ..self
        }
    }

    /// Set the base name (`--out-name`) for the generated artifacts.
    pub(crate) fn out_name(self, out_name: &str) -> Self {
        Self {
            out_name: out_name.to_string(),
            ..self
        }
    }

    /// Set the bindgen target flavor (`--target`), e.g. "web".
    pub(crate) fn target(self, target: &str) -> Self {
        Self {
            target: target.to_string(),
            ..self
        }
    }

    /// Toggle `--debug`.
    pub(crate) fn debug(self, debug: bool) -> Self {
        Self { debug, ..self }
    }

    /// Toggle `--keep-debug`.
    pub(crate) fn keep_debug(self, keep_debug: bool) -> Self {
        Self { keep_debug, ..self }
    }

    /// Toggle symbol demangling (`--no-demangle` when false).
    pub(crate) fn demangle(self, demangle: bool) -> Self {
        Self { demangle, ..self }
    }

    /// Toggle `--remove-name-section`.
    pub(crate) fn remove_name_section(self, remove_name_section: bool) -> Self {
        Self {
            remove_name_section,
            ..self
        }
    }

    /// Toggle `--remove-producers-section`.
    pub(crate) fn remove_producers_section(self, remove_producers_section: bool) -> Self {
        Self {
            remove_producers_section,
            ..self
        }
    }

    /// Toggle `--keep-lld-exports`.
    pub(crate) fn keep_lld_sections(self, keep_lld_sections: bool) -> Self {
        Self {
            keep_lld_exports: keep_lld_sections,
            ..self
        }
    }

    /// Run the bindgen command with the current settings
    ///
    /// Returns the raw process output; stderr is logged (as a warning on
    /// success, as an error on failure) but a non-zero exit status is NOT
    /// turned into an `Err` here — callers inspect `output.status`.
    pub(crate) async fn run(&self) -> Result<std::process::Output> {
        let binary = self.get_binary_path()?;

        let mut args = Vec::new();

        // Target
        args.push("--target");
        args.push(&self.target);

        // Options
        if self.debug {
            args.push("--debug");
        }

        if !self.demangle {
            args.push("--no-demangle");
        }

        if self.keep_debug {
            args.push("--keep-debug");
        }

        if self.remove_name_section {
            args.push("--remove-name-section");
        }

        if self.remove_producers_section {
            args.push("--remove-producers-section");
        }

        if self.keep_lld_exports {
            args.push("--keep-lld-exports");
        }

        // Out name
        args.push("--out-name");
        args.push(&self.out_name);

        // wbg generates typescript bindings by default - we don't want those
        args.push("--no-typescript");

        // Out dir
        let out_dir = self
            .out_dir
            .to_str()
            // FIX: this expect previously said "input_path", which made the
            // panic message point at the wrong field.
            .expect("out_dir should be valid utf8");
        args.push("--out-dir");
        args.push(out_dir);

        // Input path
        let input_path = self
            .input_path
            .to_str()
            .expect("input_path should be valid utf8");
        args.push(input_path);

        tracing::debug!("wasm-bindgen: {:#?}", args);

        // Run bindgen
        let output = Command::new(binary).args(args).output().await?;

        // Check for errors
        if !output.stderr.is_empty() {
            if output.status.success() {
                tracing::debug!(
                    "wasm-bindgen warnings: {}",
                    String::from_utf8_lossy(&output.stderr)
                );
            } else {
                tracing::error!(
                    "wasm-bindgen error: {}",
                    String::from_utf8_lossy(&output.stderr)
                );
            }
        }

        Ok(output)
    }

    /// Verify the installed version of wasm-bindgen-cli
    ///
    /// For local installations, this will check that the installed version matches the specified version.
    /// For managed installations, this will check that the version managed by `dx` is the specified version.
    pub async fn verify_install(version: &str) -> anyhow::Result<()> {
        let settings = Self::new(version);
        if CliSettings::prefer_no_downloads() {
            settings.verify_local_install().await
        } else {
            settings.verify_managed_install().await
        }
    }

    /// Install the specified wasm-bingen version.
    ///
    /// This will overwrite any existing wasm-bindgen binaries of the same version.
    ///
    /// This will attempt to install wasm-bindgen from:
    /// 1. Direct GitHub release download.
    /// 2. `cargo binstall` if installed.
    /// 3. Compile from source with `cargo install`.
    async fn install(&self) -> anyhow::Result<()> {
        tracing::info!("Installing wasm-bindgen-cli@{}...", self.version);

        // Attempt installation from GitHub
        if let Err(e) = self.install_github().await {
            tracing::error!("Failed to install wasm-bindgen-cli@{}: {e}", self.version);
        } else {
            tracing::info!(
                "wasm-bindgen-cli@{} was successfully installed from GitHub.",
                self.version
            );
            return Ok(());
        }

        // Attempt installation from binstall.
        if let Err(e) = self.install_binstall().await {
            tracing::error!("Failed to install wasm-bindgen-cli@{}: {e}", self.version);
            tracing::info!("Failed to install prebuilt binary for wasm-bindgen-cli@{}. Compiling from source instead. This may take a while.", self.version);
        } else {
            tracing::info!(
                "wasm-bindgen-cli@{} was successfully installed from cargo-binstall.",
                self.version
            );
            return Ok(());
        }

        // Attempt installation from cargo.
        self.install_cargo()
            .await
            .context("failed to install wasm-bindgen-cli from cargo")?;

        tracing::info!(
            "wasm-bindgen-cli@{} was successfully installed from source.",
            self.version
        );

        Ok(())
    }

    /// Download a prebuilt binary from the wasm-bindgen GitHub releases page
    /// and unpack it directly into the managed tool directory.
    async fn install_github(&self) -> anyhow::Result<()> {
        tracing::debug!(
            "Attempting to install wasm-bindgen-cli@{} from GitHub",
            self.version
        );

        let url = self.git_install_url().ok_or_else(|| {
            anyhow!(
                "no available GitHub binary for wasm-bindgen-cli@{}",
                self.version
            )
        })?;

        // Get the final binary location.
        let binary_path = self.get_binary_path()?;

        // Download then extract wasm-bindgen-cli.
        let bytes = reqwest::get(url).await?.bytes().await?;

        // Unpack the first tar entry to the final binary location
        Archive::new(GzDecoder::new(bytes.as_ref()))
            .entries()?
            .find(|entry| {
                entry
                    .as_ref()
                    .map(|e| {
                        e.path_bytes()
                            .ends_with(self.downloaded_bin_name().as_bytes())
                    })
                    .unwrap_or(false)
            })
            .context("Failed to find entry")??
            .unpack(&binary_path)
            .context("failed to unpack wasm-bindgen-cli binary")?;

        Ok(())
    }

    /// Install via `cargo binstall` into a temp dir, then copy the binary
    /// into the managed tool directory.
    async fn install_binstall(&self) -> anyhow::Result<()> {
        tracing::debug!(
            "Attempting to install wasm-bindgen-cli@{} from cargo-binstall",
            self.version
        );

        let package = self.cargo_bin_name();
        let tempdir = TempDir::new()?;

        // Run install command
        // NOTE(review): the exit status of `cargo binstall` is not checked here;
        // a failed install only surfaces as the fs::copy below failing. Confirm
        // whether the status should be checked explicitly.
        Command::new("cargo")
            .args([
                "binstall",
                &package,
                "--no-confirm",
                "--force",
                "--no-track",
                "--install-path",
            ])
            .arg(tempdir.path())
            .output()
            .await?;

        std::fs::copy(
            tempdir.path().join(self.downloaded_bin_name()),
            self.get_binary_path()?,
        )?;

        Ok(())
    }

    /// Build from source with `cargo install` into a temp dir, then copy the
    /// binary into the managed tool directory. Slowest fallback.
    async fn install_cargo(&self) -> anyhow::Result<()> {
        tracing::debug!(
            "Attempting to install wasm-bindgen-cli@{} from cargo-install",
            self.version
        );
        let package = self.cargo_bin_name();
        let tempdir = TempDir::new()?;

        // Run install command
        Command::new("cargo")
            .args([
                "install",
                &package,
                "--bin",
                "wasm-bindgen",
                "--no-track",
                "--force",
                "--root",
            ])
            .arg(tempdir.path())
            .output()
            .await
            .context("failed to install wasm-bindgen-cli from cargo-install")?;

        tracing::info!("Copying into path: {}", tempdir.path().display());

        // copy the wasm-bindgen out of the tempdir to the final location
        std::fs::copy(
            tempdir.path().join("bin").join(self.downloaded_bin_name()),
            self.get_binary_path()?,
        )
        .context("failed to copy wasm-bindgen binary")?;

        Ok(())
    }

    /// Check that a PATH-installed `wasm-bindgen` reports exactly the
    /// required version; error otherwise.
    async fn verify_local_install(&self) -> anyhow::Result<()> {
        tracing::trace!(
            "Verifying wasm-bindgen-cli@{} is installed in the path",
            self.version
        );
        let binary = self.get_binary_path()?;

        let output = Command::new(binary)
            .args(["--version"])
            .output()
            .await
            .context("Failed to check wasm-bindgen-cli version")?;

        let stdout = String::from_utf8(output.stdout)
            .context("Failed to extract wasm-bindgen-cli output")?;

        // `wasm-bindgen --version` prints "wasm-bindgen <version>".
        let installed_version = stdout.trim_start_matches("wasm-bindgen").trim();
        if installed_version != self.version {
            return Err(anyhow!(
                "Incorrect wasm-bindgen-cli version: project requires version {} but version {} is installed",
                self.version,
                installed_version,
            ));
        }

        Ok(())
    }

    /// Ensure the dx-managed binary for this version exists, installing it
    /// on demand if missing.
    async fn verify_managed_install(&self) -> anyhow::Result<()> {
        tracing::trace!(
            "Verifying wasm-bindgen-cli@{} is installed in the tool directory",
            self.version
        );

        let binary_name = self.installed_bin_name();
        let path = self.install_dir()?.join(binary_name);

        if !path.exists() {
            self.install().await?;
        }

        Ok(())
    }

    /// Resolve the wasm-bindgen binary to invoke: the PATH binary when
    /// downloads are disabled, otherwise the dx-managed, version-suffixed one.
    pub fn get_binary_path(&self) -> anyhow::Result<PathBuf> {
        if CliSettings::prefer_no_downloads() {
            which::which("wasm-bindgen")
                .map_err(|_| anyhow!("Missing wasm-bindgen-cli@{}", self.version))
        } else {
            let installed_name = self.installed_bin_name();
            let install_dir = self.install_dir()?;
            Ok(install_dir.join(installed_name))
        }
    }

    /// Directory where dx keeps its managed wasm-bindgen binaries (created on demand).
    fn install_dir(&self) -> anyhow::Result<PathBuf> {
        let bindgen_dir = Workspace::dioxus_data_dir().join("wasm-bindgen/");

        std::fs::create_dir_all(&bindgen_dir)?;

        Ok(bindgen_dir)
    }

    /// Version-suffixed file name used in the managed tool directory,
    /// e.g. `wasm-bindgen-0.2.99[.exe]`.
    fn installed_bin_name(&self) -> String {
        let mut name = format!("wasm-bindgen-{}", self.version);
        if cfg!(windows) {
            name = format!("{name}.exe");
        }
        name
    }

    /// Versioned cargo package spec, e.g. `wasm-bindgen-cli@0.2.99`.
    fn cargo_bin_name(&self) -> String {
        format!("wasm-bindgen-cli@{}", self.version)
    }

    /// Binary name as shipped inside release archives / cargo install dirs.
    fn downloaded_bin_name(&self) -> &'static str {
        if cfg!(windows) {
            "wasm-bindgen.exe"
        } else {
            "wasm-bindgen"
        }
    }

    /// GitHub release-asset URL for the host platform, or `None` when no
    /// prebuilt binary exists for this OS/arch combination.
    fn git_install_url(&self) -> Option<String> {
        let platform = if cfg!(all(target_os = "windows", target_arch = "x86_64")) {
            "x86_64-pc-windows-msvc"
        } else if cfg!(all(target_os = "linux", target_arch = "x86_64")) {
            "x86_64-unknown-linux-musl"
        } else if cfg!(all(target_os = "linux", target_arch = "aarch64")) {
            "aarch64-unknown-linux-gnu"
        } else if cfg!(all(target_os = "macos", target_arch = "x86_64")) {
            "x86_64-apple-darwin"
        } else if cfg!(all(target_os = "macos", target_arch = "aarch64")) {
            "aarch64-apple-darwin"
        } else {
            return None;
        };

        Some(format!(
            "https://github.com/rustwasm/wasm-bindgen/releases/download/{}/wasm-bindgen-{}-{}.tar.gz",
            self.version, self.version, platform
        ))
    }
}
#[cfg(test)]
mod test {
    use super::*;

    // wasm-bindgen-cli version exercised by all install tests below.
    const VERSION: &str = "0.2.99";

    /// Test the github installer.
    #[tokio::test]
    async fn test_github_install() {
        // Install tests hit the network and filesystem; only run when explicitly enabled.
        if !crate::devcfg::test_installs() {
            return;
        }
        let binary = WasmBindgen::new(VERSION);
        reset_test().await;
        binary.install_github().await.unwrap();
        test_verify_install().await;
        verify_installation(&binary).await;
    }

    /// Test the cargo installer.
    #[tokio::test]
    async fn test_cargo_install() {
        if !crate::devcfg::test_installs() {
            return;
        }
        let binary = WasmBindgen::new(VERSION);
        reset_test().await;
        binary.install_cargo().await.unwrap();
        test_verify_install().await;
        verify_installation(&binary).await;
    }

    // CI doesn't have binstall.
    // Test the binstall installer
    #[tokio::test]
    async fn test_binstall_install() {
        if !crate::devcfg::test_installs() {
            return;
        }
        let binary = WasmBindgen::new(VERSION);
        reset_test().await;
        binary.install_binstall().await.unwrap();
        test_verify_install().await;
        verify_installation(&binary).await;
    }

    /// Helper to test `verify_install` after an installation.
    async fn test_verify_install() {
        WasmBindgen::verify_install(VERSION).await.unwrap();
    }

    /// Helper to test that the installed binary actually exists.
    async fn verify_installation(binary: &WasmBindgen) {
        let path = binary.install_dir().unwrap();
        let name = binary.installed_bin_name();
        let binary_path = path.join(name);
        assert!(
            binary_path.exists(),
            "wasm-bindgen binary doesn't exist after installation"
        );
    }

    /// Delete the installed binary. The temp folder should be automatically deleted.
    async fn reset_test() {
        let binary = WasmBindgen::new(VERSION);
        let path = binary.install_dir().unwrap();
        let name = binary.installed_bin_name();
        let binary_path = path.join(name);
        // Ignore the error: the file may simply not exist yet.
        let _ = std::fs::remove_file(binary_path);
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/settings.rs | packages/cli/src/settings.rs | use crate::{Result, TraceSrc};
use anyhow::bail;
use serde::{Deserialize, Serialize};
use std::sync::LazyLock;
use std::{fs, path::PathBuf, sync::Arc};
use tracing::{error, trace, warn};
/// Describes cli settings from project or global level.
/// The order of priority goes:
/// 1. CLI Flags/Arguments
/// 2. Project-level Settings
/// 3. Global-level settings.
///
/// This allows users to control the cli settings with ease.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub(crate) struct CliSettings {
    /// Describes whether hot reload should always be on.
    pub(crate) always_hot_reload: Option<bool>,
    /// Describes whether the CLI should always open the browser for Web targets.
    pub(crate) always_open_browser: Option<bool>,
    /// Describes whether desktop apps in development will be pinned always-on-top.
    pub(crate) always_on_top: Option<bool>,
    /// Describes the interval in seconds that the CLI should poll for file changes on WSL.
    // When the key is absent from the file, serde fills in
    // `default_wsl_file_poll_interval()` (Some(2)) instead of None.
    #[serde(default = "default_wsl_file_poll_interval")]
    pub(crate) wsl_file_poll_interval: Option<u16>,
    /// Use tooling from path rather than downloading them.
    pub(crate) no_downloads: Option<bool>,
    /// Ignore updates for this version
    pub(crate) ignore_version_update: Option<String>,
    /// Disable telemetry
    pub(crate) disable_telemetry: Option<bool>,
}
impl CliSettings {
    /// Load the settings from the local, global, or default config in that order
    ///
    /// The result is computed once per process and cached behind a `LazyLock`.
    pub(crate) fn load() -> Arc<Self> {
        static SETTINGS: LazyLock<Arc<CliSettings>> =
            LazyLock::new(|| Arc::new(CliSettings::global_or_default()));
        SETTINGS.clone()
    }

    /// Read the global settings file, falling back to `CliSettings::default()`
    /// when it is missing or unparsable.
    pub fn global_or_default() -> Self {
        CliSettings::from_global().unwrap_or_default()
    }

    /// Get the path to the settings toml file.
    pub(crate) fn get_settings_path() -> PathBuf {
        crate::Workspace::global_settings_file()
    }

    /// Get the current settings structure from global.
    ///
    /// Returns `None` (with a log message) when the file is absent, unreadable,
    /// or fails to parse — callers treat all three the same way.
    pub(crate) fn from_global() -> Option<Self> {
        let settings = crate::Workspace::global_settings_file();
        if !settings.exists() {
            trace!("global settings file does not exist, returning None");
            return None;
        }

        // IDIOM: match on `Ok(..)` directly instead of `.ok()` + `Some(..)`.
        let Ok(data) = fs::read_to_string(&settings) else {
            warn!("failed to read global settings file");
            return None;
        };

        let Ok(data) = toml::from_str::<CliSettings>(&data) else {
            warn!("failed to parse global settings file");
            return None;
        };

        Some(data)
    }

    /// Save the current structure to the global settings toml.
    /// This does not save to project-level settings.
    pub(crate) fn save(&self) -> Result<()> {
        let path = Self::get_settings_path();
        let data = toml::to_string_pretty(&self).map_err(|e| {
            error!(dx_src = ?TraceSrc::Dev, ?self, "failed to parse config into toml");
            anyhow::anyhow!("failed to parse config into toml: {e}")
        })?;

        // Create the directory structure if it doesn't exist.
        let parent_path = path.parent().unwrap();
        if let Err(e) = fs::create_dir_all(parent_path) {
            error!(
                dx_src = ?TraceSrc::Dev,
                ?data,
                ?path,
                "failed to create directories for settings file"
            );
            bail!("failed to create directories for settings file: {e}");
        }

        // Write the data. `fs::write` only needs a byte slice, so pass `&data`
        // (previously this cloned the whole string) — we still own `data` for
        // the error log below.
        if let Err(e) = fs::write(&path, &data) {
            error!(?data, ?path, "failed to save global cli settings");
            bail!("failed to save global cli settings: {e}");
        }

        Ok(())
    }

    /// Modify the settings toml file - doesn't change the settings for this session
    pub(crate) fn modify_settings(with: impl FnOnce(&mut CliSettings)) -> Result<()> {
        let mut _settings = CliSettings::load();
        // Clone-on-write: detach from the cached Arc before mutating.
        let settings: &mut CliSettings = Arc::make_mut(&mut _settings);
        with(settings);
        settings.save()?;
        Ok(())
    }

    /// Check if we should prefer to use the no-downloads feature
    ///
    /// Order: compile-time feature (release builds only), then the
    /// NO_DOWNLOADS env var, then the persisted setting.
    pub(crate) fn prefer_no_downloads() -> bool {
        if cfg!(feature = "no-downloads") && !cfg!(debug_assertions) {
            return true;
        }

        if std::env::var("NO_DOWNLOADS").is_ok() {
            return true;
        }

        CliSettings::load().no_downloads.unwrap_or_default()
    }

    /// Check if telemetry is disabled
    ///
    /// Checked once per process: compile-time feature, then the
    /// DX_TELEMETRY_ENABLED / TELEMETRY env vars, then the persisted setting.
    pub(crate) fn telemetry_disabled() -> bool {
        use std::env::var;

        static TELEMETRY_DISABLED: LazyLock<bool> = LazyLock::new(|| {
            if cfg!(feature = "disable-telemetry") {
                return true;
            }

            if matches!(var("DX_TELEMETRY_ENABLED"), Ok(val) if val.eq_ignore_ascii_case("false") || val == "0")
            {
                return true;
            }

            if matches!(var("TELEMETRY"), Ok(val) if val.eq_ignore_ascii_case("false") || val == "0")
            {
                return true;
            }

            CliSettings::load().disable_telemetry.unwrap_or_default()
        });

        *TELEMETRY_DISABLED
    }

    /// Detect CI environments via the conventional `CI` env var (or `DX_CI`).
    /// Cached for the lifetime of the process.
    pub(crate) fn is_ci() -> bool {
        static CI: LazyLock<bool> = LazyLock::new(|| {
            if matches!(std::env::var("CI"), Ok(val) if val.eq_ignore_ascii_case("true") || val == "1")
            {
                return true;
            }
            if matches!(std::env::var("DX_CI"), Ok(val) if val.eq_ignore_ascii_case("true") || val == "1")
            {
                return true;
            }
            false
        });

        *CI
    }
}
/// Serde default for `CliSettings::wsl_file_poll_interval`: poll every 2 seconds.
fn default_wsl_file_poll_interval() -> Option<u16> {
    const DEFAULT_POLL_SECS: u16 = 2;
    Some(DEFAULT_POLL_SECS)
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/workspace.rs | packages/cli/src/workspace.rs | use crate::styles::GLOW_STYLE;
use crate::CliSettings;
use crate::Result;
use crate::{config::DioxusConfig, AndroidTools};
use anyhow::{bail, Context};
use ignore::gitignore::Gitignore;
use krates::{semver::Version, KrateDetails, LockOptions};
use krates::{Cmd, Krates, NodeId};
use std::sync::Arc;
use std::{collections::HashSet, path::Path};
use std::{path::PathBuf, time::Duration};
use target_lexicon::Triple;
use tokio::process::Command;
/// A cached snapshot of the cargo workspace plus the host-toolchain details
/// the CLI needs while building (sysroot, rustc version, optional tools).
pub struct Workspace {
    // Resolved dependency graph produced by `cargo metadata` (via `krates`).
    pub(crate) krates: Krates,
    // Global CLI settings loaded when the workspace was initialized.
    pub(crate) settings: CliSettings,
    // Path to a `wasm-opt` binary found on PATH, if any.
    pub(crate) wasm_opt: Option<PathBuf>,
    // rustc sysroot (trimmed output of `rustc --print sysroot`); used to
    // locate the bundled linkers and rust-objcopy.
    pub(crate) sysroot: PathBuf,
    // Trimmed output of `rustc --version`.
    pub(crate) rustc_version: String,
    // Gitignore matcher built from the workspace `.gitignore` plus defaults.
    pub(crate) ignore: Gitignore,
    // Parsed workspace-root Cargo.toml manifest.
    pub(crate) cargo_toml: cargo_toml::Manifest,
    // Android toolchain, when one was detected.
    pub(crate) android_tools: Option<Arc<AndroidTools>>,
}
impl Workspace {
    /// Load the workspace from the current directory. This is cached and will only be loaded once.
    pub async fn current() -> Result<Arc<Workspace>> {
        static WS: tokio::sync::Mutex<Option<Arc<Workspace>>> = tokio::sync::Mutex::const_new(None);

        // Lock the workspace to prevent multiple threads from loading it at the same time
        // If loading the workspace failed the first time, it won't be set and therefore permeate an error.
        let mut lock = WS.lock().await;
        if let Some(ws) = lock.as_ref() {
            return Ok(ws.clone());
        }

        // `cargo metadata` can block for a long time, so run it off the async workers.
        let krates_future = tokio::task::spawn_blocking(|| {
            let manifest_options = crate::logging::VERBOSITY.get().unwrap();
            let lock_options = LockOptions {
                frozen: manifest_options.frozen,
                locked: manifest_options.locked,
                offline: manifest_options.offline,
            };
            let mut cmd = Cmd::new();
            cmd.lock_opts(lock_options);

            let mut builder = krates::Builder::new();
            builder.workspace(true);
            let res = builder.build(cmd, |_| {})?;

            // Test hook: artificially delay metadata to exercise the spinner below.
            if !lock_options.offline {
                if let Ok(res) = std::env::var("SIMULATE_SLOW_NETWORK") {
                    std::thread::sleep(Duration::from_secs(res.parse().unwrap_or(5)));
                }
            }

            Ok(res) as Result<Krates, krates::Error>
        });

        // Progress chatter printed while cargo-metadata runs; also acts as the
        // overall timeout (~200s) via the select! below.
        let spin_future = async move {
            tokio::time::sleep(Duration::from_millis(1000)).await;
            eprintln!("{GLOW_STYLE}warning{GLOW_STYLE:#}: Waiting for cargo-metadata...");
            tokio::time::sleep(Duration::from_millis(2000)).await;
            for x in 1..=100 {
                tokio::time::sleep(Duration::from_millis(2000)).await;
                eprintln!("{GLOW_STYLE}warning{GLOW_STYLE:#}: (Try {x}) Taking a while...");
                if x % 10 == 0 {
                    eprintln!("{GLOW_STYLE}warning{GLOW_STYLE:#}: maybe check your network connection or build lock?");
                }
            }
        };

        let krates = tokio::select! {
            f = krates_future => {
                let res = f?;
                // Surface cargo-metadata's own message without the krates wrapper noise.
                if let Err(krates::Error::Metadata(e)) = res {
                    bail!("{e}");
                }
                res?
            },
            _ = spin_future => bail!("cargo metadata took too long to respond, try again with --offline"),
        };

        let settings = CliSettings::global_or_default();
        let sysroot = Self::get_rustc_sysroot()
            .await
            .context("Failed to get rustc sysroot")?;
        let rustc_version = Self::get_rustc_version()
            .await
            .context("Failed to get rustc version")?;
        let wasm_opt = which::which("wasm-opt").ok();
        let ignore = Self::workspace_gitignore(krates.workspace_root().as_std_path());

        let cargo_toml = crate::cargo_toml::load_manifest_from_path(
            krates.workspace_root().join("Cargo.toml").as_std_path(),
        )
        .context("Failed to load Cargo.toml")?;

        let android_tools = crate::build::get_android_tools();

        let workspace = Arc::new(Self {
            krates,
            settings,
            wasm_opt,
            sysroot: sysroot.trim().into(),
            rustc_version: rustc_version.trim().into(),
            ignore,
            cargo_toml,
            android_tools,
        });

        tracing::debug!(
            r#"Initialized workspace:
• sysroot: {sysroot}
• rustc version: {rustc_version}
• workspace root: {workspace_root}
• dioxus versions: [{dioxus_versions:?}]"#,
            sysroot = workspace.sysroot.display(),
            rustc_version = workspace.rustc_version,
            workspace_root = workspace.workspace_root().display(),
            dioxus_versions = workspace
                .dioxus_versions()
                .iter()
                .map(|v| v.to_string())
                .collect::<Vec<_>>()
                .join(", ")
        );

        // Cache for every subsequent `current()` call in this process.
        lock.replace(workspace.clone());

        Ok(workspace)
    }

    /// Return the detected Android toolchain, or an actionable setup error.
    pub fn android_tools(&self) -> Result<Arc<AndroidTools>> {
        self
            .android_tools
            .clone()
            .context("Android not installed properly. Please set the `ANDROID_NDK_HOME` environment variable to the root of your NDK installation.")
    }

    /// Returns true when `profile` is "release", "*-release", or transitively
    /// inherits from "release" in the workspace manifest.
    pub fn is_release_profile(&self, profile: &str) -> bool {
        // If the profile is "release" or ends with "-release" like the default platform release profiles,
        // always put it in the release category.
        if profile == "release" || profile.ends_with("-release") {
            return true;
        }

        // Check if the profile inherits from release by traversing the `inherits` chain
        let mut current_profile_name = profile;

        // Try to find the current profile in the custom profiles section
        while let Some(profile_settings) = self.cargo_toml.profile.custom.get(current_profile_name)
        {
            // Check what this profile inherits from
            match &profile_settings.inherits {
                // Otherwise, continue checking the profile it inherits from
                Some(inherits_name) => current_profile_name = inherits_name,
                // This profile doesn't explicitly inherit anything, so the chain ends here.
                // Since it didn't lead to "release", return false.
                None => break,
            }
            if current_profile_name == "release" {
                return true;
            }
        }

        false
    }

    /// Log an error when the workspace's dioxus crate versions don't line up
    /// with this CLI's own version (purely advisory — does not abort).
    pub fn check_dioxus_version_against_cli(&self) {
        let dx_semver = Version::parse(env!("CARGO_PKG_VERSION")).unwrap();
        let dioxus_versions = self.dioxus_versions();

        tracing::trace!("dx version: {}", dx_semver);
        tracing::trace!("dioxus versions: {:?}", dioxus_versions);

        // if there are no dioxus versions in the workspace, we don't need to check anything
        // dx is meant to be compatible with non-dioxus projects too.
        if dioxus_versions.is_empty() {
            return;
        }

        let min = dioxus_versions.iter().min().unwrap();
        let max = dioxus_versions.iter().max().unwrap();

        // If the minimum dioxus version is greater than the current cli version, warn the user
        if min > &dx_semver
            || max < &dx_semver
            || dioxus_versions.iter().any(|f| f.pre != dx_semver.pre)
        {
            tracing::error!(
                r#"🚫dx and dioxus versions are incompatible!
• dx version: {dx_semver}
• dioxus versions: [{}]"#,
                dioxus_versions
                    .iter()
                    .map(|v| v.to_string())
                    .collect::<Vec<_>>()
                    .join(", ")
            );
        }
    }

    /// Get all the versions of dioxus in the workspace
    pub fn dioxus_versions(&self) -> Vec<Version> {
        // Deduplicate through a set, then sort for deterministic output.
        let mut versions = HashSet::new();
        for krate in self.krates.krates() {
            if krate.name == "dioxus" {
                versions.insert(krate.version.clone());
            }
        }
        let mut versions = versions.into_iter().collect::<Vec<_>>();
        versions.sort();
        versions
    }

    /// Path to the `rust-lld` linker bundled with the active toolchain.
    #[allow(unused)]
    pub fn rust_lld(&self) -> PathBuf {
        self.sysroot
            .join("lib")
            .join("rustlib")
            .join(Triple::host().to_string())
            .join("bin")
            .join("rust-lld")
    }

    /// Return the path to the `cc` compiler
    ///
    /// This is used for the patching system to run the linker.
    /// We could also just use lld given to us by rust itself.
    pub fn cc(&self) -> PathBuf {
        PathBuf::from("cc")
    }

    /// The windows linker
    pub fn lld_link(&self) -> PathBuf {
        self.gcc_ld_dir().join("lld-link")
    }

    /// Path to the toolchain's `wasm-ld` linker wrapper.
    pub fn wasm_ld(&self) -> PathBuf {
        self.gcc_ld_dir().join("wasm-ld")
    }

    /// Locate a ranlib binary on PATH, if any.
    pub fn select_ranlib() -> Option<PathBuf> {
        // prefer the modern llvm-ranlib if they have it
        which::which("llvm-ranlib")
            .or_else(|_| which::which("ranlib"))
            .ok()
    }

    /// Return the version of the wasm-bindgen crate if it exists
    pub fn wasm_bindgen_version(&self) -> Option<String> {
        self.krates
            .krates_by_name("wasm-bindgen")
            .next()
            .map(|krate| krate.krate.version.to_string())
    }

    // wasm-ld: ./rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/wasm-ld
    // rust-lld: ./rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/rust-lld
    // Directory holding the gcc-ld linker wrappers inside the sysroot.
    fn gcc_ld_dir(&self) -> PathBuf {
        self.sysroot
            .join("lib")
            .join("rustlib")
            .join(Triple::host().to_string())
            .join("bin")
            .join("gcc-ld")
    }

    // wasm-ld: ./rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/wasm-ld
    // rust-lld: ./rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/rust-lld
    /// Path to the toolchain-bundled `rust-objcopy` binary.
    pub fn rustc_objcopy(&self) -> PathBuf {
        self.sysroot
            .join("lib")
            .join("rustlib")
            .join(Triple::host().to_string())
            .join("bin")
            .join("rust-objcopy")
    }

    // ./rustup/toolchains/nightly-x86_64-unknown-linux-gnu/lib
    /// Library dir needed on the dynamic-loader path when running rust-objcopy.
    pub fn rustc_objcopy_dylib_path(&self) -> PathBuf {
        self.sysroot.join("lib")
    }

    /// Find the "main" package in the workspace. There might not be one!
    ///
    /// Resolution order: explicit `--package` name, then workspace
    /// default-members with a binary target, then the member whose manifest
    /// dir is the closest ancestor of the current directory.
    pub fn find_main_package(&self, package: Option<String>) -> Result<NodeId> {
        if let Some(package) = package {
            let mut workspace_members = self.krates.workspace_members();
            let found = workspace_members.find_map(|node| {
                if let krates::Node::Krate { id, krate, .. } = node {
                    if krate.name == package {
                        return Some(id);
                    }
                }
                None
            });

            if found.is_none() {
                tracing::error!("Could not find package {package} in the workspace. Did you forget to add it to the workspace?");
                tracing::error!("Packages in the workspace:");
                for package in self.krates.workspace_members() {
                    if let krates::Node::Krate { krate, .. } = package {
                        tracing::error!("{}", krate.name());
                    }
                }
            }

            let kid = found.ok_or_else(|| anyhow::anyhow!("Failed to find package {package}"))?;

            return Ok(self.krates.nid_for_kid(kid).unwrap());
        };

        // if we have default members specified, try them first
        if let Some(ws) = &self.cargo_toml.workspace {
            for default in &ws.default_members {
                let mut workspace_members = self.krates.workspace_members();
                // NOTE(review): panics if a default-member path doesn't exist on
                // disk — TODO confirm this is acceptable here.
                let default_member_path = std::fs::canonicalize(default).unwrap();
                let found = workspace_members.find_map(|node| {
                    if let krates::Node::Krate { id, krate, .. } = node {
                        // Skip this default member if it doesn't have any binary targets
                        if !krate
                            .targets
                            .iter()
                            .any(|t| t.kind.contains(&krates::cm::TargetKind::Bin))
                        {
                            return None;
                        }
                        let member_path =
                            std::fs::canonicalize(krate.manifest_path.parent().unwrap()).unwrap();
                        if member_path == default_member_path {
                            return Some(id);
                        }
                    }
                    None
                });
                if let Some(kid) = found {
                    return Ok(self.krates.nid_for_kid(kid).unwrap());
                }
            }
        }

        // Otherwise find the package that is the closest parent of the current directory
        let current_dir = std::env::current_dir()?;
        let current_dir = current_dir.as_path();

        // Go through each member and find the path that is a parent of the current directory
        let mut closest_parent = None;
        for member in self.krates.workspace_members() {
            if let krates::Node::Krate { id, krate, .. } = member {
                let member_path = krate.manifest_path.parent().unwrap();
                if let Ok(path) = current_dir.strip_prefix(member_path.as_std_path()) {
                    // Fewer leftover components == the member manifest sits deeper,
                    // i.e. closer to the current directory.
                    let len = path.components().count();
                    match closest_parent {
                        Some((_, closest_parent_len)) => {
                            if len < closest_parent_len {
                                closest_parent = Some((id, len));
                            }
                        }
                        None => {
                            closest_parent = Some((id, len));
                        }
                    }
                }
            }
        }

        let kid = closest_parent
            .map(|(id, _)| id)
            .with_context(|| {
                // Build a helpful error listing every binary package the user could pick.
                let bin_targets = self.krates.workspace_members().filter_map(|krate| match krate {
                    krates::Node::Krate { krate, .. } if krate.targets.iter().any(|t| t.kind.contains(&krates::cm::TargetKind::Bin)) => {
                        Some(format!("- {}", krate.name))
                    }
                    _ => None
                }).collect::<Vec<_>>();
                format!("Failed to find binary package to build.\nYou need to either run dx from inside a binary crate or specify a binary package to build with the `--package` flag. Try building again with one of the binary packages in the workspace:\n{}", bin_targets.join("\n"))
            })?;

        let package = self.krates.nid_for_kid(kid).unwrap();

        Ok(package)
    }

    /// Locate and parse the nearest `Dioxus.toml`, walking up from the
    /// package's manifest dir to the workspace root. `Ok(None)` when absent.
    pub fn load_dioxus_config(&self, package: NodeId) -> Result<Option<DioxusConfig>> {
        // Walk up from the cargo.toml to the root of the workspace looking for Dioxus.toml
        let mut current_dir = self.krates[package]
            .manifest_path
            .parent()
            .unwrap()
            .as_std_path()
            .to_path_buf()
            .canonicalize()?;

        let workspace_path = self
            .krates
            .workspace_root()
            .as_std_path()
            .to_path_buf()
            .canonicalize()?;

        let mut dioxus_conf_file = None;
        while current_dir.starts_with(&workspace_path) {
            // Accept both capitalizations of the config file name.
            let config = ["Dioxus.toml", "dioxus.toml"]
                .into_iter()
                .map(|file| current_dir.join(file))
                .find(|path| path.is_file());

            // Try to find Dioxus.toml in the current directory
            if let Some(new_config) = config {
                dioxus_conf_file = Some(new_config.as_path().to_path_buf());
                break;
            }
            // If we can't find it, go up a directory
            current_dir = current_dir
                .parent()
                .context("Failed to find Dioxus.toml")?
                .to_path_buf();
        }

        let Some(dioxus_conf_file) = dioxus_conf_file else {
            return Ok(None);
        };

        toml::from_str::<DioxusConfig>(&std::fs::read_to_string(&dioxus_conf_file)?)
            .map_err(|err| {
                anyhow::anyhow!("Failed to parse Dioxus.toml at {dioxus_conf_file:?}: {err}")
            })
            .map(Some)
    }

    /// Create a new gitignore map for this target crate
    ///
    /// todo(jon): this is a bit expensive to build, so maybe we should cache it?
    pub fn workspace_gitignore(workspace_dir: &Path) -> Gitignore {
        let mut ignore_builder = ignore::gitignore::GitignoreBuilder::new(workspace_dir);
        ignore_builder.add(workspace_dir.join(".gitignore"));

        // Always exclude the built-in patterns, on top of the user's .gitignore.
        for path in Self::default_ignore_list() {
            ignore_builder
                .add_line(None, path)
                .expect("failed to add path to file excluded");
        }

        ignore_builder.build().unwrap()
    }

    /// Gitignore matcher for a single crate dir, using only the built-in
    /// default patterns (no on-disk .gitignore).
    pub fn ignore_for_krate(&self, path: &Path) -> ignore::gitignore::Gitignore {
        let mut ignore_builder = ignore::gitignore::GitignoreBuilder::new(path);
        for path in Self::default_ignore_list() {
            ignore_builder
                .add_line(None, path)
                .expect("failed to add path to file excluded");
        }
        ignore_builder.build().unwrap()
    }

    /// Built-in patterns always excluded from file watching / asset scanning.
    pub fn default_ignore_list() -> Vec<&'static str> {
        vec![
            ".git",
            ".github",
            ".vscode",
            "target",
            "node_modules",
            "dist",
            "*~",
            ".*",
            "*.lock",
            "*.log",
        ]
    }

    /// Absolute path of the workspace root directory.
    pub(crate) fn workspace_root(&self) -> PathBuf {
        self.krates.workspace_root().as_std_path().to_path_buf()
    }

    /// Returns the root of the crate that the command is run from, without calling `cargo metadata`
    ///
    /// If the command is run from the workspace root, this will return the top-level Cargo.toml
    pub(crate) fn crate_root_from_path() -> Result<PathBuf> {
        /// How many parent folders are searched for a `Cargo.toml`
        const MAX_ANCESTORS: u32 = 10;

        /// Checks if the directory contains `Cargo.toml`
        fn contains_manifest(path: &Path) -> bool {
            std::fs::read_dir(path)
                .map(|entries| {
                    entries
                        .filter_map(Result::ok)
                        .any(|ent| &ent.file_name() == "Cargo.toml")
                })
                .unwrap_or(false)
        }

        // From the current directory we work our way up, looking for `Cargo.toml`
        std::env::current_dir()
            .ok()
            .and_then(|mut wd| {
                for _ in 0..MAX_ANCESTORS {
                    if contains_manifest(&wd) {
                        return Some(wd);
                    }
                    if !wd.pop() {
                        break;
                    }
                }
                None
            })
            .context("Failed to find directory containing Cargo.toml")
    }

    /// Ask `xcode-select -p` for the active Xcode developer directory.
    ///
    /// NOTE(review): the command's exit status isn't checked, so a failing
    /// `xcode-select` may yield `Some("")` — TODO confirm callers tolerate this.
    pub async fn get_xcode_path() -> Option<PathBuf> {
        let xcode = Command::new("xcode-select")
            .arg("-p")
            .output()
            .await
            .ok()
            .map(|s| String::from_utf8_lossy(&s.stdout).trim().to_string().into());
        xcode
    }

    /// Run `rustc --print sysroot` and return the trimmed path string.
    pub async fn get_rustc_sysroot() -> Result<String, anyhow::Error> {
        let sysroot = Command::new("rustc")
            .args(["--print", "sysroot"])
            .output()
            .await
            .map(|out| String::from_utf8(out.stdout).map(|s| s.trim().to_string()))?
            .context("Failed to extract rustc sysroot output")?;

        Ok(sysroot)
    }

    /// Run `rustc --version` and return its stdout (untrimmed).
    pub async fn get_rustc_version() -> Result<String> {
        let rustc_version = Command::new("rustc")
            .args(["--version"])
            .output()
            .await
            .map(|out| String::from_utf8(out.stdout))?
            .context("Failed to extract rustc version output")?;

        Ok(rustc_version)
    }

    /// Returns the properly canonicalized path to the dx executable, used for linking and wrapping rustc
    pub(crate) fn path_to_dx() -> Result<PathBuf> {
        dunce::canonicalize(std::env::current_exe().context("Failed to find dx")?)
            .context("Failed to find dx")
    }

    /// Returns the path to the dioxus data directory, used to install tools, store configs, and other things
    ///
    /// On macOS, we prefer to not put this dir in Application Support, but rather in the home directory.
    /// On Windows, we prefer to keep it in the home directory so the `dx` install dir matches the install script.
    pub(crate) fn dioxus_data_dir() -> PathBuf {
        static DX_HOME: std::sync::OnceLock<PathBuf> = std::sync::OnceLock::new();

        DX_HOME
            .get_or_init(|| {
                // An explicit DX_HOME override always wins.
                if let Some(path) = std::env::var_os("DX_HOME") {
                    return PathBuf::from(path);
                }

                if cfg!(target_os = "macos") || cfg!(target_os = "windows") {
                    dirs::home_dir().unwrap().join(".dx")
                } else {
                    dirs::data_dir()
                        .or_else(dirs::home_dir)
                        .unwrap()
                        .join(".dx")
                }
            })
            .to_path_buf()
    }

    /// Location of the global `settings.toml` inside the dioxus data dir.
    pub(crate) fn global_settings_file() -> PathBuf {
        Self::dioxus_data_dir().join("settings.toml")
    }

    /// The path where components downloaded from git are cached
    pub(crate) fn component_cache_dir() -> PathBuf {
        Self::dioxus_data_dir().join("components")
    }

    /// Get the path to a specific component in the cache
    ///
    /// The directory name is a 64-bit hash of the git URL (and revision, when
    /// given), so distinct sources never collide.
    pub(crate) fn component_cache_path(git: &str, rev: Option<&str>) -> PathBuf {
        use std::hash::Hasher;
        let mut hasher = std::hash::DefaultHasher::new();
        std::hash::Hash::hash(git, &mut hasher);
        if let Some(rev) = rev {
            std::hash::Hash::hash(rev, &mut hasher);
        }
        let hash = hasher.finish();
        Self::component_cache_dir().join(format!("{hash:016x}"))
    }
}
impl std::fmt::Debug for Workspace {
    /// Manual Debug impl: the `krates` graph is huge, so it is elided as "..",
    /// and `ignore`/`cargo_toml`/`android_tools` are omitted to keep log output readable.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Workspace")
            .field("krates", &"..")
            .field("settings", &self.settings)
            .field("rustc_version", &self.rustc_version)
            .field("sysroot", &self.sysroot)
            .field("wasm_opt", &self.wasm_opt)
            .finish()
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/bundle_utils.rs | packages/cli/src/bundle_utils.rs | use crate::{
config::BundleConfig, CustomSignCommandSettings, DebianSettings, MacOsSettings,
NSISInstallerMode, NsisSettings, PackageType, WebviewInstallMode, WindowsSettings, WixSettings,
};
// Conversion into tauri-bundler's NSIS settings. Fields the CLI does not expose
// (custom language files, template) are left unset, and compression is disabled.
impl From<NsisSettings> for tauri_bundler::NsisSettings {
    fn from(val: NsisSettings) -> Self {
        tauri_bundler::NsisSettings {
            header_image: val.header_image,
            sidebar_image: val.sidebar_image,
            installer_icon: val.installer_icon,
            install_mode: val.install_mode.into(),
            languages: val.languages,
            display_language_selector: val.display_language_selector,
            custom_language_files: None,
            template: None,
            compression: tauri_utils::config::NsisCompression::None,
            start_menu_folder: val.start_menu_folder,
            installer_hooks: val.installer_hooks,
            minimum_webview2_version: val.minimum_webview2_version,
        }
    }
}
// Conversion into tauri-bundler's top-level bundle settings. Anything not covered by
// the CLI's `BundleConfig` falls back to tauri-bundler's defaults.
impl From<BundleConfig> for tauri_bundler::BundleSettings {
    fn from(val: BundleConfig) -> Self {
        tauri_bundler::BundleSettings {
            identifier: val.identifier,
            publisher: val.publisher,
            icon: val.icon,
            resources: val.resources,
            copyright: val.copyright,
            // An unparseable category string is silently dropped rather than erroring.
            category: val.category.and_then(|c| c.parse().ok()),
            short_description: val.short_description,
            long_description: val.long_description,
            external_bin: val.external_bin,
            deb: val.deb.map(Into::into).unwrap_or_default(),
            macos: val.macos.map(Into::into).unwrap_or_default(),
            windows: val.windows.map(Into::into).unwrap_or_default(),
            ..Default::default()
        }
    }
}
// Conversion into tauri-bundler's Debian package settings — a field-for-field mapping.
impl From<DebianSettings> for tauri_bundler::DebianSettings {
    fn from(val: DebianSettings) -> Self {
        tauri_bundler::DebianSettings {
            depends: val.depends,
            files: val.files,
            desktop_template: val.desktop_template,
            provides: val.provides,
            conflicts: val.conflicts,
            replaces: val.replaces,
            section: val.section,
            priority: val.priority,
            changelog: val.changelog,
            pre_install_script: val.pre_install_script,
            post_install_script: val.post_install_script,
            pre_remove_script: val.pre_remove_script,
            post_remove_script: val.post_remove_script,
            recommends: val.recommends,
        }
    }
}
// Conversion into tauri-bundler's WiX settings. The language map is converted into
// tauri's (locale, config) pairs; an empty configuration defaults to en-US.
impl From<WixSettings> for tauri_bundler::WixSettings {
    fn from(val: WixSettings) -> Self {
        tauri_bundler::WixSettings {
            language: tauri_bundler::bundle::WixLanguage({
                let mut languages: Vec<_> = val
                    .language
                    .iter()
                    .map(|l| {
                        (
                            l.0.clone(),
                            tauri_bundler::bundle::WixLanguageConfig {
                                locale_path: l.1.clone(),
                            },
                        )
                    })
                    .collect();
                // The installer needs at least one language; fall back to en-US.
                if languages.is_empty() {
                    languages.push(("en-US".into(), Default::default()));
                }
                languages
            }),
            template: val.template,
            fragment_paths: val.fragment_paths,
            component_group_refs: val.component_group_refs,
            component_refs: val.component_refs,
            feature_group_refs: val.feature_group_refs,
            feature_refs: val.feature_refs,
            merge_refs: val.merge_refs,
            enable_elevated_update_task: val.enable_elevated_update_task,
            banner_path: val.banner_path,
            dialog_image_path: val.dialog_image_path,
            fips_compliant: val.fips_compliant,
            version: val.version,
            upgrade_code: val.upgrade_code,
        }
    }
}
// Conversion into tauri-bundler's macOS bundle settings — a field-for-field mapping.
impl From<MacOsSettings> for tauri_bundler::MacOsSettings {
    fn from(val: MacOsSettings) -> Self {
        tauri_bundler::MacOsSettings {
            frameworks: val.frameworks,
            minimum_system_version: val.minimum_system_version,
            exception_domain: val.exception_domain,
            signing_identity: val.signing_identity,
            provider_short_name: val.provider_short_name,
            entitlements: val.entitlements,
            info_plist_path: val.info_plist_path,
            files: val.files,
            hardened_runtime: val.hardened_runtime,
            bundle_version: val.bundle_version,
            bundle_name: val.bundle_name,
        }
    }
}
// Conversion into tauri-bundler's Windows settings. The icon path defaults to
// `./icons/icon.ico` when not configured. The `allow(deprecated)` covers fields
// tauri-bundler has deprecated but still supports.
#[allow(deprecated)]
impl From<WindowsSettings> for tauri_bundler::WindowsSettings {
    fn from(val: WindowsSettings) -> Self {
        tauri_bundler::WindowsSettings {
            digest_algorithm: val.digest_algorithm,
            certificate_thumbprint: val.certificate_thumbprint,
            timestamp_url: val.timestamp_url,
            tsp: val.tsp,
            wix: val.wix.map(Into::into),
            webview_install_mode: val.webview_install_mode.into(),
            allow_downgrades: val.allow_downgrades,
            nsis: val.nsis.map(Into::into),
            sign_command: val.sign_command.map(Into::into),
            icon_path: val.icon_path.unwrap_or("./icons/icon.ico".into()),
        }
    }
}
// Map the CLI's NSIS installer mode onto tauri-utils' structurally identical enum.
impl From<NSISInstallerMode> for tauri_utils::config::NSISInstallerMode {
    fn from(val: NSISInstallerMode) -> Self {
        match val {
            NSISInstallerMode::CurrentUser => tauri_utils::config::NSISInstallerMode::CurrentUser,
            NSISInstallerMode::PerMachine => tauri_utils::config::NSISInstallerMode::PerMachine,
            NSISInstallerMode::Both => tauri_utils::config::NSISInstallerMode::Both,
        }
    }
}
// Map the CLI's package type onto tauri-bundler's equivalent, variant by variant.
impl From<PackageType> for tauri_bundler::PackageType {
    fn from(value: PackageType) -> Self {
        match value {
            PackageType::MacOsBundle => Self::MacOsBundle,
            PackageType::IosBundle => Self::IosBundle,
            PackageType::WindowsMsi => Self::WindowsMsi,
            PackageType::Deb => Self::Deb,
            PackageType::Rpm => Self::Rpm,
            PackageType::AppImage => Self::AppImage,
            PackageType::Dmg => Self::Dmg,
            PackageType::Updater => Self::Updater,
            PackageType::Nsis => Self::Nsis,
        }
    }
}
impl WebviewInstallMode {
fn into(self) -> tauri_utils::config::WebviewInstallMode {
match self {
Self::Skip => tauri_utils::config::WebviewInstallMode::Skip,
Self::DownloadBootstrapper { silent } => {
tauri_utils::config::WebviewInstallMode::DownloadBootstrapper { silent }
}
Self::EmbedBootstrapper { silent } => {
tauri_utils::config::WebviewInstallMode::EmbedBootstrapper { silent }
}
Self::OfflineInstaller { silent } => {
tauri_utils::config::WebviewInstallMode::OfflineInstaller { silent }
}
Self::FixedRuntime { path } => {
tauri_utils::config::WebviewInstallMode::FixedRuntime { path }
}
}
}
}
// Conversion into tauri-bundler's custom sign-command settings — a direct field copy.
impl From<CustomSignCommandSettings> for tauri_bundler::CustomSignCommandSettings {
    fn from(val: CustomSignCommandSettings) -> Self {
        tauri_bundler::CustomSignCommandSettings {
            cmd: val.cmd,
            args: val.args,
        }
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cargo_toml.rs | packages/cli/src/cargo_toml.rs | //! The cargo_toml crate contains some logic for resolving Cargo.toml files with workspace inheritance, but it
//! doesn't handle global configs like ~/.cargo/config.toml. This module handles extending the manifest with those
//! settings if they exist.
use std::path::{Path, PathBuf};
use cargo_toml::{Manifest, Profile, Profiles};
/// Load the manifest from a path inheriting from the global config where needed
///
/// Merge precedence: values already present in the manifest win, then the project's
/// `.cargo/config.toml`, then the global cargo config.
pub fn load_manifest_from_path(path: &Path) -> Result<Manifest, cargo_toml::Error> {
    let mut original = Manifest::from_path(path)?;

    // Merge the project-local `.cargo/config.toml` if it exists. `path` points at the
    // manifest *file*, so the config dir must be resolved against its parent directory
    // (the previous `path.join(".cargo")` produced `Cargo.toml/.cargo/config.toml`,
    // which can never exist, making this merge a silent no-op).
    if let Some(manifest_dir) = path.parent() {
        extend_manifest_config_toml(
            &mut original,
            &manifest_dir.join(".cargo").join("config.toml"),
        );
    }

    // Merge the global cargo config if it exists
    if let Some(global_config) = global_cargo_config_path() {
        extend_manifest_config_toml(&mut original, &global_config);
    }

    Ok(original)
}
/// Get the path to cargo home.
///
/// Honors the `CARGO_HOME` override; otherwise falls back to the platform default
/// (`%USERPROFILE%\.cargo` on Windows, `$HOME/.cargo` on unix). Returns `None` on
/// platforms where no home directory can be determined.
fn cargo_home() -> Option<PathBuf> {
    // An explicit CARGO_HOME env var always wins.
    if let Some(overridden) = std::env::var_os("CARGO_HOME") {
        return Some(PathBuf::from(overridden));
    }

    // Otherwise derive the default location from the platform's home directory.
    if cfg!(windows) {
        return std::env::var_os("USERPROFILE").map(|profile| PathBuf::from(profile).join(".cargo"));
    }
    if cfg!(unix) {
        return dirs::home_dir().map(|home| home.join(".cargo"));
    }

    None
}
/// Get the global cargo config path if it exists.
///
/// `None` when no cargo home could be determined for this platform.
fn global_cargo_config_path() -> Option<PathBuf> {
    Some(cargo_home()?.join("config.toml"))
}
// Extend a manifest with a config.toml if it exists
fn extend_manifest_config_toml(manifest: &mut Manifest, path: &Path) {
    // Read the config.toml if it exists. Missing/unreadable files are silently
    // ignored — this merge is strictly best-effort.
    let Ok(config) = std::fs::read_to_string(path) else {
        return;
    };
    // Likewise ignore files that are not valid TOML.
    let Ok(config) = config.parse::<toml::Value>() else {
        return;
    };
    // Try to parse profiles. Only the `[profile.*]` tables are merged here; other
    // config.toml keys (build, env, ...) are not consulted.
    if let Some(profiles) = config.get("profile").and_then(|p| p.as_table()) {
        merge_profiles(
            &mut manifest.profile,
            // Round-trips the table back through a TOML string; a table that fails to
            // deserialize as `Profiles` merges as empty (no-op).
            toml::from_str::<cargo_toml::Profiles>(&profiles.to_string()).unwrap_or_default(),
        );
    }
}
/// Merge the new profiles into the target profiles. Keep the existing values if they exist.
fn merge_profiles(target: &mut Profiles, new: Profiles) {
if let Some(new_release) = new.release {
if target.release.is_none() {
target.release = Some(new_release);
} else {
merge_profile(target.release.as_mut().unwrap(), new_release);
}
}
if let Some(new_dev) = new.dev {
if target.dev.is_none() {
target.dev = Some(new_dev);
} else {
merge_profile(target.dev.as_mut().unwrap(), new_dev);
}
}
if let Some(new_test) = new.test {
if target.test.is_none() {
target.test = Some(new_test);
} else {
merge_profile(target.test.as_mut().unwrap(), new_test);
}
}
if let Some(new_bench) = new.bench {
if target.bench.is_none() {
target.bench = Some(new_bench);
} else {
merge_profile(target.bench.as_mut().unwrap(), new_bench);
}
}
#[allow(deprecated)]
if let Some(new_doc) = new.doc {
if target.doc.is_none() {
target.doc = Some(new_doc);
} else {
merge_profile(target.doc.as_mut().unwrap(), new_doc);
}
}
for (profile_name, profile) in new.custom {
if let Some(target_profile) = target.custom.get_mut(&profile_name) {
merge_profile(target_profile, profile);
} else {
target.custom.insert(profile_name, profile);
}
}
}
/// Merge the new profile into the target profile. Keep the existing values if they exist.
///
/// Each scalar field keeps `target`'s value when already set and otherwise adopts
/// `new`'s; the per-package override map is unioned via `extend`.
fn merge_profile(target: &mut Profile, new: Profile) {
    target.opt_level = target.opt_level.take().or(new.opt_level);
    target.debug = target.debug.take().or(new.debug);
    target.split_debuginfo = target.split_debuginfo.take().or(new.split_debuginfo);
    target.rpath = target.rpath.take().or(new.rpath);
    target.lto = target.lto.take().or(new.lto);
    target.debug_assertions = target.debug_assertions.take().or(new.debug_assertions);
    target.codegen_units = target.codegen_units.take().or(new.codegen_units);
    target.panic = target.panic.take().or(new.panic);
    target.incremental = target.incremental.take().or(new.incremental);
    target.overflow_checks = target.overflow_checks.take().or(new.overflow_checks);
    target.strip = target.strip.take().or(new.strip);
    target.build_override = target.build_override.take().or(new.build_override);
    target.inherits = target.inherits.take().or(new.inherits);
    target.package.extend(new.package);
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/platform.rs | packages/cli/src/platform.rs | use anyhow::Result;
use clap::{arg, Arg, ArgMatches, Args, FromArgMatches};
use serde::{Deserialize, Serialize};
use std::fmt::Display;
use std::str::FromStr;
use target_lexicon::{Environment, OperatingSystem, Triple};
// NOTE: the serde rename values below must stay in sync with the identifier strings
// accepted by `Platform::from_identifier` and the `--platform` value parser.
#[derive(
    Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, Debug, Default,
)]
#[non_exhaustive]
pub(crate) enum Platform {
    /// Alias for `--target wasm32-unknown-unknown --renderer websys --bundle-format web`
    #[serde(rename = "web")]
    Web,

    /// Alias for `--target <host> --renderer webview --bundle-format macos`
    #[serde(rename = "macos")]
    MacOS,

    /// Alias for `--target <host> --renderer webview --bundle-format windows`
    #[serde(rename = "windows")]
    Windows,

    /// Alias for `--target <host> --renderer webview --bundle-format linux`
    #[serde(rename = "linux")]
    Linux,

    /// Alias for `--target <aarch64-apple-ios/sim> --renderer webview --bundle-format ios`
    #[serde(rename = "ios")]
    Ios,

    /// Alias for `--target <device-triple> --renderer webview --bundle-format android`
    #[serde(rename = "android")]
    Android,

    /// Alias for `--target <host> --renderer ssr --bundle-format server`
    #[serde(rename = "server")]
    Server,

    /// Alias for `--target <host> --renderer liveview --bundle-format server`
    #[serde(rename = "liveview")]
    Liveview,

    /// No platform was specified, so the CLI is free to choose the best one.
    #[default]
    Unknown,
}
impl Platform {
    /// Parse one of the platform identifier strings accepted by `--platform` and the
    /// alias flags. `"desktop"` is resolved against the compile-time host OS; every
    /// other identifier maps 1:1 onto a variant.
    fn from_identifier(identifier: &str) -> std::result::Result<Self, clap::Error> {
        match identifier {
            "web" => Ok(Self::Web),
            "macos" => Ok(Self::MacOS),
            "windows" => Ok(Self::Windows),
            "linux" => Ok(Self::Linux),
            "ios" => Ok(Self::Ios),
            "android" => Ok(Self::Android),
            "server" => Ok(Self::Server),
            "liveview" => Ok(Self::Liveview),
            "desktop" => {
                // Resolve the "desktop" alias to the host's own platform.
                if cfg!(target_os = "macos") {
                    Ok(Self::MacOS)
                } else if cfg!(target_os = "windows") {
                    Ok(Self::Windows)
                } else if cfg!(unix) {
                    Ok(Self::Linux)
                } else {
                    Err(clap::Error::raw(
                        clap::error::ErrorKind::InvalidValue,
                        "Desktop alias is not supported on this platform",
                    ))
                }
            }
            _ => Err(clap::Error::raw(
                clap::error::ErrorKind::InvalidValue,
                format!("Unknown platform: {identifier}"),
            )),
        }
    }
}
impl Args for Platform {
    fn augment_args_for_update(cmd: clap::Command) -> clap::Command {
        // Updating reuses the same argument definitions as initial parsing.
        Self::augment_args(cmd)
    }

    fn augment_args(cmd: clap::Command) -> clap::Command {
        const HELP_HEADING: &str = "Platform";
        // One boolean alias flag per platform plus an explicit `--platform <PLATFORM>`
        // option. The alias flags form a mutually-exclusive group (`target_alias`),
        // and `--platform` conflicts with that group, so at most one selection can
        // survive parsing.
        cmd.arg(arg!(--web "Target a web app").help_heading(HELP_HEADING))
            .arg(arg!(--desktop "Target a desktop app").help_heading(HELP_HEADING))
            .arg(arg!(--macos "Target a macos desktop app").help_heading(HELP_HEADING))
            .arg(arg!(--windows "Target a windows desktop app").help_heading(HELP_HEADING))
            .arg(arg!(--linux "Target a linux desktop app").help_heading(HELP_HEADING))
            .arg(arg!(--ios "Target an ios app").help_heading(HELP_HEADING))
            .arg(arg!(--android "Target an android app").help_heading(HELP_HEADING))
            .arg(arg!(--server "Target a server build").help_heading(HELP_HEADING))
            .arg(arg!(--liveview "Target a liveview build").help_heading(HELP_HEADING))
            .arg(
                Arg::new("platform")
                    .long("platform")
                    .value_name("PLATFORM")
                    .help("Manually set the platform (web, macos, windows, linux, ios, android, server, liveview)")
                    .help_heading(HELP_HEADING)
                    .value_parser([
                        "web", "macos", "windows", "linux", "ios", "android", "server", "liveview", "desktop",
                    ])
                    .conflicts_with("target_alias"),
            )
            .group(
                clap::ArgGroup::new("target_alias")
                    .args([
                        "web", "desktop", "macos", "windows", "linux", "ios", "android", "server",
                        "liveview",
                    ])
                    .multiple(false)
                    .required(false),
            )
    }
}
impl FromArgMatches for Platform {
    fn from_arg_matches(matches: &ArgMatches) -> Result<Self, clap::Error> {
        // Prefer the explicit `--platform <id>` value; otherwise fall back to whichever
        // alias flag in the `target_alias` group was set (its clap `Id` doubles as the
        // platform identifier). No selection at all yields `Unknown`.
        if let Some(identifier) = matches.get_one::<String>("platform") {
            Self::from_identifier(identifier)
        } else if let Some(platform) = matches.get_one::<clap::Id>("target_alias") {
            Self::from_identifier(platform.as_str())
        } else {
            Ok(Self::Unknown)
        }
    }

    fn update_from_arg_matches(&mut self, matches: &ArgMatches) -> Result<(), clap::Error> {
        // Re-parses from scratch; there is no partial-update semantics for this type.
        *self = Self::from_arg_matches(matches)?;
        Ok(())
    }
}
#[derive(
    Copy,
    Clone,
    Hash,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Serialize,
    Deserialize,
    Debug,
    clap::ValueEnum,
)]
#[non_exhaustive]
pub(crate) enum Renderer {
    /// Targeting webview renderer
    Webview,

    /// Targeting native renderer
    Native,

    /// Targeting the server platform using Axum and Dioxus-Fullstack
    ///
    /// This is implicitly passed if `fullstack` is enabled as a feature. Using this variant simply
    /// means you're only building the server variant without the `.wasm` to serve.
    Server,

    /// Targeting the liveview renderer
    Liveview,

    /// Targeting the web-sys renderer
    Web,
}
impl Renderer {
    /// Get the feature name for the platform in the dioxus crate
    pub(crate) fn feature_name(&self, target: &Triple) -> &str {
        match self {
            // Webview maps to the "mobile" feature on Android/iOS triples and to
            // "desktop" for every other target.
            Renderer::Webview => match (target.environment, target.operating_system) {
                (Environment::Android, _) | (_, OperatingSystem::IOS(_)) => "mobile",
                _ => "desktop",
            },
            Renderer::Native => "native",
            Renderer::Server => "server",
            Renderer::Liveview => "liveview",
            Renderer::Web => "web",
        }
    }

    /// Best-effort inverse of `feature_name`: infer a renderer from a cargo feature
    /// name. Returns `None` for features that don't correspond to a renderer.
    pub(crate) fn autodetect_from_cargo_feature(feature: &str) -> Option<Self> {
        match feature {
            "web" => Some(Self::Web),
            "desktop" | "mobile" => Some(Self::Webview),
            "native" => Some(Self::Native),
            "liveview" => Some(Self::Liveview),
            "server" => Some(Self::Server),
            _ => None,
        }
    }
}
/// Error returned when a string doesn't name a known renderer (see `FromStr` below).
#[derive(Debug)]
pub(crate) struct UnknownRendererError;

impl std::fmt::Display for UnknownRendererError {
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        formatter.write_str("Unknown renderer")
    }
}

impl std::error::Error for UnknownRendererError {}
impl FromStr for Renderer {
    type Err = UnknownRendererError;

    // Accepts exactly the lowercase names produced by this type's `Display` impl.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "webview" => Ok(Self::Webview),
            "native" => Ok(Self::Native),
            "server" => Ok(Self::Server),
            "liveview" => Ok(Self::Liveview),
            "web" => Ok(Self::Web),
            _ => Err(UnknownRendererError),
        }
    }
}
impl Display for Renderer {
    // Lowercase names, kept in sync with `FromStr` so values round-trip.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            Renderer::Webview => "webview",
            Renderer::Native => "native",
            Renderer::Server => "server",
            Renderer::Liveview => "liveview",
            Renderer::Web => "web",
        })
    }
}
// NOTE: the serde alias attributes are load-bearing — on each desktop OS, "desktop"
// deserializes to that host's own bundle format (macos/windows/linux).
#[derive(
    Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, Debug, Default,
)]
#[non_exhaustive]
pub(crate) enum BundleFormat {
    /// Targeting the web bundle structure
    #[serde(rename = "web")]
    #[default]
    Web,

    /// Targeting the macos desktop bundle structure
    #[cfg_attr(target_os = "macos", serde(alias = "desktop"))]
    #[serde(rename = "macos")]
    MacOS,

    /// Targeting the windows desktop bundle structure
    #[cfg_attr(target_os = "windows", serde(alias = "desktop"))]
    #[serde(rename = "windows")]
    Windows,

    /// Targeting the linux desktop bundle structure
    #[cfg_attr(target_os = "linux", serde(alias = "desktop"))]
    #[serde(rename = "linux")]
    Linux,

    /// Targeting the server bundle structure (a single binary placed next to the web build)
    #[serde(rename = "server")]
    Server,

    /// Targeting the ios bundle structure
    ///
    /// Can't work properly if you're not building from an Apple device.
    #[serde(rename = "ios")]
    Ios,

    /// Targeting the android bundle structure
    #[serde(rename = "android")]
    Android,
}
impl BundleFormat {
    /// The native "desktop" host app format.
    ///
    /// Falls back to `Web` when compiled for a host OS with no desktop bundle format.
    pub(crate) fn host() -> Self {
        if cfg!(target_os = "macos") {
            Self::MacOS
        } else if cfg!(target_os = "windows") {
            Self::Windows
        } else if cfg!(target_os = "linux") {
            Self::Linux
        } else {
            Self::Web
        }
    }

    /// Get the name of the folder we need to generate for this platform
    ///
    /// Note that web and server share the same platform folder since we'll export the web folder as a bundle on its own
    pub(crate) fn build_folder_name(&self) -> &'static str {
        match self {
            Self::Web => "web",
            Self::Server => "web",
            Self::Ios => "ios",
            Self::Android => "android",
            Self::Windows => "windows",
            Self::Linux => "linux",
            Self::MacOS => "macos",
        }
    }

    /// The cargo profile name used for this format, e.g. `desktop-release` or
    /// `wasm-dev`. All three desktop OSes share the `desktop` base profile.
    pub(crate) fn profile_name(&self, release: bool) -> String {
        let base_profile = match self {
            Self::MacOS | Self::Windows | Self::Linux => "desktop",
            Self::Web => "wasm",
            Self::Ios => "ios",
            Self::Android => "android",
            Self::Server => "server",
        };

        let opt_level = if release { "release" } else { "dev" };

        format!("{base_profile}-{opt_level}")
    }

    /// Human-readable display name for this bundle format.
    pub(crate) fn expected_name(&self) -> &'static str {
        match self {
            Self::Web => "Web",
            Self::MacOS => "MacOS",
            Self::Windows => "Windows",
            Self::Linux => "Linux",
            Self::Ios => "iOS",
            Self::Android => "Android",
            Self::Server => "Server",
        }
    }
}
/// Error returned when a string doesn't name a known bundle format (see `FromStr` below).
#[derive(Debug)]
pub(crate) struct UnknownBundleFormatError;

impl std::fmt::Display for UnknownBundleFormatError {
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        formatter.write_str("Unknown bundle format")
    }
}

impl std::error::Error for UnknownBundleFormatError {}
impl FromStr for BundleFormat {
    type Err = UnknownBundleFormatError;

    // Accepts exactly the lowercase names produced by this type's `Display` impl.
    // The "desktop" alias is handled by serde on the enum, not here.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "web" => Ok(Self::Web),
            "macos" => Ok(Self::MacOS),
            "windows" => Ok(Self::Windows),
            "linux" => Ok(Self::Linux),
            "server" => Ok(Self::Server),
            "ios" => Ok(Self::Ios),
            "android" => Ok(Self::Android),
            _ => Err(UnknownBundleFormatError),
        }
    }
}
impl Display for BundleFormat {
    // Lowercase names, kept in sync with `FromStr` so values round-trip.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            BundleFormat::Web => "web",
            BundleFormat::MacOS => "macos",
            BundleFormat::Windows => "windows",
            BundleFormat::Linux => "linux",
            BundleFormat::Server => "server",
            BundleFormat::Ios => "ios",
            BundleFormat::Android => "android",
        })
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/rustcwrapper.rs | packages/cli/src/rustcwrapper.rs | use serde::{Deserialize, Serialize};
use std::{
env::{args, vars},
path::PathBuf,
process::ExitCode,
};
/// The environment variable indicating where the args file is located.
///
/// When `dx-rustc` runs, it writes its arguments to this file.
pub const DX_RUSTC_WRAPPER_ENV_VAR: &str = "DX_RUSTC";

/// Is `dx` being used as a rustc wrapper?
///
/// This is primarily used to intercept cargo, enabling fast hot-patching by caching
/// the environment cargo sets up for the user's current project.
///
/// In a different world we could simply rely on cargo printing link args and the rustc
/// command, but it doesn't seem to output that in a reliable, parseable, cross-platform
/// format (ie using command files on windows...), so we're forced to do this
/// interception nonsense.
pub fn is_wrapping_rustc() -> bool {
    matches!(std::env::var(DX_RUSTC_WRAPPER_ENV_VAR), Ok(_))
}
/// A rustc invocation captured by the `dx-rustc` wrapper, persisted as JSON so the
/// parent `dx` process can replay it later.
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RustcArgs {
    /// The argument list passed to the wrapper with its own executable name stripped;
    /// the first element is the rustc program name itself (see `run_rustc`).
    pub args: Vec<String>,
    /// The full environment at capture time, as key/value pairs.
    pub envs: Vec<(String, String)>,
    /// it doesn't include first program name argument
    pub link_args: Vec<String>,
}
/// Check if the arguments indicate a linking step, including those in command files.
fn has_linking_args() -> bool {
    for arg in std::env::args() {
        // Direct check for linker-like arguments: object-file inputs, or lld's `-flavor`.
        if arg.ends_with(".o") || arg == "-flavor" {
            return true;
        }

        // Check inside command files (`@path` response files, used where command lines
        // are length-limited).
        if let Some(path_str) = arg.strip_prefix('@') {
            if let Ok(file_binary) = std::fs::read(path_str) {
                // Handle both UTF-8 and UTF-16LE encodings for response files.
                // If the bytes aren't valid UTF-8, reinterpret them (lossily) as
                // little-endian u16 code units, which covers Windows-style files.
                let content = String::from_utf8(file_binary.clone()).unwrap_or_else(|_| {
                    let binary_u16le: Vec<u16> = file_binary
                        .chunks_exact(2)
                        .map(|a| u16::from_le_bytes([a[0], a[1]]))
                        .collect();
                    String::from_utf16_lossy(&binary_u16le)
                });

                // Check if any line in the command file contains linking indicators.
                // Arguments may be quoted, so strip surrounding quotes before testing.
                if content.lines().any(|line| {
                    let trimmed_line = line.trim().trim_matches('"');
                    trimmed_line.ends_with(".o") || trimmed_line == "-flavor"
                }) {
                    return true;
                }
            }
        }
    }

    false
}
/// Run rustc directly, but output the result to a file.
///
/// <https://doc.rust-lang.org/cargo/reference/config.html#buildrustc>
pub fn run_rustc() -> ExitCode {
    // If we are being asked to link, delegate to the linker action.
    if has_linking_args() {
        return crate::link::LinkAction::from_env()
            .expect("Linker action not found")
            .run_link();
    }

    // Where to persist the captured invocation (set by the parent `dx` process).
    let var_file: PathBuf = std::env::var(DX_RUSTC_WRAPPER_ENV_VAR)
        .expect("DX_RUSTC env var must be set")
        .into();

    // Cargo invokes a wrapper like: `wrapper-name rustc [args...]`
    // We skip our own executable name (`wrapper-name`) to get the args passed to us.
    let captured_args = args().skip(1).collect::<Vec<_>>();
    let rustc_args = RustcArgs {
        args: captured_args.clone(),
        envs: vars().collect::<_>(),
        link_args: Default::default(),
    };

    // Another terrible hack to avoid caching non-sensical args when
    // a build is completely fresh (rustc is invoked with --crate-name ___)
    // `nth(1)` after the skip_while picks the value *following* `--crate-name`.
    if rustc_args
        .args
        .iter()
        .skip_while(|arg| *arg != "--crate-name")
        .nth(1)
        .is_some_and(|name| name != "___")
    {
        let parent_dir = var_file
            .parent()
            .expect("Args file path has no parent directory");
        std::fs::create_dir_all(parent_dir)
            .expect("Failed to create parent directory for args file");
        let serialized_args =
            serde_json::to_string(&rustc_args).expect("Failed to serialize rustc args");
        std::fs::write(&var_file, serialized_args).expect("Failed to write rustc args to file");
    }

    // Run the actual rustc command.
    // We want all stdout/stderr to be inherited, so the user sees the compiler output.
    let mut cmd = std::process::Command::new("rustc");
    // The first argument in `captured_args` is "rustc", which we need to skip
    // when passing arguments to the `rustc` command we are spawning.
    cmd.args(captured_args.iter().skip(1));
    // NOTE(review): the child inherits our environment anyway, so re-passing the
    // captured env looks redundant (but harmless) — confirm before removing.
    cmd.envs(rustc_args.envs);
    cmd.stdout(std::process::Stdio::inherit());
    cmd.stderr(std::process::Stdio::inherit());
    cmd.current_dir(std::env::current_dir().expect("Failed to get current dir"));

    // Spawn the process and propagate its exit code. `process::exit` never returns,
    // so the `ExitCode` return type is only produced by the linking branch above.
    let status = cmd.status().expect("Failed to execute rustc command");
    std::process::exit(status.code().unwrap_or(1)); // Exit with 1 if process was killed by signal
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/devcfg.rs | packages/cli/src/devcfg.rs | //! Configuration of the CLI at runtime to enable certain experimental features.
/// Should we force the entropy to be used on the main exe?
///
/// This is used to verify that binaries are copied with different names such that they
/// don't collide and should generally be only enabled on certain platforms that
/// require it. Toggled via the `DIOXUS_FORCE_ENTRY` environment variable.
pub(crate) fn should_force_entropy() -> bool {
    matches!(std::env::var("DIOXUS_FORCE_ENTRY"), Ok(_))
}
/// Should we test the installs? Toggled via the `TEST_INSTALLS` environment variable.
#[allow(dead_code)] // -> used in tests only
pub(crate) fn test_installs() -> bool {
    matches!(std::env::var("TEST_INSTALLS"), Ok(_))
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/test_harnesses.rs | packages/cli/src/test_harnesses.rs | use crate::{BuildTargets, BundleFormat, Cli, Commands, Workspace};
use anyhow::Result;
use clap::Parser;
use futures_util::{stream::FuturesUnordered, StreamExt};
use std::{
collections::HashSet,
fmt::Write,
path::{Path, PathBuf},
pin::Pin,
prelude::rust_2024::Future,
};
use target_lexicon::Triple;
use tracing_subscriber::{prelude::*, util::SubscriberInitExt, EnvFilter, Layer};
// Async test entry point; delegates to `test_harnesses` for the actual assertions.
#[tokio::test]
async fn run_harness() {
    test_harnesses().await;
}
#[allow(dead_code)]
async fn test_harnesses() {
tracing_subscriber::registry()
.with(tracing_subscriber::fmt::layer().with_filter(EnvFilter::new("error,dx=debug,dioxus_cli=debug,manganis_cli_support=debug,wasm_split_cli=debug,subsecond_cli_support=debug",)))
.init();
TestHarnessBuilder::run(vec![
TestHarnessBuilder::new("harness-simple-web")
.deps(r#"dioxus = { workspace = true, features = ["web"] }"#)
.asrt(r#"dx build"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::Web);
assert_eq!(t.client.triple, "wasm32-unknown-unknown".parse().unwrap());
assert!(t.server.is_none());
}),
TestHarnessBuilder::new("harness-simple-desktop")
.deps(r#"dioxus = { workspace = true, features = ["desktop"] }"#)
.asrt(r#"dx build"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::host());
assert_eq!(t.client.triple, Triple::host());
assert!(t.server.is_none());
}),
TestHarnessBuilder::new("harness-simple-mobile")
.deps(r#"dioxus = { workspace = true, features = ["mobile"] }"#)
.asrt(
"dx build",
|targets| async move { assert!(targets.is_err()) },
),
TestHarnessBuilder::new("harness-simple-fullstack")
.deps(r#"dioxus = { workspace = true, features = ["fullstack"] }"#)
.fetr(r#"web=["dioxus/web"]"#)
.fetr(r#"server=["dioxus/server"]"#)
.asrt(r#"dx build"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::Web);
let server = t.server.unwrap();
assert_eq!(server.bundle, BundleFormat::Server);
assert_eq!(server.triple, Triple::host());
}),
TestHarnessBuilder::new("harness-simple-fullstack-with-default")
.deps(r#"dioxus = { workspace = true, features = ["fullstack"] }"#)
.fetr(r#"default=["web", "server"]"#)
.fetr(r#"web=["dioxus/web"]"#)
.fetr(r#"server=["dioxus/server"]"#)
.asrt(r#"dx build"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::Web);
let server = t.server.unwrap();
assert_eq!(server.bundle, BundleFormat::Server);
assert_eq!(server.triple, Triple::host());
}),
TestHarnessBuilder::new("harness-simple-fullstack-native-with-default")
.deps(r#"dioxus = { workspace = true, features = ["fullstack"] }"#)
.fetr(r#"default=["native", "server"]"#)
.fetr(r#"native=["dioxus/native"]"#)
.fetr(r#"server=["dioxus/server"]"#)
.asrt(r#"dx build"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::host());
assert_eq!(t.client.features.len(), 1);
assert_eq!(t.client.features[0], "native");
let server = t.server.unwrap();
assert_eq!(server.bundle, BundleFormat::Server);
assert_eq!(server.triple, Triple::host());
}),
TestHarnessBuilder::new("harness-fullstack-multi-target")
.deps(r#"dioxus = { workspace = true, features = ["fullstack"] }"#)
.fetr(r#"default=["web", "desktop", "mobile", "server"]"#)
.fetr(r#"web=["dioxus/web"]"#)
.fetr(r#"desktop=["dioxus/desktop"]"#)
.fetr(r#"mobile=["dioxus/mobile"]"#)
.fetr(r#"server=["dioxus/server"]"#)
.asrt(r#"dx build"#, |t| async move { assert!(t.is_err()) })
.asrt(r#"dx build --web"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::Web);
})
.asrt(r#"dx build --desktop"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::host());
})
.asrt(r#"dx build --ios"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::Ios);
assert_eq!(t.client.triple, TestHarnessBuilder::host_ios_triple_sim());
})
.asrt(r#"dx build --ios --device"#, |targets| async move {
let targets = targets.unwrap();
assert_eq!(targets.client.bundle, BundleFormat::Ios);
assert_eq!(targets.client.triple, "aarch64-apple-ios".parse().unwrap());
})
.asrt(r#"dx build --android --device"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::Android);
assert_eq!(t.client.triple, "aarch64-linux-android".parse().unwrap());
}),
TestHarnessBuilder::new("harness-fullstack-multi-target-no-default")
.deps(r#"dioxus = { workspace = true, features = ["fullstack"] }"#)
.fetr(r#"web=["dioxus/web"]"#)
.fetr(r#"desktop=["dioxus/desktop"]"#)
.fetr(r#"mobile=["dioxus/mobile"]"#)
.fetr(r#"server=["dioxus/server"]"#)
.asrt(r#"dx build"#, |targets| async move {
assert!(targets.is_err())
})
.asrt(r#"dx build --desktop"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::host());
let server = t.server.unwrap();
assert_eq!(server.bundle, BundleFormat::Server);
assert_eq!(server.triple, Triple::host());
})
.asrt(r#"dx build --ios"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::Ios);
assert_eq!(t.client.triple, TestHarnessBuilder::host_ios_triple_sim());
let server = t.server.unwrap();
assert_eq!(server.bundle, BundleFormat::Server);
assert_eq!(server.triple, Triple::host());
}),
TestHarnessBuilder::new("harness-fullstack-desktop")
.deps(r#"dioxus = { workspace = true, features = ["fullstack"] }"#)
.fetr(r#"desktop=["dioxus/desktop"]"#)
.fetr(r#"server=["dioxus/server"]"#)
.asrt(r#"dx build"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::host());
let server = t.server.unwrap();
assert_eq!(server.bundle, BundleFormat::Server);
assert_eq!(server.triple, Triple::host());
}),
TestHarnessBuilder::new("harness-fullstack-desktop-with-features")
.deps(r#"dioxus = { workspace = true, features = ["fullstack"] }"#)
.deps(r#"anyhow = { workspace = true, optional = true }"#)
.fetr(r#"desktop=["dioxus/desktop", "has_anyhow"]"#)
.fetr(r#"has_anyhow=["dep:anyhow"]"#)
.fetr(r#"server=["dioxus/server"]"#)
.asrt(r#"dx build"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::host());
let server = t.server.unwrap();
assert_eq!(server.bundle, BundleFormat::Server);
assert_eq!(server.triple, Triple::host());
}),
TestHarnessBuilder::new("harness-fullstack-desktop-with-default")
.deps(r#"dioxus = { workspace = true, features = ["fullstack"] }"#)
.deps(r#"anyhow = { workspace = true, optional = true }"#)
.fetr(r#"default=["desktop"]"#)
.fetr(r#"desktop=["dioxus/desktop", "has_anyhow"]"#)
.fetr(r#"has_anyhow=["dep:anyhow"]"#)
.fetr(r#"server=["dioxus/server"]"#)
.asrt(r#"dx build"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::host());
let server = t.server.unwrap();
assert_eq!(server.bundle, BundleFormat::Server);
assert_eq!(server.triple, Triple::host());
}),
TestHarnessBuilder::new("harness-no-dioxus")
.deps(r#"anyhow = { workspace = true, optional = true }"#)
.fetr(r#"web=["dep:anyhow"]"#)
.fetr(r#"server=[]"#)
.asrt(r#"dx build"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::host());
assert!(t.server.is_none());
}),
TestHarnessBuilder::new("harness-simple-dedicated-server"),
TestHarnessBuilder::new("harness-simple-dedicated-client")
.deps(r#"dioxus = { workspace = true, features = ["web"] }"#)
.asrt(r#"dx build"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::Web);
assert!(t.server.is_none());
})
.asrt(r#"dx build @client --package harness-simple-dedicated-client @server --package harness-simple-dedicated-server"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::Web);
let s = t.server.unwrap();
assert_eq!(s.bundle, BundleFormat::Server);
assert_eq!(s.triple, Triple::host());
},
)
.asrt(r#"dx build @client --package harness-simple-dedicated-client @server --package harness-simple-dedicated-server --target wasm32-unknown-unknown"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::Web);
let s = t.server.unwrap();
assert_eq!(s.bundle, BundleFormat::Server);
assert_eq!(s.triple, "wasm32-unknown-unknown".parse().unwrap());
}),
TestHarnessBuilder::new("harness-renderer-swap")
.deps(r#"dioxus = { workspace = true, features = ["fullstack"] }"#)
.fetr(r#"default=["desktop", "server"]"#)
.fetr(r#"desktop=["dioxus/desktop"]"#)
.fetr(r#"native=["dioxus/native"]"#)
.fetr(r#"server=["dioxus/server"]"#)
.asrt(
r#"dx build --desktop --renderer native"#,
|targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::host());
let server = t.server.unwrap();
assert_eq!(server.bundle, BundleFormat::Server);
assert_eq!(server.triple, Triple::host());
},
),
TestHarnessBuilder::new("harness-default-to-non-default")
.deps(r#"dioxus = { workspace = true, features = [] }"#)
.fetr(r#"default=["web"]"#)
.fetr(r#"web=["dioxus/web"]"#)
.asrt(
r#"dx build --ios"#,
|targets| async move {
let t = targets.unwrap();
assert!(t.server.is_none());
assert_eq!(t.client.bundle, BundleFormat::Ios);
assert_eq!(t.client.triple, TestHarnessBuilder::host_ios_triple_sim());
assert!(t.client.no_default_features);
},
),
TestHarnessBuilder::new("harness-fullstack-with-optional-tokio")
.deps(r#"dioxus = { workspace = true, features = ["fullstack"] }"#)
.deps(r#"serde = "1.0.219""#)
.deps(r#"tokio = { workspace = true, features = ["full"], optional = true }"#)
.fetr(r#"default = []"#)
.fetr(r#"server = ["dioxus/server", "dep:tokio"]"#)
.fetr(r#"web = ["dioxus/web"]"#)
// .asrt(r#"dx build"#, |targets| async move {
// assert!(targets.is_err())
// })
.asrt(r#"dx build --web"#, |targets| async move {
let t = targets.unwrap();
assert_eq!(t.client.bundle, BundleFormat::Web);
assert_eq!(t.client.triple, "wasm32-unknown-unknown".parse().unwrap());
let server = t.server.unwrap();
assert_eq!(server.bundle, BundleFormat::Server);
assert_eq!(server.triple, Triple::host());
})
])
.await;
}
/// Builder for an on-disk test crate ("harness") used to exercise `dx build`
/// argument and target resolution. Configure with `deps`/`fetr`, queue
/// assertions with `asrt`, then execute everything via `run`.
#[derive(Default)]
struct TestHarnessBuilder {
    // Crate name; also the directory name created under `cli-harnesses/`.
    name: String,
    // Raw lines appended verbatim to the generated Cargo.toml `[dependencies]` section.
    dependencies: String,
    // Raw lines appended verbatim to the generated Cargo.toml `[features]` section.
    features: String,
    // Queued test cases: a `dx` command line plus an async assertion callback.
    futures: Vec<TestHarnessTestCase>,
}
/// A single queued assertion: a shell-style `dx ...` command line plus an
/// async callback that inspects the resolved `BuildTargets` (or the
/// resolution error).
struct TestHarnessTestCase {
    // The command line to parse with `Cli::try_parse_from` and resolve.
    args: String,
    #[allow(clippy::type_complexity)]
    // Boxed + pinned so differently-typed async closures share one storage type.
    callback: Box<dyn FnOnce(Result<BuildTargets>) -> Pin<Box<dyn Future<Output = ()>>>>,
}
impl TestHarnessBuilder {
    /// Start a harness with the given crate/directory name and no deps,
    /// features, or queued test cases.
    fn new(name: &str) -> Self {
        Self {
            name: name.into(),
            dependencies: Default::default(),
            features: Default::default(),
            futures: Default::default(),
        }
    }

    /// Add a dependency to the test harness.
    ///
    /// `dependencies` is a raw Cargo.toml line appended to `[dependencies]`.
    fn deps(mut self, dependencies: impl Into<String>) -> Self {
        writeln!(&mut self.dependencies, "{}", dependencies.into()).unwrap();
        self
    }

    /// Add a feature to the test harness.
    ///
    /// `features` is a raw Cargo.toml line appended to `[features]`.
    fn fetr(mut self, features: impl Into<String>) -> Self {
        writeln!(&mut self.features, "{}", features.into()).unwrap();
        self
    }

    /// Assert the expected behavior of the test harness.
    ///
    /// Queues `args` (a full `dx ...` command line) together with an async
    /// callback over the resolved targets; nothing runs until `Self::run`.
    fn asrt<F>(
        mut self,
        args: impl Into<String>,
        future: impl FnOnce(Result<BuildTargets>) -> F + 'static,
    ) -> Self
    where
        F: Future<Output = ()> + 'static,
    {
        self.futures.push(TestHarnessTestCase {
            args: args.into(),
            // Box + pin to erase the concrete future type.
            callback: Box::new(move |args| Box::pin(future(args))),
        });
        self
    }

    /// Write the test harness to the filesystem.
    ///
    /// Recreates `harness_dir/<name>` with a generated Cargo.toml (from the
    /// queued deps/features) and a minimal `src/main.rs`.
    fn build(&self, harness_dir: &Path) {
        let name = self.name.clone();
        let dependencies = self.dependencies.clone();
        let features = self.features.clone();
        let test_dir = harness_dir.join(&name);
        // Start from a clean slate; ignore failure when the dir doesn't exist yet.
        _ = std::fs::remove_dir_all(&test_dir);
        std::fs::create_dir_all(&test_dir).unwrap();
        std::fs::create_dir_all(test_dir.join("src")).unwrap();
        let cargo_toml = format!(
            r#"[package]
name = "{name}"
version = "0.0.1"
edition = "2021"
license = "MIT OR Apache-2.0"
publish = false
[dependencies]
{dependencies}
[features]
{features}
"#,
            name = name,
            dependencies = dependencies,
            features = features
        );
        std::fs::write(test_dir.join("Cargo.toml"), cargo_toml).unwrap();
        // Only harnesses whose features actually mention dioxus get a dioxus main.
        let contents = if features.contains("dioxus") {
            r#"use dioxus::prelude::*;
fn main() {
    dioxus::launch(|| rsx! { "hello world!" })
}
"#
        } else {
            r#"fn main() {
    println!("Hello, world!");
}
"#
        };
        std::fs::write(test_dir.join("src/main.rs"), contents).unwrap();
    }

    /// Write every harness to disk, then parse and resolve each queued `dx`
    /// command line, awaiting the associated assertion callbacks.
    async fn run(harnesses: Vec<Self>) {
        // Force verbose/trace logging for the duration of the test run.
        _ = crate::VERBOSITY.set(crate::Verbosity {
            verbose: true,
            trace: true,
            json_output: false,
            log_to_file: None,
            locked: false,
            offline: false,
            frozen: false,
        });
        let cargo_manifest_dir = std::env::var("CARGO_MANIFEST_DIR")
            .map(PathBuf::from)
            .unwrap();
        let harness_dir = cargo_manifest_dir.parent().unwrap().join("cli-harnesses");
        // make sure we don't start deleting random stuff.
        if !harness_dir.exists() {
            panic!(
                "cli-harnesses directory does not exist, aborting: {:?}",
                harness_dir
            );
        }
        // Erase old entries in the harness directory, but keep files (ie README.md) around
        for entry in std::fs::read_dir(&harness_dir).unwrap() {
            let entry = entry.unwrap();
            if entry.file_type().unwrap().is_dir() {
                std::fs::remove_dir_all(entry.path()).unwrap();
            }
        }
        // Now that the harnesses are written to the filesystem, we can call cargo_metadata
        // It will be cached from here
        let mut futures = FuturesUnordered::new();
        let mut seen_names = HashSet::new();
        for harness in harnesses {
            // Duplicate names would silently overwrite each other's crates on disk.
            if !seen_names.insert(harness.name.clone()) {
                panic!("Duplicate test harness name found: {}", harness.name);
            }
            harness.build(&harness_dir);
            for case in harness.futures {
                let mut escaped = shell_words::split(&case.args).unwrap();
                // Default the case to this harness's package unless the args
                // already pin packages via --package/@server/@client.
                if !(escaped.contains(&"--package".to_string())
                    || escaped.contains(&"@server".to_string())
                    || escaped.contains(&"@client".to_string()))
                {
                    escaped.push("--package".to_string());
                    escaped.push(harness.name.clone());
                }
                let args = Cli::try_parse_from(escaped).unwrap();
                let Commands::Build(build_args) = args.action else {
                    panic!("Expected build command");
                };
                futures.push(async move {
                    let targets = build_args.into_targets().await;
                    (case.callback)(targets).await;
                });
            }
        }
        // Give a moment for fs to catch up
        std::thread::sleep(std::time::Duration::from_secs(1));
        let _workspace = Workspace::current().await.unwrap();
        // Drive every queued assertion to completion, in any order.
        while let Some(_res) = futures.next().await {}
    }

    /// The iOS target triple matching the host architecture (simulator on
    /// Apple Silicon, device-style triple on x86_64).
    fn host_ios_triple_sim() -> Triple {
        if cfg!(target_arch = "aarch64") {
            "aarch64-apple-ios-sim".parse().unwrap()
        } else {
            "x86_64-apple-ios".parse().unwrap()
        }
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/error.rs | packages/cli/src/error.rs | pub(crate) type Result<T, E = Error> = std::result::Result<T, E>;
pub use anyhow::Error;
use itertools::Itertools;
pub fn log_stacktrace(err: &anyhow::Error, padding: usize) -> String {
let mut trace = format!("{err}",);
for (idx, cause) in err.chain().enumerate().skip(1) {
trace.push_str(&format!(
"\n{}{IDX_STYLE}{idx}{IDX_STYLE:#}: {}",
" ".repeat(padding),
cause
.to_string()
.lines()
.enumerate()
.map(|(idx, line)| {
if idx == 0 {
line.to_string()
} else {
format!("{}{}", " ".repeat(padding + 3), line)
}
})
.join("\n"),
IDX_STYLE = crate::styles::GLOW_STYLE,
));
}
if crate::verbosity_or_default().trace {
trace.push_str(&format!("\nBacktrace:\n{}", err.backtrace()));
}
trace
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/wasm_opt.rs | packages/cli/src/wasm_opt.rs | use crate::config::WasmOptLevel;
use crate::{CliSettings, Result, WasmOptConfig, Workspace};
use anyhow::{anyhow, bail, Context};
use flate2::read::GzDecoder;
use std::path::{Path, PathBuf};
use tar::Archive;
use tempfile::NamedTempFile;
/// Write these wasm bytes to `output_path`, then run wasm-opt over the file.
///
/// Passing the same path as input and output makes `optimize` rewrite the
/// freshly written module in place.
pub async fn write_wasm(bytes: &[u8], output_path: &Path, cfg: &WasmOptConfig) -> Result<()> {
    std::fs::write(output_path, bytes)?;
    optimize(output_path, output_path, cfg).await
}
/// Run wasm-opt on `input_path` with the given config, writing the optimized
/// module to `output_path`.
pub async fn optimize(input_path: &Path, output_path: &Path, cfg: &WasmOptConfig) -> Result<()> {
    // Resolving the binary may download wasm-opt on first use.
    let opt = WasmOpt::new(input_path, output_path, cfg)
        .await
        .context("Failed to create wasm-opt instance")?;
    opt.optimize().await.context("Failed to run wasm-opt")?;
    Ok(())
}
/// A single prepared wasm-opt invocation: binary location, input/output
/// paths, a staging file, and the optimization config.
struct WasmOpt {
    // Path to the wasm-opt executable (downloaded or found on PATH).
    path: PathBuf,
    input_path: PathBuf,
    // wasm-opt writes here first; the result is only copied to `output_path`
    // on success, so a failed run cannot leave a truncated output behind.
    temporary_output_path: NamedTempFile,
    output_path: PathBuf,
    cfg: WasmOptConfig,
}
impl WasmOpt {
    /// Resolve the wasm-opt binary (downloading it if needed) and stage a
    /// temporary output file for a run from `input_path` to `output_path`.
    pub async fn new(
        input_path: &Path,
        output_path: &Path,
        cfg: &WasmOptConfig,
    ) -> anyhow::Result<Self> {
        let path = get_binary_path().await?;
        Ok(Self {
            path,
            input_path: input_path.to_path_buf(),
            temporary_output_path: tempfile::NamedTempFile::new()?,
            output_path: output_path.to_path_buf(),
            cfg: cfg.clone(),
        })
    }

    /// Create the command to run wasm-opt
    fn build_command(&self) -> tokio::process::Command {
        // defaults needed by wasm-opt.
        // wasm is a moving target, and we add these by default since they progressively get enabled by default.
        let mut args = vec![
            "--enable-reference-types",
            "--enable-bulk-memory",
            "--enable-mutable-globals",
            "--enable-nontrapping-float-to-int",
        ];
        if self.cfg.memory_packing {
            // needed for our current approach to bundle splitting to work properly
            // todo(jon): emit the main module's data section in chunks instead of all at once
            args.push("--memory-packing");
        }
        // Keep or strip DWARF info depending on the configured debug flag.
        if !self.cfg.debug {
            args.push("--strip-debug");
        } else {
            args.push("--debuginfo");
        }
        // Caller-supplied passthrough flags.
        for extra in &self.cfg.extra_features {
            args.push(extra);
        }
        // Map the configured optimization level to wasm-opt's -O flag.
        let level = match self.cfg.level {
            WasmOptLevel::Z => "-Oz",
            WasmOptLevel::S => "-Os",
            WasmOptLevel::Zero => "-O0",
            WasmOptLevel::One => "-O1",
            WasmOptLevel::Two => "-O2",
            WasmOptLevel::Three => "-O3",
            WasmOptLevel::Four => "-O4",
        };
        tracing::debug!(
            "Running wasm-opt: {} {} {} -o {} {}",
            self.path.to_string_lossy(),
            self.input_path.to_string_lossy(),
            level,
            self.temporary_output_path.path().to_string_lossy(),
            args.join(" ")
        );
        // Output goes to the staging temp file, not the final output path.
        let mut command = tokio::process::Command::new(&self.path);
        command
            .arg(&self.input_path)
            .arg(level)
            .arg("-o")
            .arg(self.temporary_output_path.path())
            .args(args);
        command
    }

    /// Run wasm-opt, copying the optimized output into place on success.
    ///
    /// On failure the error is logged (with a telemetry event) and the
    /// original input is copied to the output instead, so callers always end
    /// up with a usable wasm file. Returns `Err` only when spawning the
    /// process itself fails.
    pub async fn optimize(&self) -> Result<()> {
        let mut command = self.build_command();
        let res = command.output().await?;
        if !res.status.success() {
            let err = String::from_utf8_lossy(&res.stderr);
            tracing::error!(
                telemetry = %serde_json::json!({ "event": "wasm_opt_failed" }),
                "wasm-opt failed with status code {}\nstderr: {}\nstdout: {}",
                res.status,
                err,
                String::from_utf8_lossy(&res.stdout)
            );
            // A failing wasm-opt execution may leave behind an empty file so copy the original file instead.
            if self.input_path != self.output_path {
                std::fs::copy(&self.input_path, &self.output_path).unwrap();
            }
        } else {
            std::fs::copy(self.temporary_output_path.path(), &self.output_path).unwrap();
        }
        Ok(())
    }
}
// Find the URL for the latest binaryen release that contains wasm-opt
async fn find_latest_wasm_opt_download_url() -> anyhow::Result<String> {
    // Find the platform identifier based on the current OS and architecture
    // hardcoded for now to get around github api rate limits
    if cfg!(all(target_os = "windows", target_arch = "x86_64")) {
        return Ok("https://github.com/WebAssembly/binaryen/releases/download/version_123/binaryen-version_123-x86_64-windows.tar.gz".to_string());
    } else if cfg!(all(target_os = "linux", target_arch = "x86_64")) {
        return Ok("https://github.com/WebAssembly/binaryen/releases/download/version_123/binaryen-version_123-x86_64-linux.tar.gz".to_string());
    } else if cfg!(all(target_os = "linux", target_arch = "aarch64")) {
        return Ok("https://github.com/WebAssembly/binaryen/releases/download/version_123/binaryen-version_123-aarch64-linux.tar.gz".to_string());
    } else if cfg!(all(target_os = "macos", target_arch = "x86_64")) {
        return Ok("https://github.com/WebAssembly/binaryen/releases/download/version_123/binaryen-version_123-x86_64-macos.tar.gz".to_string());
    } else if cfg!(all(target_os = "macos", target_arch = "aarch64")) {
        return Ok("https://github.com/WebAssembly/binaryen/releases/download/version_123/binaryen-version_123-arm64-macos.tar.gz".to_string());
    };
    // Fallback for platforms without a pinned URL: query the GitHub releases API.
    let url = "https://api.github.com/repos/WebAssembly/binaryen/releases/latest";
    let client = reqwest::Client::new();
    // GitHub's API rejects requests without a User-Agent header.
    let response = client
        .get(url)
        .header("User-Agent", "dioxus-cli")
        .send()
        .await?
        .json::<serde_json::Value>()
        .await?;
    tracing::trace!("Response from GitHub: {:#?}", response);
    let assets = response
        .get("assets")
        .and_then(|assets| assets.as_array())
        .ok_or_else(|| anyhow::anyhow!("Failed to parse assets"))?;
    // Find the platform identifier based on the current OS and architecture
    let platform = if cfg!(all(target_os = "windows", target_arch = "x86_64")) {
        "x86_64-windows"
    } else if cfg!(all(target_os = "linux", target_arch = "x86_64")) {
        "x86_64-linux"
    } else if cfg!(all(target_os = "linux", target_arch = "aarch64")) {
        "aarch64-linux"
    } else if cfg!(all(target_os = "macos", target_arch = "x86_64")) {
        "x86_64-macos"
    } else if cfg!(all(target_os = "macos", target_arch = "aarch64")) {
        "arm64-macos"
    } else {
        bail!("Unknown platform for wasm-opt installation. Please install wasm-opt manually from https://github.com/WebAssembly/binaryen/releases and add it to your PATH.");
    };
    // Find the first asset with a name that contains the platform string
    // (skipping the accompanying *.sha256 checksum files).
    let asset = assets
        .iter()
        .find(|asset| {
            asset
                .get("name")
                .and_then(|name| name.as_str())
                .is_some_and(|name| name.contains(platform) && !name.ends_with("sha256"))
        })
        .with_context(|| anyhow!(
            "No suitable wasm-opt binary found for platform: {}. Please install wasm-opt manually from https://github.com/WebAssembly/binaryen/releases and add it to your PATH.",
            platform
        ))?;
    // Extract the download URL from the asset
    let download_url = asset
        .get("browser_download_url")
        .and_then(|url| url.as_str())
        .ok_or_else(|| anyhow::anyhow!("Failed to get download URL for wasm-opt"))?;
    Ok(download_url.to_string())
}
/// Get the path to the wasm-opt binary, downloading it into the dioxus data
/// directory if it isn't already installed.
///
/// When downloads are disabled via CLI settings, a system-wide `wasm-opt` on
/// PATH is accepted instead; otherwise the binary is fetched from GitHub.
pub async fn get_binary_path() -> anyhow::Result<PathBuf> {
    let dir = install_dir();
    let bin = installed_bin_path(&dir);

    // Already installed by a previous run.
    if bin.exists() {
        return Ok(bin);
    }

    // Downloads disabled: only a PATH-resolved binary will do.
    if CliSettings::prefer_no_downloads() {
        return which::which("wasm-opt").map_err(|_| anyhow!("Missing wasm-opt"));
    }

    tracing::info!("Installing wasm-opt");
    install_github(&dir).await?;
    tracing::info!("wasm-opt installed from Github");

    Ok(bin)
}
/// Report where wasm-opt is installed, if anywhere, without downloading.
///
/// Checks the dioxus-managed install directory first; when downloads are
/// disabled, also accepts a system-wide `wasm-opt` on PATH. Returns `None`
/// when no binary can be located.
pub fn installed_location() -> Option<PathBuf> {
    let install_path = installed_bin_path(&install_dir());
    if install_path.exists() {
        return Some(install_path);
    }
    if CliSettings::prefer_no_downloads() {
        // A `which` failure simply means wasm-opt isn't on PATH.
        return which::which("wasm-opt").ok();
    }
    None
}
/// Directory under the dioxus data dir where binaryen gets unpacked.
fn install_dir() -> PathBuf {
    let data_dir = Workspace::dioxus_data_dir();
    data_dir.join("binaryen")
}
/// Platform-specific file name of the wasm-opt executable.
fn installed_bin_name() -> &'static str {
    match cfg!(windows) {
        true => "wasm-opt.exe",
        false => "wasm-opt",
    }
}
/// Full path of the wasm-opt executable inside `install_dir`.
fn installed_bin_path(install_dir: &Path) -> PathBuf {
    let mut path = install_dir.to_path_buf();
    path.push("bin");
    path.push(installed_bin_name());
    path
}
/// Install wasm-opt from GitHub releases into the specified directory
///
/// Downloads the whole binaryen release tarball into memory, then unpacks
/// only the wasm-opt binary and the `lib/` folder into `install_dir`.
async fn install_github(install_dir: &Path) -> anyhow::Result<()> {
    tracing::trace!("Attempting to install wasm-opt from GitHub");
    std::fs::create_dir_all(install_dir)?;
    let url = find_latest_wasm_opt_download_url()
        .await
        .context("Failed to find latest wasm-opt download URL")?;
    tracing::trace!("Downloading wasm-opt from {}", url);
    // Download the binaryen release archive into memory
    let bytes = reqwest::get(url).await?.bytes().await?;
    // We don't need the whole gzip archive, just the wasm-opt binary and the lib folder. We
    // just extract those files from the archive.
    let installed_bin_path = installed_bin_path(install_dir);
    let lib_folder_name = "lib";
    let installed_lib_path = install_dir.join(lib_folder_name);
    // Create the lib and bin directories if they don't exist
    for path in [installed_bin_path.parent(), Some(&installed_lib_path)]
        .into_iter()
        .flatten()
    {
        std::fs::create_dir_all(path)
            .context(format!("Failed to create directory: {}", path.display()))?;
    }
    let mut archive = Archive::new(GzDecoder::new(bytes.as_ref()));
    // Unpack the binary and library files from the archive
    // (entries that fail to read are silently skipped via `flatten`).
    for mut entry in archive.entries()?.flatten() {
        // Unpack the wasm-opt binary
        if entry
            .path_bytes()
            .ends_with(installed_bin_name().as_bytes())
        {
            entry.unpack(&installed_bin_path)?;
        }
        // Unpack any files in the lib folder
        // (flattened into `lib/` regardless of their nesting in the archive)
        else if let Ok(path) = entry.path() {
            if path.components().any(|c| c.as_os_str() == lib_folder_name) {
                if let Some(file_name) = path.file_name() {
                    entry.unpack(installed_lib_path.join(file_name))?;
                }
            }
        }
    }
    Ok(())
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/dx_build_info.rs | packages/cli/src/dx_build_info.rs | // The file has been placed there by the build script.
include!(concat!(env!("OUT_DIR"), "/built.rs"));
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/fastfs.rs | packages/cli/src/fastfs.rs | //! Methods for working with the filesystem that are faster than the std fs methods
//! Uses stuff like rayon, caching, and other optimizations
//!
//! Allows configuration in case you want to do some work while copying and allows you to track progress
use std::{
ffi::OsString,
path::{Path, PathBuf},
};
use brotli::enc::BrotliEncoderParams;
use walkdir::WalkDir;
/// Get the path to the compressed (`.br`) sibling of a file.
///
/// Appends `.br` to the existing extension (or uses `br` when there is
/// none). Returns `None` when the extension already ends in `br` — the file
/// is treated as already compressed.
fn compressed_path(path: &Path) -> Option<PathBuf> {
    let new_extension = match path.extension() {
        // Already a brotli artifact: nothing to produce.
        Some(ext) if ext.to_string_lossy().to_lowercase().ends_with("br") => return None,
        Some(ext) => {
            let mut ext = ext.to_os_string();
            ext.push(".br");
            ext
        }
        None => OsString::from("br"),
    };
    Some(path.with_extension(new_extension))
}
/// pre-compress a file with brotli, writing the result next to the original
/// with a `.br` suffix. Files already ending in `.br` are left untouched.
pub(crate) fn pre_compress_file(path: &Path) -> std::io::Result<()> {
    match compressed_path(path) {
        // Already compressed (or un-compressible name): nothing to do.
        None => Ok(()),
        Some(dest) => {
            let mut reader = std::io::BufReader::new(std::fs::File::open(path)?);
            let mut writer = std::fs::File::create(dest)?;
            brotli::BrotliCompress(&mut reader, &mut writer, &BrotliEncoderParams::default())?;
            Ok(())
        }
    }
}
/// pre-compress all files in a folder (recursively).
///
/// When `pre_compress` is false, previously generated `.br` siblings are
/// deleted instead, so the output tree matches the current setting.
pub(crate) fn pre_compress_folder(path: &Path, pre_compress: bool) -> std::io::Result<()> {
    for entry in WalkDir::new(path).into_iter().filter_map(|e| e.ok()) {
        let entry_path = entry.path();
        if !entry_path.is_file() {
            continue;
        }
        if pre_compress {
            tracing::info!("Pre-compressing file {}", entry_path.display());
            // A single failed file shouldn't abort the rest of the walk.
            if let Err(err) = pre_compress_file(entry_path) {
                tracing::error!("Failed to pre-compress file {entry_path:?}: {err}");
            }
        }
        // If pre-compression isn't enabled, we should remove the old compressed file if it exists
        else if let Some(compressed_path) = compressed_path(entry_path) {
            _ = std::fs::remove_file(compressed_path);
        }
    }
    Ok(())
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/main.rs | packages/cli/src/main.rs | #![doc = include_str!("../README.md")]
#![doc(html_logo_url = "https://avatars.githubusercontent.com/u/79236386")]
#![doc(html_favicon_url = "https://avatars.githubusercontent.com/u/79236386")]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![allow(clippy::doc_overindented_list_items)]
mod build;
mod bundle_utils;
mod cargo_toml;
mod cli;
mod config;
mod devcfg;
mod dx_build_info;
mod error;
mod fastfs;
mod logging;
mod platform;
mod rustcwrapper;
mod serve;
mod settings;
mod tailwind;
mod test_harnesses;
mod wasm_bindgen;
mod wasm_opt;
mod workspace;
use std::process::ExitCode;
pub(crate) use build::*;
pub(crate) use cli::*;
pub(crate) use config::*;
pub(crate) use dioxus_dx_wire_format::*;
pub(crate) use error::*;
pub(crate) use link::*;
pub(crate) use logging::*;
pub(crate) use platform::*;
pub(crate) use rustcwrapper::*;
pub(crate) use settings::*;
pub(crate) use tailwind::*;
pub(crate) use wasm_bindgen::*;
pub(crate) use workspace::*;
/// CLI entry point.
///
/// Handles the two re-entrant modes first (dx invoked as a rustc wrapper or
/// as a linker), then parses and dispatches the subcommand under the tracing
/// collector, finally emitting the structured JSON result as the last line of
/// output.
#[tokio::main]
async fn main() -> ExitCode {
    // The CLI uses dx as a rustcwrapper in some instances (like binary patching)
    if rustcwrapper::is_wrapping_rustc() {
        return rustcwrapper::run_rustc();
    }
    // If we're being ran as a linker (likely from ourselves), we want to act as a linker instead.
    if let Some(link_args) = link::LinkAction::from_env() {
        return link_args.run_link();
    }
    // Run under the tracing collector so we can capture errors/panics.
    let result = TraceController::main(|args, tracer| async move {
        // Dispatch to the matching subcommand implementation.
        match args {
            Commands::Serve(opts) => opts.serve(&tracer).await,
            Commands::Translate(opts) => opts.translate(),
            Commands::New(opts) => opts.create().await,
            Commands::Init(opts) => opts.init().await,
            Commands::Config(opts) => opts.config().await,
            Commands::Autoformat(opts) => opts.autoformat().await,
            Commands::Check(opts) => opts.check().await,
            Commands::Build(opts) => opts.build().await,
            Commands::Bundle(opts) => opts.bundle().await,
            Commands::Run(opts) => opts.run().await,
            Commands::SelfUpdate(opts) => opts.self_update().await,
            Commands::Tools(BuildTools::BuildAssets(opts)) => opts.run().await,
            Commands::Tools(BuildTools::HotpatchTip(opts)) => opts.run().await,
            Commands::Doctor(opts) => opts.doctor().await,
            Commands::Print(opts) => opts.print().await,
            Commands::Components(opts) => opts.run().await,
        }
    });
    // Print the structured output in JSON format for third-party tools to consume.
    // Make sure we do this as the last step so you can always `tail -1` it
    match result.await {
        StructuredOutput::Error { message } => {
            tracing::error!(json = %StructuredOutput::Error { message });
            // Non-zero exit so scripts and CI can detect failure.
            std::process::exit(1);
        }
        output => tracing::info!(json = %output),
    }
    ExitCode::SUCCESS
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/logging.rs | packages/cli/src/logging.rs | //! CLI Tracing
//!
//! The CLI's tracing has internal and user-facing logs. User-facing logs are directly routed to the user in some form.
//! Internal logs are stored in a log file for consumption in bug reports and debugging.
//! We use tracing fields to determine whether a log is internal or external and additionally if the log should be
//! formatted or not.
//!
//! These two fields are
//! `dx_src` which tells the logger that this is a user-facing message and should be routed as so.
//! `dx_no_fmt`which tells the logger to avoid formatting the log and to print it as-is.
//!
//! 1. Build general filter
//! 2. Build file append layer for logging to a file. This file is reset on every CLI-run.
//! 3. Build CLI layer for routing tracing logs to the TUI.
//! 4. Build fmt layer for non-interactive logging with a custom writer that prevents output during interactive mode.
//!
//! ## Telemetry
//!
//! The CLI collects anonymized telemetry data to help us understand how the CLI is used. We primarily
//! care about catching panics and fatal errors. Data is uploaded to PostHog through a custom proxy endpoint.
//!
//! Telemetry events are collected while the CLI is running and then flushed to disk at the end of the session.
//! When the CLI starts again, it tries its best to upload the telemetry data from the FS. In CI,
//! the telemetry data is uploaded immediately after the CLI completes with a 5 second timeout.
//!
//! You can opt out in a number of ways:
//! - set TELEMETRY=false in your environment
//! - set DX_TELEMETRY_ENABLED=false in your environment
//! - set `dx config set disable-telemetry true`
//!
use crate::component::ComponentCommand;
use crate::{dx_build_info::GIT_COMMIT_HASH_SHORT, serve::ServeUpdate, Cli, Commands, Verbosity};
use crate::{BundleFormat, CliSettings, Workspace};
use anyhow::{bail, Context, Error, Result};
use cargo_metadata::diagnostic::{Diagnostic, DiagnosticLevel};
use clap::Parser;
use dioxus_cli_telemetry::TelemetryEventData;
use dioxus_dx_wire_format::StructuredOutput;
use futures_channel::mpsc::{UnboundedReceiver, UnboundedSender};
use futures_util::FutureExt;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::{any::Any, io::Read, str::FromStr, sync::Arc, time::SystemTime};
use std::{borrow::Cow, sync::OnceLock};
use std::{
collections::HashMap,
env,
fmt::{Debug, Display, Write as _},
sync::atomic::{AtomicBool, Ordering},
time::Instant,
};
use std::{future::Future, panic::AssertUnwindSafe};
use tracing::{field::Visit, Level, Subscriber};
use tracing_subscriber::{
fmt::{
format::{self, Writer},
time::FormatTime,
},
prelude::*,
registry::LookupSpan,
EnvFilter, Layer,
};
use uuid::Uuid;
const LOG_ENV: &str = "DIOXUS_LOG";
const DX_SRC_FLAG: &str = "dx_src";
pub static VERBOSITY: OnceLock<Verbosity> = OnceLock::new();
pub fn verbosity_or_default() -> Verbosity {
crate::VERBOSITY.get().cloned().unwrap_or_default()
}
/// Re-show the terminal cursor if stdout is a terminal.
fn reset_cursor() {
    use std::io::IsTerminal;
    // if we are running in a terminal, reset the cursor. The tui_active flag is not set for
    // the cargo generate TUI, but we still want to reset the cursor.
    let stdout_is_tty = std::io::stdout().is_terminal();
    if stdout_is_tty {
        // Failure to toggle the cursor is harmless; ignore it.
        let _ = console::Term::stdout().show_cursor();
    }
}
/// A trait that emits an anonymous JSON representation of the object, suitable for telemetry.
///
/// NOTE(review): implementations are expected to omit user-identifying data
/// (paths, names, etc.) — confirm against each `anonymized` impl.
pub(crate) trait Anonymized {
    /// Produce the anonymized JSON payload for telemetry upload.
    fn anonymized(&self) -> serde_json::Value;
}
/// A custom layer that wraps our special interception logic based on the mode of the CLI and its verbosity.
///
/// Redirects TUI logs, writes to files, and queues telemetry events.
///
/// It is cloned and passed directly as a layer to the tracing subscriber.
#[derive(Clone)]
pub struct TraceController {
    // HTTP client for telemetry upload; None when client construction failed.
    http_client: Option<reqwest::Client>,
    // Telemetry session identity; None when enrollment failed.
    reporter: Option<Reporter>,
    // Telemetry events queue up here and are only drained at end of the run.
    telemetry_tx: UnboundedSender<TelemetryEventData>,
    telemetry_rx: Arc<tokio::sync::Mutex<UnboundedReceiver<TelemetryEventData>>>,
    // Optional log file (from the CLI's log_to_file option) opened in append mode.
    log_to_file: Option<Arc<tokio::sync::Mutex<std::fs::File>>>,
    // Set while the serve TUI owns the terminal; suppresses direct console output.
    tui_active: Arc<AtomicBool>,
    // Channel routing trace messages to the interactive TUI.
    tui_tx: UnboundedSender<TraceMsg>,
    tui_rx: Arc<tokio::sync::Mutex<UnboundedReceiver<TraceMsg>>>,
}
/// An error that contains information about a captured panic, including the error, thread name, and location.
/// This is passed through to the tracing subscriber which is downcasted into a `CapturedPanicError`
#[derive(Debug, Clone)]
struct CapturedPanicError {
    // The panic payload wrapped as an anyhow error; Arc keeps the struct Clone.
    error: Arc<Error>,
    // String describing the payload's type — NOTE(review): populated by the
    // panic hook (not fully visible here); confirm its exact contents there.
    error_type: String,
    // Name of the thread that panicked, when available.
    thread_name: Option<String>,
    // Source location of the panic, when the panic info carried one.
    location: Option<SavedLocation>,
}
impl std::fmt::Display for CapturedPanicError {
    /// Single-line, human-readable dump of the captured panic details.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // The label previously read "CapturedBacktrace" — a leftover from an
        // earlier name of this type. Keep it in sync with the struct name so
        // log consumers aren't misled.
        write!(
            f,
            "CapturedPanicError {{ error: {}, error_type: {}, thread_name: {:?}, location: {:?} }}",
            self.error, self.error_type, self.thread_name, self.location
        )
    }
}
impl std::error::Error for CapturedPanicError {}
/// A panic site (file/line/column) captured as owned data so it can be stored
/// and cloned after the original borrowed `panic::Location` is gone.
#[derive(Debug, Clone)]
struct SavedLocation {
    file: String,
    line: u32,
    column: u32,
}
impl TraceController {
/// Initialize the CLI and set up the tracing infrastructure
///
/// This captures panics and flushes telemetry to a file after the CLI has run.
///
/// We pass the TraceController around the CLI in a few places, namely the serve command so the TUI
/// can access things like the logs.
pub async fn main<F>(run_app: impl FnOnce(Commands, Self) -> F) -> StructuredOutput
where
F: Future<Output = Result<StructuredOutput>>,
{
let args = Cli::parse();
let tui_active = Arc::new(AtomicBool::new(false));
let is_serve_cmd = matches!(args.action, Commands::Serve(_));
VERBOSITY
.set(args.verbosity.clone())
.expect("verbosity should only be set once");
// Set up a basic env-based filter for the logs
let env_filter = match env::var(LOG_ENV) {
Ok(_) => EnvFilter::from_env(LOG_ENV),
_ if is_serve_cmd => EnvFilter::new("error,dx=trace,dioxus_cli=trace,manganis_cli_support=trace,wasm_split_cli=trace,subsecond_cli_support=trace"),
_ => EnvFilter::new(format!(
"error,dx={our_level},dioxus_cli={our_level},manganis_cli_support={our_level},wasm_split_cli={our_level},subsecond_cli_support={our_level}",
our_level = if args.verbosity.verbose { "debug" } else { "info" }
))
};
// Listen to a few more tokio events if the tokio-console feature is enabled
#[cfg(feature = "tokio-console")]
let env_filter = env_filter
.add_directive("tokio=trace".parse().unwrap())
.add_directive("runtime=trace".parse().unwrap());
// Set up the json filter which lets through JSON traces only if the `json` field is present in the trace metadata
// If the json is disabled, we filter it completely so it doesn't show up in the logs
let json_filter = tracing_subscriber::filter::filter_fn(move |meta| {
if meta.fields().len() == 1 && meta.fields().iter().next().unwrap().name() == "json" {
return args.verbosity.json_output;
}
true
});
// We complete filter out a few fields that are not relevant to the user, like `dx_src` and `json`
let fmt_layer = tracing_subscriber::fmt::layer()
.with_target(args.verbosity.verbose)
.fmt_fields(
format::debug_fn(move |writer, field, value| {
if field.name() == "json" && !args.verbosity.json_output {
return Ok(());
}
if field.name() == "dx_src" && !args.verbosity.verbose {
return Ok(());
}
if field.name() == "telemetry" {
return Ok(());
}
write!(writer, "{}", format_field(field.name(), value))
})
.delimited(" "),
)
.with_timer(PrettyUptime::default());
// If json output is enabled, we want to format the output as JSON
// When running in interactive mode (of which serve is the only one), we don't want to log to console directly
let fmt_layer = if args.verbosity.json_output {
fmt_layer.json().flatten_event(true).boxed()
} else {
fmt_layer.boxed()
}
.with_filter(tracing_subscriber::filter::filter_fn({
let tui_active = tui_active.clone();
move |re| {
// If the TUI is active, we don't want to log to the console directly
if tui_active.load(Ordering::Relaxed) {
return false;
}
// If the TUI is disabled, then we might be draining the logs during an error report.
// In this case, we only show debug logs if the verbosity is set to verbose.
//
// If we're not running the serve command at all, this isn't relevant, so let the log through.
if !is_serve_cmd {
return true;
}
let verbosity = VERBOSITY.get().unwrap();
// If the verbosity is trace, let through trace logs (most verbose)
if verbosity.trace {
return re.level() <= &Level::TRACE;
}
// if the verbosity is verbose, let through debug logs
if verbosity.verbose {
return re.level() <= &Level::DEBUG;
}
// Otherwise, only let through info and higher level logs
re.level() <= &Level::INFO
}
}));
// Set up the tokio console subscriber if enabled
#[cfg(feature = "tokio-console")]
let console_layer = console_subscriber::spawn();
#[cfg(not(feature = "tokio-console"))]
let console_layer = tracing_subscriber::layer::Identity::new();
// Construct our custom layer that handles the TUI and file logging
let log_to_file = args
.verbosity
.log_to_file
.as_deref()
.map(|file_path| {
std::fs::OpenOptions::new()
.append(true)
.create(true)
.open(file_path)
.map(|file| Arc::new(tokio::sync::Mutex::new(file)))
})
.transpose()
.context("Failed to open specified log_file for writing")
.unwrap();
// Create a new session ID for this invocation of the CLI
let reporter = Self::enroll_reporter().ok();
// Create a new telemetry uploader
let http_client = reqwest::Client::builder()
.timeout(std::time::Duration::from_secs(5))
.build()
.ok();
// Create a new telemetry channel
// Note that we only drain the channel at the end of the CLI run, so it's not really being used as a channel - more of a vecdeque
let (telemetry_tx, telemetry_rx) = futures_channel::mpsc::unbounded();
let (tui_tx, tui_rx) = futures_channel::mpsc::unbounded();
let tracer = TraceController {
reporter,
telemetry_tx,
log_to_file,
tui_tx,
tui_rx: Arc::new(tokio::sync::Mutex::new(tui_rx)),
telemetry_rx: Arc::new(tokio::sync::Mutex::new(telemetry_rx)),
http_client,
tui_active,
};
// Spawn the telemetry uploader in the background
tokio::spawn(
tracer
.clone()
.upload_telemetry_files(Self::to_invoked_event(&args.action)),
);
// Construct the tracing subscriber
tracing_subscriber::registry()
.with(env_filter)
.with(json_filter)
.with(tracer.clone())
.with(console_layer)
.with(fmt_layer)
.init();
// Set the panic handler to capture backtraces in case of a panic
// let initial_thread = std::thread::current();
std::panic::set_hook(Box::new(move |panic_info| {
let payload = if let Some(s) = panic_info.payload().downcast_ref::<String>() {
s.to_string()
} else if let Some(s) = panic_info.payload().downcast_ref::<&str>() {
s.to_string()
} else {
"<unknown panic>".to_string()
};
let current_thread = std::thread::current();
let thread_name = current_thread.name().map(|s| s.to_string());
let location = panic_info.location().unwrap();
let err = anyhow::anyhow!(payload);
let err_display = format!("{:?}", err)
.lines()
.take_while(|line| !line.ends_with("___rust_try"))
.join("\n");
let boxed_panic: Box<dyn std::error::Error + Send + 'static> =
Box::new(CapturedPanicError {
error: Arc::new(err),
thread_name: thread_name.clone(),
error_type: "Rust Panic".to_string(),
location: panic_info.location().map(|l| SavedLocation {
file: l.file().to_string(),
line: l.line(),
column: l.column(),
}),
});
tracing::error!(
telemetry = %json!({ "event": "Captured Panic" }),
backtrace = boxed_panic,
"Thread {} panicked at {location}:\n\n {err_display}",
thread_name.as_deref().unwrap_or("<unknown>"),
);
}));
// Run the app, catching panics and errors, early flushing if `ctrl_c` is pressed.
let app_res = AssertUnwindSafe(run_with_ctrl_c(run_app(args.action, tracer.clone())))
.catch_unwind()
.await;
// Do any final logging cleanup
tracer.finish(app_res).await
}
// Redirects the tracing logs to the TUI if it's active, otherwise it just collects them.
pub fn redirect_to_tui(&self) {
self.tui_active.store(true, Ordering::Relaxed);
}
/// Wait for the internal logger to send a message
pub(crate) async fn wait(&self) -> ServeUpdate {
use futures_util::StreamExt;
let Some(log) = self.tui_rx.lock().await.next().await else {
return std::future::pending().await;
};
ServeUpdate::TracingLog { log }
}
/// Uploads telemetry logs from the filesystem to the telemetry endpoint.
///
/// As the app runs, we simply fire off messages into the TelemetryTx handle.
///
/// Once the session is over, or the tx is flushed manually, we then log to a file.
/// This prevents any performance issues from building up during long session.
/// For `dx serve`, we asynchronously flush after full rebuilds are *completed*.
/// Initialize a user session with a stable ID.
///
/// This also sends a heartbeat event to the telemetry endpoint to indicate that the CLI is alive.
///
/// Docs on how to send posthog:
/// <https://posthog.com/docs/api/capture>
///
/// We try to send batched requests *without* the api key in the header. It's usually fine to send
/// the API key along with the request, but we want to control revoking key on the backend.
///
/// Todo: we should accept some sort of configuration from posthog to allow us to downsample telemetry events.
/// otherwise we might end up being flooded by telemetry events.
///
/// We loop receive messages, pushing them into a batch.
async fn upload_telemetry_files(self, invoked_event: TelemetryEventData) -> Result<()> {
use fs2::FileExt;
// Wait a little bit to prevent abuse (spam loops) and not do extra work if it's a simple `--help` call
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
// Send off a heartbeat request. If this fails, we skip anything else.
self.send_invoked_event(invoked_event).await?;
// Wait a few seconds to see if we can end up in `dx serve` or a long-running task
// If we're in CI though, we do want to flush telemetry immediately
if !CliSettings::is_ci() {
tokio::time::sleep(std::time::Duration::from_millis(3000)).await;
}
// Now start loading telemetry files, locking them, and then uploading them.
let stats_dir = Workspace::dioxus_data_dir().join("stats").join("sessions");
for entry in stats_dir.read_dir()?.flatten() {
// Try to open the file...
let Ok(mut file) = std::fs::File::open(entry.path()) else {
continue;
};
// And then we hold an exclusive lock on the file while we upload it
// This prevents multiple processes from trying to upload the same file at the same time which would cause duplicate uploads
if file.try_lock_exclusive().is_err() {
continue;
};
// Now that we have the lock, we can read the file and upload it
// todo: validate that _bytes_read is not greater than 20mb - this will fail to upload
let mut jsonl_file = String::new();
let Ok(_bytes_read) = file.read_to_string(&mut jsonl_file) else {
continue;
};
// If the file is empty, we delete it and continue
if jsonl_file.trim().is_empty() {
_ = std::fs::remove_file(entry.path());
continue;
}
// We assume since this is a jsonl file that every line is valid json. We just concat the lines together
// and then send them using the batched client.
// The session ID is the file stem of the file, which is a UUID. If not, we just make up a
// new session ID on the fly.
let reporter = self.reporter.as_ref().context("no reporter")?;
let session_id = entry
.path()
.file_stem()
.and_then(|s| Uuid::from_str(s.to_str()?).ok())
.unwrap_or_else(Uuid::new_v4);
let request_body = jsonl_file
.lines()
.map(serde_json::from_str::<TelemetryEventData>)
.filter_map(Result::ok)
.map(|event| Self::telemetry_to_posthog(session_id, reporter.distinct_id, event))
.collect::<Vec<_>>();
// Send the request
// - If the request fails, we just log the error and continue
// - If the request succeeds, we remove the file
match self.upload_to_posthog(&request_body).await {
Ok(()) => _ = std::fs::remove_file(entry.path()),
Err(err) => {
tracing::trace!(
"Failed to upload telemetry file: {} because {}",
entry.path().display(),
err
);
}
}
}
Ok(())
}
/// Uploads a set of telemetry events to the PostHog endpoint.
async fn upload_to_posthog(&self, body: &Vec<serde_json::Value>) -> Result<()> {
use hyper::header::CONTENT_TYPE;
let reporter = self.reporter.as_ref().context("No reporter initialized")?;
let res = self
.http_client
.as_ref()
.context("HTTP client not initialized")?
.post(Self::posthog_capture_endpoint())
.header(CONTENT_TYPE, "application/json")
.header("X-Reporter-ID", reporter.distinct_id.to_string())
.json(&body)
.send()
.await
.context("Failed to send telemetry data")?;
if !res.status().is_success() {
bail!(
"Failed to upload telemetry event: {:?}. Response: {:?}",
res.status(),
res.text().await
);
}
Ok(())
}
async fn send_invoked_event(&self, heartbeat: TelemetryEventData) -> Result<()> {
let reporter = self.reporter.as_ref().context("No reporter initialized")?;
let body = Self::telemetry_to_posthog(reporter.session_id, reporter.distinct_id, heartbeat);
self.upload_to_posthog(&vec![body]).await
}
/// Convert the dioxus-cli-telemetry event into a posthog event.
///
/// We try to maintain the same structure for each telemetry event to do advanced filtering on the backend.
fn telemetry_to_posthog(
session_id: Uuid,
distinct_id: Uuid,
event: TelemetryEventData,
) -> serde_json::Value {
let TelemetryEventData {
action,
message,
time,
values,
command,
stack_frames,
file,
line,
column,
module,
error_type,
error_handled,
} = event;
let mut ph_event = posthog_rs::Event::new(action, distinct_id.to_string());
// The reporter's fields
_ = ph_event.insert_prop("is_ci", CliSettings::is_ci());
_ = ph_event.insert_prop("session_id", session_id.to_string());
_ = ph_event.insert_prop("distinct_id", distinct_id.to_string());
_ = ph_event.insert_prop("host_os", target_lexicon::HOST.operating_system.to_string());
_ = ph_event.insert_prop("host_arch", target_lexicon::HOST.architecture.to_string());
_ = ph_event.insert_prop("host_triple", target_lexicon::Triple::host().to_string());
_ = ph_event.insert_prop("cli_version_major", crate::dx_build_info::PKG_VERSION_MAJOR);
_ = ph_event.insert_prop("cli_version_minor", crate::dx_build_info::PKG_VERSION_MINOR);
_ = ph_event.insert_prop("cli_version_patch", crate::dx_build_info::PKG_VERSION_PATCH);
_ = ph_event.insert_prop("cli_version_pre", crate::dx_build_info::PKG_VERSION_PRE);
_ = ph_event.insert_prop("cli_commit_hash", GIT_COMMIT_HASH_SHORT.unwrap_or_default());
_ = ph_event.insert_prop("cli_source_file", line);
_ = ph_event.insert_prop("cli_source_line", file);
_ = ph_event.insert_prop("cli_source_column", column);
_ = ph_event.insert_prop("cli_source_module", module);
// And the TelemetryEventData fields
_ = ph_event.insert_prop("command", &command);
_ = ph_event.insert_prop("message", &message);
// And the rest of the event values
for (key, value) in values {
_ = ph_event.insert_prop(key, value);
}
// We need to go add the api key to the event, since posthog_rs doesn't expose it for us...
let mut value = serde_json::to_value(ph_event).unwrap();
value["timestamp"] = serde_json::Value::String(time.to_rfc3339());
value["api_key"] = serde_json::Value::String(
"phc_d2jQTZMqAWxSkzv3NQ8TlxCP49vtBZ5ZmlYMIZLFNNU".to_string(),
);
// If the event is an error, we need to add the error properties so posthog picks it up.
// The stackframes should be transformed already.
if let Some(error_type) = error_type {
value["event"] = "$exception".into();
value["properties"]["$session_id"] = session_id.to_string().into();
value["properties"]["$exception_list"] = json!([{
"type": error_type,
"value": &message,
"mechanism": {
"handled": error_handled, // panics are not handled
"synthetic": false, // all errors/panics and "real", not emitted by sentry or the sdk
},
"stacktrace": {
"type": "raw", // debug symbols are generally used. this might be wrong?
"frames": stack_frames
}
}]);
}
value
}
fn enroll_reporter() -> Result<Reporter> {
#[derive(Debug, Deserialize, Serialize)]
struct ReporterSession {
reporter_id: Uuid,
session_id: Uuid,
created_at: SystemTime,
last_used: SystemTime,
}
// If the user requests telemetry disabled, we don't enroll them
if CliSettings::telemetry_disabled() {
bail!("Telemetry is disabled");
}
// Create the sessions folder if it doesn't exist
let stats_folder = Workspace::dioxus_data_dir().join("stats");
let sessions_folder = stats_folder.join("sessions");
if !sessions_folder.exists() {
std::fs::create_dir_all(&sessions_folder)?;
}
// Create a reporter_id. If we find an invalid reporter_id, we use `nil` as the reporter ID.
let distinct_id_file = stats_folder.join("reporter.json");
let reporter_session = std::fs::read_to_string(&distinct_id_file)
.map(|e| serde_json5::from_str::<ReporterSession>(&e));
// If we have a valid reporter session, we use it, otherwise we create a new one.
let mut reporter = match reporter_session {
Ok(Ok(session)) => session,
_ => ReporterSession {
reporter_id: Uuid::new_v4(),
session_id: Uuid::new_v7(uuid::Timestamp::now(
uuid::timestamp::context::ContextV7::new(),
)),
created_at: SystemTime::now(),
last_used: SystemTime::now(),
},
};
// Update the last used time to now, updating the session ID if it's older than 30 minutes.
if reporter
.last_used
.duration_since(SystemTime::now())
.map(|d| d.as_secs() > (30 * 60))
.unwrap_or(true)
{
reporter.created_at = SystemTime::now();
reporter.session_id = Uuid::new_v7(uuid::Timestamp::now(
uuid::timestamp::context::ContextV7::new(),
));
}
reporter.last_used = SystemTime::now();
// Write the reporter session back to the file
std::fs::write(&distinct_id_file, serde_json5::to_string(&reporter)?)?;
Ok(Reporter {
distinct_id: reporter.reporter_id,
session_id: reporter.session_id,
})
}
async fn finish(
&self,
res: Result<Result<StructuredOutput>, Box<dyn Any + Send>>,
) -> StructuredOutput {
// Drain the tracer as regular messages
self.tui_active.store(false, Ordering::Relaxed);
reset_cursor();
// re-emit any remaining messages in case they're useful.
while let Ok(Some(msg)) = self.tui_rx.lock().await.try_next() {
let content = match msg.content {
TraceContent::Text(text) => text,
TraceContent::Cargo(msg) => msg.message.to_string(),
};
match msg.level {
Level::ERROR => tracing::error!("{content}"),
Level::WARN => tracing::warn!("{content}"),
Level::INFO => tracing::info!("{content}"),
Level::DEBUG => tracing::debug!("{content}"),
Level::TRACE => tracing::trace!("{content}"),
}
}
// If we have "safe" error, we we need to log it
if let Ok(Err(err)) = &res {
self.record_backtrace(
err,
"Fatal error",
std::thread::current().name(),
None,
Default::default(),
);
}
// And then we can finally flush telemetry to disk
_ = self.flush_telemetry_to_disk().await;
// Create the final output based on the result of the CLI run.
match res {
// No printing needed for successful runs, we just return the output for JSON output.
Ok(Ok(output)) => output,
// If the CLI run failed, we print the error and return it.
// Anyhow gives us a nice stack trace, so we can just print it.
// Eventually we might want to format this better since a full stack trace is kinda ugly.
Ok(Err(err)) => {
use crate::styles::{ERROR_STYLE, GLOW_STYLE};
let arg = std::env::args().nth(1).unwrap_or_else(|| "dx".to_string());
let err_display = format!("{err:?}")
.lines()
.take_while(|line| !line.ends_with("___rust_try"))
.join("\n");
let message = format!(
"{ERROR_STYLE}ERROR{ERROR_STYLE:#} {GLOW_STYLE}dx {}{GLOW_STYLE:#}: {}",
arg, err_display
);
eprintln!("\n{message}");
StructuredOutput::Error { message }
}
// The top-level thread panicked entirely. The panic handler should print the error for us.
// Just return the error for the structured output.
Err(e) => StructuredOutput::Error {
message: if let Some(s) = e.downcast_ref::<String>() {
s.to_string()
} else if let Some(s) = e.downcast_ref::<&str>() {
s.to_string()
} else {
"<unknown error>".to_string()
},
},
}
}
/// Flush telemetry to disk.
///
/// Maybe flush telemetry immediately if the command is a long-running command like `dx serve` or `dx run` and there's an error.
/// Currently just flushes telemetry immediately if we're in CI.
async fn flush_telemetry_to_disk(&self) -> Result<()> {
use std::io::Write;
let reporter = self.reporter.as_ref().context("No reporter initialized")?;
// If we're in CI, we try to upload the telemetry immediately, with a short timeout (5 seconds or so)
// Hopefully it doesn't fail! Not much we can do in CI.
match CliSettings::is_ci() {
true => {
let mut msgs = self.telemetry_rx.lock().await;
let request_body = std::iter::from_fn(|| msgs.try_next().ok().flatten())
.filter_map(|msg| serde_json::to_value(msg).ok())
.collect::<Vec<_>>();
_ = self.upload_to_posthog(&request_body).await;
}
// Dump the logs to a the session file as jsonl
false => {
let mut msgs = self.telemetry_rx.lock().await;
let msg_list =
std::iter::from_fn(|| msgs.try_next().ok().flatten()).collect::<Vec<_>>();
if !msg_list.is_empty() {
let dest = Workspace::dioxus_data_dir()
.join("stats")
.join("sessions")
.join(format!("{}.jsonl", reporter.session_id));
let mut logfile = std::fs::OpenOptions::new()
.append(true)
.create(true)
.open(dest)?;
for msg in msg_list {
serde_json::to_writer(&mut logfile, &msg)?;
writeln!(logfile)?;
}
}
}
}
Ok(())
}
fn posthog_capture_endpoint() -> String {
format!(
"{}/capture/",
Self::posthog_endpoint().trim_end_matches('/')
)
}
fn posthog_endpoint() -> String {
// In dev mode we can override the endpoint with an environment variable
if cfg!(debug_assertions) {
if let Ok(endpoint) = env::var("DX_REPORTER_ENDPOINT") {
return endpoint;
}
}
"https://dx-cli-stats.dioxus.dev/".to_string()
}
/// Collect the raw arguments passed to the CLI and convert them into a telemetry event.
///
/// This gives some usage information about which commands are being used and how. Especially useful
/// if a particular session is failing in some way.
pub(crate) fn to_invoked_event(arg: &crate::Commands) -> TelemetryEventData {
let (message, anonymized) = Self::command_anonymized(arg);
let raw_arguments = std::env::args()
.map(|s| dioxus_cli_telemetry::strip_paths(&s))
.collect::<Vec<_>>();
TelemetryEventData::new("cli_invoked", message)
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | true |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/tailwind.rs | packages/cli/src/tailwind.rs | use crate::{CliSettings, Result, Workspace};
use anyhow::{anyhow, Context};
use std::{
path::{Path, PathBuf},
process::Stdio,
};
use tokio::process::Command;
#[derive(Debug)]
pub(crate) struct TailwindCli {
version: String,
}
impl TailwindCli {
const V3_TAG: &'static str = "v3.4.15";
const V4_TAG: &'static str = "v4.1.5";
pub(crate) fn new(version: String) -> Self {
Self { version }
}
pub(crate) async fn run_once(
manifest_dir: PathBuf,
input_path: Option<PathBuf>,
output_path: Option<PathBuf>,
) -> Result<()> {
let Some(tailwind) = Self::autodetect(&manifest_dir) else {
return Ok(());
};
if !tailwind.get_binary_path()?.exists() {
tracing::info!("Installing tailwindcss@{}", tailwind.version);
tailwind.install_github().await?;
}
let output = tailwind
.run(&manifest_dir, input_path, output_path, false)?
.wait_with_output()
.await?;
if !output.stderr.is_empty() {
tracing::warn!(
"Warnings while running tailwind: {}",
String::from_utf8_lossy(&output.stdout)
);
}
Ok(())
}
pub(crate) fn serve(
manifest_dir: PathBuf,
input_path: Option<PathBuf>,
output_path: Option<PathBuf>,
) -> tokio::task::JoinHandle<Result<()>> {
tokio::spawn(async move {
let Some(tailwind) = Self::autodetect(&manifest_dir) else {
return Ok(());
};
if !tailwind.get_binary_path()?.exists() {
tracing::info!("Installing tailwindcss@{}", tailwind.version);
tailwind.install_github().await?;
}
// the tw watcher blocks on stdin, and `.wait()` will drop stdin
// unfortunately the tw watcher just deadlocks in this case, so we take the stdin manually
let mut proc = tailwind.run(&manifest_dir, input_path, output_path, true)?;
let stdin = proc.stdin.take();
proc.wait().await?;
drop(stdin);
Ok(())
})
}
/// Use the correct tailwind version based on the manifest directory.
///
/// - If `tailwind.config.js` or `tailwind.config.ts` exists, use v3.
/// - If `tailwind.css` exists, use v4.
///
/// Note that v3 still uses the tailwind.css file, but usually the accompanying js file indicates
/// that the project is using v3.
pub(crate) fn autodetect(manifest_dir: &Path) -> Option<Self> {
if manifest_dir.join("tailwind.config.js").exists() {
return Some(Self::v3());
}
if manifest_dir.join("tailwind.config.ts").exists() {
return Some(Self::v3());
}
if manifest_dir.join("tailwind.css").exists() {
return Some(Self::v4());
}
None
}
pub(crate) fn v4() -> Self {
Self::new(Self::V4_TAG.to_string())
}
pub(crate) fn v3() -> Self {
Self::new(Self::V3_TAG.to_string())
}
pub(crate) fn run(
&self,
manifest_dir: &Path,
input_path: Option<PathBuf>,
output_path: Option<PathBuf>,
watch: bool,
) -> Result<tokio::process::Child> {
let binary_path = self.get_binary_path()?;
let input_path = input_path.unwrap_or_else(|| manifest_dir.join("tailwind.css"));
let output_path =
output_path.unwrap_or_else(|| manifest_dir.join("assets").join("tailwind.css"));
if !output_path.exists() {
std::fs::create_dir_all(output_path.parent().unwrap())
.context("failed to create tailwindcss output directory")?;
}
tracing::debug!("Spawning tailwindcss@{} with args: {:?}", self.version, {
[
binary_path.to_string_lossy().to_string(),
"--input".to_string(),
input_path.to_string_lossy().to_string(),
"--output".to_string(),
output_path.to_string_lossy().to_string(),
"--watch".to_string(),
]
});
let mut cmd = Command::new(binary_path);
let proc = cmd
.arg("--input")
.arg(input_path)
.arg("--output")
.arg(output_path)
.args(watch.then_some("--watch"))
.current_dir(manifest_dir)
.kill_on_drop(true)
.stdin(Stdio::piped())
.stdout(Stdio::null())
.stderr(Stdio::null())
.spawn()?;
Ok(proc)
}
pub fn get_binary_path(&self) -> anyhow::Result<PathBuf> {
if CliSettings::prefer_no_downloads() {
which::which("tailwindcss").map_err(|_| anyhow!("Missing tailwindcss@{}", self.version))
} else {
let installed_name = self.installed_bin_name();
let install_dir = self.install_dir()?;
Ok(install_dir.join(installed_name))
}
}
fn installed_bin_name(&self) -> String {
let mut name = format!("tailwindcss-{}", self.version);
if cfg!(windows) {
name = format!("{name}.exe");
}
name
}
async fn install_github(&self) -> anyhow::Result<()> {
tracing::debug!(
"Attempting to install tailwindcss@{} from GitHub",
self.version
);
let url = self.git_install_url().ok_or_else(|| {
anyhow!(
"no available GitHub binary for tailwindcss@{}",
self.version
)
})?;
// Get the final binary location.
let binary_path = self.get_binary_path()?;
// Download then extract tailwindcss.
let bytes = reqwest::get(url).await?.bytes().await?;
std::fs::create_dir_all(binary_path.parent().unwrap())
.context("failed to create tailwindcss directory")?;
std::fs::write(&binary_path, &bytes).context("failed to write tailwindcss binary")?;
// Make the binary executable.
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let mut perms = binary_path.metadata()?.permissions();
perms.set_mode(0o755);
std::fs::set_permissions(&binary_path, perms)?;
}
Ok(())
}
fn downloaded_bin_name(&self) -> Option<String> {
let platform = match target_lexicon::HOST.operating_system {
target_lexicon::OperatingSystem::Linux => "linux",
target_lexicon::OperatingSystem::Darwin(_) => "macos",
target_lexicon::OperatingSystem::Windows => "windows",
_ => return None,
};
let arch = match target_lexicon::HOST.architecture {
target_lexicon::Architecture::X86_64 if platform == "windows" => "x64.exe",
target_lexicon::Architecture::X86_64 => "x64",
// you would think this would be arm64.exe, but tailwind doesnt distribute arm64 binaries
target_lexicon::Architecture::Aarch64(_) if platform == "windows" => "x64.exe",
target_lexicon::Architecture::Aarch64(_) => "arm64",
_ => return None,
};
Some(format!("tailwindcss-{platform}-{arch}"))
}
fn install_dir(&self) -> Result<PathBuf> {
let bindgen_dir = Workspace::dioxus_data_dir().join("tailwind/");
Ok(bindgen_dir)
}
fn git_install_url(&self) -> Option<String> {
// eg:
//
// https://github.com/tailwindlabs/tailwindcss/releases/download/v4.1.5/tailwindcss-linux-arm64
//
// tailwindcss-linux-arm64
// tailwindcss-linux-x64
// tailwindcss-macos-arm64
// tailwindcss-macos-x64
// tailwindcss-windows-x64.exe
// tailwindcss-linux-arm64-musl
// tailwindcss-linux-x64-musl
Some(format!(
"https://github.com/tailwindlabs/tailwindcss/releases/download/{}/{}",
self.version,
self.downloaded_bin_name()?
))
}
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/build/builder.rs | packages/cli/src/build/builder.rs | use crate::{
serve::WebServer, verbosity_or_default, BuildArtifacts, BuildRequest, BuildStage,
BuilderUpdate, BundleFormat, ProgressRx, ProgressTx, Result, RustcArgs, StructuredOutput,
};
use anyhow::{bail, Context, Error};
use dioxus_cli_opt::process_file_to;
use futures_util::{future::OptionFuture, pin_mut, FutureExt};
use itertools::Itertools;
use std::{
env,
time::{Duration, Instant, SystemTime},
};
use std::{
net::SocketAddr,
path::{Path, PathBuf},
process::Stdio,
};
use subsecond_types::JumpTable;
use target_lexicon::Architecture;
use tokio::{
io::{AsyncBufReadExt, BufReader, Lines},
process::{Child, ChildStderr, ChildStdout, Command},
task::JoinHandle,
};
use tokio_stream::wrappers::UnboundedReceiverStream;
use super::{BuildContext, BuildId, BuildMode, HotpatchModuleCache};
/// The component of the serve engine that watches ongoing builds and manages their state, open handle,
/// and progress.
///
/// Previously, the builder allowed multiple apps to be built simultaneously, but this newer design
/// simplifies the code and allows only one app and its server to be built at a time.
///
/// Here, we track the number of crates being compiled, assets copied, the times of these events, and
/// other metadata that gives us useful indicators for the UI.
///
/// A handle to a running app.
///
/// The actual child processes might not be present (web) or running (died/killed).
///
/// The purpose of this struct is to accumulate state about the running app and its server, like
/// any runtime information needed to hotreload the app or send it messages.
///
/// We might want to bring in websockets here too, so we know the exact channels the app is using to
/// communicate with the devserver. Currently that's a broadcast-type system, so this struct isn't super
/// duper useful.
///
/// todo: restructure this such that "open" is a running task instead of blocking the main thread
pub(crate) struct AppBuilder {
pub tx: ProgressTx,
pub rx: ProgressRx,
// The original request with access to its build directory
pub build: BuildRequest,
// Ongoing build task, if any
pub build_task: JoinHandle<Result<BuildArtifacts>>,
// If a build has already finished, we'll have its artifacts (rustc, link args, etc) to work with
pub artifacts: Option<BuildArtifacts>,
/// The aslr offset of this running app
pub aslr_reference: Option<u64>,
/// The list of patches applied to the app, used to know which ones to reapply and/or iterate from.
pub patches: Vec<JumpTable>,
pub patch_cache: Option<HotpatchModuleCache>,
/// The virtual directory that assets will be served from
/// Used mostly for apk/ipa builds since they live in simulator
pub runtime_asset_dir: Option<PathBuf>,
// These might be None if the app died or the user did not specify a server
pub child: Option<Child>,
// stdio for the app so we can read its stdout/stderr
// we don't map stdin today (todo) but most apps don't need it
pub stdout: Option<Lines<BufReader<ChildStdout>>>,
pub stderr: Option<Lines<BufReader<ChildStderr>>>,
// Android logcat stream (treated as stderr for error/warn levels)
pub adb_logcat_stdout: Option<UnboundedReceiverStream<String>>,
/// Handle to the task that's monitoring the child process
pub spawn_handle: Option<JoinHandle<Result<()>>>,
/// The executables but with some extra entropy in their name so we can run two instances of the
/// same app without causing collisions on the filesystem.
pub entropy_app_exe: Option<PathBuf>,
pub builds_opened: usize,
// Metadata about the build that needs to be managed by watching build updates
// used to render the TUI
pub stage: BuildStage,
pub compiled_crates: usize,
pub expected_crates: usize,
pub bundling_progress: f64,
pub compile_start: Option<Instant>,
pub compile_end: Option<Instant>,
pub bundle_start: Option<Instant>,
pub bundle_end: Option<Instant>,
/// The debugger for the app - must be enabled with the `d` key
pub(crate) pid: Option<u32>,
}
impl AppBuilder {
/// Create a new `AppBuilder` and immediately start a build process.
///
/// This method initializes the builder with the provided `BuildRequest` and spawns an asynchronous
/// task (`build_task`) to handle the build process. The build process involves several stages:
///
/// 1. **Tooling Verification**: Ensures that the necessary tools are available for the build.
/// 2. **Build Directory Preparation**: Sets up the directory structure required for the build.
/// 3. **Build Execution**: Executes the build process asynchronously.
/// 4. **Bundling**: Packages the built artifacts into a final bundle.
///
/// The `build_task` is a Tokio task that runs the build process in the background. It uses a
/// `BuildContext` to manage the build state and communicate progress or errors via a message
/// channel (`tx`).
///
/// The builder is initialized with default values for various fields, such as the build stage,
/// progress metrics, and optional runtime configurations.
///
/// # Notes
///
/// - The `build_task` is immediately spawned and will run independently of the caller.
/// - The caller can use other methods on the `AppBuilder` to monitor the build progress or handle
/// updates (e.g., `wait`, `finish_build`).
/// - The build process is designed to be cancellable and restartable using methods like `abort_all`
/// or `rebuild`.
pub(crate) fn new(request: &BuildRequest) -> Result<Self> {
let (tx, rx) = futures_channel::mpsc::unbounded();
Ok(Self {
build: request.clone(),
stage: BuildStage::Initializing,
build_task: tokio::task::spawn(std::future::pending()),
tx,
rx,
patches: vec![],
compiled_crates: 0,
expected_crates: 1,
bundling_progress: 0.0,
builds_opened: 0,
compile_start: Some(Instant::now()),
aslr_reference: None,
compile_end: None,
bundle_start: None,
bundle_end: None,
runtime_asset_dir: None,
child: None,
stderr: None,
stdout: None,
adb_logcat_stdout: None,
spawn_handle: None,
entropy_app_exe: None,
artifacts: None,
patch_cache: None,
pid: None,
})
}
/// Create a new `AppBuilder` and immediately start a build process.
pub fn started(request: &BuildRequest, mode: BuildMode, build_id: BuildId) -> Result<Self> {
let mut builder = Self::new(request)?;
builder.start(mode, build_id);
Ok(builder)
}
pub(crate) fn start(&mut self, mode: BuildMode, build_id: BuildId) {
self.build_task = tokio::spawn({
let request = self.build.clone();
let tx = self.tx.clone();
async move {
let ctx = BuildContext {
mode,
build_id,
tx: tx.clone(),
};
request.verify_tooling(&ctx).await?;
request.prebuild(&ctx).await?;
request.build(&ctx).await
}
});
}
/// Wait for any new updates to the builder - either it completed or gave us a message etc.
///
/// Multiplexes every event source owned by this builder (build task, progress channel,
/// app stdout/stderr, launch task, adb logcat, child process exit) into a single
/// [`BuilderUpdate`], then mirrors that update into the builder's progress fields so
/// the UI can render it.
pub(crate) async fn wait(&mut self) -> BuilderUpdate {
    use futures_util::StreamExt;
    use BuilderUpdate::*;

    // Wait for the build to finish or for it to emit a status message.
    // NOTE: the `OptionFuture` arms are disabled (never ready) while their source is
    // `None`, e.g. before the app process has been spawned.
    let update = tokio::select! {
        Some(progress) = self.rx.next() => progress,
        bundle = (&mut self.build_task) => {
            // Replace the build with an infinitely pending task so we can select it again without worrying about deadlocks/spins
            self.build_task = tokio::task::spawn(std::future::pending());
            match bundle {
                Ok(Ok(bundle)) => BuilderUpdate::BuildReady { bundle },
                Ok(Err(err)) => BuilderUpdate::BuildFailed { err },
                // A JoinError means the build *task* panicked, not that the build itself errored.
                Err(err) => BuilderUpdate::BuildFailed { err: anyhow::anyhow!("Build panicked! {err:#?}") },
            }
        },
        // Forward a line of the app's stdout, if the app has been opened.
        Some(Ok(Some(msg))) = OptionFuture::from(self.stdout.as_mut().map(|f| f.next_line())) => {
            StdoutReceived { msg }
        },
        // Forward a line of the app's stderr, if the app has been opened.
        Some(Ok(Some(msg))) = OptionFuture::from(self.stderr.as_mut().map(|f| f.next_line())) => {
            StderrReceived { msg }
        },
        // Surface the result of the app-launching task as app output.
        Some(msg) = OptionFuture::from(self.spawn_handle.as_mut()) => {
            match msg {
                Ok(Ok(_)) => StdoutReceived { msg: "Finished launching app".to_string() },
                Ok(Err(err)) => StderrReceived { msg: err.to_string() },
                Err(err) => StderrReceived { msg: err.to_string() }
            }
        },
        Some(Some(msg)) = OptionFuture::from(self.adb_logcat_stdout.as_mut().map(|s| s.next())) => {
            // Send as stderr for errors/warnings, stdout for info/debug.
            // Parse the priority level from a logcat line:
            //
            // Logcat brief format: "I/TAG(12345): message"
            // The first char is the priority (V, D, I, W, E, F); default to 'I' for empty lines.
            if matches!(msg.chars().next().unwrap_or('I'), 'E' | 'W' | 'F') {
                StderrReceived { msg }
            } else {
                StdoutReceived { msg }
            }
        },
        Some(status) = OptionFuture::from(self.child.as_mut().map(|f| f.wait())) => {
            match status {
                Ok(status) => {
                    self.child = None;
                    ProcessExited { status }
                },
                Err(err) => {
                    // Park forever so a repeatedly-failing wait() doesn't hot-spin this
                    // select loop. NOTE(review): this makes the `ProcessWaitFailed` below
                    // unreachable from this arm.
                    let () = futures_util::future::pending().await;
                    ProcessWaitFailed { err }
                }
            }
        }
    };

    // Update the internal stage of the build so the UI can render it.
    // *VERY IMPORTANT* - DO NOT AWAIT HERE
    // doing so will cause the changes to be lost since this wait call is called under a cancellable task
    // todo - move this handling to a separate function that won't be cancelled
    match &update {
        BuilderUpdate::Progress { stage } => {
            // Prevent updates from flowing in after the build has already finished
            if !self.is_finished() {
                self.stage = stage.clone();
                match stage {
                    BuildStage::Initializing => {
                        self.compiled_crates = 0;
                        self.bundling_progress = 0.0;
                    }
                    BuildStage::Starting { crate_count, .. } => {
                        // Clamp to at least 1 so the count never reads as zero.
                        self.expected_crates = *crate_count.max(&1);
                    }
                    BuildStage::InstallingTooling => {}
                    BuildStage::Compiling { current, total, .. } => {
                        self.compiled_crates = *current;
                        self.expected_crates = *total.max(&1);
                        // Record when compilation first started so elapsed time can be shown.
                        if self.compile_start.is_none() {
                            self.compile_start = Some(Instant::now());
                        }
                    }
                    BuildStage::Bundling => {
                        // Compilation is done once bundling begins.
                        self.complete_compile();
                        self.bundling_progress = 0.0;
                        self.bundle_start = Some(Instant::now());
                    }
                    BuildStage::OptimizingWasm => {}
                    BuildStage::CopyingAssets { current, total, .. } => {
                        self.bundling_progress = *current as f64 / *total as f64;
                    }
                    BuildStage::Success => {
                        self.compiled_crates = self.expected_crates;
                        self.bundling_progress = 1.0;
                    }
                    BuildStage::Failed => {
                        // Pin the progress bars at 100% on failure too, rather than leaving them partial.
                        self.compiled_crates = self.expected_crates;
                        self.bundling_progress = 1.0;
                    }
                    BuildStage::Aborted => {}
                    BuildStage::Restarting => {
                        // Reset progress back to a fresh-build state.
                        self.compiled_crates = 0;
                        self.expected_crates = 1;
                        self.bundling_progress = 0.0;
                    }
                    BuildStage::RunningBindgen => {}
                    _ => {}
                }
            }
        }
        BuilderUpdate::CompilerMessage { .. } => {}
        BuilderUpdate::BuildReady { .. } => {
            self.compiled_crates = self.expected_crates;
            self.bundling_progress = 1.0;
            self.stage = BuildStage::Success;
            self.complete_compile();
            self.bundle_end = Some(Instant::now());
        }
        BuilderUpdate::BuildFailed { .. } => {
            tracing::debug!("Setting builder to failed state");
            self.stage = BuildStage::Failed;
        }
        // App-runtime events don't affect build progress.
        StdoutReceived { .. } => {}
        StderrReceived { .. } => {}
        ProcessExited { .. } => {}
        ProcessWaitFailed { .. } => {}
    }

    update
}
/// Kick off a hot-patch ("thin") rebuild for the given set of changed files.
///
/// Requires artifacts from a previous fat build (for the saved rustc args and the
/// hotpatch module cache) and, on non-wasm targets, an ASLR reference reported by the
/// connected client. If any prerequisite is missing we log a warning and skip the
/// rebuild instead of panicking, since hotpatching is best-effort.
pub(crate) fn patch_rebuild(&mut self, changed_files: Vec<PathBuf>, build_id: BuildId) {
    // We need the rustc args from the original build to pass to the new build
    let Some(artifacts) = self.artifacts.as_ref().cloned() else {
        tracing::warn!(
            "Ignoring patch rebuild for {build_id:?} since there is no existing build."
        );
        return;
    };

    // On web, our patches are fully relocatable, so we don't need to worry about ASLR, but
    // for all other platforms, we need to use the ASLR reference to know where to insert the patch.
    let aslr_reference = match self.aslr_reference {
        Some(val) => val,
        None if matches!(
            self.build.triple.architecture,
            Architecture::Wasm32 | Architecture::Wasm64
        ) =>
        {
            0
        }
        None => {
            tracing::warn!(
                "Ignoring hotpatch since there is no ASLR reference. Is the client connected?"
            );
            return;
        }
    };

    // The module cache is produced by the fat build; without it we can't assemble a patch.
    // This used to be an `unwrap()` that would panic the CLI - warn and bail instead,
    // consistent with the guards above.
    let Some(cache) = artifacts.patch_cache.clone() else {
        tracing::warn!(
            "Ignoring patch rebuild for {build_id:?} since there is no patch cache."
        );
        return;
    };

    // Abort all the ongoing builds, cleaning up any loose artifacts and waiting to cleanly exit
    self.abort_all(BuildStage::Restarting);

    self.build_task = tokio::spawn({
        let request = self.build.clone();
        let ctx = BuildContext {
            build_id,
            tx: self.tx.clone(),
            mode: BuildMode::Thin {
                changed_files,
                rustc_args: artifacts.direct_rustc,
                aslr_reference,
                cache,
            },
        };
        async move { request.build(&ctx).await }
    });
}
/// Restart this builder with new build arguments.
pub(crate) fn start_rebuild(&mut self, mode: BuildMode, build_id: BuildId) {
    // Tear down any in-flight builds, cleaning up loose artifacts and resetting our
    // progress/stage back to the beginning.
    self.abort_all(BuildStage::Restarting);

    // Drop state belonging to the previous binary.
    self.artifacts.take();
    self.patch_cache.take();

    // Replace the old tokio task with a fresh build against the same request.
    let request = self.build.clone();
    let tx = self.tx.clone();
    self.build_task = tokio::spawn(async move {
        let ctx = BuildContext { tx, mode, build_id };
        request.build(&ctx).await
    });
}
/// Shutdown the current build process and reset all progress tracking.
///
/// The builder's stage is set to `stage` so the UI reflects why the build stopped.
///
/// todo: might want to use a cancellation token here to allow cleaner shutdowns
pub(crate) fn abort_all(&mut self, stage: BuildStage) {
    // Stop the build task first, then reset every progress/timing field to its
    // fresh-build value.
    self.build_task.abort();
    self.stage = stage;
    self.compiled_crates = 0;
    self.expected_crates = 1;
    self.bundling_progress = 0.0;
    self.compile_start = None;
    self.compile_end = None;
    self.bundle_start = None;
    self.bundle_end = None;
}
/// Wait for the build to finish, returning the final bundle.
///
/// Should only be used by code that's not interested in the intermediate updates and
/// only cares about the final bundle. Progress and compiler messages are logged (both
/// human-readable and as structured JSON) as they arrive.
///
/// todo(jon): maybe we want to do some logging here? The build/bundle/run screens could be made to
/// use the TUI output for prettier outputs.
pub(crate) async fn finish_build(&mut self) -> Result<BuildArtifacts> {
    loop {
        match self.wait().await {
            BuilderUpdate::Progress { stage } => {
                // Log the noteworthy stages so plain-console/CI users can follow along.
                match &stage {
                    BuildStage::Compiling {
                        current,
                        total,
                        krate,
                        ..
                    } => {
                        tracing::info!("Compiled [{current:>3}/{total}]: {krate}");
                    }
                    BuildStage::RunningBindgen => tracing::info!("Running wasm-bindgen..."),
                    BuildStage::CopyingAssets {
                        current,
                        total,
                        path,
                    } => {
                        tracing::info!(
                            "Copying asset ({}/{total}): {}",
                            current + 1,
                            path.display()
                        );
                    }
                    BuildStage::Bundling => tracing::info!("Bundling app..."),
                    BuildStage::CodeSigning => tracing::info!("Code signing app..."),
                    _ => {}
                }
                // Always emit the machine-readable progress event.
                tracing::info!(json = %StructuredOutput::BuildUpdate { stage: stage.clone() });
            }
            BuilderUpdate::CompilerMessage { message } => {
                tracing::info!(json = %StructuredOutput::RustcOutput { message: message.clone() }, %message);
            }
            BuilderUpdate::BuildReady { bundle } => {
                tracing::debug!(json = %StructuredOutput::BuildFinished {
                    artifacts: bundle.clone().into_structured_output(),
                });
                return Ok(bundle);
            }
            BuilderUpdate::BuildFailed { err } => {
                // Flush remaining compiler messages so the cause of the failure isn't lost.
                while let Ok(Some(msg)) = self.rx.try_next() {
                    if let BuilderUpdate::CompilerMessage { message } = msg {
                        tracing::info!(json = %StructuredOutput::RustcOutput { message: message.clone() }, %message);
                    }
                }
                return Err(err);
            }
            // App-runtime events are irrelevant here; we only care about the bundle.
            BuilderUpdate::StdoutReceived { .. } => {}
            BuilderUpdate::StderrReceived { .. } => {}
            BuilderUpdate::ProcessExited { .. } => {}
            BuilderUpdate::ProcessWaitFailed { .. } => {}
        }
    }
}
/// Create a list of environment variables that the child process will use.
///
/// We try to emulate running under `cargo` as much as possible, carrying over vars like
/// `CARGO_MANIFEST_DIR`. Previously, we didn't want to emulate this behavior, but now we
/// do in order to be a good citizen of the Rust ecosystem and allow users to use `cargo`
/// features like `CARGO_MANIFEST_DIR`.
///
/// Note that Dioxus apps *should not* rely on these vars being set, but libraries like Bevy do.
pub(crate) fn child_environment_variables(
    &mut self,
    devserver_ip: Option<SocketAddr>,
    start_fullstack_on_address: Option<SocketAddr>,
    always_on_top: bool,
    build_id: BuildId,
) -> Vec<(String, String)> {
    let krate = &self.build;
    let mut envs = Vec::<(String, String)>::new();

    // Vars the client runtime expects - these need to be stable within a release version (ie 0.6.0).
    envs.push((
        dioxus_cli_config::CLI_ENABLED_ENV.into(),
        "true".to_string(),
    ));
    envs.push((
        dioxus_cli_config::APP_TITLE_ENV.into(),
        krate.config.web.app.title.clone(),
    ));
    envs.push((
        dioxus_cli_config::SESSION_CACHE_DIR.into(),
        self.build.session_cache_dir().display().to_string(),
    ));
    envs.push((dioxus_cli_config::BUILD_ID.into(), build_id.0.to_string()));
    envs.push((
        dioxus_cli_config::ALWAYS_ON_TOP_ENV.into(),
        always_on_top.to_string(),
    ));

    // Tell the app where the devserver lives so it can connect back to it.
    if let Some(devserver_ip) = devserver_ip {
        envs.push((
            dioxus_cli_config::DEVSERVER_IP_ENV.into(),
            devserver_ip.ip().to_string(),
        ));
        envs.push((
            dioxus_cli_config::DEVSERVER_PORT_ENV.into(),
            devserver_ip.port().to_string(),
        ));
    }

    // Enable backtraces when the CLI itself is running verbosely.
    if verbosity_or_default().verbose {
        envs.push(("RUST_BACKTRACE".into(), "1".to_string()));
    }

    if let Some(base_path) = krate.trimmed_base_path() {
        envs.push((
            dioxus_cli_config::ASSET_ROOT_ENV.into(),
            base_path.to_string(),
        ));
    }

    // Forward the CLI's RUST_LOG filter to the child, when set and valid UTF-8.
    if let Some(env_filter) = env::var_os("RUST_LOG").and_then(|e| e.into_string().ok()) {
        envs.push(("RUST_LOG".into(), env_filter));
    }

    // If we were given an address to start the server on, pass it through so the
    // (fullstack) binary knows where to bind.
    if let Some(addr) = start_fullstack_on_address {
        envs.push((
            dioxus_cli_config::SERVER_IP_ENV.into(),
            addr.ip().to_string(),
        ));
        envs.push((
            dioxus_cli_config::SERVER_PORT_ENV.into(),
            addr.port().to_string(),
        ));
    }

    // If there's any CARGO vars in the rustc_wrapper files, push those too.
    if let Ok(contents) = std::fs::read_to_string(self.build.rustc_wrapper_args_file()) {
        if let Ok(args) = serde_json::from_str::<RustcArgs>(&contents) {
            envs.extend(
                args.envs
                    .into_iter()
                    .filter(|(key, _)| key.starts_with("CARGO_")),
            );
        }
    }

    envs
}
/// Launch the built app using the transport appropriate for its bundle format.
///
/// Web apps are opened in the browser (we can't get a process handle to read their
/// stdio, so they communicate over the websocket instead); iOS/Android go through the
/// simulator/device tooling; everything else just runs the main executable with the
/// devserver environment variables applied.
#[allow(clippy::too_many_arguments)]
pub(crate) async fn open(
    &mut self,
    devserver_ip: SocketAddr,
    open_address: Option<SocketAddr>,
    start_fullstack_on_address: Option<SocketAddr>,
    open_browser: bool,
    always_on_top: bool,
    build_id: BuildId,
    args: &[String],
) -> Result<()> {
    let envs = self.child_environment_variables(
        Some(devserver_ip),
        start_fullstack_on_address,
        always_on_top,
        build_id,
    );

    match self.build.bundle {
        // Only open the browser on the first build - after that the user knows it's running.
        // I wish we could merge the stdio/websocket concepts here, like say, opening the
        // socket as a subprocess, but alas, it's simpler to handle that elsewhere.
        BundleFormat::Web => {
            if open_browser {
                self.open_web(open_address.unwrap_or(devserver_ip));
            }
        }

        // A named device means a physical device; otherwise fall back to the simulator.
        BundleFormat::Ios => match self.build.device_name.to_owned() {
            Some(device) => self.open_ios_device(&device).await?,
            None => self.open_ios_sim(envs).await?,
        },

        BundleFormat::Android => {
            self.open_android(false, devserver_ip, envs, self.build.device_name.clone())
                .await?;
        }

        // These are all just basically running the main exe, but with slightly different
        // resource dir paths.
        BundleFormat::Server
        | BundleFormat::MacOS
        | BundleFormat::Windows
        | BundleFormat::Linux => self.open_with_main_exe(envs, args)?,
    };

    self.builds_opened += 1;

    Ok(())
}
/// Gracefully kill the process and all of its children.
///
/// Uses the `SIGTERM` signal on unix and `taskkill` on windows.
/// This complex logic is necessary for things like window state preservation to work properly.
///
/// Also wipes away the entropy executables if they exist.
pub(crate) async fn soft_kill(&mut self) {
    use futures_util::FutureExt;

    // Nothing to do if there's no running child.
    let Some(mut process) = self.child.take() else {
        return;
    };

    match process.id() {
        // No PID available - fall back to a hard kill and bail out.
        None => {
            _ = process.kill().await;
            return;
        }
        Some(pid) => {
            // Ask the process to shut itself down so it can persist state first.
            // on unix, we can send a signal to the process to shut down
            #[cfg(unix)]
            {
                _ = Command::new("kill")
                    .args(["-s", "TERM", &pid.to_string()])
                    .spawn();
            }

            // on windows, use the `taskkill` command
            #[cfg(windows)]
            {
                _ = Command::new("taskkill")
                    .args(["/PID", &pid.to_string()])
                    .spawn();
            }

            // Give the process up to one second to exit before moving on.
            futures_util::select! {
                _ = process.wait().fuse() => {}
                _ = tokio::time::sleep(std::time::Duration::from_millis(1000)).fuse() => {}
            };
        }
    }

    // Wipe out the entropy executables if they exist.
    if let Some(entropy_app_exe) = self.entropy_app_exe.take() {
        _ = std::fs::remove_file(entropy_app_exe);
    }

    // Abort the spawn handle monitoring task if it exists.
    if let Some(spawn_handle) = self.spawn_handle.take() {
        spawn_handle.abort();
    }
}
/// Apply a freshly-built thin patch to the running app.
///
/// Copies any *new* assets referenced by the patch into the bundle (and onto the
/// device for android), registers new `include!()` dependencies with the watcher,
/// builds the jump table for the patch dylib, and records the patch for later replay.
///
/// Returns the [`JumpTable`] the client should load.
pub(crate) async fn hotpatch(
    &mut self,
    res: &BuildArtifacts,
    cache: &HotpatchModuleCache,
) -> Result<JumpTable> {
    let original = self.build.main_exe();
    let new = self.build.patch_exe(res.time_start);
    let asset_dir = self.build.asset_dir();

    // Hotpatch asset!() calls
    for bundled in res.assets.unique_assets() {
        // Re-borrow inside the loop - `copy_file_to_android_tmp` below also needs `self`.
        let original_artifacts = self
            .artifacts
            .as_mut()
            .context("No artifacts to hotpatch")?;

        if original_artifacts.assets.contains(bundled) {
            continue;
        }

        // If this is a new asset, insert it into the artifacts so we can track it when hot reloading
        original_artifacts.assets.insert_asset(*bundled);

        let from = dunce::canonicalize(PathBuf::from(bundled.absolute_source_path()))?;
        let to = asset_dir.join(bundled.bundled_path());

        tracing::debug!("Copying asset from patch: {}", from.display());
        if let Err(e) = dioxus_cli_opt::process_file_to(bundled.options(), &from, &to) {
            tracing::error!("Failed to copy asset: {e}");
            continue;
        }

        // If the emulator is android, we need to copy the asset to the device with `adb push asset /data/local/tmp/dx/assets/filename.ext`
        if self.build.bundle == BundleFormat::Android {
            let bundled_name = PathBuf::from(bundled.bundled_path());
            _ = self.copy_file_to_android_tmp(&from, &bundled_name).await;
        }
    }

    // Make sure to add `include!()` calls to the watcher so we can watch changes as they evolve
    for file in res.depinfo.files.iter() {
        let original_artifacts = self
            .artifacts
            .as_mut()
            .context("No artifacts to hotpatch")?;
        if !original_artifacts.depinfo.files.contains(file) {
            original_artifacts.depinfo.files.push(file.clone());
        }
    }

    tracing::debug!("Patching {} -> {}", original.display(), new.display());

    let mut jump_table = self.build.create_jump_table(&new, cache)?;

    // If it's android, we need to copy the patch dylib to the device and then point the
    // jump table at its on-device location.
    if self.build.bundle == BundleFormat::Android {
        jump_table.lib = self
            .copy_file_to_android_tmp(&new, &(PathBuf::from(new.file_name().unwrap())))
            .await?;
    }

    let changed_files = match &res.mode {
        BuildMode::Thin { changed_files, .. } => changed_files.clone(),
        _ => vec![],
    };

    use crate::styles::{GLOW_STYLE, NOTE_STYLE};

    // `changed_files` can legitimately be empty (non-Thin modes above), so don't
    // panic when there's no file to attribute the patch to.
    let changed_file = changed_files
        .first()
        .map(|f| {
            f.display()
                .to_string()
                .trim_start_matches(&self.build.crate_dir().display().to_string())
                .to_string()
        })
        .unwrap_or_else(|| "<unknown>".to_string());

    tracing::info!(
        "Hot-patching: {NOTE_STYLE}{changed_file}{NOTE_STYLE:#} took {GLOW_STYLE}{:?}ms{GLOW_STYLE:#}",
        // Clock skew can make `duration_since` fail - report 0 rather than panicking.
        SystemTime::now()
            .duration_since(res.time_start)
            .unwrap_or_default()
            .as_millis()
    );

    self.patches.push(jump_table.clone());

    Ok(jump_table)
}
/// Hotreload an asset in the running app.
///
/// This will modify the build dir in place! Be careful! We generally assume you want all bundles
/// to reflect the latest changes, so we will modify the bundle.
///
/// However, not all platforms work like this, so we might also need to update a separate asset
/// dir that the system simulator might be providing. We know this is the case for ios simulators
/// and haven't yet checked for android.
///
/// This will return the bundled name of the assets such that we can send it to the clients letting
/// them know what to reload. It's not super important that this is robust since most clients will
/// kick all stylesheets without necessarily checking the name.
pub(crate) async fn hotreload_bundled_assets(
&self,
changed_file: &PathBuf,
) -> Option<Vec<PathBuf>> {
let artifacts = self.artifacts.as_ref()?;
// Use the build dir if there's no runtime asset dir as the override. For the case of ios apps,
// we won't actually be using the build dir.
let asset_dir = match self.runtime_asset_dir.as_ref() {
Some(dir) => dir.to_path_buf().join("assets/"),
None => self.build.asset_dir(),
};
// Canonicalize the path as Windows may use long-form paths "\\\\?\\C:\\".
let changed_file = dunce::canonicalize(changed_file)
.inspect_err(|e| tracing::debug!("Failed to canonicalize hotreloaded asset: {e}"))
.ok()?;
// The asset might've been renamed thanks to the manifest, let's attempt to reload that too
let resources = artifacts.assets.get_assets_for_source(&changed_file)?;
let mut bundled_names = Vec::new();
for resource in resources {
let output_path = asset_dir.join(resource.bundled_path());
tracing::debug!("Hotreloading asset {changed_file:?} in target {asset_dir:?}");
// Remove the old asset if it exists
_ = std::fs::remove_file(&output_path);
// And then process the asset with the options into the **old** asset location. If we recompiled,
// the asset would be in a new location because the contents and hash have changed. Since we are
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | true |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/build/manifest.rs | packages/cli/src/build/manifest.rs | //! The build manifest for `dx` applications, containing metadata about the build including
//! the CLI version, Rust version, and all bundled assets.
//!
//! We eventually plan to use this manifest to support tighter integration with deployment platforms
//! and CDNs.
//!
//! This manifest contains the list of assets, rust version, and cli version used to build the app.
//! Eventually, we might want to expand this to include more metadata about the build, including
//! build time, target platform, etc.
use dioxus_cli_opt::AssetManifest;
use serde::{Deserialize, Serialize};
// Serialized to disk alongside the build output; field names form the on-disk schema,
// so renames are breaking changes.
#[derive(Default, Serialize, Deserialize)]
pub struct AppManifest {
    /// The version of the `dx` CLI that produced this build.
    ///
    /// Stable since 0.7.0
    pub cli_version: String,
    /// The Rust toolchain version the app was built with.
    ///
    /// Stable since 0.7.0
    pub rust_version: String,
    /// The manifest of all assets bundled with the app.
    ///
    /// Stable since 0.7.0
    pub assets: AssetManifest,
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/build/pre_render.rs | packages/cli/src/build/pre_render.rs | use anyhow::Context;
use dioxus_cli_config::{server_ip, server_port};
use dioxus_dx_wire_format::BuildStage;
use futures_util::{stream::FuturesUnordered, StreamExt};
use std::{
net::{IpAddr, Ipv4Addr, SocketAddr},
time::Duration,
};
use tokio::process::Command;
use crate::BuildId;
use super::{AppBuilder, BuilderUpdate};
/// Pre-render the static routes, performing static-site generation.
///
/// Launches the freshly-built server executable on a private address, asks it for its
/// static route list via `POST /api/static_routes`, then requests each route with
/// `Accept: text/html` so the server renders and writes the final page to disk.
///
/// `devserver_ip` is forwarded into the child's environment; `updates`, when present,
/// receives a single `Prerendering` progress event before the work starts.
pub(crate) async fn pre_render_static_routes(
    devserver_ip: Option<SocketAddr>,
    builder: &mut AppBuilder,
    updates: Option<&futures_channel::mpsc::UnboundedSender<BuilderUpdate>>,
) -> anyhow::Result<()> {
    if let Some(updates) = updates {
        updates
            .unbounded_send(BuilderUpdate::Progress {
                stage: BuildStage::Prerendering,
            })
            .unwrap();
    }

    let server_exe = builder.build.main_exe();

    // Use the address passed in through environment variables or default to localhost:9999. We need
    // to default to a value that is different than the CLI default address to avoid conflicts
    let ip = server_ip().unwrap_or_else(|| IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
    let port = server_port().unwrap_or(9999);
    let fullstack_address = SocketAddr::new(ip, port);
    let address = fullstack_address.ip().to_string();
    let port = fullstack_address.port().to_string();

    // Borrow port and address so we can easily move them into multiple tasks below
    let address = &address;
    let port = &port;

    tracing::info!("Running SSG at http://{address}:{port} for {server_exe:?}");

    let vars = builder.child_environment_variables(
        devserver_ip,
        Some(fullstack_address),
        false,
        BuildId::SECONDARY,
    );

    // Run the server executable; `kill_on_drop` ensures it's cleaned up when we return.
    let _child = Command::new(&server_exe)
        .envs(vars)
        .current_dir(server_exe.parent().unwrap())
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .kill_on_drop(true)
        .spawn()?;

    // Borrow reqwest_client so we only move the reference into the futures
    let reqwest_client = reqwest::Client::new();
    let reqwest_client = &reqwest_client;

    // Get the routes from the `/static_routes` endpoint
    let mut routes = None;

    // The server may take a few seconds to start up. Try fetching the routes once, then
    // retry up to RETRY_ATTEMPTS more times with a one second delay in between.
    const RETRY_ATTEMPTS: usize = 5;
    for i in 0..=RETRY_ATTEMPTS {
        // `i` is zero-based; report human-friendly 1-based attempt numbers out of the
        // true total (initial try + RETRY_ATTEMPTS retries).
        tracing::debug!(
            "Attempting to get static routes from server. Attempt {} of {}",
            i + 1,
            RETRY_ATTEMPTS + 1
        );

        let request = reqwest_client
            .post(format!("http://{address}:{port}/api/static_routes"))
            .body("{}".to_string())
            .send()
            .await;
        match request {
            Ok(request) => {
                routes = Some(request
                    .json::<Vec<String>>()
                    .await
                    .inspect(|text| tracing::debug!("Got static routes: {text:?}"))
                    .context("Failed to parse static routes from the server. Make sure your server function returns Vec<String> with the (default) json encoding")?);
                break;
            }
            Err(err) => {
                // If the request fails, retry after a one second delay; once the final
                // attempt has failed, surface the underlying error with context.
                if i == RETRY_ATTEMPTS {
                    return Err(err).context("Failed to get static routes from server. Make sure you have a server function at the `/api/static_routes` endpoint that returns Vec<String> of static routes.");
                }
                tokio::time::sleep(std::time::Duration::from_secs(1)).await;
            }
        }
    }

    let routes = routes.expect(
        "static routes should exist or an error should have been returned on the last attempt",
    );

    // Create a pool of futures that cache each route
    let mut resolved_routes = routes
        .into_iter()
        .map(|route| async move {
            tracing::info!("Rendering {route} for SSG");
            // For each route, ping the server to force it to cache the response for ssg
            let request = reqwest_client
                .get(format!("http://{address}:{port}{route}"))
                .header("Accept", "text/html")
                .send()
                .await?;

            // If it takes longer than 30 seconds to resolve the route, log a warning
            let warning_task = tokio::spawn({
                let route = route.clone();
                async move {
                    tokio::time::sleep(Duration::from_secs(30)).await;
                    tracing::warn!("Route {route} has been rendering for 30 seconds");
                }
            });

            // Wait for the streaming response to completely finish before continuing. We don't use the html it returns directly
            // because it may contain artifacts of intermediate streaming steps while the page is loading. The SSG app should write
            // the final clean HTML to the disk automatically after the request completes.
            let _html = request.text().await?;

            // Cancel the warning task if it hasn't already run
            warning_task.abort();

            Ok::<_, reqwest::Error>(route)
        })
        .collect::<FuturesUnordered<_>>();

    // Drain results as they complete; an individual route failing is logged, not fatal.
    while let Some(route) = resolved_routes.next().await {
        match route {
            Ok(route) => tracing::debug!("ssg success: {route:?}"),
            Err(err) => tracing::error!("ssg error: {err:?}"),
        }
    }

    tracing::info!("SSG complete");

    // Explicitly tear down the server process (kill_on_drop fires here).
    drop(_child);

    Ok(())
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/build/mod.rs | packages/cli/src/build/mod.rs | //! The core build module for `dx`, enabling building, bundling, and runtime hot-patching of Rust
//! applications. This module defines the entire end-to-end build process, including bundling for
//! all major platforms including Mac, Windows, Linux, iOS, Android, and WebAssembly.
//!
//! The bulk of the builder code is contained within the [`request`] module which establishes the
//! arguments and flow of the build process. The [`context`] module contains the context for the build
//! including status updates and build customization. The [`patch`] module contains the logic for
//! hot-patching Rust code through binary analysis and a custom linker. The [`builder`] module contains
//! the management of the ongoing build and methods to open the build as a running app.
mod assets;
mod builder;
mod context;
mod manifest;
mod patch;
mod pre_render;
mod request;
mod tools;
pub(crate) use assets::*;
pub(crate) use builder::*;
pub(crate) use context::*;
pub(crate) use manifest::*;
pub(crate) use patch::*;
pub(crate) use pre_render::*;
pub(crate) use request::*;
pub(crate) use tools::*;
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/build/request.rs | packages/cli/src/build/request.rs | //! # [`BuildRequest`] - the core of the build process
//!
//! The [`BuildRequest`] object is the core of the build process. It contains all the resolved arguments
//! flowing in from the CLI, dioxus.toml, env vars, and the workspace.
//!
//! Every BuildRequest is tied to a given workspace and BuildArgs. For simplicity's sake, the BuildArgs
//! struct is used to represent the CLI arguments and all other configuration is basically just
//! extra CLI arguments, but in a configuration format.
//!
//! When [`BuildRequest::build`] is called, it will prepare its work directory in the target folder
//! and then start running the build process. A [`BuildContext`] is required to customize this
//! build process, containing a channel for progress updates and the build mode.
//!
//! The [`BuildMode`] is extremely important since it influences how the build is performed. Most
//! "normal" builds just use [`BuildMode::Base`], but we also support [`BuildMode::Fat`] and
//! [`BuildMode::Thin`]. These builds are used together to power the hot-patching and fast-linking
//! engine.
//! - BuildMode::Base: A normal build generated using `cargo rustc`
//! - BuildMode::Fat: A "fat" build where all dependency rlibs are merged into a static library
//! - BuildMode::Thin: A "thin" build that dynamically links against the artifacts produced by the "fat" build
//!
//! The BuildRequest is also responsible for writing the final build artifacts to disk. This includes
//!
//! - Writing the executable
//! - Processing assets from the artifact
//! - Writing any metadata or configuration files (Info.plist, AndroidManifest.xml)
//! - Bundle splitting (for wasm) and wasm-bindgen
//!
//! In some cases, the BuildRequest also handles the linking of the final executable. Specifically,
//! - For Android, we use `dx` as an opaque linker to dynamically find the true android linker
//! - For hotpatching, the CLI manually links the final executable with a stub file
//!
//! ## Build formats:
//!
//! We support building for the most popular platforms:
//! - Web via wasm-bindgen
//! - macOS via app-bundle
//! - iOS via app-bundle
//! - Android via gradle
//! - Linux via app-image
//! - Windows via exe, msi/msix
//!
//! Note that we are missing some setups that we *should* support:
//! - PWAs, WebWorkers, ServiceWorkers
//! - Web Extensions
//! - Linux via flatpak/snap
//!
//! There are some less popular formats that we might want to support eventually:
//! - TVOS, watchOS
//! - OpenHarmony
//!
//! Also, some deploy platforms have their own bespoke formats:
//! - Cloudflare workers
//! - AWS Lambda
//!
//! Currently, we defer most of our deploy-based bundling to Tauri bundle, though we should migrate
//! to just bundling everything ourselves. This would require us to implement code-signing which
//! is a bit of a pain, but fortunately a solved process (<https://github.com/rust-mobile/xbuild>).
//!
//! ## Build Structure
//!
//! Builds generally follow the same structure everywhere:
//! - A main executable
//! - Sidecars (alternate entrypoints, framework plugins, etc)
//! - Assets (images, fonts, etc)
//! - Metadata (Info.plist, AndroidManifest.xml)
//! - Glue code (java, kotlin, javascript etc)
//! - Entitlements for code-signing and verification
//!
//! We need to be careful to not try and put a "round peg in a square hole," but most platforms follow
//! the same pattern.
//!
//! As such, we try to assemble a build directory that's somewhat sensible:
//! - A main "staging" dir for a given app
//! - Per-profile dirs (debug/release)
//! - A platform dir (ie web/desktop/android/ios)
//! - The "bundle" dir which is basically the `.app` format or `wwww` dir.
//! - The "executable" dir where the main exe is housed
//! - The "assets" dir where the assets are housed
//! - The "meta" dir where stuff like Info.plist, AndroidManifest.xml, etc are housed
//!
//! There's also some "quirky" folders that need to be stable between builds but don't influence the
//! bundle itself:
//! - session_cache_dir which stores stuff like window position
//!
//! ### Web:
//!
//! Create a folder that is somewhat similar to an app-image (exe + asset)
//! The server is dropped into the `web` folder, even if there's no `public` folder.
//! If there's no server (SPA), we still use the `web` folder, but it only contains the
//! public folder.
//!
//! ```
//! web/
//! server
//! assets/
//! public/
//! index.html
//! wasm/
//! app.wasm
//! glue.js
//! snippets/
//! ...
//! assets/
//! logo.png
//! ```
//!
//! ### Linux:
//!
//! <https://docs.appimage.org/reference/appdir.html#ref-appdir>
//! current_exe.join("Assets")
//! ```
//! app.appimage/
//! AppRun
//! app.desktop
//! package.json
//! assets/
//! logo.png
//! ```
//!
//! ### Macos
//!
//! We simply use the macos format where binaries are in `Contents/MacOS` and assets are in `Contents/Resources`
//! We put assets in an assets dir such that it generally matches every other platform and we can
//! output `/assets/blah` from manganis.
//! ```
//! App.app/
//! Contents/
//! Info.plist
//! MacOS/
//! Frameworks/
//! Resources/
//! assets/
//! blah.icns
//! blah.png
//! CodeResources
//! _CodeSignature/
//! ```
//!
//! ### iOS
//!
//! Not the same as mac! ios apps are a bit "flattened" in comparison. simpler format, presumably
//! since most ios apps don't ship frameworks/plugins and such.
//!
//! todo(jon): include the signing and entitlements in this format diagram.
//! ```
//! App.app/
//! main
//! assets/
//! ```
//!
//! ### Android:
//!
//! Currently we need to generate a `src` type structure, not a pre-packaged apk structure, since
//! we need to compile kotlin and java. This pushes us into using gradle and following a structure
//! similar to that of cargo mobile2. Eventually I'd like to slim this down (drop buildSrc) and
//! drive the kotlin build ourselves. This would let us drop gradle (yay! no plugins!) but requires
//! us to manage dependencies (like kotlinc) ourselves (yuck!).
//!
//! <https://github.com/WanghongLin/miscellaneous/blob/master/tools/build-apk-manually.sh>
//!
//! Unfortunately, it seems that while we can drop the `android` build plugin, we still will need
//! gradle since kotlin is basically gradle-only.
//!
//! Pre-build:
//! ```
//! app.apk/
//! .gradle
//! app/
//! src/
//! main/
//! assets/
//! jniLibs/
//! java/
//! kotlin/
//! res/
//! AndroidManifest.xml
//! build.gradle.kts
//! proguard-rules.pro
//! buildSrc/
//! build.gradle.kts
//! src/
//! main/
//! kotlin/
//! BuildTask.kt
//! build.gradle.kts
//! gradle.properties
//! gradlew
//! gradlew.bat
//! settings.gradle
//! ```
//!
//! Final build:
//! ```
//! app.apk/
//! AndroidManifest.xml
//! classes.dex
//! assets/
//! logo.png
//! lib/
//! armeabi-v7a/
//! libmyapp.so
//! arm64-v8a/
//! libmyapp.so
//! x86/
//! libmyapp.so
//! x86_64/
//! libmyapp.so
//! ```
//! Notice that we *could* feasibly build this ourselves :)
//!
//! ### Windows:
//! <https://superuser.com/questions/749447/creating-a-single-file-executable-from-a-directory-in-windows>
//! Windows does not provide an AppImage format, so instead we're going build the same folder
//! structure as an AppImage, but when distributing, we'll create a .exe that embeds the resources
//! as an embedded .zip file. When the app runs, it will implicitly unzip its resources into the
//! Program Files folder. Any subsequent launches of the parent .exe will simply call the AppRun.exe
//! entrypoint in the associated Program Files folder.
//!
//! This is, in essence, the same as an installer, so we might eventually just support something like msi/msix
//! which functionally do the same thing but with a sleeker UI.
//!
//! This means no installers are required and we can bake an updater into the host exe.
//!
//! ## Handling asset lookups:
//! current_exe.join("assets")
//! ```
//! app.appimage/
//! main.exe
//! main.desktop
//! package.json
//! assets/
//! logo.png
//! ```
//!
//! Since we support just a few locations, we could just search for the first that exists
//! - usr
//! - ../Resources
//! - assets
//! - Assets
//! - $cwd/assets
//!
//! ```
//! assets::root() ->
//! mac -> ../Resources/
//! ios -> ../Resources/
//! android -> assets/
//! server -> assets/
//! liveview -> assets/
//! web -> /assets/
//! root().join(bundled)
//! ```
//!
//! Every dioxus app can have an optional server executable which will influence the final bundle.
//! This is built in parallel with the app executable during the `build` phase and the progress/status
//! of the build is aggregated.
//!
//! The server will *always* be dropped into the `web` folder since it is considered "web" in nature,
//! and will likely need to be combined with the public dir to be useful.
//!
//! We do our best to assemble ready-to-go bundles here, such that the "bundle" step for each platform
//! can just use the build dir
//!
//! When we write the AppBundle to a folder, it'll contain each bundle for each platform under the app's name:
//! ```
//! dog-app/
//! build/
//! web/
//! server.exe
//! assets/
//! some-secret-asset.txt (a server-side asset)
//! public/
//! index.html
//! assets/
//! logo.png
//! desktop/
//! App.app
//! App.appimage
//! App.exe
//! server/
//! server
//! assets/
//! some-secret-asset.txt (a server-side asset)
//! ios/
//! App.app
//! App.ipa
//! android/
//! App.apk
//! bundle/
//! build.json
//! Desktop.app
//! Mobile_x64.ipa
//! Mobile_arm64.ipa
//! Mobile_rosetta.ipa
//! web.appimage
//! web/
//! server.exe
//! assets/
//! some-secret-asset.txt
//! public/
//! index.html
//! assets/
//! logo.png
//! style.css
//! ```
//!
//! When deploying, the build.json file will provide all the metadata that dx-deploy will use to
//! push the app to stores, set up infra, manage versions, etc.
//!
//! The format of each build will follow the name plus some metadata such that when distributing you
//! can easily trim off the metadata.
//!
//! The idea here is that we can run any of the programs in the same way that they're deployed.
//!
//! ## Bundle structure links
//! - apple: <https://developer.apple.com/documentation/bundleresources/placing_content_in_a_bundle>
//! - appimage: <https://docs.appimage.org/packaging-guide/manual.html#ref-manual>
//!
//! ## Extra links
//! - xbuild: <https://github.com/rust-mobile/xbuild/blob/master/xbuild/src/command/build.rs>
use crate::{
AndroidTools, AppManifest, BuildContext, BuildId, BundleFormat, DioxusConfig, Error,
LinkAction, LinkerFlavor, Platform, Renderer, Result, RustcArgs, TargetArgs, TraceSrc,
WasmBindgen, WasmOptConfig, Workspace, DX_RUSTC_WRAPPER_ENV_VAR,
};
use anyhow::{bail, Context};
use cargo_metadata::diagnostic::Diagnostic;
use cargo_toml::{Profile, Profiles, StripSetting};
use depinfo::RustcDepInfo;
use dioxus_cli_config::{format_base_path_meta_element, PRODUCT_NAME_ENV};
use dioxus_cli_config::{APP_TITLE_ENV, ASSET_ROOT_ENV};
use dioxus_cli_opt::{process_file_to, AssetManifest};
use itertools::Itertools;
use krates::{cm::TargetKind, NodeId};
use manganis::{AssetOptions, BundledAsset};
use manganis_core::{AssetOptionsBuilder, AssetVariant};
use rayon::prelude::{IntoParallelRefIterator, ParallelIterator};
use serde::{Deserialize, Serialize};
use std::{borrow::Cow, ffi::OsString};
use std::{
collections::{BTreeMap, HashSet},
io::Write,
path::{Path, PathBuf},
process::Stdio,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
time::{SystemTime, UNIX_EPOCH},
};
use subsecond_types::JumpTable;
use target_lexicon::{Architecture, OperatingSystem, Triple};
use tempfile::TempDir;
use tokio::{io::AsyncBufReadExt, process::Command};
use uuid::Uuid;
use super::HotpatchModuleCache;
/// This struct is used to plan the build process.
///
/// The point here is to be able to take in the user's config from the CLI without modifying the
/// arguments in place. Creating a buildplan "resolves" their config into a build plan that can be
/// introspected. For example, the users might not specify a "Triple" in the CLI but the triple will
/// be guaranteed to be resolved here.
///
/// Creating a buildplan also lets us introspect build requests and modularize our build process.
/// This will, however, lead to duplicate fields between the CLI and the build engine. This is fine
/// since we have the freedom to evolve the schema internally without breaking the API.
///
/// All updates from the build will be sent on a global "BuildProgress" channel.
#[derive(Clone)]
pub(crate) struct BuildRequest {
    /// Shared workspace metadata (krates graph, config loaders, tool lookups).
    pub(crate) workspace: Arc<Workspace>,
    /// The crate's `Dioxus.toml` configuration, defaulted when the file is absent.
    pub(crate) config: DioxusConfig,
    /// Node id of the main package within the workspace's krates graph.
    pub(crate) crate_package: NodeId,
    /// The resolved cargo target (bin or example) that will be compiled.
    pub(crate) crate_target: krates::cm::Target,
    /// Cargo profile name to build with (e.g. "dev", "release", or a custom profile).
    pub(crate) profile: String,
    /// Whether this is a release build.
    pub(crate) release: bool,
    /// Resolved output bundle format (web/macos/windows/linux/ios/android/...).
    pub(crate) bundle: BundleFormat,
    /// Resolved target triple — guaranteed present after `BuildRequest::new`.
    pub(crate) triple: Triple,
    /// Specific device to deploy to, if any (mobile builds) — `None` means simulator/default.
    pub(crate) device_name: Option<String>,
    /// Whether the output should be codesigned (Apple platforms) — TODO confirm usage downstream.
    pub(crate) should_codesign: bool,
    /// Name of the cargo package being built.
    pub(crate) package: String,
    /// The main target name for the client+server pair; the client's name for
    /// `@client`/`@server` builds so both share one output directory.
    pub(crate) main_target: String,
    /// Cargo features to enable, including any renderer feature injected by platform resolution.
    pub(crate) features: Vec<String>,
    /// RUSTFLAGS to pass through to the compilation.
    pub(crate) rustflags: cargo_config2::Flags,
    /// Extra arguments forwarded verbatim to `cargo`.
    pub(crate) extra_cargo_args: Vec<String>,
    /// Extra arguments forwarded verbatim to `rustc`.
    pub(crate) extra_rustc_args: Vec<String>,
    /// Whether to pass `--no-default-features` (set automatically when a platform preset is used).
    pub(crate) no_default_features: bool,
    /// Whether to pass `--all-features`.
    pub(crate) all_features: bool,
    /// Cargo target directory used for this build's intermediate artifacts.
    pub(crate) target_dir: PathBuf,
    /// Skip asset extraction/optimization entirely.
    pub(crate) skip_assets: bool,
    /// Enable experimental wasm module splitting (web builds).
    pub(crate) wasm_split: bool,
    /// Keep debug symbols in the final artifact.
    pub(crate) debug_symbols: bool,
    /// Inject dev-time loading scripts into the generated HTML (web builds) — presumably
    /// disabled for production bundles; verify against the web bundling path.
    pub(crate) inject_loading_scripts: bool,
    /// Override linker binary, if the user supplied one.
    pub(crate) custom_linker: Option<PathBuf>,
    /// Base URL path the app is served from (web builds).
    pub(crate) base_path: Option<String>,
    /// True when the package has a direct `dioxus` dependency; several autodetection
    /// steps only run in that case.
    pub(crate) using_dioxus_explicitly: bool,
    /// Path to an entitlements plist for Apple codesigning, if provided.
    pub(crate) apple_entitlements: Option<PathBuf>,
    /// Apple development team identifier for codesigning, if provided.
    pub(crate) apple_team_id: Option<String>,
    /// Per-session scratch directory for intermediate build state.
    pub(crate) session_cache_dir: PathBuf,
    /// Emit raw JSON diagnostics from cargo/rustc instead of pretty-printed ones.
    pub(crate) raw_json_diagnostics: bool,
    /// Windows subsystem to link with (e.g. "windows" vs "console"), if overridden.
    pub(crate) windows_subsystem: Option<String>,
}
/// dx can produce different "modes" of a build. A "regular" build is a "base" build. The Fat and Thin
/// modes are used together to achieve binary patching and linking.
///
/// Guide:
/// ----------
/// - Base: A normal build generated using `cargo rustc`, intended for production use cases
///
/// - Fat: A "fat" build with -Wl,-all_load and no_dead_strip, keeping *every* symbol in the binary.
/// Intended for development for larger up-front builds with faster link times and the ability
/// to binary patch the final binary. On WASM, this also forces wasm-bindgen to generate all
/// JS-WASM bindings, saving us the need to re-wasmbindgen the final binary.
///
/// - Thin: A "thin" build that dynamically links against the dependencies produced by the "fat" build.
/// This is generated by calling rustc *directly* and might be more fragile to construct, but
/// generates *much* faster than a regular base or fat build.
#[derive(Clone, Debug, PartialEq)]
pub enum BuildMode {
    /// A normal build generated using `cargo rustc`
    ///
    /// "run" indicates whether this build is intended to be run immediately after building.
    /// This means we try to capture the build environment, saving vars like `CARGO_MANIFEST_DIR`
    /// for the running executable.
    Base { run: bool },
    /// A "Fat" build generated with cargo rustc and dx as a custom linker without -Wl,-dead-strip
    Fat,
    /// A "thin" build generated with `rustc` directly and dx as a custom linker
    Thin {
        /// The rustc invocation to replay — presumably the `direct_rustc` captured from the
        /// preceding fat build (see `BuildArtifacts`); confirm against the hotpatch path.
        rustc_args: RustcArgs,
        /// Source files that changed and triggered this thin rebuild.
        changed_files: Vec<PathBuf>,
        /// Runtime address used to rebase the patch against the ASLR'd running
        /// executable — NOTE(review): confirm whether this is the image base or a probe symbol.
        aslr_reference: u64,
        /// Symbol/module cache produced by the fat build, used to resolve the patch's references.
        cache: Arc<HotpatchModuleCache>,
    },
}
/// The end result of a build.
///
/// Contains the final asset manifest, the executable, and metadata about the build.
/// Note that the `exe` might be stale and/or overwritten by the time you read it!
///
/// The patch cache is only populated on fat builds and then used for thin builds (see `BuildMode::Thin`).
#[derive(Clone, Debug)]
pub struct BuildArtifacts {
    /// Root directory of the assembled app bundle for this build.
    pub(crate) root_dir: PathBuf,
    /// Path to the built executable. May be stale and/or overwritten by the time you read it.
    pub(crate) exe: PathBuf,
    /// The rustc arguments captured from this build — used to replay the compiler
    /// directly for thin (hotpatch) builds.
    pub(crate) direct_rustc: RustcArgs,
    /// Wall-clock time the build started.
    pub(crate) time_start: SystemTime,
    /// Wall-clock time the build finished.
    pub(crate) time_end: SystemTime,
    /// Manifest of assets discovered in the built executable.
    pub(crate) assets: AssetManifest,
    /// Which mode (base/fat/thin) produced this artifact.
    pub(crate) mode: BuildMode,
    /// Only populated on fat builds; consumed by subsequent thin builds (see `BuildMode::Thin`).
    pub(crate) patch_cache: Option<Arc<HotpatchModuleCache>>,
    /// Parsed rustc dep-info for this build — presumably feeds the file watcher; confirm.
    pub(crate) depinfo: RustcDepInfo,
    /// Identifier of this build within the serve session.
    pub(crate) build_id: BuildId,
}
impl BuildRequest {
/// Create a new build request.
///
/// This method consolidates various inputs into a single source of truth. It combines:
/// - Command-line arguments provided by the user.
/// - The crate's `Cargo.toml`.
/// - The `dioxus.toml` configuration file.
/// - User-specific CLI settings.
/// - The workspace metadata.
/// - Host-specific details (e.g., Android tools, installed frameworks).
/// - The intended target platform.
///
/// Fields may be duplicated from the inputs to allow for autodetection and resolution.
///
/// Autodetection is performed for unspecified fields where possible.
///
/// Note: Build requests are typically created only when the CLI is invoked or when significant
/// changes are detected in the `Cargo.toml` (e.g., features added or removed).
pub(crate) async fn new(args: &TargetArgs, workspace: Arc<Workspace>) -> Result<Self> {
let crate_package = workspace.find_main_package(args.package.clone())?;
let config = workspace
.load_dioxus_config(crate_package)?
.unwrap_or_default();
let target_kind = match args.example.is_some() {
true => TargetKind::Example,
false => TargetKind::Bin,
};
let main_package = &workspace.krates[crate_package];
let target_name = args
.example
.clone()
.or(args.bin.clone())
.or_else(|| {
if let Some(default_run) = &main_package.default_run {
return Some(default_run.to_string());
}
let bin_count = main_package
.targets
.iter()
.filter(|x| x.kind.contains(&target_kind))
.count();
if bin_count != 1 {
return None;
}
main_package.targets.iter().find_map(|x| {
if x.kind.contains(&target_kind) {
Some(x.name.clone())
} else {
None
}
})
})
.unwrap_or(workspace.krates[crate_package].name.clone());
// Use the main_target for the client + server build if it is set, otherwise use the target name for this
// specific build. This is important for @client @server syntax so we use the client's output directory for the bundle.
let main_target = args.client_target.clone().unwrap_or(target_name.clone());
let crate_target = main_package
.targets
.iter()
.find(|target| {
target_name == target.name.as_str() && target.kind.contains(&target_kind)
})
.with_context(|| {
let target_of_kind = |kind|-> String {
let filtered_packages = main_package
.targets
.iter()
.filter_map(|target| {
target.kind.contains(kind).then_some(target.name.as_str())
}).collect::<Vec<_>>();
filtered_packages.join(", ")};
if let Some(example) = &args.example {
let examples = target_of_kind(&TargetKind::Example);
format!("Failed to find example {example}. \nAvailable examples are:\n{examples}")
} else if let Some(bin) = &args.bin {
let binaries = target_of_kind(&TargetKind::Bin);
format!("Failed to find binary {bin}. \nAvailable binaries are:\n{binaries}")
} else {
format!("Failed to find target {target_name}. \nIt looks like you are trying to build dioxus in a library crate. \
You either need to run dx from inside a binary crate or build a specific example with the `--example` flag. \
Available examples are:\n{}", target_of_kind(&TargetKind::Example))
}
})?
.clone();
// We usually use the simulator unless --device is passed *or* a device is detected by probing.
// For now, though, since we don't have probing, it just defaults to false
// Tools like xcrun/adb can detect devices
let device = args.device.clone();
let using_dioxus_explicitly = main_package
.dependencies
.iter()
.any(|dep| dep.name == "dioxus");
/*
Determine which features, triple, profile, etc to pass to the build.
Most of the time, users should use `dx serve --<platform>` where the platform name directly
corresponds to the feature in their cargo.toml. So,
- `dx serve --web` will enable the `web` feature
- `dx serve --mobile` will enable the `mobile` feature
- `dx serve --desktop` will enable the `desktop` feature
In this case, we set default-features to false and then add back the default features that
aren't renderers, and then add the feature for the given renderer (ie web/desktop/mobile).
We call this "no-default-features-stripped."
There are a few cases where the user doesn't need to pass a platform.
- they selected one via `dioxus = { features = ["web"] }`
- they have a single platform in their default features `default = ["web"]`
- there is only a single non-server renderer as a feature `web = ["dioxus/web"], server = ["dioxus/server"]`
- they compose the super triple via triple + bundleformat + features
Note that we only use the names of the features to correspond with the platform.
Platforms are "super triples", meaning they contain information about
- bundle format
- target triple
- how to serve
- enabled features
By default, the --platform presets correspond to:
- web: bundle(web), triple(wasm32), serve(http-serve), features("web")
- desktop: alias to mac/win/linux
- mac: bundle(mac), triple(host), serve(appbundle-open), features("desktop")
- windows: bundle(exefolder), triple(host), serve(run-exe), features("desktop")
- linux: bundle(appimage), triple(host), serve(run-exe), features("desktop")
- ios: bundle(ios), triple(arm64-apple-ios), serve(ios-simulator/xcrun), features("mobile")
    - android: bundle(android), triple(aarch64-linux-android), serve(android-emulator/adb), features("mobile")
- server: bundle(server), triple(host), serve(run-exe), features("server") (and disables the client)
- liveview: bundle(liveview), triple(host), serve(run-exe), features("liveview")
- unknown: <auto or default to desktop>
Fullstack usage is inferred from the presence of the fullstack feature or --fullstack.
*/
let mut features = args.features.clone();
let mut no_default_features = args.no_default_features;
let all_features = args.all_features;
let mut triple = args.target.clone();
let mut renderer = args.renderer;
let mut bundle_format = args.bundle;
let mut platform = args.platform;
// the crate might be selecting renderers but the user also passes a renderer. this is weird
// ie dioxus = { features = ["web"] } but also --platform desktop
// anyways, we collect it here in the event we need it if platform is not specified.
let dioxus_direct_renderer = Self::renderer_enabled_by_dioxus_dependency(main_package);
let known_features_as_renderers = Self::features_that_enable_renderers(main_package);
// The crate might enable multiple platforms or no platforms at
// We collect all the platforms it enables first and then select based on the --platform arg
let enabled_renderers = if no_default_features {
vec![]
} else {
Self::enabled_cargo_toml_default_features_renderers(main_package)
};
// Try the easy autodetects.
// - if the user has `dioxus = { features = ["web"] }`
// - if the `default =["web"]` or `default = ["dioxus/web"]`
// - if there's only one non-server platform ie `web = ["dioxus/web"], server = ["dioxus/server"]`
// Only do this if we're explicitly using dioxus
if matches!(platform, Platform::Unknown) && using_dioxus_explicitly {
let auto = dioxus_direct_renderer
.or_else(|| {
if enabled_renderers.len() == 1 {
Some(enabled_renderers[0].clone())
} else {
None
}
})
.or_else(|| {
// If multiple renderers are enabled, pick the first non-server one
if enabled_renderers.len() == 2
&& enabled_renderers
.iter()
.any(|f| matches!(f.0, Renderer::Server))
{
return Some(
enabled_renderers
.iter()
.find(|f| !matches!(f.0, Renderer::Server))
.cloned()
.unwrap(),
);
}
None
})
.or_else(|| {
// Pick the first non-server feature in the cargo.toml
let non_server_features = known_features_as_renderers
.iter()
.filter(|f| f.1.as_str() != "server")
.collect::<Vec<_>>();
if non_server_features.len() == 1 {
Some(non_server_features[0].clone())
} else {
None
}
});
if let Some((direct, feature)) = auto {
match direct {
_ if feature == "mobile" || feature == "dioxus/mobile" => {
bail!(
"Could not autodetect mobile platform. Use --ios or --android instead."
);
}
Renderer::Webview | Renderer::Native => {
if cfg!(target_os = "macos") {
platform = Platform::MacOS;
} else if cfg!(target_os = "linux") {
platform = Platform::Linux;
} else if cfg!(target_os = "windows") {
platform = Platform::Windows;
}
}
Renderer::Server => platform = Platform::Server,
Renderer::Liveview => platform = Platform::Liveview,
Renderer::Web => platform = Platform::Web,
}
renderer = renderer.or(Some(direct));
}
}
// Set the super triple from the platform if it's provided.
// Otherwise, we attempt to guess it from the rest of their inputs.
match platform {
Platform::Unknown => {}
Platform::Web => {
if main_package.features.contains_key("web") && renderer.is_none() {
features.push("web".into());
}
renderer = renderer.or(Some(Renderer::Web));
bundle_format = bundle_format.or(Some(BundleFormat::Web));
triple = triple.or(Some("wasm32-unknown-unknown".parse()?));
no_default_features = true;
}
Platform::MacOS => {
if main_package.features.contains_key("desktop") && renderer.is_none() {
features.push("desktop".into());
}
renderer = renderer.or(Some(Renderer::Webview));
bundle_format = bundle_format.or(Some(BundleFormat::MacOS));
triple = triple.or(Some(Triple::host()));
no_default_features = true;
}
Platform::Windows => {
if main_package.features.contains_key("desktop") && renderer.is_none() {
features.push("desktop".into());
}
renderer = renderer.or(Some(Renderer::Webview));
bundle_format = bundle_format.or(Some(BundleFormat::Windows));
triple = triple.or(Some(Triple::host()));
no_default_features = true;
}
Platform::Linux => {
if main_package.features.contains_key("desktop") && renderer.is_none() {
features.push("desktop".into());
}
renderer = renderer.or(Some(Renderer::Webview));
bundle_format = bundle_format.or(Some(BundleFormat::Linux));
triple = triple.or(Some(Triple::host()));
no_default_features = true;
}
Platform::Ios => {
if main_package.features.contains_key("mobile") && renderer.is_none() {
features.push("mobile".into());
}
renderer = renderer.or(Some(Renderer::Webview));
bundle_format = bundle_format.or(Some(BundleFormat::Ios));
no_default_features = true;
match device.is_some() {
// If targeting device, we want to build for the device which is always aarch64
true => triple = triple.or(Some("aarch64-apple-ios".parse()?)),
// If the host is aarch64, we assume the user wants to build for iOS simulator
false if matches!(Triple::host().architecture, Architecture::Aarch64(_)) => {
triple = triple.or(Some("aarch64-apple-ios-sim".parse()?))
}
// Otherwise, it's the x86_64 simulator, which is just x86_64-apple-ios
_ => triple = triple.or(Some("x86_64-apple-ios".parse()?)),
}
}
Platform::Android => {
if main_package.features.contains_key("mobile") && renderer.is_none() {
features.push("mobile".into());
}
renderer = renderer.or(Some(Renderer::Webview));
bundle_format = bundle_format.or(Some(BundleFormat::Android));
no_default_features = true;
// maybe probe adb?
if let Some(_device_name) = device.as_ref() {
if triple.is_none() {
triple = Some(
crate::get_android_tools()
.context("Failed to get android tools")?
.autodetect_android_device_triple()
.await,
);
}
} else {
triple = triple.or(Some({
match Triple::host().architecture {
Architecture::X86_32(_) => "i686-linux-android".parse()?,
Architecture::X86_64 => "x86_64-linux-android".parse()?,
Architecture::Aarch64(_) => "aarch64-linux-android".parse()?,
_ => "aarch64-linux-android".parse()?,
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | true |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/build/assets.rs | packages/cli/src/build/assets.rs | //! The dioxus asset system.
//!
//! This module provides functionality for extracting assets from a binary file and then writing back
//! their asset hashes directly into the binary file. Previously, we performed asset hashing in the
//! `asset!()` macro. The new system, implemented here, instead performs the hashing at build time,
//! which provides more flexibility in the asset processing pipeline.
//!
//! We chose to implement this approach since assets might reference each other which means we minimally
//! need to parse the asset to create a unique hash for each asset before they are used in the application.
//! The hashes are used both for cache busting the asset in the browser and to cache the asset optimization
//! process in the build system.
//!
//! We use the same lessons learned from the hot-patching engine which parses the binary file and its
//! symbol table to find symbols that match the `__ASSETS__` prefix. These symbols are ideally data
//! symbols and contain the BundledAsset data type which implements ConstSerialize and ConstDeserialize.
//!
//! When the binary is built, the `dioxus asset!()` macro will emit its metadata into the __ASSETS__
//! symbols, which we process here. After reading the metadata directly from the executable, we then
//! hash it and write the hash directly into the binary file.
//!
//! During development, we can skip this step for most platforms since local paths are sufficient
//! for asset loading. However, for WASM and for production builds, we need to ensure that assets
//! can be found relative to the current exe. Unfortunately, on android, the `current_exe` path is wrong,
//! so the assets are resolved against the "asset root" - which is covered by the asset loader crate.
//!
//! Finding the __ASSETS__ symbols is not quite straightforward when hotpatching, especially on WASM
//! since we build and link the module as relocatable, which is not a stable WASM proposal. In this
//! implementation, we handle both the non-PIE *and* PIC cases which are rather bespoke to our whole
//! build system.
use std::{
io::{Cursor, Read, Seek, Write},
path::{Path, PathBuf},
};
use crate::Result;
use anyhow::{bail, Context};
use const_serialize::{serialize_const, ConstVec, SerializeConst};
use dioxus_cli_opt::AssetManifest;
use manganis::{AssetOptions, AssetVariant, BundledAsset, ImageFormat, ImageSize};
use object::{File, Object, ObjectSection, ObjectSymbol, ReadCache, ReadRef, Section, Symbol};
use pdb::FallibleIterator;
use rayon::iter::{IntoParallelRefMutIterator, ParallelIterator};
/// Extract all manganis symbols and their sections from the given object file.
fn manganis_symbols<'a, 'b, R: ReadRef<'a>>(
file: &'b File<'a, R>,
) -> impl Iterator<Item = (ManganisVersion, Symbol<'a, 'b, R>, Section<'a, 'b, R>)> + 'b {
file.symbols().filter_map(move |symbol| {
let name = symbol.name().ok()?;
let version = looks_like_manganis_symbol(name)?;
let section_index = symbol.section_index()?;
let section = file.section_by_index(section_index).ok()?;
Some((version, symbol, section))
})
}
/// Which manganis serialization layout a symbol's payload uses.
///
/// Binaries built against manganis 0.7.0/0.7.1 embed a different `BundledAsset`
/// byte layout than 0.7.2+, so we track the version per symbol to pick the right
/// encoder/decoder.
#[derive(Copy, Clone)]
enum ManganisVersion {
    /// The legacy version of the manganis format published with 0.7.0 and 0.7.1
    Legacy,
    /// The new version of the manganis format 0.7.2 onward
    New,
}
impl ManganisVersion {
    /// Size in bytes of one serialized `BundledAsset` record in this format version.
    fn size(&self) -> usize {
        match self {
            ManganisVersion::Legacy => {
                <manganis_core_07::BundledAsset as const_serialize_07::SerializeConst>::MEMORY_LAYOUT.size()
            }
            ManganisVersion::New => BundledAsset::MEMORY_LAYOUT.size(),
        }
    }
    /// Decode a `BundledAsset` from raw bytes in this format version.
    ///
    /// Legacy records are upgraded to the modern `BundledAsset` type so the rest of
    /// the pipeline only handles one representation. Returns `None` if the bytes do
    /// not deserialize as this version.
    fn deserialize(&self, data: &[u8]) -> Option<BundledAsset> {
        match self {
            ManganisVersion::Legacy => {
                let buffer = const_serialize_07::ConstReadBuffer::new(data);
                let (_, legacy_asset) =
                    const_serialize_07::deserialize_const!(manganis_core_07::BundledAsset, buffer)?;
                Some(legacy_asset_to_modern_asset(&legacy_asset))
            }
            ManganisVersion::New => {
                let (_, asset) =
                    const_serialize::deserialize_const!(manganis_core::BundledAsset, data)?;
                Some(asset)
            }
        }
    }
    /// Encode `asset` into the byte layout of this format version, downgrading to
    /// the legacy type first when writing back into a legacy binary.
    fn serialize(&self, asset: &BundledAsset) -> Vec<u8> {
        match self {
            ManganisVersion::Legacy => {
                let legacy_asset = modern_asset_to_legacy_asset(asset);
                let buffer = const_serialize_07::serialize_const(
                    &legacy_asset,
                    const_serialize_07::ConstVec::new(),
                );
                buffer.as_ref().to_vec()
            }
            ManganisVersion::New => {
                let buffer = serialize_const(asset, ConstVec::new());
                buffer.as_ref().to_vec()
            }
        }
    }
}
/// Convert a legacy (manganis 0.7.0/0.7.1) `BundledAsset` into the modern type.
///
/// Copies the paths verbatim and translates the options variant-by-variant.
/// Variants the legacy format has but we don't map explicitly fall through to a
/// generic builder that only preserves the hash-suffix flag — any variant-specific
/// settings on those are dropped.
fn legacy_asset_to_modern_asset(
    legacy_asset: &manganis_core_07::BundledAsset,
) -> manganis_core::BundledAsset {
    let bundled_path = legacy_asset.bundled_path();
    let absolute_path = legacy_asset.absolute_source_path();
    let legacy_options = legacy_asset.options();
    // Whether the bundled filename should carry a content-hash suffix.
    let add_hash = legacy_options.hash_suffix();
    let options = match legacy_options.variant() {
        manganis_core_07::AssetVariant::Image(image) => {
            let format = match image.format() {
                manganis_core_07::ImageFormat::Png => ImageFormat::Png,
                manganis_core_07::ImageFormat::Jpg => ImageFormat::Jpg,
                manganis_core_07::ImageFormat::Webp => ImageFormat::Webp,
                manganis_core_07::ImageFormat::Avif => ImageFormat::Avif,
                manganis_core_07::ImageFormat::Unknown => ImageFormat::Unknown,
            };
            let size = match image.size() {
                manganis_core_07::ImageSize::Automatic => ImageSize::Automatic,
                manganis_core_07::ImageSize::Manual { width, height } => {
                    ImageSize::Manual { width, height }
                }
            };
            let preload = image.preloaded();
            AssetOptions::image()
                .with_format(format)
                .with_size(size)
                .with_preload(preload)
                .with_hash_suffix(add_hash)
                .into_asset_options()
        }
        manganis_core_07::AssetVariant::Folder(_) => AssetOptions::folder()
            .with_hash_suffix(add_hash)
            .into_asset_options(),
        manganis_core_07::AssetVariant::Css(css) => AssetOptions::css()
            .with_hash_suffix(add_hash)
            .with_minify(css.minified())
            .with_preload(css.preloaded())
            .with_static_head(css.static_head())
            .into_asset_options(),
        manganis_core_07::AssetVariant::CssModule(css_module) => AssetOptions::css_module()
            .with_hash_suffix(add_hash)
            .with_minify(css_module.minified())
            .with_preload(css_module.preloaded())
            .into_asset_options(),
        manganis_core_07::AssetVariant::Js(js) => AssetOptions::js()
            .with_hash_suffix(add_hash)
            .with_minify(js.minified())
            .with_preload(js.preloaded())
            .with_static_head(js.static_head())
            .into_asset_options(),
        // Unmapped variants: keep only the hash-suffix setting.
        _ => AssetOptions::builder()
            .with_hash_suffix(add_hash)
            .into_asset_options(),
    };
    BundledAsset::new(absolute_path, bundled_path, options)
}
/// Convert a modern `BundledAsset` into the legacy (manganis 0.7.0/0.7.1) type.
///
/// Inverse of [`legacy_asset_to_modern_asset`]; needed so hashes can be written
/// back into binaries built against the old manganis layout. Variants without an
/// explicit mapping fall through to a generic builder that only preserves the
/// hash-suffix flag.
fn modern_asset_to_legacy_asset(modern_asset: &BundledAsset) -> manganis_core_07::BundledAsset {
    let bundled_path = modern_asset.bundled_path();
    let absolute_path = modern_asset.absolute_source_path();
    // Fix: this local was previously named `legacy_options`, but it holds the
    // *modern* asset's options that we're about to translate down.
    let modern_options = modern_asset.options();
    // Whether the bundled filename should carry a content-hash suffix.
    let add_hash = modern_options.hash_suffix();
    let options = match modern_options.variant() {
        AssetVariant::Image(image) => {
            let format = match image.format() {
                ImageFormat::Png => manganis_core_07::ImageFormat::Png,
                ImageFormat::Jpg => manganis_core_07::ImageFormat::Jpg,
                ImageFormat::Webp => manganis_core_07::ImageFormat::Webp,
                ImageFormat::Avif => manganis_core_07::ImageFormat::Avif,
                ImageFormat::Unknown => manganis_core_07::ImageFormat::Unknown,
            };
            let size = match image.size() {
                ImageSize::Automatic => manganis_core_07::ImageSize::Automatic,
                ImageSize::Manual { width, height } => {
                    manganis_core_07::ImageSize::Manual { width, height }
                }
            };
            let preload = image.preloaded();
            manganis_core_07::AssetOptions::image()
                .with_format(format)
                .with_size(size)
                .with_preload(preload)
                .with_hash_suffix(add_hash)
                .into_asset_options()
        }
        AssetVariant::Folder(_) => manganis_core_07::AssetOptions::folder()
            .with_hash_suffix(add_hash)
            .into_asset_options(),
        AssetVariant::Css(css) => manganis_core_07::AssetOptions::css()
            .with_hash_suffix(add_hash)
            .with_minify(css.minified())
            .with_preload(css.preloaded())
            .with_static_head(css.static_head())
            .into_asset_options(),
        AssetVariant::CssModule(css_module) => manganis_core_07::AssetOptions::css_module()
            .with_hash_suffix(add_hash)
            .with_minify(css_module.minified())
            .with_preload(css_module.preloaded())
            .into_asset_options(),
        AssetVariant::Js(js) => manganis_core_07::AssetOptions::js()
            .with_hash_suffix(add_hash)
            .with_minify(js.minified())
            .with_preload(js.preloaded())
            .with_static_head(js.static_head())
            .into_asset_options(),
        // Unmapped variants: keep only the hash-suffix setting.
        _ => manganis_core_07::AssetOptions::builder()
            .with_hash_suffix(add_hash)
            .into_asset_options(),
    };
    manganis_core_07::BundledAsset::new(absolute_path, bundled_path, options)
}
/// Classify a symbol name as a manganis asset marker, if it is one.
///
/// Legacy (0.7.0/0.7.1) binaries tag assets with `__MANGANIS__`; 0.7.2+ uses
/// `__ASSETS__`. The legacy marker is checked first, matching the original
/// precedence. Returns `None` for unrelated symbols.
fn looks_like_manganis_symbol(name: &str) -> Option<ManganisVersion> {
    match name {
        n if n.contains("__MANGANIS__") => Some(ManganisVersion::Legacy),
        n if n.contains("__ASSETS__") => Some(ManganisVersion::New),
        _ => None,
    }
}
/// An asset offset in the binary
///
/// Pairs the manganis format version of a symbol with the absolute byte offset of
/// its serialized `BundledAsset` payload within the executable file.
#[derive(Clone, Copy)]
struct ManganisSymbolOffset {
    /// Which serialization layout the payload at `offset` uses.
    version: ManganisVersion,
    /// Absolute file offset (in bytes) of the asset record within the binary.
    offset: u64,
}
impl ManganisSymbolOffset {
    /// Construct an offset record for a manganis symbol.
    fn new(version: ManganisVersion, offset: u64) -> Self {
        Self { version, offset }
    }
}
/// Find the offsets of any manganis symbols in the given file.
///
/// The lookup strategy depends on the binary format:
/// - WASM: offsets are derived from the data section (see `find_wasm_symbol_offsets`)
/// - PE (Windows): symbol info lives in a PDB file next to the executable, when one exists
/// - everything else: the object file's own symbol table
fn find_symbol_offsets<'a, R: ReadRef<'a>>(
    path: &Path,
    file_contents: &[u8],
    file: &File<'a, R>,
) -> Result<Vec<ManganisSymbolOffset>> {
    match file.format() {
        // We need to handle dynamic offsets in wasm files differently
        object::BinaryFormat::Wasm => find_wasm_symbol_offsets(file_contents, file),
        // Windows puts the symbol information in a PDB file alongside the executable.
        // Only probe the filesystem for a PDB when the file is actually a PE; fall back
        // to the object file's symbol table if no PDB is found. (Previously this used an
        // `is_some()` match guard plus `unwrap()`, and stat'ed for the PDB on every format.)
        object::BinaryFormat::Pe => match find_pdb_file(path) {
            Some(pdb_file) => find_pdb_symbol_offsets(&pdb_file),
            None => find_native_symbol_offsets(file),
        },
        // Otherwise, look for manganis symbols in the object file.
        _ => find_native_symbol_offsets(file),
    }
}
/// Find the PDB debug-info file matching the executable at `path`.
///
/// The PDB may exist either with the executable's exact stem (`my-app.pdb`) or
/// with `-` replaced by `_` (`my_app.pdb`, as rustc names it from the crate name).
/// When both exist, the most recently modified one wins. Returns `None` if
/// neither candidate exists on disk.
fn find_pdb_file(path: &Path) -> Option<PathBuf> {
    let mut pdb_file = path.with_extension("pdb");
    // Also try to find it in the same directory as the executable with _'s instead of -'s
    if let Some(file_name) = pdb_file.file_name() {
        let new_file_name = file_name.to_string_lossy().replace('-', "_");
        // (fixed typo: was `altrnate_pdb_file`)
        let alternate_pdb_file = pdb_file.with_file_name(new_file_name);
        // Keep the most recent pdb file
        match (pdb_file.metadata(), alternate_pdb_file.metadata()) {
            (Ok(pdb_metadata), Ok(alternate_metadata)) => {
                if let (Ok(pdb_modified), Ok(alternate_modified)) =
                    (pdb_metadata.modified(), alternate_metadata.modified())
                {
                    if pdb_modified < alternate_modified {
                        pdb_file = alternate_pdb_file;
                    }
                }
            }
            // Only the underscore variant exists — use it.
            (Err(_), Ok(_)) => {
                pdb_file = alternate_pdb_file;
            }
            _ => {}
        }
    }
    pdb_file.exists().then_some(pdb_file)
}
/// Find the offsets of any manganis symbols in a pdb file.
fn find_pdb_symbol_offsets(pdb_file: &Path) -> Result<Vec<ManganisSymbolOffset>> {
let pdb_file_handle = std::fs::File::open(pdb_file)?;
let mut pdb_file = pdb::PDB::open(pdb_file_handle).context("Failed to open PDB file")?;
let Ok(Some(sections)) = pdb_file.sections() else {
tracing::error!("Failed to read sections from PDB file");
return Ok(Vec::new());
};
let global_symbols = pdb_file
.global_symbols()
.context("Failed to read global symbols from PDB file")?;
let address_map = pdb_file
.address_map()
.context("Failed to read address map from PDB file")?;
let mut symbols = global_symbols.iter();
let mut addresses = Vec::new();
while let Ok(Some(symbol)) = symbols.next() {
let Ok(pdb::SymbolData::Public(data)) = symbol.parse() else {
continue;
};
let Some(rva) = data.offset.to_section_offset(&address_map) else {
continue;
};
let name = data.name.to_string();
if let Some(version) = looks_like_manganis_symbol(&name) {
let section = sections
.get(rva.section as usize - 1)
.expect("Section index out of bounds");
addresses.push(ManganisSymbolOffset::new(
version,
(section.pointer_to_raw_data + rva.offset) as u64,
));
}
}
Ok(addresses)
}
/// Find the offsets of any manganis symbols in a native object file.
///
/// For each manganis symbol, rebases its virtual address against the containing
/// section's file range to produce an absolute byte offset within the file.
/// Symbols whose section has no file range are logged and skipped.
fn find_native_symbol_offsets<'a, R: ReadRef<'a>>(
    file: &File<'a, R>,
) -> Result<Vec<ManganisSymbolOffset>> {
    let mut offsets = Vec::new();
    for (version, symbol, section) in manganis_symbols(file) {
        let virtual_address = symbol.address();
        let Some((section_range_start, _)) = section.file_range() else {
            tracing::error!(
                "Found __ASSETS__ symbol {:?} in section {}, but the section has no file range",
                symbol.name(),
                section.index()
            );
            continue;
        };
        // Translate the section_relative_address to the file offset.
        // The subtraction is done in i128 to avoid u64 wraparound; a negative result
        // would mean the symbol lies before its own section, which indicates a
        // malformed object file (hence the expect).
        let section_relative_address: u64 = (virtual_address as i128 - section.address() as i128)
            .try_into()
            .expect("Virtual address should be greater than or equal to section address");
        let file_offset = section_range_start + section_relative_address;
        offsets.push(ManganisSymbolOffset::new(version, file_offset));
    }
    Ok(offsets)
}
fn eval_walrus_global_expr(module: &walrus::Module, expr: &walrus::ConstExpr) -> Option<u64> {
match expr {
walrus::ConstExpr::Value(walrus::ir::Value::I32(value)) => Some(*value as u64),
walrus::ConstExpr::Value(walrus::ir::Value::I64(value)) => Some(*value as u64),
walrus::ConstExpr::Global(id) => {
let global = module.globals.get(*id);
if let walrus::GlobalKind::Local(pointer) = &global.kind {
eval_walrus_global_expr(module, pointer)
} else {
None
}
}
_ => None,
}
}
/// Find the offsets of any manganis symbols in the wasm file.
///
/// Wasm has no conventional symbol/section addressing, so this combines three views
/// of the same bytes: `object` to locate the `<data>` section's file range,
/// `wasmparser` to find where the main memory's data segment starts in the file,
/// and `walrus` to evaluate the exported globals that point at the asset structs.
fn find_wasm_symbol_offsets<'a, R: ReadRef<'a>>(
    file_contents: &[u8],
    file: &File<'a, R>,
) -> Result<Vec<ManganisSymbolOffset>> {
    let Some(section) = file
        .sections()
        .find(|section| section.name() == Ok("<data>"))
    else {
        tracing::error!("Failed to find <data> section in WASM file");
        return Ok(Vec::new());
    };
    let Some((_, section_range_end)) = section.file_range() else {
        tracing::error!("Failed to find file range for <data> section in WASM file");
        return Ok(Vec::new());
    };
    let section_size = section.data()?.len() as u64;
    let section_start = section_range_end - section_size;
    // Translate the section_relative_address to the file offset
    // WASM files have a section address of 0 in object, reparse the data section with wasmparser
    // to get the correct address and section start
    // Note: We need to reparse just the data section with wasmparser to get the file offset because walrus does
    // not expose the file offset information
    let reader = wasmparser::DataSectionReader::new(wasmparser::BinaryReader::new(
        &file_contents[section_start as usize..section_range_end as usize],
        0,
    ))
    .context("Failed to create WASM data section reader")?;
    let main_memory = reader
        .into_iter()
        .next()
        .context("Failed find main memory from WASM data section")?
        .context("Failed to read main memory from WASM data section")?;
    // main_memory.data is a slice somewhere in file_contents. Find out the offset in the file.
    // The expect is sound because the reader above was constructed from a subslice of
    // file_contents, so the segment's data pointer must lie within it.
    let data_start_offset = (main_memory.data.as_ptr() as u64)
        .checked_sub(file_contents.as_ptr() as u64)
        .expect("Data section start offset should be within the file contents");
    // Parse the wasm file to find the globals.
    // Bugfix: this used `.unwrap()` even though the function returns Result - a corrupt
    // or truncated wasm file would crash the CLI instead of surfacing an error.
    let module = walrus::Module::from_buffer(file_contents)
        .context("Failed to parse WASM module from buffer")?;
    let mut offsets = Vec::new();
    // Find the main memory offset
    let main_memory = module
        .data
        .iter()
        .next()
        .context("Failed to find main memory in WASM module")?;
    let walrus::DataKind::Active {
        offset: main_memory_offset,
        ..
    } = main_memory.kind
    else {
        tracing::error!("Failed to find main memory offset in WASM module");
        return Ok(Vec::new());
    };
    // In the hot patch build, the main memory offset is a global from the main module and each global
    // is it's own global. Use an offset of 0 instead if we can't evaluate the global
    let main_memory_offset =
        eval_walrus_global_expr(&module, &main_memory_offset).unwrap_or_default();
    for export in module.exports.iter() {
        let Some(version) = looks_like_manganis_symbol(&export.name) else {
            continue;
        };
        let walrus::ExportItem::Global(global) = export.item else {
            continue;
        };
        let walrus::GlobalKind::Local(pointer) = module.globals.get(global).kind else {
            continue;
        };
        let Some(virtual_address) = eval_walrus_global_expr(&module, &pointer) else {
            tracing::error!(
                "Found __ASSETS__ symbol {:?} in WASM file, but the global expression could not be evaluated",
                export.name
            );
            continue;
        };
        // The global holds a memory-space address; rebase it onto the data segment's
        // file offset to get the position of the asset struct within the file itself.
        let section_relative_address: u64 = ((virtual_address as i128)
            - main_memory_offset as i128)
            .try_into()
            .expect("Virtual address should be greater than or equal to section address");
        let file_offset = data_start_offset + section_relative_address;
        offsets.push(ManganisSymbolOffset::new(version, file_offset));
    }
    Ok(offsets)
}
/// Find all assets in the given file, hash them, and write them back to the file.
/// Then return an `AssetManifest` containing all the assets found in the file.
///
/// This mutates the binary in place: each serialized asset struct is re-serialized
/// at the same offset after its content hash is filled in, so the binary layout
/// is unchanged. On macOS the binary is re-signed afterwards since the in-place
/// write invalidates the code signature.
pub(crate) async fn extract_assets_from_file(path: impl AsRef<Path>) -> Result<AssetManifest> {
    let path = path.as_ref();
    // Open read+write with a retry loop - on windows, antivirus tools can briefly
    // hold the freshly-linked executable open.
    let mut file = open_file_for_writing_with_timeout(
        path,
        std::fs::OpenOptions::new().write(true).read(true),
    )
    .await?;
    let mut file_contents = Vec::new();
    file.read_to_end(&mut file_contents)?;
    let mut reader = Cursor::new(&file_contents);
    let read_cache = ReadCache::new(&mut reader);
    let object_file = object::File::parse(&read_cache)?;
    let offsets = find_symbol_offsets(path, &file_contents, &object_file)?;
    let mut assets = Vec::new();
    // Read each asset from the data section using the offsets
    for symbol in offsets.iter().copied() {
        let version = symbol.version;
        let offset = symbol.offset;
        file.seek(std::io::SeekFrom::Start(offset))?;
        // Each manganis version has a fixed serialized size, so read exactly that many bytes.
        let mut data_in_range = vec![0; version.size()];
        file.read_exact(&mut data_in_range)?;
        if let Some(bundled_asset) = version.deserialize(&data_in_range) {
            tracing::debug!(
                "Found asset at offset {offset}: {:?}",
                bundled_asset.absolute_source_path()
            );
            assets.push(bundled_asset);
        } else {
            tracing::warn!("Found an asset at offset {offset} that could not be deserialized. This may be caused by a mismatch between your dioxus and dioxus-cli versions.");
        }
    }
    // Add the hash to each asset in parallel
    assets
        .par_iter_mut()
        .for_each(dioxus_cli_opt::add_hash_to_asset);
    // Write back the assets to the binary file.
    // Note: `offsets` and `assets` stay index-aligned because deserialization failures
    // above only skip pushing - NOTE(review): if any asset failed to deserialize, the
    // zip below pairs later offsets with earlier assets; confirm failures are fatal upstream.
    for (symbol, asset) in offsets.into_iter().zip(&assets) {
        let version = symbol.version;
        let offset = symbol.offset;
        let new_data = version.serialize(asset);
        file.seek(std::io::SeekFrom::Start(offset))?;
        // Write the modified binary data back to the file
        file.write_all(new_data.as_ref())?;
    }
    // Ensure the file is flushed to disk
    file.sync_all()
        .context("Failed to sync file after writing assets")?;
    // If the file is a macos binary, we need to re-sign the modified binary
    if object_file.format() == object::BinaryFormat::MachO && !assets.is_empty() {
        // Spawn the codesign command to re-sign the binary
        let output = std::process::Command::new("codesign")
            .arg("--force")
            .arg("--sign")
            .arg("-") // Sign with an empty identity
            .arg(path)
            .output()
            .context("Failed to run codesign - is `codesign` in your path?")?;
        if !output.status.success() {
            bail!(
                "Failed to re-sign the binary with codesign after finalizing the assets: {}",
                String::from_utf8_lossy(&output.stderr)
            );
        }
    }
    // Finally, create the asset manifest
    let mut manifest = AssetManifest::default();
    for asset in assets {
        manifest.insert_asset(asset);
    }
    Ok(manifest)
}
/// Try to open a file for writing, retrying if the file is already open by another process.
///
/// This is useful on windows where antivirus software might grab the executable before we have a chance to read it.
async fn open_file_for_writing_with_timeout(
    file: &Path,
    options: &mut std::fs::OpenOptions,
) -> Result<std::fs::File> {
    // Give up retrying once five seconds have passed since the first attempt.
    let deadline = std::time::Instant::now() + std::time::Duration::from_secs(5);
    loop {
        let err = match options.open(file) {
            Ok(handle) => return Ok(handle),
            Err(err) => err,
        };
        // Raw OS error 32 on windows is a sharing violation - another process holds the file.
        let is_sharing_violation = cfg!(windows) && err.raw_os_error() == Some(32);
        if !is_sharing_violation || std::time::Instant::now() >= deadline {
            return Err(err.into());
        }
        tracing::trace!("Failed to open file because another process is using it. Retrying...");
        tokio::time::sleep(std::time::Duration::from_millis(50)).await;
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/build/context.rs | packages/cli/src/build/context.rs | //! Report progress about the build to the user. We use channels to report progress back to the CLI.
use super::BuildMode;
use crate::{BuildArtifacts, BuildStage, Error, TraceSrc};
use cargo_metadata::diagnostic::Diagnostic;
use futures_channel::mpsc::{UnboundedReceiver, UnboundedSender};
use serde::{Deserialize, Serialize};
use std::{path::PathBuf, process::ExitStatus};
/// The context of the build process. While the BuildRequest is a "plan" for the build, the BuildContext
/// provides some dynamic configuration that is only known at runtime. For example, the Progress channel
/// and the BuildMode can change while serving.
///
/// The structure of this is roughly taken from cargo itself which uses a similar pattern.
#[derive(Debug, Clone)]
pub struct BuildContext {
    /// Channel used to stream `BuilderUpdate` progress events back to the CLI.
    pub tx: ProgressTx,
    /// How this build is being performed (e.g. fat vs thin/hot-patch).
    pub mode: BuildMode,
    /// Which build this context belongs to (primary vs secondary).
    pub build_id: BuildId,
}
/// Sending half of the progress channel - the build emits `BuilderUpdate`s into this.
pub type ProgressTx = UnboundedSender<BuilderUpdate>;
/// Receiving half of the progress channel, consumed by the CLI frontend.
pub type ProgressRx = UnboundedReceiver<BuilderUpdate>;
/// Identifier distinguishing concurrent builds. Per `BuildContext::is_primary_build`,
/// the primary build is the client half and the secondary is e.g. the server half.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)]
pub struct BuildId(pub(crate) usize);
impl BuildId {
    /// The primary (client) build.
    pub const PRIMARY: Self = Self(0);
    /// The secondary (e.g. server) build.
    pub const SECONDARY: Self = Self(1);
}
/// Messages sent over the progress channel from a running build (or the app it spawned)
/// back to the CLI frontend.
#[allow(clippy::large_enum_variant)]
pub enum BuilderUpdate {
    /// The build moved to a new stage of its pipeline.
    Progress {
        stage: BuildStage,
    },
    /// A diagnostic (warning/error/etc.) forwarded from the compiler.
    CompilerMessage {
        message: Diagnostic,
    },
    /// The build completed successfully and the artifacts are ready. The artifacts are dependent on
    /// the build mode (fat vs thin vs base).
    BuildReady {
        bundle: BuildArtifacts,
    },
    /// The build failed. This might be because of a compilation error, or an error internal to DX.
    BuildFailed {
        err: Error,
    },
    /// A running process has received a stdout.
    /// May or may not be a complete line - do not treat it as a line. It will include a line if it is a complete line.
    ///
    /// We will poll lines and any content in a 50ms interval
    StdoutReceived {
        msg: String,
    },
    /// A running process has received a stderr.
    /// May or may not be a complete line - do not treat it as a line. It will include a line if it is a complete line.
    ///
    /// We will poll lines and any content in a 50ms interval
    StderrReceived {
        msg: String,
    },
    /// The running app (DUT) has exited and is no longer running.
    ProcessExited {
        status: ExitStatus,
    },
    /// Waiting for the process failed. This might be because it's hung or being debugged.
    /// This is not the same as the process exiting, so it should just be logged but not treated as an error.
    ProcessWaitFailed {
        err: std::io::Error,
    },
}
// All `status_*` senders deliberately discard the send result (`_ =`): if the receiving
// end of the progress channel has been dropped, progress reporting is simply skipped.
impl BuildContext {
    /// Returns true if this is a client build - basically, is this the primary build?
    /// We try not to duplicate work between client and server builds, like asset copying.
    pub(crate) fn is_primary_build(&self) -> bool {
        self.build_id == BuildId::PRIMARY
    }
    /// Report that wasm-bindgen has started running.
    pub(crate) fn status_wasm_bindgen_start(&self) {
        _ = self.tx.unbounded_send(BuilderUpdate::Progress {
            stage: BuildStage::RunningBindgen,
        });
    }
    /// Report that the wasm bundle is being split.
    pub(crate) fn status_splitting_bundle(&self) {
        _ = self.tx.unbounded_send(BuilderUpdate::Progress {
            stage: BuildStage::SplittingBundle,
        });
    }
    /// Report that bundling has started.
    pub(crate) fn status_start_bundle(&self) {
        _ = self.tx.unbounded_send(BuilderUpdate::Progress {
            stage: BuildStage::Bundling,
        });
    }
    /// Report that gradle (android) is running.
    pub(crate) fn status_running_gradle(&self) {
        _ = self.tx.unbounded_send(BuilderUpdate::Progress {
            stage: BuildStage::RunningGradle,
        })
    }
    /// Report that the binary is being code-signed.
    pub(crate) fn status_codesigning(&self) {
        _ = self.tx.unbounded_send(BuilderUpdate::Progress {
            stage: BuildStage::CodeSigning,
        });
    }
    /// Forward a compiler diagnostic to the frontend.
    pub(crate) fn status_build_diagnostic(&self, message: Diagnostic) {
        _ = self
            .tx
            .unbounded_send(BuilderUpdate::CompilerMessage { message });
    }
    /// Log a cargo error line. Goes to tracing rather than the progress channel.
    pub(crate) fn status_build_error(&self, line: String) {
        tracing::warn!(dx_src = ?TraceSrc::Cargo, "{line}");
    }
    /// Log a cargo message line. Goes to tracing rather than the progress channel.
    pub(crate) fn status_build_message(&self, line: String) {
        tracing::trace!(dx_src = ?TraceSrc::Cargo, "{line}");
    }
    /// Report crate-compilation progress (`count` of `total`, currently compiling `name`).
    pub(crate) fn status_build_progress(&self, count: usize, total: usize, name: String) {
        _ = self.tx.unbounded_send(BuilderUpdate::Progress {
            stage: BuildStage::Compiling {
                current: count,
                total,
                krate: name,
            },
        });
    }
    /// Report that the build is starting; `patch` is true for thin (hot-patch) builds.
    pub(crate) fn status_starting_build(&self, crate_count: usize) {
        _ = self.tx.unbounded_send(BuilderUpdate::Progress {
            stage: BuildStage::Starting {
                patch: matches!(self.mode, BuildMode::Thin { .. }),
                crate_count,
            },
        });
    }
    /// Report that linking has started.
    pub(crate) fn status_starting_link(&self) {
        _ = self.tx.unbounded_send(BuilderUpdate::Progress {
            stage: BuildStage::Linking,
        });
    }
    /// Report asset-copy progress. Associated function (no `&self`) so it can be
    /// called from contexts that only hold a clone of the sender.
    pub(crate) fn status_copied_asset(
        progress: &UnboundedSender<BuilderUpdate>,
        current: usize,
        total: usize,
        path: PathBuf,
    ) {
        _ = progress.unbounded_send(BuilderUpdate::Progress {
            stage: BuildStage::CopyingAssets {
                current,
                total,
                path,
            },
        });
    }
    /// Report that wasm optimization is running.
    pub(crate) fn status_optimizing_wasm(&self) {
        _ = self.tx.unbounded_send(BuilderUpdate::Progress {
            stage: BuildStage::OptimizingWasm,
        });
    }
    /// Report that a hot-patch is being applied.
    pub(crate) fn status_hotpatching(&self) {
        _ = self.tx.unbounded_send(BuilderUpdate::Progress {
            stage: BuildStage::Hotpatching,
        });
    }
    /// Report that required tooling is being installed.
    pub(crate) fn status_installing_tooling(&self) {
        _ = self.tx.unbounded_send(BuilderUpdate::Progress {
            stage: BuildStage::InstallingTooling,
        });
    }
    /// Report that assets are being compressed.
    pub(crate) fn status_compressing_assets(&self) {
        _ = self.tx.unbounded_send(BuilderUpdate::Progress {
            stage: BuildStage::CompressingAssets,
        });
    }
    /// Report that assets are being extracted from the binary.
    pub(crate) fn status_extracting_assets(&self) {
        _ = self.tx.unbounded_send(BuilderUpdate::Progress {
            stage: BuildStage::ExtractingAssets,
        });
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/build/tools.rs | packages/cli/src/build/tools.rs | use crate::Result;
use anyhow::Context;
use itertools::Itertools;
use std::{path::PathBuf, sync::Arc};
use target_lexicon::{
Aarch64Architecture, Architecture, ArmArchitecture, Triple, X86_32Architecture,
};
use tokio::process::Command;
/// The tools for Android (ndk, sdk, etc)
///
/// <https://gist.github.com/Pulimet/5013acf2cd5b28e55036c82c91bd56d8?permalink_comment_id=3678614>
#[derive(Debug, Clone)]
pub(crate) struct AndroidTools {
    /// Root of the Android SDK, if one was found (ANDROID_SDK_ROOT / ANDROID_SDK / ANDROID_HOME).
    pub(crate) sdk: Option<PathBuf>,
    /// Root of the NDK. Required - tool detection fails without it.
    pub(crate) ndk: PathBuf,
    /// Path to the `adb` executable (from the SDK's platform-tools, or bare `adb` from PATH).
    pub(crate) adb: PathBuf,
    /// JAVA_HOME to use, either from the env var or autodetected from known install locations.
    pub(crate) java_home: Option<PathBuf>,
}
/// Locate the Android toolchain (SDK, NDK, adb, JAVA_HOME) from environment variables
/// and well-known install locations. Returns `None` if no NDK could be found, since
/// the NDK is the one component everything else here depends on.
pub fn get_android_tools() -> Option<Arc<AndroidTools>> {
    // We check for SDK first since users might install Android Studio and then install the SDK
    // After that they might install the NDK, so the SDK drives the source of truth.
    let sdk = var_or_debug("ANDROID_SDK_ROOT")
        .or_else(|| var_or_debug("ANDROID_SDK"))
        .or_else(|| var_or_debug("ANDROID_HOME"));
    // Check the ndk. We look for users's overrides first and then look into the SDK.
    // Sometimes users set only the NDK (especially if they're somewhat advanced) so we need to look for it manually
    //
    // Might look like this, typically under "sdk":
    // "/Users/jonkelley/Library/Android/sdk/ndk/25.2.9519653/toolchains/llvm/prebuilt/darwin-x86_64/bin/aarch64-linux-android24-clang"
    let ndk = var_or_debug("NDK_HOME")
        .or_else(|| var_or_debug("ANDROID_NDK_HOME"))
        .or_else(|| {
            // Look for the most recent NDK in the event the user has installed multiple NDK
            // Eventually we might need to drive this from Dioxus.toml
            // NOTE(review): `.sorted()` compares directory names lexicographically, so
            // e.g. "9.x" would sort after "10.x" - confirm this picks the intended NDK
            // when major versions differ in digit count.
            let sdk = sdk.as_ref()?;
            let ndk_dir = sdk.join("ndk").read_dir().ok()?;
            ndk_dir
                .flatten()
                .map(|dir| (dir.file_name(), dir.path()))
                .sorted()
                .next_back()
                .map(|(_, path)| path.to_path_buf())
        })?;
    // Look for ADB in the SDK. If it's not there we'll use `adb` from the PATH
    let adb = sdk
        .as_ref()
        .and_then(|sdk| {
            let tools = sdk.join("platform-tools");
            if tools.join("adb").exists() {
                return Some(tools.join("adb"));
            }
            if tools.join("adb.exe").exists() {
                return Some(tools.join("adb.exe"));
            }
            None
        })
        .unwrap_or_else(|| PathBuf::from("adb"));
    // https://stackoverflow.com/questions/71381050/java-home-is-set-to-an-invalid-directory-android-studio-flutter
    // always respect the user's JAVA_HOME env var above all other options
    //
    // we only attempt autodetection if java_home is not set
    //
    // this is a better fallback than falling onto the users' system java home since many users might
    // not even know which java that is - they just know they have android studio installed
    let java_home = std::env::var_os("JAVA_HOME")
        .map(PathBuf::from)
        .or_else(|| {
            // Attempt to autodetect java home from the android studio path or jdk path on macos
            #[cfg(target_os = "macos")]
            {
                let jbr_home =
                    PathBuf::from("/Applications/Android Studio.app/Contents/jbr/Contents/Home/");
                if jbr_home.exists() {
                    return Some(jbr_home);
                }
                let jre_home =
                    PathBuf::from("/Applications/Android Studio.app/Contents/jre/Contents/Home");
                if jre_home.exists() {
                    return Some(jre_home);
                }
                let jdk_home =
                    PathBuf::from("/Library/Java/JavaVirtualMachines/openjdk.jdk/Contents/Home/");
                if jdk_home.exists() {
                    return Some(jdk_home);
                }
            }
            #[cfg(target_os = "windows")]
            {
                let jbr_home = PathBuf::from("C:\\Program Files\\Android\\Android Studio\\jbr");
                if jbr_home.exists() {
                    return Some(jbr_home);
                }
            }
            // todo(jon): how do we detect java home on linux?
            #[cfg(target_os = "linux")]
            {
                let jbr_home = PathBuf::from("/usr/lib/jvm/java-11-openjdk-amd64");
                if jbr_home.exists() {
                    return Some(jbr_home);
                }
            }
            None
        });
    Some(Arc::new(AndroidTools {
        ndk,
        adb,
        java_home,
        sdk,
    }))
}
impl AndroidTools {
    /// The directory containing the NDK's prebuilt LLVM binaries (clang, lld, llvm-ar, ...)
    /// for the host platform.
    pub(crate) fn android_tools_dir(&self) -> PathBuf {
        let prebuilt = self.ndk.join("toolchains").join("llvm").join("prebuilt");
        if cfg!(target_os = "macos") {
            // for whatever reason, even on aarch64 macos, the linker is under darwin-x86_64
            return prebuilt.join("darwin-x86_64").join("bin");
        }
        if cfg!(target_os = "linux") {
            return prebuilt.join("linux-x86_64").join("bin");
        }
        if cfg!(target_os = "windows") {
            return prebuilt.join("windows-x86_64").join("bin");
        }
        // Otherwise return the first entry in the prebuilt directory
        prebuilt
            .read_dir()
            .expect("Failed to read android toolchains directory")
            .next()
            .expect("Failed to find android toolchains directory")
            .expect("Failed to read android toolchain file")
            .path()
    }
    /// Return the location of the clang toolchain for the given target triple.
    ///
    /// Note that we use clang:
    /// "~/Library/Android/sdk/ndk/25.2.9519653/toolchains/llvm/prebuilt/darwin-x86_64/bin/aarch64-linux-android24-clang"
    ///
    /// But if we needed the linker, we would use:
    /// "~/Library/Android/sdk/ndk/25.2.9519653/toolchains/llvm/prebuilt/darwin-x86_64/bin/ld"
    ///
    /// However, for our purposes, we only go through the cc driver and not the linker directly.
    pub(crate) fn android_cc(&self, triple: &Triple, sdk_version: u32) -> PathBuf {
        // On windows the NDK ships the clang drivers as .cmd wrappers.
        let suffix = if cfg!(target_os = "windows") {
            ".cmd"
        } else {
            ""
        };
        // 32-bit arm uses a different prefix than its rust triple name.
        let target = match triple.architecture {
            Architecture::Arm(_) => "armv7a-linux-androideabi",
            _ => &triple.to_string(),
        };
        self.android_tools_dir()
            .join(format!("{}{}-clang{}", target, sdk_version, suffix))
    }
    /// The NDK sysroot containing the Android platform headers and libraries.
    pub(crate) fn sysroot(&self) -> PathBuf {
        // The sysroot is usually located in the NDK under:
        // "~/Library/Android/sdk/ndk/25.2.9519653/toolchains/llvm/prebuilt/darwin-x86_64/sysroot"
        // or similar, depending on the platform.
        self.android_tools_dir().parent().unwrap().join("sysroot")
    }
    /// The SDK root - either the detected SDK, or derived from the NDK path
    /// (the NDK usually lives at `<sdk>/ndk/<version>`).
    pub(crate) fn sdk(&self) -> PathBuf {
        // /Users/jonathankelley/Library/Android/sdk/ndk/25.2/... (25.2 is the ndk here)
        // /Users/jonathankelley/Library/Android/sdk/
        self.sdk
            .clone()
            .unwrap_or_else(|| self.ndk.parent().unwrap().parent().unwrap().to_path_buf())
    }
    /// Path to the SDK's `emulator` binary.
    pub(crate) fn emulator(&self) -> PathBuf {
        self.sdk().join("emulator").join("emulator")
    }
    /// The NDK's bundled clang resource directory.
    pub(crate) fn clang_folder(&self) -> PathBuf {
        // The clang folder is usually located in the NDK under:
        // "~/Library/Android/sdk/ndk/25.2.9519653/toolchains/llvm/prebuilt/darwin-x86_64/lib/clang/<version>"
        // or similar, depending on the platform.
        self.android_tools_dir()
            .parent()
            .unwrap()
            .join("lib")
            .join("clang")
    }
    /// Path to the NDK's `llvm-ranlib`.
    pub(crate) fn ranlib(&self) -> PathBuf {
        self.android_tools_dir().join("llvm-ranlib")
    }
    /// Path to the NDK's `llvm-ar`.
    pub(crate) fn ar_path(&self) -> PathBuf {
        self.android_tools_dir().join("llvm-ar")
    }
    /// Path to the NDK's generic `clang` C driver.
    pub(crate) fn target_cc(&self) -> PathBuf {
        self.android_tools_dir().join("clang")
    }
    /// Path to the NDK's generic `clang++` C++ driver.
    pub(crate) fn target_cxx(&self) -> PathBuf {
        self.android_tools_dir().join("clang++")
    }
    /// The JAVA_HOME to use for gradle, if one was detected.
    pub(crate) fn java_home(&self) -> Option<PathBuf> {
        self.java_home.clone()
    }
    /// The jniLibs ABI directory name for a given target triple (e.g. "arm64-v8a").
    pub(crate) fn android_jnilib(triple: &Triple) -> &'static str {
        use target_lexicon::Architecture;
        match triple.architecture {
            Architecture::Arm(_) => "armeabi-v7a",
            Architecture::Aarch64(_) => "arm64-v8a",
            Architecture::X86_32(_) => "x86",
            Architecture::X86_64 => "x86_64",
            _ => unimplemented!("Unsupported architecture"),
        }
    }
    /// Query the connected device via `adb shell uname -m` and derive its target triple,
    /// defaulting to `aarch64-linux-android` when no device answers.
    pub(crate) async fn autodetect_android_device_triple(&self) -> Triple {
        // Use the host's triple and then convert field by field
        // ie, the "best" emulator for an m1 mac would be: "aarch64-linux-android"
        // - We assume android is always "linux"
        // - We try to match the architecture unless otherwise specified. This is because
        //   emulators that match the host arch are usually faster.
        let mut triple = "aarch64-linux-android".parse::<Triple>().unwrap();
        // TODO: Wire this up with --device flag. (add `-s serial`` flag before `shell` arg)
        let output = Command::new(&self.adb)
            .arg("shell")
            .arg("uname")
            .arg("-m")
            .output()
            .await
            .map(|out| String::from_utf8(out.stdout));
        match output {
            Ok(Ok(out)) => match out.trim() {
                "armv7l" | "armv8l" => {
                    triple.architecture = Architecture::Arm(ArmArchitecture::Arm)
                }
                "aarch64" => {
                    triple.architecture = Architecture::Aarch64(Aarch64Architecture::Aarch64)
                }
                "i386" => triple.architecture = Architecture::X86_32(X86_32Architecture::I386),
                "x86_64" => {
                    triple.architecture = Architecture::X86_64;
                }
                "" => {
                    tracing::debug!("No device running - probably waiting for emulator");
                }
                other => {
                    tracing::debug!("Unknown architecture from adb: {other}");
                }
            },
            Ok(Err(err)) => {
                tracing::debug!("Failed to parse adb output: {err}");
            }
            Err(err) => {
                tracing::debug!("ADB command failed: {:?}", err);
            }
        };
        triple
    }
    /// Path to the NDK's `libc++_shared.so` for the given target triple.
    pub(crate) fn libcpp_shared(&self, triple: &Triple) -> PathBuf {
        // The libc++_shared.so is usually located in the sysroot under:
        // "~/Library/Android/sdk/ndk/25.2.9519653/toolchains/llvm/prebuilt/darwin-x86_64/sysroot/usr/lib/<arch>/libc++_shared.so"
        // or similar, depending on the platform.
        self.sysroot()
            .join("usr")
            .join("lib")
            .join(Self::sysroot_target(&triple.to_string()))
            .join("libc++_shared.so")
    }
    /// Map a rust target name to the directory name used inside the NDK sysroot.
    pub(crate) fn sysroot_target(rust_target: &str) -> &str {
        (match rust_target {
            "armv7-linux-androideabi" => "arm-linux-androideabi",
            _ => rust_target,
        }) as _
    }
    /// The prebuilt OpenSSL archive bundled into the CLI binary at compile time.
    pub(crate) fn openssl_prebuilt_aar() -> &'static [u8] {
        include_bytes!("../../assets/android/prebuilt/openssl-1.1.1q-beta-1.tar.gz")
    }
    /// Where the prebuilt OpenSSL archive gets unpacked on disk.
    pub(crate) fn openssl_prebuilt_dest() -> PathBuf {
        crate::Workspace::dioxus_data_dir()
            .join("prebuilt")
            .join("openssl-1.1.1q-beta-1")
    }
    /// The unpacked OpenSSL library directory for the given target architecture.
    pub(crate) fn openssl_lib_dir(arch: &Triple) -> PathBuf {
        let libs_dir = Self::openssl_prebuilt_dest().join("ssl").join("libs");
        match arch.architecture {
            Architecture::Arm(_) => libs_dir.join("android.armeabi-v7a"),
            Architecture::Aarch64(_) => libs_dir.join("android.arm64-v8a"),
            Architecture::X86_32(_) => libs_dir.join("android.x86"),
            Architecture::X86_64 => libs_dir.join("android.x86_64"),
            _ => libs_dir.join("android.arm64-v8a"), // Default to arm64-v8a
        }
    }
    /// The unpacked OpenSSL header directory.
    pub(crate) fn openssl_include_dir() -> PathBuf {
        Self::openssl_prebuilt_dest().join("ssl").join("include")
    }
    /// Unzip the prebuilt OpenSSL AAR file into the `.dx/prebuilt/openssl-<version>` directory
    pub(crate) fn unpack_prebuilt_openssl() -> Result<()> {
        let raw_aar = AndroidTools::openssl_prebuilt_aar();
        let aar_dest = AndroidTools::openssl_prebuilt_dest();
        // Skip unpacking if a previous run already did it.
        if aar_dest.exists() {
            tracing::trace!("Prebuilt OpenSSL already exists at {:?}", aar_dest);
            return Ok(());
        }
        std::fs::create_dir_all(aar_dest.parent().context("no parent for aar")?)
            .context("failed to create prebuilt OpenSSL directory")?;
        // Unpack the entire tar.gz file into the destination directory
        let mut archive = tar::Archive::new(flate2::read::GzDecoder::new(raw_aar as &[u8]));
        archive
            .unpack(aar_dest.parent().context("no parent for aar dest")?)
            .context("failed to unpack prebuilt OpenSSL archive")?;
        tracing::debug!("Unpacked prebuilt OpenSSL to {:?}", aar_dest);
        Ok(())
    }
}
/// Read an environment variable as a path, logging (at trace level) when it is unset
/// or unreadable instead of surfacing the error.
fn var_or_debug(name: &str) -> Option<PathBuf> {
    match std::env::var(name) {
        Ok(value) => Some(PathBuf::from(value)),
        Err(_) => {
            tracing::trace!("{name} not set");
            None
        }
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/build/patch.rs | packages/cli/src/build/patch.rs | use anyhow::Context;
use itertools::Itertools;
use object::{
macho::{self},
read::File,
write::{MachOBuildVersion, SectionId, StandardSection, Symbol, SymbolId, SymbolSection},
Endianness, Object, ObjectSymbol, SymbolFlags, SymbolKind, SymbolScope,
};
use rayon::prelude::{IntoParallelRefIterator, ParallelIterator};
use std::{
collections::{BTreeMap, HashMap, HashSet},
ops::{Deref, Range},
path::Path,
path::PathBuf,
sync::{Arc, RwLock},
};
use subsecond_types::{AddressMap, JumpTable};
use target_lexicon::{Architecture, OperatingSystem, PointerWidth, Triple};
use thiserror::Error;
use walrus::{
ConstExpr, DataKind, ElementItems, ElementKind, FunctionBuilder, FunctionId, FunctionKind,
ImportKind, Module, ModuleConfig, TableId,
};
use wasmparser::{
BinaryReader, BinaryReaderError, Linking, LinkingSectionReader, Payload, SymbolInfo,
};
type Result<T, E = PatchError> = std::result::Result<T, E>;
/// Errors that can occur while preparing or applying a hot patch.
#[derive(Debug, Error)]
pub enum PatchError {
    /// Underlying filesystem read failed.
    #[error("Failed to read file: {0}")]
    ReadFs(#[from] std::io::Error),
    /// The patch binary contains no symbol table to diff against.
    #[error("No debug symbols in the patch output. Check your profile's `opt-level` and debug symbols config.")]
    MissingSymbols,
    /// A wasm section could not be parsed.
    #[error("Failed to parse wasm section: {0}")]
    ParseSection(#[from] wasmparser::BinaryReaderError),
    /// A native object file could not be parsed.
    #[error("Failed to parse object file, {0}")]
    ParseObjectFile(#[from] object::read::Error),
    // NOTE(review): "FIle" is a typo, but renaming the variant would break any
    // callers matching on it, so it is documented rather than fixed here.
    /// A native object file could not be written.
    #[error("Failed to write object file: {0}")]
    WriteObjectFIle(#[from] object::write::Error),
    /// Catch-all for anyhow errors bubbling up from module emission.
    #[error("Failed to emit module: {0}")]
    RuntimeError(#[from] anyhow::Error),
    /// The module's PDB (windows debug info) could not be read.
    #[error("Failed to read module's PDB file: {0}")]
    PdbLoadError(#[from] pdb::Error),
    /// The module is structurally invalid for patching.
    #[error("{0}")]
    InvalidModule(String),
    /// Hot patching is not implemented for this target platform.
    #[error("Unsupported platform: {0}")]
    UnsupportedPlatform(String),
}
/// A cache for the hotpatching engine that stores the original module's parsed symbol table.
/// For large projects, this can shave up to 50% off the total patching time. Since we compile the base
/// module with every symbol in it, it can be quite large (hundreds of MB), so storing this here lets
/// us avoid re-parsing the module every time we want to patch it.
///
/// On the Dioxus Docsite, it dropped the patch time from 3s to 1.1s (!)
#[derive(Default)]
pub struct HotpatchModuleCache {
    /// Path of the original module this cache was built from; also drives `PartialEq`.
    pub path: PathBuf,
    // .... wasm stuff
    /// Map from symbol name to its slot in the original wasm module's ifunc table.
    pub symbol_ifunc_map: HashMap<String, i32>,
    /// The parsed original wasm module.
    pub old_wasm: Module,
    /// Raw bytes of the original module (populated for both wasm and native paths).
    pub old_bytes: Vec<u8>,
    /// Export names of the original wasm module.
    pub old_exports: HashSet<String>,
    /// Import names of the original wasm module.
    pub old_imports: HashSet<String>,
    // ... native stuff
    /// Symbol table of the original binary (from the PDB on windows, the object file elsewhere).
    pub symbol_table: HashMap<String, CachedSymbol>,
}
/// A single symbol cached from the original module's symbol table.
pub struct CachedSymbol {
    /// The symbol's address (RVA when sourced from a PDB; 0 when undefined).
    pub address: u64,
    /// Whether the symbol is code (`Text`) or data.
    pub kind: SymbolKind,
    /// True when the symbol has no definition/address in this module.
    pub is_undefined: bool,
    /// True for weak symbols (always false for PDB-sourced symbols).
    pub is_weak: bool,
    /// Size in bytes when known (0 for PDB-sourced symbols).
    pub size: u64,
    /// Format-specific flags preserved from ELF/Mach-O; `None` otherwise.
    pub flags: SymbolFlags<SectionId, SymbolId>,
}
impl PartialEq for HotpatchModuleCache {
    /// Two caches are considered equal when they were built from the same module path;
    /// the (potentially huge) parsed contents are deliberately not compared.
    fn eq(&self, other: &Self) -> bool {
        self.path.as_path() == other.path.as_path()
    }
}
impl std::fmt::Debug for HotpatchModuleCache {
    /// Only the originating path is printed - the cached module contents are far too
    /// large (and not all Debug) to dump.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let Self { path, .. } = self;
        f.debug_struct("HotpatchModuleCache")
            .field("_path", path)
            .finish()
    }
}
impl HotpatchModuleCache {
    /// This caching step is crucial for performance on large projects. The original module can be
    /// quite large (hundreds of MB), so this step drastically speeds it up.
    ///
    /// Three strategies, keyed off the target triple:
    /// - Windows: symbols come from the companion `.pdb` file next to the binary.
    /// - Wasm: symbols and the ifunc table come from parsing the module with walrus.
    /// - Everything else: symbols come from the binary's own symbol table via `object`.
    pub fn new(original: &Path, triple: &Triple) -> Result<Self> {
        let cache = match triple.operating_system {
            OperatingSystem::Windows => {
                use pdb::FallibleIterator;
                // due to lifetimes, this code is unfortunately duplicated.
                // the pdb crate doesn't bind the lifetime of the items in the iterator to the symbol table,
                // so we're stuck with local lifetimes.
                let old_pdb_file = original.with_extension("pdb");
                let old_pdb_file_handle = std::fs::File::open(old_pdb_file)?;
                let mut pdb_file = pdb::PDB::open(old_pdb_file_handle)?;
                let global_symbols = pdb_file.global_symbols()?;
                let address_map = pdb_file.address_map()?;
                let mut symbol_table = HashMap::new();
                let mut symbols = global_symbols.iter();
                while let Ok(Some(symbol)) = symbols.next() {
                    match symbol.parse() {
                        Ok(pdb::SymbolData::Public(data)) => {
                            let rva = data.offset.to_rva(&address_map);
                            let is_undefined = rva.is_none();
                            // treat undefined symbols as 0 to match macho/elf
                            let rva = rva.unwrap_or_default();
                            symbol_table.insert(
                                data.name.to_string().to_string(),
                                CachedSymbol {
                                    address: rva.0 as u64,
                                    // Public symbols carry a `function` flag telling us
                                    // code vs data.
                                    kind: if data.function {
                                        SymbolKind::Text
                                    } else {
                                        SymbolKind::Data
                                    },
                                    is_undefined,
                                    is_weak: false,
                                    size: 0,
                                    flags: SymbolFlags::None,
                                },
                            );
                        }
                        Ok(pdb::SymbolData::Data(data)) => {
                            let rva = data.offset.to_rva(&address_map);
                            let is_undefined = rva.is_none();
                            // treat undefined symbols as 0 to match macho/elf
                            let rva = rva.unwrap_or_default();
                            symbol_table.insert(
                                data.name.to_string().to_string(),
                                CachedSymbol {
                                    address: rva.0 as u64,
                                    kind: SymbolKind::Data,
                                    is_undefined,
                                    is_weak: false,
                                    size: 0,
                                    flags: SymbolFlags::None,
                                },
                            );
                        }
                        _ => {}
                    }
                }
                HotpatchModuleCache {
                    symbol_table,
                    path: original.to_path_buf(),
                    ..Default::default()
                }
            }
            // We need to load the ifunc table from the original module since that gives us the map
            // of name to address (since ifunc entries are also pointers in wasm - ie 0x30 is the 30th
            // entry in the ifunc table)
            //
            // One detail here is that with high optimization levels, the names of functions in the ifunc
            // table will be smaller than the total number of functions in the module. This is because
            // in high opt-levels, functions are merged. Fortunately, the symbol table remains intact
            // and functions with different names point to the same function index (not to be confused
            // with the function index in the module!).
            //
            // We need to take an extra step to account for merged functions by mapping function index
            // to a set of functions that point to the same index.
            _ if triple.architecture == Architecture::Wasm32 => {
                let bytes = std::fs::read(original)?;
                let ParsedModule {
                    module, symbols, ..
                } = parse_module_with_ids(&bytes)?;
                if symbols.symbols.is_empty() {
                    return Err(PatchError::MissingSymbols);
                }
                let name_to_ifunc_old = collect_func_ifuncs(&module);
                // These are the "real" bindings for functions in the module
                // Basically a map between a function's index and its real name
                let func_to_index = module
                    .funcs
                    .par_iter()
                    .filter_map(|f| {
                        let name = f.name.as_deref()?;
                        Some((*symbols.code_symbol_map.get(name)?, name))
                    })
                    .collect::<HashMap<usize, &str>>();
                // Find the corresponding function that shares the same index, but in the ifunc table
                let name_to_ifunc_old: HashMap<_, _> = symbols
                    .code_symbol_map
                    .par_iter()
                    .filter_map(|(name, idx)| {
                        let new_modules_unified_function = func_to_index.get(idx)?;
                        let offset = name_to_ifunc_old.get(new_modules_unified_function)?;
                        Some((*name, *offset))
                    })
                    .collect();
                let symbol_ifunc_map = name_to_ifunc_old
                    .par_iter()
                    .map(|(name, idx)| (name.to_string(), *idx))
                    .collect::<HashMap<_, _>>();
                let old_exports = module
                    .exports
                    .iter()
                    .map(|e| e.name.to_string())
                    .collect::<HashSet<_>>();
                let old_imports = module
                    .imports
                    .iter()
                    .map(|i| i.name.to_string())
                    .collect::<HashSet<_>>();
                HotpatchModuleCache {
                    path: original.to_path_buf(),
                    old_bytes: bytes,
                    symbol_ifunc_map,
                    old_exports,
                    old_imports,
                    old_wasm: module,
                    ..Default::default()
                }
            }
            _ => {
                // Native (macho/elf): read the symbol table straight out of the binary.
                let old_bytes = std::fs::read(original)?;
                let obj = File::parse(&old_bytes as &[u8])?;
                let symbol_table = obj
                    .symbols()
                    .filter_map(|s| {
                        // Only ELF/Mach-O flags are preserved; anything else collapses to None.
                        let flags = match s.flags() {
                            SymbolFlags::None => SymbolFlags::None,
                            SymbolFlags::Elf { st_info, st_other } => {
                                SymbolFlags::Elf { st_info, st_other }
                            }
                            SymbolFlags::MachO { n_desc } => SymbolFlags::MachO { n_desc },
                            _ => SymbolFlags::None,
                        };
                        Some((
                            s.name().ok()?.to_string(),
                            CachedSymbol {
                                address: s.address(),
                                is_undefined: s.is_undefined(),
                                is_weak: s.is_weak(),
                                kind: s.kind(),
                                size: s.size(),
                                flags,
                            },
                        ))
                    })
                    .collect::<HashMap<_, _>>();
                HotpatchModuleCache {
                    symbol_table,
                    path: original.to_path_buf(),
                    old_bytes,
                    ..Default::default()
                }
            }
        };
        Ok(cache)
    }
}
/// Assemble a jump table for windows by diffing the original module's cached PDB
/// symbol table against the patch's freshly-parsed PDB.
///
/// Public symbols are matched by name, mapping each old RVA to the corresponding RVA
/// in the patch. The `main` symbol serves as the shared reference point for computing
/// the ASLR slide at runtime.
pub fn create_windows_jump_table(patch: &Path, cache: &HotpatchModuleCache) -> Result<JumpTable> {
    use pdb::FallibleIterator;
    let old_name_to_addr = &cache.symbol_table;
    let mut new_name_to_addr = HashMap::new();
    let new_pdb_file_handle = std::fs::File::open(patch.with_extension("pdb"))?;
    let mut pdb_file = pdb::PDB::open(new_pdb_file_handle)?;
    let symbol_table = pdb_file.global_symbols()?;
    let address_map = pdb_file.address_map()?;
    let mut symbol_iter = symbol_table.iter();
    // Collect the RVA of every resolvable public symbol in the patch's PDB.
    while let Ok(Some(symbol)) = symbol_iter.next() {
        if let Ok(pdb::SymbolData::Public(data)) = symbol.parse() {
            let rva = data.offset.to_rva(&address_map);
            if let Some(rva) = rva {
                new_name_to_addr.insert(data.name.to_string(), rva.0 as u64);
            }
        }
    }
    // Map each old symbol's address to the matching symbol's address in the patch.
    let mut map = AddressMap::default();
    for (new_name, new_addr) in new_name_to_addr.iter() {
        if let Some(old_addr) = old_name_to_addr.get(new_name.as_ref()) {
            map.insert(old_addr.address, *new_addr);
        }
    }
    let new_base_address = new_name_to_addr
        .get("main")
        .cloned()
        .context("failed to find 'main' symbol in patch")?;
    // Bugfix: the message previously said '_main' (the macOS-mangled spelling) even
    // though the lookup key here is the unmangled 'main' used on windows.
    let aslr_reference = old_name_to_addr
        .get("main")
        .map(|s| s.address)
        .context("failed to find 'main' symbol in original module")?;
    Ok(JumpTable {
        lib: patch.to_path_buf(),
        map,
        new_base_address,
        aslr_reference,
        // The ifunc table is a wasm-only concept.
        ifunc_count: 0,
    })
}
/// Assemble a jump table for "nix" architectures. This uses the `object` crate to parse both
/// executable's symbol tables and then creates a mapping between the two. Unlike windows, the symbol
/// tables are stored within the binary itself, so we can use the `object` crate to parse them.
///
/// We use the `_aslr_reference` as a reference point in the base program to calculate the aslr slide
/// both at compile time and at runtime.
///
/// This does not work for WASM since the `object` crate does not support emitting the WASM format,
/// and because WASM requires more logic to handle the wasm-bindgen transformations.
///
/// # Errors
/// Fails if the patch can't be read or parsed, or if the platform-mangled `main` sentinel is
/// missing from either module — which typically means debug symbols were stripped.
pub fn create_native_jump_table(
    patch: &Path,
    triple: &Triple,
    cache: &HotpatchModuleCache,
) -> Result<JumpTable> {
    let old_name_to_addr = &cache.symbol_table;
    let obj2_bytes = std::fs::read(patch)?;
    let obj2 = File::parse(&obj2_bytes as &[u8])?;
    let mut map = AddressMap::default();
    let new_syms = obj2.symbol_map();

    // Index the patch's symbols by name so they can be lined up with the original module.
    let new_name_to_addr = new_syms
        .symbols()
        .par_iter()
        .map(|s| (s.name(), s.address()))
        .collect::<HashMap<_, _>>();

    // Map every shared symbol from its address in the original module to its address in the patch.
    for (new_name, new_addr) in new_name_to_addr.iter() {
        if let Some(old_addr) = old_name_to_addr.get(*new_name) {
            map.insert(old_addr.address, *new_addr);
        }
    }

    // The platform-mangled `main` symbol is the common anchor for computing the ASLR slide.
    // Bugfixes: "deubg" typo corrected; the first message said "base" while searching the patch.
    let sentinel = main_sentinel(triple);
    let new_base_address = new_name_to_addr
        .get(sentinel)
        .cloned()
        .context("failed to find 'main' symbol in patch - are debug symbols enabled?")?;
    let aslr_reference = old_name_to_addr
        .get(sentinel)
        .map(|s| s.address)
        .context("failed to find 'main' symbol in original module - are debug symbols enabled?")?;

    Ok(JumpTable {
        lib: patch.to_path_buf(),
        map,
        new_base_address,
        aslr_reference,
        ifunc_count: 0,
    })
}
/// In the web, our patchable functions are actually ifuncs
///
/// We need to line up the ifuncs from the main module to the ifuncs in the patch.
///
/// According to the dylink spec, there will be two sets of entries:
///
/// - got.func: functions in the indirect function table
/// - got.mem: data objects in the data segments
///
/// It doesn't seem like we can compile the base module to export these, sadly, so we're going
/// to manually satisfy them here, removing their need to be imported.
///
/// <https://github.com/WebAssembly/tool-conventions/blob/main/DynamicLinking.md>
pub fn create_wasm_jump_table(patch: &Path, cache: &HotpatchModuleCache) -> Result<JumpTable> {
    // Cached view of the base module: its ifunc table (name -> table index), the parsed
    // walrus module, and the raw bytes we re-parse for data-symbol information.
    let name_to_ifunc_old = &cache.symbol_ifunc_map;
    let old = &cache.old_wasm;
    let old_symbols =
        parse_bytes_to_data_segment(&cache.old_bytes).context("Failed to parse data segment")?;
    let new_bytes = std::fs::read(patch).context("Could not read patch file")?;
    let mut new = Module::from_buffer(&new_bytes)?;

    // Buckets for the patch's imports, partitioned by which satisfaction strategy they need.
    let mut got_mems = vec![];
    let mut got_funcs = vec![];
    let mut wbg_funcs = vec![];
    let mut env_funcs = vec![];

    // Collect all the GOT entries from the new module.
    // The GOT imports come from the wasm-ld implementation of the dynamic linking spec
    //
    // https://github.com/WebAssembly/tool-conventions/blob/main/DynamicLinking.md#imports
    //
    // Normally, the base module would synthesize these as exports, but we're not compiling the base
    // module with `--pie` (nor does wasm-bindgen support it yet), so we need to manually satisfy them.
    //
    // One thing to watch out for here is that GOT.func entries have no visibility to any de-duplication
    // or merging, so we need to take great care in the base module to export *every* symbol even if
    // they point to the same function.
    //
    // The other thing to watch out for here is the __wbindgen_placeholder__ entries. These are meant
    // to be satisfied by wasm-bindgen via manual code generation, but we can't run wasm-bindgen on the
    // patch, so we need to do it ourselves. This involves preventing their elimination in the base module
    // by prefixing them with `__saved_wbg_`. When handling the imports here, we need modify the imported
    // name to match the prefixed export name in the base module.
    for import in new.imports.iter() {
        match import.module.as_str() {
            "GOT.func" => {
                // GOT.func entries must already exist in the base module's ifunc table;
                // a miss here means the base wasn't prepared correctly.
                let Some(entry) = name_to_ifunc_old.get(import.name.as_str()).cloned() else {
                    return Err(PatchError::InvalidModule(format!(
                        "Expected to find GOT.func entry in ifunc table: {}",
                        import.name.as_str()
                    )));
                };
                got_funcs.push((import.id(), entry));
            }
            "GOT.mem" => got_mems.push(import.id()),
            "env" => env_funcs.push(import.id()),
            "__wbindgen_placeholder__" => wbg_funcs.push(import.id()),
            m => tracing::trace!("Unknown import: {m}:{}", import.name),
        }
    }

    // We need to satisfy the GOT.func imports of this side module. The GOT imports come from the wasm-ld
    // implementation of the dynamic linking spec
    //
    // https://github.com/WebAssembly/tool-conventions/blob/main/DynamicLinking.md#imports
    //
    // Most importantly, these functions are functions meant to be called indirectly. In normal wasm
    // code generation, only functions that Rust code references via pointers are given a slot in
    // the indirection function table. The optimization here traditionally meaning that if a function
    // can be called directly, then it doesn't need to be referenced indirectly and potentially inlined
    // or dissolved during LTO.
    //
    // In our "fat build" setup, we aggregated all symbols from dependencies into a `dependencies.ar` file.
    // By promoting these functions to the dynamic scope, we also prevent their inlining because the
    // linker can still expect some form of interposition to happen, requiring the symbol *actually*
    // exists.
    //
    // Our technique here takes advantage of that and the [`prepare_wasm_base_module`] function promotes
    // every possible function to the indirect function table. This means that the GOT imports that
    // `relocation-model=pic` synthesizes can reference the functions via the indirect function table
    // even if they are not normally synthesized in regular wasm code generation.
    //
    // Normally, the dynamic linker setup would resolve GOT.func against the same GOT.func export in
    // the main module, but we don't have that. Instead, we simply re-parse the main module, aggregate
    // its ifunc table, and then resolve directly to the index in that table.
    for (import_id, ifunc_index) in got_funcs {
        let import = new.imports.get(import_id);
        let ImportKind::Global(id) = import.kind else {
            return Err(PatchError::InvalidModule(format!(
                "Expected GOT.func import to be a global: {}",
                import.name
            )));
        };

        // "satisfying" the import means removing it from the import table and replacing its target
        // value with a local global.
        new.imports.delete(import_id);
        new.globals.get_mut(id).kind =
            walrus::GlobalKind::Local(ConstExpr::Value(walrus::ir::Value::I32(ifunc_index)));
    }

    // We need to satisfy the GOT.mem imports of this side module. The GOT.mem imports come from the wasm-ld
    // implementation of the dynamic linking spec
    //
    // https://github.com/WebAssembly/tool-conventions/blob/main/DynamicLinking.md#imports
    //
    // Unlike the ifunc table, the GOT.mem imports do not need any additional post-processing of the
    // base module to satisfy. Since our patching approach works but leveraging the experimental dynamic
    // PIC support in rustc[wasm] and wasm-ld, we are using the GOT.mem imports as a way of identifying
    // data segments that are present in the base module.
    //
    // Normally, the dynamic linker would synthesize corresponding GOT.mem exports in the main module,
    // but since we're patching on-the-fly, this table will always be out-of-date.
    //
    // Instead, we use the symbol table from the base module to find the corresponding data symbols
    // and then resolve the offset of the data segment in the main module. Using the symbol table
    // can be somewhat finicky if the user compiled the code with a high-enough opt level that nukes
    // the names of the data segments, but otherwise this system works well.
    //
    // We simply use the name of the import as a key into the symbol table and then its offset into
    // its data segment as the value within the global.
    for mem in got_mems {
        let import = new.imports.get(mem);
        let data_symbol_idx = *old_symbols
            .data_symbol_map
            .get(import.name.as_str())
            .with_context(|| {
                format!("Failed to find GOT.mem import by its name: {}", import.name)
            })?;
        let data_symbol = old_symbols
            .data_symbols
            .get(&data_symbol_idx)
            .context("Failed to find data symbol by its index")?;
        let data = old
            .data
            .iter()
            .nth(data_symbol.which_data_segment)
            .context("Missing data segment in the main module")?;

        // Resolve the data segment's base offset; only constant-offset active segments are valid.
        let offset = match data.kind {
            DataKind::Active {
                offset: ConstExpr::Value(walrus::ir::Value::I32(idx)),
                ..
            } => idx,
            DataKind::Active {
                offset: ConstExpr::Value(walrus::ir::Value::I64(idx)),
                ..
            } => idx as i32,
            _ => {
                return Err(PatchError::InvalidModule(format!(
                    "Data segment of invalid table: {:?}",
                    data.kind
                )));
            }
        };
        let ImportKind::Global(global_id) = import.kind else {
            return Err(PatchError::InvalidModule(
                "Expected GOT.mem import to be a global".to_string(),
            ));
        };

        // "satisfying" the import means removing it from the import table and replacing its target
        // value with a local global.
        new.imports.delete(mem);
        new.globals.get_mut(global_id).kind = walrus::GlobalKind::Local(ConstExpr::Value(
            walrus::ir::Value::I32(offset + data_symbol.segment_offset as i32),
        ));
    }

    // wasm-bindgen has a limit on the number of exports a module can have, so we need to call the main
    // module's functions indirectly. This is done by dropping the env import and replacing it with a
    // local function that calls the indirect function from the table.
    //
    // https://github.com/emscripten-core/emscripten/issues/22863
    let ifunc_table_initializer = new
        .elements
        .iter()
        .find_map(|e| match e.kind {
            ElementKind::Active { table, .. } => Some(table),
            _ => None,
        })
        .context("Missing ifunc table")?;
    for env_func_import in env_funcs {
        let import = new.imports.get(env_func_import);
        let ImportKind::Function(func_id) = import.kind else {
            continue;
        };

        // Names the base module genuinely exports/imports can be left for the loader to resolve.
        if cache.old_exports.contains(import.name.as_str())
            || cache.old_imports.contains(import.name.as_str())
        {
            continue;
        }

        let name = import.name.as_str().to_string();

        // Known ifunc: rewrite the import into a local thunk that call_indirect's the old slot.
        if let Some(table_idx) = name_to_ifunc_old.get(import.name.as_str()) {
            new.imports.delete(env_func_import);
            convert_func_to_ifunc_call(
                &mut new,
                ifunc_table_initializer,
                func_id,
                *table_idx,
                name.clone(),
            );
            continue;
        }

        // Bindgen-generated symbols get a slot-0 thunk (see the wbg handling below).
        if name_is_bindgen_symbol(&name) {
            new.imports.delete(env_func_import);
            convert_func_to_ifunc_call(&mut new, ifunc_table_initializer, func_id, 0, name);
            continue;
        }

        tracing::warn!("[hotpatching]: Symbol slipped through the cracks: {}", name);
    }

    // Wire up the preserved intrinsic functions that we saved before running wasm-bindgen to the expected
    // imports from the patch.
    for import_id in wbg_funcs {
        let import = new.imports.get_mut(import_id);
        let ImportKind::Function(func_id) = import.kind else {
            continue;
        };

        // Re-point the placeholder import at the `__saved_wbg_`-prefixed export in the base module.
        import.module = "env".into();
        import.name = format!("__saved_wbg_{}", import.name);

        if name_is_bindgen_symbol(&import.name) {
            let name = import.name.as_str().to_string();
            new.imports.delete(import_id);
            convert_func_to_ifunc_call(&mut new, ifunc_table_initializer, func_id, 0, name);
        }
    }

    // Rewrite the wbg_cast functions to call the indirect functions from the original module.
    // This is necessary because wasm-bindgen uses these calls to perform dynamic type casting through
    // the JS layer. If we don't rewrite these, they end up as calls to `breaks_if_inlined` functions
    // which are no-ops and get rewritten by the wbindgen post-processing step.
    //
    // Here, we find the corresponding wbg_cast function in the old module by name and then rewrite
    // the patch module's cast function to call the indirect function from the original module.
    //
    // See the wbg_cast implementation in wasm-bindgen for more details:
    // <https://github.com/wasm-bindgen/wasm-bindgen/blob/f61a588f674304964a2062b2307edb304aed4d16/src/rt/mod.rs#L30>
    let new_func_ids = new.funcs.iter().map(|f| f.id()).collect::<Vec<_>>();
    for func_id in new_func_ids {
        let Some(name) = new.funcs.get(func_id).name.as_deref() else {
            continue;
        };
        if name.contains("wasm_bindgen4__rt8wbg_cast") && !name.contains("breaks_if_inline") {
            let name = name.to_string();
            let old_idx = name_to_ifunc_old
                .get(&name)
                .copied()
                .ok_or_else(|| anyhow::anyhow!("Could not find matching wbg_cast function for [{name}] - must generate new JS bindings."))?;
            convert_func_to_ifunc_call(&mut new, ifunc_table_initializer, func_id, old_idx, name);
        }
    }

    // Wipe away the unnecessary sections
    let customs = new.customs.iter().map(|f| f.0).collect::<Vec<_>>();
    for custom_id in customs {
        if let Some(custom) = new.customs.get_mut(custom_id) {
            if custom.name().contains("manganis") || custom.name().contains("__wasm_bindgen") {
                new.customs.delete(custom_id);
            }
        }
    }

    // Clear the start function from the patch - we don't want any code automatically running!
    new.start = None;

    // Update the wasm module on the filesystem to use the newly lifted version
    let lib = patch.to_path_buf();
    std::fs::write(&lib, new.emit_wasm())?;

    // And now assemble the jump table by mapping the old ifunc table to the new one, by name
    //
    // The ifunc_count will be passed to the dynamic loader so it can allocate the right amount of space
    // in the indirect function table when loading the patch.
    let name_to_ifunc_new = collect_func_ifuncs(&new);
    let ifunc_count = name_to_ifunc_new.len() as u64;
    let mut map = AddressMap::default();
    for (name, idx) in name_to_ifunc_new.iter() {
        // Find the corresponding ifunc in the old module by name
        if let Some(old_idx) = name_to_ifunc_old.get(*name) {
            map.insert(*old_idx as u64, *idx as u64);
            continue;
        }
    }

    // WASM patches don't use ASLR addresses; the map is in table-index space, not memory space.
    Ok(JumpTable {
        map,
        lib,
        ifunc_count,
        aslr_reference: 0,
        new_base_address: 0,
    })
}
/// Replace the body of `func_id` with a thunk that forwards every argument to the function
/// stored at `table_idx` in the indirect function table, preserving the original signature.
///
/// The (possibly imported) function becomes a local function named `name` whose body is:
/// push all params, push `table_idx`, then `call_indirect` through the given table.
fn convert_func_to_ifunc_call(
    new: &mut Module,
    ifunc_table_initializer: TableId,
    func_id: FunctionId,
    table_idx: i32,
    name: String,
) {
    use walrus::ir;

    // Look up the signature of the function we're about to replace.
    let ty_id = new.funcs.get_mut(func_id).ty();
    let signature = new.types.get(ty_id);
    let params = signature.params().to_vec();
    let results = signature.results().to_vec();

    // One fresh local per parameter so the thunk can forward its arguments.
    let arg_locals = params
        .iter()
        .map(|param_ty| new.locals.add(*param_ty))
        .collect::<Vec<_>>();

    // Build the replacement body: arguments first, then the table slot, then the indirect call.
    let mut builder = FunctionBuilder::new(&mut new.types, &params, &results);
    let mut seq = builder.name(name).func_body();
    for local in arg_locals.iter() {
        seq.local_get(*local);
    }
    seq.instr(ir::Instr::Const(ir::Const {
        value: ir::Value::I32(table_idx),
    }));
    seq.instr(ir::Instr::CallIndirect(ir::CallIndirect {
        ty: ty_id,
        table: ifunc_table_initializer,
    }));

    // Swap the function's kind for the locally-built thunk.
    new.funcs.get_mut(func_id).kind = FunctionKind::Local(builder.local_func(arg_locals));
}
/// Walk every active element segment of `m` and record, for each named function it contains,
/// the index that function occupies in the indirect function table.
fn collect_func_ifuncs(m: &Module) -> HashMap<&str, i32> {
    let mut func_to_offset = HashMap::new();

    for el in m.elements.iter() {
        // Only active segments place functions into a table; determine their base offset.
        let base = match &el.kind {
            ElementKind::Active { offset, .. } => match offset {
                // Explicit constant offsets position the segment at a fixed table index.
                ConstExpr::Value(walrus::ir::Value::I32(idx)) => *idx,
                ConstExpr::Value(walrus::ir::Value::I64(idx)) => *idx as i32,
                // Globals are usually imports and thus don't add a specific offset
                // ie the ifunc table is offset by a global, so we don't need to push the offset out
                ConstExpr::Global(_) => 0,
                _ => continue,
            },
            _ => continue,
        };

        // Record each named function at base-offset-plus-position within the segment.
        if let ElementItems::Functions(ids) = &el.items {
            for (position, id) in ids.iter().enumerate() {
                if let Some(name) = m.funcs.get(*id).name.as_deref() {
                    func_to_offset.insert(name, base + position as i32);
                }
            }
        }
    }

    func_to_offset
}
/// Resolve the undefined symbols in the incrementals against the original binary, returning an object
/// file that can be linked along the incrementals.
///
/// This makes it possible to dlopen the resulting object file and use the original binary's symbols
/// bypassing the dynamic linker.
///
/// This is very similar to malware :) but it's not!
///
/// Note - this function is not defined to run on WASM binaries. The `object` crate does not
/// support emitting the WASM format; WASM is handled by the dedicated wasm jump-table path.
///
/// todo... we need to wire up the cache
pub fn create_undefined_symbol_stub(
cache: &HotpatchModuleCache,
incrementals: &[PathBuf],
triple: &Triple,
aslr_reference: u64,
) -> Result<Vec<u8>> {
let sorted: Vec<_> = incrementals.iter().sorted().collect();
// Find all the undefined symbols in the incrementals
let mut undefined_symbols = HashSet::new();
let mut defined_symbols = HashSet::new();
for path in sorted {
let bytes = std::fs::read(path).with_context(|| format!("failed to read {path:?}"))?;
let file = File::parse(bytes.deref() as &[u8])?;
for symbol in file.symbols() {
if symbol.is_undefined() {
undefined_symbols.insert(symbol.name()?.to_string());
} else if symbol.is_global() {
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | true |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/config/app.rs | packages/cli/src/config/app.rs | use serde::{Deserialize, Serialize};
use std::path::PathBuf;
/// The `[application]` table of a `Dioxus.toml`: project-level paths plus per-platform
/// configuration files that `dx` merges into the build.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct ApplicationConfig {
    /// The path where global assets will be added when components are added with `dx components add`
    #[serde(default)]
    pub(crate) asset_dir: Option<PathBuf>,

    // Optional override for the build output directory.
    #[serde(default)]
    pub(crate) out_dir: Option<PathBuf>,

    // Public directory; defaults to `public` (see `public_dir_default`). Setting it to an
    // empty string in the config yields `None` via `empty_string_is_none`, disabling it.
    #[serde(default = "public_dir_default")]
    #[serde(deserialize_with = "empty_string_is_none")]
    pub(crate) public_dir: Option<PathBuf>,

    // Input stylesheet handed to Tailwind, if the project uses it.
    #[serde(default)]
    pub(crate) tailwind_input: Option<PathBuf>,

    // Output path for the compiled Tailwind stylesheet.
    #[serde(default)]
    pub(crate) tailwind_output: Option<PathBuf>,

    /// Use this file for the info.plist associated with the iOS app.
    /// `dx` will merge any required settings into this file required to build the app
    #[serde(default)]
    pub(crate) ios_info_plist: Option<PathBuf>,

    /// Use this file for the info.plist associated with the macOS app.
    /// `dx` will merge any required settings into this file required to build the app
    #[serde(default)]
    pub(crate) macos_info_plist: Option<PathBuf>,

    /// Use this file for the entitlements.plist associated with the iOS app.
    #[serde(default)]
    pub(crate) ios_entitlements: Option<PathBuf>,

    /// Use this file for the entitlements.plist associated with the macOS app.
    #[serde(default)]
    pub(crate) macos_entitlements: Option<PathBuf>,

    /// Use this file for the AndroidManifest.xml associated with the Android app.
    /// `dx` will merge any required settings into this file required to build the app
    #[serde(default)]
    pub(crate) android_manifest: Option<PathBuf>,

    /// Use this file for the MainActivity.kt associated with the Android app.
    #[serde(default)]
    pub(crate) android_main_activity: Option<PathBuf>,

    /// Specified minimum sdk version for gradle to build the app with.
    #[serde(default)]
    pub(crate) android_min_sdk_version: Option<u32>,
}
/// Serde default for `ApplicationConfig::public_dir`: the relative `public` directory.
fn public_dir_default() -> Option<PathBuf> {
    Some(PathBuf::from("public"))
}
/// Serde helper: deserialize an optional string field, treating an empty string the same as
/// an absent value — so `field = ""` in the config yields `None` rather than `Some("")`.
fn empty_string_is_none<'de, D>(deserializer: D) -> Result<Option<PathBuf>, D::Error>
where
    D: serde::Deserializer<'de>,
{
    let raw: Option<String> = Option::deserialize(deserializer)?;
    // Drop empty strings, then convert whatever survives into a path.
    Ok(raw.filter(|s| !s.is_empty()).map(PathBuf::from))
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/config/serve.rs | packages/cli/src/config/serve.rs | use clap::Parser;
/// The arguments for the address the server will run on
#[derive(Clone, Debug, Default, Parser)]
pub(crate) struct AddressArguments {
    /// The port the server will run on
    // NOTE(review): the default when unset is resolved by the consuming command — confirm there.
    #[clap(long)]
    pub(crate) port: Option<u16>,

    /// The address the server will run on
    #[clap(long)]
    pub(crate) addr: Option<std::net::IpAddr>,
}
impl crate::Anonymized for AddressArguments {
    /// Produce a telemetry-safe view of the address arguments: the port is kept as-is,
    /// while the IP is reduced to a coarse category so no concrete address is reported.
    fn anonymized(&self) -> serde_json::Value {
        // Classify the address without ever emitting the literal IP.
        let addr_kind = self.addr.map(|ip| {
            if ip.is_loopback() {
                "loopback"
            } else if ip.is_unspecified() {
                "unspecified"
            } else {
                "other"
            }
        });
        serde_json::json!({
            "port": self.port,
            "addr": addr_kind,
        })
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/config/dioxus_config.rs | packages/cli/src/config/dioxus_config.rs | use crate::config::component::ComponentConfig;
use super::*;
use serde::{Deserialize, Serialize};
/// Root of a parsed `Dioxus.toml` configuration file (see the tests below, which parse
/// TOML with an `[application]` table directly into this type).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct DioxusConfig {
    // The `[application]` table is required; all the others fall back to their defaults.
    pub(crate) application: ApplicationConfig,

    #[serde(default)]
    pub(crate) web: WebConfig,

    #[serde(default)]
    pub(crate) bundle: BundleConfig,

    #[serde(default)]
    pub(crate) components: ComponentConfig,
}
impl Default for DioxusConfig {
    /// Hand-rolled default mirroring what deserializing an empty config would produce.
    fn default() -> Self {
        Self {
            application: ApplicationConfig {
                asset_dir: None,
                out_dir: None,
                // Keep in sync with `public_dir_default()` in app.rs, which serde uses
                // when deserializing a config that omits `public_dir`.
                public_dir: Some("public".into()),
                tailwind_input: None,
                tailwind_output: None,
                ios_info_plist: None,
                android_manifest: None,
                android_main_activity: None,
                android_min_sdk_version: None,
                macos_info_plist: None,
                ios_entitlements: None,
                macos_entitlements: None,
            },
            web: WebConfig {
                app: WebAppConfig {
                    title: default_title(),
                    base_path: None,
                },
                proxy: vec![],
                watcher: Default::default(),
                resource: WebResourceConfig {
                    dev: WebDevResourceConfig {
                        style: vec![],
                        script: vec![],
                    },
                    style: Some(vec![]),
                    script: Some(vec![]),
                },
                https: WebHttpsConfig {
                    enabled: None,
                    mkcert: None,
                    key_path: None,
                    cert_path: None,
                },
                pre_compress: false,
                wasm_opt: Default::default(),
            },
            bundle: BundleConfig::default(),
            components: ComponentConfig::default(),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // The built-in default should ship with `public` as the public directory.
    #[test]
    fn static_dir_defaults_to_public() {
        let config = DioxusConfig::default();
        assert_eq!(
            config.application.public_dir,
            Some(std::path::PathBuf::from("public"))
        );
    }

    // An explicit `public_dir` value in the TOML overrides the default.
    #[test]
    fn static_dir_can_be_overridden() {
        let source = r#"
[application]
public_dir = "public2"
"#;
        let config: DioxusConfig = toml::from_str(source).expect("parse config");
        assert_eq!(
            config.application.public_dir.as_deref(),
            Some(std::path::Path::new("public2"))
        );
    }

    // An empty string disables the public directory entirely (via `empty_string_is_none`).
    #[test]
    fn static_dir_can_be_disabled() {
        let source = r#"
[application]
public_dir = ""
"#;
        let config: DioxusConfig = toml::from_str(source).expect("parse config");
        assert_eq!(config.application.public_dir.as_deref(), None);
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/config/bundle.rs | packages/cli/src/config/bundle.rs | use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::{collections::HashMap, str::FromStr};
/// The `[bundle]` table of a `Dioxus.toml`: app metadata plus per-platform bundler settings.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub(crate) struct BundleConfig {
    // Bundle identifier for the app (conventionally reverse-domain, e.g. `com.example.app`).
    #[serde(default)]
    pub(crate) identifier: Option<String>,
    // The app's publisher.
    #[serde(default)]
    pub(crate) publisher: Option<String>,
    // Icon entries for the bundle.
    #[serde(default)]
    pub(crate) icon: Option<Vec<String>>,
    // Additional resources to include in the bundle.
    #[serde(default)]
    pub(crate) resources: Option<Vec<String>>,
    // Copyright notice for the app.
    #[serde(default)]
    pub(crate) copyright: Option<String>,
    // Application category.
    #[serde(default)]
    pub(crate) category: Option<String>,
    // Short, one-line description of the app.
    #[serde(default)]
    pub(crate) short_description: Option<String>,
    // Longer description of the app.
    #[serde(default)]
    pub(crate) long_description: Option<String>,
    // External binaries to ship alongside the app.
    #[serde(default)]
    pub(crate) external_bin: Option<Vec<String>>,
    // Debian (.deb) specific settings.
    #[serde(default)]
    pub(crate) deb: Option<DebianSettings>,
    // macOS specific settings.
    #[serde(default)]
    pub(crate) macos: Option<MacOsSettings>,
    // Windows specific settings.
    #[serde(default)]
    pub(crate) windows: Option<WindowsSettings>,
    // Android specific settings.
    #[serde(default)]
    pub(crate) android: Option<AndroidSettings>,
}
/// Settings specific to Debian (`.deb`) packaging.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub(crate) struct DebianSettings {
    // OS-specific settings:
    /// the list of debian dependencies.
    #[serde(default)]
    pub depends: Option<Vec<String>>,
    /// the list of recommended debian dependencies.
    #[serde(default)]
    pub recommends: Option<Vec<String>>,
    /// the list of dependencies the package provides.
    #[serde(default)]
    pub provides: Option<Vec<String>>,
    /// the list of package conflicts.
    #[serde(default)]
    pub conflicts: Option<Vec<String>>,
    /// the list of package replaces.
    #[serde(default)]
    pub replaces: Option<Vec<String>>,
    /// List of custom files to add to the deb package.
    /// Maps the path on the debian package to the path of the file to include (relative to the current working directory).
    #[serde(default)]
    pub files: HashMap<PathBuf, PathBuf>,
    /// Path to a custom desktop file Handlebars template.
    ///
    /// Available variables: `categories`, `comment` (optional), `exec`, `icon` and `name`.
    #[serde(default)]
    pub desktop_template: Option<PathBuf>,
    /// Define the section in Debian Control file. See : <https://www.debian.org/doc/debian-policy/ch-archive.html#s-subsections>
    #[serde(default)]
    pub section: Option<String>,
    /// Change the priority of the Debian Package. By default, it is set to `optional`.
    /// Recognized Priorities as of now are :  `required`, `important`, `standard`, `optional`, `extra`
    #[serde(default)]
    pub priority: Option<String>,
    /// Path of the uncompressed Changelog file, to be stored at /usr/share/doc/package-name/changelog.gz. See
    /// <https://www.debian.org/doc/debian-policy/ch-docs.html#changelog-files-and-release-notes>
    #[serde(default)]
    pub changelog: Option<PathBuf>,
    /// Path to script that will be executed before the package is unpacked. See
    /// <https://www.debian.org/doc/debian-policy/ch-maintainerscripts.html>
    #[serde(default)]
    pub pre_install_script: Option<PathBuf>,
    /// Path to script that will be executed after the package is unpacked. See
    /// <https://www.debian.org/doc/debian-policy/ch-maintainerscripts.html>
    #[serde(default)]
    pub post_install_script: Option<PathBuf>,
    /// Path to script that will be executed before the package is removed. See
    /// <https://www.debian.org/doc/debian-policy/ch-maintainerscripts.html>
    #[serde(default)]
    pub pre_remove_script: Option<PathBuf>,
    /// Path to script that will be executed after the package is removed. See
    /// <https://www.debian.org/doc/debian-policy/ch-maintainerscripts.html>
    #[serde(default)]
    pub post_remove_script: Option<PathBuf>,
}
/// Settings for the WiX toolset, used to build Windows MSI installers.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub(crate) struct WixSettings {
    // Installer languages, each optionally paired with a locale file path.
    #[serde(default)]
    pub(crate) language: Vec<(String, Option<PathBuf>)>,
    // Custom WiX source template to use instead of the bundler default.
    #[serde(default)]
    pub(crate) template: Option<PathBuf>,
    // Extra WiX fragment files to compile into the installer.
    #[serde(default)]
    pub(crate) fragment_paths: Vec<PathBuf>,
    // WiX `ComponentGroup` references to include from the fragments.
    #[serde(default)]
    pub(crate) component_group_refs: Vec<String>,
    // WiX `Component` references to include.
    #[serde(default)]
    pub(crate) component_refs: Vec<String>,
    // WiX `FeatureGroup` references to include.
    #[serde(default)]
    pub(crate) feature_group_refs: Vec<String>,
    // WiX `Feature` references to include.
    #[serde(default)]
    pub(crate) feature_refs: Vec<String>,
    // WiX `Merge` module references to include.
    #[serde(default)]
    pub(crate) merge_refs: Vec<String>,
    // Skip installing the WebView runtime from this installer.
    #[serde(default)]
    pub(crate) skip_webview_install: bool,
    // License file presented by the installer.
    #[serde(default)]
    pub(crate) license: Option<PathBuf>,
    // Enable the elevated update task.
    #[serde(default)]
    pub(crate) enable_elevated_update_task: bool,
    // Path to the installer banner image.
    #[serde(default)]
    pub(crate) banner_path: Option<PathBuf>,
    // Path to the installer dialog background image.
    #[serde(default)]
    pub(crate) dialog_image_path: Option<PathBuf>,
    // Produce a FIPS-compliant installer.
    #[serde(default)]
    pub(crate) fips_compliant: bool,
    /// MSI installer version in the format `major.minor.patch.build` (build is optional).
    ///
    /// Because a valid version is required for MSI installer, it will be derived from [`tauri_bundler::PackageSettings::version`] if this field is not set.
    ///
    /// The first field is the major version and has a maximum value of 255. The second field is the minor version and has a maximum value of 255.
    /// The third and fourth fields have a maximum value of 65,535.
    ///
    /// See <https://learn.microsoft.com/en-us/windows/win32/msi/productversion> for more info.
    #[serde(default)]
    pub version: Option<String>,
    /// A GUID upgrade code for MSI installer. This code **_must stay the same across all of your updates_**,
    /// otherwise, Windows will treat your update as a different app and your users will have duplicate versions of your app.
    ///
    /// By default, tauri generates this code by generating a Uuid v5 using the string `<productName>.exe.app.x64` in the DNS namespace.
    /// You can use Tauri's CLI to generate and print this code for you by running `tauri inspect wix-upgrade-code`.
    ///
    /// It is recommended that you set this value in your tauri config file to avoid accidental changes in your upgrade code
    /// whenever you want to change your product name.
    #[serde(default)]
    pub upgrade_code: Option<uuid::Uuid>,
}
/// Settings specific to macOS app bundles.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub(crate) struct MacOsSettings {
    // Version string for the produced bundle.
    #[serde(default)]
    pub(crate) bundle_version: Option<String>,
    // Frameworks to include with the app.
    #[serde(default)]
    pub(crate) frameworks: Option<Vec<String>>,
    // Minimum macOS version the app declares support for.
    #[serde(default)]
    pub(crate) minimum_system_version: Option<String>,
    // License associated with the bundle.
    #[serde(default)]
    pub(crate) license: Option<String>,
    // Exception domain for the app's network security configuration.
    #[serde(default)]
    pub(crate) exception_domain: Option<String>,
    // Code-signing identity to sign the bundle with.
    #[serde(default)]
    pub(crate) signing_identity: Option<String>,
    // Provider short name used when signing/notarizing.
    #[serde(default)]
    pub(crate) provider_short_name: Option<String>,
    // Entitlements used when code-signing.
    #[serde(default)]
    pub(crate) entitlements: Option<String>,
    // Path to a custom Info.plist.
    #[serde(default)]
    pub(crate) info_plist_path: Option<PathBuf>,
    // Custom name for the produced bundle.
    #[serde(default)]
    pub(crate) bundle_name: Option<String>,
    /// List of custom files to add to the application bundle.
    /// Maps the path in the Contents directory in the app to the path of the file to include (relative to the current working directory).
    #[serde(default)]
    pub files: HashMap<PathBuf, PathBuf>,
    /// Preserve the hardened runtime version flag, see <https://developer.apple.com/documentation/security/hardened_runtime>
    ///
    /// Settings this to `false` is useful when using an ad-hoc signature, making it less strict.
    #[serde(default = "default_hardened_runtime")]
    pub hardened_runtime: bool,
}
/// Serde default for `MacOsSettings::hardened_runtime`: enabled unless explicitly disabled.
fn default_hardened_runtime() -> bool {
    true
}
/// Settings specific to Windows bundles and installers.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub(crate) struct WindowsSettings {
    // Digest algorithm to use when signing binaries.
    #[serde(default)]
    pub(crate) digest_algorithm: Option<String>,
    // Thumbprint of the signing certificate in the Windows certificate store.
    #[serde(default)]
    pub(crate) certificate_thumbprint: Option<String>,
    // Timestamping server URL used during signing.
    #[serde(default)]
    pub(crate) timestamp_url: Option<String>,
    // Whether to use the Time-Stamp Protocol when signing.
    #[serde(default)]
    pub(crate) tsp: bool,
    // WiX (MSI) installer settings.
    #[serde(default)]
    pub(crate) wix: Option<WixSettings>,
    // Path to the application icon.
    #[serde(default)]
    pub(crate) icon_path: Option<PathBuf>,
    // How the installer provisions the WebView2 runtime (see `WebviewInstallMode`).
    #[serde(default)]
    pub(crate) webview_install_mode: WebviewInstallMode,
    // Path to a fixed WebView2 runtime to use instead of installing one.
    #[serde(default)]
    pub(crate) webview_fixed_runtime_path: Option<PathBuf>,
    // Whether installers are allowed to downgrade an existing installation.
    #[serde(default)]
    pub(crate) allow_downgrades: bool,
    // NSIS installer settings.
    #[serde(default)]
    pub(crate) nsis: Option<NsisSettings>,
    /// Specify a custom command to sign the binaries.
    /// This command needs to have a `%1` in it which is just a placeholder for the binary path,
    /// which we will detect and replace before calling the command.
    ///
    /// Example:
    /// ```text
    /// sign-cli --arg1 --arg2 %1
    /// ```
    ///
    /// By Default we use `signtool.exe` which can be found only on Windows so
    /// if you are on another platform and want to cross-compile and sign you will
    /// need to use another tool like `osslsigncode`.
    #[serde(default)]
    pub sign_command: Option<CustomSignCommandSettings>,
}
/// NSIS installer configuration for Windows bundles.
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub(crate) struct NsisSettings {
    /// Custom NSIS installer template to use instead of the built-in one.
    #[serde(default)]
    pub(crate) template: Option<PathBuf>,
    /// License file shown during installation.
    #[serde(default)]
    pub(crate) license: Option<PathBuf>,
    /// Header image for the installer UI.
    #[serde(default)]
    pub(crate) header_image: Option<PathBuf>,
    /// Sidebar image for the installer UI.
    #[serde(default)]
    pub(crate) sidebar_image: Option<PathBuf>,
    /// Icon for the generated installer executable.
    #[serde(default)]
    pub(crate) installer_icon: Option<PathBuf>,
    /// Per-user, per-machine, or both (see [`NSISInstallerMode`]).
    #[serde(default)]
    pub(crate) install_mode: NSISInstallerMode,
    /// Languages to include in the installer.
    #[serde(default)]
    pub(crate) languages: Option<Vec<String>>,
    /// Map of language name to a custom language file — NOTE(review): exact file format assumed; confirm.
    #[serde(default)]
    pub(crate) custom_language_files: Option<HashMap<String, PathBuf>>,
    /// Show a language selector before installing.
    #[serde(default)]
    pub(crate) display_language_selector: bool,
    /// Custom Start Menu folder name.
    #[serde(default)]
    pub(crate) start_menu_folder: Option<String>,
    /// Path to a file containing NSIS installer hooks to include.
    #[serde(default)]
    pub(crate) installer_hooks: Option<PathBuf>,
    /// Try to ensure that the WebView2 version is equal to or newer than this version,
    /// if the user's WebView2 is older than this version,
    /// the installer will try to trigger a WebView2 update.
    #[serde(default)]
    pub minimum_webview2_version: Option<String>,
}
/// Installation scope for the NSIS installer.
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub(crate) enum NSISInstallerMode {
    /// Install for the current user only (the default).
    #[default]
    CurrentUser,
    /// Install machine-wide.
    PerMachine,
    /// Support both per-user and per-machine installation.
    Both,
}
/// How the WebView2 runtime is provisioned for the app on Windows.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) enum WebviewInstallMode {
    /// Do not install WebView2 at all.
    Skip,
    /// Download the WebView2 bootstrapper at install time.
    DownloadBootstrapper { silent: bool },
    /// Embed the WebView2 bootstrapper in the installer.
    EmbedBootstrapper { silent: bool },
    /// Embed the full offline WebView2 installer.
    OfflineInstaller { silent: bool },
    /// Use a fixed-version WebView2 runtime located at `path`.
    FixedRuntime { path: PathBuf },
}
impl Default for WebviewInstallMode {
fn default() -> Self {
Self::OfflineInstaller { silent: false }
}
}
// Because all four fields must appear at the same time, there is no need for an Option
/// Android signing configuration: the JKS keystore plus the key used to sign the app.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct AndroidSettings {
    /// Path to the Java keystore (`.jks`) file.
    pub(crate) jks_file: PathBuf,
    /// Password for the keystore itself.
    pub(crate) jks_password: String,
    /// Alias of the key inside the keystore.
    pub(crate) key_alias: String,
    /// Password for that key.
    pub(crate) key_password: String,
}
/// A user-supplied command to sign binaries, used instead of the platform default tool.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CustomSignCommandSettings {
    /// The command to run to sign the binary.
    pub cmd: String,
    /// The arguments to pass to the command.
    ///
    /// "%1" will be replaced with the path to the binary to be signed.
    pub args: Vec<String>,
}
// The artifact formats the bundler can produce. The `#[clap(name = ...)]` attributes
// define the CLI-facing names, mirrored by the `FromStr` impl below — keep both in sync.
// NOTE: the `///` comments double as clap help text; edit with care.
#[derive(Clone, Copy, Debug, clap::ValueEnum, Serialize)]
pub(crate) enum PackageType {
    /// The macOS application bundle (.app).
    #[clap(name = "macos")]
    MacOsBundle,
    /// The iOS app bundle.
    #[clap(name = "ios")]
    IosBundle,
    /// The Windows bundle (.msi).
    #[clap(name = "msi")]
    WindowsMsi,
    /// The NSIS bundle (.exe).
    #[clap(name = "nsis")]
    Nsis,
    /// The Linux Debian package bundle (.deb).
    #[clap(name = "deb")]
    Deb,
    /// The Linux RPM bundle (.rpm).
    #[clap(name = "rpm")]
    Rpm,
    /// The Linux AppImage bundle (.AppImage).
    #[clap(name = "appimage")]
    AppImage,
    /// The macOS DMG bundle (.dmg).
    #[clap(name = "dmg")]
    Dmg,
    /// The Updater bundle (a patch of an existing app)
    #[clap(name = "updater")]
    Updater,
}
impl FromStr for PackageType {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"macos" => Ok(PackageType::MacOsBundle),
"ios" => Ok(PackageType::IosBundle),
"msi" => Ok(PackageType::WindowsMsi),
"nsis" => Ok(PackageType::Nsis),
"deb" => Ok(PackageType::Deb),
"rpm" => Ok(PackageType::Rpm),
"appimage" => Ok(PackageType::AppImage),
"dmg" => Ok(PackageType::Dmg),
"updater" => Ok(PackageType::Updater),
_ => Err(format!("{s} is not a valid package type")),
}
}
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/config/mod.rs | packages/cli/src/config/mod.rs | mod app;
mod bundle;
mod component;
mod dioxus_config;
mod serve;
mod web;
pub(crate) use app::*;
pub(crate) use bundle::*;
pub(crate) use dioxus_config::*;
pub(crate) use serve::*;
pub(crate) use web::*;
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/config/web.rs | packages/cli/src/config/web.rs | use serde::{Deserialize, Serialize};
use std::path::PathBuf;
/// The `web` section of the Dioxus config: dev-server, proxy, watcher and resource settings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct WebConfig {
    /// Page-level settings (title, base path).
    #[serde(default)]
    pub(crate) app: WebAppConfig,
    /// Backends to proxy requests to while serving.
    #[serde(default)]
    pub(crate) proxy: Vec<WebProxyConfig>,
    /// File-watcher behavior for reloads.
    #[serde(default)]
    pub(crate) watcher: WebWatcherConfig,
    /// Extra styles/scripts injected into the page.
    #[serde(default)]
    pub(crate) resource: WebResourceConfig,
    /// HTTPS settings for the dev server.
    #[serde(default)]
    pub(crate) https: WebHttpsConfig,
    /// Whether to enable pre-compression of assets and wasm during a web build in release mode
    #[serde(default = "false_bool")]
    pub(crate) pre_compress: bool,
    /// The wasm-opt configuration
    #[serde(default)]
    pub(crate) wasm_opt: WasmOptConfig,
}
impl Default for WebConfig {
fn default() -> Self {
Self {
pre_compress: false_bool(),
app: Default::default(),
https: Default::default(),
wasm_opt: Default::default(),
proxy: Default::default(),
watcher: Default::default(),
resource: Default::default(),
}
}
}
/// The wasm-opt configuration
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub(crate) struct WasmOptConfig {
    /// The wasm-opt level to use for release builds [default: z]
    /// Options:
    /// - z: optimize aggressively for size
    /// - s: optimize for size
    /// - 0: don't optimize
    /// - 1: optimize for speed
    /// - 2: optimize for more for speed
    /// - 3: optimize for even more for speed
    /// - 4: optimize aggressively for speed
    #[serde(default)]
    pub(crate) level: WasmOptLevel,
    /// Keep debug symbols in the wasm file
    #[serde(default = "false_bool")]
    pub(crate) debug: bool,
    /// Enable memory packing
    #[serde(default = "false_bool")]
    pub(crate) memory_packing: bool,
    /// Extra arguments to pass to wasm-opt
    ///
    /// For example, to enable simd, you can set this to `["--enable-simd"]`.
    ///
    /// You can also disable features by prefixing them with `--disable-`, e.g. `["--disable-bulk-memory"]`.
    ///
    /// Currently only --enable and --disable flags are supported.
    #[serde(default)]
    pub(crate) extra_features: Vec<String>,
}
/// The wasm-opt level to use for release web builds [default: Z]
///
/// Serialized as the short strings `"z"`, `"s"`, `"0"`..`"4"` in the config file.
#[derive(Default, Debug, Copy, Clone, Serialize, Deserialize)]
pub(crate) enum WasmOptLevel {
    /// Optimize aggressively for size
    #[serde(rename = "z")]
    #[default]
    Z,
    /// Optimize for size
    #[serde(rename = "s")]
    S,
    /// Don't optimize
    #[serde(rename = "0")]
    Zero,
    /// Optimize for speed
    #[serde(rename = "1")]
    One,
    /// Optimize for more for speed
    #[serde(rename = "2")]
    Two,
    /// Optimize for even more for speed
    #[serde(rename = "3")]
    Three,
    /// Optimize aggressively for speed
    #[serde(rename = "4")]
    Four,
}
/// Page-level settings for the web app.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct WebAppConfig {
    /// Title of the page [default: "dioxus | ⛺"].
    #[serde(default = "default_title")]
    pub(crate) title: String,
    /// Base path the app is served under, when not at the server root.
    pub(crate) base_path: Option<String>,
}
impl Default for WebAppConfig {
fn default() -> Self {
Self {
title: default_title(),
base_path: None,
}
}
}
/// A single dev-server proxy entry.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct WebProxyConfig {
    /// The backend to forward matching requests to — routing semantics live in the serve code.
    pub(crate) backend: String,
}
/// File-watcher settings for the web dev server.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct WebWatcherConfig {
    /// Paths watched for changes [default: `src` and `examples`].
    #[serde(default = "watch_path_default")]
    pub(crate) watch_path: Vec<PathBuf>,
    /// Reload when the html changes [default: false] — exact trigger lives in the watcher code.
    #[serde(default)]
    pub(crate) reload_html: bool,
    /// Serve the index page for unknown routes (SPA-style 404 handling) [default: true].
    #[serde(default = "true_bool")]
    pub(crate) index_on_404: bool,
}
impl Default for WebWatcherConfig {
fn default() -> Self {
Self {
watch_path: watch_path_default(),
reload_html: false,
index_on_404: true,
}
}
}
/// Directories watched by default: the crate's `src/` and `examples/`.
fn watch_path_default() -> Vec<PathBuf> {
    ["src", "examples"].iter().map(PathBuf::from).collect()
}
/// Extra resources injected into the served page, plus dev-only resources.
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub(crate) struct WebResourceConfig {
    /// Resources only injected during development (see [`WebDevResourceConfig`]).
    pub(crate) dev: WebDevResourceConfig,
    /// Stylesheets to include.
    pub(crate) style: Option<Vec<PathBuf>>,
    /// Scripts to include.
    pub(crate) script: Option<Vec<PathBuf>>,
}
/// Resources injected only during development serves.
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub(crate) struct WebDevResourceConfig {
    /// Dev-only stylesheets.
    #[serde(default)]
    pub(crate) style: Vec<PathBuf>,
    /// Dev-only scripts.
    #[serde(default)]
    pub(crate) script: Vec<PathBuf>,
}
/// HTTPS configuration for the dev server.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub(crate) struct WebHttpsConfig {
    /// Enable HTTPS for the dev server.
    pub(crate) enabled: Option<bool>,
    /// Provision a local certificate via mkcert — NOTE(review): assumed from the name; confirm in serve code.
    pub(crate) mkcert: Option<bool>,
    /// Path to the TLS key file.
    pub(crate) key_path: Option<String>,
    /// Path to the TLS certificate file.
    pub(crate) cert_path: Option<String>,
}
/// Serde default helper: `true`.
fn true_bool() -> bool {
    true
}
/// Serde default helper: `false`.
fn false_bool() -> bool {
    false
}
/// Fallback page title used when the config does not set one.
pub(crate) fn default_title() -> String {
    String::from("dioxus | ⛺")
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/config/component.rs | packages/cli/src/config/component.rs | use crate::component::ComponentRegistry;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
/// Configuration for the `dioxus component` commands
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub(crate) struct ComponentConfig {
    /// The component registry to default to when adding components
    #[serde(default)]
    pub(crate) registry: ComponentRegistry,
    /// The path where components are stored when adding or removing components
    // NOTE(review): when `None`, the fallback location is decided by the component
    // command implementation — confirm there.
    #[serde(default)]
    pub(crate) components_dir: Option<PathBuf>,
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/config.rs | packages/cli/src/cli/config.rs | use super::*;
use crate::{CliSettings, TraceSrc, Workspace};
/// Dioxus config file controls
// NOTE: the `///` comments below double as clap help text — edit with care.
#[derive(Clone, Debug, Deserialize, Subcommand)]
pub(crate) enum Config {
    /// Init `Dioxus.toml` for project/folder.
    Init {
        /// Init project name
        name: String,
        /// Cover old config
        #[clap(long)]
        #[serde(default)]
        force: bool,
    },
    /// Format print Dioxus config.
    FormatPrint {},
    /// Create a custom html file.
    CustomHtml {},
    /// Set CLI settings.
    #[command(subcommand)]
    Set(Setting),
}
// Each variant maps to a persisted CLI setting; the `Display` impl below must stay
// in sync with the kebab-case names clap derives for these subcommands.
#[derive(Debug, Clone, Copy, Deserialize, Subcommand)]
pub(crate) enum Setting {
    /// Set the value of the always-hot-reload setting.
    AlwaysHotReload { value: BoolValue },
    /// Set the value of the always-open-browser setting.
    AlwaysOpenBrowser { value: BoolValue },
    /// Set the value of the always-on-top desktop setting.
    AlwaysOnTop { value: BoolValue },
    /// Set the interval that file changes are polled on WSL for hot reloading.
    WSLFilePollInterval { value: u16 },
    /// Disable the built-in telemetry for the CLI
    DisableTelemetry { value: BoolValue },
}
impl Display for Setting {
    /// Render the kebab-case CLI name of the setting (used in log messages).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            Self::AlwaysHotReload { .. } => "always-hot-reload",
            Self::AlwaysOpenBrowser { .. } => "always-open-browser",
            Self::AlwaysOnTop { .. } => "always-on-top",
            Self::WSLFilePollInterval { .. } => "wsl-file-poll-interval",
            Self::DisableTelemetry { .. } => "disable-telemetry",
        };
        write!(f, "{name}")
    }
}
// Clap complains if we use a bool directly and I can't find much info about it.
// "Argument 'value` is positional and it must take a value but action is SetTrue"
// So settings take an explicit `true`/`false` positional value via this enum instead.
#[derive(Debug, Clone, Copy, serde::Serialize, Deserialize, clap::ValueEnum)]
pub(crate) enum BoolValue {
    True,
    False,
}
impl From<BoolValue> for bool {
    /// Collapse the CLI-friendly enum back into a plain boolean.
    fn from(value: BoolValue) -> Self {
        matches!(value, BoolValue::True)
    }
}
impl Config {
    /// Execute a `dx config` subcommand.
    ///
    /// File-producing commands (`Init`, `CustomHtml`) write into the crate root of
    /// the current project; `Set` persists a value into the global CLI settings.
    pub(crate) async fn config(self) -> Result<StructuredOutput> {
        let crate_root = Workspace::crate_root_from_path()?;
        match self {
            Config::Init { name, force } => {
                let conf_path = crate_root.join("Dioxus.toml");
                // Refuse to clobber an existing config unless --force was passed.
                if conf_path.is_file() && !force {
                    tracing::warn!(
                        "config file `Dioxus.toml` already exist, use `--force` to overwrite it."
                    );
                    return Ok(StructuredOutput::Success);
                }
                let mut file = File::create(conf_path)?;
                // The bundled template carries a `{{project-name}}` placeholder.
                let content = String::from(include_str!("../../assets/dioxus.toml"))
                    .replace("{{project-name}}", &name);
                file.write_all(content.as_bytes())?;
                tracing::info!(dx_src = ?TraceSrc::Dev, "🚩 Init config file completed.");
            }
            Config::FormatPrint {} => {
                // Debug-print the resolved workspace settings.
                let workspace = Workspace::current().await?;
                tracing::info!("{:#?}", workspace.settings);
            }
            Config::CustomHtml {} => {
                // Copy the bundled dev index.html template into the project root.
                let html_path = crate_root.join("index.html");
                let mut file = File::create(html_path)?;
                let content = include_str!("../../assets/web/dev.index.html");
                file.write_all(content.as_bytes())?;
                tracing::info!(dx_src = ?TraceSrc::Dev, "🚩 Create custom html file done.");
            }
            // Handle CLI settings.
            Config::Set(setting) => {
                CliSettings::modify_settings(|settings| match setting {
                    Setting::AlwaysOnTop { value } => settings.always_on_top = Some(value.into()),
                    Setting::AlwaysHotReload { value } => {
                        settings.always_hot_reload = Some(value.into())
                    }
                    Setting::AlwaysOpenBrowser { value } => {
                        settings.always_open_browser = Some(value.into())
                    }
                    Setting::WSLFilePollInterval { value } => {
                        settings.wsl_file_poll_interval = Some(value)
                    }
                    Setting::DisableTelemetry { value } => {
                        settings.disable_telemetry = Some(value.into());
                    }
                })?;
                tracing::info!(dx_src = ?TraceSrc::Dev, "🚩 CLI setting `{setting}` has been set.");
            }
        }
        Ok(StructuredOutput::Success)
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/serve.rs | packages/cli/src/cli/serve.rs | use super::*;
use crate::{AddressArguments, Anonymized, BuildArgs, TraceController};
/// Serve the project
///
/// `dx serve` takes cargo args by default with additional renderer args like `--web`, `--webview`, and `--native`:
///
/// ```sh
/// dx serve --example blah --target blah --android
/// ```
///
/// A simple serve:
/// ```sh
/// dx serve --web
/// ```
///
/// As of dioxus 0.7, `dx serve` allows independent customization of the client and server builds,
/// allowing workspaces and removing any "magic" done to support ergonomic fullstack serving with
/// a plain `dx serve`. These require specifying more arguments like features since they won't be autodetected.
///
/// ```sh
/// dx serve \
///     client --package frontend \
///     server --package backend
/// ```
// NOTE: the "release-incompatible" arg group below rejects flags (e.g. --hot-reload)
// when combined with --release.
#[derive(Clone, Debug, Default, Parser)]
#[command(group = clap::ArgGroup::new("release-incompatible").multiple(true).conflicts_with("release"))]
pub(crate) struct ServeArgs {
    /// The arguments for the address the server will run on
    #[clap(flatten)]
    pub(crate) address: AddressArguments,
    /// Open the app in the default browser [default: true - unless cli settings are set]
    #[arg(long, default_missing_value="true", num_args=0..=1)]
    pub(crate) open: Option<bool>,
    /// Enable full hot reloading for the app [default: true - unless cli settings are set]
    #[clap(long, group = "release-incompatible")]
    pub(crate) hot_reload: Option<bool>,
    /// Configure always-on-top for desktop apps [default: true - unless cli settings are set]
    #[clap(long, default_missing_value = "true")]
    pub(crate) always_on_top: Option<bool>,
    /// Set cross-origin-policy to same-origin [default: false]
    #[clap(name = "cross-origin-policy")]
    #[clap(long)]
    pub(crate) cross_origin_policy: bool,
    /// Sets the interval in seconds that the CLI will poll for file changes on WSL.
    #[clap(long, default_missing_value = "2")]
    pub(crate) wsl_file_poll_interval: Option<u16>,
    /// Run the server in interactive mode
    #[arg(long, default_missing_value="true", num_args=0..=1, short = 'i')]
    pub(crate) interactive: Option<bool>,
    /// Enable Rust hot-patching instead of full rebuilds [default: false]
    ///
    /// This is quite experimental and may lead to unexpected segfaults or crashes in development.
    #[arg(long, default_value_t = false, alias = "hotpatch")]
    pub(crate) hot_patch: bool,
    /// Watch the filesystem for changes and trigger a rebuild [default: true]
    #[clap(long, default_missing_value = "true", num_args=0..=1)]
    pub(crate) watch: Option<bool>,
    /// Exit the CLI after running into an error. This is mainly used to test hot patching internally
    #[clap(long)]
    #[clap(hide = true)]
    pub(crate) exit_on_error: bool,
    /// Platform-specific arguments for the build
    #[clap(flatten)]
    pub(crate) platform_args: CommandWithPlatformOverrides<PlatformServeArgs>,
}
// Per-platform overrides for `dx serve`: build targets plus runtime args for the executable.
#[derive(Clone, Debug, Default, Parser)]
pub(crate) struct PlatformServeArgs {
    #[clap(flatten)]
    pub(crate) targets: BuildArgs,
    /// Additional arguments to pass to the executable
    #[clap(long, default_value = "")]
    pub(crate) args: String,
}
impl ServeArgs {
    /// Start the tui, builder, etc by resolving the arguments and then running the actual top-level serve function
    ///
    /// Make sure not to do any intermediate logging since our tracing infra has now enabled much
    /// higher log levels
    ///
    /// We also set up proper panic handling since the TUI has a tendency to corrupt the terminal.
    pub(crate) async fn serve(self, tracer: &TraceController) -> Result<StructuredOutput> {
        // Redirect all logging the cli logger - if there's any pending after a panic, we flush it
        let is_interactive_tty = self.is_interactive_tty();
        if is_interactive_tty {
            tracer.redirect_to_tui();
        }
        crate::serve::serve_all(self, tracer)
            .await
            .map(|_| StructuredOutput::Success)
    }
    /// Check if the server is running in interactive mode. This involves checking the terminal as well
    ///
    /// Interactive mode requires stdout to be a TTY; when `--interactive` is not
    /// given it defaults to on.
    pub(crate) fn is_interactive_tty(&self) -> bool {
        use std::io::IsTerminal;
        std::io::stdout().is_terminal() && self.interactive.unwrap_or(true)
    }
}
impl Anonymized for ServeArgs {
    /// Produce a telemetry-safe JSON view of the serve arguments; nested args are
    /// anonymized recursively.
    fn anonymized(&self) -> Value {
        json! {{
            "address": self.address.anonymized(),
            "open": self.open,
            "hot_reload": self.hot_reload,
            "always_on_top": self.always_on_top,
            "cross_origin_policy": self.cross_origin_policy,
            "wsl_file_poll_interval": self.wsl_file_poll_interval,
            "interactive": self.interactive,
            "hot_patch": self.hot_patch,
            "watch": self.watch,
            "exit_on_error": self.exit_on_error,
            "platform_args": self.platform_args.anonymized(),
        }}
    }
}
impl Anonymized for PlatformServeArgs {
    /// Telemetry view: `args` is reduced to a presence flag so the raw string is
    /// never reported.
    fn anonymized(&self) -> Value {
        json! {{
            "targets": self.targets.anonymized(),
            "args": !self.args.is_empty(),
        }}
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/autoformat.rs | packages/cli/src/cli/autoformat.rs | use super::{check::collect_rs_files, *};
use crate::Workspace;
use anyhow::{bail, Context};
use dioxus_autofmt::{IndentOptions, IndentType};
use rayon::prelude::*;
use std::{borrow::Cow, fs, path::Path};
// For reference, the rustfmt main.rs file
// https://github.com/rust-lang/rustfmt/blob/master/src/bin/main.rs
/// Format some rsx
// NOTE: the `///` comments on fields double as clap help text — edit with care.
#[derive(Clone, Debug, Parser)]
pub(crate) struct Autoformat {
    /// Format rust code before the formatting the rsx macros
    #[clap(long)]
    pub(crate) all_code: bool,
    /// Run in 'check' mode. Exits with 0 if input is formatted correctly. Exits
    /// with 1 and prints a diff if formatting is required.
    #[clap(short, long)]
    pub(crate) check: bool,
    /// Input rsx (selection)
    #[clap(short, long)]
    pub(crate) raw: Option<String>,
    /// Input file at path (set to "-" to read file from stdin, and output formatted file to stdout)
    #[clap(short, long)]
    pub(crate) file: Option<String>,
    /// Split attributes in lines or not
    #[clap(short, long, default_value = "false")]
    pub(crate) split_line_attributes: bool,
    /// The package to build
    #[clap(short, long)]
    pub(crate) package: Option<String>,
}
impl Autoformat {
    /// Entry point for the autoformat command.
    ///
    /// Dispatch order: an explicit `--file` wins, then `--raw` text, otherwise the
    /// whole project (optionally narrowed to `--package`) is formatted.
    pub(crate) async fn autoformat(self) -> Result<StructuredOutput> {
        let Autoformat {
            check,
            raw,
            file,
            split_line_attributes,
            all_code: format_rust_code,
            ..
        } = self;
        if let Some(file) = file {
            // Format a single file
            refactor_file(file, split_line_attributes, format_rust_code)?;
        } else if let Some(raw) = raw {
            // Format raw text.
            let indent = indentation_for(".", self.split_line_attributes)?;
            let formatted =
                dioxus_autofmt::fmt_block(&raw, 0, indent).context("error formatting codeblock")?;
            println!("{}", formatted);
        } else {
            // Default to formatting the project.
            let crate_dir = if let Some(package) = self.package {
                let workspace = Workspace::current().await?;
                let dx_crate = workspace
                    .find_main_package(Some(package))
                    .context("Failed to find package")?;
                // Format relative to the chosen package's manifest directory.
                workspace.krates[dx_crate]
                    .manifest_path
                    .parent()
                    .unwrap()
                    .to_path_buf()
                    .into()
            } else {
                Cow::Borrowed(Path::new("."))
            };
            autoformat_project(check, split_line_attributes, format_rust_code, crate_dir)
                .context("error autoformatting project")?;
        }
        Ok(StructuredOutput::Success)
    }
}
/// Format a single file in place, or stdin→stdout when `file` is `"-"`.
fn refactor_file(
    file: String,
    split_line_attributes: bool,
    format_rust_code: bool,
) -> Result<(), Error> {
    let indent = indentation_for(".", split_line_attributes)?;
    let file_content = if file == "-" {
        let mut contents = String::new();
        std::io::stdin().read_to_string(&mut contents)?;
        Ok(contents)
    } else {
        fs::read_to_string(&file)
    };
    let mut s = file_content.context("failed to open file")?;
    // Optionally run the whole file through the Rust formatter before rsx formatting.
    if format_rust_code {
        s = format_rust(&s)?;
    }
    let parsed = syn::parse_file(&s).context("failed to parse file")?;
    let edits =
        dioxus_autofmt::try_fmt_file(&s, &parsed, indent).context("failed to format file")?;
    let out = dioxus_autofmt::apply_formats(&s, edits);
    if file == "-" {
        print!("{out}");
    } else if let Err(e) = fs::write(&file, out) {
        // Write failures are reported but deliberately not propagated.
        tracing::error!("failed to write formatted content to file: {e}",);
    } else {
        println!("formatted {file}");
    }
    Ok(())
}
fn format_file(
path: impl AsRef<Path>,
indent: IndentOptions,
format_rust_code: bool,
) -> Result<usize> {
let mut contents = fs::read_to_string(&path)?;
let mut if_write = false;
if format_rust_code {
let formatted = format_rust(&contents).context("Syntax Error")?;
if contents != formatted {
if_write = true;
contents = formatted;
}
}
let parsed = syn::parse_file(&contents).context("Failed to parse file")?;
let edits = dioxus_autofmt::try_fmt_file(&contents, &parsed, indent)
.context("Failed to format file")?;
let len = edits.len();
if !edits.is_empty() {
if_write = true;
}
if if_write {
let out = dioxus_autofmt::apply_formats(&contents, edits);
fs::write(path, out)?;
}
Ok(len)
}
/// Read every .rs file accessible when considering the .gitignore and try to format it
///
/// Runs using rayon for multithreading, so it should be really really fast
///
/// Doesn't do mod-descending, so it will still try to format unreachable files. TODO.
fn autoformat_project(
    check: bool,
    split_line_attributes: bool,
    format_rust_code: bool,
    dir: impl AsRef<Path>,
) -> Result<()> {
    let mut files_to_format = vec![];
    collect_rs_files(dir.as_ref(), &mut files_to_format);

    // Nothing to do for an empty project.
    // (The original repeated this check twice back-to-back; once is enough.)
    if files_to_format.is_empty() {
        return Ok(());
    }

    // Indentation settings are resolved once from the first file and shared by all.
    let indent = indentation_for(&files_to_format[0], split_line_attributes)?;

    // Format every file in parallel; per-file failures are logged and skipped
    // rather than aborting the whole run.
    let counts = files_to_format
        .into_par_iter()
        .map(|path| {
            let res = format_file(&path, indent.clone(), format_rust_code);
            match res {
                Ok(cnt) => Some(cnt),
                Err(err) => {
                    tracing::error!("error formatting file : {}\n{:#?}", path.display(), err);
                    None
                }
            }
        })
        .collect::<Vec<_>>();

    // NOTE(review): this sums the number of *edits* across files, not the number of
    // files touched, so the bail message below slightly overstates in terms of "files".
    let files_formatted: usize = counts.into_iter().flatten().sum();

    if files_formatted > 0 && check {
        bail!("{files_formatted} files needed formatting");
    }

    Ok(())
}
/// Derive indentation settings from the project's rustfmt configuration by
/// shelling out to `cargo fmt -- --print-config current <path>` and parsing the
/// `hard_tabs` and `tab_spaces` entries from its output.
fn indentation_for(
    file_or_dir: impl AsRef<Path>,
    split_line_attributes: bool,
) -> Result<IndentOptions> {
    let out = std::process::Command::new("cargo")
        .args(["fmt", "--", "--print-config", "current"])
        .arg(file_or_dir.as_ref())
        .stdout(std::process::Stdio::piped())
        .stderr(std::process::Stdio::inherit())
        .output()?;
    if !out.status.success() {
        bail!("cargo fmt failed with status: {out:?}");
    }
    let config = String::from_utf8_lossy(&out.stdout);
    // The config is printed as `key = value` lines; pick out the two we need.
    let hard_tabs = config
        .lines()
        .find(|line| line.starts_with("hard_tabs "))
        .and_then(|line| line.split_once('='))
        .map(|(_, value)| value.trim() == "true")
        .context("Could not find hard_tabs option in rustfmt config")?;
    let tab_spaces = config
        .lines()
        .find(|line| line.starts_with("tab_spaces "))
        .and_then(|line| line.split_once('='))
        .map(|(_, value)| value.trim().parse::<usize>())
        .context("Could not find tab_spaces option in rustfmt config")?
        .context("Could not parse tab_spaces option in rustfmt config")?;
    Ok(IndentOptions::new(
        if hard_tabs {
            IndentType::Tabs
        } else {
            IndentType::Spaces
        },
        tab_spaces,
        split_line_attributes,
    ))
}
/// Format rust code using prettyplease, returning the reprinted source.
fn format_rust(input: &str) -> Result<String> {
    let syntax_tree = syn::parse_file(input)
        .map_err(format_syn_error)
        .context("Failed to parse Rust syntax")?;
    Ok(prettyplease::unparse(&syntax_tree))
}
/// Convert a `syn` parse error into an `anyhow` error carrying its source location.
fn format_syn_error(err: syn::Error) -> Error {
    let loc = err.span().start();
    anyhow::anyhow!(
        "Syntax Error in line {} column {}:\n{err}",
        loc.line,
        loc.column
    )
}
// Smoke test: raw-mode formatting of an rsx snippet containing comment-only lines
// must complete without error.
#[tokio::test]
async fn test_auto_fmt() {
    let test_rsx = r#"
                    //

                    rsx! {

                        div {}

                        //

                        //

                    }

    "#
    .to_string();
    let fmt = Autoformat {
        all_code: false,
        check: false,
        raw: Some(test_rsx),
        file: None,
        split_line_attributes: false,
        package: None,
    };
    fmt.autoformat().await.unwrap();
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/link.rs | packages/cli/src/cli/link.rs | use crate::Result;
use anyhow::{bail, Context};
use serde::{Deserialize, Serialize};
use std::{borrow::Cow, ffi::OsString, path::PathBuf, process::ExitCode};
use target_lexicon::Triple;
/// `dx` can act as a linker in a few scenarios. Note that we don't *actually* implement the linker logic,
/// instead just proxying to a specified linker (or not linking at all!).
///
/// This comes in two flavors:
/// --------------------------
/// - `BaseLink`: We are linking dependencies and want to dynamically select the linker from the environment.
/// This is mostly implemented for Android where the linker is selected in part by the
/// device connected over ADB which can not be determined by .cargo/Config.toml.
/// We implemented this because previous setups like cargo mobile required a hard-coded
/// linker path in your project which does not work in team-based setups.
///
/// - `NoLink`: We are not linking at all, and instead deferring our linking to the driving process,
/// usually being `dx` itself. In this case, we are just writing the linker args to a file
/// and then outputting a dummy object file to satisfy the linker. This is generally used
/// by the binary patching engine since we need to actually do "real linker logic" like
/// traversing object files and satisfying missing symbols. That process is *much* easier
/// to do in the driving host process when we have all the information available. Unfortunately,
/// rustc doesn't provide a "real" way of granularly stepping through the compile process
/// so this is basically a hack.
///
/// We use "BaseLink" when a linker is specified, and "NoLink" when it is not. Both generate a resulting
/// object file.
#[derive(Debug)]
pub struct LinkAction {
    /// Explicit linker to proxy to; `None` means stub ("NoLink") mode — see the module docs above.
    pub linker: Option<PathBuf>,
    /// Target triple being linked for; drives arg filtering and output-path parsing.
    pub triple: Triple,
    /// File where the incoming linker args are written for the driving process to read.
    pub link_args_file: PathBuf,
    /// File where linker errors/warnings are written for the driving process to read.
    pub link_err_file: PathBuf,
}
/// The linker flavor to use. This influences the argument style that gets passed to the linker.
/// We're imitating the rustc linker flavors here.
///
/// <https://doc.rust-lang.org/beta/nightly-rustc/rustc_target/spec/enum.LinkerFlavor.html>
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
pub enum LinkerFlavor {
    /// GNU-style linkers (ld/gold/lld in GNU mode).
    Gnu,
    /// Apple's ld64-style argument conventions.
    Darwin,
    /// LLVM's wasm-ld for WebAssembly targets.
    WasmLld,
    /// MSVC's link.exe argument conventions.
    Msvc,
    Unsupported, // a catch-all for unsupported linkers, usually the stripped-down unix ones
}
impl LinkAction {
    // Env-var protocol used to pass link state from the driving `dx` process to
    // the `dx`-as-linker subprocess.
    const DX_LINK_ARG: &str = "DX_LINK";
    const DX_ARGS_FILE: &str = "DX_LINK_ARGS_FILE";
    const DX_ERR_FILE: &str = "DX_LINK_ERR_FILE";
    const DX_LINK_TRIPLE: &str = "DX_LINK_TRIPLE";
    const DX_LINK_CUSTOM_LINKER: &str = "DX_LINK_CUSTOM_LINKER";
    /// Should we write the input arguments to a file (aka act as a linker subprocess)?
    ///
    /// Just check if the magic env var is set
    ///
    /// Panics if `DX_LINK` is set but a companion var is missing or the triple is
    /// malformed — that indicates a bug in the driving process, not user error.
    pub(crate) fn from_env() -> Option<Self> {
        if std::env::var(Self::DX_LINK_ARG).is_err() {
            return None;
        }
        Some(Self {
            linker: std::env::var(Self::DX_LINK_CUSTOM_LINKER)
                .ok()
                .map(PathBuf::from),
            link_args_file: std::env::var(Self::DX_ARGS_FILE)
                .expect("Linker args file not set")
                .into(),
            link_err_file: std::env::var(Self::DX_ERR_FILE)
                .expect("Linker error file not set")
                .into(),
            triple: std::env::var(Self::DX_LINK_TRIPLE)
                .expect("Linker triple not set")
                .parse()
                .expect("Failed to parse linker triple"),
        })
    }
    /// Export this action's state into `env_vars` so a spawned subprocess can
    /// reconstruct it via [`Self::from_env`]. File paths are canonicalized.
    pub(crate) fn write_env_vars(
        &self,
        env_vars: &mut Vec<(Cow<'static, str>, OsString)>,
    ) -> Result<()> {
        env_vars.push((Self::DX_LINK_ARG.into(), "1".into()));
        env_vars.push((
            Self::DX_ARGS_FILE.into(),
            dunce::canonicalize(&self.link_args_file)?.into_os_string(),
        ));
        env_vars.push((
            Self::DX_ERR_FILE.into(),
            dunce::canonicalize(&self.link_err_file)?.into_os_string(),
        ));
        env_vars.push((Self::DX_LINK_TRIPLE.into(), self.triple.to_string().into()));
        if let Some(linker) = &self.linker {
            env_vars.push((
                Self::DX_LINK_CUSTOM_LINKER.into(),
                // Fall back to the raw path when canonicalization fails.
                dunce::canonicalize(linker)
                    .unwrap_or(linker.clone())
                    .into_os_string(),
            ));
        }
        Ok(())
    }
    /// Run the link step, converting any failure into a non-zero exit code after
    /// recording the error where the driving process can find it.
    pub(crate) fn run_link(self) -> ExitCode {
        let link_err_file = self.link_err_file.clone();
        if let Err(err) = self.run_link_inner() {
            eprintln!("Linker error: {err}");
            // If we failed to run the linker, we need to write the error to the file
            // so that the main process can read it.
            _ = std::fs::create_dir_all(link_err_file.parent().unwrap());
            _ = std::fs::write(link_err_file, format!("Linker error: {err}"));
            return ExitCode::FAILURE;
        }
        ExitCode::SUCCESS
    }
    /// Write the incoming linker args to a file
    ///
    /// The file will be given by the dx-magic-link-arg env var itself, so we use
    /// it both for determining if we should act as a linker and the for the file name itself.
    fn run_link_inner(self) -> Result<()> {
        let args: Vec<_> = std::env::args().collect();
        if args.is_empty() {
            return Ok(());
        }
        let mut args = get_actual_linker_args_excluding_program_name(args);
        // Drop `.lib` args for Android targets — NOTE(review): presumably the NDK
        // linker rejects Windows-style import libs that rustc passes through; confirm.
        if self.triple.environment == target_lexicon::Environment::Android {
            args.retain(|arg| !arg.ends_with(".lib"));
        }
        // Write the linker args to a file for the main process to read
        // todo: we might need to encode these as escaped shell words in case newlines are passed
        std::fs::write(&self.link_args_file, args.join("\n"))?;
        // If there's a linker specified, we use that. Otherwise, we write a dummy object file to satisfy
        // any post-processing steps that rustc does.
        match self.linker {
            Some(linker) => {
                let mut cmd = std::process::Command::new(linker);
                // On Windows, pass args via an @response-file (presumably to dodge
                // command-line length limits); elsewhere pass them directly.
                match cfg!(target_os = "windows") {
                    true => cmd.arg(format!("@{}", &self.link_args_file.display())),
                    false => cmd.args(args),
                };
                let res = cmd.output().expect("Failed to run linker");
                if !res.status.success() {
                    bail!(
                        "{}\n{}",
                        String::from_utf8_lossy(&res.stdout),
                        String::from_utf8_lossy(&res.stderr)
                    );
                }
                if !res.stderr.is_empty() || !res.stdout.is_empty() {
                    // Write linker warnings to file so that the main process can read them.
                    _ = std::fs::create_dir_all(self.link_err_file.parent().unwrap());
                    _ = std::fs::write(
                        self.link_err_file,
                        format!(
                            "Linker warnings: {}\n{}",
                            String::from_utf8_lossy(&res.stdout),
                            String::from_utf8_lossy(&res.stderr)
                        ),
                    );
                }
            }
            None => {
                // Extract the out path - we're going to write a dummy object file to satisfy the linker
                // MSVC uses `/OUT:path`; everything else uses `-o path`.
                let out_file: PathBuf = match self.triple.operating_system {
                    target_lexicon::OperatingSystem::Windows => {
                        let out_arg = args.iter().find(|arg| arg.starts_with("/OUT")).unwrap();
                        out_arg.trim_start_matches("/OUT:").to_string().into()
                    }
                    _ => {
                        let out = args.iter().position(|arg| arg == "-o").unwrap();
                        args[out + 1].clone().into()
                    }
                };
                // This creates an object file that satisfies rust's use of llvm-objcopy
                //
                // I'd rather we *not* do this and instead generate a truly linked file (and then delete it) but
                // this at least lets us delay linking until the host compiler is ready.
                //
                // This is because our host compiler is a stateful server and not a stateless linker.
                //
                // todo(jon): do we use Triple::host or the target triple? I think I ran into issues
                // using the target triple, hence the use of "host" but it might not even matter?
                let triple = Triple::host();
                let format = match triple.binary_format {
                    target_lexicon::BinaryFormat::Elf => object::BinaryFormat::Elf,
                    target_lexicon::BinaryFormat::Coff => object::BinaryFormat::Coff,
                    target_lexicon::BinaryFormat::Macho => object::BinaryFormat::MachO,
                    target_lexicon::BinaryFormat::Wasm => object::BinaryFormat::Wasm,
                    target_lexicon::BinaryFormat::Xcoff => object::BinaryFormat::Xcoff,
                    target_lexicon::BinaryFormat::Unknown => unimplemented!(),
                    _ => unimplemented!("Binary format not supported"),
                };
                let arch = match triple.architecture {
                    target_lexicon::Architecture::Wasm32 => object::Architecture::Wasm32,
                    target_lexicon::Architecture::Wasm64 => object::Architecture::Wasm64,
                    target_lexicon::Architecture::X86_64 => object::Architecture::X86_64,
                    target_lexicon::Architecture::Arm(_) => object::Architecture::Arm,
                    target_lexicon::Architecture::Aarch64(_) => object::Architecture::Aarch64,
                    target_lexicon::Architecture::LoongArch64 => object::Architecture::LoongArch64,
                    target_lexicon::Architecture::Unknown => object::Architecture::Unknown,
                    _ => unimplemented!("Architecture not supported"),
                };
                let endian = match triple.endianness() {
                    Ok(target_lexicon::Endianness::Little) => object::Endianness::Little,
                    Ok(target_lexicon::Endianness::Big) => object::Endianness::Big,
                    Err(_) => unimplemented!("Endianness not supported"),
                };
                let bytes = object::write::Object::new(format, arch, endian)
                    .write()
                    .context("Failed to emit stub link file")?;
                // Write a dummy object file to satisfy rust/linker since it'll run llvm-objcopy
                // ... I wish it *didn't* do that but I can't tell how to disable the linker without
                // using --emit=obj which is not exactly what we want since that will still pull in
                // the dependencies.
                std::fs::create_dir_all(out_file.parent().unwrap())?;
                std::fs::write(out_file, bytes)?;
            }
        }
        Ok(())
    }
}
/// Strip the leading program name from a raw argv list and expand any `@file`
/// linker response files into their individual arguments.
pub fn get_actual_linker_args_excluding_program_name(args: Vec<String>) -> Vec<String> {
    let mut actual = Vec::new();
    let mut raw = args.into_iter();
    // argv[0] is the program name, not a linker argument — drop it.
    raw.next();
    for arg in raw {
        actual.extend(handle_linker_arg_response_file(arg));
    }
    actual
}
// handle Windows linker response file. It's designed to workaround Windows command length limit.
// https://learn.microsoft.com/en-us/cpp/build/reference/at-specify-a-linker-response-file?view=msvc-170
//
// Non-`@` arguments are returned unchanged as a single-element vec. For `@file`
// arguments, the file is read (UTF-8 or UTF-16LE), a leading BOM is stripped, and
// each line becomes one argument with surrounding whitespace and quotes removed.
//
// Panics if the response file cannot be read — this runs inside the linker shim,
// where there is no sensible way to recover.
pub fn handle_linker_arg_response_file(arg: String) -> Vec<String> {
    if !arg.starts_with('@') {
        return vec![arg];
    }
    let path = arg.trim().trim_start_matches('@');
    let file_binary = std::fs::read(path)
        .unwrap_or_else(|e| panic!("Failed to read linker response file {path}: {e}"));
    // This may be a utf-16le file. Try utf-8 first; on failure, recover the original
    // bytes from the error (no clone needed) and reinterpret them as utf-16le.
    let mut content = String::from_utf8(file_binary).unwrap_or_else(|err| {
        let binary_u16le: Vec<u16> = err
            .into_bytes()
            .chunks_exact(2)
            .map(|a| u16::from_le_bytes([a[0], a[1]]))
            .collect();
        String::from_utf16_lossy(&binary_u16le)
    });
    // Remove byte order mark in the beginning
    if content.starts_with('\u{FEFF}') {
        content.remove(0);
    }
    // One argument per line; strip surrounding whitespace and any quote characters
    // (trim_matches('"') is equivalent to the old trim_end + trim_start pair).
    content
        .lines()
        .map(|line| line.trim().trim_matches('"').to_string())
        .collect()
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/bundle.rs | packages/cli/src/cli/bundle.rs | use crate::{AppBuilder, BuildArgs, BuildId, BuildMode, BuildRequest, BundleFormat};
use anyhow::{bail, Context};
use path_absolutize::Absolutize;
use std::collections::HashMap;
use tauri_bundler::{BundleBinary, BundleSettings, PackageSettings, SettingsBuilder};
use walkdir::WalkDir;
use super::*;
/// Bundle an app and its assets.
///
/// This will produce a client `public` folder and the associated server executable in the output folder.
#[derive(Clone, Debug, Parser)]
pub struct Bundle {
    /// The package types to bundle
    ///
    /// If not provided, a default is chosen per bundle format (e.g. an iOS bundle
    /// when targeting iOS).
    #[clap(long)]
    pub package_types: Option<Vec<crate::PackageType>>,

    /// The directory in which the final bundle will be placed.
    ///
    /// Relative paths will be placed relative to the current working directory if specified.
    /// Otherwise, the out_dir path specified in Dioxus.toml will be used (relative to the crate root).
    ///
    /// We will flatten the artifacts into this directory - there will be no differentiation between
    /// artifacts produced by different platforms.
    #[clap(long)]
    pub out_dir: Option<PathBuf>,

    /// The arguments for the dioxus build
    #[clap(flatten)]
    pub(crate) args: CommandWithPlatformOverrides<BuildArgs>,
}
impl Bundle {
// todo: make sure to run pre-render static routes! we removed this from the other bundling step
pub(crate) async fn bundle(mut self) -> Result<StructuredOutput> {
tracing::info!("Bundling project...");
let BuildTargets { client, server } = self.args.into_targets().await?;
let mut server_artifacts = None;
let client_artifacts =
AppBuilder::started(&client, BuildMode::Base { run: false }, BuildId::PRIMARY)?
.finish_build()
.await?;
tracing::info!(path = ?client.root_dir(), "Client build completed successfully! 🚀");
if let Some(server) = server.as_ref() {
// If the server is present, we need to build it as well
server_artifacts = Some(
AppBuilder::started(server, BuildMode::Base { run: false }, BuildId::SECONDARY)?
.finish_build()
.await?,
);
tracing::info!(path = ?client.root_dir(), "Server build completed successfully! 🚀");
}
// If we're building for iOS, we need to bundle the iOS bundle
if client.bundle == BundleFormat::Ios && self.package_types.is_none() {
self.package_types = Some(vec![crate::PackageType::IosBundle]);
}
let mut bundles = vec![];
// Copy the server over if it exists
if let Some(server) = server.as_ref() {
bundles.push(server.main_exe());
}
// Create a list of bundles that we might need to copy
match client.bundle {
// By default, mac/win/linux work with tauri bundle
BundleFormat::MacOS | BundleFormat::Linux | BundleFormat::Windows => {
tracing::info!("Running desktop bundler...");
for bundle in Self::bundle_desktop(&client, &self.package_types)? {
bundles.extend(bundle.bundle_paths);
}
}
// Web/ios can just use their root_dir
BundleFormat::Web => bundles.push(client.root_dir()),
BundleFormat::Ios => {
tracing::warn!("iOS bundles are not currently codesigned! You will need to codesign the app before distributing.");
bundles.push(client.root_dir())
}
BundleFormat::Server => bundles.push(client.root_dir()),
BundleFormat::Android => {
let aab = client
.android_gradle_bundle()
.await
.context("Failed to run gradle bundleRelease")?;
bundles.push(aab);
}
};
// Copy the bundles to the output directory if one was specified
let crate_outdir = client.crate_out_dir();
if let Some(outdir) = self.out_dir.clone().or(crate_outdir) {
let outdir = outdir
.absolutize()
.context("Failed to absolutize output directory")?;
tracing::info!("Copying bundles to output directory: {}", outdir.display());
std::fs::create_dir_all(&outdir)?;
for bundle_path in bundles.iter_mut() {
let destination = outdir.join(bundle_path.file_name().unwrap());
tracing::debug!(
"Copying from {} to {}",
bundle_path.display(),
destination.display()
);
if bundle_path.is_dir() {
dircpy::CopyBuilder::new(&bundle_path, &destination)
.overwrite(true)
.run_par()
.context("Failed to copy the app to output directory")?;
} else {
std::fs::copy(&bundle_path, &destination)
.context("Failed to copy the app to output directory")?;
}
*bundle_path = destination;
}
}
for bundle_path in bundles.iter() {
tracing::info!(
"Bundled app at: {}",
bundle_path.absolutize().unwrap().display()
);
}
let client = client_artifacts.into_structured_output();
let server = server_artifacts.map(|s| s.into_structured_output());
Ok(StructuredOutput::BundleOutput {
bundles,
client,
server,
})
}
fn bundle_desktop(
build: &BuildRequest,
package_types: &Option<Vec<crate::PackageType>>,
) -> Result<Vec<tauri_bundler::Bundle>, Error> {
let krate = &build;
let exe = build.main_exe();
_ = std::fs::remove_dir_all(krate.bundle_dir(build.bundle));
let package = krate.package();
let mut name: PathBuf = krate.executable_name().into();
if cfg!(windows) {
name.set_extension("exe");
}
std::fs::create_dir_all(krate.bundle_dir(build.bundle))
.context("Failed to create bundle directory")?;
std::fs::copy(&exe, krate.bundle_dir(build.bundle).join(&name))
.with_context(|| "Failed to copy the output executable into the bundle directory")?;
let binaries = vec![
// We use the name of the exe but it has to be in the same directory
BundleBinary::new(krate.executable_name().to_string(), true)
.set_src_path(Some(exe.display().to_string())),
];
let mut bundle_settings: BundleSettings = krate.config.bundle.clone().into();
// Check if required fields are provided instead of failing silently.
if bundle_settings.identifier.is_none() {
bail!("\n\nBundle identifier was not provided in `Dioxus.toml`. Add it as:\n\n[bundle]\nidentifier = \"com.mycompany\"\n\n");
}
if bundle_settings.publisher.is_none() {
bail!("\n\nBundle publisher was not provided in `Dioxus.toml`. Add it as:\n\n[bundle]\npublisher = \"MyCompany\"\n\n");
}
/// Resolve an icon path relative to the crate dir
fn canonicalize_icon_path(build: &BuildRequest, icon: &mut String) -> Result<(), Error> {
let icon_path = build
.crate_dir()
.join(&icon)
.canonicalize()
.with_context(|| format!("Failed to canonicalize path to icon {icon:?}"))?;
*icon = icon_path.to_string_lossy().to_string();
Ok(())
}
// Resolve bundle.icon relative to the crate dir
if let Some(icons) = bundle_settings.icon.as_mut() {
for icon in icons.iter_mut() {
canonicalize_icon_path(build, icon)?;
}
}
#[allow(deprecated)]
if cfg!(windows) {
// Resolve bundle.windows.icon_path relative to the crate dir
let mut windows_icon_path = bundle_settings
.windows
.icon_path
.to_string_lossy()
.to_string();
canonicalize_icon_path(build, &mut windows_icon_path)?;
bundle_settings.windows.icon_path = PathBuf::from(&windows_icon_path);
let windows_icon_override = krate.config.bundle.windows.as_ref().map(|w| &w.icon_path);
if windows_icon_override.is_none() {
let icon_path = bundle_settings
.icon
.as_ref()
.and_then(|icons| icons.first());
if let Some(icon_path) = icon_path {
bundle_settings.icon = Some(vec![icon_path.into()]);
};
}
}
if bundle_settings.resources_map.is_none() {
bundle_settings.resources_map = Some(HashMap::new());
}
let asset_dir = build.asset_dir();
if asset_dir.exists() {
for entry in WalkDir::new(&asset_dir) {
let entry = entry.unwrap();
let path = entry.path();
if path.is_file() {
let old = path
.canonicalize()
.with_context(|| format!("Failed to canonicalize {entry:?}"))?;
let new =
PathBuf::from("assets").join(path.strip_prefix(&asset_dir).unwrap_or(path));
tracing::debug!("Bundled asset: {old:?} -> {new:?}");
bundle_settings
.resources_map
.as_mut()
.expect("to be set")
.insert(old.display().to_string(), new.display().to_string());
}
}
}
for resource_path in bundle_settings.resources.take().into_iter().flatten() {
bundle_settings
.resources_map
.as_mut()
.expect("to be set")
.insert(resource_path, "".to_string());
}
let mut settings = SettingsBuilder::new()
.project_out_directory(krate.bundle_dir(build.bundle))
.package_settings(PackageSettings {
product_name: krate.bundled_app_name(),
version: package.version.to_string(),
description: package.description.clone().unwrap_or_default(),
homepage: Some(package.homepage.clone().unwrap_or_default()),
authors: Some(package.authors.clone()),
default_run: Some(name.display().to_string()),
})
.log_level(log::Level::Debug)
.binaries(binaries)
.bundle_settings(bundle_settings);
if let Some(packages) = &package_types {
settings = settings.package_types(packages.iter().map(|p| (*p).into()).collect());
}
settings = settings.target(build.triple.to_string());
let settings = settings
.build()
.context("failed to bundle tauri bundle settings")?;
tracing::debug!("Bundling project with settings: {:#?}", settings);
if cfg!(target_os = "macos") {
std::env::set_var("CI", "true");
}
let bundles = tauri_bundler::bundle::bundle_project(&settings).inspect_err(|err| {
tracing::error!("Failed to bundle project: {:#?}", err);
if cfg!(target_os = "macos") {
tracing::error!("Make sure you have automation enabled in your terminal (https://github.com/tauri-apps/tauri/issues/3055#issuecomment-1624389208) and full disk access enabled for your terminal (https://github.com/tauri-apps/tauri/issues/3055#issuecomment-1624389208)");
}
})?;
Ok(bundles)
}
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/update.rs | packages/cli/src/cli/update.rs | use super::*;
use crate::{Result, Workspace};
use anyhow::{bail, Context};
use itertools::Itertools;
use self_update::cargo_crate_version;
/// Update the `dx` CLI binary itself.
///
/// Checks GitHub releases for a newer version of `dx` and, by default, downloads it and
/// replaces the currently-running executable.
#[derive(Clone, Debug, Parser)]
pub(crate) struct SelfUpdate {
    /// Use the latest nightly build.
    // NOTE(review): this flag is not read anywhere in `self_update()` below — confirm
    // whether nightly support is still intended.
    #[clap(long, default_value = "false")]
    pub nightly: bool,

    /// Specify a version to install.
    #[clap(long)]
    pub version: Option<String>,

    /// Install the update.
    #[clap(long, default_value = "true", num_args = 0..=1)]
    pub install: bool,

    /// List available versions.
    #[clap(long, default_value = "false")]
    pub list: bool,

    /// Force the update even if the current version is up to date.
    #[clap(long, default_value = "false")]
    pub force: bool,
}
impl SelfUpdate {
    /// Check GitHub for a newer `dx` release and optionally download and install it.
    ///
    /// Runs on a blocking thread because `self_update` performs synchronous network
    /// and filesystem I/O, and may prompt on stdin for confirmation.
    pub async fn self_update(self) -> Result<StructuredOutput> {
        tokio::task::spawn_blocking(move || {
            let start = std::time::Instant::now();
            // `--list`: just print any newer versions and exit without downloading.
            if self.list {
                let res = self_update::backends::github::Update::configure()
                    .repo_owner("dioxuslabs")
                    .repo_name("dioxus")
                    .bin_name("dx")
                    .current_version(cargo_crate_version!())
                    .build()
                    .unwrap()
                    .get_latest_releases(cargo_crate_version!())
                    .context("Failed to fetch latest version")?;
                if res.is_empty() {
                    tracing::info!("Your version {} is up to date!", cargo_crate_version!());
                } else {
                    tracing::info!("Your version {} is out of date!", cargo_crate_version!());
                    tracing::info!(
                        "Available versions: [{}]",
                        res.iter()
                            .map(|r| r.version.clone())
                            .collect::<Vec<_>>()
                            .join(", ")
                    );
                }
                return Ok(StructuredOutput::Success);
            }
            let repo = self_update::backends::github::Update::configure()
                .repo_owner("dioxuslabs")
                .repo_name("dioxus")
                .bin_name("dx")
                .current_version(cargo_crate_version!())
                .build()
                .unwrap();
            // An explicit --version implies --force: the user asked for that exact build.
            let force = self.force || self.version.is_some();
            let latest = match self.version {
                Some(version) => repo
                    .get_release_version(&version)
                    .context("Failed to fetch release by tag")?,
                None => repo
                    .get_latest_release()
                    .context("Failed to fetch latest version")?,
            };
            if latest.version == cargo_crate_version!() && !force {
                tracing::info!("Your version {} is up to date!", cargo_crate_version!());
                return Ok(StructuredOutput::Success);
            }
            tracing::info!("Your version is out of date!");
            tracing::info!("- Yours: {}", cargo_crate_version!());
            tracing::info!("- Latest: {}", latest.version);
            // Select the release asset for this host: its name must contain the OS and
            // architecture strings below and end in ".zip".
            let cur_arch = if cfg!(target_arch = "x86_64") {
                "x86_64"
            } else if cfg!(target_arch = "aarch64") {
                "aarch64"
            } else {
                bail!("Unsupported architecture");
            };
            let cur_os = if cfg!(target_os = "windows") {
                "windows"
            } else if cfg!(target_os = "linux") {
                "linux"
            } else if cfg!(target_os = "macos") {
                "darwin"
            } else {
                bail!("Unsupported OS");
            };
            let zip_ext = "zip";
            tracing::debug!("Available assets: {:?}", latest.assets);
            let asset = latest
                .assets
                .iter()
                .find(|a| {
                    a.name.contains(cur_os)
                        && a.name.contains(cur_arch)
                        && a.name.ends_with(zip_ext)
                })
                .context("No suitable asset found")?;
            let install_dir = Workspace::dioxus_data_dir().join("self-update");
            std::fs::create_dir_all(&install_dir).context("Failed to create install directory")?;
            tracing::info!("Downloading update from Github");
            tracing::debug!("Download URL: {}", asset.download_url);
            // Print a short, `|`-indented summary: name, date, URL, and the first few
            // lines of the release notes.
            let body = latest.body.unwrap_or_default();
            let brief = vec![
                latest.name.to_string(),
                "".to_string(),
                latest.date.to_string(),
                asset.download_url.to_string(),
                "".to_string(),
            ]
            .into_iter()
            .chain(body.lines().map(ToString::to_string).take(7))
            .chain(std::iter::once(" ...".to_string()))
            .map(|line| format!(" | {line}"))
            .join("\n");
            tracing::info!("{}", brief.trim());
            // Download the archive into the self-update dir, replacing any stale copy.
            let archive_path = install_dir.join(&asset.name);
            _ = std::fs::remove_file(&archive_path).ok();
            let archive_file = std::fs::File::create(&archive_path)?;
            let download_url = asset.download_url.clone();
            self_update::Download::from_url(&download_url)
                .set_header(
                    hyper::http::header::ACCEPT,
                    "application/octet-stream".parse().unwrap(),
                )
                .download_to(archive_file)
                .context("Failed to download update")?;
            // Extract into a fresh `<data>/self-update/dx/` directory.
            let install_dir = install_dir.join("dx");
            _ = std::fs::remove_dir_all(&install_dir);
            self_update::Extract::from_source(&archive_path)
                .extract_into(&install_dir)
                .context("Failed to extract update")?;
            let exe = if cfg!(target_os = "windows") {
                "dx.exe"
            } else {
                "dx"
            };
            let executable = install_dir.join(exe);
            if !executable.exists() {
                bail!("Executable not found in {}", install_dir.display());
            }
            tracing::info!(
                "Successfully downloaded update in {}ms! 👍",
                start.elapsed().as_millis()
            );
            if self.install {
                tracing::info!(
                    "Installing dx v{} to {}",
                    latest.version,
                    std::env::current_exe()?.display()
                );
                // Ask for interactive confirmation unless --force was given.
                if !self.force {
                    tracing::warn!("Continue? (y/n)");
                    print!(" > ");
                    std::io::stdout()
                        .flush()
                        .context("Failed to flush stdout")?;
                    let mut input = String::new();
                    std::io::stdin()
                        .read_line(&mut input)
                        .context("Failed to read input")?;
                    if !input.trim().to_ascii_lowercase().starts_with('y') {
                        tracing::info!("Aborting update");
                        return Ok(StructuredOutput::Success);
                    }
                }
                // Swap the downloaded binary in for the currently-running executable.
                self_update::self_replace::self_replace(executable)?;
                let time_taken = start.elapsed().as_millis();
                tracing::info!("Done in {} ms! 💫", time_taken)
            } else {
                tracing::info!("Update downloaded to {}", install_dir.display());
                tracing::info!("Run `dx self-update --install` to install the update");
            }
            Ok(StructuredOutput::Success)
        })
        .await
        .context("Failed to run self-update")?
    }
}
/// Check against the github release list to see if the currently released `dx` version is
/// more up-to-date than our own.
///
/// We only toss out this warning once and then save to the settings file to ignore this version
/// in the future.
pub fn log_if_cli_could_update() {
    // Fire-and-forget: the JoinHandle is intentionally dropped, so a slow or failed
    // network check never blocks the CLI.
    tokio::task::spawn_blocking(|| {
        let release = self_update::backends::github::Update::configure()
            .repo_owner("dioxuslabs")
            .repo_name("dioxus")
            .bin_name("dx")
            .current_version(cargo_crate_version!())
            .build()
            .unwrap()
            .get_latest_release();
        if let Ok(release) = release {
            // Compare semver; unparsable versions are silently ignored.
            let old = krates::semver::Version::parse(cargo_crate_version!());
            let new = krates::semver::Version::parse(&release.version);
            if let (Ok(old), Ok(new)) = (old, new) {
                if old < new {
                    _ = crate::CliSettings::modify_settings(|f| {
                        let ignored = f.ignore_version_update.as_deref().unwrap_or_default();
                        // Warn once per version: remember the version we warned about so
                        // subsequent runs stay quiet until the next release.
                        if release.version != ignored {
                            use crate::styles::GLOW_STYLE;
                            tracing::warn!("A new dx version is available: {new}! Run {GLOW_STYLE}dx self-update{GLOW_STYLE:#} to update.");
                            f.ignore_version_update = Some(new.to_string());
                        }
                    });
                }
            }
        }
    });
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/build_assets.rs | packages/cli/src/cli/build_assets.rs | use std::{fs::create_dir_all, path::PathBuf};
use crate::{extract_assets_from_file, Result, StructuredOutput};
use clap::Parser;
use dioxus_cli_opt::process_file_to;
use tracing::debug;
/// Extract the assets referenced by a prebuilt executable and process them into a
/// destination directory.
#[derive(Clone, Debug, Parser)]
pub struct BuildAssets {
    /// The source executable to build assets for.
    pub(crate) executable: PathBuf,
    /// The destination directory for the assets.
    pub(crate) destination: PathBuf,
}
impl BuildAssets {
    /// Copy every unique asset referenced by `self.executable` into
    /// `self.destination`, applying each asset's configured processing options.
    pub async fn run(self) -> Result<StructuredOutput> {
        // Read the asset manifest embedded in the compiled binary.
        let manifest = extract_assets_from_file(&self.executable).await?;
        create_dir_all(&self.destination)?;
        for asset in manifest.unique_assets() {
            let src = PathBuf::from(asset.absolute_source_path());
            let dest = self.destination.join(asset.bundled_path());
            debug!(
                "Processing asset {} --> {} {:#?}",
                src.display(),
                dest.display(),
                asset
            );
            // Process the asset according to its options while copying it into place.
            process_file_to(asset.options(), &src, &dest)?;
        }
        Ok(StructuredOutput::Success)
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/check.rs | packages/cli/src/cli/check.rs | //! Run linting against the user's codebase.
//!
//! For reference, the rustfmt main.rs file
//! <https://github.com/rust-lang/rustfmt/blob/master/src/bin/main.rs>
use super::*;
use crate::BuildRequest;
use anyhow::{anyhow, Context};
use futures_util::{stream::FuturesUnordered, StreamExt};
use std::path::Path;
/// Check the Rust files in the project for issues.
#[derive(Clone, Debug, Parser)]
pub(crate) struct Check {
    /// Input file
    ///
    /// If provided, only this file is checked; otherwise the whole project is checked.
    #[clap(short, long)]
    pub(crate) file: Option<PathBuf>,
    /// Information about the target to check
    #[clap(flatten)]
    pub(crate) build_args: CommandWithPlatformOverrides<BuildArgs>,
}
impl Check {
    // Todo: check the entire crate
    /// Run lint checks over either a single file (when `--file` was passed) or the
    /// whole client project, plus the server project when it lives in a different package.
    pub(crate) async fn check(self) -> Result<StructuredOutput> {
        let BuildTargets { client, server } = self.build_args.into_targets().await?;
        if let Some(file) = self.file {
            check_file_and_report(file)
                .await
                .context("error checking file")?;
        } else {
            // No explicit file: default to checking the whole client project.
            check_project_and_report(&client)
                .await
                .context("error checking project")?;
            // Only check the server separately when it's a different package.
            let distinct_server = server.filter(|s| s.package != client.package);
            if let Some(server) = distinct_server {
                check_project_and_report(&server)
                    .await
                    .context("error checking project")?;
            }
        }
        Ok(StructuredOutput::Success)
    }
}
/// Check a single file and report any issues found in it.
async fn check_file_and_report(path: PathBuf) -> Result<()> {
    check_files_and_report(vec![path]).await
}
/// Read every .rs file accessible when considering the .gitignore and check it
///
/// Runs using Tokio for multithreading, so it should be really really fast
///
/// Doesn't do mod-descending, so it will still try to check unreachable files. TODO.
async fn check_project_and_report(build: &BuildRequest) -> Result<()> {
    // Resolve the package being built to its entry in the workspace's krate graph.
    let dioxus_crate = build
        .workspace
        .find_main_package(Some(build.package.clone()))?;
    let dioxus_crate = &build.workspace.krates[dioxus_crate];
    let mut files_to_check = vec![];
    // Walk the crate root (the directory containing Cargo.toml), honoring ignore rules.
    collect_rs_files(
        dioxus_crate.manifest_path.parent().unwrap().as_std_path(),
        &mut files_to_check,
    );
    check_files_and_report(files_to_check).await
}
/// Check a list of files and report the issues.
async fn check_files_and_report(files_to_check: Vec<PathBuf>) -> Result<()> {
let issue_reports = files_to_check
.into_iter()
.filter(|file| file.components().all(|f| f.as_os_str() != "target"))
.map(|path| async move {
let _path = path.clone();
let res = tokio::spawn(async move {
tokio::fs::read_to_string(&_path)
.await
.map(|contents| dioxus_check::check_file(_path, &contents))
})
.await;
if res.is_err() {
tracing::error!("error checking file: {}", path.display());
}
res
})
.collect::<FuturesUnordered<_>>()
.collect::<Vec<_>>()
.await;
// remove error results which we've already printed
let issue_reports = issue_reports
.into_iter()
.flatten()
.flatten()
.collect::<Vec<_>>();
let total_issues = issue_reports.iter().map(|r| r.issues.len()).sum::<usize>();
for report in issue_reports.into_iter() {
if !report.issues.is_empty() {
tracing::info!("{}", report);
}
}
match total_issues {
0 => {
tracing::info!("No issues found.");
Ok(())
}
1 => Err(anyhow!("1 issue found.")),
_ => Err(anyhow!("{total_issues} issues found.")),
}
}
/// Recursively gather every `.rs` file under `folder` into `files`, honoring
/// ignore rules (e.g. `.gitignore`) via the `ignore` crate's walker.
pub(crate) fn collect_rs_files(folder: &Path, files: &mut Vec<PathBuf>) {
    let rust_files = ignore::Walk::new(folder)
        .flatten()
        .map(|entry| entry.into_path())
        .filter(|path| path.extension() == Some("rs".as_ref()));
    files.extend(rust_files);
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/target.rs | packages/cli/src/cli/target.rs | use crate::BundleFormat;
use crate::Platform;
use crate::{cli::*, Renderer};
// use crate::RendererArg;
// use crate::PlatformAlias;
use target_lexicon::Triple;
const HELP_HEADING: &str = "Target Options";
/// A single target to build for
#[derive(Clone, Debug, Default, Deserialize, Parser)]
pub(crate) struct TargetArgs {
    /// Build platform: supports Web, MacOS, Windows, Linux, iOS, Android, and Server
    ///
    /// The platform implies a combination of the target alias, renderer, and bundle format flags.
    ///
    /// You should generally prefer to use the `--web`, `--webview`, or `--native` flags to set the renderer
    /// or the `--wasm`, `--macos`, `--windows`, `--linux`, `--ios`, or `--android` flags to set the target alias
    /// instead of this flag. The renderer, target alias, and bundle format will be inferred if you only pass one.
    #[clap(flatten)]
    pub(crate) platform: Platform,

    /// Which renderer to use? By default, this is usually inferred from the platform.
    #[clap(long, value_enum, help_heading = HELP_HEADING)]
    pub(crate) renderer: Option<Renderer>,

    /// The bundle format to target for the build: supports web, macos, windows, linux, ios, android, and server
    #[clap(long, value_enum, help_heading = HELP_HEADING)]
    pub(crate) bundle: Option<BundleFormat>,

    /// Build in release mode [default: false]
    #[clap(long, short, help_heading = HELP_HEADING)]
    #[serde(default)]
    pub(crate) release: bool,

    /// The package to build
    #[clap(short, long, help_heading = HELP_HEADING)]
    pub(crate) package: Option<String>,

    /// Build a specific binary [default: ""]
    #[clap(long, help_heading = HELP_HEADING)]
    pub(crate) bin: Option<String>,

    /// Build a specific example [default: ""]
    #[clap(long, help_heading = HELP_HEADING)]
    pub(crate) example: Option<String>,

    /// Build the app with a custom profile
    #[clap(long, help_heading = HELP_HEADING)]
    pub(crate) profile: Option<String>,

    /// Space separated list of features to activate
    #[clap(long, help_heading = HELP_HEADING)]
    pub(crate) features: Vec<String>,

    /// Don't include the default features in the build
    #[clap(long, help_heading = HELP_HEADING)]
    pub(crate) no_default_features: bool,

    /// Include all features in the build
    #[clap(long, help_heading = HELP_HEADING)]
    pub(crate) all_features: bool,

    /// Rustc platform triple
    #[clap(long, help_heading = HELP_HEADING)]
    pub(crate) target: Option<Triple>,

    /// Extra arguments passed to `cargo`
    ///
    /// To see a list of args, run `cargo rustc --help`
    ///
    /// This can include stuff like, "--locked", "--frozen", etc. Note that `dx` sets many of these
    /// args directly from other args in this command.
    #[clap(long, help_heading = HELP_HEADING)]
    pub(crate) cargo_args: Option<String>,

    /// Extra arguments passed to `rustc`. This can be used to customize the linker, or other flags.
    ///
    /// For example, specifying `dx build --rustc-args "-Clink-arg=-Wl,-blah"` will pass "-Clink-arg=-Wl,-blah"
    /// to the underlying `cargo rustc` command:
    ///
    /// cargo rustc -- -Clink-arg=-Wl,-blah
    ///
    #[clap(long, help_heading = HELP_HEADING)]
    pub(crate) rustc_args: Option<String>,

    /// Skip collecting assets from dependencies [default: false]
    #[clap(long, help_heading = HELP_HEADING)]
    #[serde(default)]
    pub(crate) skip_assets: bool,

    /// Inject scripts to load the wasm and js files for your dioxus app if they are not already present [default: true]
    #[clap(long, default_value_t = true, help_heading = HELP_HEADING, num_args = 0..=1)]
    pub(crate) inject_loading_scripts: bool,

    /// Experimental: Bundle split the wasm binary into multiple chunks based on `#[wasm_split]` annotations [default: false]
    #[clap(long, default_value_t = false, help_heading = HELP_HEADING)]
    pub(crate) wasm_split: bool,

    /// Generate debug symbols for the wasm binary [default: true]
    ///
    /// This will make the binary larger and take longer to compile, but will allow you to debug the
    /// wasm binary
    #[clap(long, default_value_t = true, help_heading = HELP_HEADING, num_args = 0..=1)]
    pub(crate) debug_symbols: bool,

    /// The name of the device we are hoping to upload to. By default, dx tries to upload to the active
    /// simulator. If the device name is passed, we will upload to that device instead.
    ///
    /// This performs a search among devices, and fuzzy matches might be found.
    #[arg(long, default_missing_value=Some("".into()), num_args=0..=1)]
    pub(crate) device: Option<String>,

    /// The base path the build will fetch assets relative to. This will override the
    /// base path set in the `dioxus` config.
    #[clap(long, help_heading = HELP_HEADING)]
    pub(crate) base_path: Option<String>,

    /// Should dx attempt to codesign the app bundle?
    #[clap(long, default_value_t = false, help_heading = HELP_HEADING, num_args = 0..=1)]
    pub(crate) codesign: bool,

    /// The path to the Apple entitlements file used to sign the resulting app bundle.
    ///
    /// On iOS, this is required to deploy to a device and for some configurations in the simulator.
    #[clap(long, help_heading = HELP_HEADING)]
    pub(crate) apple_entitlements: Option<PathBuf>,

    /// The Apple team ID to use when signing the app bundle.
    ///
    /// Usually this is an email or name associated with your Apple Developer account, usually in the
    /// format `Signing Name (GXTEAMID123)`.
    ///
    /// This is passed directly to the `codesign` tool.
    ///
    /// ```text
    /// codesign --force --entitlements <entitlements_file> --sign <apple_team_id> <app_bundle>
    /// ```
    #[clap(long, help_heading = HELP_HEADING)]
    pub(crate) apple_team_id: Option<String>,

    /// The folder where DX stores its temporary artifacts for things like hotpatching, build caches,
    /// window position, etc. This is meant to be stable within an invocation of the CLI, but you can
    /// persist it by setting this flag.
    #[clap(long, help_heading = HELP_HEADING)]
    pub(crate) session_cache_dir: Option<PathBuf>,

    /// The target for the client build, used for specifying which target the server should end up in
    /// when merging `@client and @server` targets together.
    #[clap(long, help_heading = HELP_HEADING)]
    pub(crate) client_target: Option<String>,

    /// Automatically pass `--features=js_cfg` when building for wasm targets. This is enabled by default.
    #[clap(long, default_value_t = true, help_heading = HELP_HEADING, num_args = 0..=1)]
    pub(crate) wasm_js_cfg: bool,

    /// The Windows subsystem to use when building for Windows targets. This can be either `CONSOLE` or `WINDOWS`.
    ///
    /// By default, DX uses `WINDOWS` since it assumes a GUI application, but you can override this behavior with this flag.
    ///
    /// See <https://learn.microsoft.com/en-us/cpp/build/reference/subsystem-specify-subsystem?view=msvc-170> for more information.
    #[clap(long, help_heading = HELP_HEADING)]
    pub(crate) windows_subsystem: Option<String>,

    /// Output raw JSON diagnostics from cargo instead of processing them [default: false]
    ///
    /// When enabled, cargo's JSON output will be relayed directly to stdout without any processing or formatting by DX.
    /// This is useful for integration with other tools that expect cargo's raw JSON format.
    #[clap(long, help_heading = HELP_HEADING)]
    #[serde(default)]
    pub(crate) raw_json_diagnostics: bool,
}
impl Anonymized for TargetArgs {
    /// Produce a telemetry-safe JSON summary of the build-target flags.
    ///
    /// Free-form values are mostly reduced to booleans (`is_some()` / `!is_empty()`)
    /// so user content doesn't leak; enum-like values are reported verbatim.
    fn anonymized(&self) -> Value {
        json! {{
            "renderer": self.renderer,
            "bundle": self.bundle,
            "platform": self.platform,
            "release": self.release,
            // NOTE(review): `package`, `bin`, and `device` are emitted verbatim while
            // `example`/`profile` are reduced to booleans — confirm this is intended,
            // since package/bin/device names may identify the user's project.
            "package": self.package,
            "bin": self.bin,
            "example": self.example.is_some(),
            "profile": self.profile.is_some(),
            "features": !self.features.is_empty(),
            "no_default_features": self.no_default_features,
            "all_features": self.all_features,
            "target": self.target.as_ref().map(|t| t.to_string()),
            "skip_assets": self.skip_assets,
            "inject_loading_scripts": self.inject_loading_scripts,
            "wasm_split": self.wasm_split,
            "debug_symbols": self.debug_symbols,
            "device": self.device,
            "base_path": self.base_path.is_some(),
            "cargo_args": self.cargo_args.is_some(),
            "rustc_args": self.rustc_args.is_some(),
            "raw_json_diagnostics": self.raw_json_diagnostics,
        }}
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/build.rs | packages/cli/src/cli/build.rs | use dioxus_dx_wire_format::StructuredBuildArtifacts;
use crate::{
cli::*, Anonymized, AppBuilder, BuildArtifacts, BuildId, BuildMode, BuildRequest, BundleFormat,
Platform, TargetArgs, Workspace,
};
/// Build the Rust Dioxus app and all of its assets.
///
/// Produces a final output build. If a "server" feature is present in the package's Cargo.toml, it will
/// be considered a fullstack app and the server will be built as well.
// NOTE: the `///` doc comments below double as `dx build --help` text via clap's
// derive — edit them with user-facing output in mind.
#[derive(Clone, Debug, Default, Parser)]
pub struct BuildArgs {
    /// Enable fullstack mode [default: false]
    ///
    /// This is automatically detected from `dx serve` if the "fullstack" feature is enabled by default.
    // `default_missing_value` + `num_args = 0..=1` allow both bare `--fullstack`
    // and an explicit `--fullstack=false`; `None` means "not specified".
    #[arg(
        long,
        default_missing_value = "true",
        num_args = 0..=1,
    )]
    pub(crate) fullstack: Option<bool>,
    /// Pre-render all routes returned from the app's `/static_routes` endpoint [default: false]
    #[clap(long)]
    pub(crate) ssg: bool,
    /// Force a "fat" binary, required to use `dx build-tools hotpatch`
    #[clap(long)]
    pub(crate) fat_binary: bool,
    /// This flag only applies to fullstack builds. By default fullstack builds will run the server
    /// and client builds in parallel. This flag will force the build to run the server build first, then the client build. [default: false]
    ///
    /// If CI is enabled, this will be set to true by default.
    ///
    // Resolved through `force_sequential_build()`, which also consults the `CI` env var.
    #[clap(
        long, default_missing_value = "true",
        num_args = 0..=1,
    )]
    pub(crate) force_sequential: Option<bool>,
    /// Arguments for the build itself
    #[clap(flatten)]
    pub(crate) build_arguments: TargetArgs,
}
impl BuildArgs {
    /// Whether the client and server builds must run one after the other.
    ///
    /// An explicit `--force-sequential` value always wins; when the flag was not
    /// given, sequential builds are used whenever the `CI` environment variable
    /// is set.
    pub(crate) fn force_sequential_build(&self) -> bool {
        match self.force_sequential {
            Some(explicit) => explicit,
            None => std::env::var("CI").is_ok(),
        }
    }
}
// Telemetry serialization for the build command. The nested target arguments are
// anonymized via their own impl; `fat_binary` and `force_sequential` are not reported.
impl Anonymized for BuildArgs {
    fn anonymized(&self) -> Value {
        json! {{
            "fullstack": self.fullstack,
            "ssg": self.ssg,
            "build_arguments": self.build_arguments.anonymized(),
        }}
    }
}
/// The fully-resolved build requests produced from one `dx build`/`dx serve` invocation.
pub struct BuildTargets {
    // The app itself (web/desktop/mobile); always present.
    pub client: BuildRequest,
    // A companion server binary, only produced for fullstack builds (see `into_targets`).
    pub server: Option<BuildRequest>,
}
impl CommandWithPlatformOverrides<BuildArgs> {
    /// We need to decompose the combined `BuildArgs` into the individual targets that we need to build.
    ///
    /// Only in a few cases do we spin out an additional server binary:
    /// - the fullstack feature is passed
    /// - the fullstack flag is enabled
    /// - the server flag is enabled
    ///
    /// The buildtargets configuration comes in two flavors:
    /// - implied via the `fullstack` feature
    /// - explicit when using `@server and @client`
    ///
    /// We use the client arguments to build the client target, and then make a few changes to make
    /// the server target.
    ///
    /// The `--fullstack` feature is basically the same as passing `--features fullstack`
    ///
    /// Some examples:
    /// ```shell, ignore
    /// dx serve --target wasm32-unknown-unknown --fullstack # serves both client and server
    /// dx serve --target wasm32-unknown-unknown --features fullstack # serves both client and server
    /// dx serve --target wasm32-unknown-unknown # only serves the client
    /// dx serve --target wasm32-unknown-unknown # serves both if `fullstack` is enabled on dioxus
    /// dx serve @client --target wasm32-unknown-unknown # only serves the client
    /// dx serve @client --target wasm32-unknown-unknown --fullstack # serves both client and server
    /// ```
    ///
    /// Currently it is not possible to serve the server without the client, but this could be added in the future.
    pub async fn into_targets(mut self) -> Result<BuildTargets> {
        let workspace = Workspace::current().await?;

        // do some logging to ensure dx matches the dioxus version since we're not always API compatible
        workspace.check_dioxus_version_against_cli();

        // The client args are the `@client` arguments, or the shared build arguments if @client is not specified.
        let client_args = &self.client.as_ref().unwrap_or(&self.shared).build_arguments;

        // Create the client build request
        let client = BuildRequest::new(client_args, workspace.clone()).await?;

        // Create the server build request if needed
        let mut server = None;
        if matches!(self.shared.fullstack, Some(true))
            || client.fullstack_feature_enabled()
            || self.server.is_some()
        {
            match self.server.as_mut() {
                Some(server_args) => {
                    // Make sure we set the client target here so @server knows to place its output into the @client target directory.
                    server_args.build_arguments.client_target = Some(client.main_target.clone());

                    // We don't override anything except the bundle format since @server usually implies a server output
                    server_args.build_arguments.bundle = server_args
                        .build_arguments
                        .bundle
                        .or(Some(BundleFormat::Server));

                    server = Some(
                        BuildRequest::new(&server_args.build_arguments, workspace.clone()).await?,
                    );
                }
                None if client_args.platform == Platform::Server => {
                    // If the user requests a server build with `--server`, then we don't need to build a separate server binary.
                    // There's no client to use, so even though fullstack is true, we only build the server.
                }
                None => {
                    // No explicit @server section: derive one from the shared arguments,
                    // forcing the server renderer/bundle and the host triple.
                    let mut args = self.shared.build_arguments.clone();
                    args.platform = crate::Platform::Server;
                    args.renderer = Some(crate::Renderer::Server);
                    args.bundle = Some(crate::BundleFormat::Server);
                    args.target = Some(target_lexicon::Triple::host());
                    server = Some(BuildRequest::new(&args, workspace.clone()).await?);
                }
            }
        }

        Ok(BuildTargets { client, server })
    }

    /// Build the client (and, for fullstack apps, the server) and return structured results.
    pub async fn build(self) -> Result<StructuredOutput> {
        tracing::info!("Building project...");

        let force_sequential = self.shared.force_sequential_build();
        let ssg = self.shared.ssg;
        // A "fat" binary is required for later hotpatching (`dx build-tools hotpatch`).
        let mode = match self.shared.fat_binary {
            true => BuildMode::Fat,
            false => BuildMode::Base { run: false },
        };
        let targets = self.into_targets().await?;

        let build_client = Self::build_client_inner(&targets.client, mode.clone());
        let build_server = Self::build_server_inner(&targets.server, mode.clone(), ssg);

        // Client and server build concurrently unless sequential mode is forced (e.g. on CI).
        let (client, server) = match force_sequential {
            true => (build_client.await, build_server.await),
            false => tokio::join!(build_client, build_server),
        };

        Ok(StructuredOutput::BuildsFinished {
            client: client?.into_structured_output(),
            server: server?.map(|s| s.into_structured_output()),
        })
    }

    /// Drive the client build to completion, logging on success.
    pub(crate) async fn build_client_inner(
        request: &BuildRequest,
        mode: BuildMode,
    ) -> Result<BuildArtifacts> {
        AppBuilder::started(request, mode, BuildId::PRIMARY)?
            .finish_build()
            .await
            .inspect(|_| {
                tracing::info!(path = ?request.root_dir(), "Client build completed successfully! 🚀");
            })
    }

    /// Drive the optional server build to completion, optionally pre-rendering
    /// static routes (SSG) afterwards. Returns `Ok(None)` when there is no server target.
    pub(crate) async fn build_server_inner(
        request: &Option<BuildRequest>,
        mode: BuildMode,
        ssg: bool,
    ) -> Result<Option<BuildArtifacts>> {
        let Some(server) = request.as_ref() else {
            return Ok(None);
        };

        // If the server is present, we need to build it as well
        let mut server_build = AppBuilder::started(server, mode, BuildId::SECONDARY)?;
        let server_artifacts = server_build.finish_build().await?;

        // Run SSG and cache static routes
        if ssg {
            crate::pre_render_static_routes(None, &mut server_build, None).await?;
        }

        tracing::info!(path = ?server.root_dir(), "Server build completed successfully! 🚀");

        Ok(Some(server_artifacts))
    }
}
impl BuildArtifacts {
    /// Convert the CLI's internal build artifacts into the stable wire format
    /// (`dioxus-dx-wire-format`) consumed by external tooling.
    pub(crate) fn into_structured_output(self) -> StructuredBuildArtifacts {
        // Pull the rustc invocation details out first; the remaining fields of
        // `self` are still accessible after this partial move.
        let rustc = self.direct_rustc;
        let assets = self.assets.unique_assets().cloned().collect();
        StructuredBuildArtifacts {
            path: self.root_dir,
            exe: self.exe,
            rustc_args: rustc.args,
            rustc_envs: rustc.envs,
            link_args: rustc.link_args,
            assets,
        }
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/translate.rs | packages/cli/src/cli/translate.rs | use super::*;
use crate::{Result, StructuredOutput};
use anyhow::bail;
use dioxus_rsx::{BodyNode, CallBody, TemplateBody};
/// Translate some source file into Dioxus code
// NOTE: the `///` doc comments below double as `dx translate --help` text via
// clap's derive.
#[derive(Clone, Debug, Parser)]
pub(crate) struct Translate {
    /// Wrap the output in a component function, splitting SVGs into an `icons` module
    // Fixed: previously said "Activate debug mode" — stale copy-paste from the clap
    // docs example. Short/long flags (-c, --component) are deduced from the field name.
    #[clap(short, long)]
    pub(crate) component: bool,
    /// Input file
    #[clap(short, long)]
    pub(crate) file: Option<String>,
    /// Raw HTML source to translate, passed directly on the command line
    // Fixed: previously duplicated the "Input file" help text from `file`.
    #[clap(short, long)]
    pub(crate) raw: Option<String>,
    /// Output file, stdout if not present
    #[arg(short, long)]
    pub(crate) output: Option<PathBuf>,
}
impl Translate {
    /// Run the translation: load the HTML input, convert it to RSX, and emit the result.
    pub(crate) fn translate(self) -> Result<StructuredOutput> {
        // Resolve the input source (file, raw string, or piped stdin).
        let contents = determine_input(self.file, self.raw)?;

        // Parsing fails fast on HTML the parser cannot understand.
        let dom = html_parser::Dom::parse(&contents)?;

        // Render the DOM as formatted RSX, optionally wrapped in a component shell.
        let html = convert_html_to_formatted_rsx(&dom, self.component);

        // todo(jon): we should probably use tracing out a different output format
        // right now we're just printing to stdout since some tools rely on that, but likely we don't want that
        // instead we should be printing as json (or maybe even a different format) if we're not interactive
        if let Some(output) = self.output {
            std::fs::write(output, &html)?;
        } else {
            print!("{html}");
        }

        Ok(StructuredOutput::HtmlTranslate { html })
    }
}
/// Convert a parsed HTML DOM into formatted RSX source.
///
/// When `component` is set, the output is wrapped in a full component function and
/// any inline SVGs are split out into an `icons` module; otherwise just the
/// formatted RSX block is returned.
pub fn convert_html_to_formatted_rsx(dom: &Dom, component: bool) -> String {
    let callbody = dioxus_rsx_rosetta::rsx_from_html(dom);
    if component {
        write_callbody_with_icon_section(callbody)
    } else {
        dioxus_autofmt::write_block_out(&callbody).unwrap()
    }
}
/// Format the callbody as a component, moving any `<svg>` nodes into a trailing
/// `icons` module so the main component stays readable.
fn write_callbody_with_icon_section(mut callbody: CallBody) -> String {
    // Pull the SVG nodes out of the tree; they are rendered separately below.
    let mut svgs = Vec::new();
    dioxus_rsx_rosetta::collect_svgs(&mut callbody.body.roots, &mut svgs);

    // Format whatever remains as the component body.
    let formatted = dioxus_autofmt::write_block_out(&callbody).unwrap();
    let mut out = write_component_body(formatted);

    // Only emit the icons module when something was actually extracted.
    if svgs.is_empty() {
        return out;
    }
    write_svg_section(&mut out, svgs);
    out
}
/// Wrap pre-formatted RSX in a `fn component() -> Element { rsx! { ... } }` shell.
fn write_component_body(raw: String) -> String {
    // Indent the RSX one level so it sits inside the `rsx! {` block.
    let mut body = String::new();
    indent_and_write(&raw, 1, &mut body);
    format!("fn component() -> Element {{\n    rsx! {{{body}    }})\n}}")
}
/// Append an `icons` module containing one `icon_N() -> Element` function per
/// extracted SVG node.
fn write_svg_section(out: &mut String, svgs: Vec<BodyNode>) {
    out.push_str("\n\nmod icons {");
    out.push_str("\n    use super::*;");
    for (idx, icon) in svgs.into_iter().enumerate() {
        // Re-format each SVG node as its own standalone RSX block.
        let body = CallBody::new(TemplateBody::new(vec![icon]));
        let raw = dioxus_autofmt::write_block_out(&body).unwrap();
        out.push_str(&format!(
            "\n\n    pub(crate) fn icon_{idx}() -> Element {{\n        rsx! {{"
        ));
        indent_and_write(&raw, 2, out);
        out.push_str("        })\n    }");
    }
    out.push_str("\n}");
}
/// Append `raw` to `out`, prefixing every line with `idx` levels of four-space
/// indentation and terminating each line with a newline.
fn indent_and_write(raw: &str, idx: usize, out: &mut String) {
    let pad = "    ".repeat(idx);
    for line in raw.lines() {
        out.push_str(&pad);
        out.push_str(line);
        out.push('\n');
    }
}
/// Resolve the translation input from `--file`, `--raw`, or piped stdin.
///
/// At most one of `file`/`raw` may be provided. When neither is given, stdin is
/// read — but only if it is not an interactive terminal (so `dx translate` with
/// no input fails fast instead of blocking).
///
/// # Errors
/// - both `--file` and `--raw` were specified
/// - no input source is available
/// - the file or stdin cannot be read
fn determine_input(file: Option<String>, raw: Option<String>) -> Result<String> {
    use std::io::IsTerminal as _;

    // Make sure not both are specified
    if file.is_some() && raw.is_some() {
        bail!("Only one of --file or --raw should be specified.");
    }

    if let Some(raw) = raw {
        return Ok(raw);
    }

    if let Some(file) = file {
        return Ok(std::fs::read_to_string(file)?);
    }

    // If neither exist, we try to read from stdin
    if std::io::stdin().is_terminal() {
        bail!("No input file, source, or stdin to translate from.");
    }

    let mut buffer = String::new();
    // Propagate read failures (e.g. invalid UTF-8 on stdin) instead of panicking;
    // mirrors the `?` used for the file path above.
    std::io::stdin().read_to_string(&mut buffer)?;
    Ok(buffer.trim().to_string())
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/platform_override.rs | packages/cli/src/cli/platform_override.rs | #![allow(dead_code)]
use crate::Anonymized;
use clap::parser::ValueSource;
use clap::{ArgMatches, Args, CommandFactory, FromArgMatches, Parser, Subcommand};
use serde_json::{json, Value};
/// Wraps a component with the subcommands `@server` and `@client` which will let you override the
/// base arguments for the client and server instances.
#[derive(Debug, Clone, Default)]
pub struct CommandWithPlatformOverrides<T> {
    /// The arguments that are shared between the client and server
    pub shared: T,
    // `server`/`client` are `None` unless an explicit `@server`/`@client` section
    // was given on the command line; when present they already contain `shared`
    // overlaid with the section's explicitly-set flags (see `merge_matches`).
    /// The merged arguments for the server
    pub server: Option<T>,
    /// The merged arguments for the client
    pub client: Option<T>,
}
impl<T> CommandWithPlatformOverrides<T> {
    /// Run `f` against the client-specific arguments, falling back to the shared
    /// arguments when no `@client` override was provided.
    pub(crate) fn with_client_or_shared<'a, O>(&'a self, f: impl FnOnce(&'a T) -> O) -> O {
        f(self.client.as_ref().unwrap_or(&self.shared))
    }

    /// Run `f` against the server-specific arguments, falling back to the shared
    /// arguments when no `@server` override was provided.
    pub(crate) fn with_server_or_shared<'a, O>(&'a self, f: impl FnOnce(&'a T) -> O) -> O {
        f(self.server.as_ref().unwrap_or(&self.shared))
    }
}
// Telemetry serialization: each section is anonymized independently through `T`'s impl.
impl<T> Anonymized for CommandWithPlatformOverrides<T>
where
    T: Anonymized,
{
    fn anonymized(&self) -> Value {
        json!({
            "shared": self.shared.anonymized(),
            "server": self.server.as_ref().map(|s| s.anonymized()),
            "client": self.client.as_ref().map(|c| c.anonymized()),
        })
    }
}
// `Parser` has no required items — all behavior comes from the `CommandFactory`
// and `FromArgMatches` impls below.
impl<T: CommandFactory + Args> Parser for CommandWithPlatformOverrides<T> {}
// Delegate command construction to the wrapped argument type; the wrapper itself
// adds no top-level flags (the `@client`/`@server` subcommands are attached via
// `Args::augment_args` below).
impl<T: CommandFactory + Args> CommandFactory for CommandWithPlatformOverrides<T> {
    fn command() -> clap::Command {
        T::command()
    }
    fn command_for_update() -> clap::Command {
        T::command_for_update()
    }
}
impl<T> Args for CommandWithPlatformOverrides<T>
where
    T: Args,
{
    fn augment_args(cmd: clap::Command) -> clap::Command {
        // First add T's own arguments, then lazily attach the `@client`/`@server`
        // subcommands. `defer` keeps the recursive subcommand definition from
        // being expanded eagerly.
        T::augment_args(cmd).defer(|cmd| {
            PlatformOverrides::<Self>::augment_subcommands(cmd.disable_help_subcommand(true))
        })
    }
    // Only required for clap's `update_from` flow, which this CLI never uses.
    fn augment_args_for_update(_cmd: clap::Command) -> clap::Command {
        unimplemented!()
    }
}
/// Overlay platform-specific (`@client`/`@server`) matches on top of the base matches.
///
/// Clap fills unset flags with their default values, so a naive merge would let a
/// platform section's *defaults* clobber values the user set explicitly at the
/// base level. Any argument in the platform matches whose value came from a
/// default is therefore cleared before merging.
fn merge_matches<T: Args>(base: &ArgMatches, platform: &ArgMatches) -> Result<T, clap::Error> {
    let mut base = T::from_arg_matches(base)?;
    let mut platform = platform.clone();
    // Collect ids up front so we can mutate `platform` while iterating.
    let original_ids: Vec<_> = platform.ids().cloned().collect();
    for arg_id in original_ids {
        let arg_name = arg_id.as_str();
        // Remove any default values from the platform matches
        if platform.value_source(arg_name) == Some(ValueSource::DefaultValue) {
            _ = platform.try_clear_id(arg_name);
        }
    }
    // Then merge the stripped platform matches into the base matches
    base.update_from_arg_matches(&platform)?;
    Ok(base)
}
impl<T> FromArgMatches for CommandWithPlatformOverrides<T>
where
    T: Args,
{
    fn from_arg_matches(matches: &ArgMatches) -> Result<Self, clap::Error> {
        let mut client = None;
        let mut server = None;
        // `@client` and `@server` sections can be chained one after the other, so
        // walk the whole subcommand chain and remember the matches for each side.
        let mut subcommand = matches.subcommand();
        while let Some((name, sub_matches)) = subcommand {
            match name {
                "@client" => client = Some(sub_matches),
                "@server" => server = Some(sub_matches),
                _ => {}
            }
            subcommand = sub_matches.subcommand();
        }

        // The shared arguments come straight from the top-level matches...
        let shared = T::from_arg_matches(matches)?;
        // ...and each platform section is the shared args overlaid with that
        // section's explicitly-set flags (defaults stripped by `merge_matches`).
        let client = client
            .map(|client| merge_matches::<T>(matches, client))
            .transpose()?;
        let server = server
            .map(|server| merge_matches::<T>(matches, server))
            .transpose()?;
        Ok(Self {
            shared,
            server,
            client,
        })
    }
    // Only required for clap's `update_from` flow, which this CLI never uses.
    fn update_from_arg_matches(&mut self, _matches: &ArgMatches) -> Result<(), clap::Error> {
        unimplemented!()
    }
}
// The `@client`/`@server` pseudo-subcommands. `subcommand_precedence_over_arg`
// lets a section start even where a trailing positional argument would otherwise
// swallow the `@...` token.
#[derive(Debug, Subcommand, Clone)]
#[command(subcommand_precedence_over_arg = true)]
pub(crate) enum PlatformOverrides<T: Args> {
    /// Specify the arguments for the client build
    #[clap(name = "@client")]
    Client(ChainedCommand<T, PlatformOverrides<T>>),
    /// Specify the arguments for the server build
    #[clap(name = "@server")]
    Server(ChainedCommand<T, PlatformOverrides<T>>),
}
// https://github.com/clap-rs/clap/issues/2222#issuecomment-2524152894
//
//
/// `[Args]` wrapper to match `T` variants recursively in `U`.
#[derive(Debug, Clone)]
pub struct ChainedCommand<T, U> {
    /// Specific Variant.
    pub inner: T,
    /// Enum containing `Self<T>` variants, in other words possible follow-up commands.
    // Boxed because `U` recursively contains `ChainedCommand` and would otherwise
    // have infinite size.
    pub next: Option<Box<U>>,
}
impl<T, U> Args for ChainedCommand<T, U>
where
    T: Args,
    U: Subcommand,
{
    fn augment_args(cmd: clap::Command) -> clap::Command {
        // We use the special `defer` method which lets us recursively call `augment_args` on the inner command
        // and thus `from_arg_matches`
        T::augment_args(cmd).defer(|cmd| U::augment_subcommands(cmd.disable_help_subcommand(true)))
    }
    // Only required for clap's `update_from` flow, which this CLI never uses.
    fn augment_args_for_update(_cmd: clap::Command) -> clap::Command {
        unimplemented!()
    }
}
// Parsing never goes through this impl — `CommandWithPlatformOverrides` walks the
// raw `ArgMatches` itself — but the trait must exist for the `Subcommand` derive
// on `PlatformOverrides` to compile.
impl<T, U> FromArgMatches for ChainedCommand<T, U>
where
    T: Args,
    U: Subcommand,
{
    fn from_arg_matches(_: &ArgMatches) -> Result<Self, clap::Error> {
        unimplemented!()
    }
    fn update_from_arg_matches(&mut self, _matches: &ArgMatches) -> Result<(), clap::Error> {
        unimplemented!()
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/mod.rs | packages/cli/src/cli/mod.rs | pub(crate) mod autoformat;
pub(crate) mod build;
pub(crate) mod build_assets;
pub(crate) mod bundle;
pub(crate) mod check;
pub(crate) mod component;
pub(crate) mod config;
pub(crate) mod create;
pub(crate) mod doctor;
pub(crate) mod hotpatch;
pub(crate) mod init;
pub(crate) mod link;
pub(crate) mod platform_override;
pub(crate) mod print;
pub(crate) mod run;
pub(crate) mod serve;
pub(crate) mod target;
pub(crate) mod translate;
pub(crate) mod update;
pub(crate) mod verbosity;
pub(crate) use build::*;
pub(crate) use serve::*;
pub(crate) use target::*;
pub(crate) use verbosity::*;
use crate::platform_override::CommandWithPlatformOverrides;
use crate::Anonymized;
use crate::{error::Result, Error, StructuredOutput};
use clap::builder::styling::{AnsiColor, Effects, Style, Styles};
use clap::{Parser, Subcommand};
use html_parser::Dom;
use serde::Deserialize;
use serde_json::{json, Value};
use std::sync::LazyLock;
use std::{
fmt::Display,
fs::File,
io::{Read, Write},
path::PathBuf,
process::Command,
};
/// Dioxus: build web, desktop, and mobile apps with a single codebase.
#[derive(Parser)]
#[clap(name = "dioxus", version = VERSION.as_str())]
#[clap(styles = CARGO_STYLING)]
pub(crate) struct Cli {
    // The subcommand to execute (`dx build`, `dx serve`, ...).
    #[command(subcommand)]
    pub(crate) action: Commands,
    // Global logging/manifest flags, available on every subcommand.
    #[command(flatten)]
    pub(crate) verbosity: Verbosity,
}
// NOTE: the `///` doc comments below double as `dx --help` text via clap's derive.
#[derive(Subcommand)]
pub(crate) enum Commands {
    /// Create a new Dioxus project.
    #[clap(name = "new")]
    New(create::Create),
    /// Build, watch, and serve the project.
    #[clap(name = "serve")]
    Serve(serve::ServeArgs),
    /// Bundle the Dioxus app into a shippable object.
    #[clap(name = "bundle")]
    Bundle(bundle::Bundle),
    /// Build the Dioxus project and all of its assets.
    // Wrapped so `dx build` accepts `@client`/`@server` section overrides.
    #[clap(name = "build")]
    Build(CommandWithPlatformOverrides<build::BuildArgs>),
    /// Run the project without any hotreloading.
    #[clap(name = "run")]
    Run(run::RunArgs),
    /// Init a new project for Dioxus in the current directory (by default).
    /// Will attempt to keep your project in a good state.
    #[clap(name = "init")]
    Init(init::Init),
    /// Diagnose installed tools and system configuration.
    #[clap(name = "doctor")]
    Doctor(doctor::Doctor),
    /// Print project information in a structured format, like cargo args, linker args, and other
    /// flags DX sets that might be useful in third-party tools.
    #[clap(name = "print")]
    #[clap(subcommand)]
    Print(print::Print),
    /// Translate a source file into Dioxus code.
    #[clap(name = "translate")]
    Translate(translate::Translate),
    /// Automatically format RSX.
    #[clap(name = "fmt")]
    Autoformat(autoformat::Autoformat),
    /// Check the project for any issues.
    #[clap(name = "check")]
    Check(check::Check),
    /// Dioxus config file controls.
    #[clap(subcommand)]
    #[clap(name = "config")]
    Config(config::Config),
    /// Update the Dioxus CLI to the latest version.
    #[clap(name = "self-update")]
    SelfUpdate(update::SelfUpdate),
    /// Run a dioxus build tool. IE `build-assets`, `hotpatch`, etc
    #[clap(name = "tools")]
    #[clap(subcommand)]
    Tools(BuildTools),
    /// Manage components from the `dioxus-component` registry.
    #[clap(name = "components")]
    #[clap(subcommand)]
    Components(component::ComponentCommand),
}
// Internal build tooling surfaced under `dx tools`.
#[allow(clippy::large_enum_variant)]
#[derive(Subcommand)]
pub enum BuildTools {
    /// Build the assets for a specific target.
    #[clap(name = "assets")]
    BuildAssets(build_assets::BuildAssets),
    /// Hotpatch the "tip" of a given "fat" binary. The output here must be from the `dx build` command with "fat" enabled
    #[clap(name = "hotpatch")]
    HotpatchTip(hotpatch::HotpatchTip),
}
/// The CLI's version string: crate version plus the short git commit hash it was
/// built from (or a fallback note when built outside a git checkout). Computed
/// lazily on first access and reused for `dx --version`.
pub(crate) static VERSION: LazyLock<String> = LazyLock::new(|| {
    format!(
        "{} ({})",
        crate::dx_build_info::PKG_VERSION,
        crate::dx_build_info::GIT_COMMIT_HASH_SHORT.unwrap_or("was built without git repository")
    )
});
/// Cargo's color style
/// [source](https://github.com/crate-ci/clap-cargo/blob/master/src/style.rs)
// Applied to the top-level `Cli` so `dx --help` matches cargo's look and feel.
pub(crate) const CARGO_STYLING: Styles = Styles::styled()
    .header(styles::HEADER)
    .usage(styles::USAGE)
    .literal(styles::LITERAL)
    .placeholder(styles::PLACEHOLDER)
    .error(styles::ERROR)
    .valid(styles::VALID)
    .invalid(styles::INVALID);
// ANSI style constants shared between clap's help output and our log formatting.
pub mod styles {
    use super::*;
    pub(crate) const HEADER: Style = AnsiColor::Green.on_default().effects(Effects::BOLD);
    pub(crate) const USAGE: Style = AnsiColor::Green.on_default().effects(Effects::BOLD);
    pub(crate) const LITERAL: Style = AnsiColor::Cyan.on_default().effects(Effects::BOLD);
    pub(crate) const PLACEHOLDER: Style = AnsiColor::Cyan.on_default();
    pub(crate) const ERROR: Style = AnsiColor::Red.on_default().effects(Effects::BOLD);
    pub(crate) const VALID: Style = AnsiColor::Cyan.on_default().effects(Effects::BOLD);
    pub(crate) const INVALID: Style = AnsiColor::Yellow.on_default().effects(Effects::BOLD);
    // extra styles for styling logs
    // we can style stuff using the ansi sequences like: "hotpatched in {GLOW_STYLE}{}{GLOW_STYLE:#}ms"
    pub(crate) const GLOW_STYLE: Style = AnsiColor::Yellow.on_default();
    pub(crate) const NOTE_STYLE: Style = AnsiColor::Green.on_default();
    pub(crate) const LINK_STYLE: Style = AnsiColor::Blue.on_default();
    pub(crate) const ERROR_STYLE: Style = AnsiColor::Red.on_default();
    // 256-color grey, used for de-emphasized hint text.
    pub(crate) const HINT_STYLE: Style = clap::builder::styling::Ansi256Color(244).on_default();
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/verbosity.rs | packages/cli/src/cli/verbosity.rs | use clap::Parser;
use std::path::PathBuf;
// All flags here are `global`, so they may appear after any subcommand.
// NOTE: the `///` doc comments below double as `--help` text via clap's derive.
#[derive(Parser, Clone, Debug, Default)]
pub struct Verbosity {
    /// Use verbose output [default: false]
    #[clap(long, global = true)]
    pub(crate) verbose: bool,
    /// Use trace output [default: false]
    #[clap(long, global = true)]
    pub(crate) trace: bool,
    /// Output logs in JSON format
    #[clap(long, global = true)]
    pub(crate) json_output: bool,
    /// Write *all* logs to a file
    #[clap(long, global = true, help_heading = "Logging Options")]
    pub(crate) log_to_file: Option<PathBuf>,
    // The three flags below mirror cargo's manifest options of the same names.
    /// Assert that `Cargo.lock` will remain unchanged
    #[clap(long, global = true, help_heading = "Manifest Options")]
    pub(crate) locked: bool,
    /// Run without accessing the network
    #[clap(long, global = true, help_heading = "Manifest Options")]
    pub(crate) offline: bool,
    /// Equivalent to specifying both --locked and --offline
    #[clap(long, global = true, help_heading = "Manifest Options")]
    pub(crate) frozen: bool,
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/run.rs | packages/cli/src/cli/run.rs | use super::*;
use crate::{
serve::{AppServer, ServeUpdate, WebServer},
BuilderUpdate, BundleFormat, Result,
};
use anyhow::bail;
use dioxus_dx_wire_format::BuildStage;
/// Run the project with the given arguments
///
/// This is a shorthand for `dx serve` with interactive mode and hot-reload disabled.
///
/// Unlike `dx serve`, errors during build and run will cascade out as an error, rather than being
/// handled by the TUI, making it more suitable for scripting, automation, or CI/CD pipelines.
#[derive(Clone, Debug, Parser)]
pub(crate) struct RunArgs {
    /// Information about the target to build
    // `dx run` accepts the full `dx serve` argument surface; serve-only features
    // (TUI, watcher, hot-reload) are force-disabled inside `run()`.
    #[clap(flatten)]
    pub(crate) args: ServeArgs,
}
impl RunArgs {
    /// Build and run the app once, with the TUI, watcher, and hot-reload disabled.
    ///
    /// Build and runtime failures are returned as `Err` rather than being handled
    /// interactively, which makes this path suitable for scripts and CI.
    pub(crate) async fn run(mut self) -> Result<StructuredOutput> {
        // Override the build arguments, leveraging our serve infrastructure.
        //
        // We want to turn off the fancy stuff like the TUI, watcher, and hot-reload, but leave logging
        // and other things like the devserver on.
        self.args.hot_patch = false;
        self.args.interactive = Some(false);
        self.args.hot_reload = Some(false);
        self.args.watch = Some(false);

        let mut builder = AppServer::new(self.args).await?;
        let mut devserver = WebServer::start(&builder)?;
        builder.initialize();

        // Pump events from the builder and the devserver until the app exits,
        // the build fails, or the devserver requests an exit.
        loop {
            let msg = tokio::select! {
                msg = builder.wait() => msg,
                msg = devserver.wait() => msg,
            };

            match msg {
                ServeUpdate::BuilderUpdate { id, update } => {
                    // NOTE(review): assumes `id` always maps to a registered build — panics otherwise.
                    let bundle_format = builder.get_build(id).unwrap().build.bundle;

                    // And then update the websocketed clients with the new build status in case they want it
                    devserver.new_build_update(&update).await;

                    // Finally, we also want to update the builder with the new update
                    builder.new_build_update(&update, &devserver).await;

                    // And then open the app if it's ready
                    match update {
                        BuilderUpdate::BuildReady { bundle } => {
                            // Failure to open is logged (with telemetry) but not fatal.
                            _ = builder
                                .open(&bundle, &mut devserver)
                                .await
                                .inspect_err(|e| {
                                    tracing::error!(
                                        telemetry = %serde_json::json!({ "event": "failed_to_open_app_run" }),
                                        "Failed to open app: {e}"
                                    );
                                });

                            if bundle_format == BundleFormat::Web {
                                tracing::info!(
                                    "Serving app at http://{}:{}",
                                    builder.devserver_bind_ip,
                                    builder.devserver_port
                                );
                            }
                        }
                        // Translate build-stage progress into plain log lines
                        // (the serve TUI normally renders these).
                        BuilderUpdate::Progress { stage } => match stage {
                            BuildStage::Initializing => {
                                tracing::info!("[{bundle_format}] Initializing build")
                            }
                            BuildStage::Starting { .. } => {}
                            BuildStage::InstallingTooling => {}
                            BuildStage::Compiling {
                                current,
                                total,
                                krate,
                            } => {
                                tracing::debug!(
                                    "[{bundle_format}] ({current}/{total}) Compiling {krate} ",
                                )
                            }
                            BuildStage::RunningBindgen => {
                                tracing::info!("[{bundle_format}] Running WASM bindgen")
                            }
                            BuildStage::SplittingBundle => {}
                            BuildStage::OptimizingWasm => {
                                tracing::info!("[{bundle_format}] Optimizing WASM with `wasm-opt`")
                            }
                            BuildStage::Linking => tracing::info!("Linking app"),
                            BuildStage::Hotpatching => {}
                            BuildStage::CopyingAssets {
                                current,
                                total,
                                path,
                            } => tracing::info!(
                                "[{bundle_format}] Copying asset {} ({current}/{total})",
                                path.file_name()
                                    .map(|f| f.to_string_lossy())
                                    .unwrap_or_default(),
                            ),
                            BuildStage::Bundling => {
                                tracing::info!("[{bundle_format}] Bundling app")
                            }
                            BuildStage::RunningGradle => {
                                tracing::info!("[{bundle_format}] Running Gradle")
                            }
                            BuildStage::CodeSigning => {
                                tracing::info!("[{bundle_format}] Code signing app")
                            }
                            BuildStage::Success => {}
                            BuildStage::Restarting => {}
                            BuildStage::CompressingAssets => {}
                            BuildStage::ExtractingAssets => {}
                            BuildStage::Prerendering => {
                                tracing::info!("[{bundle_format}] Prerendering app")
                            }
                            // Failed/aborted builds cascade out as errors in run mode.
                            BuildStage::Failed => {
                                tracing::error!("[{bundle_format}] Build failed");
                                bail!("Build failed for platform: {bundle_format}");
                            }
                            BuildStage::Aborted => {
                                tracing::error!("[{bundle_format}] Build aborted");
                                bail!("Build aborted for platform: {bundle_format}");
                            }
                            // Remaining stages produce no output.
                            _ => {}
                        },
                        BuilderUpdate::CompilerMessage { message } => {
                            print!("{message}");
                        }
                        BuilderUpdate::BuildFailed { err } => {
                            tracing::error!("❌ Build failed: {}", err);
                            return Err(err);
                        }
                        // Forward the running app's stdout/stderr through our logger.
                        BuilderUpdate::StdoutReceived { msg } => {
                            tracing::info!("[{bundle_format}] {msg}");
                        }
                        BuilderUpdate::StderrReceived { msg } => {
                            tracing::error!("[{bundle_format}] {msg}");
                        }
                        BuilderUpdate::ProcessExited { status } => {
                            if !status.success() {
                                tracing::error!(
                                    "Application [{bundle_format}] exited with error: {status}"
                                );
                                bail!("Application [{bundle_format}] exited with error: {status}");
                            }
                            // Clean exit of the app ends `dx run` successfully.
                            break;
                        }
                        BuilderUpdate::ProcessWaitFailed { err } => {
                            return Err(err.into());
                        }
                    }
                }
                ServeUpdate::Exit { .. } => break,
                // Serve-only events are irrelevant in run mode and deliberately ignored.
                ServeUpdate::NewConnection { .. } => {}
                ServeUpdate::WsMessage { .. } => {}
                ServeUpdate::FilesChanged { .. } => {}
                ServeUpdate::OpenApp => {}
                ServeUpdate::RequestRebuild => {}
                ServeUpdate::ToggleShouldRebuild => {}
                ServeUpdate::OpenDebugger { .. } => {}
                ServeUpdate::Redraw => {}
                ServeUpdate::TracingLog { .. } => {}
            }
        }

        Ok(StructuredOutput::Success)
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/init.rs | packages/cli/src/cli/init.rs | use super::*;
use cargo_generate::{GenerateArgs, TemplatePath, Vcs};
// NOTE: the `///` doc comments below double as `dx init --help` text via clap's derive.
#[derive(Clone, Debug, Default, Deserialize, Parser)]
#[clap(name = "init")]
pub struct Init {
    /// Create a new Dioxus project at PATH
    #[arg(default_value = ".")]
    pub path: PathBuf,
    /// Project name. Defaults to directory name
    #[arg(short, long)]
    pub name: Option<String>,
    /// Template path
    #[clap(short, long)]
    pub template: Option<String>,
    // `branch`, `revision`, and `tag` are mutually-exclusive git selectors,
    // enforced by clap via `conflicts_with_all`.
    /// Branch to select when using `template` from a git repository.
    /// Mutually exclusive with: `--revision`, `--tag`.
    #[clap(long, conflicts_with_all(["revision", "tag"]))]
    pub branch: Option<String>,
    /// A commit hash to select when using `template` from a git repository.
    /// Mutually exclusive with: `--branch`, `--tag`.
    #[clap(long, conflicts_with_all(["branch", "tag"]))]
    pub revision: Option<String>,
    /// Tag to select when using `template` from a git repository.
    /// Mutually exclusive with: `--branch`, `--revision`.
    #[clap(long, conflicts_with_all(["branch", "revision"]))]
    pub tag: Option<String>,
    /// Specify a sub-template within the template repository to be used as the actual template
    #[clap(long)]
    pub subtemplate: Option<String>,
    /// Pass `<option>=<value>` for the used template (e.g., `foo=bar`)
    #[clap(short, long)]
    pub option: Vec<String>,
    /// Skip user interaction by using the default values for the used template.
    /// Default values can be overridden with `--option`
    #[clap(short, long)]
    pub yes: bool,
    /// Specify the VCS used to initialize the generated template.
    /// Options: `git`, `none`.
    #[arg(long, value_parser)]
    pub vcs: Option<Vcs>,
}
impl Init {
    /// Scaffold a Dioxus project into the target directory via `cargo-generate`.
    pub async fn init(mut self) -> Result<StructuredOutput> {
        // Project name defaults to directory name.
        if self.name.is_none() {
            self.name = Some(create::name_from_path(&self.path)?);
        }

        // Perform a connectivity check so we don't just sit around doing nothing if there's a network error
        if self.template.is_none() {
            create::check_connectivity().await?;
        }

        // If no template is specified, use the default one and set the branch to the latest release.
        create::resolve_template_and_branch(&mut self.template, &mut self.branch);

        // cargo-generate requires the path to be created first.
        std::fs::create_dir_all(&self.path)?;

        let args = GenerateArgs {
            define: self.option,
            destination: Some(self.path),
            // `init: true` generates into the existing directory rather than a new one.
            init: true,
            name: self.name,
            silent: self.yes,
            vcs: self.vcs,
            template_path: TemplatePath {
                auto_path: self.template,
                branch: self.branch,
                revision: self.revision,
                subfolder: self.subtemplate,
                tag: self.tag,
                ..Default::default()
            },
            ..Default::default()
        };
        let path = cargo_generate::generate(args)?;

        // Best-effort post-processing (result deliberately ignored).
        _ = create::post_create(&path, &self.vcs.unwrap_or(Vcs::Git));

        Ok(StructuredOutput::Success)
    }
}
// todo: re-enable these tests with better parallelization
//
// #[cfg(test)]
// mod tests {
// use std::{fs::create_dir_all, process::Command};
// use tempfile::tempdir;
// use super::create::tests::*;
// // Note: tests below (at least 6 of them) were written to mainly test
// // correctness of project's directory and its name, because previously it
// // was broken and tests bring a peace of mind. And also so that I don't have
// // to run my local hand-made tests every time.
// fn subcommand_init() -> Command {
// subcommand("init")
// }
// #[test]
// fn test_subcommand_init_with_default_path() -> Result<()> {
// let project_dir = "dir";
// let project_name = project_dir;
// let temp_dir = tempdir()?;
// // Make current dir's name deterministic.
// let current_dir = temp_dir.path().join(project_dir);
// create_dir_all(&current_dir)?;
// let project_path = &current_dir;
// assert!(project_path.exists());
// assert!(subcommand_init().current_dir(&current_dir).status().is_ok());
// let cargo_toml_path = get_cargo_toml_path(project_path);
// assert!(cargo_toml_path.exists());
// assert_eq!(get_project_name(&cargo_toml_path)?, project_name);
// Ok(())
// }
// #[test]
// fn test_subcommand_init_with_1_dir_path() -> Result<()> {
// let project_dir = "dir";
// let project_name = project_dir;
// let current_dir = tempdir()?;
// assert!(subcommand_init()
// .arg(project_dir)
// .current_dir(&current_dir)
// .status()
// .is_ok());
// let project_path = current_dir.path().join(project_dir);
// let cargo_toml_path = get_cargo_toml_path(&project_path);
// assert!(project_path.exists());
// assert!(cargo_toml_path.exists());
// assert_eq!(get_project_name(&cargo_toml_path)?, project_name);
// Ok(())
// }
// #[test]
// fn test_subcommand_init_with_2_dir_path() -> Result<()> {
// let project_dir = "a/b";
// let project_name = "b";
// let current_dir = tempdir()?;
// assert!(subcommand_init()
// .arg(project_dir)
// .current_dir(¤t_dir)
// .status()
// .is_ok());
// let project_path = current_dir.path().join(project_dir);
// let cargo_toml_path = get_cargo_toml_path(&project_path);
// assert!(project_path.exists());
// assert!(cargo_toml_path.exists());
// assert_eq!(get_project_name(&cargo_toml_path)?, project_name);
// Ok(())
// }
// #[test]
// fn test_subcommand_init_with_default_path_and_custom_name() -> Result<()> {
// let project_dir = "dir";
// let project_name = "project";
// let temp_dir = tempdir()?;
// // Make current dir's name deterministic.
// let current_dir = temp_dir.path().join(project_dir);
// create_dir_all(¤t_dir)?;
// let project_path = ¤t_dir;
// assert!(project_path.exists());
// assert!(subcommand_init()
// .arg("--name")
// .arg(project_name)
// .current_dir(¤t_dir)
// .status()
// .is_ok());
// let cargo_toml_path = get_cargo_toml_path(project_path);
// assert!(cargo_toml_path.exists());
// assert_eq!(get_project_name(&cargo_toml_path)?, project_name);
// Ok(())
// }
// #[test]
// fn test_subcommand_init_with_1_dir_path_and_custom_name() -> Result<()> {
// let project_dir = "dir";
// let project_name = "project";
// let current_dir = tempdir()?;
// assert!(subcommand_init()
// .arg(project_dir)
// .arg("--name")
// .arg(project_name)
// .current_dir(¤t_dir)
// .status()
// .is_ok());
// let project_path = current_dir.path().join(project_dir);
// let cargo_toml_path = get_cargo_toml_path(&project_path);
// assert!(project_path.exists());
// assert!(cargo_toml_path.exists());
// assert_eq!(get_project_name(&cargo_toml_path)?, project_name);
// Ok(())
// }
// #[test]
// fn test_subcommand_init_with_2_dir_path_and_custom_name() -> Result<()> {
// let project_dir = "a/b";
// let project_name = "project";
// let current_dir = tempdir()?;
// assert!(subcommand_init()
// .arg(project_dir)
// .arg("--name")
// .arg(project_name)
// .current_dir(¤t_dir)
// .status()
// .is_ok());
// let project_path = current_dir.path().join(project_dir);
// let cargo_toml_path = get_cargo_toml_path(&project_path);
// assert!(project_path.exists());
// assert!(cargo_toml_path.exists());
// assert_eq!(get_project_name(&cargo_toml_path)?, project_name);
// Ok(())
// }
// }
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/doctor.rs | packages/cli/src/cli/doctor.rs | use super::*;
use crate::{Result, Workspace};
use anyhow::{bail, Context};
use itertools::Itertools;
/// Perform a system analysis to verify the system install is working correctly.
///
/// `dx doctor` takes no arguments; all probing happens in `Doctor::doctor`.
#[derive(Clone, Debug, Parser)]
pub(crate) struct Doctor {}
impl Doctor {
    /// Probe the local environment for every tool Dioxus depends on and print a
    /// human-readable report.
    ///
    /// Every probe is best-effort: anything that cannot be located is reported as
    /// "not found" (or ❌ for missing rustup target toolchains) rather than
    /// failing the whole command, so the report always prints in full.
    pub async fn doctor(self) -> Result<StructuredOutput> {
        // Rust compiler info. `rustlib` (sysroot/lib/rustlib) is used further
        // down to detect which cross-compilation targets are installed.
        let mut rustc_version = "not found".to_string();
        let mut rustc_sysroot = "not found".to_string();
        let mut rustlib = PathBuf::from(".");
        if let Ok(r) = Workspace::get_rustc_sysroot().await {
            rustlib = PathBuf::from(r.as_str()).join("lib").join("rustlib");
            rustc_sysroot = r;
        }
        if let Ok(r) = Workspace::get_rustc_version().await {
            rustc_version = r;
        }

        // wasm-opt. Match directly on the lookup result — the previous binding
        // plus `.clone()` was a redundant allocation (clippy: redundant_clone).
        let wasm_opt_message = match crate::wasm_opt::installed_location() {
            Some(path) => path.to_string_lossy().to_string(),
            None => "not installed".into(),
        };

        // wasm-bindgen: managed automatically unless the workspace pins a version.
        let mut wbg_version_msg = "automatically managed".to_string();
        let mut wasm_bindgen_location = "automatically managed".to_string();
        if let Ok(workspace) = Workspace::current().await {
            let wbg_version = workspace.wasm_bindgen_version();
            if let Some(vers) = &wbg_version {
                wbg_version_msg = vers.to_string();
                wasm_bindgen_location =
                    match crate::wasm_bindgen::WasmBindgen::new(vers).get_binary_path() {
                        Ok(path) => path.to_string_lossy().to_string(),
                        Err(err) => err.to_string().lines().join(""),
                    };
            }
        }

        // Look for the Dioxus extension inside an editor's extension directory
        // under the user's home dir (e.g. `~/.vscode/extensions`).
        fn has_dioxus_ext(editor_dir: &str) -> anyhow::Result<PathBuf> {
            let home = dirs::home_dir().context("no home dir")?;
            let exts = home.join(editor_dir).join("extensions");
            for dir in exts.read_dir()?.flatten() {
                if dir
                    .file_name()
                    .to_string_lossy()
                    .contains("dioxuslabs.dioxus-")
                {
                    return Ok(dir.path());
                }
            }
            bail!("not found")
        }

        // Editors
        let vscode_ext = has_dioxus_ext(".vscode");
        let vscode_ext_msg = match vscode_ext.as_ref() {
            Ok(path) => path.to_string_lossy().to_string(),
            Err(_) => "not found".to_string(),
        };
        let vscode_insiders_ext = has_dioxus_ext(".vscode-insiders");
        let vscode_insiders_ext_msg = match vscode_insiders_ext.as_ref() {
            Ok(path) => path.to_string_lossy().to_string(),
            Err(_) => "not found".to_string(),
        };
        let cursor_ext = has_dioxus_ext(".cursor");
        let cursor_ext_msg = match cursor_ext.as_ref() {
            Ok(path) => path.to_string_lossy().to_string(),
            Err(_) => "not found".to_string(),
        };

        // Tailwind — checked in order, so a v4 binary wins over v3 if both exist.
        let mut tailwindcss = "not found".to_string();
        if let Ok(path) = crate::tailwind::TailwindCli::v3().get_binary_path() {
            tailwindcss = path.display().to_string();
        }
        if let Ok(path) = crate::tailwind::TailwindCli::v4().get_binary_path() {
            tailwindcss = path.display().to_string();
        }

        // Android tooling. The original code probed `java_home` twice; the
        // duplicate block has been removed.
        let mut adb = "not found".to_string();
        let mut ndk = "not found".to_string();
        let mut sdk = "not found".to_string();
        let mut java_home = "not found".to_string();
        let mut emulator = "not found".to_string();
        if let Some(rf) = crate::build::get_android_tools() {
            if rf.adb.exists() {
                adb = rf.adb.display().to_string();
            }
            if rf.ndk.exists() {
                ndk = rf.ndk.display().to_string();
            }
            if let Some(jh) = rf.java_home.as_ref() {
                java_home = jh.display().to_string();
            }
            if rf.sdk().exists() {
                sdk = rf.sdk().display().to_string();
            }
            if rf.emulator().exists() {
                emulator = rf.emulator().display().to_string();
            }
        };

        // iOS/macOS tooling: XCode install, Simulator.app, and the various
        // command-line helpers resolved from PATH.
        let mut simulator_location = "not found".to_string();
        let mut xcode_install = "not found".to_string();
        if let Some(xcode) = Workspace::get_xcode_path().await {
            let sim_location = xcode.join("Applications").join("Simulator.app");
            if sim_location.exists() {
                simulator_location = sim_location.display().to_string();
            }
            if xcode.exists() {
                xcode_install = xcode.display().to_string();
            }
        }
        let mut security_cli_path = "not found".to_string();
        let mut codesign_path = "not found".to_string();
        let mut xcode_select_path = "not found".to_string();
        let mut xcrun_path = "not found".to_string();
        let mut ranlib_path = "not found".to_string();
        if let Ok(path) = which::which("security") {
            security_cli_path = path.display().to_string();
        }
        if let Ok(path) = which::which("codesign") {
            codesign_path = path.display().to_string();
        }
        if let Ok(path) = which::which("xcode-select") {
            xcode_select_path = path.display().to_string();
        }
        if let Ok(path) = which::which("xcrun") {
            xcrun_path = path.display().to_string();
        }
        if let Some(path) = Workspace::select_ranlib() {
            ranlib_path = path.display().to_string();
        }

        // Installed target toolchains, detected by the presence of the target
        // directory under the sysroot's `rustlib`.
        let mut has_wasm32_unknown_unknown = "❌";
        let mut has_aarch64_linux_android = "❌";
        let mut has_i686_linux_android = "❌";
        let mut has_armv7_linux_androideabi = "❌";
        let mut has_x86_64_linux_android = "❌";
        let mut has_x86_64_apple_ios = "❌";
        let mut has_aarch64_apple_ios = "❌";
        let mut has_aarch64_apple_ios_sim = "❌";
        let mut has_aarch64_apple_darwin = "❌";
        if rustlib.join("wasm32-unknown-unknown").exists() {
            has_wasm32_unknown_unknown = "✅";
        }
        if rustlib.join("aarch64-linux-android").exists() {
            has_aarch64_linux_android = "✅";
        }
        if rustlib.join("i686-linux-android").exists() {
            has_i686_linux_android = "✅";
        }
        if rustlib.join("armv7-linux-androideabi").exists() {
            has_armv7_linux_androideabi = "✅";
        }
        if rustlib.join("x86_64-linux-android").exists() {
            has_x86_64_linux_android = "✅";
        }
        if rustlib.join("x86_64-apple-ios").exists() {
            has_x86_64_apple_ios = "✅";
        }
        if rustlib.join("aarch64-apple-ios").exists() {
            has_aarch64_apple_ios = "✅";
        }
        if rustlib.join("aarch64-apple-ios-sim").exists() {
            has_aarch64_apple_ios_sim = "✅";
        }
        if rustlib.join("aarch64-apple-darwin").exists() {
            has_aarch64_apple_darwin = "✅";
        }

        // Rust tool paths
        let mut rustc_path = "not found".to_string();
        let mut cargo_path = "not found".to_string();
        let mut cc_path = "not found".to_string();
        if let Ok(path) = which::which("rustc") {
            rustc_path = path.display().to_string();
        }
        if let Ok(path) = which::which("cargo") {
            cargo_path = path.display().to_string();
        }
        if let Ok(path) = which::which("cc") {
            cc_path = path.display().to_string();
        }

        // Fixed vs upstream: "Codesign CII" -> "Codesign CLI", and the
        // aarch64-apple-darwin target is labeled (macOS), not (iOS).
        use crate::styles::*;
        println!(
            r#"{LINK_STYLE}Setup{LINK_STYLE:#}
{GLOW_STYLE}Web{GLOW_STYLE:#}: wasm-bindgen, wasm-opt, and TailwindCSS are downloaded automatically
{GLOW_STYLE}iOS{GLOW_STYLE:#}: Install iOS SDK and developer tools and through XCode
{GLOW_STYLE}Android{GLOW_STYLE:#}: Install Android Studio, NDK, and then set ANDROID_HOME and ANDROID_NDK_HOME
{GLOW_STYLE}macOS{GLOW_STYLE:#}: all tools should be installed by default
{GLOW_STYLE}Windows{GLOW_STYLE:#}: install the webview2 binary
{GLOW_STYLE}Linux{GLOW_STYLE:#}: Install libwebkit2gtk-4.1-dev libgtk-3-dev libasound2-dev libudev-dev libayatana-appindicator3-dev libxdo-dev libglib2.0-dev
{GLOW_STYLE}nix{GLOW_STYLE:#}: Make sure all tools are in your path (codesign, ld, etc.)
{LINK_STYLE}Rust{LINK_STYLE:#}
Rustc version: {HINT_STYLE}{rustc_version}{HINT_STYLE:#}
Rustc sysroot: {HINT_STYLE}{rustc_sysroot}{HINT_STYLE:#}
Rustc path: {HINT_STYLE}{rustc_path}{HINT_STYLE:#}
Cargo path: {HINT_STYLE}{cargo_path}{HINT_STYLE:#}
cc path: {HINT_STYLE}{cc_path}{HINT_STYLE:#}
{LINK_STYLE}Devtools{LINK_STYLE:#}
VSCode Extension: {HINT_STYLE}{vscode_ext_msg}{HINT_STYLE:#}
VSCode-Insiders Extension: {HINT_STYLE}{vscode_insiders_ext_msg}{HINT_STYLE:#}
Cursor Extension: {HINT_STYLE}{cursor_ext_msg}{HINT_STYLE:#}
TailwindCSS: {HINT_STYLE}{tailwindcss}{HINT_STYLE:#}
{LINK_STYLE}Web{LINK_STYLE:#}
wasm-opt: {HINT_STYLE}{wasm_opt_message}{HINT_STYLE:#}
wasm-bindgen: {HINT_STYLE}{wasm_bindgen_location}{HINT_STYLE:#}
wasm-bindgen version: {HINT_STYLE}{wbg_version_msg}{HINT_STYLE:#}
{LINK_STYLE}iOS/macOS{LINK_STYLE:#}
XCode: {HINT_STYLE}{xcode_install}{HINT_STYLE:#}
Simulator: {HINT_STYLE}{simulator_location}{HINT_STYLE:#}
Security CLI: {HINT_STYLE}{security_cli_path}{HINT_STYLE:#}
Codesign CLI: {HINT_STYLE}{codesign_path}{HINT_STYLE:#}
xcode-select: {HINT_STYLE}{xcode_select_path}{HINT_STYLE:#}
xcrun: {HINT_STYLE}{xcrun_path}{HINT_STYLE:#}
ranlib: {HINT_STYLE}{ranlib_path}{HINT_STYLE:#}
{LINK_STYLE}Android{LINK_STYLE:#}
sdk: {HINT_STYLE}{sdk}{HINT_STYLE:#}
ndk: {HINT_STYLE}{ndk}{HINT_STYLE:#}
adb: {HINT_STYLE}{adb}{HINT_STYLE:#}
emulator: {HINT_STYLE}{emulator}{HINT_STYLE:#}
java_home: {HINT_STYLE}{java_home}{HINT_STYLE:#}
{LINK_STYLE}Toolchains{LINK_STYLE:#}
{HINT_STYLE}{has_wasm32_unknown_unknown}{HINT_STYLE:#} wasm32-unknown-unknown {HINT_STYLE}(web){HINT_STYLE:#}
{HINT_STYLE}{has_aarch64_linux_android}{HINT_STYLE:#} aarch64-linux-android {HINT_STYLE}(android){HINT_STYLE:#}
{HINT_STYLE}{has_i686_linux_android}{HINT_STYLE:#} i686-linux-android {HINT_STYLE}(android){HINT_STYLE:#}
{HINT_STYLE}{has_armv7_linux_androideabi}{HINT_STYLE:#} armv7-linux-androideabi {HINT_STYLE}(android){HINT_STYLE:#}
{HINT_STYLE}{has_x86_64_linux_android}{HINT_STYLE:#} x86_64-linux-android {HINT_STYLE}(android){HINT_STYLE:#}
{HINT_STYLE}{has_x86_64_apple_ios}{HINT_STYLE:#} x86_64-apple-ios {HINT_STYLE}(iOS){HINT_STYLE:#}
{HINT_STYLE}{has_aarch64_apple_ios}{HINT_STYLE:#} aarch64-apple-ios {HINT_STYLE}(iOS){HINT_STYLE:#}
{HINT_STYLE}{has_aarch64_apple_ios_sim}{HINT_STYLE:#} aarch64-apple-ios-sim {HINT_STYLE}(iOS){HINT_STYLE:#}
{HINT_STYLE}{has_aarch64_apple_darwin}{HINT_STYLE:#} aarch64-apple-darwin {HINT_STYLE}(macOS){HINT_STYLE:#}
Get help: {LINK_STYLE}https://discord.gg/XgGxMSkvUM{LINK_STYLE:#}
More info: {LINK_STYLE}https://dioxuslabs.com/learn/0.7/{LINK_STYLE:#}
"#
        );

        Ok(StructuredOutput::Success)
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/hotpatch.rs | packages/cli/src/cli/hotpatch.rs | use crate::{
platform_override::CommandWithPlatformOverrides, AppBuilder, BuildArgs, BuildId, BuildMode,
HotpatchModuleCache, Result, StructuredOutput,
};
use anyhow::Context;
use clap::Parser;
use dioxus_dx_wire_format::StructuredBuildArtifacts;
use std::io::Read;
use std::sync::Arc;
// Clap help-heading under which all hotpatch-specific flags are grouped.
const HELP_HEADING: &str = "Hotpatching a binary";
/// Patches a single binary, but takes the same arguments as a `cargo build` and the serialized output from `dx build --fat-binary` as input.
///
/// This is intended to be used with something like `dx build --fat-binary >> output.json` and then
/// `cat output.json | dx hotpatch --aslr-reference 0x12345678` to produce a hotpatched binary.
///
/// By default, patches the client, but you can set patch_server to true to patch the server instead.
#[derive(Clone, Debug, Parser)]
pub struct HotpatchTip {
    /// Should we patch the server or the client? False = client, True = server
    // `num_args = 0..=1` + `default_missing_value` lets `--patch-server` be
    // passed as a bare flag (meaning true) or with an explicit value.
    #[clap(long, num_args = 0..=1, default_missing_value="true", help_heading = HELP_HEADING)]
    pub patch_server: Option<bool>,
    /// The ASLR reference of the running app being patched. Used to generate sensible offsets for patched code.
    #[clap(long, help_heading = HELP_HEADING)]
    pub aslr_reference: u64,
    /// The regular build arguments, with optional per-platform overrides.
    #[clap(flatten)]
    pub build_args: CommandWithPlatformOverrides<BuildArgs>,
}
impl HotpatchTip {
    /// Produce a hotpatched binary for the client (default) or server target.
    ///
    /// Reads serialized `StructuredBuildArtifacts` — as emitted by a prior fat
    /// build — from stdin, performs a thin rebuild against them, and returns
    /// the resulting jump table plus the new build artifacts.
    pub async fn run(self) -> Result<StructuredOutput> {
        let targets = self.build_args.into_targets().await?;
        // Client is the default; `--patch-server` switches to the server build.
        let patch_server = self.patch_server.unwrap_or(false);
        let build_id = if patch_server {
            BuildId::SECONDARY
        } else {
            BuildId::PRIMARY
        };
        // Select which target to patch
        let request = if patch_server {
            targets.server.as_ref().context("No server to patch!")?
        } else {
            &targets.client
        };
        // The fat build's artifacts arrive on stdin as JSON.
        let mut serialized_artifacts = String::new();
        std::io::stdin()
            .lock()
            .read_to_string(&mut serialized_artifacts)
            .context("Failed to read serialized build artifacts from stdin")?;
        let structured_build_artifacts =
            serde_json::from_str::<StructuredBuildArtifacts>(&serialized_artifacts)
                .context("Failed to parse structured build artifacts")?;
        let StructuredBuildArtifacts {
            exe,
            rustc_args,
            rustc_envs,
            link_args,
            ..
        } = structured_build_artifacts;
        // todo: loading this cache over and over defeats the purpose of a cache
        // consider a shared-mem approach or a binary serializer? something like arrow / parquet / bincode?
        let cache = Arc::new(HotpatchModuleCache::new(&exe, &request.triple)?);
        // A "thin" build reuses the fat build's rustc/link arguments verbatim.
        let mode = BuildMode::Thin {
            rustc_args: crate::RustcArgs {
                args: rustc_args,
                envs: rustc_envs,
                link_args,
            },
            changed_files: vec![],
            aslr_reference: self.aslr_reference,
            cache: cache.clone(),
        };
        let artifacts = AppBuilder::started(request, mode, build_id)?
            .finish_build()
            .await?;
        let patch_exe = request.patch_exe(artifacts.time_start);
        Ok(StructuredOutput::Hotpatch {
            jump_table: request.create_jump_table(&patch_exe, &cache)?,
            artifacts: artifacts.into_structured_output(),
        })
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/print.rs | packages/cli/src/cli/print.rs | use std::{borrow::Cow, ffi::OsString};
use super::*;
use crate::{BuildMode, Result};
use anyhow::Context;
/// Print the `cargo rustc` arguments and environment dioxus uses to build the
/// client or server app, in a shell-pasteable or machine-readable form.
// (The previous doc comment was copy-pasted from `doctor.rs` and described the
// wrong command.)
#[derive(Clone, Debug, Subcommand)]
pub(crate) enum Print {
    /// Print the cargo args dioxus uses to build the server app.
    /// Environment variables will be set with the `env` command.
    #[clap(name = "client-args")]
    ClientArgs(PrintCargoArgs),
    /// Print the cargo args dioxus uses to build the client app.
    /// Environment variables will be set with the `env` command.
    #[clap(name = "server-args")]
    ServerArgs(PrintCargoArgs),
}
/// Shared arguments for both `print client-args` and `print server-args`.
#[derive(Clone, Debug, Parser)]
pub(crate) struct PrintCargoArgs {
    /// The build arguments used to resolve the client/server build targets.
    #[clap(flatten)]
    pub(crate) args: CommandWithPlatformOverrides<build::BuildArgs>,
    /// The print output style to use. By default, this uses the current-platform's best fit,
    /// though you can customize it in the case you might be driving an external build system.
    /// - Unix style uses the `env` command
    /// - Windows style uses the `set` command
    /// - JSON style prints the arguments as JSON
    /// - Pretty JSON style prints the arguments as pretty JSON
    /// - Args style prints only the arguments, one per line, without any environment variables.
    /// - Env style prints only the environment variables, one key-pair per line, without any arguments.
    #[clap(long)]
    pub(crate) style: Option<PrintStyle>,
}
/// Output format for the printed cargo invocation.
#[derive(Clone, Debug, clap::ValueEnum)]
pub(crate) enum PrintStyle {
    /// Print the arguments as a list of arguments, one per line.
    /// Does not include the `cargo rustc` command itself
    Args,
    /// Print the environment variables as a list of key=value pairs, one per line.
    Env,
    /// Print the arguments using the Unix `env` command.
    Unix,
    /// Print the arguments using the Windows `set` command.
    Cmd,
    /// Print the arguments as JSON. Does not include the `cargo rustc` command itself
    Json,
    /// Print the arguments as pretty JSON. Does not include the `cargo rustc` command itself
    PrettyJson,
}
impl Print {
    /// Resolve the configured build targets and print the `cargo rustc`
    /// invocation (arguments + environment) for the selected app, also
    /// returning the same data as structured output.
    pub(crate) async fn print(self) -> Result<StructuredOutput> {
        match self {
            Self::ClientArgs(opts) => {
                let targets = opts.args.into_targets().await?;
                let mode = BuildMode::Base { run: false };
                let args = targets.client.cargo_build_arguments(&mode);
                let env = targets.client.cargo_build_env_vars(&mode)?;
                Self::print_as_unified_command(&env, &args, &opts.style);
                Ok(StructuredOutput::PrintCargoArgs {
                    args,
                    env: env
                        .into_iter()
                        .map(|(k, v)| (k, v.to_string_lossy().to_string()))
                        .collect::<Vec<_>>(),
                })
            }
            Self::ServerArgs(print_cargo_args) => {
                let targets = print_cargo_args.args.into_targets().await?;
                let mode = BuildMode::Base { run: false };
                // Unlike the client, a server target may not exist at all.
                let server = targets
                    .server
                    .context("No server target found, cannot print server args")?;
                let args = server.cargo_build_arguments(&mode);
                let env = server.cargo_build_env_vars(&mode)?;
                Self::print_as_unified_command(&env, &args, &print_cargo_args.style);
                Ok(StructuredOutput::PrintCargoArgs {
                    args,
                    env: env
                        .into_iter()
                        .map(|(k, v)| (k, v.to_string_lossy().to_string()))
                        .collect::<Vec<_>>(),
                })
            }
        }
    }
    /// Prints the given env and args as a unified command.
    /// - Uses `env` on unix systems
    /// - Uses `set VAR=value &&` on windows systems
    /// - Prints structured JSON on json style
    fn print_as_unified_command(
        env: &[(Cow<'static, str>, OsString)],
        args: &[String],
        style: &Option<PrintStyle>,
    ) {
        // Default style: unix-flavored on unix hosts or in MSYS/Cygwin shells
        // on Windows (detected via their environment variables); plain cmd.exe
        // style otherwise.
        let style = style.clone().unwrap_or({
            if cfg!(unix) || std::env::var("MSYSTEM").is_ok() || std::env::var("CYGWIN").is_ok() {
                PrintStyle::Unix
            } else {
                PrintStyle::Cmd
            }
        });
        match style {
            PrintStyle::Args => {
                for arg in args {
                    println!("{}", arg);
                }
            }
            PrintStyle::Env => {
                for (key, value) in env {
                    println!("{}={}", key, value.to_string_lossy());
                }
            }
            PrintStyle::Unix => {
                // Build one `env K=V ... cargo rustc ...` line, shell-quoted.
                let mut cmd = String::from("env");
                for (key, value) in env {
                    cmd.push_str(&format!(
                        " {}={}",
                        key,
                        shell_words::quote(&value.to_string_lossy())
                    ));
                }
                cmd.push_str(" cargo rustc");
                for arg in args {
                    cmd.push_str(&format!(" {}", shell_words::quote(arg)));
                }
                println!("{}", cmd);
            }
            PrintStyle::Cmd => {
                // cmd.exe has no `env`; chain `set K=V &&` before the command.
                let mut cmd = String::new();
                for (key, value) in env {
                    cmd.push_str(&format!(
                        "set {}={} && ",
                        key,
                        Self::escape_windows(value.to_string_lossy())
                    ));
                }
                cmd.push_str("cargo rustc");
                for arg in args {
                    cmd.push_str(&format!(
                        " {}",
                        Self::escape_windows(Cow::Borrowed(arg.as_str()))
                    ));
                }
                println!("{}", cmd);
            }
            PrintStyle::Json | PrintStyle::PrettyJson => {
                let output = serde_json::json!({
                    "env": env.iter().map(|(k, v)| (k.as_ref(), v)).collect::<std::collections::HashMap<_, _>>(),
                    "args": args
                });
                if matches!(style, PrintStyle::PrettyJson) {
                    println!("{}", serde_json::to_string_pretty(&output).unwrap());
                } else {
                    println!("{}", serde_json::to_string(&output).unwrap());
                }
            }
        }
    }
    /// Escape for the windows cmd.exe shell.
    ///
    /// See [here][msdn] for more information.
    ///
    /// [msdn]: http://blogs.msdn.com/b/twistylittlepassagesallalike/archive/2011/04/23/everyone-quotes-arguments-the-wrong-way.aspx
    ///
    /// This function comes from shell-escape
    fn escape_windows(s: Cow<str>) -> Cow<str> {
        use std::iter::repeat;
        // Only quote when the string is empty or contains a character cmd.exe
        // would split or misinterpret.
        let mut needs_escape = s.is_empty();
        for ch in s.chars() {
            match ch {
                '"' | '\t' | '\n' | ' ' => needs_escape = true,
                _ => {}
            }
        }
        if !needs_escape {
            return s;
        }
        let mut es = String::with_capacity(s.len());
        es.push('"');
        let mut chars = s.chars().peekable();
        loop {
            // Count a run of backslashes: they only need doubling when they
            // precede a quote (or the closing quote at end of string).
            let mut nslashes = 0;
            while let Some(&'\\') = chars.peek() {
                chars.next();
                nslashes += 1;
            }
            match chars.next() {
                Some('"') => {
                    es.extend(repeat('\\').take(nslashes * 2 + 1));
                    es.push('"');
                }
                Some(c) => {
                    es.extend(repeat('\\').take(nslashes));
                    es.push(c);
                }
                None => {
                    es.extend(repeat('\\').take(nslashes * 2));
                    break;
                }
            }
        }
        es.push('"');
        es.into()
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/create.rs | packages/cli/src/cli/create.rs | use super::*;
use crate::TraceSrc;
use anyhow::{bail, Context};
use cargo_generate::{GenerateArgs, TemplatePath, Vcs};
use std::{fs, path::Path};
// The template used when the user doesn't pass `--template`; the branch is
// chosen separately in `resolve_template_and_branch`.
pub(crate) static DEFAULT_TEMPLATE: &str = "gh:dioxuslabs/dioxus-template";
/// Arguments for `dx new`: scaffold a fresh Dioxus project from a template.
#[derive(Clone, Debug, Default, Deserialize, Parser)]
#[clap(name = "new")]
pub struct Create {
    /// Create a new Dioxus project at PATH
    pub path: PathBuf,
    /// Project name. Defaults to directory name
    #[arg(short, long)]
    pub name: Option<String>,
    /// Template path
    #[clap(short, long)]
    pub template: Option<String>,
    /// Branch to select when using `template` from a git repository.
    /// Mutually exclusive with: `--revision`, `--tag`.
    #[clap(long, conflicts_with_all(["revision", "tag"]))]
    pub branch: Option<String>,
    /// A commit hash to select when using `template` from a git repository.
    /// Mutually exclusive with: `--branch`, `--tag`.
    #[clap(long, conflicts_with_all(["branch", "tag"]))]
    pub revision: Option<String>,
    /// Tag to select when using `template` from a git repository.
    /// Mutually exclusive with: `--branch`, `--revision`.
    #[clap(long, conflicts_with_all(["branch", "revision"]))]
    pub tag: Option<String>,
    /// Specify a sub-template within the template repository to be used as the actual template
    #[clap(long)]
    pub subtemplate: Option<String>,
    /// Pass `<option>=<value>` for the used template (e.g., `foo=bar`)
    #[clap(short, long)]
    pub option: Vec<String>,
    /// Skip user interaction by using the default values for the used template.
    /// Default values can be overridden with `--option`
    #[clap(short, long)]
    pub yes: bool,
    /// Specify the VCS used to initialize the generated template.
    /// Options: `git`, `none`.
    #[arg(long, value_parser)]
    pub vcs: Option<Vcs>,
}
impl Create {
    /// Generate a new project from the selected template via `cargo-generate`,
    /// then run the post-creation polish (formatting, VCS init, etc).
    pub async fn create(mut self) -> Result<StructuredOutput> {
        // Project name defaults to directory name.
        if self.name.is_none() {
            self.name = Some(create::name_from_path(&self.path)?);
        }
        check_path(&self.path).await?;
        // Perform a connectivity check so we don't just sit around doing nothing if there's a network error
        if self.template.is_none() {
            check_connectivity().await?;
        }
        // If no template is specified, use the default one and set the branch to the latest release.
        resolve_template_and_branch(&mut self.template, &mut self.branch);
        // cargo-generate requires the path to be created first.
        std::fs::create_dir_all(&self.path)?;
        let args = GenerateArgs {
            define: self.option,
            destination: Some(self.path),
            // NOTE: destination without init means base_dir + name, with —
            // means dest_dir. So use `init: true` and always handle
            // the dest_dir manually and carefully.
            // Cargo never adds name to the path. Name is solely for project name.
            // https://github.com/cargo-generate/cargo-generate/issues/1250
            init: true,
            name: self.name,
            silent: self.yes,
            vcs: self.vcs,
            template_path: TemplatePath {
                auto_path: self.template,
                branch: self.branch,
                revision: self.revision,
                subfolder: self.subtemplate,
                tag: self.tag,
                ..Default::default()
            },
            // Mirror the CLI's own verbosity setting into cargo-generate.
            verbose: crate::logging::VERBOSITY
                .get()
                .map(|f| f.verbose)
                .unwrap_or(false),
            ..Default::default()
        };
        tracing::debug!(dx_src = ?TraceSrc::Dev, "Creating new project with args: {args:#?}");
        let path = cargo_generate::generate(args)?;
        // Post-create is best-effort; its failure shouldn't fail the command.
        _ = post_create(&path, &self.vcs.unwrap_or(Vcs::Git));
        Ok(StructuredOutput::Success)
    }
}
/// If no template is specified, use the default one and set the branch to the latest release.
///
/// Allows us to version templates under the v0.5/v0.6 scheme on the templates repo.
pub(crate) fn resolve_template_and_branch(
    template: &mut Option<String>,
    branch: &mut Option<String>,
) {
    // An explicitly chosen template is left alone, branch included.
    if template.is_some() {
        return;
    }

    use crate::dx_build_info::{PKG_VERSION_MAJOR, PKG_VERSION_MINOR};

    *template = Some(DEFAULT_TEMPLATE.to_string());

    // Pin the default template to the branch matching this CLI's major.minor,
    // unless the user picked a branch themselves.
    if branch.is_none() {
        *branch = Some(format!("v{PKG_VERSION_MAJOR}.{PKG_VERSION_MINOR}"));
    }
}
/// Extracts the last directory name from the `path`.
///
/// The path is absolutized first so relative paths such as `.` resolve to a
/// real directory name.
///
/// # Errors
/// Fails when absolutization fails, the path has no final component, or that
/// component is not valid UTF-8.
pub(crate) fn name_from_path(path: &Path) -> Result<String> {
    use path_absolutize::Absolutize;

    Ok(path
        .absolutize()?
        // `absolutize` yields a Cow<Path>, which derefs to Path — no owned
        // PathBuf copy is needed to call `file_name`.
        .file_name()
        .context("Current path does not include directory name")?
        .to_str()
        .context("Current directory name is not a valid UTF-8 string")?
        .to_string())
}
/// Post-creation actions for newly setup crates.
pub(crate) fn post_create(path: &Path, vcs: &Vcs) -> Result<()> {
let metadata = if let Some(parent_dir) = path.parent() {
match cargo_metadata::MetadataCommand::new()
.current_dir(parent_dir)
.exec()
{
Ok(v) => Some(v),
// Only 1 error means that CWD isn't a cargo project.
Err(cargo_metadata::Error::CargoMetadata { .. }) => None,
Err(err) => {
anyhow::bail!("Couldn't retrieve cargo metadata: {:?}", err)
}
}
} else {
None
};
// 1. Add the new project to the workspace, if it exists.
// This must be executed first in order to run `cargo fmt` on the new project.
let is_workspace = metadata.is_some();
metadata.and_then(|metadata| {
let cargo_toml_path = &metadata.workspace_root.join("Cargo.toml");
let cargo_toml_str = std::fs::read_to_string(cargo_toml_path).ok()?;
let relative_path = path.strip_prefix(metadata.workspace_root).ok()?;
let mut cargo_toml: toml_edit::DocumentMut = cargo_toml_str.parse().ok()?;
cargo_toml
.get_mut("workspace")?
.get_mut("members")?
.as_array_mut()?
.push(relative_path.display().to_string());
std::fs::write(cargo_toml_path, cargo_toml.to_string()).ok()
});
// 2. Run `cargo fmt` on the new project.
let mut cmd = Command::new("cargo");
let cmd = cmd.arg("fmt").current_dir(path);
let output = cmd.output().expect("failed to execute process");
if !output.status.success() {
tracing::error!(dx_src = ?TraceSrc::Dev, "cargo fmt failed");
tracing::error!(dx_src = ?TraceSrc::Build, "stdout: {}", String::from_utf8_lossy(&output.stdout));
tracing::error!(dx_src = ?TraceSrc::Build, "stderr: {}", String::from_utf8_lossy(&output.stderr));
}
// 3. Format the `Cargo.toml` and `Dioxus.toml` files.
let toml_paths = [path.join("Cargo.toml"), path.join("Dioxus.toml")];
for toml_path in &toml_paths {
let Ok(toml) = std::fs::read_to_string(toml_path) else {
continue;
};
let mut toml = toml.parse::<toml_edit::DocumentMut>().map_err(|e| {
anyhow::anyhow!("failed to parse toml at {}: {}", toml_path.display(), e)
})?;
toml.as_table_mut().fmt();
let as_string = toml.to_string();
let new_string = remove_triple_newlines(&as_string);
let mut file = std::fs::File::create(toml_path)?;
file.write_all(new_string.as_bytes())?;
}
// 4. Remove any triple newlines from the readme.
let readme_path = path.join("README.md");
let readme = std::fs::read_to_string(&readme_path)?;
let new_readme = remove_triple_newlines(&readme);
let mut file = std::fs::File::create(readme_path)?;
file.write_all(new_readme.as_bytes())?;
// 5. Run git init
if !is_workspace {
vcs.initialize(path, Some("main"), true)?;
}
tracing::info!(dx_src = ?TraceSrc::Dev, "Generated project at {}\n\n`cd` to your project and run `dx serve` to start developing.\nMore information is available in the generated `README.md`.\n\nBuild cool things! ✌️", path.display());
Ok(())
}
/// Collapse every run of three-or-more consecutive newlines down to exactly
/// two, leaving all other characters untouched.
fn remove_triple_newlines(string: &str) -> String {
    string
        .chars()
        .fold(String::with_capacity(string.len()), |mut acc, ch| {
            // Drop a newline that would become the third one in a row.
            if !(ch == '\n' && acc.ends_with("\n\n")) {
                acc.push(ch);
            }
            acc
        })
}
/// Check if the requested project can be created in the filesystem
pub(crate) async fn check_path(path: &std::path::PathBuf) -> Result<()> {
    // Anything already occupying the target path (file or directory) blocks
    // project creation; an unreadable/absent path is fine.
    if fs::metadata(path).is_err() {
        return Ok(());
    }
    bail!(
        "A file or directory with the given project name \"{}\" already exists.",
        path.to_string_lossy()
    )
}
/// Perform a health check against github itself before we attempt to download any templates hosted
/// on github.
///
/// Skipped entirely in offline mode. Otherwise retries up to six times,
/// warning the user on each failed attempt, and errors out if GitHub never
/// responds.
pub(crate) async fn check_connectivity() -> Result<()> {
    if crate::verbosity_or_default().offline {
        return Ok(());
    }
    use crate::styles::{GLOW_STYLE, LINK_STYLE};
    let client = reqwest::Client::new();
    for x in 0..=5 {
        // Race the HEAD request against a timeout; either way we fall through
        // to the warning below if we didn't return success.
        tokio::select! {
            res = client.head("https://github.com/DioxusLabs/").header("User-Agent", "dioxus-cli").send() => {
                if res.is_ok() {
                    return Ok(());
                }
                // Request completed but failed — back off before retrying.
                tokio::time::sleep(std::time::Duration::from_millis(2000)).await;
            },
            // NOTE(review): the second attempt uses a shorter 500ms timeout —
            // presumably to re-warn quickly after the first failure; confirm.
            _ = tokio::time::sleep(std::time::Duration::from_millis(if x == 1 { 500 } else { 2000 })) => {}
        }
        if x == 0 {
            eprintln!("{GLOW_STYLE}warning{GLOW_STYLE:#}: Waiting for {LINK_STYLE}https://github.com/dioxuslabs{LINK_STYLE:#}...")
        } else {
            eprintln!(
                "{GLOW_STYLE}warning{GLOW_STYLE:#}: ({x}/5) Taking a while, maybe your internet is down?"
            );
        }
    }
    bail!(
        "Error connecting to template repository. Try cloning the template manually or add `dioxus` to a `cargo new` project."
    )
}
// todo: re-enable these tests with better parallelization
//
// #[cfg(test)]
// pub(crate) mod tests {
// use escargot::{CargoBuild, CargoRun};
// use std::sync::LazyLock;
// use std::fs::{create_dir_all, read_to_string};
// use std::path::{Path, PathBuf};
// use std::process::Command;
// use tempfile::tempdir;
// use toml::Value;
// static BINARY: LazyLock<CargoRun> = LazyLock::new(|| {
// CargoBuild::new()
// .bin(env!("CARGO_BIN_NAME"))
// .current_release()
// .run()
// .expect("Couldn't build the binary for tests.")
// });
// // Note: tests below (at least 6 of them) were written to mainly test
// // correctness of project's directory and its name, because previously it
// // was broken and tests bring a peace of mind. And also so that I don't have
// // to run my local hand-made tests every time.
// pub(crate) type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;
// pub(crate) fn subcommand(name: &str) -> Command {
// let mut command = BINARY.command();
// command.arg(name).arg("--yes"); // Skip any questions by choosing default answers.
// command
// }
// pub(crate) fn get_cargo_toml_path(project_path: &Path) -> PathBuf {
// project_path.join("Cargo.toml")
// }
// pub(crate) fn get_project_name(cargo_toml_path: &Path) -> Result<String> {
// Ok(toml::from_str::<Value>(&read_to_string(cargo_toml_path)?)?
// .get("package")
// .unwrap()
// .get("name")
// .unwrap()
// .as_str()
// .unwrap()
// .to_string())
// }
// fn subcommand_new() -> Command {
// subcommand("new")
// }
// #[test]
// fn test_subcommand_new_with_dot_path() -> Result<()> {
// let project_dir = "dir";
// let project_name = project_dir;
// let temp_dir = tempdir()?;
// // Make current dir's name deterministic.
// let current_dir = temp_dir.path().join(project_dir);
// create_dir_all(¤t_dir)?;
// let project_path = ¤t_dir;
// assert!(project_path.exists());
// assert!(subcommand_new()
// .arg(".")
// .current_dir(&current_dir)
// .status()
// .is_ok());
// let cargo_toml_path = get_cargo_toml_path(project_path);
// assert!(cargo_toml_path.exists());
// assert_eq!(get_project_name(&cargo_toml_path)?, project_name);
// Ok(())
// }
// #[test]
// fn test_subcommand_new_with_1_dir_path() -> Result<()> {
// let project_dir = "dir";
// let project_name = project_dir;
// let current_dir = tempdir()?;
// assert!(subcommand_new()
// .arg(project_dir)
// .current_dir(&current_dir)
// .status()
// .is_ok());
// let project_path = current_dir.path().join(project_dir);
// let cargo_toml_path = get_cargo_toml_path(&project_path);
// assert!(project_path.exists());
// assert!(cargo_toml_path.exists());
// assert_eq!(get_project_name(&cargo_toml_path)?, project_name);
// Ok(())
// }
// #[test]
// fn test_subcommand_new_with_2_dir_path() -> Result<()> {
// let project_dir = "a/b";
// let project_name = "b";
// let current_dir = tempdir()?;
// assert!(subcommand_new()
// .arg(project_dir)
// .current_dir(&current_dir)
// .status()
// .is_ok());
// let project_path = current_dir.path().join(project_dir);
// let cargo_toml_path = get_cargo_toml_path(&project_path);
// assert!(project_path.exists());
// assert!(cargo_toml_path.exists());
// assert_eq!(get_project_name(&cargo_toml_path)?, project_name);
// Ok(())
// }
// #[test]
// fn test_subcommand_new_with_dot_path_and_custom_name() -> Result<()> {
// let project_dir = "dir";
// let project_name = "project";
// let temp_dir = tempdir()?;
// // Make current dir's name deterministic.
// let current_dir = temp_dir.path().join(project_dir);
// create_dir_all(&current_dir)?;
// let project_path = &current_dir;
// assert!(project_path.exists());
// assert!(subcommand_new()
// .arg("--name")
// .arg(project_name)
// .arg(".")
// .current_dir(&current_dir)
// .status()
// .is_ok());
// let cargo_toml_path = get_cargo_toml_path(project_path);
// assert!(cargo_toml_path.exists());
// assert_eq!(get_project_name(&cargo_toml_path)?, project_name);
// Ok(())
// }
// #[test]
// fn test_subcommand_new_with_1_dir_path_and_custom_name() -> Result<()> {
// let project_dir = "dir";
// let project_name = "project";
// let current_dir = tempdir()?;
// assert!(subcommand_new()
// .arg(project_dir)
// .arg("--name")
// .arg(project_name)
// .current_dir(&current_dir)
// .status()
// .is_ok());
// let project_path = current_dir.path().join(project_dir);
// let cargo_toml_path = get_cargo_toml_path(&project_path);
// assert!(project_path.exists());
// assert!(cargo_toml_path.exists());
// assert_eq!(get_project_name(&cargo_toml_path)?, project_name);
// Ok(())
// }
// #[test]
// fn test_subcommand_new_with_2_dir_path_and_custom_name() -> Result<()> {
// let project_dir = "a/b";
// let project_name = "project";
// let current_dir = tempdir()?;
// assert!(subcommand_new()
// .arg(project_dir)
// .arg("--name")
// .arg(project_name)
// .current_dir(&current_dir)
// .status()
// .is_ok());
// let project_path = current_dir.path().join(project_dir);
// let cargo_toml_path = get_cargo_toml_path(&project_path);
// assert!(project_path.exists());
// assert!(cargo_toml_path.exists());
// assert_eq!(get_project_name(&cargo_toml_path)?, project_name);
// Ok(())
// }
// }
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/cli/component.rs | packages/cli/src/cli/component.rs | use std::{
collections::{HashMap, HashSet},
ops::Deref,
path::{Path, PathBuf},
};
use crate::{verbosity_or_default, DioxusConfig, Result, StructuredOutput, Workspace};
use anyhow::{bail, Context};
use clap::Parser;
use dioxus_component_manifest::{
component_manifest_schema, CargoDependency, Component, ComponentDependency,
};
use git2::Repository;
use serde::{Deserialize, Serialize};
use tokio::{process::Command, task::JoinSet};
use tracing::debug;
// CLI subcommands for managing reusable components sourced from a component registry
// (either a local path or a remote git repo). Parsed by clap; the `///` comments on
// each variant below double as the user-facing help text, so keep them terse.
#[derive(Clone, Debug, Parser)]
pub enum ComponentCommand {
    /// Add a component from a registry
    Add {
        #[clap(flatten)]
        component: ComponentArgs,
        /// The registry to use
        #[clap(flatten)]
        registry: ComponentRegistry,
        /// Overwrite the component if it already exists
        #[clap(long)]
        force: bool,
    },
    /// Remove a component
    Remove {
        #[clap(flatten)]
        component: ComponentArgs,
        /// The registry to use
        #[clap(flatten)]
        registry: ComponentRegistry,
    },
    /// Update a component registry
    Update {
        /// The registry to update
        #[clap(flatten)]
        registry: Option<RemoteComponentRegistry>,
    },
    /// List available components in a registry
    List {
        /// The registry to list components in
        #[clap(flatten)]
        registry: ComponentRegistry,
    },
    /// Clear the component registry cache
    Clean,
    /// Print the schema for component manifests
    Schema,
}
/// Arguments for a component and component module location
// Shared (via `#[clap(flatten)]`) by the `add` and `remove` subcommands above.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ComponentArgs {
    /// The components to add or remove
    // Comma-delimited list; clap requires it unless `--all` is passed.
    #[clap(required_unless_present = "all", value_delimiter = ',')]
    components: Vec<String>,
    /// The location of the component module in your project (default: src/components)
    #[clap(long)]
    module_path: Option<PathBuf>,
    /// The location of the global assets in your project (default: assets)
    #[clap(long)]
    global_assets_path: Option<PathBuf>,
    /// Include all components in the registry
    #[clap(long)]
    all: bool,
}
impl ComponentCommand {
    /// Run the component command
    ///
    /// Dispatches to the matching subcommand and reports success as a
    /// `StructuredOutput` so callers can serialize the result.
    pub async fn run(self) -> Result<StructuredOutput> {
        match self {
            // List all components in the registry
            Self::List { registry } => {
                let config = Self::resolve_config().await?;
                let registry = Self::resolve_registry(registry, &config)?;
                let mut components = registry.read_components().await?;
                // Sort by name so the listing is stable and easy to scan
                components.sort_by_key(|c| c.name.clone());
                for component in components {
                    println!("- {}: {}", component.name, component.description);
                }
            }
            // Add a component to the managed component module
            Self::Add {
                component: component_args,
                registry,
                force,
            } => {
                // Resolve the config
                let config = Self::resolve_config().await?;
                // Resolve the registry
                let registry = Self::resolve_registry(registry, &config)?;
                // Get the registry root. Components can't copy files outside of this path
                let registry_root = registry.resolve().await?;
                // Read all components from the registry
                let components = registry.read_components().await?;
                let mode = if force {
                    ComponentExistsBehavior::Overwrite
                } else {
                    ComponentExistsBehavior::Error
                };
                // Find the requested components
                let components = if component_args.all {
                    components
                } else {
                    component_args
                        .components
                        .iter()
                        .map(|component| find_component(&components, component))
                        .collect::<Result<Vec<_>>>()?
                };
                // Find and initialize the components module if it doesn't exist
                let components_root =
                    components_root(component_args.module_path.as_deref(), &config)?;
                let new_components_module =
                    ensure_components_module_exists(&components_root).await?;
                // Recursively add dependencies
                // A map of the components that have been added or are queued to be added
                let mut required_components = HashMap::new();
                required_components.extend(components.iter().cloned().map(|c| (c, mode)));
                // A stack of components to process
                let mut queued_components = components;
                while let Some(queued_component) = queued_components.pop() {
                    for dependency in &queued_component.component_dependencies {
                        let (registry, name) = match dependency {
                            ComponentDependency::Builtin(name) => {
                                (ComponentRegistry::default(), name)
                            }
                            ComponentDependency::ThirdParty { name, git, rev } => (
                                ComponentRegistry {
                                    remote: RemoteComponentRegistry {
                                        git: Some(git.clone()),
                                        rev: rev.clone(),
                                    },
                                    path: None,
                                },
                                name,
                            ),
                        };
                        let registry_components = registry.read_components().await?;
                        let dependency_component = find_component(&registry_components, name)?;
                        // Only queue dependencies we haven't seen yet so dependency
                        // cycles terminate instead of looping forever
                        if required_components
                            .insert(
                                dependency_component.clone(),
                                ComponentExistsBehavior::Return,
                            )
                            .is_none()
                        {
                            queued_components.push(dependency_component);
                        }
                    }
                }
                // Then collect all required rust dependencies
                let mut rust_dependencies = HashSet::new();
                for component in required_components.keys() {
                    rust_dependencies.extend(component.cargo_dependencies.iter().cloned());
                }
                // And add them to Cargo.toml
                Self::add_rust_dependencies(&rust_dependencies).await?;
                // Once we have all required components, add them
                for (component, mode) in required_components {
                    add_component(
                        &registry_root,
                        component_args.global_assets_path.as_deref(),
                        component_args.module_path.as_deref(),
                        &component,
                        mode,
                        &config,
                    )
                    .await?;
                }
                // If we created a new components module, print instructions about the final setup steps required
                if new_components_module {
                    println!(
                        "Created new components module at {}.",
                        components_root.display()
                    );
                    println!("To finish setting up components, you will need to:");
                    println!("- manually reference the module by adding `mod components;` to your `main.rs` file");
                    if registry.is_default() {
                        println!("- add a reference to `asset!(\"/assets/dx-components-theme.css\")` as a stylesheet in your app");
                    }
                }
            }
            // Update the remote component registry
            Self::Update { registry } => {
                let config = Self::resolve_config().await?;
                registry
                    .unwrap_or(config.components.registry.remote)
                    .update()
                    .await?;
            }
            // Remove a component from the managed component module
            Self::Remove {
                component,
                registry,
            } => {
                Self::remove_component(&component, registry).await?;
            }
            // Clear the component registry cache
            Self::Clean => {
                // Best-effort: ignore errors if the cache is already gone
                _ = tokio::fs::remove_dir_all(&Workspace::component_cache_dir()).await;
            }
            // Print the schema for component manifests
            Self::Schema => {
                let schema = component_manifest_schema();
                println!(
                    "{}",
                    serde_json::to_string_pretty(&schema).unwrap_or_default()
                );
            }
        }
        Ok(StructuredOutput::Success)
    }
    /// Remove a component from the managed component module
    ///
    /// Deletes each component's directory and strips its `pub mod …;` line from mod.rs.
    async fn remove_component(
        component_args: &ComponentArgs,
        registry: ComponentRegistry,
    ) -> Result<()> {
        let config = Self::resolve_config().await?;
        let registry = Self::resolve_registry(registry, &config)?;
        let components_root = components_root(component_args.module_path.as_deref(), &config)?;
        // Find the requested components
        let components = if component_args.all {
            registry
                .read_components()
                .await?
                .into_iter()
                .map(|c| c.component.name)
                .collect()
        } else {
            component_args.components.clone()
        };
        for component_name in components {
            // Remove the component module (best-effort; it may already be gone)
            _ = tokio::fs::remove_dir_all(&components_root.join(&component_name)).await;
            // Remove the module from the components mod.rs
            let mod_rs_path = components_root.join("mod.rs");
            let mod_rs_content = tokio::fs::read_to_string(&mod_rs_path)
                .await
                .with_context(|| format!("Failed to read {}", mod_rs_path.display()))?;
            let mod_line = format!("pub mod {};\n", component_name);
            let new_mod_rs_content = mod_rs_content.replace(&mod_line, "");
            tokio::fs::write(&mod_rs_path, new_mod_rs_content)
                .await
                .with_context(|| format!("Failed to write to {}", mod_rs_path.display()))?;
        }
        Ok(())
    }
    /// Load the config for the main package, falling back to the default config
    async fn resolve_config() -> Result<DioxusConfig> {
        let workspace = Workspace::current().await?;
        let crate_package = workspace.find_main_package(None)?;
        Ok(workspace
            .load_dioxus_config(crate_package)?
            .unwrap_or_default())
    }
    /// Resolve a registry from the config if none is provided
    fn resolve_registry(
        registry: ComponentRegistry,
        config: &DioxusConfig,
    ) -> Result<ComponentRegistry> {
        if !registry.is_default() {
            return Ok(registry);
        }
        Ok(config.components.registry.clone())
    }
    /// Add any rust dependencies required for a component by shelling out to cargo
    async fn add_rust_dependencies(dependencies: &HashSet<CargoDependency>) -> Result<()> {
        for dep in dependencies {
            let status = Command::from(dep.add_command())
                .status()
                .await
                .with_context(|| {
                    format!(
                        "Failed to run command to add dependency {} to Cargo.toml",
                        dep.name()
                    )
                })?;
            if !status.success() {
                bail!("Failed to add dependency {} to Cargo.toml", dep.name());
            }
        }
        Ok(())
    }
}
/// Arguments for the default or custom remote registry
/// If both values are None, the default registry will be used
#[derive(Clone, Debug, Parser, Default, Serialize, Deserialize)]
pub struct RemoteComponentRegistry {
    /// The url of the component registry
    #[arg(long)]
    git: Option<String>,
    /// The revision of the component registry
    #[arg(long)]
    rev: Option<String>,
}
impl RemoteComponentRegistry {
    /// Resolve the path to the component registry, downloading the remote registry if needed
    async fn resolve(&self) -> Result<PathBuf> {
        // If a git url is provided use that (plus optional rev)
        // Otherwise use the built-in registry
        let (git, rev) = self.resolve_or_default();
        let repo_dir = Workspace::component_cache_path(&git, rev.as_deref());
        // If the repo already exists, use it otherwise clone it
        if !repo_dir.exists() {
            // If offline, we cannot download the registry
            if verbosity_or_default().offline {
                bail!("Cannot download component registry '{}' while offline", git);
            }
            // Make sure the parent directory exists
            tokio::fs::create_dir_all(&repo_dir).await?;
            // git2 is blocking, so run the clone on the blocking thread pool
            tokio::task::spawn_blocking({
                let git = git.clone();
                let repo_dir = repo_dir.clone();
                move || {
                    println!("Downloading {git}...");
                    // Clone the repo
                    let repo = Repository::clone(&git, repo_dir)?;
                    // If a rev is provided, checkout that rev
                    if let Some(rev) = &rev {
                        Self::checkout_rev(&repo, &git, rev)?;
                    }
                    anyhow::Ok(())
                }
            })
            .await??;
        }
        Ok(repo_dir)
    }
    /// Update the component registry by fetching the latest changes from the remote
    async fn update(&self) -> Result<()> {
        let (git, rev) = self.resolve_or_default();
        // Make sure the repo is cloned
        let path = self.resolve().await?;
        // Open the repo and update it on the blocking thread pool (git2 is blocking)
        tokio::task::spawn_blocking({
            let path = path.clone();
            move || {
                let repo = Repository::open(path)?;
                let mut remote = repo.find_remote("origin")?;
                // Fetch all remote branches with the same name as local branches
                remote.fetch(&["refs/heads/*:refs/heads/*"], None, None)?;
                // If a rev is provided, checkout that rev
                if let Some(rev) = &rev {
                    Self::checkout_rev(&repo, &git, rev)?;
                }
                // Otherwise, just checkout the latest commit on the default branch
                else {
                    let head = repo.head()?;
                    let branch = head.shorthand().unwrap_or("main");
                    let oid = repo.refname_to_id(&format!("refs/remotes/origin/{branch}"))?;
                    // Propagate the lookup failure instead of panicking on `.unwrap()`
                    let object = repo.find_object(oid, None)?;
                    repo.reset(&object, git2::ResetType::Hard, None)?;
                }
                anyhow::Ok(())
            }
        })
        .await??;
        Ok(())
    }
    /// If a git url is provided use that (plus optional rev)
    /// Otherwise use the built-in registry
    fn resolve_or_default(&self) -> (String, Option<String>) {
        if let Some(git) = &self.git {
            (git.clone(), self.rev.clone())
        } else {
            ("https://github.com/dioxuslabs/components".into(), None)
        }
    }
    /// Checkout the given rev in the given repo
    fn checkout_rev(repo: &Repository, git: &str, rev: &str) -> Result<()> {
        let (object, reference) = repo
            .revparse_ext(rev)
            .with_context(|| format!("Failed to find revision '{}' in '{}'", rev, git))?;
        repo.checkout_tree(&object, None)?;
        // Prefer a symbolic HEAD when the rev names a branch/tag; otherwise detach
        if let Some(gref) = reference {
            if let Some(name) = gref.name() {
                repo.set_head(name)?;
            }
        } else {
            repo.set_head_detached(object.id())?;
        }
        Ok(())
    }
}
/// Arguments for a component registry
/// Either a path to a local directory or a remote git repo (with optional rev)
// When `path` is set it takes priority over the remote settings (see `resolve` below).
#[derive(Clone, Debug, Parser, Default, Serialize, Deserialize)]
pub struct ComponentRegistry {
    /// The remote repo args
    #[clap(flatten)]
    #[serde(flatten)]
    remote: RemoteComponentRegistry,
    /// The path to the components directory
    #[arg(long)]
    path: Option<String>,
}
impl ComponentRegistry {
/// Resolve the path to the component registry, downloading the remote registry if needed
async fn resolve(&self) -> Result<PathBuf> {
// If a path is provided, use that
if let Some(path) = &self.path {
return Ok(PathBuf::from(path));
}
// Otherwise use the remote/default registry
self.remote.resolve().await
}
/// Read all components that are part of this registry
async fn read_components(&self) -> Result<Vec<ResolvedComponent>> {
let path = self.resolve().await?;
let root = read_component(&path).await?;
let mut components = discover_components(root).await?;
// Filter out any virtual components with members
components.retain(|c| c.members.is_empty());
Ok(components)
}
/// Check if this is the default registry
fn is_default(&self) -> bool {
self.path.is_none() && self.remote.git.is_none() && self.remote.rev.is_none()
}
}
/// A component that has been downloaded and resolved at a specific path
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct ResolvedComponent {
    // Canonicalized directory containing the component's `component.json`
    path: PathBuf,
    // The parsed manifest (its fields are also reachable directly via the `Deref` impl)
    component: Component,
}
impl ResolvedComponent {
    /// Get the absolute paths to members of this component.
    fn member_paths(&self) -> Vec<PathBuf> {
        let mut paths = Vec::with_capacity(self.component.members.len());
        for member in &self.component.members {
            paths.push(self.path.join(member));
        }
        paths
    }
}
// Allow a `ResolvedComponent` to be used anywhere the underlying manifest is
// expected (e.g. `resolved.name`, `resolved.members`) by delegating to it.
impl Deref for ResolvedComponent {
    type Target = Component;
    fn deref(&self) -> &Self::Target {
        &self.component
    }
}
// Find a component by name in a list of components.
// Returns an error naming the missing component when no match exists.
fn find_component(components: &[ResolvedComponent], component: &str) -> Result<ResolvedComponent> {
    for candidate in components {
        if candidate.name == component {
            return Ok(candidate.clone());
        }
    }
    Err(anyhow::anyhow!(
        "Component '{}' not found in registry",
        component
    ))
}
/// Get the path to the components module, defaulting to src/components
fn components_root(module_path: Option<&Path>, config: &DioxusConfig) -> Result<PathBuf> {
    // An explicit CLI flag wins over everything.
    if let Some(explicit) = module_path {
        return Ok(explicit.to_path_buf());
    }
    // Otherwise resolve relative to the crate root, preferring the configured dir.
    let crate_root = Workspace::crate_root_from_path()?;
    match &config.components.components_dir {
        Some(configured) => Ok(crate_root.join(configured)),
        None => Ok(crate_root.join("src").join("components")),
    }
}
/// Get the path to the global assets directory, defaulting to assets
async fn global_assets_root(assets_path: Option<&Path>, config: &DioxusConfig) -> Result<PathBuf> {
    // Priority: explicit flag, then the configured asset dir, then `<crate root>/assets`.
    if let Some(explicit) = assets_path {
        return Ok(explicit.to_path_buf());
    }
    if let Some(configured) = &config.application.asset_dir {
        return Ok(configured.clone());
    }
    Ok(Workspace::crate_root_from_path()?.join("assets"))
}
/// How should we handle the component if it already exists
// Chosen once per component when running `add`, and consumed by `copy_component_files`.
#[derive(Clone, Copy, Debug)]
enum ComponentExistsBehavior {
    /// Return an error (default)
    Error,
    /// Return early for component dependencies
    Return,
    /// Overwrite the existing component
    Overwrite,
}
/// Add a component to the managed component module
///
/// Copies the component's files into the components module, copies its global
/// assets, and registers it in the module's mod.rs. Returns early (without
/// touching assets or mod.rs) if the copy was skipped per `behavior`.
async fn add_component(
    registry_root: &Path,
    assets_path: Option<&Path>,
    component_path: Option<&Path>,
    component: &ResolvedComponent,
    behavior: ComponentExistsBehavior,
    config: &DioxusConfig,
) -> Result<()> {
    // Copy the folder content to the components directory
    let components_root = components_root(component_path, config)?;
    let copied = copy_component_files(
        &component.path,
        &components_root.join(&component.name),
        &component.exclude,
        behavior,
    )
    .await?;
    if !copied {
        debug!(
            "Component '{}' already exists, skipping copy",
            component.name
        );
        return Ok(());
    }
    // Copy any global assets
    let assets_root = global_assets_root(assets_path, config).await?;
    copy_global_assets(registry_root, &assets_root, component).await?;
    // Add the module to the components mod.rs
    let mod_rs_path = components_root.join("mod.rs");
    let mut mod_rs = tokio::fs::OpenOptions::new()
        .append(true)
        .read(true)
        .open(&mod_rs_path)
        .await
        .with_context(|| format!("Failed to open {}", mod_rs_path.display()))?;
    // Check if the module already exists
    // (substring match, so this matches both `mod foo;` and `pub mod foo;`)
    let mod_rs_content = tokio::fs::read_to_string(&mod_rs_path)
        .await
        .with_context(|| format!("Failed to read {}", mod_rs_path.display()))?;
    if !mod_rs_content.contains(&format!("mod {};", component.name)) {
        let mod_line = format!("pub mod {};\n", component.name);
        tokio::io::AsyncWriteExt::write_all(&mut mod_rs, mod_line.as_bytes())
            .await
            .with_context(|| format!("Failed to write to {}", mod_rs_path.display()))?;
    }
    Ok(())
}
/// Copy the component files. Returns true if the component was copied, false if it was skipped.
///
/// Walks `src` concurrently (one task per directory, one per file copy) and mirrors
/// every non-excluded file into the same relative location under `dest`.
async fn copy_component_files(
    src: &Path,
    dest: &Path,
    exclude: &[String],
    behavior: ComponentExistsBehavior,
) -> Result<bool> {
    // Helper: collect the immediate children of a directory.
    async fn read_dir_paths(src: &Path) -> Result<Vec<PathBuf>> {
        let mut entries = tokio::fs::read_dir(src).await?;
        let mut paths = vec![];
        while let Some(entry) = entries.next_entry().await? {
            paths.push(entry.path());
        }
        Ok(paths)
    }
    // If the directory already exists, return an error, return silently or overwrite it depending on the behavior
    if dest.exists() {
        match behavior {
            // The default behavior is to return an error
            ComponentExistsBehavior::Error => {
                bail!("Destination directory '{}' already exists", dest.display());
            }
            // For dependencies, we return early
            ComponentExistsBehavior::Return => {
                debug!(
                    "Destination directory '{}' already exists, returning early",
                    dest.display()
                );
                return Ok(false);
            }
            // If the force flag is set, we overwrite the existing component
            ComponentExistsBehavior::Overwrite => {
                debug!(
                    "Destination directory '{}' already exists, overwriting",
                    dest.display()
                );
                tokio::fs::remove_dir_all(dest).await?;
            }
        }
    }
    tokio::fs::create_dir_all(dest).await?;
    // Canonicalize the exclude list up front so path comparisons below are exact
    let exclude = exclude
        .iter()
        .map(|exclude| dunce::canonicalize(src.join(exclude)))
        .collect::<Result<Vec<_>, _>>()?;
    // The set of tasks reading directories
    let mut read_folder_tasks = JoinSet::new();
    // The set of tasks copying files
    let mut copy_tasks = JoinSet::new();
    // Start by reading the source directory
    let src = src.to_path_buf();
    read_folder_tasks.spawn({
        let src = src.clone();
        async move { read_dir_paths(&src).await }
    });
    // Continue while there are read tasks
    while let Some(res) = read_folder_tasks.join_next().await {
        let paths = res??;
        for path in paths {
            let path = dunce::canonicalize(path)?;
            // Skip excluded paths
            if exclude.iter().any(|e| *e == path || path.starts_with(e)) {
                debug!("Excluding path {}", path.display());
                continue;
            }
            // Find the path in the destination directory
            // NOTE(review): `src` itself is not canonicalized here, so this relies on
            // callers passing a canonical path; non-matching entries are silently skipped.
            let Ok(path_relative_to_src) = path.strip_prefix(&src) else {
                continue;
            };
            let dest = dest.join(path_relative_to_src);
            // If it's a directory, read it, otherwise copy the file
            if path.is_dir() {
                read_folder_tasks.spawn(async move { read_dir_paths(&path).await });
            } else {
                copy_tasks.spawn(async move {
                    if let Some(parent) = dest.parent() {
                        if !parent.exists() {
                            tokio::fs::create_dir_all(parent).await?;
                        }
                    }
                    tokio::fs::copy(&path, &dest).await
                });
            }
        }
    }
    // Wait for all copy tasks to finish
    while let Some(res) = copy_tasks.join_next().await {
        res??;
    }
    Ok(true)
}
/// Make sure the components directory and a mod.rs file exists. Returns true if the module
/// (directory or its mod.rs) was created, false if both already existed.
async fn ensure_components_module_exists(components_dir: &Path) -> Result<bool> {
    // Create the directory if needed. A pre-existing directory that is missing a mod.rs
    // still gets one created below — otherwise later appends to mod.rs would fail to
    // open the file (the mod.rs check was previously unreachable in that case).
    if !components_dir.exists() {
        tokio::fs::create_dir_all(&components_dir).await?;
    }
    let mod_rs_path = components_dir.join("mod.rs");
    if mod_rs_path.exists() {
        return Ok(false);
    }
    tokio::fs::write(&mod_rs_path, "// AUTOGENERATED Components module\n").await?;
    Ok(true)
}
/// Read a component from the given path
///
/// Parses `<path>/component.json` and pairs the manifest with the canonicalized
/// directory path.
async fn read_component(path: &Path) -> Result<ResolvedComponent> {
    let manifest_path = path.join("component.json");
    let manifest_bytes = tokio::fs::read(&manifest_path).await.with_context(|| {
        format!(
            "Failed to open component manifest at {}",
            manifest_path.display()
        )
    })?;
    Ok(ResolvedComponent {
        component: serde_json::from_slice(&manifest_bytes)?,
        path: dunce::canonicalize(path)?,
    })
}
/// Recursively discover all components starting from the root component
///
/// Performs a concurrent traversal: each queued member directory is read in a
/// spawned task, and any members of *those* components are queued in turn.
async fn discover_components(root: ResolvedComponent) -> Result<Vec<ResolvedComponent>> {
    // Create a queue of members to read
    let mut queue = root.member_paths();
    // The list of discovered components
    let mut components = vec![root];
    // The set of pending read tasks
    let mut pending = JoinSet::new();
    loop {
        // First, spawn tasks for all queued paths
        while let Some(root_path) = queue.pop() {
            pending.spawn(async move { read_component(&root_path).await });
        }
        // Then try to join the next task
        let Some(component) = pending.join_next().await else {
            // No queued work and no in-flight reads: traversal is complete
            break;
        };
        let component = component??;
        // And add the result to the queue and list
        queue.extend(component.member_paths());
        components.push(component);
    }
    Ok(components)
}
/// Copy any global assets for the component
///
/// Each entry in `component.global_assets` is resolved relative to the component,
/// validated to live inside the registry (so manifests cannot reference arbitrary
/// files on disk), and copied into `assets_root` keeping only its final file name.
async fn copy_global_assets(
    registry_root: &Path,
    assets_root: &Path,
    component: &ResolvedComponent,
) -> Result<()> {
    let canonical_registry_root = dunce::canonicalize(registry_root)?;
    for path in &component.global_assets {
        let src = component.path.join(path);
        let absolute_source = dunce::canonicalize(&src).with_context(|| {
            format!(
                "Failed to find global asset '{}' for component '{}'",
                src.display(),
                component.name
            )
        })?;
        // Make sure the source is inside the component registry somewhere
        if !absolute_source.starts_with(&canonical_registry_root) {
            bail!(
                "Cannot copy global asset '{}' for component '{}' because it is outside of the component registry '{}'",
                absolute_source.display(),
                component.name,
                canonical_registry_root.display()
            );
        }
        // Copy the file into the assets directory, preserving the file name and extension
        let dest = assets_root.join(
            absolute_source
                .components()
                .next_back()
                .context("Global assets must have at least one file component")?,
        );
        // Make sure the asset dir exists
        if let Some(parent) = dest.parent() {
            if !parent.exists() {
                tokio::fs::create_dir_all(parent).await?;
            }
        }
        tokio::fs::copy(&src, &dest).await.with_context(|| {
            format!(
                "Failed to copy global asset from {} to {}",
                src.display(),
                dest.display()
            )
        })?;
    }
    Ok(())
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/serve/proxy_ws.rs | packages/cli/src/serve/proxy_ws.rs | use crate::logging::TraceSrc;
use crate::serve::proxy::handle_proxy_error;
use anyhow::Context;
use axum::body::Body;
use axum::extract::ws::{CloseFrame as ClientCloseFrame, Message as ClientMessage};
use axum::extract::{FromRequestParts, WebSocketUpgrade};
use axum::http::request::Parts;
use axum::response::IntoResponse;
use futures_util::{SinkExt, StreamExt};
use hyper::{Request, Response, Uri};
use tokio_tungstenite::tungstenite::protocol::{
CloseFrame as ServerCloseFrame, Message as ServerMessage,
};
/// Upgrade an incoming websocket request and proxy it to the backend dev server.
///
/// On handshake failure the axum rejection is returned as the error response.
/// After the upgrade completes, forwarding errors are only logged — the HTTP
/// response has already been sent by then.
pub(crate) async fn proxy_websocket(
    mut parts: Parts,
    req: Request<Body>,
    backend_url: &Uri,
) -> Result<Response<Body>, Response<Body>> {
    let ws = WebSocketUpgrade::from_request_parts(&mut parts, &())
        .await
        .map_err(IntoResponse::into_response)?;
    tracing::trace!(dx_src = ?TraceSrc::Dev, "Proxying websocket connection {req:?}");
    let proxied_request = into_proxied_request(req, backend_url).map_err(handle_proxy_error)?;
    tracing::trace!(dx_src = ?TraceSrc::Dev, "Connection proxied to {proxied_uri}", proxied_uri = proxied_request.uri());
    Ok(ws.on_upgrade(move |client_ws| async move {
        match handle_ws_connection(client_ws, proxied_request).await {
            Ok(()) => tracing::trace!(dx_src = ?TraceSrc::Dev, "Websocket connection closed"),
            Err(e) => {
                tracing::error!(dx_src = ?TraceSrc::Dev, "Error proxying websocket connection: {e}")
            }
        }
    }))
}
/// Rewrite an incoming client request so it targets the proxied backend.
///
/// The path and all original headers are kept as-is; only the scheme (defaulting
/// to `ws` when absent) and the authority (taken from `backend_url`) are replaced.
fn into_proxied_request(
    req: Request<Body>,
    backend_url: &Uri,
) -> crate::Result<tokio_tungstenite::tungstenite::handshake::client::Request> {
    // Splitting the request keeps every header from the original request intact.
    let (mut parts, _body) = req.into_parts();
    let mut uri = parts.uri.into_parts();
    if uri.scheme.is_none() {
        uri.scheme = "ws".parse().ok();
    }
    uri.authority = backend_url.authority().cloned();
    parts.uri = Uri::from_parts(uri).context("Could not construct proxy URI")?;
    Ok(Request::from_parts(parts, ()))
}
/// Failure modes while proxying a websocket connection. Each variant records
/// which side (client vs backend server) and which direction failed; the
/// `#[error]` strings are the user-facing messages.
#[derive(thiserror::Error, Debug)]
enum WsError {
    #[error("Error connecting to server: {0}")]
    Connect(tokio_tungstenite::tungstenite::Error),
    #[error("Error sending message to server: {0}")]
    ToServer(tokio_tungstenite::tungstenite::Error),
    #[error("Error receiving message from server: {0}")]
    FromServer(tokio_tungstenite::tungstenite::Error),
    #[error("Error sending message to client: {0}")]
    ToClient(axum::Error),
    #[error("Error receiving message from client: {0}")]
    FromClient(axum::Error),
}
/// Forward messages in both directions between the client websocket and the
/// backend server until either side sends a Close frame (which is relayed
/// before exiting) or both streams end.
async fn handle_ws_connection(
    mut client_ws: axum::extract::ws::WebSocket,
    proxied_request: tokio_tungstenite::tungstenite::handshake::client::Request,
) -> Result<(), WsError> {
    let (mut server_ws, _) = tokio_tungstenite::connect_async(proxied_request)
        .await
        .map_err(WsError::Connect)?;
    // Set when a Close frame is observed; the loop exits after relaying it.
    let mut closed = false;
    while !closed {
        tokio::select! {
            Some(server_msg) = server_ws.next() => {
                closed = matches!(server_msg, Ok(ServerMessage::Close(..)));
                match server_msg.map_err(WsError::FromServer)?.into_msg() {
                    Ok(msg) => client_ws.send(msg).await.map_err(WsError::ToClient)?,
                    // Raw frames have no client-side equivalent, so drop them.
                    Err(UnexpectedRawFrame) => tracing::warn!(dx_src = ?TraceSrc::Dev, "Dropping unexpected raw websocket frame"),
                }
            },
            Some(client_msg) = client_ws.next() => {
                closed = matches!(client_msg, Ok(ClientMessage::Close(..)));
                // Irrefutable pattern: the client -> server conversion error type is `Infallible`.
                let Ok(msg) = client_msg.map_err(WsError::FromClient)?.into_msg();
                server_ws.send(msg).await.map_err(WsError::ToServer)?;
            },
            // Both streams have ended.
            else => break,
        }
    }
    Ok(())
}
/// Fallible conversion between the axum and tungstenite websocket message types,
/// which mirror each other apart from tungstenite's raw `Frame` variant.
trait IntoMsg<T> {
    type Error;
    fn into_msg(self) -> Result<T, Self::Error>;
}
impl IntoMsg<ServerMessage> for ClientMessage {
type Error = std::convert::Infallible;
fn into_msg(self) -> Result<ServerMessage, Self::Error> {
use ServerMessage as SM;
Ok(match self {
Self::Text(v) => SM::Text(v.as_str().into()),
Self::Binary(v) => SM::Binary(v),
Self::Ping(v) => SM::Ping(v),
Self::Pong(v) => SM::Pong(v),
Self::Close(v) => SM::Close(v.map(|cf| ServerCloseFrame {
code: cf.code.into(),
reason: cf.reason.as_str().into(),
})),
})
}
}
struct UnexpectedRawFrame;
impl IntoMsg<ClientMessage> for ServerMessage {
    type Error = UnexpectedRawFrame;
    /// Convert a server (tungstenite) message into the equivalent client (axum)
    /// message. Fails only for the raw `Frame` variant, which has no client-side
    /// counterpart.
    fn into_msg(self) -> Result<ClientMessage, Self::Error> {
        use ClientMessage as CM;
        Ok(match self {
            Self::Text(v) => CM::Text(v.as_str().into()),
            Self::Binary(v) => CM::Binary(v),
            Self::Ping(v) => CM::Ping(v),
            Self::Pong(v) => CM::Pong(v),
            Self::Close(v) => CM::Close(v.map(|cf| ClientCloseFrame {
                code: cf.code.into(),
                reason: cf.reason.as_str().into(),
            })),
            Self::Frame(_) => {
                // this variant should never be returned by next(), but handle it
                // gracefully by dropping it instead of panicking out of an abundance of caution
                return Err(UnexpectedRawFrame);
            }
        })
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/serve/update.rs | packages/cli/src/serve/update.rs | use crate::{BuildId, BuilderUpdate, BundleFormat, Error, TraceMsg};
use axum::extract::ws::Message as WsMessage;
use std::path::PathBuf;
/// One fat enum to rule them all....
///
/// Thanks to libraries like winit for the inspiration
#[allow(clippy::large_enum_variant)]
pub(crate) enum ServeUpdate {
    /// A new client connection was established for the given build
    NewConnection {
        id: BuildId,
        // ASLR offset reported by the connecting app — presumably used for address
        // translation when hotpatching; confirm against the connection handler
        aslr_reference: Option<u64>,
        // Process id of the connected app, when known
        pid: Option<u32>,
    },
    /// An incoming websocket message, tagged with the bundle format it came from
    WsMessage {
        bundle: BundleFormat,
        msg: WsMessage,
    },
    /// An update regarding the state of the build and running app from an AppBuilder
    BuilderUpdate {
        id: BuildId,
        update: BuilderUpdate,
    },
    /// The filesystem watcher reported changes to these files
    FilesChanged {
        files: Vec<PathBuf>,
    },
    /// Request to open the running app
    OpenApp,
    /// Request a rebuild
    RequestRebuild,
    /// Toggle whether rebuilds should happen
    ToggleShouldRebuild,
    /// Request to attach a debugger to the given build
    OpenDebugger {
        id: BuildId,
    },
    /// Request a redraw of the serve UI
    Redraw,
    /// A tracing log message to surface in the serve output
    TracingLog {
        log: TraceMsg,
    },
    /// Shut down the server, optionally carrying a fatal error
    Exit {
        error: Option<Error>,
    },
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/serve/runner.rs | packages/cli/src/serve/runner.rs | use super::{AppBuilder, ServeUpdate, WebServer};
use crate::{
platform_override::CommandWithPlatformOverrides, BuildArtifacts, BuildId, BuildMode,
BuildTargets, BuilderUpdate, BundleFormat, HotpatchModuleCache, Result, ServeArgs, TailwindCli,
TraceSrc, Workspace,
};
use anyhow::{bail, Context};
use dioxus_core::internal::{
HotReloadTemplateWithLocation, HotReloadedTemplate, TemplateGlobalKey,
};
use dioxus_devtools_types::HotReloadMsg;
use dioxus_dx_wire_format::BuildStage;
use dioxus_html::HtmlCtx;
use dioxus_rsx::CallBody;
use dioxus_rsx_hotreload::{ChangedRsx, HotReloadResult};
use futures_channel::mpsc::{UnboundedReceiver, UnboundedSender};
use futures_util::future::OptionFuture;
use futures_util::StreamExt;
use krates::NodeId;
use notify::{
event::{MetadataKind, ModifyKind},
Config, EventKind, RecursiveMode, Watcher as NotifyWatcher,
};
use std::{
collections::{HashMap, HashSet},
net::{IpAddr, TcpListener},
path::PathBuf,
sync::Arc,
time::Duration,
};
use syn::spanned::Spanned;
use tokio::process::Command;
/// This is the primary "state" object that holds the builds and handles for the running apps.
///
/// It also holds the watcher which is used to watch for changes in the filesystem and trigger rebuilds,
/// hotreloads, asset updates, etc.
///
/// Since we resolve the build request before initializing the CLI, it also serves as a place to store
/// resolved "serve" arguments, which is why it takes ServeArgs instead of BuildArgs. Simply wrap the
/// BuildArgs in a default ServeArgs and pass it in.
pub(crate) struct AppServer {
/// the platform of the "primary" crate (ie the first)
pub(crate) workspace: Arc<Workspace>,
pub(crate) client: AppBuilder,
pub(crate) server: Option<AppBuilder>,
// Related to to the filesystem watcher
pub(crate) watcher: Box<dyn notify::Watcher>,
pub(crate) _watcher_tx: UnboundedSender<notify::Event>,
pub(crate) watcher_rx: UnboundedReceiver<notify::Event>,
// Tracked state related to open builds and hot reloading
pub(crate) applied_client_hot_reload_message: HotReloadMsg,
pub(crate) file_map: HashMap<PathBuf, CachedFile>,
// Resolved args related to how we go about processing the rebuilds and logging
pub(crate) use_hotpatch_engine: bool,
pub(crate) automatic_rebuilds: bool,
pub(crate) interactive: bool,
pub(crate) _force_sequential: bool,
pub(crate) hot_reload: bool,
pub(crate) open_browser: bool,
pub(crate) _wsl_file_poll_interval: u16,
pub(crate) always_on_top: bool,
pub(crate) fullstack: bool,
pub(crate) ssg: bool,
pub(crate) watch_fs: bool,
// resolve args related to the webserver
pub(crate) devserver_port: u16,
pub(crate) devserver_bind_ip: IpAddr,
pub(crate) proxied_port: Option<u16>,
pub(crate) cross_origin_policy: bool,
// The arguments that should be forwarded to the client app when it is opened
pub(crate) client_args: Vec<String>,
// The arguments that should be forwarded to the server app when it is opened
pub(crate) server_args: Vec<String>,
// Additional plugin-type tools
pub(crate) tw_watcher: tokio::task::JoinHandle<Result<()>>,
// File changes that arrived while a build was in progress, to be processed after build completes
pub(crate) pending_file_changes: Vec<PathBuf>,
}
pub(crate) struct CachedFile {
contents: String,
most_recent: Option<String>,
templates: HashMap<TemplateGlobalKey, HotReloadedTemplate>,
}
impl AppServer {
/// Create the AppRunner and then initialize the filemap with the crate directory.
pub(crate) async fn new(args: ServeArgs) -> Result<Self> {
let workspace = Workspace::current().await?;
// Resolve the simpler args
let interactive = args.is_interactive_tty();
let force_sequential = args.platform_args.shared.targets.force_sequential_build();
let cross_origin_policy = args.cross_origin_policy;
// Find the launch args for the client and server
let split_args = |args: &str| {
args.split_whitespace()
.map(|s| s.to_string())
.collect::<Vec<_>>()
};
let server_args = args.platform_args.with_server_or_shared(|c| &c.args);
let server_args = split_args(server_args);
let client_args = args.platform_args.with_client_or_shared(|c| &c.args);
let client_args = split_args(client_args);
// These come from the args but also might come from the workspace settings
// We opt to use the manually specified args over the workspace settings
let hot_reload = args
.hot_reload
.unwrap_or_else(|| workspace.settings.always_hot_reload.unwrap_or(true));
let open_browser = args
.open
.unwrap_or_else(|| workspace.settings.always_open_browser.unwrap_or(false))
&& interactive;
let wsl_file_poll_interval = args
.wsl_file_poll_interval
.unwrap_or_else(|| workspace.settings.wsl_file_poll_interval.unwrap_or(2));
let always_on_top = args
.always_on_top
.unwrap_or_else(|| workspace.settings.always_on_top.unwrap_or(true));
// Use 127.0.0.1 as the default address if none is specified.
// If the user wants to export on the network, they can use `0.0.0.0` instead.
let devserver_bind_ip = args.address.addr.unwrap_or(WebServer::SELF_IP);
// If the user specified a port, use that, otherwise use any available port, preferring 8080
let devserver_port = args
.address
.port
.unwrap_or_else(|| get_available_port(devserver_bind_ip, Some(8080)).unwrap_or(8080));
// Spin up the file watcher
let (watcher_tx, watcher_rx) = futures_channel::mpsc::unbounded();
let watcher = create_notify_watcher(watcher_tx.clone(), wsl_file_poll_interval as u64);
let ssg = args.platform_args.shared.targets.ssg;
let target_args = CommandWithPlatformOverrides {
shared: args.platform_args.shared.targets,
server: args.platform_args.server.map(|s| s.targets),
client: args.platform_args.client.map(|c| c.targets),
};
let BuildTargets { client, server } = target_args.into_targets().await?;
// All servers will end up behind us (the devserver) but on a different port
// This is so we can serve a loading screen as well as devtools without anything particularly fancy
let fullstack = server.is_some();
let should_proxy_port = match client.bundle {
BundleFormat::Server => true,
_ => fullstack && !ssg,
};
let proxied_port = should_proxy_port
.then(|| get_available_port(devserver_bind_ip, None))
.flatten();
let watch_fs = args.watch.unwrap_or(true);
let use_hotpatch_engine = args.hot_patch;
let client = AppBuilder::new(&client)?;
let server = server.map(|server| AppBuilder::new(&server)).transpose()?;
let tw_watcher = TailwindCli::serve(
client.build.package_manifest_dir(),
client.build.config.application.tailwind_input.clone(),
client.build.config.application.tailwind_output.clone(),
);
_ = client.build.start_simulators().await;
// Encourage the user to update to a new dx version
crate::update::log_if_cli_could_update();
// Create the runner
let mut runner = Self {
file_map: Default::default(),
applied_client_hot_reload_message: Default::default(),
automatic_rebuilds: true,
watch_fs,
use_hotpatch_engine,
client,
server,
hot_reload,
open_browser,
_wsl_file_poll_interval: wsl_file_poll_interval,
always_on_top,
workspace,
devserver_port,
devserver_bind_ip,
proxied_port,
watcher,
watcher_rx,
_watcher_tx: watcher_tx,
interactive,
_force_sequential: force_sequential,
cross_origin_policy,
fullstack,
ssg,
tw_watcher,
server_args,
client_args,
pending_file_changes: Vec::new(),
};
// Only register the hot-reload stuff if we're watching the filesystem
if runner.watch_fs {
// Spin up the notify watcher
// When builds load though, we're going to parse their depinfo and add the paths to the watcher
runner.watch_filesystem();
// todo(jon): this might take a while so we should try and background it, or make it lazy somehow
// we could spawn a thread to search the FS and then when it returns we can fill the filemap
// in testing, if this hits a massive directory, it might take several seconds with no feedback.
// really, we should be using depinfo to get the files that are actually used, but the depinfo file might not be around yet
// todo(jon): see if we can just guess the depinfo file before it generates. might be stale but at least it catches most of the files
runner.load_rsx_filemap();
}
Ok(runner)
}
pub(crate) fn initialize(&mut self) {
let build_mode = match self.use_hotpatch_engine {
true => BuildMode::Fat,
false => BuildMode::Base { run: true },
};
self.client.start(build_mode.clone(), BuildId::PRIMARY);
if let Some(server) = self.server.as_mut() {
server.start(build_mode, BuildId::SECONDARY);
}
}
/// Take any pending file changes that were queued while a build was in progress.
/// Returns the files and clears the pending list.
pub(crate) fn take_pending_file_changes(&mut self) -> Vec<PathBuf> {
std::mem::take(&mut self.pending_file_changes)
}
pub(crate) async fn rebuild_ssg(&mut self, devserver: &WebServer) {
if self.client.stage != BuildStage::Success {
return;
}
// Run SSG and cache static routes if the server build is done
if let Some(server) = self.server.as_mut() {
if !self.ssg || server.stage != BuildStage::Success {
return;
}
if let Err(err) = crate::pre_render_static_routes(
Some(devserver.devserver_address()),
server,
Some(&server.tx.clone()),
)
.await
{
tracing::error!("Failed to pre-render static routes: {err}");
}
}
}
pub(crate) async fn wait(&mut self) -> ServeUpdate {
let client = &mut self.client;
let server = self.server.as_mut();
let client_wait = client.wait();
let server_wait = OptionFuture::from(server.map(|s| s.wait()));
let watcher_wait = self.watcher_rx.next();
tokio::select! {
// Wait for the client to finish
client_update = client_wait => {
ServeUpdate::BuilderUpdate {
id: BuildId::PRIMARY,
update: client_update,
}
}
Some(server_update) = server_wait => {
ServeUpdate::BuilderUpdate {
id: BuildId::SECONDARY,
update: server_update,
}
}
// Wait for the watcher to send us an event
event = watcher_wait => {
let mut changes: Vec<_> = event.into_iter().collect();
// Dequeue in bulk if we can, we might've received a lot of events in one go
while let Some(event) = self.watcher_rx.try_next().ok().flatten() {
changes.push(event);
}
// Filter the changes
let mut files: Vec<PathBuf> = vec![];
// Decompose the events into a list of all the files that have changed
for event in changes.drain(..) {
// Make sure we add new folders to the watch list, provided they're not matched by the ignore list
// We'll only watch new folders that are found under the crate, and then update our watcher to watch them
// This unfortunately won't pick up new krates added "at a distance" - IE krates not within the workspace.
if let EventKind::Create(_create_kind) = event.kind {
// If it's a new folder, watch it
// If it's a new cargo.toml (ie dep on the fly),
// todo(jon) support new folders on the fly
}
for path in event.paths {
// Workaround for notify and vscode-like editor:
// - when edit & save a file in vscode, there will be two notifications,
// - the first one is a file with empty content.
// - filter the empty file notification to avoid false rebuild during hot-reload
if let Ok(metadata) = std::fs::metadata(&path) {
if metadata.len() == 0 {
continue;
}
}
files.push(path);
}
}
ServeUpdate::FilesChanged { files }
}
}
}
/// Handle an update from the builder
pub(crate) async fn new_build_update(&mut self, update: &BuilderUpdate, devserver: &WebServer) {
if let BuilderUpdate::BuildReady { .. } = update {
// If the build is ready, we need to check if we need to pre-render with ssg
self.rebuild_ssg(devserver).await;
}
}
/// Handle the list of changed files from the file watcher, attempting to aggressively prevent
/// full rebuilds by hot-reloading RSX and hot-patching Rust code.
///
/// This will also handle any assets that are linked in the files, and copy them to the bundle
/// and send them to the client.
pub(crate) async fn handle_file_change(&mut self, files: &[PathBuf], server: &mut WebServer) {
// We can attempt to hotpatch if the build is in a bad state, since this patch might be a recovery.
if !matches!(
self.client.stage,
BuildStage::Failed | BuildStage::Aborted | BuildStage::Success
) {
// Queue file changes that arrive during a build, so we can process them after the build completes.
// This prevents losing changes from tools like stylance, tailwind, or sass that generate files
// in response to source changes.
tracing::debug!(
"Queueing file change: client is not ready to receive hotreloads. Files: {:#?}",
files
);
self.pending_file_changes.extend(files.iter().cloned());
return;
}
// If we have any changes to the rust files, we need to update the file map
let mut templates = vec![];
// Prepare the hotreload message we need to send
let mut assets = Vec::new();
let mut needs_full_rebuild = false;
// We attempt to hotreload rsx blocks without a full rebuild
for path in files {
// for various assets that might be linked in, we just try to hotreloading them forcefully
// That is, unless they appear in an include! macro, in which case we need to a full rebuild....
let ext = path
.extension()
.and_then(|v| v.to_str())
.unwrap_or_default();
// If it's an asset, we want to hotreload it
// todo(jon): don't hardcode this here
if let Some(bundled_names) = self.client.hotreload_bundled_assets(path).await {
for bundled_name in bundled_names {
assets.push(PathBuf::from("/assets/").join(bundled_name));
}
}
// If it's in the public dir, we sync it and trigger a full rebuild
if self.client.build.path_is_in_public_dir(path) {
needs_full_rebuild = true;
continue;
}
// If it's a rust file, we want to hotreload it using the filemap
if ext == "rs" {
// And grabout the contents
let Ok(new_contents) = std::fs::read_to_string(path) else {
tracing::debug!("Failed to read rust file while hotreloading: {:?}", path);
continue;
};
// Get the cached file if it exists - ignoring if it doesn't exist
let Some(cached_file) = self.file_map.get_mut(path) else {
tracing::debug!("No entry for file in filemap: {:?}", path);
tracing::debug!("Filemap: {:#?}", self.file_map.keys());
continue;
};
let Ok(local_path) = path.strip_prefix(self.workspace.workspace_root()) else {
tracing::debug!("Skipping file outside workspace dir: {:?}", path);
continue;
};
// We assume we can parse the old file and the new file, ignoring untracked rust files
let old_syn = syn::parse_file(&cached_file.contents);
let new_syn = syn::parse_file(&new_contents);
let (Ok(old_file), Ok(new_file)) = (old_syn, new_syn) else {
tracing::debug!("Diff rsx returned not parseable");
continue;
};
// Update the most recent version of the file, so when we force a rebuild, we keep operating on the most recent version
cached_file.most_recent = Some(new_contents);
// This assumes the two files are structured similarly. If they're not, we can't diff them
let Some(changed_rsx) = dioxus_rsx_hotreload::diff_rsx(&new_file, &old_file) else {
needs_full_rebuild = true;
break;
};
for ChangedRsx { old, new } in changed_rsx {
let old_start = old.span().start();
let old_parsed = syn::parse2::<CallBody>(old.tokens);
let new_parsed = syn::parse2::<CallBody>(new.tokens);
let (Ok(old_call_body), Ok(new_call_body)) = (old_parsed, new_parsed) else {
continue;
};
// Format the template location, normalizing the path
let file_name: String = local_path
.components()
.map(|c| c.as_os_str().to_string_lossy())
.collect::<Vec<_>>()
.join("/");
// Returns a list of templates that are hotreloadable
let results = HotReloadResult::new::<HtmlCtx>(
&old_call_body.body,
&new_call_body.body,
file_name.clone(),
);
// If no result is returned, we can't hotreload this file and need to keep the old file
let Some(results) = results else {
needs_full_rebuild = true;
break;
};
// Only send down templates that have roots, and ideally ones that have changed
// todo(jon): maybe cache these and don't send them down if they're the same
for (index, template) in results.templates {
if template.roots.is_empty() {
continue;
}
// Create the key we're going to use to identify this template
let key = TemplateGlobalKey {
file: file_name.clone(),
line: old_start.line,
column: old_start.column + 1,
index,
};
// if the template is the same, don't send its
if cached_file.templates.get(&key) == Some(&template) {
continue;
};
cached_file.templates.insert(key.clone(), template.clone());
templates.push(HotReloadTemplateWithLocation { template, key });
}
}
}
// If it's not a rust file, then it might be depended on via include! or similar
if ext != "rs" {
if let Some(artifacts) = self.client.artifacts.as_ref() {
if artifacts.depinfo.files.contains(path) {
needs_full_rebuild = true;
break;
}
}
}
}
// If the client is in a failed state, any changes to rsx should trigger a rebuild/hotpatch
if self.client.stage == BuildStage::Failed && !templates.is_empty() {
needs_full_rebuild = true
}
// todo - we need to distinguish between hotpatchable rebuilds and true full rebuilds.
// A full rebuild is required when the user modifies static initializers which we haven't wired up yet.
if needs_full_rebuild && self.automatic_rebuilds {
if self.use_hotpatch_engine {
self.client.patch_rebuild(files.to_vec(), BuildId::PRIMARY);
if let Some(server) = self.server.as_mut() {
server.patch_rebuild(files.to_vec(), BuildId::SECONDARY);
}
self.clear_hot_reload_changes();
self.clear_cached_rsx();
server.send_patch_start().await;
} else {
self.client
.start_rebuild(BuildMode::Base { run: true }, BuildId::PRIMARY);
if let Some(server) = self.server.as_mut() {
server.start_rebuild(BuildMode::Base { run: true }, BuildId::SECONDARY);
}
self.clear_hot_reload_changes();
self.clear_cached_rsx();
server.send_reload_start().await;
}
} else {
let msg = HotReloadMsg {
templates,
assets,
ms_elapsed: 0,
jump_table: Default::default(),
for_build_id: None,
for_pid: None,
};
self.add_hot_reload_message(&msg);
let file = files[0].display().to_string();
let file =
file.trim_start_matches(&self.client.build.crate_dir().display().to_string());
if needs_full_rebuild && !self.automatic_rebuilds {
use crate::styles::NOTE_STYLE;
tracing::warn!(
"Ignoring full rebuild for: {NOTE_STYLE}{}{NOTE_STYLE:#}",
file
);
}
// Only send a hotreload message for templates and assets - otherwise we'll just get a full rebuild
//
// todo: move the android file uploading out of hotreload_bundled_asset and
//
// Also make sure the builder isn't busy since that might cause issues with hotreloads
// https://github.com/DioxusLabs/dioxus/issues/3361
if !msg.is_empty() && self.client.can_receive_hotreloads() {
use crate::styles::NOTE_STYLE;
tracing::info!(dx_src = ?TraceSrc::Dev, "Hotreloading: {NOTE_STYLE}{}{NOTE_STYLE:#}", file);
if !server.has_hotreload_sockets() && self.client.build.bundle != BundleFormat::Web
{
tracing::warn!("No clients to hotreload - try reloading the app!");
}
server.send_hotreload(msg).await;
} else {
tracing::debug!(dx_src = ?TraceSrc::Dev, "Ignoring file change: {}", file);
}
}
}
/// Finally "bundle" this app and return a handle to it
pub(crate) async fn open(
&mut self,
artifacts: &BuildArtifacts,
devserver: &mut WebServer,
) -> Result<()> {
// Make sure to save artifacts regardless of if we're opening the app or not
match artifacts.build_id {
BuildId::PRIMARY => self.client.artifacts = Some(artifacts.clone()),
BuildId::SECONDARY => {
if let Some(server) = self.server.as_mut() {
server.artifacts = Some(artifacts.clone());
}
}
_ => {}
}
let should_open = self.client.stage == BuildStage::Success
&& (self.server.as_ref().map(|s| s.stage == BuildStage::Success)).unwrap_or(true);
use crate::cli::styles::GLOW_STYLE;
if should_open {
let time_taken = artifacts
.time_end
.duration_since(artifacts.time_start)
.unwrap();
if self.client.builds_opened == 0 {
tracing::info!(
"Build completed successfully in {GLOW_STYLE}{:?}ms{GLOW_STYLE:#}, launching app! 💫",
time_taken.as_millis()
);
} else {
tracing::info!(
"Build completed in {GLOW_STYLE}{:?}ms{GLOW_STYLE:#}",
time_taken.as_millis()
);
}
let open_browser = self.client.builds_opened == 0 && self.open_browser;
self.open_all(devserver, open_browser).await?;
// Give a second for the server to boot
tokio::time::sleep(Duration::from_millis(300)).await;
// Update the screen + devserver with the new handle info
devserver.send_reload_command().await
}
Ok(())
}
/// Open an existing app bundle, if it exists
///
/// Will attempt to open the server and client together, in a coordinated way such that the server
/// opens first, initializes, and then the client opens.
///
/// There's a number of issues we need to be careful to work around:
/// - The server failing to boot or crashing on startup (and entering a boot loop)
/// -
pub(crate) async fn open_all(
&mut self,
devserver: &WebServer,
open_browser: bool,
) -> Result<()> {
let devserver_ip = devserver.devserver_address();
let fullstack_address = devserver.proxied_server_address();
let displayed_address = devserver.displayed_address();
// Always open the server first after the client has been built
// Only open the server if it isn't prerendered
if let Some(server) = self.server.as_mut().filter(|_| !self.ssg) {
tracing::debug!("Opening server build");
server.soft_kill().await;
server
.open(
devserver_ip,
displayed_address,
fullstack_address,
false,
false,
BuildId::SECONDARY,
&self.server_args,
)
.await?;
}
// Start the new app before we kill the old one to give it a little bit of time
self.client.soft_kill().await;
self.client
.open(
devserver_ip,
displayed_address,
fullstack_address,
open_browser,
self.always_on_top,
BuildId::PRIMARY,
&self.client_args,
)
.await?;
Ok(())
}
/// Shutdown all the running processes
pub(crate) async fn shutdown(&mut self) -> Result<()> {
self.client.soft_kill().await;
if let Some(server) = self.server.as_mut() {
server.soft_kill().await;
}
// If the client is running on Android, we need to remove the port forwarding
// todo: use the android tools "adb"
if matches!(self.client.build.bundle, BundleFormat::Android) {
if let Err(err) = Command::new(&self.workspace.android_tools()?.adb)
.arg("reverse")
.arg("--remove")
.arg(format!("tcp:{}", self.devserver_port))
.output()
.await
{
tracing::error!(
"failed to remove forwarded port {}: {err}",
self.devserver_port
);
}
}
// force the tailwind watcher to stop - if we don't, it eats our stdin
self.tw_watcher.abort();
Ok(())
}
/// Perform a full rebuild of the app, equivalent to `cargo rustc` from scratch with no incremental
/// hot-patch engine integration.
pub(crate) async fn full_rebuild(&mut self) {
let build_mode = match self.use_hotpatch_engine {
true => BuildMode::Fat,
false => BuildMode::Base { run: true },
};
self.client
.start_rebuild(build_mode.clone(), BuildId::PRIMARY);
if let Some(s) = self.server.as_mut() {
s.start_rebuild(build_mode, BuildId::SECONDARY);
}
self.clear_hot_reload_changes();
self.clear_cached_rsx();
self.clear_patches();
}
pub(crate) async fn hotpatch(
&mut self,
bundle: &BuildArtifacts,
id: BuildId,
cache: &HotpatchModuleCache,
devserver: &mut WebServer,
) -> Result<()> {
let elapsed = bundle
.time_end
.duration_since(bundle.time_start)
.unwrap_or_default();
let jump_table = match id {
BuildId::PRIMARY => self.client.hotpatch(bundle, cache).await,
BuildId::SECONDARY => {
self.server
.as_mut()
.context("Server not found")?
.hotpatch(bundle, cache)
.await
}
_ => bail!("Invalid build id"),
}?;
if id == BuildId::PRIMARY {
self.applied_client_hot_reload_message.jump_table = self.client.patches.last().cloned();
}
// If no server, just send the patch immediately
let Some(server) = self.server.as_mut() else {
devserver
.send_patch(jump_table, elapsed, id, self.client.pid)
.await;
return Ok(());
};
// If we have a server, we need to wait until both the client and server are ready
// Otherwise we end up with an annoying race condition where the client can't actually load the patch
if self.client.stage == BuildStage::Success && server.stage == BuildStage::Success {
let client_jump_table = self
.client
.patches
.last()
.cloned()
.context("Missing client jump table")?;
let server_jump_table = server
.patches
.last()
.cloned()
.context("Missing server jump table")?;
devserver
.send_patch(server_jump_table, elapsed, BuildId::SECONDARY, server.pid)
.await;
devserver
.send_patch(
client_jump_table,
elapsed,
BuildId::PRIMARY,
self.client.pid,
)
.await;
}
Ok(())
}
pub(crate) fn get_build(&self, id: BuildId) -> Option<&AppBuilder> {
match id {
BuildId::PRIMARY => Some(&self.client),
BuildId::SECONDARY => self.server.as_ref(),
_ => None,
}
}
pub(crate) fn client(&self) -> &AppBuilder {
&self.client
}
/// The name of the app being served, to display
pub(crate) fn app_name(&self) -> &str {
self.client.build.executable_name()
}
/// Get any hot reload changes that have been applied since the last full rebuild
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | true |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/serve/mod.rs | packages/cli/src/serve/mod.rs | use crate::{
styles::{GLOW_STYLE, LINK_STYLE},
AppBuilder, BuildId, BuildMode, BuilderUpdate, BundleFormat, Result, ServeArgs,
TraceController,
};
mod ansi_buffer;
mod output;
mod proxy;
mod proxy_ws;
mod runner;
mod server;
mod update;
use anyhow::bail;
use dioxus_dx_wire_format::BuildStage;
pub(crate) use output::*;
pub(crate) use runner::*;
pub(crate) use server::*;
pub(crate) use update::*;
/// For *all* builds, the CLI spins up a dedicated webserver, file watcher, and build infrastructure to serve the project.
///
/// This includes web, desktop, mobile, fullstack, etc.
///
/// Platform specifics:
/// -------------------
/// - Web: We need to attach a filesystem server to our devtools webserver to serve the project. We
/// want to emulate GithubPages here since most folks are deploying there and expect things like
/// basepath to match.
/// - Desktop: We spin up the dev server but without a filesystem server.
/// - Mobile: Basically the same as desktop.
///
/// When fullstack is enabled, we'll also build for the `server` target and then hotreload the server.
/// The "server" is special here since "fullstack" is functionally just an addition to the regular client
/// setup.
///
/// Todos(Jon):
/// - I'd love to be able to configure the CLI while it's running so we can change settings on the fly.
/// - I want us to be able to detect a `server_fn` in the project and then upgrade from a static server
/// to a dynamic one on the fly.
pub(crate) async fn serve_all(args: ServeArgs, tracer: &TraceController) -> Result<()> {
// Load the args into a plan, resolving all tooling, build dirs, arguments, decoding the multi-target, etc
let exit_on_error = args.exit_on_error;
let mut builder = AppServer::new(args).await?;
let mut devserver = WebServer::start(&builder)?;
let mut screen = Output::start(builder.interactive).await?;
// This is our default splash screen. We might want to make this a fancier splash screen in the future
// Also, these commands might not be the most important, but it's all we've got enabled right now
tracing::info!(
r#"-----------------------------------------------------------------
Serving your app: {binname}! 🚀
• Press {GLOW_STYLE}`ctrl+c`{GLOW_STYLE:#} to exit the server
• Press {GLOW_STYLE}`r`{GLOW_STYLE:#} to rebuild the app
• Press {GLOW_STYLE}`p`{GLOW_STYLE:#} to toggle automatic rebuilds
• Press {GLOW_STYLE}`v`{GLOW_STYLE:#} to toggle verbose logging
• Press {GLOW_STYLE}`/`{GLOW_STYLE:#} for more commands and shortcuts{extra}
----------------------------------------------------------------"#,
binname = builder.client.build.executable_name(),
extra = if builder.client.build.using_dioxus_explicitly {
format!(
"\n Learn more at {LINK_STYLE}https://dioxuslabs.com/learn/0.7/getting_started{LINK_STYLE:#}"
)
} else {
String::new()
}
);
builder.initialize();
loop {
// Draw the state of the server to the screen
screen.render(&builder, &devserver);
// And then wait for any updates before redrawing
let msg = tokio::select! {
msg = builder.wait() => msg,
msg = devserver.wait() => msg,
msg = screen.wait() => msg,
msg = tracer.wait() => msg,
};
match msg {
ServeUpdate::FilesChanged { files } => {
if files.is_empty() || !builder.hot_reload {
continue;
}
builder.handle_file_change(&files, &mut devserver).await;
}
ServeUpdate::RequestRebuild => {
// The spacing here is important-ish: we want
// `Full rebuild:` to line up with
// `Hotreloading:` to keep the alignment during long edit sessions
// `Hot-patching:` to keep the alignment during long edit sessions
tracing::info!("Full rebuild: triggered manually");
builder.full_rebuild().await;
devserver.send_reload_start().await;
devserver.start_build().await;
}
// Run the server in the background
// Waiting for updates here lets us tap into when clients are added/removed
ServeUpdate::NewConnection {
id,
aslr_reference,
pid,
} => {
devserver
.send_hotreload(builder.applied_hot_reload_changes(BuildId::PRIMARY))
.await;
if builder.server.is_some() {
devserver
.send_hotreload(builder.applied_hot_reload_changes(BuildId::SECONDARY))
.await;
}
builder.client_connected(id, aslr_reference, pid).await;
}
// Received a message from the devtools server - currently we only use this for
// logging, so we just forward it the tui
ServeUpdate::WsMessage { msg, bundle } => {
screen.push_ws_message(bundle, &msg);
}
// Wait for logs from the build engine
// These will cause us to update the screen
// We also can check the status of the builds here in case we have multiple ongoing builds
ServeUpdate::BuilderUpdate { id, update } => {
let bundle_format = builder.get_build(id).unwrap().build.bundle;
// Queue any logs to be printed if need be
screen.new_build_update(&update);
// And then update the websocketed clients with the new build status in case they want it
devserver.new_build_update(&update).await;
// Start the SSG build if we need to
builder.new_build_update(&update, &devserver).await;
// And then open the app if it's ready
match update {
BuilderUpdate::Progress {
stage: BuildStage::Failed,
} => {
if exit_on_error {
bail!("Build failed for platform: {bundle_format}");
}
}
BuilderUpdate::Progress {
stage: BuildStage::Aborted,
} => {
if exit_on_error {
bail!("Build aborted for platform: {bundle_format}");
}
}
BuilderUpdate::Progress { .. } => {}
BuilderUpdate::CompilerMessage { message } => {
screen.push_cargo_log(message);
}
BuilderUpdate::BuildFailed { err } => {
tracing::error!(
"{ERROR_STYLE}Build failed{ERROR_STYLE:#}: {}",
crate::error::log_stacktrace(&err, 15),
ERROR_STYLE = crate::styles::ERROR_STYLE,
);
if exit_on_error {
return Err(err);
}
}
BuilderUpdate::BuildReady { bundle } => {
match bundle.mode {
BuildMode::Thin { ref cache, .. } => {
if let Err(err) =
builder.hotpatch(&bundle, id, cache, &mut devserver).await
{
tracing::error!("Failed to hot-patch app: {err}");
if let Some(_patching) =
err.downcast_ref::<crate::build::PatchError>()
{
tracing::info!("Starting full rebuild: {err}");
builder.full_rebuild().await;
devserver.send_reload_start().await;
devserver.start_build().await;
}
}
}
BuildMode::Base { .. } | BuildMode::Fat => {
_ = builder
.open(&bundle, &mut devserver)
.await
.inspect_err(|e| tracing::error!("Failed to open app: {}", e));
}
}
// Process any file changes that were queued while the build was in progress.
// This handles tools like stylance, tailwind, or sass that generate files
// in response to source changes - those changes would otherwise be lost.
let pending = builder.take_pending_file_changes();
if !pending.is_empty() {
tracing::debug!(
"Processing {} pending file changes after build",
pending.len()
);
builder.handle_file_change(&pending, &mut devserver).await;
}
}
BuilderUpdate::StdoutReceived { msg } => {
screen.push_stdio(bundle_format, msg, tracing::Level::INFO);
}
BuilderUpdate::StderrReceived { msg } => {
screen.push_stdio(bundle_format, msg, tracing::Level::ERROR);
}
BuilderUpdate::ProcessExited { status } => {
if status.success() {
tracing::info!(
r#"Application [{bundle_format}] exited gracefully.
• To restart the app, press `r` to rebuild or `o` to open
• To exit the server, press `ctrl+c`"#
);
} else {
tracing::error!(
"Application [{bundle_format}] exited with error: {status}"
);
if exit_on_error {
bail!("Application [{bundle_format}] exited with error: {status}");
}
}
}
BuilderUpdate::ProcessWaitFailed { err } => {
tracing::warn!(
"Failed to wait for process - maybe it's hung or being debugged?: {err}"
);
if exit_on_error {
return Err(err.into());
}
}
}
}
ServeUpdate::TracingLog { log } => {
screen.push_log(log);
}
ServeUpdate::OpenApp => match builder.use_hotpatch_engine {
true if !matches!(builder.client.build.bundle, BundleFormat::Web) => {
tracing::warn!(
"Opening a native app with hotpatching enabled requires a full rebuild..."
);
builder.full_rebuild().await;
devserver.send_reload_start().await;
devserver.start_build().await;
}
_ => {
if let Err(err) = builder.open_all(&devserver, true).await {
tracing::error!(
"Failed to open app: {}",
crate::error::log_stacktrace(&err, 15)
)
}
}
},
ServeUpdate::Redraw => {
// simply returning will cause a redraw
}
ServeUpdate::ToggleShouldRebuild => {
use crate::styles::{ERROR, NOTE_STYLE};
builder.automatic_rebuilds = !builder.automatic_rebuilds;
tracing::info!(
"Automatic rebuilds are currently: {}",
if builder.automatic_rebuilds {
format!("{NOTE_STYLE}enabled{NOTE_STYLE:#}")
} else {
format!("{ERROR}disabled{ERROR:#}")
}
)
}
ServeUpdate::OpenDebugger { id } => {
builder.open_debugger(&devserver, id).await;
}
ServeUpdate::Exit { error } => {
_ = builder.shutdown().await;
_ = devserver.shutdown().await;
match error {
Some(err) => return Err(err),
None => return Ok(()),
}
}
}
}
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/serve/server.rs | packages/cli/src/serve/server.rs | use crate::{
config::WebHttpsConfig, serve::ServeUpdate, BuildId, BuildStage, BuilderUpdate, BundleFormat,
Result, TraceSrc,
};
use anyhow::{bail, Context};
use axum::{
body::Body,
extract::{
ws::{Message, WebSocket},
Query, Request, State, WebSocketUpgrade,
},
http::{
header::{HeaderName, HeaderValue, CACHE_CONTROL, EXPIRES, PRAGMA},
Method, Response, StatusCode,
},
middleware::{self, Next},
response::IntoResponse,
routing::{get, get_service},
Extension, Router,
};
use dioxus_devtools_types::{DevserverMsg, HotReloadMsg};
use futures_channel::mpsc::{UnboundedReceiver, UnboundedSender};
use futures_util::{
future,
stream::{self, FuturesUnordered},
StreamExt,
};
use hyper::HeaderMap;
use rustls::crypto::{aws_lc_rs::default_provider, CryptoProvider};
use serde::{Deserialize, Serialize};
use std::{
convert::Infallible,
fs, io,
net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener},
path::Path,
sync::{Arc, RwLock},
time::Duration,
};
use subsecond_types::JumpTable;
use tokio::process::Command;
use tower_http::{
cors::Any,
services::fs::{ServeDir, ServeFileSystemResponseBody},
ServiceBuilderExt,
};
use super::AppServer;
/// The webserver that serves statics assets (if fullstack isn't already doing that) and the websocket
/// communication layer that we use to send status updates and hotreloads to the client.
///
/// todo(jon): we should merge the build status and hotreload sockets into just a "devtools" socket
/// which carries all the message types. This would make it easier for us to add more message types
/// and better tooling on the pages that we serve.
pub(crate) struct WebServer {
    /// IP we display to the user (mirrors the bind IP — see `start`).
    devserver_exposed_ip: IpAddr,
    /// IP the TCP listener is actually bound to (may be the wildcard 0.0.0.0).
    devserver_bind_ip: IpAddr,
    /// Port the devserver listens on.
    devserver_port: u16,
    /// Port of the user's (fullstack) server we proxy to, if any.
    proxied_port: Option<u16>,
    /// Currently-connected hot-reload websocket clients.
    hot_reload_sockets: Vec<ConnectedWsClient>,
    /// Currently-connected build-status websocket clients.
    build_status_sockets: Vec<ConnectedWsClient>,
    /// Receives newly-upgraded hot-reload sockets from the axum handler.
    new_hot_reload_sockets: UnboundedReceiver<ConnectedWsClient>,
    /// Receives newly-upgraded build-status sockets from the axum handler.
    new_build_status_sockets: UnboundedReceiver<ConnectedWsClient>,
    /// Shared, mutable build status, also read by the html-intercept middleware.
    build_status: SharedStatus,
    /// Name of the application being served (sent to new status clients).
    application_name: String,
    /// Bundle format of the client build (web, android, ...).
    bundle: BundleFormat,
}
/// A websocket client connected to the devserver, plus the metadata it reported
/// in its connection query string (see `ConnectionQuery` in the router).
pub(crate) struct ConnectedWsClient {
    /// The upgraded websocket connection itself.
    socket: WebSocket,
    /// Which build this client belongs to; defaults to `BuildId::PRIMARY` when absent.
    build_id: Option<BuildId>,
    /// ASLR base reported by the client — presumably used to compute hot-patch
    /// jump tables for native targets; confirm against the patch engine.
    aslr_reference: Option<u64>,
    /// OS process id of the connected app, if it reported one.
    pid: Option<u32>,
}
impl WebServer {
/// The loopback address, used when rewriting wildcard binds for display.
pub const SELF_IP: IpAddr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));

/// Start the development server.
/// This will set up the default http server if there's no server specified (usually via fullstack).
///
/// This will also start the websocket server that powers the devtools. If you want to communicate
/// with connected devtools clients, this is the place to do it.
pub(crate) fn start(runner: &AppServer) -> Result<Self> {
    // Channels through which the axum websocket handlers hand us newly-connected clients.
    let (hot_reload_sockets_tx, hot_reload_sockets_rx) = futures_channel::mpsc::unbounded();
    let (build_status_sockets_tx, build_status_sockets_rx) = futures_channel::mpsc::unbounded();

    // Create the listener that we'll pass into the devserver, but save its IP here so
    // we can display it to the user in the tui
    let devserver_bind_ip = runner.devserver_bind_ip;
    let devserver_port = runner.devserver_port;
    let proxied_port = runner.proxied_port;
    let devserver_exposed_ip = devserver_bind_ip;
    let devserver_bind_address = SocketAddr::new(devserver_bind_ip, devserver_port);

    // Bind synchronously so we can produce a friendly error if the port is taken.
    let listener = std::net::TcpListener::bind(devserver_bind_address).with_context(|| {
        anyhow::anyhow!(
            "Failed to bind server to: {devserver_bind_address}, is there another devserver running?\nTo run multiple devservers, use the --port flag to specify a different port"
        )
    })?;
    let proxied_address = proxied_port.map(|port| SocketAddr::new(devserver_exposed_ip, port));

    // Set up the router with some shared state that we'll update later to reflect the current state of the build
    let build_status = SharedStatus::new_with_starting_build();
    let router = build_devserver_router(
        runner,
        hot_reload_sockets_tx,
        build_status_sockets_tx,
        proxied_address,
        build_status.clone(),
    )?;

    // And finally, start the server mainloop
    tokio::spawn(devserver_mainloop(
        runner.client().build.config.web.https.clone(),
        listener,
        router,
    ));

    Ok(Self {
        build_status,
        proxied_port,
        devserver_bind_ip,
        devserver_exposed_ip,
        devserver_port,
        hot_reload_sockets: Default::default(),
        build_status_sockets: Default::default(),
        new_hot_reload_sockets: hot_reload_sockets_rx,
        new_build_status_sockets: build_status_sockets_rx,
        application_name: runner.app_name().to_string(),
        bundle: runner.client.build.bundle,
    })
}
/// Wait for new clients to be connected and then save them
///
/// Resolves with a `ServeUpdate` when a new devtools client connects or an
/// existing hot-reload client sends a message. Build-status connections are
/// handled inline and intentionally never resolve this future.
pub(crate) async fn wait(&mut self) -> ServeUpdate {
    let mut new_hot_reload_socket = self.new_hot_reload_sockets.next();
    let mut new_build_status_socket = self.new_build_status_sockets.next();

    // Poll every connected hot-reload socket concurrently. `new_message`
    // mutably borrows `hot_reload_sockets`, so it must be dropped before any
    // push/remove on that Vec (hence the explicit `drop`s below).
    let mut new_message = self
        .hot_reload_sockets
        .iter_mut()
        .enumerate()
        .map(|(idx, socket)| async move { (idx, socket.socket.next().await) })
        .collect::<FuturesUnordered<_>>();

    tokio::select! {
        new_hot_reload_socket = &mut new_hot_reload_socket => {
            if let Some(new_socket) = new_hot_reload_socket {
                let aslr_reference = new_socket.aslr_reference;
                let pid = new_socket.pid;
                // Clients that don't report a build id are assumed to be the primary build.
                let id = new_socket.build_id.unwrap_or(BuildId::PRIMARY);
                drop(new_message);
                self.hot_reload_sockets.push(new_socket);
                return ServeUpdate::NewConnection { aslr_reference, id, pid };
            } else {
                panic!("Could not receive a socket - the devtools could not boot - the port is likely already in use");
            }
        }

        new_build_status_socket = &mut new_build_status_socket => {
            if let Some(mut new_socket) = new_build_status_socket {
                drop(new_message);

                // Update the socket with project info and current build status
                let project_info = SharedStatus::new(Status::ClientInit { application_name: self.application_name.clone(), bundle: self.bundle });
                if project_info.send_to(&mut new_socket.socket).await.is_ok() {
                    _ = self.build_status.send_to(&mut new_socket.socket).await;
                    self.build_status_sockets.push(new_socket);
                }

                // Status clients never produce a ServeUpdate — park here forever.
                return future::pending::<ServeUpdate>().await;
            } else {
                panic!("Could not receive a socket - the devtools could not boot - the port is likely already in use");
            }
        }

        Some((idx, message)) = new_message.next() => {
            match message {
                Some(Ok(msg)) => return ServeUpdate::WsMessage { msg, bundle: BundleFormat::Web },
                // Error or end-of-stream: the client disconnected — drop it.
                _ => {
                    drop(new_message);
                    _ = self.hot_reload_sockets.remove(idx);
                }
            }
        }
    }

    future::pending().await
}
/// Shut down the websocket layer: notify clients, then close every hot-reload socket.
pub(crate) async fn shutdown(&mut self) {
    self.send_shutdown().await;
    // Best-effort close; send errors are ignored since we're exiting anyway.
    for mut socket in self.hot_reload_sockets.drain(..) {
        _ = socket.socket.send(Message::Close(None)).await;
    }
}

/// Sends the current build status to all clients.
///
/// Clients whose socket errors on send are pruned from the list.
async fn send_build_status(&mut self) {
    let mut i = 0;
    while i < self.build_status_sockets.len() {
        let socket = &mut self.build_status_sockets[i];
        if self.build_status.send_to(&mut socket.socket).await.is_err() {
            // Removal shifts later elements down, so don't advance `i`.
            self.build_status_sockets.remove(i);
        } else {
            i += 1;
        }
    }
}
/// Sends a start build message to all clients.
pub(crate) async fn start_build(&mut self) {
    self.build_status.set(Status::Building {
        progress: 0.0,
        build_message: "Starting the build...".to_string(),
    });
    self.send_build_status().await;
}

/// Sends an updated build status to all clients.
pub(crate) async fn new_build_update(&mut self, update: &BuilderUpdate) {
    match update {
        BuilderUpdate::Progress { stage } => {
            // Todo(miles): wire up more messages into the splash screen UI
            match stage {
                BuildStage::Success => {}
                BuildStage::Failed => self.send_reload_failed().await,
                BuildStage::Restarting => self.send_reload_start().await,
                BuildStage::Initializing => {}
                BuildStage::InstallingTooling => {}
                BuildStage::Compiling {
                    current,
                    total,
                    krate,
                    ..
                } => {
                    // Don't overwrite a terminal Ready/BuildError status with compile progress.
                    if !matches!(
                        self.build_status.get(),
                        Status::Ready | Status::BuildError { .. }
                    ) {
                        self.build_status.set(Status::Building {
                            // Clamp guards against total == 0 or current > total.
                            progress: (*current as f64 / *total as f64).clamp(0.0, 1.0),
                            build_message: format!("{krate} compiling"),
                        });
                        self.send_build_status().await;
                    }
                }
                BuildStage::OptimizingWasm => {}
                BuildStage::Aborted => {}
                BuildStage::CopyingAssets { .. } => {}
                _ => {}
            }
        }
        BuilderUpdate::CompilerMessage { .. } => {}
        BuilderUpdate::BuildReady { .. } => {}
        BuilderUpdate::BuildFailed { err } => {
            let error = err.to_string();
            self.build_status.set(Status::BuildError {
                // Render ANSI escape codes as HTML for the browser overlay,
                // falling back to the raw string if conversion fails.
                error: ansi_to_html::convert(&error).unwrap_or(error),
            });
            self.send_reload_failed().await;
            self.send_build_status().await;
        }
        BuilderUpdate::StdoutReceived { .. } => {}
        BuilderUpdate::StderrReceived { .. } => {}
        BuilderUpdate::ProcessExited { .. } => {}
        BuilderUpdate::ProcessWaitFailed { .. } => {}
    }
}
/// Whether at least one hot-reload client is currently connected.
pub(crate) fn has_hotreload_sockets(&self) -> bool {
    !self.hot_reload_sockets.is_empty()
}

/// Sends hot reloadable changes to all clients.
///
/// Clients whose socket errors on send are pruned from the list.
pub(crate) async fn send_hotreload(&mut self, reload: HotReloadMsg) {
    if reload.is_empty() {
        return;
    }

    tracing::trace!("Sending hotreload to clients {:?}", reload);

    // Serialize once and reuse the string for every client.
    let msg = DevserverMsg::HotReload(reload);
    let msg = serde_json::to_string(&msg).unwrap();

    // Send the changes to any connected clients
    let mut i = 0;
    while i < self.hot_reload_sockets.len() {
        let socket = &mut self.hot_reload_sockets[i];
        if socket
            .socket
            .send(Message::Text(msg.clone().into()))
            .await
            .is_err()
        {
            // Removal shifts later elements down, so don't advance `i`.
            self.hot_reload_sockets.remove(i);
        } else {
            i += 1;
        }
    }
}
/// Send a hot-patch jump table to connected clients and mark the build ready.
pub(crate) async fn send_patch(
    &mut self,
    jump_table: JumpTable,
    time_taken: Duration,
    build: BuildId,
    for_pid: Option<u32>,
) {
    let msg = DevserverMsg::HotReload(HotReloadMsg {
        jump_table: Some(jump_table),
        ms_elapsed: time_taken.as_millis() as u64,
        // A patch-only message: no template or asset changes.
        templates: vec![],
        assets: vec![],
        for_pid,
        for_build_id: Some(build.0 as _),
    });
    self.send_devserver_message_to_all(msg).await;
    self.set_ready().await;
}

/// Tells all clients that a hot patch has started.
pub(crate) async fn send_patch_start(&mut self) {
    self.send_devserver_message_to_all(DevserverMsg::HotPatchStart)
        .await;
}

/// Tells all clients that a full rebuild has started.
pub(crate) async fn send_reload_start(&mut self) {
    self.send_devserver_message_to_all(DevserverMsg::FullReloadStart)
        .await;
}

/// Tells all clients that a full rebuild has failed.
pub(crate) async fn send_reload_failed(&mut self) {
    self.send_devserver_message_to_all(DevserverMsg::FullReloadFailed)
        .await;
}

/// Tells all clients to reload if possible for new changes.
pub(crate) async fn send_reload_command(&mut self) {
    self.set_ready().await;
    self.send_devserver_message_to_all(DevserverMsg::FullReloadCommand)
        .await;
}

/// Send a shutdown message to all connected clients.
pub(crate) async fn send_shutdown(&mut self) {
    self.send_devserver_message_to_all(DevserverMsg::Shutdown)
        .await;
}

/// Sends a devserver message to all connected clients.
///
/// NOTE(review): unlike `send_hotreload`, failed sends do not prune the client
/// here — confirm whether dead sockets should also be removed on this path.
async fn send_devserver_message_to_all(&mut self, msg: DevserverMsg) {
    for socket in self.hot_reload_sockets.iter_mut() {
        _ = socket
            .socket
            .send(Message::Text(serde_json::to_string(&msg).unwrap().into()))
            .await;
    }
}

/// Mark the devserver status as ready and notify listeners.
async fn set_ready(&mut self) {
    // Avoid re-broadcasting if we're already Ready.
    if matches!(self.build_status.get(), Status::Ready) {
        return;
    }

    self.build_status.set(Status::Ready);
    self.send_build_status().await;
}
/// Get the address the devserver should run on
pub fn devserver_address(&self) -> SocketAddr {
    SocketAddr::new(self.devserver_exposed_ip, self.devserver_port)
}

// Get the address the server should run on if we're serving the user's server
pub fn proxied_server_address(&self) -> Option<SocketAddr> {
    self.proxied_port
        .map(|port| SocketAddr::new(self.devserver_exposed_ip, port))
}

/// The address serving HTTP for the app: the devserver itself for web/server
/// bundles, otherwise the proxied user server (if any).
pub fn server_address(&self) -> Option<SocketAddr> {
    match self.bundle {
        BundleFormat::Web | BundleFormat::Server => Some(self.devserver_address()),
        _ => self.proxied_server_address(),
    }
}
/// Get the address the server is running - showing 127.0.0.1 if the devserver is bound to 0.0.0.0
/// This is designed this way to not confuse users who expect the devserver to be bound to localhost
/// ... which it is, but they don't know that 0.0.0.0 also serves localhost.
pub fn displayed_address(&self) -> Option<SocketAddr> {
let mut address = self.server_address()?;
// Set the port to the devserver port since that's usually what people expect
address.set_port(self.devserver_port);
if self.devserver_bind_ip == IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)) {
address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), address.port());
}
Some(address)
}
}
/// Run the axum server on the given listener forever, with or without TLS.
async fn devserver_mainloop(
    https_cfg: WebHttpsConfig,
    listener: TcpListener,
    router: Router,
) -> Result<()> {
    // We have a native listener that we're going to give to tokio, so we need to make it non-blocking
    let _ = listener.set_nonblocking(true);

    // If we're not using rustls, just use regular axum
    if https_cfg.enabled != Some(true) {
        axum::serve(
            tokio::net::TcpListener::from_std(listener).unwrap(),
            router.into_make_service(),
        )
        .await?;
        return Ok(());
    }

    // If we're using rustls, we need to install the provider, get the cert/key paths, and then set up rustls
    if let Err(provider) = CryptoProvider::install_default(default_provider()) {
        bail!("Failed to install default CryptoProvider: {provider:?}");
    }
    let (cert_path, key_path) = get_rustls(&https_cfg).await?;
    let rustls = axum_server::tls_rustls::RustlsConfig::from_pem_file(cert_path, key_path).await?;
    axum_server::from_tcp_rustls(listener, rustls)
        .serve(router.into_make_service())
        .await?;

    Ok(())
}
/// Sets up and returns a router
///
/// Steps include:
/// - Setting up cors
/// - Setting up the proxy to the endpoint specified in the config
/// - Setting up the file serve service
/// - Setting up the websocket endpoint for devtools
fn build_devserver_router(
    runner: &AppServer,
    hot_reload_sockets: UnboundedSender<ConnectedWsClient>,
    build_status_sockets: UnboundedSender<ConnectedWsClient>,
    fullstack_address: Option<SocketAddr>,
    build_status: SharedStatus,
) -> Result<Router> {
    let mut router = Router::new();

    let build = runner.client();

    // Setup proxy for the endpoint specified in the config
    for proxy_config in build.build.config.web.proxy.iter() {
        router = super::proxy::add_proxy(router, proxy_config)?;
    }

    // For fullstack, liveview, and server, forward all requests to the inner server
    if runner.proxied_port.is_some() {
        tracing::debug!("Proxying requests to fullstack server at {fullstack_address:?}");
        let address = fullstack_address.context("No fullstack address specified")?;
        tracing::debug!("Proxying requests to fullstack server at {address}");
        router = router.fallback_service(super::proxy::proxy_to(
            format!("http://{address}").parse().unwrap(),
            true,
            |error| {
                Response::builder()
                    .status(StatusCode::INTERNAL_SERVER_ERROR)
                    .body(Body::from(format!(
                        "Backend connection failed. The backend is likely still starting up. Please try again in a few seconds. Error: {error:#?}"
                    )))
                    .unwrap()
            },
        ));
    } else {
        // Otherwise, just serve the dir ourselves
        // Route file service to output the .wasm and assets if this is a web build
        let base_path = format!(
            "/{}",
            runner
                .client()
                .build
                .base_path()
                .unwrap_or_default()
                .trim_matches('/')
        );
        if base_path == "/" {
            router = router.fallback_service(build_serve_dir(runner));
        } else {
            router = router.nest_service(&base_path, build_serve_dir(runner));
        }
    }

    // Setup middleware to intercept html requests if the build status is "Building"
    router = router.layer(middleware::from_fn_with_state(
        build_status,
        build_status_middleware,
    ));

    // Query-string parameters a devtools client may report when it connects.
    #[derive(Deserialize, Debug)]
    struct ConnectionQuery {
        aslr_reference: Option<u64>,
        build_id: Option<BuildId>,
        pid: Option<u32>,
    }

    // Setup websocket endpoint - and pass in the extension layer immediately after
    router = router.nest(
        "/_dioxus",
        Router::new()
            .route(
                "/",
                get(
                    |ws: WebSocketUpgrade, ext: Extension<UnboundedSender<ConnectedWsClient>>, query: Query<ConnectionQuery>| async move {
                        tracing::debug!("New devtool websocket connection: {:?}", query);
                        ws.on_upgrade(move |socket| async move { _ = ext.0.unbounded_send(ConnectedWsClient { socket, aslr_reference: query.aslr_reference, build_id: query.build_id, pid: query.pid }) })
                    },
                ),
            )
            .layer(Extension(hot_reload_sockets))
            .route(
                "/build_status",
                get(
                    |ws: WebSocketUpgrade, ext: Extension<UnboundedSender<ConnectedWsClient>>| async move {
                        ws.on_upgrade(move |socket| async move { _ = ext.0.unbounded_send(ConnectedWsClient { socket, aslr_reference: None, build_id: None, pid: None }) })
                    },
                ),
            )
            .layer(Extension(build_status_sockets)),
    );

    // Setup cors
    router = router.layer(
        tower_http::cors::CorsLayer::new()
            // allow `GET` and `POST` when accessing the resource
            .allow_methods([Method::GET, Method::POST])
            // allow requests from any origin
            .allow_origin(Any)
            .allow_headers(Any),
    );

    Ok(router)
}
/// Build the static-file service for the client build's output directory,
/// attaching cross-origin isolation headers plus the no-cache/404-fallback logic.
fn build_serve_dir(runner: &AppServer) -> axum::routing::MethodRouter {
    use tower::ServiceBuilder;

    // (COEP, COOP) header value pairs: "unsafe-none" disables cross-origin
    // isolation; "require-corp"/"same-origin" enables it.
    static CORS_UNSAFE: (HeaderValue, HeaderValue) = (
        HeaderValue::from_static("unsafe-none"),
        HeaderValue::from_static("unsafe-none"),
    );

    static CORS_REQUIRE: (HeaderValue, HeaderValue) = (
        HeaderValue::from_static("require-corp"),
        HeaderValue::from_static("same-origin"),
    );

    let (coep, coop) = match runner.cross_origin_policy {
        true => CORS_REQUIRE.clone(),
        false => CORS_UNSAFE.clone(),
    };

    let app = &runner.client;
    let cfg = &runner.client.build.config;
    let out_dir = app.build.root_dir();
    let index_on_404: bool = cfg.web.watcher.index_on_404;

    get_service(
        ServiceBuilder::new()
            .override_response_header(
                HeaderName::from_static("cross-origin-embedder-policy"),
                coep,
            )
            .override_response_header(HeaderName::from_static("cross-origin-opener-policy"), coop)
            .and_then({
                // Clone so the closure can own the path independently of `ServeDir` below.
                let out_dir = out_dir.clone();
                move |response| async move { Ok(no_cache(index_on_404, &out_dir, response)) }
            })
            .service(ServeDir::new(&out_dir)),
    )
    .handle_error(|error: Infallible| async move {
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("Unhandled internal error: {error}"),
        )
    })
}
/// Post-process a static-file response: optionally upgrade a 404 to the app's
/// `index.html` (SPA-style routing) and stamp no-cache headers on everything.
fn no_cache(
    index_on_404: bool,
    out_dir: &Path,
    response: Response<ServeFileSystemResponseBody>,
) -> Response<Body> {
    // By default we just decompose into the response
    let mut response = response.into_response();

    // If there's a 404 and we're supposed to index on 404, upgrade that failed request to the index.html
    // We might want to insert a header here saying we *did* that but oh well
    if response.status() == StatusCode::NOT_FOUND && index_on_404 {
        let fallback = out_dir.join("index.html");

        // If index.html itself is missing, serve a small explanatory page instead.
        let contents = std::fs::read_to_string(fallback).unwrap_or_else(|_| {
            String::from(
                r#"
<!DOCTYPE html>
<html>
<head>
<title>Err 404 - dx is not serving a web app</title>
</head>
<body>
<p>Err 404 - dioxus is not currently serving a web app</p>
</body>
</html>
"#,
            )
        });
        let body = Body::from(contents);

        response = Response::builder()
            .status(StatusCode::OK)
            .body(body)
            .unwrap();
    };

    insert_no_cache_headers(response.headers_mut());

    response
}
/// Stamp a response's header map with directives that defeat HTTP caching at
/// every layer: the modern `Cache-Control` plus the legacy `Pragma`/`Expires`.
pub(crate) fn insert_no_cache_headers(headers: &mut HeaderMap) {
    let no_cache = [
        (CACHE_CONTROL, HeaderValue::from_static("no-cache")),
        (PRAGMA, HeaderValue::from_static("no-cache")),
        (EXPIRES, HeaderValue::from_static("0")),
    ];
    for (name, value) in no_cache {
        headers.insert(name, value);
    }
}
async fn get_rustls(web_config: &WebHttpsConfig) -> Result<(String, String)> {
// If we're not using mkcert, just use the cert/key paths given to use in the config
if !web_config.mkcert.unwrap_or(false) {
if let (Some(key), Some(cert)) = (web_config.key_path.clone(), web_config.cert_path.clone())
{
return Ok((cert, key));
} else {
bail!("https is enabled but cert or key path is missing");
}
}
const DEFAULT_KEY_PATH: &str = "ssl/key.pem";
const DEFAULT_CERT_PATH: &str = "ssl/cert.pem";
// Get paths to store certs, otherwise use ssl/item.pem
let key_path = web_config
.key_path
.clone()
.unwrap_or(DEFAULT_KEY_PATH.to_string());
let cert_path = web_config
.cert_path
.clone()
.unwrap_or(DEFAULT_CERT_PATH.to_string());
// Create ssl directory if using defaults
if key_path == DEFAULT_KEY_PATH && cert_path == DEFAULT_CERT_PATH {
_ = fs::create_dir("ssl");
}
let cmd = Command::new("mkcert")
.args([
"-install",
"-key-file",
&key_path,
"-cert-file",
&cert_path,
"localhost",
"::1",
"127.0.0.1",
])
.spawn();
match cmd {
Err(e) => {
match e.kind() {
io::ErrorKind::NotFound => {
tracing::error!(dx_src = ?TraceSrc::Dev, "`mkcert` is not installed. See https://github.com/FiloSottile/mkcert#installation for installation instructions.")
}
e => {
tracing::error!(dx_src = ?TraceSrc::Dev, "An error occurred while generating mkcert certificates: {}", e.to_string())
}
};
bail!("failed to generate mkcert certificates");
}
Ok(mut cmd) => {
cmd.wait().await?;
}
}
Ok((cert_path, key_path))
}
/// Middleware that intercepts html requests if the status is "Building" and returns a loading page instead
async fn build_status_middleware(
    state: State<SharedStatus>,
    request: Request,
    next: Next,
) -> axum::response::Response {
    // If the request is for html, and the status is "Building", return the loading page instead of the contents of the response
    let accepts = request.headers().get(hyper::header::ACCEPT);
    let accepts_html = accepts
        .and_then(|v| v.to_str().ok())
        .map(|v| v.contains("text/html"));

    if let Some(true) = accepts_html {
        let status = state.get();
        // Any non-Ready state (building, errored, init) gets the loading page.
        if status != Status::Ready {
            let html = include_str!("../../assets/web/dev.loading.html");
            return axum::response::Response::builder()
                .status(StatusCode::OK)
                // Load the html loader then keep loading forever
                // We never close the stream so any headless testing framework (like playwright) will wait until the real build is done
                .body(Body::from_stream(
                    stream::once(async move { Ok::<_, std::convert::Infallible>(html) })
                        .chain(stream::pending()),
                ))
                .unwrap();
        }
    }

    next.run(request).await
}
/// Cheaply-cloneable, thread-safe wrapper around the current devserver [`Status`].
#[derive(Debug, Clone)]
struct SharedStatus(Arc<RwLock<Status>>);

/// The devserver's build state, as reported to build-status websocket clients.
/// Serialized as `{"type": ..., "data": ...}` JSON via the serde tag/content attributes.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(tag = "type", content = "data")]
enum Status {
    /// Handshake telling a new client which app and bundle this server serves.
    ClientInit {
        application_name: String,
        bundle: BundleFormat,
    },
    /// A build is in progress; `progress` is clamped to `0.0..=1.0` by the producer.
    Building {
        progress: f64,
        build_message: String,
    },
    /// The last build failed; `error` holds HTML-rendered diagnostics
    /// (raw text if ANSI-to-HTML conversion failed).
    BuildError {
        error: String,
    },
    /// The build finished and the app is being served.
    Ready,
}
impl SharedStatus {
    /// Wrap an initial status.
    fn new(status: Status) -> Self {
        Self(Arc::new(RwLock::new(status)))
    }

    /// Convenience constructor for the initial "Starting the build..." state.
    fn new_with_starting_build() -> Self {
        Self::new(Status::Building {
            progress: 0.0,
            build_message: "Starting the build...".to_string(),
        })
    }

    /// Replace the current status.
    fn set(&self, status: Status) {
        *self.0.write().unwrap() = status;
    }

    /// Get a clone of the current status.
    fn get(&self) -> Status {
        self.0.read().unwrap().clone()
    }

    /// Serialize the current status to JSON and send it over the given socket.
    async fn send_to(&self, socket: &mut WebSocket) -> Result<(), axum::Error> {
        let msg = serde_json::to_string(&self.get()).unwrap();
        socket.send(Message::Text(msg.into())).await
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/serve/output.rs | packages/cli/src/serve/output.rs | use crate::Result;
use crate::{
serve::{ansi_buffer::ansi_string_to_line, ServeUpdate, WebServer},
BuildId, BuildStage, BuilderUpdate, BundleFormat, TraceContent, TraceMsg, TraceSrc,
};
use anyhow::{anyhow, bail, Context};
use cargo_metadata::diagnostic::Diagnostic;
use crossterm::{
cursor::{Hide, Show},
event::{
DisableBracketedPaste, DisableFocusChange, EnableBracketedPaste, EnableFocusChange, Event,
EventStream, KeyCode, KeyEvent, KeyEventKind, KeyModifiers,
},
terminal::{disable_raw_mode, enable_raw_mode, Clear, ClearType},
ExecutableCommand,
};
use ratatui::{
prelude::*,
widgets::{Block, BorderType, Borders, LineGauge, Paragraph},
TerminalOptions, Viewport,
};
use std::{
cell::RefCell,
collections::VecDeque,
io::{self, stdout},
rc::Rc,
time::Duration,
};
use tracing::Level;
use super::AppServer;
// Period of the throbber animation tick, in milliseconds.
const TICK_RATE_MS: u64 = 100;
// Maximum width of the rendered viewport in columns — presumably enforced
// during rendering (usage not in this chunk; confirm).
const VIEWPORT_MAX_WIDTH: u16 = 90;
// Inline viewport height used at startup (compact view).
const VIEWPORT_HEIGHT_SMALL: u16 = 5;
// Taller viewport height — presumably used while the "more" modal is open;
// confirm in `viewport_current_height`.
const VIEWPORT_HEIGHT_BIG: u16 = 14;
/// The TUI that drives the console output.
///
/// We try not to store too much state about the world here, just the state about the tui itself.
/// This is to prevent out-of-sync issues with the rest of the build engine and to use the components
/// of the serve engine as the source of truth.
///
/// Please please, do not add state here that does not belong here. We should only be storing state
/// here that is used to change how we display *other* state. Things like throbbers, modals, etc.
pub struct Output {
    // The ratatui terminal; `None` when interactive mode is disabled or unavailable.
    term: Rc<RefCell<Option<Terminal<CrosstermBackend<io::Stdout>>>>>,
    // Crossterm input events; only initialized in interactive mode (see startup()).
    events: Option<EventStream>,

    // Whether the expanded "more" modal is currently open ('/' toggles it).
    more_modal_open: bool,
    interactive: bool,

    // Whether to show verbose logs or not
    // We automatically hide "debug" logs if verbose is false (only showing "info" / "warn" / "error")
    verbose: bool,
    trace: bool,

    // Pending logs — a list of all messages from build, dev, app, and more,
    // queued for the next render.
    pending_logs: VecDeque<TraceMsg>,

    // Rendered version string: "<crate version>-<git short hash>".
    dx_version: String,
    tick_animation: bool,

    tick_interval: tokio::time::Interval,

    // ! needs to be wrapped in an &mut since `render stateful widget` requires &mut... but our
    // "render" method only borrows &self (for no particular reason at all...)
    throbber: RefCell<throbber_widgets_tui::ThrobberState>,
}
/// Borrowed view of the serve-engine state needed by a render pass.
#[derive(Clone, Copy)]
struct RenderState<'a> {
    runner: &'a AppServer,
    server: &'a WebServer,
}
impl Output {
/// Construct the output TUI and, if interactive, take over the terminal.
pub(crate) async fn start(interactive: bool) -> crate::Result<Self> {
    let mut output = Self {
        interactive,
        term: Rc::new(RefCell::new(None)),
        dx_version: format!(
            "{}-{}",
            env!("CARGO_PKG_VERSION"),
            crate::dx_build_info::GIT_COMMIT_HASH_SHORT.unwrap_or("main")
        ),
        events: None,
        more_modal_open: false,
        pending_logs: VecDeque::new(),
        throbber: RefCell::new(throbber_widgets_tui::ThrobberState::default()),
        trace: crate::logging::VERBOSITY.get().unwrap().trace,
        verbose: crate::logging::VERBOSITY.get().unwrap().verbose,
        tick_animation: false,
        tick_interval: {
            let mut interval = tokio::time::interval(Duration::from_millis(TICK_RATE_MS));
            // Don't try to "catch up" on missed animation ticks.
            interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
            interval
        },
    };

    output.startup()?;

    Ok(output)
}
/// Call the startup functions that might mess with the terminal settings.
/// This is meant to be paired with "shutdown" to restore the terminal to its original state.
fn startup(&mut self) -> Result<()> {
    if self.interactive {
        // Check if writing the terminal is going to block infinitely.
        // If it does, we should disable interactive mode. This ensures we work with programs like `bg`
        // which suspend the process and cause us to block when writing output.
        if Self::enable_raw_mode().is_err() {
            // Fall back to non-interactive output rather than hanging.
            self.term.take();
            self.interactive = false;
            return Ok(());
        }

        self.term.replace(
            Terminal::with_options(
                CrosstermBackend::new(stdout()),
                TerminalOptions {
                    viewport: Viewport::Inline(VIEWPORT_HEIGHT_SMALL),
                },
            )
            .ok(),
        );

        // Initialize the event stream here - this is optional because an EvenStream in a non-interactive
        // terminal will cause a panic instead of simply doing nothing.
        // https://github.com/crossterm-rs/crossterm/issues/659
        self.events = Some(EventStream::new());
    }

    Ok(())
}
/// Enable raw mode, but don't let it block forever.
///
/// This lets us check if writing to tty is going to block forever and then recover, allowing
/// interopability with programs like `bg`.
fn enable_raw_mode() -> Result<()> {
    #[cfg(unix)]
    {
        use tokio::signal::unix::{signal, SignalKind};

        // Ignore SIGTSTP, SIGTTIN, and SIGTTOU
        // NOTE(review): these raw numbers are the Linux values. On BSD/macOS,
        // signal 20 is SIGCHLD (SIGTSTP is 18) — confirm whether platform
        // signal constants should be used here instead of hard-coded numbers.
        _ = signal(SignalKind::from_raw(20))?; // SIGTSTP
        _ = signal(SignalKind::from_raw(21))?; // SIGTTIN
        _ = signal(SignalKind::from_raw(22))?; // SIGTTOU
    }

    use std::io::IsTerminal;

    if !stdout().is_terminal() {
        bail!("Cannot enable raw mode on a non-terminal output");
    }

    enable_raw_mode()?;
    stdout()
        .execute(Hide)?
        .execute(EnableFocusChange)?
        .execute(EnableBracketedPaste)?;

    Ok(())
}
/// Restore the terminal to its original state. An associated function so it
/// can be called without an `Output` instance; only undoes raw-mode changes
/// if they were actually applied.
pub(crate) fn remote_shutdown(interactive: bool) -> Result<()> {
    if interactive && crossterm::terminal::is_raw_mode_enabled().unwrap_or(true) {
        stdout()
            .execute(Show)?
            .execute(DisableFocusChange)?
            .execute(DisableBracketedPaste)?;
        disable_raw_mode()?;

        // print a line to force the cursor down (no tearing)
        println!();
    }

    Ok(())
}
/// Wait for the next user input event or animation tick, yielding a [`ServeUpdate`].
///
/// In non-interactive mode this never resolves.
pub(crate) async fn wait(&mut self) -> ServeUpdate {
    use futures_util::future::OptionFuture;
    use futures_util::StreamExt;

    if !self.interactive {
        return std::future::pending().await;
    }

    // Wait for the next user event or animation tick
    loop {
        let next = OptionFuture::from(self.events.as_mut().map(|f| f.next()));
        let event = tokio::select! {
            biased; // Always choose the event over the animation tick to not lose the event
            Some(Some(Ok(event))) = next => event,
            _ = self.tick_interval.tick(), if self.tick_animation => {
                self.throbber.borrow_mut().calc_next();
                return ServeUpdate::Redraw
            },
            else => futures_util::future::pending().await
        };

        match self.handle_input(event) {
            Ok(Some(update)) => return update,
            Err(ee) => {
                // Surface input-handling failures as a fatal exit.
                return ServeUpdate::Exit {
                    error: Some(anyhow::anyhow!(ee)),
                }
            }
            // Event handled without producing an update — keep waiting.
            Ok(None) => {}
        }
    }
}
/// Translate a raw terminal event into an optional [`ServeUpdate`].
///
/// Only key *presses* are routed to the keybinding handler; every other event
/// (key releases, focus changes, pastes, resizes, ...) simply triggers a redraw.
fn handle_input(&mut self, input: Event) -> Result<Option<ServeUpdate>> {
    if let Event::Key(key) = input {
        if key.kind == KeyEventKind::Press {
            return self.handle_keypress(key);
        }
    }
    Ok(Some(ServeUpdate::Redraw))
}
fn handle_keypress(&mut self, key: KeyEvent) -> Result<Option<ServeUpdate>> {
// Some dev helpers for testing panic propagation and error handling. Remove this eventually.
if cfg!(debug_assertions) && std::env::var("DEBUG_PANICS").is_ok() {
match key.code {
KeyCode::Char('Z') => panic!("z pressed so we panic -> {}", uuid::Uuid::new_v4()),
KeyCode::Char('X') => bail!("x pressed so we bail -> {}", uuid::Uuid::new_v4()),
KeyCode::Char('E') => {
Err(anyhow!(
"E pressed so we bail with context -> {}",
uuid::Uuid::new_v4()
))
.context("With a message")
.context("With a context")?;
}
KeyCode::Char('C') => {
return Ok(Some(ServeUpdate::Exit {
error: Some(anyhow!(
"C pressed, so exiting with safe error -> {}",
uuid::Uuid::new_v4()
)),
}));
}
_ => {}
}
}
match key.code {
KeyCode::Char('c') if key.modifiers.contains(KeyModifiers::CONTROL) => {
return Ok(Some(ServeUpdate::Exit { error: None }))
}
KeyCode::Char('r') => return Ok(Some(ServeUpdate::RequestRebuild)),
KeyCode::Char('o') => return Ok(Some(ServeUpdate::OpenApp)),
KeyCode::Char('p') => return Ok(Some(ServeUpdate::ToggleShouldRebuild)),
KeyCode::Char('v') => {
self.verbose = !self.verbose;
tracing::info!(
"Verbose logging is now {}",
if self.verbose { "on" } else { "off" }
);
}
KeyCode::Char('t') => {
self.trace = !self.trace;
tracing::info!("Tracing is now {}", if self.trace { "on" } else { "off" });
}
KeyCode::Char('D') => {
return Ok(Some(ServeUpdate::OpenDebugger {
id: BuildId::SECONDARY,
}));
}
KeyCode::Char('d') => {
return Ok(Some(ServeUpdate::OpenDebugger {
id: BuildId::PRIMARY,
}));
}
KeyCode::Char('c') => {
stdout()
.execute(Clear(ClearType::All))?
.execute(Clear(ClearType::Purge))?;
// Clear the terminal and push the frame to the bottom
_ = self.term.borrow_mut().as_mut().map(|t| {
let frame_rect = t.get_frame().area();
let term_size = t.size().unwrap();
let remaining_space = term_size
.height
.saturating_sub(frame_rect.y + frame_rect.height);
t.insert_before(remaining_space, |_| {})
});
}
// Toggle the more modal by swapping the the terminal with a new one
// This is a bit of a hack since crossterm doesn't technically support changing the
// size of an inline viewport.
KeyCode::Char('/') => {
if let Some(terminal) = self.term.borrow_mut().as_mut() {
// Toggle the more modal, which will change our current viewport height
self.more_modal_open = !self.more_modal_open;
// Clear the terminal before resizing it, such that it doesn't tear
terminal.clear()?;
// And then set the new viewport, which essentially mimics a resize
*terminal = Terminal::with_options(
CrosstermBackend::new(stdout()),
TerminalOptions {
viewport: Viewport::Inline(self.viewport_current_height()),
},
)?;
}
}
_ => {}
}
// Out of safety, we always redraw, since it's a relatively cheap operation
Ok(Some(ServeUpdate::Redraw))
}
/// Queue a [`TraceMsg`] to be printed on the next render.
///
/// Logs are buffered in `pending_logs` rather than written immediately so the
/// render pass can drain and print them in one place.
pub fn push_log(&mut self, message: TraceMsg) {
    self.pending_logs.push_front(message);
}
/// Queue a cargo diagnostic for display on the next render.
///
/// Note-level diagnostics are suppressed unless tracing output is enabled.
pub fn push_cargo_log(&mut self, message: Diagnostic) {
    use cargo_metadata::diagnostic::DiagnosticLevel;
    let is_note = matches!(message.level, DiagnosticLevel::Note);
    if !is_note || self.trace {
        self.push_log(TraceMsg::cargo(message));
    }
}
/// Add a line of app stdout/stderr output to the logs.
///
/// The line is queued as a [`TraceMsg`] tagged with the `App` trace source and the
/// provided `level`, and printed on the next render.
///
/// For Android with tracing disabled, logcat output is filtered so only
/// Rust-originated log lines (and fatal runtime messages) are shown.
pub fn push_stdio(&mut self, bundle: BundleFormat, msg: String, level: Level) {
    match bundle {
        // If tracing is disabled, we need to filter out all the noise from Android logcat
        BundleFormat::Android if !self.trace => {
            // By default (trace off): only show RustStdoutStderr logs with proper log levels
            // Filter out raw output like GL bindings, WebView internals, etc.
            let is_rust_log = msg.contains("RustStdoutStderr");
            // Lines starting with 'F' are treated as fatal (presumably logcat's
            // Fatal priority prefix — confirm against logcat format).
            let is_fatal = msg.contains("AndroidRuntime") || msg.starts_with('F');
            let mut rendered_msg = None;
            // If we're not in trace mode, then we need to filter out non-log messages and clean them up
            // Only show logs with standard tracing level prefixes (check in the message after the colon)
            if is_rust_log {
                if let Some(colon_pos) = msg.find(':') {
                    let content = &msg[colon_pos + 1..];
                    if content.contains(" TRACE ")
                        || content.contains(" DEBUG ")
                        || content.contains(" INFO ")
                        || content.contains(" WARN ")
                        || content.contains(" ERROR ")
                    {
                        rendered_msg = Some(content.trim().to_string());
                    }
                }
            }
            // Always show fatal errors, even if they don't come from Rust logging
            if is_fatal {
                rendered_msg = Some(msg);
            }
            if let Some(msg) = rendered_msg {
                self.push_log(TraceMsg::text(TraceSrc::App(bundle), level, msg));
            }
        }
        // Every other target: forward the line untouched.
        _ => self.push_log(TraceMsg::text(TraceSrc::App(bundle), level, msg)),
    }
}
/// Push a log message received over the devtools websocket into the TUI logs.
///
/// Non-text frames, payloads that fail to decode, and non-`Log` client messages
/// are silently ignored (a decode failure is reported via `tracing`).
pub fn push_ws_message(&mut self, bundle: BundleFormat, message: &axum::extract::ws::Message) {
    use dioxus_devtools_types::ClientMsg;
    // We can only handle text messages from the websocket...
    let axum::extract::ws::Message::Text(text) = message else {
        return;
    };
    // ...and then decode them into a ClientMsg
    let res = serde_json::from_str::<ClientMsg>(text.as_str());
    // Client logs being errors aren't fatal, but we should still report them
    let msg = match res {
        Ok(msg) => msg,
        Err(err) => {
            tracing::error!(dx_src = ?TraceSrc::Dev, "Error parsing message from {}: {} -> {:?}", bundle, err, text.as_str());
            return;
        }
    };
    let ClientMsg::Log { level, messages } = msg else {
        return;
    };
    // FIXME(jon): why are we pulling only the first message here?
    let content = messages.first().unwrap_or(&String::new()).clone();
    // Map the client's string level onto tracing levels, defaulting to INFO.
    let level = match level.as_str() {
        "trace" => Level::TRACE,
        "debug" => Level::DEBUG,
        "info" => Level::INFO,
        "warn" => Level::WARN,
        "error" => Level::ERROR,
        _ => Level::INFO,
    };
    // We don't care about logging the app's message so we directly push it instead of using tracing.
    self.push_log(TraceMsg::text(TraceSrc::App(bundle), level, content));
}
/// Update internal state in response to a build engine update.
///
/// Internal state is deliberately kept minimal: the only thing tracked here is
/// whether the progress animation should be ticking. Leaving the animation running
/// permanently would work but wastes redraws — ratatui diffs the intermediate
/// buffer, so stopping the tick when a build finishes (success or failure) avoids
/// needless work. A "requestAnimationFrame"-style approach could replace this, but
/// would have to be applied everywhere rather than just here.
pub(crate) fn new_build_update(&mut self, update: &BuilderUpdate) {
    match update {
        BuilderUpdate::Progress {
            stage: BuildStage::Starting { .. },
        } => self.tick_animation = true,
        BuilderUpdate::BuildReady { .. } | BuilderUpdate::BuildFailed { .. } => {
            self.tick_animation = false
        }
        _ => {}
    }
}
/// Render the current state of everything to the console screen.
///
/// No-ops when the session is not interactive or the terminal handle is gone.
/// Pending logs are drained and printed before the frame is drawn.
pub fn render(&mut self, runner: &AppServer, server: &WebServer) {
    if !self.interactive {
        return;
    }
    // Get a handle to the terminal with a different lifetime so we can continue to call &self methods
    let owned_term = self.term.clone();
    let mut term = owned_term.borrow_mut();
    let Some(term) = term.as_mut() else {
        return;
    };
    // First, dequeue any logs that have built up from event handling
    while let Some(log) = self.pending_logs.pop_back() {
        _ = self.render_log(term, log);
    }
    // Then, draw the frame, passing along all the state of the TUI so we can render it properly
    _ = term.draw(|frame| {
        self.render_frame(frame, RenderState { runner, server });
    });
}
/// Draw one frame: the viewport borders, the body content, and the title row.
fn render_frame(&self, frame: &mut Frame, state: RenderState) {
    // Use the max size of the viewport, but shrunk to a sensible max width
    let mut area = frame.area();
    area.width = area.width.clamp(0, VIEWPORT_MAX_WIDTH);
    // One-row header and footer around a flexible body.
    let [_top, body, _bottom] = Layout::vertical([
        Constraint::Length(1),
        Constraint::Fill(1),
        Constraint::Length(1),
    ])
    .horizontal_margin(1)
    .areas(area);
    self.render_borders(frame, area);
    self.render_body(frame, body, state);
    self.render_body_title(frame, _top, state);
}
/// Draw the right-aligned "/:more" hint in the title row.
///
/// The hint is highlighted while the "more" modal is open and dimmed otherwise.
fn render_body_title(&self, frame: &mut Frame<'_>, area: Rect, _state: RenderState) {
    let hint = if self.more_modal_open {
        "/:more".light_yellow()
    } else {
        "/:more".dark_gray()
    };
    let title = Line::from(vec![" ".dark_gray(), hint, " ".dark_gray()]).right_aligned();
    frame.render_widget(title, area);
}
/// Lay out and render the main body: two columns (gauges left, stats right),
/// plus the optional "more" modal underneath when it is open.
fn render_body(&self, frame: &mut Frame<'_>, area: Rect, state: RenderState) {
    let [_title, body, more, _foot] = Layout::vertical([
        Constraint::Length(0),
        Constraint::Length(VIEWPORT_HEIGHT_SMALL - 2),
        Constraint::Fill(1),
        Constraint::Length(0),
    ])
    .horizontal_margin(1)
    .areas(area);
    // Fixed-width left column for gauges; the rest goes to the stats column.
    let [col1, col2] = Layout::horizontal([Constraint::Length(50), Constraint::Fill(1)])
        .horizontal_margin(1)
        .areas(body);
    self.render_gauges(frame, col1, state);
    self.render_stats(frame, col2, state);
    if self.more_modal_open {
        self.render_more_modal(frame, more, state);
    }
}
/// Render the left column: two progress gauges (app + server/bundle) and a
/// one-line textual status derived from the client's current build stage.
fn render_gauges(&self, frame: &mut Frame<'_>, area: Rect, state: RenderState) {
    let [gauge_area, _margin] =
        Layout::horizontal([Constraint::Fill(1), Constraint::Length(3)]).areas(area);
    let [app_progress, second_progress, status_line]: [_; 3] = Layout::vertical([
        Constraint::Length(1),
        Constraint::Length(1),
        Constraint::Length(1),
    ])
    .areas(gauge_area);
    let client = &state.runner.client();
    self.render_single_gauge(
        frame,
        app_progress,
        client.compile_progress(),
        "App: ",
        state,
        client.compile_duration(),
    );
    // Second gauge shows the server build for fullstack apps, bundling otherwise.
    if state.runner.is_fullstack() {
        self.render_single_gauge(
            frame,
            second_progress,
            state.runner.server_compile_progress(),
            "Server: ",
            state,
            // NOTE(review): this reuses the *client* compile duration for the
            // server gauge — confirm whether a server-side duration was intended.
            client.compile_duration(),
        );
    } else {
        self.render_single_gauge(
            frame,
            second_progress,
            client.bundle_progress(),
            "Bundle: ",
            state,
            client.bundle_duration(),
        );
    }
    // Build the status line from the current build stage.
    let mut lines = vec!["Status: ".white()];
    match &client.stage {
        BuildStage::Initializing => lines.push("Initializing".yellow()),
        BuildStage::Starting { patch, .. } => {
            if *patch {
                lines.push("Hot-patching...".yellow())
            } else {
                lines.push("Starting build".yellow())
            }
        }
        BuildStage::InstallingTooling => lines.push("Installing tooling".yellow()),
        BuildStage::Compiling {
            current,
            total,
            krate,
            ..
        } => {
            lines.push("Compiling ".yellow());
            lines.push(format!("{current}/{total} ").gray());
            lines.push(krate.as_str().dark_gray())
        }
        BuildStage::OptimizingWasm => lines.push("Optimizing wasm".yellow()),
        BuildStage::SplittingBundle => lines.push("Splitting bundle".yellow()),
        BuildStage::CompressingAssets => lines.push("Compressing assets".yellow()),
        BuildStage::RunningBindgen => lines.push("Running wasm-bindgen".yellow()),
        BuildStage::RunningGradle => lines.push("Running gradle assemble".yellow()),
        BuildStage::CodeSigning => lines.push("Code signing app".yellow()),
        BuildStage::Bundling => lines.push("Bundling app".yellow()),
        BuildStage::CopyingAssets {
            current,
            total,
            path,
        } => {
            lines.push("Copying asset ".yellow());
            lines.push(format!("{current}/{total} ").gray());
            if let Some(name) = path.file_name().and_then(|f| f.to_str()) {
                lines.push(name.dark_gray())
            }
        }
        BuildStage::Success => {
            lines.push("Serving ".yellow());
            lines.push(client.build.executable_name().white());
            lines.push(" 🚀 ".green());
            if let Some(comp_time) = client.total_build_time() {
                lines.push(format!("{:.1}s", comp_time.as_secs_f32()).dark_gray());
            }
        }
        BuildStage::Failed => lines.push("Failed".red()),
        BuildStage::Aborted => lines.push("Aborted".red()),
        BuildStage::Restarting => lines.push("Restarting".yellow()),
        BuildStage::Linking => lines.push("Linking".yellow()),
        BuildStage::Hotpatching => lines.push("Hot-patching...".yellow()),
        BuildStage::ExtractingAssets => lines.push("Extracting assets".yellow()),
        BuildStage::Prerendering => lines.push("Pre-rendering...".yellow()),
        // Unknown/future stages simply render no status text.
        _ => {}
    };
    frame.render_widget(Line::from(lines), status_line);
}
/// Render one progress row: a labeled gauge bar, a spinner (or ❌/🎉 icon when
/// finished), and the elapsed time.
///
/// `value` is clamped to `0.0..=1.0`; when the client build has failed the bar
/// is forced full and drawn red, and the elapsed time is suppressed.
fn render_single_gauge(
    &self,
    frame: &mut Frame<'_>,
    area: Rect,
    value: f64,
    label: &str,
    state: RenderState,
    time_taken: Option<Duration>,
) {
    let failed = state.runner.client.stage == BuildStage::Failed;
    let value = if failed { 1.0 } else { value.clamp(0.0, 1.0) };
    let [gauge_row, _, icon] = Layout::horizontal([
        Constraint::Fill(1),
        Constraint::Length(2),
        Constraint::Length(10),
    ])
    .areas(area);
    frame.render_widget(
        LineGauge::default()
            // Exact float match on 1.0 is safe here: `value` is set to exactly
            // 1.0 above on failure, and a completed gauge reports exactly 1.0.
            .filled_style(Style::default().fg(match value {
                1.0 if failed => Color::Red,
                1.0 => Color::Green,
                _ => Color::Yellow,
            }))
            .unfilled_style(Style::default().fg(Color::DarkGray))
            .label(label.gray())
            .line_set(symbols::line::THICK)
            .ratio(if !failed { value } else { 1.0 }),
        gauge_row,
    );
    let [throbber_frame, time_frame] = Layout::default()
        .direction(Direction::Horizontal)
        .constraints([Constraint::Length(3), Constraint::Fill(1)])
        .areas(icon);
    if value != 1.0 {
        // Still in progress: animated spinner.
        let throb = throbber_widgets_tui::Throbber::default()
            .style(ratatui::style::Style::default().fg(ratatui::style::Color::Cyan))
            .throbber_style(
                ratatui::style::Style::default()
                    .fg(ratatui::style::Color::White)
                    .add_modifier(ratatui::style::Modifier::BOLD),
            )
            .throbber_set(throbber_widgets_tui::BLACK_CIRCLE)
            .use_type(throbber_widgets_tui::WhichUse::Spin);
        frame.render_stateful_widget(throb, throbber_frame, &mut self.throbber.borrow_mut());
    } else {
        // Finished: success or failure icon.
        frame.render_widget(
            Line::from(vec![if failed {
                "❌ ".white()
            } else {
                "🎉 ".white()
            }])
            .left_aligned(),
            throbber_frame,
        );
    }
    if let Some(time_taken) = time_taken {
        if !failed {
            frame.render_widget(
                Line::from(vec![format!("{:.1}s", time_taken.as_secs_f32()).dark_gray()])
                    .left_aligned(),
                time_frame,
            );
        }
    }
}
/// Render the right column: target platform, enabled cargo features, and the
/// address the app/server is reachable at.
fn render_stats(&self, frame: &mut Frame<'_>, area: Rect, state: RenderState) {
    let [current_platform, app_features, serve_address]: [_; 3] = Layout::vertical([
        Constraint::Length(1),
        Constraint::Length(1),
        Constraint::Length(1),
    ])
    .areas(area);
    let client = &state.runner.client();
    frame.render_widget(
        Paragraph::new(Line::from(vec![
            "Platform: ".gray(),
            client.build.bundle.expected_name().yellow(),
            if state.runner.is_fullstack() {
                " + fullstack".yellow()
            } else {
                " ".dark_gray()
            },
        ])),
        current_platform,
    );
    self.render_feature_list(frame, app_features, state);
    // todo(jon) should we write https ?
    // Base path (if configured) is appended so the printed URL is directly usable.
    let address = match state.server.displayed_address() {
        Some(address) => format!(
            "http://{}{}",
            address,
            state
                .runner
                .client
                .build
                .base_path()
                .map(|f| format!("/{f}/"))
                .unwrap_or_default()
        )
        .blue(),
        None => "no server address".dark_gray(),
    };
    frame.render_widget_ref(
        Paragraph::new(Line::from(vec![
            if client.build.bundle == BundleFormat::Web {
                "Serving at: ".gray()
            } else {
                "Server at: ".gray()
            },
            address,
        ])),
        serve_address,
    );
}
/// Render the list of cargo features enabled for the client build, formatted
/// like `App features: ["default", "desktop"]`.
fn render_feature_list(&self, frame: &mut Frame<'_>, area: Rect, state: RenderState) {
    let features: Vec<String> = state.runner.client().build.all_target_features();
    let mut spans = vec!["App features: ".gray(), "[".yellow()];
    for (idx, feature) in features.into_iter().enumerate() {
        // Comma-separate entries by prefixing every feature after the first.
        if idx > 0 {
            spans.push(", ".dark_gray());
        }
        spans.push("\"".yellow());
        spans.push(feature.yellow());
        spans.push("\"".yellow());
    }
    spans.push("]".yellow());
    frame.render_widget(Paragraph::new(Line::from(spans)), area);
}
/// Render the expanded "more" modal: build/tool metadata and documentation links
/// in the left column, the keyboard shortcut reference in the right column.
fn render_more_modal(&self, frame: &mut Frame<'_>, area: Rect, state: RenderState) {
    let [col1, col2] =
        Layout::horizontal([Constraint::Length(50), Constraint::Fill(1)]).areas(area);
    let [top, bottom] = Layout::vertical([Constraint::Fill(1), Constraint::Length(2)])
        .horizontal_margin(1)
        .areas(col1);
    let meta_list: [_; 6] = Layout::vertical([
        Constraint::Length(1), // spacing
        Constraint::Length(1), // item 1
        Constraint::Length(1), // item 2
        Constraint::Length(1), // item 3
        Constraint::Length(1), // item 4
        Constraint::Length(1), // Spacing
    ])
    .areas(top);
    frame.render_widget(
        Paragraph::new(Line::from(vec![
            "dx version: ".gray(),
            self.dx_version.as_str().yellow(),
        ])),
        meta_list[1],
    );
    frame.render_widget(
        Paragraph::new(Line::from(vec![
            "rustc: ".gray(),
            state.runner.workspace.rustc_version.as_str().yellow(),
        ])),
        meta_list[2],
    );
    frame.render_widget(
        Paragraph::new(Line::from(vec![
            "Hotreload: ".gray(),
            if !state.runner.automatic_rebuilds {
                "disabled".dark_gray()
            } else if state.runner.use_hotpatch_engine {
                "hot-patching".yellow()
            } else {
                "rsx and assets".yellow()
            },
        ])),
        meta_list[3],
    );
    let server_address = match state.server.server_address() {
        Some(address) => format!("http://{address}").yellow(),
        None => "no address".dark_gray(),
    };
    frame.render_widget(
        Paragraph::new(Line::from(vec!["Network: ".gray(), server_address])),
        meta_list[4],
    );
    // Doc links are only shown for apps that explicitly depend on dioxus.
    let links_list: [_; 2] =
        Layout::vertical([Constraint::Length(1), Constraint::Length(1)]).areas(bottom);
    if state.runner.client.build.using_dioxus_explicitly {
        frame.render_widget(
            Paragraph::new(Line::from(vec![
                "Read the docs: ".gray(),
                "https://dioxuslabs.com/learn/0.7/".blue(),
            ])),
            links_list[0],
        );
        frame.render_widget(
            Paragraph::new(Line::from(vec![
                "Video tutorials: ".gray(),
                "https://youtube.com/@DioxusLabs".blue(),
            ])),
            links_list[1],
        );
    }
    // Shortcut cheat sheet; the leading empty entry is vertical padding.
    let cmds = [
        "",
        "r: rebuild the app",
        "o: open the app",
        "p: pause rebuilds",
        "v: toggle verbose logs",
        "t: toggle tracing logs",
        "c: clear the screen",
        "d: attach debugger",
        "/: toggle more commands",
    ];
    let layout: [_; 9] = Layout::vertical(cmds.iter().map(|_| Constraint::Length(1)))
        .horizontal_margin(1)
        .areas(col2);
    for (idx, cmd) in cmds.iter().enumerate() {
        if cmd.is_empty() {
            continue;
        }
        let (cmd, detail) = cmd.split_once(": ").unwrap_or((cmd, ""));
        frame.render_widget(
            Paragraph::new(Line::from(vec![
                cmd.gray(),
                ": ".gray(),
                detail.dark_gray(),
            ])),
            layout[idx],
        );
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | true |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/serve/proxy.rs | packages/cli/src/serve/proxy.rs | use crate::config::WebProxyConfig;
use crate::TraceSrc;
use crate::{Error, Result};
use anyhow::{bail, Context};
use axum::body::Body;
use axum::http::request::Parts;
use axum::{body::Body as MyBody, response::IntoResponse};
use axum::{
http::StatusCode,
routing::{any, MethodRouter},
Router,
};
use hyper::header::*;
use hyper::{Request, Response, Uri};
use hyper_util::{
client::legacy::{self, connect::HttpConnector},
rt::TokioExecutor,
};
/// An HTTPS-capable client that forwards requests to a single proxy backend (`url`).
#[derive(Debug, Clone)]
struct ProxyClient {
    inner: legacy::Client<hyper_rustls::HttpsConnector<HttpConnector>, MyBody>,
    url: Uri,
}

impl ProxyClient {
    /// Build a client targeting `url`, accepting both HTTP and HTTPS backends.
    ///
    /// # Panics
    /// Panics if the platform's native root certificates cannot be loaded.
    fn new(url: Uri) -> Self {
        // Installing the default crypto provider fails if another provider was
        // installed first; that's harmless, so the result is deliberately ignored.
        let _ = rustls::crypto::ring::default_provider().install_default();
        let https = hyper_rustls::HttpsConnectorBuilder::new()
            .with_native_roots()
            .expect("failed to load native root certificates for the proxy client")
            .https_or_http()
            .enable_all_versions()
            .build();
        Self {
            inner: legacy::Client::builder(TokioExecutor::new()).build(https),
            url,
        }
    }

    /// Forward `req` to the backend, rewriting its scheme and authority to match
    /// the configured backend URL while keeping the original path and query.
    async fn send(&self, mut req: Request<MyBody>) -> Result<Response<hyper::body::Incoming>> {
        let mut uri_parts = req.uri().clone().into_parts();
        uri_parts.authority = self.url.authority().cloned();
        uri_parts.scheme = self.url.scheme().cloned();
        *req.uri_mut() = Uri::from_parts(uri_parts).context("Invalid URI parts")?;
        self.inner
            .request(req)
            .await
            .context("Failed to send proxy request")
    }
}
/// Add routes to the router handling the specified proxy config.
///
/// We will proxy requests directed at either:
///
/// - the exact path of the proxy config's backend URL, e.g. /api
/// - the exact path with a trailing slash, e.g. /api/
/// - any subpath of the backend URL, e.g. /api/foo/bar
///
/// # Errors
/// Fails when the backend URL cannot be parsed or has an empty path.
pub(crate) fn add_proxy(mut router: Router, proxy: &WebProxyConfig) -> Result<Router> {
    let url: Uri = proxy.backend.parse()?;
    let path = url.path().to_string();
    let trimmed_path = path.trim_start_matches('/');
    if trimmed_path.is_empty() {
        bail!(
            "Proxy backend URL must have a non-empty path, e.g. {}/api instead of {}",
            proxy.backend.trim_end_matches('/'),
            proxy.backend
        );
    }
    // Normalize away any trailing slash once instead of re-trimming per route.
    let base = trimmed_path.trim_end_matches('/');
    let method_router = proxy_to(url, false, handle_proxy_error);
    // /api/{*path} — any subpath
    router = router.route(&format!("/{base}/{{*path}}"), method_router.clone());
    // /api/ — exact path with trailing slash
    router = router.route(&format!("/{base}/"), method_router.clone());
    // /api — exact path
    router = router.route(&format!("/{base}"), method_router);
    Ok(router)
}
/// Build a [`MethodRouter`] that forwards requests of every HTTP method to `url`.
///
/// WebSocket upgrade requests are handed off to `proxy_ws`. When `nocache` is set,
/// no-cache headers are stamped onto the request before it is forwarded. A send
/// failure is converted into the client-facing response by `handle_error`.
pub(crate) fn proxy_to(
    url: Uri,
    nocache: bool,
    handle_error: fn(Error) -> Response<Body>,
) -> MethodRouter {
    let client = ProxyClient::new(url.clone());
    any(move |parts: Parts, mut req: Request<MyBody>| async move {
        // Prevent request loops
        if req.headers().get("x-proxied-by-dioxus").is_some() {
            return Err(Response::builder()
                .status(StatusCode::NOT_FOUND)
                .body(Body::from(
                    "API is sharing a loopback with the dev server. Try setting a different port on the API config.",
                ))
                .unwrap());
        }
        // Tag the request so a looped-back copy is detected by the check above.
        req.headers_mut().insert(
            "x-proxied-by-dioxus",
            "true".parse().expect("header value is valid"),
        );
        // WebSocket traffic (by scheme or Upgrade header) takes a separate path.
        let upgrade = req.headers().get(UPGRADE);
        if req.uri().scheme().map(|f| f.as_str()) == Some("ws")
            || req.uri().scheme().map(|f| f.as_str()) == Some("wss")
            || upgrade.is_some_and(|h| h.as_bytes().eq_ignore_ascii_case(b"websocket"))
        {
            return super::proxy_ws::proxy_websocket(parts, req, &url).await;
        }
        if nocache {
            crate::serve::insert_no_cache_headers(req.headers_mut());
        }
        let uri = req.uri().clone();
        // retry with backoff
        let res = client.send(req).await.map_err(handle_error);
        match res {
            Ok(res) => {
                // log assets at a different log level
                if uri.path().starts_with("/assets/")
                    || uri.path().starts_with("/_dioxus/")
                    || uri.path().starts_with("/public/")
                    || uri.path().starts_with("/wasm/")
                {
                    tracing::trace!(dx_src = ?TraceSrc::App(crate::BundleFormat::Server), "[{}] {}", res.status().as_u16(), uri);
                } else {
                    tracing::info!(dx_src = ?TraceSrc::App(crate::BundleFormat::Server), "[{}] {}", res.status().as_u16(), uri);
                }
                Ok(res.into_response())
            }
            Err(err) => {
                tracing::error!(dx_src = ?TraceSrc::App(crate::BundleFormat::Server), "[{}] {}", err.status().as_u16(), uri);
                Err(err)
            }
        }
    })
}
pub(crate) fn handle_proxy_error(e: Error) -> axum::http::Response<axum::body::Body> {
tracing::error!(dx_src = ?TraceSrc::Dev, "Proxy error: {}", e);
axum::http::Response::builder()
.status(axum::http::StatusCode::INTERNAL_SERVER_ERROR)
.body(axum::body::Body::from(format!(
"Proxy connection failed: {e:#?}"
)))
.unwrap()
}
#[cfg(test)]
mod test {
    use super::*;
    use axum_server::{Handle, Server};

    /// Spin up a dummy echo backend plus the proxying dev server, and return the
    /// dev server's `host:port` so tests can issue requests against it.
    async fn setup_servers(mut config: WebProxyConfig) -> String {
        // Backend echoes the URI it received so assertions can check path rewriting.
        let backend_router =
            Router::new().route(
                "/{*path}",
                any(|request: axum::extract::Request| async move {
                    format!("backend: {}", request.uri())
                }),
            );
        // The API backend server
        let backend_handle_handle = Handle::new();
        let backend_handle_handle_ = backend_handle_handle.clone();
        tokio::spawn(async move {
            Server::bind("127.0.0.1:0".parse().unwrap())
                .handle(backend_handle_handle_)
                .serve(backend_router.into_make_service())
                .await
                .unwrap();
        });
        // Set the user's config to this dummy API we just built so we can test it
        let backend_addr = backend_handle_handle.listening().await.unwrap();
        config.backend = format!("http://{}{}", backend_addr, config.backend);
        // Now set up our actual filesystem server
        let router = super::add_proxy(Router::new(), &config);
        let server_handle_handle = Handle::new();
        let server_handle_handle_ = server_handle_handle.clone();
        tokio::spawn(async move {
            Server::bind("127.0.0.1:0".parse().unwrap())
                .handle(server_handle_handle_)
                .serve(router.unwrap().into_make_service())
                .await
                .unwrap();
        });
        // Expose *just* the filesystem web server's address
        server_handle_handle.listening().await.unwrap().to_string()
    }

    /// Exercise all three proxied route shapes: `/api`, `/api/`, and `/api/subpath`.
    async fn test_proxy_requests(path: String) {
        let config = WebProxyConfig {
            // Normally this would be an absolute URL including scheme/host/port,
            // but in these tests we need to let the OS choose the port so tests
            // don't conflict, so we'll concatenate the final address and this
            // path together.
            // So in day to day usage, use `http://localhost:8000/api` instead!
            backend: path,
        };
        let server_addr = setup_servers(config).await;
        assert_eq!(
            reqwest::get(format!("http://{server_addr}/api"))
                .await
                .unwrap()
                .text()
                .await
                .unwrap(),
            "backend: /api"
        );
        assert_eq!(
            reqwest::get(format!("http://{server_addr}/api/"))
                .await
                .unwrap()
                .text()
                .await
                .unwrap(),
            "backend: /api/"
        );
        assert_eq!(
            reqwest::get(format!("http://{server_addr}/api/subpath"))
                .await
                .unwrap()
                .text()
                .await
                .unwrap(),
            "backend: /api/subpath"
        );
    }

    #[tokio::test]
    async fn add_proxy() {
        test_proxy_requests("/api".to_string()).await;
    }

    #[tokio::test]
    async fn add_proxy_trailing_slash() {
        test_proxy_requests("/api/".to_string()).await;
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/cli/src/serve/ansi_buffer.rs | packages/cli/src/serve/ansi_buffer.rs | use ratatui::{buffer::Cell, prelude::*};
use std::fmt::{self, Write};
/// Render a ratatui [`Line`] into a `String` of text interleaved with raw ANSI
/// escape codes, ending with a full style reset.
///
/// This is taken from a PR on the ratatui repo (<https://github.com/ratatui/ratatui/pull/1065>) and
/// modified to be more appropriate for our use case.
pub fn ansi_string_to_line(line: Line) -> String {
    let graphemes = line.styled_graphemes(Style::default());
    // Capacity is a rough guess based on the first span only; the string grows as needed.
    let mut out = String::with_capacity(line.spans.first().map_or(0, |s| s.content.len() * 2));
    let mut last_style = None;
    for grapheme in graphemes {
        let mut cell = Cell::default();
        cell.set_symbol(grapheme.symbol);
        cell.set_style(grapheme.style);
        // Only emit a new escape sequence when the effective style changes.
        // (The is_none() check is subsumed by the inequality, but kept for clarity.)
        let style = (cell.fg, cell.bg, cell.modifier);
        if last_style.is_none() || last_style != Some(style) {
            _ = write_cell_style(&mut out, cell.modifier, cell.fg, cell.bg);
            last_style = Some(style);
        }
        _ = out.write_str(cell.symbol());
    }
    // Reset all attributes so the final style doesn't leak past this line.
    _ = out.write_str("\u{1b}[0m");
    out
}
/// Write the ANSI SGR ("select graphic rendition") escape sequence selecting
/// `modifier`, `fg` and `bg` into `f`.
///
/// The emitted sequence has the shape `ESC[<modifiers...><fg>;<bg>m`: each active
/// modifier contributes its code plus a trailing `;`, the foreground code is
/// followed by one `;`, and the background code is terminated by the final `m`.
/// RGB and indexed colors use the extended `38;…`/`48;…` forms.
fn write_cell_style(f: &mut impl Write, modifier: Modifier, fg: Color, bg: Color) -> fmt::Result {
    f.write_str("\u{1b}[")?;
    // Write the modifier codes
    if modifier.contains(Modifier::BOLD) {
        f.write_str("1;")?;
    }
    if modifier.contains(Modifier::DIM) {
        f.write_str("2;")?;
    }
    if modifier.contains(Modifier::ITALIC) {
        f.write_str("3;")?;
    }
    if modifier.contains(Modifier::UNDERLINED) {
        f.write_str("4;")?;
    }
    if modifier.contains(Modifier::SLOW_BLINK) {
        f.write_str("5;")?;
    }
    if modifier.contains(Modifier::RAPID_BLINK) {
        f.write_str("6;")?;
    }
    if modifier.contains(Modifier::REVERSED) {
        f.write_str("7;")?;
    }
    if modifier.contains(Modifier::HIDDEN) {
        f.write_str("8;")?;
    }
    if modifier.contains(Modifier::CROSSED_OUT) {
        f.write_str("9;")?;
    }
    // Write the foreground (standard 16-color codes; Rgb/Indexed fall through to "")
    f.write_str(match fg {
        Color::Reset => "39",
        Color::Black => "30",
        Color::Red => "31",
        Color::Green => "32",
        Color::Yellow => "33",
        Color::Blue => "34",
        Color::Magenta => "35",
        Color::Cyan => "36",
        Color::Gray => "37",
        Color::DarkGray => "90",
        Color::LightRed => "91",
        Color::LightGreen => "92",
        Color::LightYellow => "93",
        Color::LightBlue => "94",
        Color::LightMagenta => "95",
        Color::LightCyan => "96",
        Color::White => "97",
        _ => "",
    })?;
    // Truecolor foreground: 38;2;R;G;B
    if let Color::Rgb(red, green, blue) = fg {
        f.write_fmt(format_args!("38;2;{red};{green};{blue}"))?;
    }
    // 256-color foreground: 38;5;N
    if let Color::Indexed(i) = fg {
        f.write_fmt(format_args!("38;5;{i}"))?;
    }
    f.write_str(";")?;
    // Write the background (standard 16-color codes; Rgb/Indexed fall through to "")
    f.write_str(match bg {
        Color::Reset => "49",
        Color::Black => "40",
        Color::Red => "41",
        Color::Green => "42",
        Color::Yellow => "43",
        Color::Blue => "44",
        Color::Magenta => "45",
        Color::Cyan => "46",
        Color::Gray => "47",
        Color::DarkGray => "100",
        Color::LightRed => "101",
        Color::LightGreen => "102",
        Color::LightYellow => "103",
        Color::LightBlue => "104",
        Color::LightMagenta => "105",
        Color::LightCyan => "106",
        Color::White => "107",
        _ => "",
    })?;
    // Truecolor background: 48;2;R;G;B
    if let Color::Rgb(red, green, blue) = bg {
        f.write_fmt(format_args!("48;2;{red};{green};{blue}"))?;
    }
    // 256-color background: 48;5;N
    if let Color::Indexed(i) = bg {
        f.write_fmt(format_args!("48;5;{i}"))?;
    }
    f.write_str("m")
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/core-types/src/lib.rs | packages/core-types/src/lib.rs | pub mod bubbles;
pub mod bundled;
pub mod formatter;
pub mod hr_context;
pub use bubbles::*;
pub use bundled::*;
pub use formatter::*;
pub use hr_context::*;
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/core-types/src/bundled.rs | packages/core-types/src/bundled.rs | use std::sync::LazyLock;
/// Returns `true` when this process looks like it was launched as a bundled app
/// (or under the Dioxus CLI) rather than directly via `cargo run`.
///
/// The answer is computed once and cached for the lifetime of the process.
pub fn is_bundled_app() -> bool {
    static BUNDLED: LazyLock<bool> = LazyLock::new(|| {
        // The CLI sets this variable for apps it manages, so its presence wins outright.
        if std::env::var("DIOXUS_CLI_ENABLED").is_ok() {
            return true;
        }
        // A non-empty CARGO_MANIFEST_DIR generally means `cargo run`; a bundled
        // binary launched on its own won't have it.
        match std::env::var("CARGO_MANIFEST_DIR") {
            Ok(path) if !path.is_empty() => false,
            _ => true,
        }
    });
    *BUNDLED
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/core-types/src/attributes.rs | packages/core-types/src/attributes.rs | rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false | |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/core-types/src/formatter.rs | packages/core-types/src/formatter.rs | use std::borrow::Cow;
/// Take this type and format it into a Cow<'static, str>
///
/// This trait exists so libraries like manganis can implement this type for assets without depending
/// on dioxus-core, which can be heavy in proc macros and build scripts.
///
/// We don't want a blanket impl for T: Display because that might conflict for the other integral data
/// types of AttributeValue
///
/// Todo: we might be able to specialize without this just with Display.
pub trait DioxusFormattable {
    /// Produce the attribute-value string for `self`, borrowing a `'static` string when possible.
    fn format(&self) -> Cow<'static, str>;
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/core-types/src/hr_context.rs | packages/core-types/src/hr_context.rs | pub trait HotReloadingContext {
fn map_attribute(
element_name_rust: &str,
attribute_name_rust: &str,
) -> Option<(&'static str, Option<&'static str>)>;
fn map_element(element_name_rust: &str) -> Option<(&'static str, Option<&'static str>)>;
}
/// A no-op [`HotReloadingContext`]: it knows no element or attribute mappings
/// and therefore always answers `None`.
pub struct Empty;

impl HotReloadingContext for Empty {
    fn map_attribute(_: &str, _: &str) -> Option<(&'static str, Option<&'static str>)> {
        None
    }

    fn map_element(_: &str) -> Option<(&'static str, Option<&'static str>)> {
        None
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/core-types/src/bubbles.rs | packages/core-types/src/bubbles.rs | /// Check if the event bubbles
///
/// todo: this should not be in this crate, but this crate is a "root" crate and
/// has zero-deps, meaning it gets compiled before anything else.
///
/// This function being here means we can use it in the interpreter without pulling in dioxus-html,
/// drastically shortening the crate graph and thus compile times
///
/// The real solution to this problem is that events need to mark themselves as "bubbling" or "not bubbling"
/// in their definition, which gets passed as part of the mutations.
pub fn event_bubbles(evt: &str) -> bool {
    // Only the exceptions are listed: every event not named here (including
    // unknown event names) is treated as bubbling, matching the DOM's common case.
    !matches!(
        evt,
        "cancel"
            | "focus"
            | "blur"
            | "dragenter"
            | "dragexit"
            | "mouseenter"
            | "mouseleave"
            | "scroll"
            | "scrollend"
            | "pointerenter"
            | "pointerleave"
            | "abort"
            | "canplay"
            | "canplaythrough"
            | "durationchange"
            | "emptied"
            | "ended"
            | "error"
            | "loadeddata"
            | "loadedmetadata"
            | "loadstart"
            | "load"
            | "pause"
            | "play"
            | "playing"
            | "progress"
            | "ratechange"
            | "resize"
            | "seeked"
            | "seeking"
            | "stalled"
            | "suspend"
            | "timeupdate"
            | "volumechange"
            | "waiting"
            | "toggle"
            | "beforetoggle"
            | "mounted"
            | "visible"
    )
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/document/build.rs | packages/document/build.rs | fn main() {
// If any TS files change, re-run the build script
lazy_js_bundle::LazyTypeScriptBindings::new()
.with_watching("./src/ts")
.with_binding("./src/ts/head.ts", "./src/js/head.js")
.run();
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/document/src/lib.rs | packages/document/src/lib.rs | use std::rc::Rc;
mod document;
mod elements;
mod error;
mod eval;
pub use document::*;
pub use elements::*;
pub use error::*;
pub use eval::*;
/// Get the document provider for the current platform or a no-op provider if the platform doesn't document functionality.
pub fn document() -> Rc<dyn Document> {
    // Renderers that support document functionality provide an
    // `Rc<dyn Document>` through context; if none is found, fall back to the
    // no-op implementation after logging an error.
    if let Some(provider) = dioxus_core::try_consume_context::<Rc<dyn Document>>() {
        return provider;
    }
    tracing::error!(
        "Unable to find a document in the renderer. Using the default no-op document."
    );
    Rc::new(NoOpDocument)
}
/// Evaluate some javascript in the current document
#[doc = include_str!("../docs/eval.md")]
#[doc(alias = "javascript")]
pub fn eval(script: &str) -> Eval {
    // Delegate to whichever document provider is active for this renderer.
    let provider = document();
    provider.eval(script.to_owned())
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/document/src/document.rs | packages/document/src/document.rs | use std::sync::Arc;
use super::*;
/// A context for the document
///
/// Thread-safe shared handle to the active [`Document`] implementation,
/// suitable for storing in component context.
pub type DocumentContext = Arc<dyn Document>;
/// Escape `s` and wrap it in double quotes so it is a valid JS string literal.
///
/// Backslashes, double quotes, and CR/LF characters are escaped; everything
/// else passes through unchanged.
fn format_string_for_js(s: &str) -> String {
    let mut out = String::with_capacity(s.len() + 2);
    out.push('"');
    for c in s.chars() {
        match c {
            '\\' => out.push_str("\\\\"),
            '\n' => out.push_str("\\n"),
            '\r' => out.push_str("\\r"),
            '"' => out.push_str("\\\""),
            other => out.push(other),
        }
    }
    out.push('"');
    out
}
/// Render attribute pairs as a JS array-of-pairs literal:
/// `[["key", "value"],["key2", "value2"]]` (empty input yields `[]`).
fn format_attributes(attributes: &[(&str, String)]) -> String {
    let pairs: Vec<String> = attributes
        .iter()
        .map(|(key, value)| {
            format!(
                "[{}, {}]",
                format_string_for_js(key),
                format_string_for_js(value)
            )
        })
        .collect();
    format!("[{}]", pairs.join(","))
}
/// Create a new element in the head with javascript through the [`Document::eval`] method
///
/// This can be used to implement the head element creation logic for most [`Document`] implementations.
pub fn create_element_in_head(
    tag: &str,
    attributes: &[(&str, String)],
    children: Option<String>,
) -> String {
    // The JS helper (bundled at compile time) defines window.createElementInHead.
    let helpers = include_str!("./js/head.js");
    // Attributes and tag are serialized as JS literals via the formatters above.
    let attributes = format_attributes(attributes);
    // `children` becomes a JS string literal, or the JS literal `null` when absent.
    let children = children
        .as_deref()
        .map(format_string_for_js)
        .unwrap_or("null".to_string());
    let tag = format_string_for_js(tag);
    format!(r#"{helpers};window.createElementInHead({tag}, {attributes}, {children});"#)
}
/// A provider for document-related functionality.
///
/// Provides things like a history API, a title, a way to run JS, and some other basics/essentials used
/// by nearly every platform.
///
/// An integration with some kind of navigation history.
///
/// Depending on your use case, your implementation may deviate from the described procedure. This
/// is fine, as long as both `current_route` and `current_query` match the described format.
///
/// However, you should document all deviations. Also, make sure the navigation is user-friendly.
/// The described behaviors are designed to mimic a web browser, which most users should already
/// know. Deviations might confuse them.
pub trait Document: 'static {
    /// Run `eval` against this document, returning an [`Eval`] that can be used to await the result.
    fn eval(&self, js: String) -> Eval;
    /// Set the title of the document
    fn set_title(&self, title: String) {
        // Default: route through eval; `{title:?}` quotes and escapes the
        // string so it forms a valid JS string literal.
        self.eval(format!("document.title = {title:?};"));
    }
    /// Create a new element in the head
    fn create_head_element(
        &self,
        name: &str,
        attributes: &[(&str, String)],
        contents: Option<String>,
    ) {
        // This default implementation remains to make the trait compatible with the 0.6 version, but it should not be used
        // The element should only be created inside an effect so it is not called while the component is suspended
        self.eval(create_element_in_head(name, attributes, contents));
    }
    /// Create a new meta tag in the head
    fn create_meta(&self, props: MetaProps) {
        let attributes = props.attributes();
        self.create_head_element("meta", &attributes, None);
    }
    /// Create a new script tag in the head
    fn create_script(&self, props: ScriptProps) {
        let attributes = props.attributes();
        // Inline script text (if any) becomes the element's contents.
        self.create_head_element("script", &attributes, props.script_contents().ok());
    }
    /// Create a new style tag in the head
    fn create_style(&self, props: StyleProps) {
        let attributes = props.attributes();
        // Inline CSS text (if any) becomes the element's contents.
        self.create_head_element("style", &attributes, props.style_contents().ok());
    }
    /// Create a new link tag in the head
    fn create_link(&self, props: LinkProps) {
        let attributes = props.attributes();
        self.create_head_element("link", &attributes, None);
    }
    /// Check if we should create a new head component at all. If it returns false, the head component will be skipped.
    ///
    /// This runs once per head component and is used to hydrate head components in fullstack.
    fn create_head_component(&self) -> bool {
        true
    }
}
/// A document that does nothing
///
/// Used as a fallback when no renderer-provided [`Document`] is in context;
/// its evals resolve to [`EvalError::Unsupported`] and head mutations are ignored.
#[derive(Default)]
pub struct NoOpDocument;
impl Document for NoOpDocument {
    // Returns an Eval whose every operation fails with `Unsupported`, so
    // callers see a consistent "not available on this platform" error.
    fn eval(&self, _: String) -> Eval {
        // A throwaway owner backs the generational box holding the evaluator.
        let owner = generational_box::Owner::default();
        struct NoOpEvaluator;
        impl Evaluator for NoOpEvaluator {
            fn poll_join(
                &mut self,
                _: &mut std::task::Context<'_>,
            ) -> std::task::Poll<Result<serde_json::Value, EvalError>> {
                std::task::Poll::Ready(Err(EvalError::Unsupported))
            }
            fn poll_recv(
                &mut self,
                _: &mut std::task::Context<'_>,
            ) -> std::task::Poll<Result<serde_json::Value, EvalError>> {
                std::task::Poll::Ready(Err(EvalError::Unsupported))
            }
            fn send(&self, _data: serde_json::Value) -> Result<(), EvalError> {
                Err(EvalError::Unsupported)
            }
        }
        Eval::new(owner.insert(Box::new(NoOpEvaluator)))
    }
    // Head/title mutations are silently ignored rather than routed through
    // the (unsupported) eval path.
    fn set_title(&self, _: String) {}
    fn create_meta(&self, _: MetaProps) {}
    fn create_script(&self, _: ScriptProps) {}
    fn create_style(&self, _: StyleProps) {}
    fn create_link(&self, _: LinkProps) {}
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/document/src/error.rs | packages/document/src/error.rs | use std::error::Error;
use std::fmt::Display;
/// Represents an error when evaluating JavaScript
// `non_exhaustive` lets new variants be added without breaking downstream matches.
#[derive(Debug)]
#[non_exhaustive]
pub enum EvalError {
    /// The platform does not support evaluating JavaScript.
    Unsupported,
    /// The provided JavaScript has already been ran.
    Finished,
    /// The provided JavaScript is not valid and can't be ran.
    InvalidJs(String),
    /// Represents an error communicating between JavaScript and Rust.
    Communication(String),
    /// Represents an error serializing or deserializing the result of an eval
    Serialization(serde_json::Error),
}
impl Display for EvalError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
EvalError::Unsupported => write!(f, "EvalError::Unsupported - eval is not supported on the current platform"),
EvalError::Finished => write!(f, "EvalError::Finished - eval has already ran"),
EvalError::InvalidJs(_) => write!(f, "EvalError::InvalidJs - the provided javascript is invalid"),
EvalError::Communication(_) => write!(f, "EvalError::Communication - there was an error trying to communicate with between javascript and rust"),
EvalError::Serialization(_) => write!(f, "EvalError::Serialization - there was an error trying to serialize or deserialize the result of an eval"),
}
}
}
// EvalError carries no `source` chain; the default Error impl suffices.
impl Error for EvalError {}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/document/src/eval.rs | packages/document/src/eval.rs | #![doc = include_str!("../docs/eval.md")]
use crate::error::EvalError;
use generational_box::GenerationalBox;
use std::future::{poll_fn, Future, IntoFuture};
use std::pin::Pin;
use std::task::{Context, Poll};
#[doc = include_str!("../docs/eval.md")]
pub struct Eval {
    // Type-erased, renderer-provided evaluator backing this handle.
    evaluator: GenerationalBox<Box<dyn Evaluator>>,
}
impl Eval {
    /// Create this eval from a dynamic evaluator
    pub fn new(evaluator: GenerationalBox<Box<dyn Evaluator + 'static>>) -> Self {
        Self { evaluator }
    }
    /// Wait until the javascript task is finished and return the result
    ///
    /// Returns [`EvalError::Finished`] if the evaluator handle is no longer
    /// writable (presumably because its slot was reclaimed — see
    /// `GenerationalBox::try_write`), or [`EvalError::Serialization`] if the
    /// JS result cannot be deserialized into `T`.
    pub async fn join<T: serde::de::DeserializeOwned>(self) -> Result<T, EvalError> {
        // Poll the evaluator directly; a failed `try_write` maps to `Finished`.
        let json_value = poll_fn(|cx| match self.evaluator.try_write() {
            Ok(mut evaluator) => evaluator.poll_join(cx),
            Err(_) => Poll::Ready(Err(EvalError::Finished)),
        })
        .await?;
        serde_json::from_value(json_value).map_err(EvalError::Serialization)
    }
    /// Send a message to the javascript task
    ///
    /// The payload is serialized to JSON before being handed to the evaluator.
    pub fn send(&self, data: impl serde::Serialize) -> Result<(), EvalError> {
        match self.evaluator.try_read() {
            Ok(evaluator) => {
                evaluator.send(serde_json::to_value(data).map_err(EvalError::Serialization)?)
            }
            Err(_) => Err(EvalError::Finished),
        }
    }
    /// Receive a message from the javascript task
    ///
    /// Mirrors [`Eval::join`] but polls the evaluator's message queue instead
    /// of its final result.
    pub async fn recv<T: serde::de::DeserializeOwned>(&mut self) -> Result<T, EvalError> {
        let json_value = poll_fn(|cx| match self.evaluator.try_write() {
            Ok(mut evaluator) => evaluator.poll_recv(cx),
            Err(_) => Poll::Ready(Err(EvalError::Finished)),
        })
        .await?;
        serde_json::from_value(json_value).map_err(EvalError::Serialization)
    }
}
impl IntoFuture for Eval {
    type Output = Result<serde_json::Value, EvalError>;
    type IntoFuture = Pin<Box<dyn Future<Output = Self::Output>>>;
    /// Awaiting an `Eval` directly is shorthand for awaiting [`Eval::join`]
    /// with a `serde_json::Value` result.
    fn into_future(self) -> Self::IntoFuture {
        Box::pin(async move { self.join().await })
    }
}
impl Clone for Eval {
    // Cloning is a bitwise copy of the handle (the type is `Copy`); both
    // handles refer to the same underlying evaluator.
    fn clone(&self) -> Self {
        *self
    }
}
impl Copy for Eval {}
/// The platform's evaluator.
///
/// Implemented per renderer; [`Eval`] drives these methods via `poll_fn`.
pub trait Evaluator {
    /// Sends a message to the evaluated JavaScript.
    fn send(&self, data: serde_json::Value) -> Result<(), EvalError>;
    /// Receive any queued messages from the evaluated JavaScript.
    fn poll_recv(
        &mut self,
        context: &mut Context<'_>,
    ) -> Poll<Result<serde_json::Value, EvalError>>;
    /// Gets the return value of the JavaScript
    fn poll_join(
        &mut self,
        context: &mut Context<'_>,
    ) -> Poll<Result<serde_json::Value, EvalError>>;
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/document/src/elements/link.rs | packages/document/src/elements/link.rs | use super::*;
use crate::document;
use dioxus_core::{use_hook, VNode};
use dioxus_html as dioxus_elements;
// Props for a `<link>` head element with a required, arbitrary `rel` value.
#[non_exhaustive]
#[derive(Clone, Props, PartialEq)]
pub struct OtherLinkProps {
    /// The relationship of the linked resource.
    pub rel: String,
    /// Additional attributes forwarded to the rendered element.
    #[props(extends = link, extends = GlobalAttributes)]
    pub additional_attributes: Vec<Attribute>,
}
// Props for the [`Link`] head component; each `Option` field maps to one
// optional `<link>` attribute (see `LinkProps::attributes`).
#[non_exhaustive]
#[derive(Clone, Props, PartialEq)]
pub struct LinkProps {
    pub rel: Option<String>,
    pub media: Option<String>,
    pub title: Option<String>,
    pub disabled: Option<bool>,
    pub r#as: Option<String>,
    pub sizes: Option<String>,
    /// Links are deduplicated by their href and rel attributes
    pub href: Option<String>,
    pub crossorigin: Option<String>,
    pub referrerpolicy: Option<String>,
    pub fetchpriority: Option<String>,
    pub hreflang: Option<String>,
    pub integrity: Option<String>,
    pub r#type: Option<String>,
    pub blocking: Option<String>,
    /// Additional attributes forwarded to the rendered element.
    #[props(extends = link, extends = GlobalAttributes)]
    pub additional_attributes: Vec<Attribute>,
    pub onload: Option<String>,
}
impl LinkProps {
    /// Collect the name/value pairs for the rendered `<link>` element.
    ///
    /// Extra attributes come first, followed by each explicitly-set prop in a
    /// fixed order; props left as `None` are omitted entirely.
    pub fn attributes(&self) -> Vec<(&'static str, String)> {
        let mut attributes = Vec::new();
        extend_attributes(&mut attributes, &self.additional_attributes);
        // Each optional prop maps to one fixed attribute name.
        let optional: [(&'static str, Option<String>); 15] = [
            ("rel", self.rel.clone()),
            ("media", self.media.clone()),
            ("title", self.title.clone()),
            ("disabled", self.disabled.map(|d| d.to_string())),
            ("as", self.r#as.clone()),
            ("sizes", self.sizes.clone()),
            ("href", self.href.clone()),
            ("crossOrigin", self.crossorigin.clone()),
            ("referrerPolicy", self.referrerpolicy.clone()),
            ("fetchPriority", self.fetchpriority.clone()),
            ("hrefLang", self.hreflang.clone()),
            ("integrity", self.integrity.clone()),
            ("type", self.r#type.clone()),
            ("blocking", self.blocking.clone()),
            ("onload", self.onload.clone()),
        ];
        for (name, value) in optional {
            if let Some(value) = value {
                attributes.push((name, value));
            }
        }
        attributes
    }
}
/// Render a [`<link>`](https://developer.mozilla.org/en-US/docs/Web/HTML/Reference/Elements/link) tag into the head of the page.
///
/// > The [Link](https://docs.rs/dioxus-router/latest/dioxus_router/components/fn.Link.html) component in dioxus router and this component are completely different.
/// > This component links resources in the head of the page, while the router component creates clickable links in the body of the page.
///
/// # Example
/// ```rust, no_run
/// # use dioxus::prelude::*;
/// fn RedBackground() -> Element {
/// rsx! {
/// // You can use the meta component to render a meta tag into the head of the page
/// // This meta tag will redirect the user to the dioxuslabs homepage in 10 seconds
/// document::Link {
/// href: asset!("/assets/style.css"),
/// rel: "stylesheet",
/// }
/// }
/// }
/// ```
///
/// <div class="warning">
///
/// Any updates to the props after the first render will not be reflected in the head.
///
/// </div>
#[doc(alias = "<link>")]
#[component]
pub fn Link(props: LinkProps) -> Element {
    // Debug-only warning: props changes after the first render are ignored.
    use_update_warning(&props, "Link {}");
    // use_hook: create the head element exactly once, on first render.
    use_hook(|| {
        let document = document();
        // The document may opt out of head creation (e.g. during hydration),
        // and links with an href are deduplicated by href + rel.
        let mut insert_link = document.create_head_component();
        if let Some(href) = &props.href {
            if !should_insert_link(href, props.rel.as_deref()) {
                insert_link = false;
            }
        }
        if !insert_link {
            return;
        }
        document.create_link(props);
    });
    // This component renders nothing into the body.
    VNode::empty()
}
/// Root-scope deduplication set for rendered `<link>` elements.
#[derive(Default, Clone)]
struct LinkContext(DeduplicationContext);
/// Returns `true` the first time a given href/rel pair is seen.
fn should_insert_link(href: &str, rel: Option<&str>) -> bool {
    // Include rel in the deduplication key so that the same href can be used
    // with different rel values (e.g., rel="preload" and rel="stylesheet")
    let key = rel.map_or_else(|| href.to_string(), |rel| format!("{href}|{rel}"));
    get_or_insert_root_context::<LinkContext>()
        .0
        .should_insert(&key)
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/document/src/elements/title.rs | packages/document/src/elements/title.rs | use dioxus_core::{use_hook, VNode};
use crate::document;
use super::*;
// Props for the [`Title`] head component.
#[derive(Clone, Props, PartialEq)]
pub struct TitleProps {
    /// The contents of the title tag. The children must be a single text node.
    children: Element,
}
/// Render the title of the page. On web renderers, this will set the [`<title>`](https://developer.mozilla.org/en-US/docs/Web/HTML/Reference/Elements/title) in the head. On desktop, it will set the window title.
///
/// Unlike most head components, the Title can be modified after the first render. Only the latest update to the title will be reflected if multiple title components are rendered.
///
///
/// The children of the title component must be a single static or formatted string. If there are more children or the children contain components, conditionals, loops, or fragments, the title will not be updated.
///
/// # Example
///
/// ```rust, no_run
/// # use dioxus::prelude::*;
/// fn App() -> Element {
/// rsx! {
/// // You can use the Title component to render a title tag into the head of the page or window
/// document::Title { "My Page" }
/// }
/// }
/// ```
#[component]
#[doc(alias = "<title>")]
pub fn Title(props: TitleProps) -> Element {
    let children = props.children;
    // The title must be a single (possibly formatted) text node; anything
    // else is logged and ignored.
    let text = match extract_single_text_node(&children) {
        Ok(text) => text,
        Err(err) => {
            err.log("Title");
            return VNode::empty();
        }
    };
    // Update the title as it changes. NOTE: We don't use use_effect here because we need this to run on the server
    let document = use_hook(document);
    let last_text = use_hook(|| {
        // Set the title initially
        document.set_title(text.clone());
        Rc::new(RefCell::new(text.clone()))
    });
    // If the text changes, update the title
    let mut last_text = last_text.borrow_mut();
    if text != *last_text {
        document.set_title(text.clone());
        *last_text = text;
    }
    // This component renders nothing into the body.
    VNode::empty()
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/document/src/elements/stylesheet.rs | packages/document/src/elements/stylesheet.rs | use super::*;
/// Render a [`<link>`](https://developer.mozilla.org/en-US/docs/Web/HTML/Reference/Elements/link) tag into the head of the page with the stylesheet rel.
/// This is equivalent to the [`Link`](Link) component with a slightly more ergonomic API.
///
///
/// # Example
/// ```rust
/// # use dioxus::prelude::*;
/// fn RedBackground() -> Element {
/// rsx! {
/// document::Stylesheet {
/// href: asset!("/assets/style.css")
/// }
/// }
/// }
/// ```
///
/// <div class="warning">
///
/// Any updates to the props after the first render will not be reflected in the head.
///
/// </div>
#[component]
pub fn Stylesheet(props: LinkProps) -> Element {
    // A stylesheet is a `<link rel="stylesheet" type="text/css">`; every
    // other prop is forwarded unchanged to the underlying Link component.
    let link_props = LinkProps {
        rel: Some("stylesheet".to_string()),
        r#type: Some("text/css".to_string()),
        ..props
    };
    super::Link(link_props)
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/document/src/elements/mod.rs | packages/document/src/elements/mod.rs | #![doc = include_str!("../../docs/head.md")]
use std::{cell::RefCell, collections::HashSet, rc::Rc};
use dioxus_core::{
Attribute, DynamicNode, Element, RenderError, Runtime, ScopeId, Template, TemplateNode,
};
use dioxus_core_macro::*;
mod link;
pub use link::*;
mod stylesheet;
pub use stylesheet::*;
mod meta;
pub use meta::*;
mod script;
pub use script::*;
mod style;
pub use style::*;
mod title;
pub use title::*;
/// Warn the user if they try to change props on a element that is injected into the head
#[allow(unused)]
fn use_update_warning<T: PartialEq + Clone + 'static>(value: &T, name: &'static str) {
#[cfg(debug_assertions)]
{
use dioxus_core::use_hook;
let cloned_value = value.clone();
let initial = use_hook(move || value.clone());
if initial != cloned_value {
tracing::warn!("Changing the props of `{name}` is not supported ");
}
}
}
/// An error that can occur when extracting a single text node from a component
// Borrows the offending `RenderError` from the inspected `Element`.
pub enum ExtractSingleTextNodeError<'a> {
    /// The node contained an render error, so we can't extract the text node
    RenderError(&'a RenderError),
    /// There was only one child, but it wasn't a text node
    NonTextNode,
    /// There is multiple child nodes
    NonTemplate,
}
impl ExtractSingleTextNodeError<'_> {
    /// Log a warning depending on the error
    pub fn log(&self, component: &str) {
        match self {
            ExtractSingleTextNodeError::RenderError(err) => {
                tracing::error!("Error while rendering {component}: {err}");
            }
            // Wrong node type and wrong shape produce the same user-facing
            // message, so they share an arm.
            ExtractSingleTextNodeError::NonTextNode | ExtractSingleTextNodeError::NonTemplate => {
                tracing::error!(
                    "Error while rendering {component}: The children of {component} must be a single text node"
                );
            }
        }
    }
}
// Extract the sole text child of `children` as an owned String.
// Only two template shapes are accepted (see the match arms below); anything
// else is reported via `ExtractSingleTextNodeError`.
fn extract_single_text_node(children: &Element) -> Result<String, ExtractSingleTextNodeError<'_>> {
    let vnode = match children {
        Element::Ok(vnode) => vnode,
        Element::Err(err) => {
            return Err(ExtractSingleTextNodeError::RenderError(err));
        }
    };
    // The title's children must be in one of two forms:
    // 1. rsx! { "static text" }
    // 2. rsx! { "title: {dynamic_text}" }
    match vnode.template {
        // rsx! { "static text" }
        Template {
            roots: &[TemplateNode::Text { text }],
            node_paths: &[],
            attr_paths: &[],
            ..
        } => Ok(text.to_string()),
        // rsx! { "title: {dynamic_text}" }
        Template {
            roots: &[TemplateNode::Dynamic { id }],
            node_paths: &[&[0]],
            attr_paths: &[],
            ..
        } => {
            // The single dynamic root must itself be a text node.
            let node = &vnode.dynamic_nodes[id];
            match node {
                DynamicNode::Text(text) => Ok(text.value.clone()),
                _ => Err(ExtractSingleTextNodeError::NonTextNode),
            }
        }
        _ => Err(ExtractSingleTextNodeError::NonTemplate),
    }
}
/// Fetch a `T` from the root scope's context, creating and providing a
/// default instance on first use.
fn get_or_insert_root_context<T: Default + Clone + 'static>() -> T {
    // Root-scope contexts live for the whole app, which makes them a good
    // home for per-document deduplication state.
    let runtime = Runtime::current();
    runtime
        .has_context::<T>(ScopeId::ROOT)
        .unwrap_or_else(|| {
            let fresh = T::default();
            runtime.provide_context(ScopeId::ROOT, fresh.clone());
            fresh
        })
}
/// Shared set of keys that have already been inserted; clones share the
/// same underlying set via `Rc`.
#[derive(Default, Clone)]
struct DeduplicationContext(Rc<RefCell<HashSet<String>>>);
impl DeduplicationContext {
    /// Returns `true` exactly once per distinct `href`; subsequent calls with
    /// the same key return `false`.
    fn should_insert(&self, href: &str) -> bool {
        // `HashSet::insert` returns whether the value was newly inserted,
        // avoiding the original's separate `contains` + `insert` double lookup.
        self.0.borrow_mut().insert(href.to_string())
    }
}
/// Extend a list of string attributes with a list of dioxus attribute
// Listener/Any values cannot be stringified and are skipped with an error;
// `None` values are skipped silently.
pub(crate) fn extend_attributes(
    attributes: &mut Vec<(&'static str, String)>,
    additional_attributes: &[Attribute],
) {
    for additional_attribute in additional_attributes {
        let attribute_value_as_string = match &additional_attribute.value {
            dioxus_core::AttributeValue::Text(v) => v.to_string(),
            dioxus_core::AttributeValue::Float(v) => v.to_string(),
            dioxus_core::AttributeValue::Int(v) => v.to_string(),
            dioxus_core::AttributeValue::Bool(v) => v.to_string(),
            dioxus_core::AttributeValue::Listener(_) | dioxus_core::AttributeValue::Any(_) => {
                tracing::error!("document::* elements do not support event listeners or any value attributes. Expected displayable attribute, found {:?}", additional_attribute.value);
                continue;
            }
            dioxus_core::AttributeValue::None => {
                continue;
            }
        };
        attributes.push((additional_attribute.name, attribute_value_as_string));
    }
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
DioxusLabs/dioxus | https://github.com/DioxusLabs/dioxus/blob/ec8f31dece5c75371177bf080bab46dff54ffd0e/packages/document/src/elements/style.rs | packages/document/src/elements/style.rs | use super::*;
use crate::document;
use dioxus_core::{use_hook, VNode};
use dioxus_html as dioxus_elements;
// Props for the [`Style`] head component.
#[non_exhaustive]
#[derive(Clone, Props, PartialEq)]
pub struct StyleProps {
    /// Styles are deduplicated by their href attribute
    pub href: Option<String>,
    pub media: Option<String>,
    pub nonce: Option<String>,
    pub title: Option<String>,
    /// The contents of the style tag. If present, the children must be a single text node.
    pub children: Element,
    /// Additional attributes forwarded to the rendered element.
    #[props(extends = style, extends = GlobalAttributes)]
    pub additional_attributes: Vec<Attribute>,
}
impl StyleProps {
    /// Collect the name/value pairs for the rendered `<style>` element.
    ///
    /// Extra attributes come first, followed by each explicitly-set prop in a
    /// fixed order; props left as `None` are omitted entirely.
    pub fn attributes(&self) -> Vec<(&'static str, String)> {
        let mut attributes = Vec::new();
        extend_attributes(&mut attributes, &self.additional_attributes);
        let optional = [
            ("href", self.href.clone()),
            ("media", self.media.clone()),
            ("nonce", self.nonce.clone()),
            ("title", self.title.clone()),
        ];
        for (name, value) in optional {
            if let Some(value) = value {
                attributes.push((name, value));
            }
        }
        attributes
    }
    /// Extract the inline CSS text from `children`, requiring a single text node.
    pub fn style_contents(&self) -> Result<String, ExtractSingleTextNodeError<'_>> {
        extract_single_text_node(&self.children)
    }
}
/// Render a [`<style>`](https://developer.mozilla.org/en-US/docs/Web/HTML/Reference/Elements/style) or [`<link>`](https://developer.mozilla.org/en-US/docs/Web/HTML/Reference/Elements/link) tag into the head of the page.
///
/// If present, the children of the style component must be a single static or formatted string. If there are more children or the children contain components, conditionals, loops, or fragments, the style will not be added.
///
/// # Example
/// ```rust, no_run
/// # use dioxus::prelude::*;
/// fn RedBackground() -> Element {
/// rsx! {
/// // You can use the style component to render a style tag into the head of the page
/// // This style tag will set the background color of the page to red
/// document::Style {
/// r#"
/// body {{
/// background-color: red;
/// }}
/// "#
/// }
/// // You could also use a style with a href to load a stylesheet asset
/// document::Style {
/// href: asset!("/assets/style.css")
/// }
/// }
/// }
/// ```
///
/// <div class="warning">
///
/// Any updates to the props after the first render will not be reflected in the head.
///
/// </div>
#[component]
pub fn Style(props: StyleProps) -> Element {
    // Debug-only warning: props changes after the first render are ignored.
    use_update_warning(&props, "Style {}");
    // use_hook: create the head element exactly once, on first render.
    use_hook(|| {
        let document = document();
        // The document may opt out of head creation (e.g. during hydration),
        // and styles with an href are deduplicated by href.
        let mut insert_style = document.create_head_component();
        if let Some(href) = &props.href {
            if !should_insert_style(href) {
                insert_style = false;
            }
        }
        if !insert_style {
            return;
        }
        // NOTE: the original computed `props.attributes()` into an unused
        // local and pushed type/rel onto it; that dead code is removed here —
        // the LinkProps below carries the type/rel values instead.
        match (&props.href, props.style_contents()) {
            // The style has inline contents, render it as a style tag
            (_, Ok(_)) => document.create_style(props),
            // The style has a src, render it as a link tag
            (Some(_), _) => {
                document.create_link(LinkProps {
                    media: props.media,
                    title: props.title,
                    r#type: Some("text/css".to_string()),
                    additional_attributes: props.additional_attributes,
                    href: props.href,
                    rel: Some("stylesheet".to_string()),
                    disabled: None,
                    r#as: None,
                    sizes: None,
                    crossorigin: None,
                    referrerpolicy: None,
                    fetchpriority: None,
                    hreflang: None,
                    integrity: None,
                    blocking: None,
                    onload: None,
                });
            }
            // The style has neither contents nor src, log an error
            (None, Err(err)) => err.log("Style"),
        };
    });
    // This component renders nothing into the body.
    VNode::empty()
}
/// Root-scope deduplication set for rendered `<style>` elements.
#[derive(Default, Clone)]
struct StyleContext(DeduplicationContext);
/// Returns `true` the first time a given style href is seen.
fn should_insert_style(href: &str) -> bool {
    let context = get_or_insert_root_context::<StyleContext>();
    context.0.should_insert(href)
}
| rust | Apache-2.0 | ec8f31dece5c75371177bf080bab46dff54ffd0e | 2026-01-04T15:32:28.012891Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.