repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/routes/orderbook.rs | coordinator/src/routes/orderbook.rs | use crate::check_version::check_version;
use crate::db;
use crate::orderbook;
use crate::orderbook::db::orders;
use crate::orderbook::trading::NewOrderMessage;
use crate::orderbook::websocket::websocket_connection;
use crate::routes::AppState;
use crate::AppError;
use anyhow::anyhow;
use anyhow::Context;
use anyhow::Result;
use axum::extract::ws::WebSocketUpgrade;
use axum::extract::Path;
use axum::extract::State;
use axum::response::IntoResponse;
use axum::Json;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::PooledConnection;
use diesel::PgConnection;
use rust_decimal::Decimal;
use std::sync::Arc;
use tokio::sync::broadcast::Sender;
use tokio::task::spawn_blocking;
use tracing::instrument;
use uuid::Uuid;
use xxi_node::commons;
use xxi_node::commons::Message;
use xxi_node::commons::NewOrder;
use xxi_node::commons::NewOrderRequest;
use xxi_node::commons::Order;
use xxi_node::commons::OrderReason;
use xxi_node::commons::OrderState;
use xxi_node::commons::OrderType;
/// Checks out a pooled Postgres connection from the application state.
///
/// Any pool error is mapped to an [`AppError::InternalServerError`] so that
/// handlers can simply propagate it with `?`.
#[instrument(skip_all, err(Debug))]
fn get_db_connection(
    state: &Arc<AppState>,
) -> Result<PooledConnection<ConnectionManager<PgConnection>>, AppError> {
    let pool = state.pool.clone();
    match pool.get() {
        Ok(conn) => Ok(conn),
        Err(e) => Err(AppError::InternalServerError(format!(
            "Failed to get db access: {e:#}"
        ))),
    }
}
/// HTTP handler returning a single order by its ID.
///
/// Fails with an internal-server error if the database query fails, and with
/// a bad-request error if no order with the given ID exists.
#[instrument(skip_all, err(Debug))]
pub async fn get_order(
    Path(order_id): Path<Uuid>,
    State(state): State<Arc<AppState>>,
) -> Result<Json<Order>, AppError> {
    let mut conn = get_db_connection(&state)?;
    let maybe_order = orderbook::db::orders::get_with_id(&mut conn, order_id)
        .map_err(|e| AppError::InternalServerError(format!("Failed to load order: {e:#}")))?;
    let order = maybe_order
        .context(format!("Order not found {order_id}"))
        .map_err(|e| AppError::BadRequest(format!("{e:#}")))?;
    Ok(Json(order))
}
/// HTTP handler returning all open limit orders.
///
/// The final boolean flag is forwarded to the orders DB query as `true`; its
/// exact meaning is defined in the orders module — TODO confirm.
#[instrument(skip_all, err(Debug))]
pub async fn get_orders(State(state): State<Arc<AppState>>) -> Result<Json<Vec<Order>>, AppError> {
    let mut conn = get_db_connection(&state)?;
    let orders =
        orderbook::db::orders::get_all_orders(&mut conn, OrderType::Limit, OrderState::Open, true)
            // Fixed: this query loads multiple orders, so the message says "orders".
            .map_err(|e| AppError::InternalServerError(format!("Failed to load orders: {e:#}")))?;
    Ok(Json(orders))
}
/// HTTP handler accepting a new (signed) order from a trader.
///
/// Flow:
/// 1. Verify the request signature; reject unsigned/invalid requests.
/// 2. For market orders, enforce a minimum app version for the trader.
/// 3. For limit orders, enforce the maker whitelist (if enabled) and reject
///    zero prices.
/// 4. If a pre-image was supplied, mark the corresponding hodl invoice as
///    accepted and record the externally funded amount.
/// 5. Insert the order into the DB and forward it to the trading executor.
#[instrument(skip_all, err(Debug))]
pub async fn post_order(
    State(state): State<Arc<AppState>>,
    Json(new_order_request): Json<NewOrderRequest>,
) -> Result<(), AppError> {
    // Reject requests whose signature does not verify.
    new_order_request
        .verify(&state.secp)
        .map_err(|_| AppError::Unauthorized)?;
    let new_order = new_order_request.value;
    let order_id = new_order.id();
    // TODO(holzeis): We should add a similar check eventually for limit orders (makers).
    if let NewOrder::Market(new_order) = &new_order {
        let mut conn = state
            .pool
            .get()
            .map_err(|e| AppError::InternalServerError(e.to_string()))?;
        check_version(&mut conn, &new_order.trader_id)
            .map_err(|e| AppError::BadRequest(e.to_string()))?;
    }
    let settings = state.settings.read().await;
    if let NewOrder::Limit(new_order) = &new_order {
        // Only whitelisted makers may post limit orders when the whitelist is on.
        if settings.whitelist_enabled && !settings.whitelisted_makers.contains(&new_order.trader_id)
        {
            tracing::warn!(
                trader_id = %new_order.trader_id,
                "Trader tried to post limit order but was not whitelisted"
            );
            return Err(AppError::Unauthorized);
        }
        if new_order.price == Decimal::ZERO {
            return Err(AppError::BadRequest(
                "Limit orders with zero price are not allowed".to_string(),
            ));
        }
    }
    let pool = state.pool.clone();
    // A pre-image in the channel-opening params signals that the trader funded
    // the trade via a hodl invoice (lightning).
    let external_funding = match new_order_request
        .channel_opening_params
        .clone()
        .and_then(|c| c.pre_image)
    {
        Some(pre_image_str) => {
            let pre_image =
                commons::PreImage::from_url_safe_encoded_pre_image(pre_image_str.as_str())
                    .map_err(|_| AppError::BadRequest("Invalid pre_image provided".to_string()))?;
            tracing::debug!(
                pre_image_str,
                hash = pre_image.hash,
                "Received pre-image, updating records"
            );
            let inner_hash = pre_image.hash.clone();
            // DB write is blocking; run it on the blocking thread pool.
            let funding_amount = spawn_blocking(move || {
                let mut conn = pool.get()?;
                let amount = db::hodl_invoice::update_hodl_invoice_to_accepted(
                    &mut conn,
                    inner_hash.as_str(),
                    pre_image_str.as_str(),
                    order_id,
                )?;
                anyhow::Ok(amount)
            })
            .await
            .expect("task to complete")
            .map_err(|e| AppError::BadRequest(format!("Invalid pre_image provided: {e:#}")))?;
            // we have received funding via lightning and can now open the channel with funding
            // only from the coordinator
            Some(funding_amount)
        }
        None => None,
    };
    let pool = state.pool.clone();
    // NOTE(review): this clone looks redundant — `new_order` is not referenced
    // again after being moved into the closure. Verify and consider removing.
    let new_order = new_order.clone();
    let order = spawn_blocking(move || {
        let mut conn = pool.get()?;
        let order = match new_order {
            NewOrder::Market(o) => {
                orders::insert_market_order(&mut conn, o.clone(), OrderReason::Manual)
            }
            NewOrder::Limit(o) => orders::insert_limit_order(&mut conn, o, OrderReason::Manual),
        }
        .map_err(|e| anyhow!(e))
        .context("Failed to insert new order into DB")?;
        anyhow::Ok(order)
    })
    .await
    .expect("task to complete")
    .map_err(|e| AppError::InternalServerError(e.to_string()))?;
    // FIXME(holzeis): We shouldn't blindly trust the user about the coordinator reserve. Note, we
    // already ignore the trader reserve parameter when the channel is externally funded.
    let message = NewOrderMessage {
        order,
        channel_opening_params: new_order_request.channel_opening_params.map(|params| {
            crate::ChannelOpeningParams {
                trader_reserve: params.trader_reserve,
                coordinator_reserve: params.coordinator_reserve,
                external_funding,
            }
        }),
        order_reason: OrderReason::Manual,
    };
    // Hand the order over to the trading task for matching/execution.
    state.trading_sender.send(message).await.map_err(|e| {
        AppError::InternalServerError(format!("Failed to send new order message: {e:#}"))
    })?;
    Ok(())
}
fn update_pricefeed(pricefeed_msg: Message, sender: Sender<Message>) {
match sender.send(pricefeed_msg) {
Ok(_) => {
tracing::trace!("Pricefeed updated")
}
Err(error) => {
tracing::warn!("Could not update pricefeed due to '{error}'")
}
}
}
/// HTTP handler deleting an order by ID and notifying pricefeed subscribers
/// about the removal.
#[instrument(skip_all, err(Debug))]
pub async fn delete_order(
    Path(order_id): Path<Uuid>,
    State(state): State<Arc<AppState>>,
) -> Result<Json<Order>, AppError> {
    let mut conn = get_db_connection(&state)?;
    let deleted = orderbook::db::orders::delete(&mut conn, order_id)
        .map_err(|e| AppError::InternalServerError(format!("Failed to delete order: {e:#}")))?;
    // Tell websocket subscribers the order is gone.
    update_pricefeed(Message::DeleteOrder(order_id), state.tx_orderbook_feed.clone());
    Ok(Json(deleted))
}
/// Upgrades the HTTP request to a websocket and hands the socket to the
/// orderbook websocket connection handler together with the shared state.
pub async fn websocket_handler(
    ws: WebSocketUpgrade,
    State(state): State<Arc<AppState>>,
) -> impl IntoResponse {
    ws.on_upgrade(|socket| websocket_connection(socket, state))
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/routes/admin.rs | coordinator/src/routes/admin.rs | use crate::collaborative_revert;
use crate::db;
use crate::funding_fee::insert_funding_rates;
use crate::parse_dlc_channel_id;
use crate::position::models::Position;
use crate::referrals;
use crate::routes::AppState;
use crate::settings::SettingsFile;
use crate::AppError;
use anyhow::Context;
use axum::extract::Path;
use axum::extract::Query;
use axum::extract::State;
use axum::response::IntoResponse;
use axum::Json;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Amount;
use bitcoin::OutPoint;
use bitcoin::Transaction;
use bitcoin::TxOut;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::PooledConnection;
use diesel::PgConnection;
use dlc_manager::channel::signed_channel::SignedChannelState;
use dlc_manager::channel::Channel;
use dlc_manager::DlcChannelId;
use dlc_manager::Storage;
use hex::FromHex;
use lightning::chain::chaininterface::ConfirmationTarget;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use serde::de;
use serde::Deserialize;
use serde::Deserializer;
use serde::Serialize;
use std::cmp::Ordering;
use std::fmt;
use std::num::NonZeroU32;
use std::str::FromStr;
use std::sync::Arc;
use time::OffsetDateTime;
use tokio::task::spawn_blocking;
use tracing::instrument;
use xxi_node::bitcoin_conversion::to_secp_pk_30;
use xxi_node::bitcoin_conversion::to_txid_30;
use xxi_node::commons;
use xxi_node::commons::CollaborativeRevertCoordinatorRequest;
use xxi_node::node::ProtocolId;
/// Coordinator wallet balances, in satoshis.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Balance {
    /// Confirmed on-chain balance.
    pub onchain: u64,
    /// Usable balance held across DLC channels.
    pub dlc_channel: u64,
}
/// On-chain transaction details as exposed by the admin API.
///
/// Amounts are serialised as satoshis.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TransactionDetails {
    pub transaction: Transaction,
    /// Amount sent in this transaction (as reported by the wallet).
    #[serde(with = "bitcoin::amount::serde::as_sat")]
    pub sent: Amount,
    /// Amount received in this transaction (as reported by the wallet).
    #[serde(with = "bitcoin::amount::serde::as_sat")]
    pub received: Amount,
    /// Transaction fee, if it could be determined.
    #[serde(with = "bitcoin::amount::serde::as_sat::opt")]
    pub fee: Option<Amount>,
    pub confirmation_status: ConfirmationStatus,
}
/// Confirmation state of an on-chain transaction.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum ConfirmationStatus {
    /// The status could not be determined.
    Unknown,
    /// Seen in the mempool but not yet confirmed.
    Mempool {
        #[serde(with = "time::serde::rfc3339")]
        last_seen: OffsetDateTime,
    },
    /// Included in a block.
    Confirmed {
        n_confirmations: NonZeroU32,
        #[serde(with = "time::serde::rfc3339")]
        timestamp: OffsetDateTime,
    },
}
/// HTTP handler returning the coordinator's confirmed on-chain balance and
/// the usable DLC channel balance, both in satoshis.
///
/// The wallet queries are blocking, so the whole lookup runs on the blocking
/// thread pool.
pub async fn get_balance(State(state): State<Arc<AppState>>) -> Result<Json<Balance>, AppError> {
    spawn_blocking(move || {
        let dlc_channel = state
            .node
            .inner
            .get_dlc_channels_usable_balance()
            .map_err(|error| {
                AppError::InternalServerError(format!(
                    "Failed getting dlc channel balance {error:#}"
                ))
            })?;
        let onchain = state.node.inner.get_on_chain_balance();
        Ok(Json(Balance {
            onchain: onchain.confirmed,
            dlc_channel: dlc_channel.to_sat(),
        }))
    })
    .await
    // The outer map_err handles a panicked/cancelled blocking task; the inner
    // Result is then propagated as-is by the trailing `?`.
    .map_err(|e| AppError::InternalServerError(format!("Failed to get balance: {e:#}")))?
}
/// HTTP handler listing the wallet's UTXOs as `(outpoint, txout)` pairs.
pub async fn get_utxos(
    State(state): State<Arc<AppState>>,
) -> Result<Json<Vec<(OutPoint, TxOut)>>, AppError> {
    Ok(Json(state.node.inner.get_utxos()))
}
/// Fee-rate estimate in sats per vbyte, serialised as a bare number.
#[derive(Serialize)]
pub struct FeeRateEstimation(u32);
/// HTTP handler estimating the fee rate (sats/vbyte) for a named confirmation
/// target: `normal`, `background`, `highpriority` or `mempoolminimum`.
pub async fn get_fee_rate_estimation(
    State(state): State<Arc<AppState>>,
    Path(target): Path<String>,
) -> Result<Json<FeeRateEstimation>, AppError> {
    let target = match target.as_str() {
        "normal" => ConfirmationTarget::Normal,
        "background" => ConfirmationTarget::Background,
        "highpriority" => ConfirmationTarget::HighPriority,
        "mempoolminimum" => ConfirmationTarget::MempoolMinimum,
        _ => {
            return Err(AppError::BadRequest(
                "Unknown confirmation target".to_string(),
            ));
        }
    };
    // Round the estimator's f32 down before converting.
    let sats_per_vbyte = state
        .node
        .inner
        .fee_rate_estimator
        .get(target)
        .as_sat_per_vb()
        .floor();
    // f32 -> Decimal -> u32; either conversion failing is an internal error.
    let sats_per_vbyte = Decimal::from_f32(sats_per_vbyte)
        .context("failed to convert f32 to u32")
        .map_err(|e| AppError::InternalServerError(format!("{e:#}")))?;
    let fee_rate = sats_per_vbyte
        .to_u32()
        .context("failed to convert to u32")
        .map_err(|e| AppError::InternalServerError(format!("{e:#}")))?;
    Ok(Json(FeeRateEstimation(fee_rate)))
}
/// Admin view of a DLC channel, combining node-side channel/contract details
/// with user metadata and reserve balances.
#[derive(Serialize)]
pub struct DlcChannelDetails {
    #[serde(flatten)]
    pub channel_details: xxi_node::DlcChannelDetails,
    #[serde(flatten)]
    pub contract_details: Option<xxi_node::ContractDetails>,
    /// Contact of the counterparty, or "unknown" if not registered.
    pub user_email: String,
    #[serde(with = "time::serde::rfc3339::option")]
    pub user_registration_timestamp: Option<OffsetDateTime>,
    #[serde(with = "bitcoin::amount::serde::as_sat::opt")]
    pub coordinator_reserve_sats: Option<Amount>,
    #[serde(with = "bitcoin::amount::serde::as_sat::opt")]
    pub trader_reserve_sats: Option<Amount>,
}
/// HTTP handler listing all DLC channels together with user metadata, contract
/// details and reserve balances, sorted by channel state.
#[instrument(skip_all, err(Debug))]
pub async fn list_dlc_channels(
    State(state): State<Arc<AppState>>,
) -> Result<Json<Vec<DlcChannelDetails>>, AppError> {
    let mut conn =
        state.pool.clone().get().map_err(|e| {
            AppError::InternalServerError(format!("Failed to acquire db lock: {e:#}"))
        })?;
    let dlc_channels = state.node.inner.list_dlc_channels().map_err(|e| {
        AppError::InternalServerError(format!("Failed to list DLC channels: {e:#}"))
    })?;
    let mut dlc_channels = dlc_channels
        .into_iter()
        .map(|dlc_channel| {
            // Best-effort user lookup; unknown counterparties are labelled "unknown".
            let (email, registration_timestamp) =
                match db::user::by_id(&mut conn, dlc_channel.get_counter_party_id().to_string()) {
                    Ok(Some(user)) => (user.contact, Some(user.timestamp)),
                    _ => ("unknown".to_string(), None),
                };
            let dlc_channel_id = dlc_channel.get_id();
            // A channel without an associated contract is not an error here.
            let contract = match state
                .node
                .inner
                .get_contract_by_dlc_channel_id(&dlc_channel_id)
            {
                Ok(contract) => Some(contract),
                Err(_) => None,
            };
            // Balance lookups are best-effort as well.
            let coordinator_reserve_sats = state
                .node
                .inner
                .get_dlc_channel_usable_balance(&dlc_channel_id)
                .ok();
            let trader_reserve_sats = state
                .node
                .inner
                .get_dlc_channel_usable_balance_counterparty(&dlc_channel_id)
                .ok();
            DlcChannelDetails {
                channel_details: xxi_node::DlcChannelDetails::from(dlc_channel),
                contract_details: contract.map(xxi_node::ContractDetails::from),
                user_email: email,
                user_registration_timestamp: registration_timestamp,
                coordinator_reserve_sats,
                trader_reserve_sats,
            }
        })
        .collect::<Vec<_>>();
    // Sort channels by state
    dlc_channels.sort_by(|a, b| {
        let ordering = a
            .channel_details
            .channel_state
            .cmp(&b.channel_details.channel_state);
        if ordering != Ordering::Equal {
            return ordering;
        }
        // Tie-break on the signed-channel sub-state.
        a.channel_details
            .signed_channel_state
            .cmp(&b.channel_details.signed_channel_state)
    });
    Ok(Json(dlc_channels))
}
/// HTTP handler proposing a collaborative revert of a DLC channel to the
/// counterparty, using the fee rate, payout and price from the request.
#[instrument(skip_all, err(Debug))]
pub async fn collaborative_revert(
    State(state): State<Arc<AppState>>,
    revert_params: Json<CollaborativeRevertCoordinatorRequest>,
) -> Result<(), AppError> {
    let channel_id_hex = revert_params.channel_id.clone();
    let channel_id = parse_dlc_channel_id(channel_id_hex.as_str())
        .map_err(|e| AppError::BadRequest(format!("Invalid channel ID provided: {e:#}")))?;
    collaborative_revert::propose_collaborative_revert(
        state.node.inner.clone(),
        state.pool.clone(),
        state.auth_users_notifier.clone(),
        channel_id,
        revert_params.fee_rate_sats_vb,
        revert_params.counter_payout,
        revert_params.price,
    )
    .await
    .map_err(|e| {
        AppError::InternalServerError(format!("Could not collaboratively revert channel: {e:#}"))
    })?;
    tracing::info!(channel_id = channel_id_hex, "Proposed collaborative revert");
    Ok(())
}
/// HTTP handler returning the coordinator's on-chain transaction history.
///
/// The wallet query is blocking, so it runs on the blocking thread pool.
pub async fn list_on_chain_transactions(
    State(state): State<Arc<AppState>>,
) -> Json<Vec<TransactionDetails>> {
    let history = spawn_blocking(move || state.node.inner.get_on_chain_history())
        .await
        .expect("task to complete");
    let details: Vec<TransactionDetails> =
        history.into_iter().map(TransactionDetails::from).collect();
    Json(details)
}
/// HTTP handler returning the public keys of all connected peers.
pub async fn list_peers(State(state): State<Arc<AppState>>) -> Json<Vec<PublicKey>> {
    Json(state.node.inner.list_peers())
}
/// Query parameters for `close_channel`.
#[derive(Debug, Deserialize)]
pub struct CloseChannelParams {
    /// Force-close instead of collaboratively closing. An empty string is
    /// treated as absent (see `empty_string_as_none`).
    #[serde(default, deserialize_with = "empty_string_as_none")]
    force: Option<bool>,
}
/// HTTP handler closing a DLC channel, collaboratively by default or forced
/// when the `force` query parameter is set.
#[instrument(skip_all, err(Debug))]
pub async fn close_channel(
    Path(channel_id_string): Path<String>,
    Query(params): Query<CloseChannelParams>,
    State(state): State<Arc<AppState>>,
) -> Result<(), AppError> {
    let channel_id = parse_dlc_channel_id(&channel_id_string)
        .map_err(|_| AppError::BadRequest("Provided channel ID was invalid".to_string()))?;
    tracing::info!(channel_id = %channel_id_string, "Attempting to close channel");
    let result = if params.force.unwrap_or_default() {
        state.node.force_close_dlc_channel(channel_id).await
    } else {
        state.node.close_dlc_channel(channel_id).await
    };
    result.map_err(|e| AppError::InternalServerError(format!("{e:#}")))?;
    Ok(())
}
/// Safety-guard query parameter for destructive admin endpoints.
#[derive(Debug, Deserialize)]
pub struct Confirmation {
    /// Must be explicitly set for the operation to proceed. An empty string is
    /// treated as absent (see `empty_string_as_none`).
    #[serde(default, deserialize_with = "empty_string_as_none")]
    i_know_what_i_am_doing: Option<bool>,
}
/// This function deletes a DLC channel from our database irreversible!
/// If you want to close a channel instead, use `close_channel`
///
/// Requires the `i_know_what_i_am_doing` query parameter as a safety guard.
#[instrument(skip_all, err(Debug))]
pub async fn delete_dlc_channel(
    Path(channel_id_string): Path<String>,
    State(state): State<Arc<AppState>>,
    Query(params): Query<Confirmation>,
) -> Result<(), AppError> {
    // Refuse to do anything unless the caller explicitly confirmed.
    if !params.i_know_what_i_am_doing.unwrap_or_default() {
        let error_message =
            "Looks like you don't know what you are doing! Go and ask your supervisor for help!";
        tracing::warn!(error_message);
        return Err(AppError::BadRequest(error_message.to_string()));
    }
    let channel_id = parse_dlc_channel_id(&channel_id_string)
        .map_err(|_| AppError::BadRequest("Provided channel ID was invalid".to_string()))?;
    tracing::info!(channel_id = %channel_id_string, "Deleting dlc channel");
    state
        .node
        .inner
        .dlc_storage
        .delete_channel(&channel_id)
        .map_err(|e| {
            AppError::InternalServerError(format!(
                "Could not delete dlc_channel with id {} due to {:?}",
                channel_id_string, e
            ))
        })?;
    tracing::info!(channel_id = %channel_id_string, "Deleted dlc channel");
    Ok(())
}
/// This function attempts to roll back a DLC channel to the last stable state!
/// The action is irreversible, only use if you know what you are doing!
///
/// Requires the `i_know_what_i_am_doing` query parameter as a safety guard.
/// Only channels in the `Signed` state can be rolled back.
#[instrument(skip_all, err(Debug))]
pub async fn roll_back_dlc_channel(
    Path(channel_id_string): Path<String>,
    State(state): State<Arc<AppState>>,
    Query(params): Query<Confirmation>,
) -> Result<(), AppError> {
    // Refuse to do anything unless the caller explicitly confirmed.
    if !params.i_know_what_i_am_doing.unwrap_or_default() {
        let error_message =
            "Looks like you don't know what you are doing! Go and ask your supervisor for help!";
        tracing::warn!(error_message);
        return Err(AppError::BadRequest(error_message.to_string()));
    }
    let channel_id = parse_dlc_channel_id(&channel_id_string)
        .map_err(|_| AppError::BadRequest("Provided channel ID was invalid".to_string()))?;
    tracing::info!(channel_id = %channel_id_string, "Attempting to roll back dlc channel to last stable state");
    let channel = state
        .node
        .inner
        .get_dlc_channel_by_id(&channel_id)
        .map_err(|e| AppError::BadRequest(format!("Couldn't find channel. {e:#}")))?;
    // Rollback is only defined for signed channels.
    if let Channel::Signed(signed_channel) = channel {
        state
            .node
            .inner
            .roll_back_channel(&signed_channel)
            .map_err(|e| {
                AppError::InternalServerError(format!("Failed to roll back channel. {e:#}"))
            })?
    } else {
        return Err(AppError::BadRequest(
            "It's only possible to rollback a channel in state signed".to_string(),
        ));
    }
    tracing::info!(channel_id = %channel_id_string, "Rolled back dlc channel");
    Ok(())
}
/// HTTP handler reporting whether the coordinator currently has a connection
/// to the peer with the given public key.
#[instrument(skip_all, err(Debug))]
pub async fn is_connected(
    State(state): State<Arc<AppState>>,
    Path(target_pubkey): Path<String>,
) -> Result<Json<bool>, AppError> {
    let target = target_pubkey.parse().map_err(|err| {
        AppError::BadRequest(format!("Invalid public key {target_pubkey}. Error: {err}"))
    })?;
    let connected = state.node.is_connected(target);
    Ok(Json(connected))
}
/// HTTP handler proposing a rollover for the position associated with the
/// given DLC channel (channel ID supplied hex-encoded in the path).
#[instrument(skip_all, err(Debug))]
pub async fn rollover(
    State(state): State<Arc<AppState>>,
    Path(dlc_channel_id): Path<String>,
) -> Result<(), AppError> {
    let dlc_channel_id = DlcChannelId::from_hex(dlc_channel_id.clone()).map_err(|e| {
        AppError::InternalServerError(format!("Could not decode DLC channel ID: {e}"))
    })?;
    let mut connection = state
        .pool
        .get()
        .map_err(|e| AppError::InternalServerError(format!("Could not acquire DB lock: {e}")))?;
    // The position belongs to the channel counterparty; missing positions are
    // treated as a bad request.
    let position = get_position_by_channel_id(&state, dlc_channel_id, &mut connection)
        .map_err(|e| AppError::BadRequest(format!("Could not find position for channel: {e:#}")))?;
    state
        .node
        .propose_rollover(
            &mut connection,
            &dlc_channel_id,
            position,
            state.node.inner.network,
        )
        .await
        .map_err(|e| {
            AppError::InternalServerError(format!("Failed to rollover DLC channel: {e:#}",))
        })?;
    Ok(())
}
/// Looks up the position belonging to the counterparty of the DLC channel with
/// the given ID.
fn get_position_by_channel_id(
    state: &Arc<AppState>,
    dlc_channel_id: [u8; 32],
    conn: &mut PooledConnection<ConnectionManager<PgConnection>>,
) -> anyhow::Result<Position> {
    let dlc_channels = state.node.inner.list_dlc_channels()?;
    // Find the counterparty of the channel with the matching ID.
    let public_key = dlc_channels
        .iter()
        .find_map(|channel| {
            if channel.get_id() == dlc_channel_id {
                Some(channel.get_counter_party_id())
            } else {
                None
            }
        })
        .context("DLC Channel not found")?;
    let position = db::positions::Position::get_position_by_trader(
        conn,
        // Round-trips the key through its serialised form — presumably to
        // bridge two different secp256k1 key types; TODO confirm.
        PublicKey::from_slice(&public_key.serialize()).expect("to be valid"),
        vec![],
    )?
    .context("Position for channel not found")?;
    Ok(position)
}
// Migrate existing dlc channels. TODO(holzeis): Delete this function after the migration has been
// run in prod.
pub async fn migrate_dlc_channels(State(state): State<Arc<AppState>>) -> Result<(), AppError> {
    let mut conn = state
        .pool
        .get()
        .map_err(|e| AppError::InternalServerError(format!("{e:#}")))?;
    // Create a database record for every signed DLC channel known to the node.
    for channel in state
        .node
        .inner
        .list_signed_dlc_channels()
        .map_err(|e| AppError::InternalServerError(format!("{e:#}")))?
    {
        let coordinator_reserve = state
            .node
            .inner
            .get_dlc_channel_usable_balance(&channel.channel_id)
            .map_err(|e| AppError::InternalServerError(format!("{e:#}")))?;
        let trader_reserve = state
            .node
            .inner
            .get_dlc_channel_usable_balance_counterparty(&channel.channel_id)
            .map_err(|e| AppError::InternalServerError(format!("{e:#}")))?;
        let coordinator_funding = Amount::from_sat(channel.own_params.collateral);
        let trader_funding = Amount::from_sat(channel.counter_params.collateral);
        // Reuse the channel's reference ID as protocol ID when present,
        // otherwise mint a fresh one.
        let protocol_id = match channel.reference_id {
            Some(reference_id) => ProtocolId::try_from(reference_id)
                .map_err(|e| AppError::InternalServerError(format!("{e:#}")))?,
            None => ProtocolId::new(),
        };
        // Insert as pending first, then immediately mark open with the funding
        // transaction and balances.
        db::dlc_channels::insert_pending_dlc_channel(
            &mut conn,
            &protocol_id,
            &channel.channel_id,
            &to_secp_pk_30(channel.counter_party),
        )
        .map_err(|e| AppError::InternalServerError(format!("{e:#}")))?;
        db::dlc_channels::set_dlc_channel_open(
            &mut conn,
            &protocol_id,
            &channel.channel_id,
            to_txid_30(channel.fund_tx.txid()),
            coordinator_reserve,
            trader_reserve,
            coordinator_funding,
            trader_funding,
        )
        .map_err(|e| AppError::InternalServerError(format!("{e:#}")))?;
        // Mirror any in-flight closing state onto the migrated record.
        match channel.state {
            SignedChannelState::Closing {
                buffer_transaction, ..
            } => {
                db::dlc_channels::set_channel_force_closing(
                    &mut conn,
                    &channel.channel_id,
                    to_txid_30(buffer_transaction.txid()),
                )
                .map_err(|e| AppError::InternalServerError(format!("{e:#}")))?;
            }
            SignedChannelState::SettledClosing {
                settle_transaction, ..
            } => {
                db::dlc_channels::set_channel_force_closing_settled(
                    &mut conn,
                    &channel.channel_id,
                    to_txid_30(settle_transaction.txid()),
                    None,
                )
                .map_err(|e| AppError::InternalServerError(format!("{e:#}")))?;
            }
            SignedChannelState::CollaborativeCloseOffered { close_tx, .. } => {
                db::dlc_channels::set_channel_collab_closing(
                    &mut conn,
                    &channel.channel_id,
                    to_txid_30(close_tx.txid()),
                )
                .map_err(|e| AppError::InternalServerError(format!("{e:#}")))?;
            }
            _ => {} // ignored
        }
    }
    Ok(())
}
/// HTTP handler re-sending the renew-revoke message to the trader identified
/// by the public key in the path.
pub async fn resend_renew_revoke_message(
    State(state): State<Arc<AppState>>,
    Path(trader_pubkey): Path<String>,
) -> Result<(), AppError> {
    let trader = trader_pubkey.parse().map_err(|err| {
        AppError::BadRequest(format!("Invalid public key {trader_pubkey}. Error: {err}"))
    })?;
    let result = state.node.resend_renew_revoke_message_internal(trader);
    result.map_err(|e| {
        AppError::InternalServerError(format!(
            "Failed to resend renew revoke message for {}: {e:#}",
            trader_pubkey
        ))
    })?;
    Ok(())
}
/// Internal API for syncing the on-chain wallet and the DLC channels.
///
/// `full=true` triggers a full wallet sync with the given stop gap (default
/// 20); otherwise a regular sync is performed. Afterwards the DLC manager's
/// periodic check runs on the blocking thread pool.
#[instrument(skip_all, err(Debug))]
pub async fn post_sync(
    State(state): State<Arc<AppState>>,
    Query(params): Query<SyncParams>,
) -> Result<(), AppError> {
    if params.full.unwrap_or(false) {
        tracing::info!("Full sync");
        let stop_gap = params.gap.unwrap_or(20);
        state.node.inner.full_sync(stop_gap).await.map_err(|e| {
            AppError::InternalServerError(format!("Could not full-sync on-chain wallet: {e:#}"))
        })?;
    } else {
        tracing::info!("Regular sync");
        state.node.inner.sync_on_chain_wallet().await.map_err(|e| {
            AppError::InternalServerError(format!("Could not sync on-chain wallet: {e:#}"))
        })?;
    }
    spawn_blocking(move || {
        // A failing periodic check is logged but does not fail the request.
        if let Err(e) = state.node.inner.dlc_manager.periodic_check() {
            tracing::error!("Failed to run DLC manager periodic check: {e:#}");
        };
    })
    .await
    .expect("task to complete");
    Ok(())
}
/// Query parameters for `post_sync`.
#[derive(Debug, Deserialize)]
pub struct SyncParams {
    /// Request a full wallet sync instead of a regular one.
    #[serde(default, deserialize_with = "empty_string_as_none")]
    full: Option<bool>,
    /// Stop gap for a full sync; defaults to 20 when absent.
    #[serde(default, deserialize_with = "empty_string_as_none")]
    gap: Option<usize>,
}
/// HTTP handler returning the current coordinator settings as a JSON string.
pub async fn get_settings(State(state): State<Arc<AppState>>) -> impl IntoResponse {
    let settings = state.settings.read().await;
    // A serialisation failure would be a programming error, hence the expect.
    serde_json::to_string(&*settings).expect("to be able to serialise settings")
}
/// HTTP handler replacing the coordinator settings, persisting them to the
/// settings file, and forwarding the node-relevant subset to the xxi node.
#[instrument(skip_all, err(Debug))]
pub async fn update_settings(
    State(state): State<Arc<AppState>>,
    Json(updated_settings): Json<SettingsFile>,
) -> Result<(), AppError> {
    let mut settings = state.settings.write().await;
    // `updated_settings` is not used after this call, so move it instead of
    // cloning (the clone here was redundant).
    settings.update(updated_settings);
    settings
        .write_to_file()
        .await
        .map_err(|e| AppError::InternalServerError(format!("Could not write settings: {e:#}")))?;
    // Forward relevant settings down to the xxi node.
    state.node.inner.update_settings(settings.xxi.clone()).await;
    Ok(())
}
/// HTTP handler returning the referral status of the trader identified by the
/// public key in the path.
#[instrument(skip_all, err(Debug))]
pub async fn get_user_referral_status(
    State(state): State<Arc<AppState>>,
    Path(trader_pubkey): Path<String>,
) -> Result<Json<commons::ReferralStatus>, AppError> {
    let mut db_conn = state
        .pool
        .get()
        .map_err(|e| AppError::InternalServerError(format!("Could not get connection: {e:#}")))?;
    let trader_pubkey = trader_pubkey
        .as_str()
        .parse()
        .map_err(|_| AppError::BadRequest("Invalid trader id provided".to_string()))?;
    let status = referrals::get_referral_status(trader_pubkey, &mut db_conn).map_err(|err| {
        AppError::InternalServerError(format!("Could not calculate referral state {err:?}"))
    })?;
    Ok(Json(status))
}
/// HTTP handler inserting externally supplied funding rates into the database
/// and broadcasting them on the orderbook feed.
#[instrument(skip_all, err(Debug))]
pub async fn post_funding_rates(
    State(state): State<Arc<AppState>>,
    Json(funding_rates): Json<FundingRates>,
) -> Result<(), AppError> {
    // Database work is blocking; run it on the blocking thread pool.
    spawn_blocking(move || {
        let mut conn = state.pool.get().map_err(|e| {
            AppError::InternalServerError(format!("Could not get connection: {e:#}"))
        })?;
        // Convert the wire format into the internal funding-rate type.
        let funding_rates = funding_rates
            .0
            .iter()
            .copied()
            .map(xxi_node::commons::FundingRate::from)
            .collect::<Vec<_>>();
        insert_funding_rates(&mut conn, state.tx_orderbook_feed.clone(), &funding_rates)
            .map_err(|e| AppError::BadRequest(format!("{e:#}")))?;
        Ok(())
    })
    .await
    .expect("task to complete")?;
    Ok(())
}
/// Wire format: a bare JSON array of funding rates.
#[derive(Debug, Deserialize)]
pub struct FundingRates(Vec<FundingRate>);
/// A single funding rate over a time interval (RFC 3339 timestamps).
#[derive(Debug, Deserialize, Clone, Copy)]
pub struct FundingRate {
    rate: Decimal,
    #[serde(with = "time::serde::rfc3339")]
    start_date: OffsetDateTime,
    #[serde(with = "time::serde::rfc3339")]
    end_date: OffsetDateTime,
}
/// Converts the admin-API wire format into the internal funding-rate type.
impl From<FundingRate> for xxi_node::commons::FundingRate {
    fn from(value: FundingRate) -> Self {
        xxi_node::commons::FundingRate::new(value.rate, value.start_date, value.end_date)
    }
}
/// Maps the node's transaction details onto the admin-API representation.
impl From<xxi_node::TransactionDetails> for TransactionDetails {
    fn from(value: xxi_node::TransactionDetails) -> Self {
        Self {
            transaction: value.transaction,
            sent: value.sent,
            received: value.received,
            // A fee that could not be determined is flattened into `None`.
            fee: value.fee.ok(),
            confirmation_status: value.confirmation_status.into(),
        }
    }
}
/// Maps the node's confirmation status onto the admin-API enum one-to-one.
impl From<xxi_node::ConfirmationStatus> for ConfirmationStatus {
    fn from(value: xxi_node::ConfirmationStatus) -> Self {
        match value {
            xxi_node::ConfirmationStatus::Unknown => Self::Unknown,
            xxi_node::ConfirmationStatus::Mempool { last_seen } => Self::Mempool { last_seen },
            xxi_node::ConfirmationStatus::Confirmed {
                n_confirmations,
                timestamp,
            } => Self::Confirmed {
                n_confirmations,
                timestamp,
            },
        }
    }
}
fn empty_string_as_none<'de, D, T>(de: D) -> Result<Option<T>, D::Error>
where
D: Deserializer<'de>,
T: FromStr,
T::Err: fmt::Display,
{
let opt = Option::<String>::deserialize(de)?;
match opt.as_deref() {
None | Some("") => Ok(None),
Some(s) => FromStr::from_str(s).map_err(de::Error::custom).map(Some),
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/position/mod.rs | coordinator/src/position/mod.rs | pub mod models;
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/position/models.rs | coordinator/src/position/models.rs | use crate::compute_relative_contracts;
use crate::decimal_from_f32;
use crate::f32_from_decimal;
use crate::FundingFee;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Address;
use bitcoin::Amount;
use bitcoin::Txid;
use dlc_manager::ContractId;
use dlc_manager::DlcChannelId;
use lightning::ln::ChannelId;
use rust_decimal::prelude::Signed;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use time::OffsetDateTime;
use xxi_node::bitmex_client::Quote;
use xxi_node::cfd::calculate_leverage;
use xxi_node::cfd::calculate_long_liquidation_price;
use xxi_node::cfd::calculate_margin;
use xxi_node::cfd::calculate_pnl;
use xxi_node::cfd::calculate_short_liquidation_price;
use xxi_node::commons::ContractSymbol;
use xxi_node::commons::Direction;
use xxi_node::commons::TradeParams;
/// All data required to insert a new position for a trader.
#[derive(Clone)]
pub struct NewPosition {
    pub contract_symbol: ContractSymbol,
    pub trader_leverage: f32,
    pub quantity: f32,
    pub trader_direction: Direction,
    /// Public key identifying the trader.
    pub trader: PublicKey,
    pub average_entry_price: f32,
    pub trader_liquidation_price: Decimal,
    pub coordinator_liquidation_price: Decimal,
    pub coordinator_margin: Amount,
    pub expiry_timestamp: OffsetDateTime,
    pub temporary_contract_id: ContractId,
    pub coordinator_leverage: f32,
    pub trader_margin: Amount,
    pub stable: bool,
    pub order_matching_fees: Amount,
}
/// Lifecycle states of a position.
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum PositionState {
    /// The position is in the process of being opened.
    ///
    /// Once the position is fully opened it will end in the state `Open`
    Proposed,
    /// The position is fully open.
    Open,
    /// The position is in the process of being closed.
    ///
    /// Once the position is being closed the closing price is known.
    Closing {
        closing_price: f32,
    },
    /// The position has been closed with the given PnL.
    Closed {
        pnl: i64,
    },
    /// The position was not opened successfully.
    Failed,
    /// The position is being rolled over.
    Rollover,
    /// The position is being resized.
    Resizing,
}
/// The trading position for a user identified by `trader`.
#[derive(Clone, Copy, PartialEq)]
pub struct Position {
    pub id: i32,
    pub trader: PublicKey,
    pub contract_symbol: ContractSymbol,
    pub quantity: f32,
    pub trader_direction: Direction,
    pub average_entry_price: f32,
    /// Present once a closing price is known (see `PositionState::Closing`).
    pub closing_price: Option<f32>,
    pub trader_realized_pnl_sat: Option<i64>,
    pub trader_liquidation_price: f32,
    pub coordinator_liquidation_price: f32,
    pub trader_margin: Amount,
    pub coordinator_margin: Amount,
    pub trader_leverage: f32,
    pub coordinator_leverage: f32,
    pub position_state: PositionState,
    /// Accumulated order matching fees for the lifetime of the position.
    pub order_matching_fees: Amount,
    pub creation_timestamp: OffsetDateTime,
    pub expiry_timestamp: OffsetDateTime,
    pub update_timestamp: OffsetDateTime,
    /// The temporary contract ID that is created when an [`OfferedContract`] is sent.
    ///
    /// We use the temporary contract ID because the actual contract ID is not always available.
    /// The temporary contract ID is propagated to all `rust-dlc` states until the contract is
    /// closed.
    ///
    /// This field is optional to maintain backwards compatibility, because we cannot
    /// deterministically associate already existing contracts with positions.
    ///
    /// [`OfferedContract`]: dlc_manager::contract::offered_contract::OfferedContract
    pub temporary_contract_id: Option<ContractId>,
    pub stable: bool,
}
impl Position {
// Returns true if the position is expired
pub fn is_expired(&self) -> bool {
OffsetDateTime::now_utc() >= self.expiry_timestamp
}
    /// Calculates the profit and loss for the coordinator in satoshis
    pub fn calculate_coordinator_pnl(&self, quote: Quote) -> Result<i64> {
        // Without a recorded closing price, price the position using the quote
        // for the direction opposite the trader (the coordinator's side).
        let closing_price = match self.closing_price {
            None => quote.get_price_for_direction(self.trader_direction.opposite()),
            Some(closing_price) => {
                Decimal::try_from(closing_price).expect("f32 closing price to fit into decimal")
            }
        };
        let average_entry_price = Decimal::try_from(self.average_entry_price)
            .context("Failed to convert average entry price to Decimal")?;
        // Map trader/coordinator leverage onto the long and short sides.
        let (long_leverage, short_leverage) = match self.trader_direction {
            Direction::Long => (self.trader_leverage, self.coordinator_leverage),
            Direction::Short => (self.coordinator_leverage, self.trader_leverage),
        };
        // The coordinator always takes the opposite side of the trader.
        let direction = self.trader_direction.opposite();
        let long_margin = calculate_margin(average_entry_price, self.quantity, long_leverage);
        let short_margin = calculate_margin(average_entry_price, self.quantity, short_leverage);
        let pnl = calculate_pnl(
            average_entry_price,
            closing_price,
            self.quantity,
            direction,
            long_margin.to_sat(),
            short_margin.to_sat(),
        )
        .context("Failed to calculate pnl for position")?;
        Ok(pnl)
    }
/// Calculate the settlement amount for the coordinator when closing the _entire_ position.
pub fn calculate_coordinator_settlement_amount(
&self,
closing_price: Decimal,
matching_fee: Amount,
) -> Result<u64> {
let opening_price = Decimal::try_from(self.average_entry_price)?;
let leverage_long = leverage_long(
self.trader_direction,
self.trader_leverage,
self.coordinator_leverage,
);
let leverage_short = leverage_short(
self.trader_direction,
self.trader_leverage,
self.coordinator_leverage,
);
let coordinator_direction = self.trader_direction.opposite();
calculate_coordinator_settlement_amount(
opening_price,
closing_price,
self.quantity,
leverage_long,
leverage_short,
coordinator_direction,
matching_fee,
)
}
/// Calculate the settlement amount for the accept party (i.e. the trader) when closing the DLC
/// channel for the two-step position resizing protocol.
pub fn calculate_accept_settlement_amount_partial_close(
&self,
trade_params: &TradeParams,
) -> Result<Amount> {
calculate_accept_settlement_amount_partial_close(
self.quantity,
self.trader_direction,
self.average_entry_price,
self.trader_leverage,
self.coordinator_leverage,
trade_params.quantity,
trade_params.direction,
trade_params.average_execution_price(),
)
}
#[must_use]
pub fn apply_funding_fee(
self,
funding_fee: FundingFee,
maintenance_margin_rate: Decimal,
) -> Self {
let quantity = decimal_from_f32(self.quantity);
let average_entry_price = decimal_from_f32(self.average_entry_price);
match funding_fee {
FundingFee::Zero => self,
FundingFee::CoordinatorPays(funding_fee) => {
let funding_fee = funding_fee.to_signed().expect("to fit");
let coordinator_margin = self.coordinator_margin.to_signed().expect("to fit");
let new_coordinator_margin = coordinator_margin - funding_fee;
let new_coordinator_margin =
new_coordinator_margin.to_unsigned().unwrap_or(Amount::ZERO);
let new_coordinator_leverage =
calculate_leverage(quantity, new_coordinator_margin, average_entry_price);
let new_coordinator_liquidation_price = match self.trader_direction.opposite() {
Direction::Long => calculate_long_liquidation_price(
new_coordinator_leverage,
average_entry_price,
maintenance_margin_rate,
),
Direction::Short => calculate_short_liquidation_price(
new_coordinator_leverage,
average_entry_price,
maintenance_margin_rate,
),
};
Self {
coordinator_margin: new_coordinator_margin,
coordinator_leverage: f32_from_decimal(new_coordinator_leverage),
coordinator_liquidation_price: f32_from_decimal(
new_coordinator_liquidation_price,
),
..self
}
}
FundingFee::TraderPays(funding_fee) => {
let funding_fee = funding_fee.to_signed().expect("to fit");
let margin_trader = self.trader_margin.to_signed().expect("to fit");
let new_trader_margin = margin_trader - funding_fee;
let new_trader_margin = new_trader_margin.to_unsigned().unwrap_or(Amount::ZERO);
let new_trader_leverage =
calculate_leverage(quantity, new_trader_margin, average_entry_price);
let new_trader_liquidation_price = match self.trader_direction {
Direction::Long => calculate_long_liquidation_price(
new_trader_leverage,
average_entry_price,
maintenance_margin_rate,
),
Direction::Short => calculate_short_liquidation_price(
new_trader_leverage,
average_entry_price,
maintenance_margin_rate,
),
};
Self {
trader_margin: new_trader_margin,
trader_leverage: f32_from_decimal(new_trader_leverage),
trader_liquidation_price: f32_from_decimal(new_trader_liquidation_price),
..self
}
}
}
}
}
/// Calculate the settlement amount for the coordinator, based on the PNL and the order-matching
/// closing fee.
///
/// The payout is the coordinator's margin plus their PNL plus the matching fee, clamped to the
/// range `[0, long_margin + short_margin]`.
fn calculate_coordinator_settlement_amount(
    opening_price: Decimal,
    closing_price: Decimal,
    quantity: f32,
    long_leverage: f32,
    short_leverage: f32,
    coordinator_direction: Direction,
    matching_fee: Amount,
) -> Result<u64> {
    let long_margin = calculate_margin(opening_price, quantity, long_leverage);
    let short_margin = calculate_margin(opening_price, quantity, short_leverage);
    let pnl = calculate_pnl(
        opening_price,
        closing_price,
        quantity,
        coordinator_direction,
        long_margin.to_sat(),
        short_margin.to_sat(),
    )?;
    let coordinator_margin = if let Direction::Long = coordinator_direction {
        long_margin
    } else {
        short_margin
    };
    // Margin plus PNL, floored at zero. `calculate_pnl` should already guarantee a non-negative
    // payout, but we double-check here.
    let payout = (Decimal::from(coordinator_margin.to_sat()) + Decimal::from(pnl)).max(Decimal::ZERO);
    // The coordinator should always get at least the order-matching fee for closing the position.
    let payout = payout + Decimal::from(matching_fee.to_sat());
    let payout = payout.to_u64().expect("to fit into u64");
    // The coordinator's maximum settlement amount is capped by the total combined margin in the
    // contract.
    let total_margin = long_margin + short_margin;
    Ok(payout.min(total_margin.to_sat()))
}
/// Calculate the settlement amount for the accept party (i.e. the trader) when closing the DLC
/// channel for the two-step position resizing protocol.
///
/// There are 3 distinct cases:
///
/// 1. The position is reduced: settle current DLC channel at `original_margin + PNL` based on
/// the number of contracts removed and the order's execution price.
///
/// 2. The position flips direction: settle the current DLC channel at `original_margin + PNL`
/// based on the number of contracts removed (the whole position) and the order's execution
/// price.
///
/// 3. The position is extended: settle the current DLC channel at `original_margin`. Nothing
/// has been actually settled in terms of the position, so we just want to remake the channel
/// with more contracts.
///
/// NOTE: The `position.trader_margin` has already been subtracted the previous order-matching
/// fee, so we don't have to do anything about that.
#[allow(clippy::too_many_arguments)]
fn calculate_accept_settlement_amount_partial_close(
    position_quantity: f32,
    position_direction: Direction,
    position_average_execution_price: f32,
    position_trader_leverage: f32,
    position_coordinator_leverage: f32,
    trade_quantity: f32,
    trade_direction: Direction,
    trade_average_execution_price: Decimal,
) -> Result<Amount> {
    // Express position and trade as signed contract counts so that the resize case can be
    // determined from their signs and magnitudes.
    let contracts_before_relative =
        compute_relative_contracts(decimal_from_f32(position_quantity), &position_direction);
    let contracts_trade_relative =
        compute_relative_contracts(decimal_from_f32(trade_quantity), &trade_direction);
    let contracts_after_relative = contracts_before_relative + contracts_trade_relative;
    let leverage_long = leverage_long(
        position_direction,
        position_trader_leverage,
        position_coordinator_leverage,
    );
    let leverage_short = leverage_short(
        position_direction,
        position_trader_leverage,
        position_coordinator_leverage,
    );
    let position_trader_margin = calculate_margin(
        decimal_from_f32(position_average_execution_price),
        position_quantity,
        position_trader_leverage,
    );
    // Settle `settled_contracts` contracts at the trade's execution price and return the
    // trader's settlement amount: original margin plus PNL, floored at zero. Shared by the
    // "reduced" and "flipped" cases, which only differ in how many contracts are settled.
    let settle = |settled_contracts: f32| -> Result<u64> {
        let opening_price = decimal_from_f32(position_average_execution_price);
        let long_margin = calculate_margin(opening_price, settled_contracts, leverage_long);
        let short_margin = calculate_margin(opening_price, settled_contracts, leverage_short);
        let pnl = calculate_pnl(
            opening_price,
            trade_average_execution_price,
            settled_contracts,
            position_direction,
            long_margin.to_sat(),
            short_margin.to_sat(),
        )?;
        Ok(((position_trader_margin.to_sat() as i64) + pnl).max(0) as u64)
    };
    let same_direction = contracts_before_relative.signum() == contracts_after_relative.signum();
    // Position reduced.
    let settlement_amount = if same_direction
        && contracts_before_relative.abs() > contracts_after_relative.abs()
        && !contracts_after_relative.is_zero()
    {
        // Settled as many contracts as there are in the executed order.
        settle(trade_quantity)?
    }
    // Position changed direction.
    else if !same_direction && !contracts_after_relative.is_zero() {
        // Settled as many contracts as there are in the entire position.
        settle(position_quantity)?
    }
    // Position extended.
    else if same_direction && contracts_before_relative.abs() < contracts_after_relative.abs() {
        // Nothing is settled position-wise; the trader keeps their original margin.
        position_trader_margin.to_sat()
    }
    // Position either fully settled or unchanged. This is a bug.
    else {
        debug_assert!(false);
        bail!("Invalid parameters for position resizing");
    };
    Ok(Amount::from_sat(settlement_amount))
}
/// The leverage of whichever party holds the long side, given the trader's direction.
pub fn leverage_long(direction: Direction, trader_leverage: f32, coordinator_leverage: f32) -> f32 {
    if matches!(direction, Direction::Long) {
        trader_leverage
    } else {
        coordinator_leverage
    }
}
/// The leverage of whichever party holds the short side, given the trader's direction.
pub fn leverage_short(
    direction: Direction,
    trader_leverage: f32,
    coordinator_leverage: f32,
) -> f32 {
    match direction {
        Direction::Short => trader_leverage,
        Direction::Long => coordinator_leverage,
    }
}
/// Parameters for collaboratively reverting (closing) a DLC channel at an agreed price and
/// split of funds.
#[derive(Clone, Debug)]
pub struct CollaborativeRevert {
    /// The DLC channel to be reverted.
    pub channel_id: DlcChannelId,
    /// Public key of the trader on the other end of the channel.
    pub trader_pubkey: PublicKey,
    /// The price used for the revert.
    pub price: Decimal,
    /// Address receiving the coordinator's share of the funds.
    pub coordinator_address: Address,
    /// The coordinator's share of the channel funds.
    pub coordinator_amount_sats: Amount,
    /// The trader's share of the channel funds.
    pub trader_amount_sats: Amount,
    /// Timestamp associated with this revert.
    pub timestamp: OffsetDateTime,
}
/// Like [`CollaborativeRevert`], but for legacy channels identified by a [`ChannelId`] and
/// referencing an on-chain outpoint explicitly.
#[derive(Clone, Debug)]
pub struct LegacyCollaborativeRevert {
    /// The legacy channel to be reverted.
    pub channel_id: ChannelId,
    /// Public key of the trader on the other end of the channel.
    pub trader_pubkey: PublicKey,
    /// The price used for the revert. Note: `f32` here, unlike the `Decimal` in
    /// [`CollaborativeRevert`].
    pub price: f32,
    /// Address receiving the coordinator's share of the funds.
    pub coordinator_address: Address,
    /// The coordinator's share of the channel funds.
    pub coordinator_amount_sats: Amount,
    /// The trader's share of the channel funds.
    pub trader_amount_sats: Amount,
    /// Timestamp associated with this revert.
    pub timestamp: OffsetDateTime,
    // NOTE(review): txid/vout presumably identify the channel's funding outpoint to spend —
    // confirm against the caller.
    pub txid: Txid,
    pub vout: u32,
}
/// Manual [`Debug`] implementation so that the `trader` public key is rendered via
/// [`ToString`] instead of the hex of its internal representation.
impl std::fmt::Debug for NewPosition {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("NewPosition")
            .field("contract_symbol", &self.contract_symbol)
            .field("trader_leverage", &self.trader_leverage)
            .field("quantity", &self.quantity)
            .field("trader_direction", &self.trader_direction)
            // Otherwise we end up printing the hex of the internal representation.
            .field("trader", &self.trader.to_string())
            .field("average_entry_price", &self.average_entry_price)
            .field("trader_liquidation_price", &self.trader_liquidation_price)
            .field(
                "coordinator_liquidation_price",
                &self.coordinator_liquidation_price,
            )
            .field("coordinator_margin", &self.coordinator_margin)
            .field("expiry_timestamp", &self.expiry_timestamp)
            .field("temporary_contract_id", &self.temporary_contract_id)
            .field("coordinator_leverage", &self.coordinator_leverage)
            .field("trader_margin", &self.trader_margin)
            .field("stable", &self.stable)
            .finish()
    }
}
/// Manual [`Debug`] implementation so that the `trader` public key is rendered via
/// [`ToString`] instead of the hex of its internal representation.
impl std::fmt::Debug for Position {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Exhaustive destructuring (no `..`) so that adding a field to `Position` without
        // updating this impl becomes a compile error.
        let Self {
            id,
            trader,
            contract_symbol,
            quantity,
            trader_direction,
            average_entry_price,
            closing_price,
            trader_realized_pnl_sat,
            coordinator_liquidation_price,
            trader_liquidation_price,
            trader_margin,
            coordinator_margin,
            trader_leverage,
            coordinator_leverage,
            position_state,
            order_matching_fees,
            creation_timestamp,
            expiry_timestamp,
            update_timestamp,
            temporary_contract_id,
            stable,
        } = self;
        f.debug_struct("Position")
            .field("id", &id)
            .field("contract_symbol", &contract_symbol)
            .field("trader_leverage", &trader_leverage)
            .field("quantity", &quantity)
            .field("trader_direction", &trader_direction)
            .field("average_entry_price", &average_entry_price)
            .field("trader_liquidation_price", &trader_liquidation_price)
            .field(
                "coordinator_liquidation_price",
                &coordinator_liquidation_price,
            )
            .field("position_state", &position_state)
            .field("coordinator_margin", &coordinator_margin)
            .field("creation_timestamp", &creation_timestamp)
            .field("expiry_timestamp", &expiry_timestamp)
            .field("update_timestamp", &update_timestamp)
            // Otherwise we end up printing the hex of the internal representation.
            .field("trader", &trader.to_string())
            .field("coordinator_leverage", &coordinator_leverage)
            .field("temporary_contract_id", &temporary_contract_id)
            .field("closing_price", &closing_price)
            .field("trader_margin", &trader_margin)
            .field("stable", &stable)
            .field("trader_realized_pnl_sat", &trader_realized_pnl_sat)
            .field("order_matching_fees", &order_matching_fees)
            .finish()
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::trade::liquidation_price;
use proptest::prelude::*;
use rust_decimal_macros::dec;
use std::str::FromStr;
use xxi_node::cfd::BTCUSD_MAX_PRICE;
#[test]
fn position_calculate_coordinator_settlement_amount() {
let position = Position {
id: 0,
contract_symbol: ContractSymbol::BtcUsd,
trader_leverage: 2.0,
quantity: 100.0,
trader_direction: Direction::Long,
average_entry_price: 40_000.0,
trader_liquidation_price: 20_000.0,
coordinator_liquidation_price: 60_000.0,
position_state: PositionState::Open,
coordinator_margin: Amount::from_sat(125_000),
creation_timestamp: OffsetDateTime::now_utc(),
expiry_timestamp: OffsetDateTime::now_utc(),
update_timestamp: OffsetDateTime::now_utc(),
trader: PublicKey::from_str(
"02bd998ebd176715fe92b7467cf6b1df8023950a4dd911db4c94dfc89cc9f5a655",
)
.unwrap(),
coordinator_leverage: 2.0,
temporary_contract_id: None,
closing_price: None,
trader_margin: Amount::from_sat(125_000),
stable: false,
trader_realized_pnl_sat: None,
order_matching_fees: Amount::ZERO,
};
let coordinator_settlement_amount = position
.calculate_coordinator_settlement_amount(dec!(39_000), Amount::from_sat(769))
.unwrap();
assert_eq!(coordinator_settlement_amount, 132_179);
}
#[test]
fn position_calculate_coordinator_settlement_amount_trader_leverage_3() {
let position = Position {
id: 0,
contract_symbol: ContractSymbol::BtcUsd,
trader_leverage: 3.0,
quantity: 100.0,
trader_direction: Direction::Long,
average_entry_price: 40_000.0,
trader_liquidation_price: 20_000.0,
coordinator_liquidation_price: 60_000.0,
position_state: PositionState::Open,
coordinator_margin: Amount::from_sat(125_000),
creation_timestamp: OffsetDateTime::now_utc(),
expiry_timestamp: OffsetDateTime::now_utc(),
update_timestamp: OffsetDateTime::now_utc(),
trader: PublicKey::from_str(
"02bd998ebd176715fe92b7467cf6b1df8023950a4dd911db4c94dfc89cc9f5a655",
)
.unwrap(),
coordinator_leverage: 2.0,
temporary_contract_id: None,
closing_price: None,
trader_margin: Amount::from_sat(125_000),
stable: false,
trader_realized_pnl_sat: None,
order_matching_fees: Amount::ZERO,
};
let coordinator_settlement_amount = position
.calculate_coordinator_settlement_amount(dec!(39_000), Amount::from_sat(769))
.unwrap();
assert_eq!(coordinator_settlement_amount, 132_179);
}
#[test]
fn position_calculate_coordinator_settlement_amount_coordinator_leverage_3() {
let position = Position {
id: 0,
contract_symbol: ContractSymbol::BtcUsd,
trader_leverage: 2.0,
quantity: 100.0,
trader_direction: Direction::Long,
average_entry_price: 40_000.0,
trader_liquidation_price: 20_000.0,
coordinator_liquidation_price: 60_000.0,
position_state: PositionState::Open,
coordinator_margin: Amount::from_sat(125_000),
creation_timestamp: OffsetDateTime::now_utc(),
expiry_timestamp: OffsetDateTime::now_utc(),
update_timestamp: OffsetDateTime::now_utc(),
trader: PublicKey::from_str(
"02bd998ebd176715fe92b7467cf6b1df8023950a4dd911db4c94dfc89cc9f5a655",
)
.unwrap(),
coordinator_leverage: 3.0,
temporary_contract_id: None,
closing_price: None,
trader_margin: Amount::from_sat(125_000),
stable: false,
trader_realized_pnl_sat: None,
order_matching_fees: Amount::ZERO,
};
let coordinator_settlement_amount = position
.calculate_coordinator_settlement_amount(dec!(39_000), Amount::from_sat(769))
.unwrap();
assert_eq!(coordinator_settlement_amount, 90_512);
}
// Basic sanity tests. Verify the effect of the price moving on the computed settlement amount.
#[test]
fn given_long_coordinator_and_price_goes_up() {
let quantity: f32 = 1.0;
let leverage_coordinator = 1.0;
let opening_price = Decimal::from(22000);
let closing_price = Decimal::from(23000);
let margin_coordinator = calculate_margin(opening_price, quantity, leverage_coordinator);
let settlement_coordinator = calculate_coordinator_settlement_amount(
opening_price,
closing_price,
quantity,
leverage_coordinator,
1.0,
Direction::Long,
Amount::from_sat(1000),
)
.unwrap();
assert!(margin_coordinator.to_sat() < settlement_coordinator);
}
#[test]
fn given_short_coordinator_and_price_goes_up() {
let quantity: f32 = 1.0;
let leverage_coordinator = 1.0;
let opening_price = Decimal::from(22000);
let closing_price = Decimal::from(23000);
let margin_coordinator = calculate_margin(opening_price, quantity, leverage_coordinator);
let settlement_coordinator = calculate_coordinator_settlement_amount(
opening_price,
closing_price,
quantity,
1.0,
leverage_coordinator,
Direction::Short,
Amount::from_sat(13),
)
.unwrap();
assert!(settlement_coordinator < margin_coordinator.to_sat());
}
#[test]
fn given_long_coordinator_and_price_goes_down() {
let quantity: f32 = 1.0;
let leverage_coordinator = 1.0;
let opening_price = Decimal::from(23000);
let closing_price = Decimal::from(22000);
let margin_coordinator = calculate_margin(opening_price, quantity, leverage_coordinator);
let settlement_coordinator = calculate_coordinator_settlement_amount(
opening_price,
closing_price,
quantity,
leverage_coordinator,
1.0,
Direction::Long,
Amount::from_sat(13),
)
.unwrap();
assert!(settlement_coordinator < margin_coordinator.to_sat());
}
#[test]
fn given_short_coordinator_and_price_goes_down() {
let quantity: f32 = 1.0;
let leverage_coordinator = 1.0;
let opening_price = Decimal::from(23000);
let closing_price = Decimal::from(22000);
let margin_coordinator = calculate_margin(opening_price, quantity, leverage_coordinator);
let settlement_coordinator = calculate_coordinator_settlement_amount(
opening_price,
closing_price,
quantity,
1.0,
leverage_coordinator,
Direction::Short,
Amount::from_sat(13),
)
.unwrap();
assert!(margin_coordinator.to_sat() < settlement_coordinator);
}
#[test]
fn given_long_coordinator_and_price_goes_up_different_leverages() {
let quantity: f32 = 1.0;
let leverage_coordinator = 1.0;
let opening_price = Decimal::from(22000);
let closing_price = Decimal::from(23000);
let margin_coordinator = calculate_margin(opening_price, quantity, leverage_coordinator);
let settlement_coordinator = calculate_coordinator_settlement_amount(
opening_price,
closing_price,
quantity,
leverage_coordinator,
2.0,
Direction::Long,
Amount::from_sat(13),
)
.unwrap();
assert!(margin_coordinator.to_sat() < settlement_coordinator);
}
#[test]
fn given_short_coordinator_and_price_goes_up_different_leverages() {
let quantity: f32 = 1.0;
let leverage_coordinator = 1.0;
let opening_price = Decimal::from(22000);
let closing_price = Decimal::from(23000);
let margin_coordinator = calculate_margin(opening_price, quantity, leverage_coordinator);
let settlement_coordinator = calculate_coordinator_settlement_amount(
opening_price,
closing_price,
quantity,
2.0,
leverage_coordinator,
Direction::Short,
Amount::from_sat(13),
)
.unwrap();
assert!(settlement_coordinator < margin_coordinator.to_sat());
}
#[test]
fn given_long_coordinator_and_price_goes_down_different_leverages() {
let quantity: f32 = 1.0;
let leverage_coordinator = 2.0;
let opening_price = Decimal::from(23000);
let closing_price = Decimal::from(22000);
let margin_coordinator = calculate_margin(opening_price, quantity, leverage_coordinator);
let settlement_coordinator = calculate_coordinator_settlement_amount(
opening_price,
closing_price,
quantity,
leverage_coordinator,
1.0,
Direction::Long,
Amount::from_sat(13),
)
.unwrap();
assert!(settlement_coordinator < margin_coordinator.to_sat());
}
#[test]
fn given_short_coordinator_and_price_goes_down_different_leverages() {
let quantity: f32 = 1.0;
let leverage_coordinator = 2.0;
let opening_price = Decimal::from(23000);
let closing_price = Decimal::from(22000);
let margin_coordinator = calculate_margin(opening_price, quantity, leverage_coordinator);
let settlement_coordinator = calculate_coordinator_settlement_amount(
opening_price,
closing_price,
quantity,
1.0,
leverage_coordinator,
Direction::Short,
Amount::from_sat(13),
)
.unwrap();
assert!(margin_coordinator.to_sat() < settlement_coordinator);
}
#[test]
fn given_trader_long_position_when_no_bid_price_change_then_zero_coordinator_pnl() {
let position = Position::dummy()
.with_leverage(2.0)
.with_quantity(1.0)
.with_average_entry_price(1000.0)
.with_direction(Direction::Long);
let quote = dummy_quote(1000, 0);
let coordinator_pnl = position.calculate_coordinator_pnl(quote).unwrap();
assert_eq!(coordinator_pnl, 0);
}
#[test]
fn given_trader_short_position_when_no_ask_price_change_then_zero_coordinator_pnl() {
let position = Position::dummy()
.with_leverage(2.0)
.with_quantity(1.0)
.with_average_entry_price(1000.0)
.with_direction(Direction::Short);
let quote = dummy_quote(0, 1000);
let coordinator_pnl = position.calculate_coordinator_pnl(quote).unwrap();
assert_eq!(coordinator_pnl, 0);
}
/// Thought Process documentation
///
/// In this example, the trader who went long, bought $20,000 worth of BTC at the price of
/// 20,000, i.e. 1 BTC At the price of $22,000 the trader sells $20,000 worth of BTC, i.e.
/// the trader sells it for 0.909090909 BTC. The difference is the trader's profit profit,
/// i.e.:
///
/// 1 BTC - 0.909090909 BTC = 0.09090909 BTC = 9_090_909 sats profit
///
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | true |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/bin/coordinator.rs | coordinator/src/bin/coordinator.rs | use anyhow::Context;
use anyhow::Result;
use bitcoin::key::XOnlyPublicKey;
use coordinator::backup::SledBackup;
use coordinator::cli::Opts;
use coordinator::db;
use coordinator::dlc_handler;
use coordinator::dlc_handler::DlcHandler;
use coordinator::funding_fee::generate_funding_fee_events_periodically;
use coordinator::logger;
use coordinator::message::spawn_delivering_messages_to_authenticated_users;
use coordinator::message::NewUserMessage;
use coordinator::node::expired_positions;
use coordinator::node::liquidated_positions;
use coordinator::node::rollover;
use coordinator::node::storage::NodeStorage;
use coordinator::node::unrealized_pnl;
use coordinator::node::Node;
use coordinator::notifications::NotificationService;
use coordinator::orderbook::async_match;
use coordinator::orderbook::collaborative_revert;
use coordinator::orderbook::trading;
use coordinator::routes::router;
use coordinator::run_migration;
use coordinator::scheduler::NotificationScheduler;
use coordinator::settings::Settings;
use coordinator::storage::CoordinatorTenTenOneStorage;
use coordinator::trade::websocket::InternalPositionUpdateMessage;
use diesel::r2d2;
use diesel::r2d2::ConnectionManager;
use diesel::PgConnection;
use lnd_bridge::LndBridge;
use rand::thread_rng;
use rand::RngCore;
use std::backtrace::Backtrace;
use std::net::IpAddr;
use std::net::Ipv4Addr;
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::mpsc;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::broadcast;
use tokio::task::spawn_blocking;
use tokio_cron_scheduler::JobScheduler;
use tracing::metadata::LevelFilter;
use xxi_node::node::event::NodeEventHandler;
use xxi_node::seed::Bip39Seed;
use xxi_node::storage::DlcChannelEvent;
/// How often the coordinator polls for and processes incoming DLC messages.
const PROCESS_INCOMING_DLC_MESSAGES_INTERVAL: Duration = Duration::from_millis(200);
/// How often liquidated positions are checked and acted upon.
const LIQUIDATED_POSITION_SYNC_INTERVAL: Duration = Duration::from_secs(30);
/// How often expired positions are checked and closed.
const EXPIRED_POSITION_SYNC_INTERVAL: Duration = Duration::from_secs(5 * 60);
/// How often unrealized PnL is synced with the positions in the database.
const UNREALIZED_PNL_SYNC_INTERVAL: Duration = Duration::from_secs(10 * 60);
/// Alias the node announces itself under.
const NODE_ALIAS: &str = "10101.finance";
/// The prefix to the [`bdk_file_store`] database file where BDK persists
/// [`bdk::wallet::ChangeSet`]s.
///
/// We hard-code the prefix so that we can always be sure that we are loading the correct file on
/// start-up.
const WALLET_DB_PREFIX: &str = "10101-coordinator";
/// Coordinator entry point: loads configuration, opens storage and the database pool, starts
/// the underlying xxi node, spawns all background tasks (on-chain sync, DLC message
/// processing, PnL sync, trading/matching/rollover/revert monitors, notification scheduler),
/// and finally serves the HTTP API until it stops.
#[tokio::main]
async fn main() -> Result<()> {
    // Abort the whole process on any panic in any task: a partially failed coordinator is
    // worse than a restart.
    std::panic::set_hook(
        #[allow(clippy::print_stderr)]
        Box::new(|info| {
            let backtrace = Backtrace::force_capture();
            tracing::error!(%info, "Aborting after panic in task");
            eprintln!("{backtrace}");
            std::process::abort()
        }),
    );
    // --- Configuration and logging ---
    let opts = Opts::read();
    let data_dir = opts.data_dir()?;
    let address = opts.p2p_address;
    let http_address = opts.http_address;
    let network = opts.network();
    let oracle_infos = opts
        .get_oracle_infos()
        .into_iter()
        .map(|o| o.into())
        .collect();
    let lnd_bridge = LndBridge::new(opts.lnd_endpoint, opts.macaroon, opts.secure_lnd);
    logger::init_tracing(LevelFilter::DEBUG, opts.json, opts.tokio_console)?;
    let mut ephemeral_randomness = [0; 32];
    thread_rng().fill_bytes(&mut ephemeral_randomness);
    // Data is namespaced per network so mainnet/testnet/regtest state never mixes.
    let data_dir = data_dir.join(network.to_string());
    if !data_dir.exists() {
        std::fs::create_dir_all(&data_dir)
            .context(format!("Could not create data dir for {network}"))?;
    }
    let data_dir_string = data_dir.clone().into_os_string();
    tracing::info!("Data-dir: {data_dir_string:?}");
    let seed_path = data_dir.join("seed");
    let seed = Bip39Seed::initialize(&seed_path)?;
    let settings = Settings::new(&data_dir).await?;
    // set up database connection pool
    let manager = ConnectionManager::<PgConnection>::new(opts.database.clone());
    let pool = r2d2::Pool::builder()
        .build(manager)
        .expect("Failed to create pool.");
    let mut conn = pool.get()?;
    run_migration(&mut conn);
    // --- Storage and the underlying xxi node ---
    let storage = CoordinatorTenTenOneStorage::new(data_dir.to_string_lossy().to_string());
    let node_storage = Arc::new(NodeStorage::new(pool.clone()));
    let node_event_handler = Arc::new(NodeEventHandler::new());
    let wallet_storage = bdk_file_store::Store::open_or_create_new(
        WALLET_DB_PREFIX.as_bytes(),
        data_dir.join("wallet"),
    )?;
    let (dlc_event_sender, dlc_event_receiver) = mpsc::channel::<DlcChannelEvent>();
    let node = Arc::new(xxi_node::node::Node::new(
        NODE_ALIAS,
        network,
        data_dir.as_path(),
        storage,
        node_storage,
        wallet_storage,
        address,
        // Listen on all interfaces with the configured p2p port.
        SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), address.port()),
        opts.electrs.clone(),
        seed,
        ephemeral_randomness,
        settings.xxi.clone(),
        oracle_infos,
        XOnlyPublicKey::from_str(&opts.oracle_pubkey).expect("valid public key"),
        node_event_handler.clone(),
        dlc_event_sender,
    )?);
    let dlc_handler = DlcHandler::new(pool.clone(), node.clone());
    let _handle = dlc_handler::spawn_handling_outbound_dlc_messages(
        dlc_handler,
        node_event_handler.subscribe(),
    );
    let running = node.start(dlc_event_receiver)?;
    // --- User messaging / notifications ---
    let (tx_user_feed, _rx) = broadcast::channel::<NewUserMessage>(100);
    let notification_service = NotificationService::new(opts.fcm_api_key.clone(), pool.clone());
    let (_handle, auth_users_notifier) = spawn_delivering_messages_to_authenticated_users(
        notification_service.get_sender(),
        tx_user_feed.clone(),
    );
    // an internal channel to send updates about our position
    let (tx_position_feed, _rx) = broadcast::channel::<InternalPositionUpdateMessage>(100);
    // Wrap the raw xxi node into the coordinator's domain-level `Node`.
    let node = Node::new(
        node,
        running,
        pool.clone(),
        settings.to_node_settings(),
        tx_position_feed.clone(),
        auth_users_notifier.clone(),
        lnd_bridge.clone(),
    );
    // TODO: Pass the tokio metrics into Prometheus
    if let Some(interval) = opts.tokio_metrics_interval_seconds {
        let handle = tokio::runtime::Handle::current();
        let runtime_monitor = tokio_metrics::RuntimeMonitor::new(&handle);
        let frequency = Duration::from_secs(interval);
        tokio::spawn(async move {
            for metrics in runtime_monitor.intervals() {
                tracing::debug!(?metrics, "tokio metrics");
                tokio::time::sleep(frequency).await;
            }
        });
    }
    // --- Background tasks ---
    // Periodic on-chain wallet sync plus the DLC manager's periodic check (run on a blocking
    // thread since the check is synchronous).
    tokio::spawn({
        let node = node.clone();
        // TODO: Do we still want to be able to update this at runtime?
        let interval = settings.xxi.on_chain_sync_interval;
        async move {
            loop {
                if let Err(e) = node.inner.sync_on_chain_wallet().await {
                    tracing::info!("On-chain sync failed: {e:#}");
                }
                spawn_blocking({
                    let node = node.clone();
                    move || {
                        if let Err(e) = node.inner.dlc_manager.periodic_check() {
                            tracing::error!("Failed to run DLC manager periodic check: {e:#}");
                        }
                    }
                })
                .await
                .expect("task to complete");
                tokio::time::sleep(interval).await;
            }
        }
    });
    // Poll for incoming DLC messages on a blocking thread.
    tokio::spawn({
        let node = node.clone();
        async move {
            loop {
                let node = node.clone();
                spawn_blocking(move || node.process_incoming_dlc_messages())
                    .await
                    .expect("To spawn blocking thread");
                tokio::time::sleep(PROCESS_INCOMING_DLC_MESSAGES_INTERVAL).await;
            }
        }
    });
    // Periodically sync unrealized PnL into the positions table.
    tokio::spawn({
        let node = node.clone();
        async move {
            loop {
                tokio::time::sleep(UNREALIZED_PNL_SYNC_INTERVAL).await;
                if let Err(e) = unrealized_pnl::sync(node.clone()).await {
                    tracing::error!(
                        "Failed to sync unrealized PnL with positions in database: {e:#}"
                    );
                }
            }
        }
    });
    // Trading engine plus the various monitors (async matches, rollovers, collaborative
    // reverts). Handles are intentionally dropped; the tasks keep running detached.
    let (tx_orderbook_feed, _rx) = broadcast::channel(100);
    let (_handle, trading_sender) = trading::start(
        node.clone(),
        tx_orderbook_feed.clone(),
        auth_users_notifier.clone(),
        notification_service.get_sender(),
        network,
        node.inner.oracle_pubkey,
    );
    let _handle = async_match::monitor(
        node.clone(),
        node_event_handler.subscribe(),
        auth_users_notifier.clone(),
        network,
        node.inner.oracle_pubkey,
    );
    let _handle = rollover::monitor(
        pool.clone(),
        node_event_handler.subscribe(),
        notification_service.get_sender(),
        network,
        node.clone(),
    );
    let _handle = collaborative_revert::monitor(
        pool.clone(),
        tx_user_feed.clone(),
        auth_users_notifier.clone(),
        network,
    );
    node.spawn_watch_dlc_channel_events_task();
    // Periodically close expired positions.
    tokio::spawn({
        let node = node.clone();
        let trading_sender = trading_sender.clone();
        async move {
            loop {
                tokio::time::sleep(EXPIRED_POSITION_SYNC_INTERVAL).await;
                if let Err(e) = expired_positions::close(node.clone(), trading_sender.clone()).await
                {
                    tracing::error!("Failed to close expired positions! Error: {e:#}");
                }
            }
        }
    });
    // Periodically check for liquidated positions.
    tokio::spawn({
        let node = node.clone();
        let trading_sender = trading_sender.clone();
        async move {
            loop {
                tokio::time::sleep(LIQUIDATED_POSITION_SYNC_INTERVAL).await;
                liquidated_positions::monitor(node.clone(), trading_sender.clone()).await
            }
        }
    });
    // --- HTTP API ---
    let user_backup = SledBackup::new(data_dir.to_string_lossy().to_string());
    let app = router(
        node.clone(),
        pool.clone(),
        settings.clone(),
        NODE_ALIAS,
        trading_sender,
        tx_orderbook_feed,
        tx_position_feed,
        tx_user_feed,
        auth_users_notifier.clone(),
        notification_service.get_sender(),
        user_backup,
        lnd_bridge,
    );
    // Register all notification/metrics cron jobs, then start the scheduler.
    let sender = notification_service.get_sender();
    let scheduler = NotificationScheduler::new(sender, settings.clone(), network, node).await;
    tokio::spawn({
        let pool = pool.clone();
        async move {
            scheduler
                .add_rollover_window_reminder_job(pool.clone())
                .await
                .expect("To add the rollover window reminder job");
            scheduler
                .add_rollover_window_close_reminder_job(pool.clone())
                .await
                .expect("To add the rollover window close reminder job");
            scheduler
                .add_reminder_to_close_expired_position_job(pool.clone())
                .await
                .expect("To add the close expired position reminder job");
            scheduler
                .add_reminder_to_close_liquidated_position_job(pool.clone())
                .await
                .expect("To add the close liquidated position reminder job");
            scheduler
                .add_collect_metrics_job(pool.clone())
                .await
                .expect("To add the collect metrics job");
            scheduler
                .start()
                .await
                .expect("to be able to start scheduler");
        }
    });
    // Best-effort cleanup of pending hodl invoices left over from a previous run; a failure is
    // logged but does not prevent start-up.
    if let Err(e) = spawn_blocking({
        let pool = pool.clone();
        move || {
            let mut conn = pool.get()?;
            db::hodl_invoice::cancel_pending_hodl_invoices(&mut conn)?;
            anyhow::Ok(())
        }
    })
    .await
    .expect("task to finish")
    {
        tracing::error!("Failed to set expired hodl invoices to canceled. Error: {e:#}");
    }
    generate_funding_fee_events_periodically(
        &JobScheduler::new().await?,
        pool.clone(),
        auth_users_notifier,
        settings.generate_funding_fee_events_scheduler,
        settings.index_price_source,
    )
    .await
    .expect("to start task");
    // Serve the HTTP API; this blocks until the server stops.
    tracing::debug!("Listening on http://{}", http_address);
    match axum::Server::bind(&http_address)
        .serve(app.into_make_service_with_connect_info::<SocketAddr>())
        .await
    {
        Ok(_) => {
            tracing::info!("HTTP server stopped running");
        }
        Err(e) => {
            tracing::error!("HTTP server stopped running: {e:#}");
        }
    }
    Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/orderbook/async_match.rs | coordinator/src/orderbook/async_match.rs | use crate::check_version::check_version;
use crate::db;
use crate::message::OrderbookMessage;
use crate::node::Node;
use crate::orderbook::db::matches;
use crate::orderbook::db::orders;
use crate::trade::TradeExecutor;
use anyhow::ensure;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use bitcoin::secp256k1::XOnlyPublicKey;
use bitcoin::Network;
use futures::future::RemoteHandle;
use futures::FutureExt;
use rust_decimal::prelude::ToPrimitive;
use time::OffsetDateTime;
use tokio::sync::broadcast;
use tokio::sync::broadcast::error::RecvError;
use tokio::sync::mpsc;
use tokio::task::spawn_blocking;
use xxi_node::commons;
use xxi_node::commons::ContractSymbol;
use xxi_node::commons::FilledWith;
use xxi_node::commons::Match;
use xxi_node::commons::Matches;
use xxi_node::commons::OrderState;
use xxi_node::commons::TradeAndChannelParams;
use xxi_node::commons::TradeParams;
use xxi_node::node::event::NodeEvent;
pub fn monitor(
node: Node,
mut receiver: broadcast::Receiver<NodeEvent>,
notifier: mpsc::Sender<OrderbookMessage>,
network: Network,
oracle_pk: XOnlyPublicKey,
) -> RemoteHandle<()> {
let (fut, remote_handle) = async move {
loop {
match receiver.recv().await {
Ok(NodeEvent::Connected { peer: trader_id }) => {
tokio::spawn({
let notifier = notifier.clone();
let node = node.clone();
async move {
tracing::debug!(
%trader_id,
"Checking if the user needs to be notified about pending matches"
);
if let Err(e) =
process_pending_match(node, notifier, trader_id, network, oracle_pk)
.await
{
tracing::error!("Failed to process pending match. Error: {e:#}");
}
}
});
}
Ok(_) => {} // ignoring other node events
Err(RecvError::Closed) => {
tracing::error!("Node event sender died! Channel closed.");
break;
}
Err(RecvError::Lagged(skip)) => {
tracing::warn!(%skip, "Lagging behind on node events.")
}
}
}
}
.remote_handle();
tokio::spawn(fut);
remote_handle
}
/// Checks if there are any pending matches for `trader_id` and, if so,
/// re-triggers trade execution for them.
///
/// Called when a trader connects: an order that was matched while the trader
/// was offline stays in [`OrderState::Matched`] until the trade can actually
/// be executed against the trader's node.
async fn process_pending_match(
    node: Node,
    notifier: mpsc::Sender<OrderbookMessage>,
    trader_id: PublicKey,
    network: Network,
    oracle_pk: XOnlyPublicKey,
) -> Result<()> {
    // Pool checkout can block, so it is done on a blocking thread.
    let mut conn = spawn_blocking({
        let node = node.clone();
        move || node.pool.get()
    })
    .await
    .expect("task to complete")?;

    // Traders on an outdated app version are skipped entirely; they cannot
    // execute the trade protocol anyway.
    if check_version(&mut conn, &trader_id).is_err() {
        tracing::info!(%trader_id, "User is not on the latest version. Skipping check if user needs to be informed about pending matches.");
        return Ok(());
    }

    if let Some(order) =
        orders::get_by_trader_id_and_state(&mut conn, trader_id, OrderState::Matched)?
    {
        tracing::debug!(%trader_id, order_id=%order.id, "Executing pending match");

        let matches = matches::get_matches_by_order_id(&mut conn, order.id)?;
        let filled_with = get_filled_with_from_matches(matches, network, oracle_pk)?;

        // Only present if this trade is supposed to open a new DLC channel.
        let channel_opening_params =
            db::channel_opening_params::get_by_order_id(&mut conn, order.id)?;

        tracing::info!(trader_id = %order.trader_id, order_id = %order.id, order_reason = ?order.order_reason, "Executing trade for match");
        let trade_executor = TradeExecutor::new(node, notifier);
        trade_executor
            .execute(&TradeAndChannelParams {
                trade_params: TradeParams {
                    pubkey: trader_id,
                    contract_symbol: ContractSymbol::BtcUsd,
                    leverage: order.leverage,
                    quantity: order.quantity.to_f32().expect("to fit into f32"),
                    direction: order.direction,
                    filled_with,
                },
                trader_reserve: channel_opening_params.map(|c| c.trader_reserve),
                coordinator_reserve: channel_opening_params.map(|c| c.coordinator_reserve),
                external_funding: channel_opening_params.and_then(|c| c.external_funding),
            })
            .await;
    }

    Ok(())
}
/// Build a [`FilledWith`] from a non-empty list of [`Matches`] records.
///
/// The `order_id` is taken from the first record and the expiry is the next
/// contract expiry relative to the current time.
fn get_filled_with_from_matches(
    matches: Vec<Matches>,
    network: Network,
    oracle_pk: XOnlyPublicKey,
) -> Result<FilledWith> {
    ensure!(
        !matches.is_empty(),
        "Need at least one matches record to construct a FilledWith"
    );

    let order_id = matches
        .first()
        .expect("to have at least one match")
        .order_id;

    let expiry_timestamp = commons::calculate_next_expiry(OffsetDateTime::now_utc(), network);

    // Convert every DB match record into its wire representation.
    let mut match_params = Vec::with_capacity(matches.len());
    for m in matches.iter() {
        match_params.push(Match {
            id: m.id,
            order_id: m.order_id,
            quantity: m.quantity,
            pubkey: m.match_trader_id,
            execution_price: m.execution_price,
            matching_fee: m.matching_fee,
        });
    }

    Ok(FilledWith {
        order_id,
        expiry_timestamp,
        oracle_pk,
        matches: match_params,
    })
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/orderbook/websocket.rs | coordinator/src/orderbook/websocket.rs | use crate::db;
use crate::db::user;
use crate::funding_fee::get_funding_fee_events_for_active_trader_positions;
use crate::funding_fee::get_next_funding_rate;
use crate::message::NewUserMessage;
use crate::orderbook::db::orders;
use crate::orderbook::trading::NewOrderMessage;
use crate::referrals;
use crate::routes::AppState;
use anyhow::bail;
use anyhow::Result;
use axum::extract::ws::Message as WebsocketMessage;
use axum::extract::ws::WebSocket;
use bitcoin::secp256k1::PublicKey;
use futures::SinkExt;
use futures::StreamExt;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::broadcast::error::RecvError;
use tokio::sync::mpsc;
use tokio::task::spawn_blocking;
use uuid::Uuid;
use xxi_node::commons::create_sign_message;
use xxi_node::commons::Message;
use xxi_node::commons::NewLimitOrder;
use xxi_node::commons::OrderReason;
use xxi_node::commons::OrderbookRequest;
use xxi_node::commons::ReferralStatus;
use xxi_node::commons::TenTenOneConfig;
use xxi_node::commons::AUTH_SIGN_MESSAGE;
const WEBSOCKET_SEND_TIMEOUT: Duration = Duration::from_secs(5);
/// Insert a maker's limit order into the orderbook and hand it to the trading
/// task for processing.
///
/// # Errors
///
/// Fails if the authenticated maker tries to insert an order on behalf of a
/// different trader, or if the database insert fails.
async fn handle_insert_order(
    state: Arc<AppState>,
    trader_id: PublicKey,
    order: NewLimitOrder,
) -> Result<()> {
    if order.trader_id != trader_id {
        bail!("Maker {trader_id} tried to trade on behalf of someone else: {order:?}");
    }

    tracing::trace!(?order, "Inserting order");

    let order = spawn_blocking({
        let pool = state.pool.clone();
        move || {
            // Checking out a pooled connection can block, so do it inside the
            // blocking task instead of on the async executor thread.
            let mut conn = pool.get()?;
            let order = orders::insert_limit_order(&mut conn, order, OrderReason::Manual)?;
            anyhow::Ok(order)
        }
    })
    .await??;

    // Best effort: if the trading task has shut down, a failed send is not
    // fatal for this request.
    let _ = state
        .trading_sender
        .send(NewOrderMessage {
            order,
            channel_opening_params: None,
            order_reason: OrderReason::Manual,
        })
        .await;

    Ok(())
}
/// Delete one of the authenticated maker's orders and broadcast the removal
/// on the orderbook feed.
///
/// # Errors
///
/// Fails if the database delete fails.
async fn handle_delete_order(
    state: Arc<AppState>,
    trader_id: PublicKey,
    order_id: Uuid,
) -> Result<()> {
    tracing::trace!(%order_id, "Deleting order");

    spawn_blocking({
        let pool = state.pool.clone();
        move || {
            // Checking out a pooled connection can block, so do it inside the
            // blocking task instead of on the async executor thread.
            let mut conn = pool.get()?;
            orders::delete_trader_order(&mut conn, order_id, trader_id)?;
            anyhow::Ok(())
        }
    })
    .await??;

    // Best effort: no subscribers on the feed is not an error here.
    let _ = state.tx_orderbook_feed.send(Message::DeleteOrder(order_id));

    Ok(())
}
// This function deals with a single websocket connection, i.e., a single
// connected client / user, for which we will spawn two independent tasks (for
// receiving / sending messages).
//
// Three tasks cooperate per connection:
// - `local_recv_task`: drains the per-connection mpsc channel and writes each
//   message to the websocket (with a send timeout).
// - `send_task`: forwards the global orderbook broadcast feed into that
//   per-connection channel.
// - `recv_task`: reads client requests (authenticate / insert / delete) from
//   the websocket.
// When any one of them finishes, the other two are aborted.
pub async fn websocket_connection(stream: WebSocket, state: Arc<AppState>) {
    // By splitting, we can send and receive at the same time.
    let (mut sender, mut receiver) = stream.split();

    // We subscribe *before* sending the "joined" message, so that we will also
    // display it to our client.
    let mut price_feed = state.tx_orderbook_feed.subscribe();

    // All outbound traffic for this client is funneled through this channel so
    // only one task ever writes to the websocket sink.
    let (local_sender, mut local_receiver) = mpsc::channel::<Message>(100);

    let mut local_recv_task = tokio::spawn(async move {
        while let Some(local_msg) = local_receiver.recv().await {
            match serde_json::to_string(&local_msg) {
                Ok(msg) => {
                    // Guard against a stalled client: give up on this
                    // connection if a single send takes too long.
                    if let Err(err) = tokio::time::timeout(
                        WEBSOCKET_SEND_TIMEOUT,
                        sender.send(WebsocketMessage::Text(msg.clone())),
                    )
                    .await
                    {
                        tracing::error!("Could not forward message {msg} : {err:#}");
                        return;
                    }
                }
                Err(error) => {
                    tracing::warn!("Could not deserialize message {error:#}");
                }
            }
        }
    });

    // Spawn the first task that will receive broadcast messages and send
    // messages over the websocket to our client.
    let mut send_task = {
        let local_sender = local_sender.clone();
        tokio::spawn(async move {
            loop {
                match price_feed.recv().await {
                    Ok(st) => {
                        if let Err(error) = local_sender.send(st).await {
                            tracing::error!("Could not send message {error:#}");
                            return;
                        }
                    }
                    Err(RecvError::Closed) => {
                        tracing::error!("price feed sender died! Channel closed.");
                        break;
                    }
                    Err(RecvError::Lagged(skip)) => tracing::warn!(%skip,
                        "Lagging behind on price feed."
                    ),
                }
            }
        })
    };

    // Spawn a task that takes messages from the websocket
    let local_sender = local_sender.clone();
    let mut recv_task = tokio::spawn(async move {
        // Set once the client has authenticated AND is allowed to act as a
        // maker (whitelist disabled, or pubkey whitelisted). Insert/delete
        // requests are rejected until then.
        let mut whitelisted_maker = Option::<PublicKey>::None;
        while let Some(Ok(WebsocketMessage::Text(text))) = receiver.next().await {
            match serde_json::from_str(text.as_str()) {
                Ok(OrderbookRequest::InsertOrder(order)) => {
                    let order_id = order.id;
                    match whitelisted_maker {
                        Some(authenticated_trader_id) => {
                            if let Err(e) =
                                handle_insert_order(state.clone(), authenticated_trader_id, order)
                                    .await
                            {
                                tracing::error!(%order_id, "Failed to insert order: {e:#}");
                                // TODO: Send error to peer.
                            }
                        }
                        None => {
                            tracing::error!(
                                ?order,
                                "Failed to insert order: maker not yet authenticated"
                            );
                        }
                    }
                }
                Ok(OrderbookRequest::DeleteOrder(order_id)) => {
                    match whitelisted_maker {
                        Some(authenticated_trader_id) => {
                            if let Err(e) = handle_delete_order(
                                state.clone(),
                                authenticated_trader_id,
                                order_id,
                            )
                            .await
                            {
                                tracing::error!(%order_id, "Failed to delete order: {e:#}");
                                // TODO: Send error to peer.
                            }
                        }
                        None => {
                            tracing::error!(
                                %order_id,
                                "Failed to delete order: maker not yet authenticated"
                            );
                        }
                    }
                }
                Ok(OrderbookRequest::Authenticate {
                    fcm_token,
                    version,
                    os,
                    signature,
                }) => {
                    // The client proves ownership of its pubkey by signing a
                    // fixed, well-known message.
                    let msg = create_sign_message(AUTH_SIGN_MESSAGE.to_vec());
                    let trader_id = signature.pubkey;
                    let signature = signature.signature;

                    let mut conn = match state.pool.clone().get() {
                        Ok(conn) => conn,
                        Err(err) => {
                            tracing::error!("Could not get connection to db pool {err:#}");
                            // Ends the recv task; the select! below tears down
                            // the rest of the connection.
                            return;
                        }
                    };
                    match state.secp.verify_ecdsa(&msg, &signature, &trader_id) {
                        Ok(_) => {
                            // On success the client receives, in order: the
                            // current config, all open limit orders, missed
                            // funding fee events and the next funding rate.
                            let liquidity_options =
                                db::liquidity_options::get_all(&mut conn).unwrap_or_default();
                            let (
                                min_quantity,
                                maintenance_margin_rate,
                                order_matching_fee_rate,
                                max_leverage,
                            ) = {
                                // Copy the values out so the read lock is not
                                // held across the awaits below.
                                let settings = state.settings.read().await;
                                (
                                    settings.min_quantity,
                                    settings.maintenance_margin_rate,
                                    settings.order_matching_fee_rate,
                                    settings.max_leverage,
                                )
                            };
                            let referral_status = referrals::update_referral_status_for_user(
                                &mut conn,
                                trader_id.to_string(),
                            )
                            .unwrap_or(ReferralStatus::new(trader_id));
                            if let Err(e) = local_sender
                                .send(Message::Authenticated(TenTenOneConfig {
                                    liquidity_options,
                                    min_quantity,
                                    maintenance_margin_rate,
                                    order_matching_fee_rate,
                                    referral_status,
                                    max_leverage,
                                }))
                                .await
                            {
                                tracing::error!(%trader_id, "Could not respond to user {e:#}");
                                return;
                            }

                            let orders = orders::all_limit_orders(&mut conn).unwrap_or_default();
                            if let Err(e) = local_sender.send(Message::AllOrders(orders)).await {
                                tracing::error!(%trader_id, "Failed to send all orders to user {e:#}");
                            }

                            // Send over all the funding fee events that the trader may have missed
                            // whilst they were offline.
                            match get_funding_fee_events_for_active_trader_positions(
                                &mut conn, trader_id,
                            ) {
                                Ok(funding_fee_events) => {
                                    if let Err(e) = local_sender
                                        .send(Message::AllFundingFeeEvents(funding_fee_events))
                                        .await
                                    {
                                        tracing::error!(
                                            %trader_id,
                                            "Failed to send funding fee events \
                                             for active positions: {e}"
                                        );
                                    }
                                }
                                Err(e) => {
                                    tracing::error!(
                                        %trader_id,
                                        "Failed to load funding fee events \
                                         for active positions: {e}"
                                    );
                                }
                            }

                            match get_next_funding_rate(&mut conn) {
                                Ok(Some(funding_rate)) => {
                                    if let Err(e) = local_sender
                                        .send(Message::NextFundingRate(funding_rate))
                                        .await
                                    {
                                        tracing::error!(
                                            %trader_id,
                                            "Failed to send next funding rate: {e}"
                                        );
                                    }
                                }
                                Ok(None) => {
                                    tracing::error!(
                                        %trader_id,
                                        "No next funding rate found in DB"
                                    );
                                }
                                Err(e) => {
                                    tracing::error!(
                                        %trader_id,
                                        "Failed to load next funding rate: {e}"
                                    );
                                }
                            }

                            let token = fcm_token.unwrap_or("unavailable".to_string());
                            if let Err(e) =
                                user::login_user(&mut conn, trader_id, token, version, os)
                            {
                                tracing::error!(%trader_id, "Failed to update logged in user. Error: {e:#}")
                            }

                            let message = NewUserMessage {
                                new_user: trader_id,
                                sender: local_sender.clone(),
                            };
                            tracing::debug!(%trader_id, "New login");

                            // Check if the trader is a whitelisted maker.
                            {
                                let settings = state.settings.read().await;
                                if !settings.whitelist_enabled
                                    || settings.whitelisted_makers.contains(&trader_id)
                                {
                                    whitelisted_maker = Some(trader_id);
                                }
                            }

                            if let Err(e) = state.tx_user_feed.send(message) {
                                tracing::error!(%trader_id, "Could not send new user message. Error: {e:#}");
                            }
                        }
                        Err(err) => {
                            if let Err(er) = local_sender
                                .send(Message::InvalidAuthentication(format!(
                                    "Could not authenticate {err:#}"
                                )))
                                .await
                            {
                                tracing::error!(
                                    %trader_id, "Failed to notify user about invalid authentication: {er:#}"
                                );
                                return;
                            }
                        }
                    }
                }
                Err(err) => {
                    tracing::trace!("Could not deserialize msg: {text} {err:#}");
                }
            }
        }
    });

    // If any one of the tasks run to completion, we abort the other.
    tokio::select! {
        _ = (&mut send_task) => {
            recv_task.abort();
            local_recv_task.abort()
        },
        _ = (&mut recv_task) => {
            send_task.abort();
            local_recv_task.abort()
        },
        _ = (&mut local_recv_task) => {
            recv_task.abort();
            send_task.abort();
        },
    };
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/orderbook/trading.rs | coordinator/src/orderbook/trading.rs | use crate::db;
use crate::message::OrderbookMessage;
use crate::node::Node;
use crate::notifications::Notification;
use crate::notifications::NotificationKind;
use crate::orderbook::db::matches;
use crate::orderbook::db::orders;
use crate::referrals;
use crate::trade::TradeExecutor;
use crate::ChannelOpeningParams;
use anyhow::anyhow;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use bitcoin::secp256k1::XOnlyPublicKey;
use bitcoin::Amount;
use bitcoin::Network;
use futures::future::RemoteHandle;
use futures::FutureExt;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use rust_decimal::RoundingStrategy;
use std::cmp::Ordering;
use time::OffsetDateTime;
use tokio::sync::broadcast;
use tokio::sync::mpsc;
use tokio::task::spawn_blocking;
use uuid::Uuid;
use xxi_node::commons::ContractSymbol;
use xxi_node::commons::Direction;
use xxi_node::commons::FilledWith;
use xxi_node::commons::Match;
use xxi_node::commons::Message;
use xxi_node::commons::Message::TradeError;
use xxi_node::commons::Order;
use xxi_node::commons::OrderReason;
use xxi_node::commons::OrderState;
use xxi_node::commons::OrderType;
use xxi_node::commons::TradeAndChannelParams;
use xxi_node::commons::TradeParams;
use xxi_node::commons::TradingError;
/// This value is arbitrarily set to 100 and defines the number of new order messages buffered in
/// the channel.
const NEW_ORDERS_BUFFER_SIZE: usize = 100;
/// Message consumed by the trading task for every new order it has to process.
pub struct NewOrderMessage {
    /// The order to be matched.
    pub order: Order,
    /// Why the order was created (manual trade, expiry or liquidation).
    pub order_reason: OrderReason,
    /// Only set when executing the trade should open a new DLC channel.
    pub channel_opening_params: Option<ChannelOpeningParams>,
}

/// The complete outcome of matching one market order: the taker's side plus
/// one entry per matched maker order.
#[derive(Clone)]
pub struct MatchParams {
    pub taker_match: TraderMatchParams,
    pub makers_matches: Vec<TraderMatchParams>,
}

/// Match parameters from the perspective of a single trader.
#[derive(Clone)]
pub struct TraderMatchParams {
    pub trader_id: PublicKey,
    pub filled_with: FilledWith,
}
/// Spawn a task that processes [`NewOrderMessage`]s.
///
/// To feed messages to this task, the caller can use the corresponding
/// [`mpsc::Sender<NewOrderMessage>`] returned.
///
/// Each incoming order is processed on its own spawned task; a processing
/// error for a manually placed order is reported back to the trader via the
/// `trade_notifier`. Dropping the returned [`RemoteHandle`] stops the loop.
pub fn start(
    node: Node,
    tx_orderbook_feed: broadcast::Sender<Message>,
    trade_notifier: mpsc::Sender<OrderbookMessage>,
    notifier: mpsc::Sender<Notification>,
    network: Network,
    oracle_pk: XOnlyPublicKey,
) -> (RemoteHandle<()>, mpsc::Sender<NewOrderMessage>) {
    let (sender, mut receiver) = mpsc::channel::<NewOrderMessage>(NEW_ORDERS_BUFFER_SIZE);

    let (fut, remote_handle) = async move {
        while let Some(new_order_msg) = receiver.recv().await {
            // One task per order so a slow match does not block the queue.
            tokio::spawn({
                let tx_orderbook_feed = tx_orderbook_feed.clone();
                let notifier = notifier.clone();
                let trade_notifier = trade_notifier.clone();
                let node = node.clone();
                async move {
                    let new_order = new_order_msg.order;
                    let trader_id = new_order.trader_id;
                    let order_id = new_order.id;
                    let channel_opening_params = new_order_msg.channel_opening_params;

                    tracing::trace!(
                        %trader_id,
                        %order_id,
                        order_type = ?new_order.order_type,
                        "Processing new order",
                    );

                    if let Err(error) = match &new_order.order_type {
                        OrderType::Market => {
                            process_new_market_order(
                                node,
                                notifier.clone(),
                                trade_notifier.clone(),
                                &new_order,
                                network,
                                oracle_pk,
                                channel_opening_params
                            )
                            .await
                        }
                        OrderType::Limit => {
                            process_new_limit_order(
                                node,
                                tx_orderbook_feed,
                                new_order.clone(),
                            )
                            .await
                        }
                    } {
                        // Only manual orders have a trader waiting for a
                        // response; system-generated orders fail silently
                        // (logged below via the send error path only).
                        if new_order.order_reason == OrderReason::Manual {
                            // TODO(holzeis): the maker is currently not subscribed to the websocket
                            // api, hence it wouldn't receive the error message.
                            if let Err(e) = trade_notifier
                                .send(OrderbookMessage::TraderMessage {
                                    trader_id,
                                    message: TradeError { order_id, error },
                                    notification: None,
                                })
                                .await
                            {
                                tracing::error!(%trader_id, %order_id, "Failed to send trade error. Error: {e:#}");
                            }
                        }
                    }
                }
            });
        }

        tracing::error!("Channel closed");
    }
    .remote_handle();

    tokio::spawn(fut);

    (remote_handle, sender)
}
/// Process a new limit order: prune expired limit orders and announce the new
/// order on the orderbook feed.
///
/// Limit orders are not matched here; they only become relevant when a market
/// order arrives (see `process_new_market_order`).
pub async fn process_new_limit_order(
    node: Node,
    tx_orderbook_feed: broadcast::Sender<Message>,
    order: Order,
) -> Result<(), TradingError> {
    // Pool checkout can block, so it is done on a blocking thread.
    let mut conn = spawn_blocking(move || node.pool.get())
        .await
        .expect("task to complete")
        .map_err(|e| anyhow!("{e:#}"))?;

    // Before processing any match we set all expired limit orders to failed, to ensure they do not
    // get matched.
    //
    // TODO(holzeis): Orders should probably not have an expiry, but should either be replaced or
    // deleted if not wanted anymore.
    // TODO: I don't think this is necessary anymore. We are manually deleting orders now.
    let expired_limit_orders =
        orders::set_expired_limit_orders_to_expired(&mut conn).map_err(|e| anyhow!("{e:#}"))?;

    // Tell all feed subscribers about every order that just expired.
    for expired_limit_order in expired_limit_orders {
        tx_orderbook_feed
            .send(Message::DeleteOrder(expired_limit_order.id))
            .context("Could not update price feed")?;
    }

    tx_orderbook_feed
        .send(Message::NewOrder(order))
        .map_err(|e| anyhow!(e))
        .context("Could not update price feed")?;

    Ok(())
}
// TODO(holzeis): This functions runs multiple inserts in separate db transactions. This should only
// happen in a single transaction to ensure either all data or nothing is stored to the database.
/// Process a new market order end-to-end: match it against open limit orders,
/// persist the matches, notify involved traders and — if the trader is
/// connected — execute the resulting trade.
///
/// # Errors
///
/// - `TradingError::InvalidOrder` if the trader already has a matched order in
///   execution.
/// - `TradingError::NoMatchFound` if no limit order could fill the order (the
///   order is then marked `Failed`).
/// - `TradingError::Other` for matching/database failures.
pub async fn process_new_market_order(
    node: Node,
    notifier: mpsc::Sender<Notification>,
    trade_notifier: mpsc::Sender<OrderbookMessage>,
    order: &Order,
    network: Network,
    oracle_pk: XOnlyPublicKey,
    channel_opening_params: Option<ChannelOpeningParams>,
) -> Result<(), TradingError> {
    // Pool checkout can block, so it is done on a blocking thread.
    let mut conn = spawn_blocking({
        let node = node.clone();
        move || node.pool.get()
    })
    .await
    .expect("task to complete")
    .map_err(|e| anyhow!("{e:#}"))?;

    // Reject new order if there is already a matched order waiting for execution.
    if let Some(order) =
        orders::get_by_trader_id_and_state(&mut conn, order.trader_id, OrderState::Matched)
            .map_err(|e| anyhow!("{e:#}"))?
    {
        return Err(TradingError::InvalidOrder(format!(
            "trader_id={}, order_id={}. Order is currently in execution. \
             Can't accept new orders until the order execution is finished",
            order.trader_id, order.id
        )));
    }

    // A market order can only ever match limit orders on the opposite side of
    // the book.
    let opposite_direction_limit_orders = orders::all_by_direction_and_type(
        &mut conn,
        order.direction.opposite(),
        OrderType::Limit,
        true,
    )
    .map_err(|e| anyhow!("{e:#}"))?;

    let fee_percent = { node.settings.read().await.order_matching_fee_rate };
    let fee_percent = Decimal::try_from(fee_percent).expect("to fit into decimal");

    // Apply the trader's referral bonus as a discount on the matching fee.
    let trader_pubkey_string = order.trader_id.to_string();
    let status = referrals::get_referral_status(order.trader_id, &mut conn)?;
    let fee_discount = status.referral_fee_bonus;
    let fee_percent = fee_percent - (fee_percent * fee_discount);

    tracing::debug!(
        trader_pubkey = trader_pubkey_string,
        %fee_discount, total_fee_percent = %fee_percent, "Fee discount calculated");

    let matched_orders = match match_order(
        order,
        opposite_direction_limit_orders,
        network,
        oracle_pk,
        fee_percent,
    ) {
        Ok(Some(matched_orders)) => matched_orders,
        Ok(None) => {
            // TODO(holzeis): Currently we still respond to the user immediately if there
            // has been a match or not, that's the reason why we also have to set the order
            // to failed here. But actually we could keep the order until either expired or
            // a match has been found and then update the state accordingly.
            orders::set_order_state(&mut conn, order.id, OrderState::Failed)
                .map_err(|e| anyhow!("{e:#}"))?;
            return Err(TradingError::NoMatchFound(format!(
                "Could not match order {}",
                order.id
            )));
        }
        Err(e) => {
            orders::set_order_state(&mut conn, order.id, OrderState::Failed)
                .map_err(|e| anyhow!("{e:#}"))?;
            return Err(TradingError::Other(format!("Failed to match order: {e:#}")));
        }
    };

    tracing::info!(
        trader_id=%order.trader_id,
        order_id=%order.id,
        "Found a match with {} makers for new order",
        matched_orders.taker_match.filled_with.matches.len()
    );

    // Persist every match (taker + makers), push-notify where appropriate and
    // advance the order states.
    for match_param in matched_orders.matches() {
        matches::insert(&mut conn, match_param)?;

        let trader_id = match_param.trader_id;
        let order_id = match_param.filled_with.order_id.to_string();

        tracing::info!(%trader_id, order_id, "Notifying trader about match");

        let notification = match &order.order_reason {
            OrderReason::Expired => Some(NotificationKind::PositionExpired),
            OrderReason::TraderLiquidated => Some(NotificationKind::Custom {
                title: "Woops, you got liquidated 💸".to_string(),
                message: "Open your app to execute the liquidation".to_string(),
            }),
            OrderReason::CoordinatorLiquidated => Some(NotificationKind::Custom {
                title: "Your counterparty got liquidated 💸".to_string(),
                message: "Open your app to execute the liquidation".to_string(),
            }),
            OrderReason::Manual => None,
        };
        if let Some(notification) = notification {
            // send user a push notification
            notifier
                .send(Notification::new(order.trader_id, notification))
                .await
                .with_context(|| {
                    format!(
                        "Failed to send push notification. trader_id = {}",
                        order.trader_id
                    )
                })?;
        }

        let order_state = if order.order_type == OrderType::Limit {
            // FIXME: The maker is currently not connected to the WebSocket so we can't
            // notify him about a trade. However, trades are always accepted by the
            // maker at the moment so in order to not have all limit orders in order
            // state `Match` we are setting the order to `Taken` even if we couldn't
            // notify the maker.
            OrderState::Taken
        } else {
            OrderState::Matched
        };

        tracing::debug!(%trader_id, order_id, "Updating the order state to {order_state:?}");
        orders::set_order_state(&mut conn, match_param.filled_with.order_id, order_state)
            .map_err(|e| anyhow!("{e:#}"))?;
    }

    if let Some(channel_opening_params) = channel_opening_params {
        db::channel_opening_params::insert(&mut conn, order.id, channel_opening_params)
            .map_err(|e| anyhow!("{e:#}"))?;
    }

    if node.inner.is_connected(order.trader_id) {
        tracing::info!(trader_id = %order.trader_id, order_id = %order.id, order_reason = ?order.order_reason, "Executing trade for match");
        let trade_executor = TradeExecutor::new(node.clone(), trade_notifier);
        trade_executor
            .execute(&TradeAndChannelParams {
                trade_params: TradeParams {
                    pubkey: order.trader_id,
                    contract_symbol: ContractSymbol::BtcUsd,
                    leverage: order.leverage,
                    quantity: order.quantity.to_f32().expect("to fit into f32"),
                    direction: order.direction,
                    filled_with: matched_orders.taker_match.filled_with,
                },
                trader_reserve: channel_opening_params.map(|p| p.trader_reserve),
                coordinator_reserve: channel_opening_params.map(|p| p.coordinator_reserve),
                external_funding: channel_opening_params.and_then(|c| c.external_funding),
            })
            .await;
    } else {
        // The trade stays in `Matched` state; it will be executed once the
        // trader reconnects (see `async_match::monitor`).
        match order.order_reason {
            OrderReason::Manual => {
                tracing::warn!(trader_id = %order.trader_id, order_id = %order.id, order_reason = ?order.order_reason, "Skipping trade execution as trader is not connected")
            }
            OrderReason::Expired
            | OrderReason::TraderLiquidated
            | OrderReason::CoordinatorLiquidated => {
                tracing::info!(trader_id = %order.trader_id, order_id = %order.id, order_reason = ?order.order_reason, "Skipping trade execution as trader is not connected")
            }
        }
    }

    Ok(())
}
/// Matches an [`Order`] of [`OrderType::Market`] with a list of [`Order`]s of [`OrderType::Limit`].
///
/// The caller is expected to provide a list of `opposite_direction_orders` of [`OrderType::Limit`]
/// and opposite [`Direction`] to the `market_order`. We nevertheless ensure that this is the case
/// to be on the safe side.
///
/// Returns `Ok(None)` if nothing could be matched, and an error if filling the
/// market order would require more than one maker order (multi-matches are not
/// supported yet).
fn match_order(
    market_order: &Order,
    opposite_direction_orders: Vec<Order>,
    network: Network,
    oracle_pk: XOnlyPublicKey,
    fee_percent: Decimal,
) -> Result<Option<MatchParams>> {
    if market_order.order_type == OrderType::Limit {
        // We don't match limit orders with other limit orders at the moment.
        return Ok(None);
    }

    // Defensive filter: drop any order that is (incorrectly) on the same side
    // as the market order.
    let opposite_direction_orders = opposite_direction_orders
        .into_iter()
        .filter(|o| !o.direction.eq(&market_order.direction))
        .collect();

    // Best price first, ties broken by earliest timestamp.
    let mut orders = sort_orders(opposite_direction_orders, market_order.direction);

    // Greedily take orders off the top of the book until the market order's
    // quantity is covered.
    let mut remaining_quantity = market_order.quantity;
    let mut matched_orders = vec![];
    while !orders.is_empty() {
        let matched_order = orders.remove(0);
        remaining_quantity -= matched_order.quantity;
        matched_orders.push(matched_order);

        if remaining_quantity <= Decimal::ZERO {
            break;
        }
    }

    // For the time being we do not want to support multi-matches.
    if matched_orders.len() > 1 {
        bail!("More than one matched order, please reduce order quantity");
    }

    if matched_orders.is_empty() {
        return Ok(None);
    }

    let expiry_timestamp =
        xxi_node::commons::calculate_next_expiry(OffsetDateTime::now_utc(), network);

    // Build, per maker order, both sides of the match: the maker's view and
    // the corresponding entry for the taker's `FilledWith`.
    let matches = matched_orders
        .iter()
        .map(|maker_order| {
            // Fee is charged in BTC on the notional at the execution price.
            let matching_fee = market_order.quantity / maker_order.price * fee_percent;
            let matching_fee = matching_fee.round_dp_with_strategy(8, RoundingStrategy::MidpointAwayFromZero);
            let matching_fee = match Amount::from_btc(matching_fee.to_f64().expect("to fit")) {
                Ok(fee) => {fee}
                Err(err) => {
                    // Best effort: a fee conversion failure must not abort the
                    // match, so fall back to a zero fee.
                    tracing::error!(
                        trader_pubkey = maker_order.trader_id.to_string(),
                        order_id = maker_order.id.to_string(),
                        "Failed calculating order matching fee for order {err:?}. Falling back to 0");
                    Amount::ZERO
                }
            };
            (
                TraderMatchParams {
                    trader_id: maker_order.trader_id,
                    filled_with: FilledWith {
                        order_id: maker_order.id,
                        expiry_timestamp,
                        oracle_pk,
                        matches: vec![Match {
                            id: Uuid::new_v4(),
                            order_id: market_order.id,
                            quantity: market_order.quantity,
                            pubkey: market_order.trader_id,
                            execution_price: maker_order.price,
                            matching_fee,
                        }],
                    },
                },
                Match {
                    id: Uuid::new_v4(),
                    order_id: maker_order.id,
                    quantity: market_order.quantity,
                    pubkey: maker_order.trader_id,
                    execution_price: maker_order.price,
                    matching_fee,
                },
            )
        })
        .collect::<Vec<(TraderMatchParams, Match)>>();

    let mut maker_matches = vec![];
    let mut taker_matches = vec![];
    for (mm, taker_match) in matches {
        maker_matches.push(mm);
        taker_matches.push(taker_match);
    }

    Ok(Some(MatchParams {
        taker_match: TraderMatchParams {
            trader_id: market_order.trader_id,
            filled_with: FilledWith {
                order_id: market_order.id,
                expiry_timestamp,
                oracle_pk,
                matches: taker_matches,
            },
        },
        makers_matches: maker_matches,
    }))
}
/// Sort the provided limit [`Order`]s so that the best price for the given
/// market-order [`Direction`] comes first.
///
/// A short market order sells into the book, so the highest bid is best and
/// the list is sorted by descending price. A long market order buys, so the
/// lowest ask is best and the list is sorted by ascending price.
///
/// Orders at the same price are ranked first-come, first-served by their
/// `timestamp`.
fn sort_orders(mut limit_orders: Vec<Order>, market_order_direction: Direction) -> Vec<Order> {
    limit_orders.sort_by(|a, b| {
        let by_price = match market_order_direction {
            // Ascending: cheapest ask first for a long market order.
            Direction::Long => a.price.cmp(&b.price),
            // Descending: highest bid first for a short market order.
            Direction::Short => b.price.cmp(&a.price),
        };

        // Price ties are broken by the earlier timestamp.
        by_price.then_with(|| a.timestamp.cmp(&b.timestamp))
    });

    limit_orders
}
impl MatchParams {
    /// All per-trader match parameters: the taker first, followed by every
    /// maker.
    fn matches(&self) -> Vec<&TraderMatchParams> {
        let mut all = Vec::with_capacity(1 + self.makers_matches.len());
        all.push(&self.taker_match);
        all.extend(self.makers_matches.iter());
        all
    }
}
impl From<&TradeParams> for TraderMatchParams {
fn from(value: &TradeParams) -> Self {
TraderMatchParams {
trader_id: value.pubkey,
filled_with: value.filled_with.clone(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use rust_decimal_macros::dec;
use std::str::FromStr;
use time::Duration;
use xxi_node::commons::ContractSymbol;
// A short market order sells into the bids: best (highest) price must come
// first, so the limit orders are expected in descending price order.
#[test]
fn when_short_then_sort_desc() {
    let order1 = dummy_long_order(
        dec!(20_000),
        Uuid::new_v4(),
        Default::default(),
        Duration::seconds(0),
    );
    let order2 = dummy_long_order(
        dec!(21_000),
        Uuid::new_v4(),
        Default::default(),
        Duration::seconds(0),
    );
    let order3 = dummy_long_order(
        dec!(20_500),
        Uuid::new_v4(),
        Default::default(),
        Duration::seconds(0),
    );

    // Deliberately shuffled input.
    let orders = vec![order3.clone(), order1.clone(), order2.clone()];

    let orders = sort_orders(orders, Direction::Short);

    assert_eq!(orders[0], order2);
    assert_eq!(orders[1], order3);
    assert_eq!(orders[2], order1);
}
// A long market order buys from the asks: best (lowest) price must come
// first, so the limit orders are expected in ascending price order.
#[test]
fn when_long_then_sort_asc() {
    let order1 = dummy_long_order(
        dec!(20_000),
        Uuid::new_v4(),
        Default::default(),
        Duration::seconds(0),
    );
    let order2 = dummy_long_order(
        dec!(21_000),
        Uuid::new_v4(),
        Default::default(),
        Duration::seconds(0),
    );
    let order3 = dummy_long_order(
        dec!(20_500),
        Uuid::new_v4(),
        Default::default(),
        Duration::seconds(0),
    );

    // Deliberately shuffled input.
    let orders = vec![order3.clone(), order1.clone(), order2.clone()];

    let orders = sort_orders(orders, Direction::Long);

    assert_eq!(orders[0], order1);
    assert_eq!(orders[1], order3);
    assert_eq!(orders[2], order2);
}
// When all prices are equal the sort must fall back to the timestamp
// (first-come, first-served) — for both market-order directions.
#[test]
fn when_all_same_price_sort_by_id() {
    let order1 = dummy_long_order(
        dec!(20_000),
        Uuid::new_v4(),
        Default::default(),
        Duration::seconds(0),
    );
    let order2 = dummy_long_order(
        dec!(20_000),
        Uuid::new_v4(),
        Default::default(),
        Duration::seconds(1),
    );
    let order3 = dummy_long_order(
        dec!(20_000),
        Uuid::new_v4(),
        Default::default(),
        Duration::seconds(2),
    );

    let orders = vec![order3.clone(), order1.clone(), order2.clone()];

    let orders = sort_orders(orders, Direction::Long);

    assert_eq!(orders[0], order1);
    assert_eq!(orders[1], order2);
    assert_eq!(orders[2], order3);

    let orders = sort_orders(orders, Direction::Short);

    assert_eq!(orders[0], order1);
    assert_eq!(orders[1], order2);
    assert_eq!(orders[2], order3);
}
// A short market order of 100 against a book containing a 100-quantity bid
// should produce exactly one maker match for the full quantity.
#[test]
fn given_limit_and_market_with_same_amount_then_match() {
    let all_orders = vec![
        dummy_long_order(
            dec!(20_000),
            Uuid::new_v4(),
            dec!(100),
            Duration::seconds(0),
        ),
        dummy_long_order(
            dec!(21_000),
            Uuid::new_v4(),
            dec!(200),
            Duration::seconds(0),
        ),
        dummy_long_order(
            dec!(20_000),
            Uuid::new_v4(),
            dec!(300),
            Duration::seconds(0),
        ),
        dummy_long_order(
            dec!(22_000),
            Uuid::new_v4(),
            dec!(400),
            Duration::seconds(0),
        ),
    ];

    let order = Order {
        id: Uuid::new_v4(),
        price: Default::default(),
        trader_id: PublicKey::from_str(
            "027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007",
        )
        .unwrap(),
        direction: Direction::Short,
        leverage: 1.0,
        contract_symbol: ContractSymbol::BtcUsd,
        quantity: dec!(100),
        order_type: OrderType::Market,
        timestamp: OffsetDateTime::now_utc(),
        expiry: OffsetDateTime::now_utc() + Duration::minutes(1),
        order_state: OrderState::Open,
        order_reason: OrderReason::Manual,
        stable: false,
    };

    let matched_orders = match_order(
        &order,
        all_orders,
        Network::Bitcoin,
        get_oracle_public_key(),
        Decimal::ZERO,
    )
    .unwrap()
    .unwrap();

    // Exactly one maker, filled for the full market-order quantity.
    assert_eq!(matched_orders.makers_matches.len(), 1);
    let maker_matches = matched_orders
        .makers_matches
        .first()
        .unwrap()
        .filled_with
        .matches
        .clone();
    assert_eq!(maker_matches.len(), 1);
    assert_eq!(maker_matches.first().unwrap().quantity, dec!(100));

    // The taker side references the market order and mirrors its quantity.
    assert_eq!(matched_orders.taker_match.filled_with.order_id, order.id);
    assert_eq!(matched_orders.taker_match.filled_with.matches.len(), 1);
    assert_eq!(
        matched_orders
            .taker_match
            .filled_with
            .matches
            .first()
            .unwrap()
            .quantity,
        order.quantity
    );
}
/// This test is for safety reasons only. Once we want multiple matches we should update it
#[test]
fn given_limit_and_market_with_smaller_amount_then_error() {
let order1 = dummy_long_order(
dec!(20_000),
Uuid::new_v4(),
dec!(400),
Duration::seconds(0),
);
let order2 = dummy_long_order(
dec!(21_000),
Uuid::new_v4(),
dec!(200),
Duration::seconds(0),
);
let order3 = dummy_long_order(
dec!(22_000),
Uuid::new_v4(),
dec!(100),
Duration::seconds(0),
);
let order4 = dummy_long_order(
dec!(20_000),
Uuid::new_v4(),
dec!(300),
Duration::seconds(0),
);
let all_orders = vec![order1, order2, order3, order4];
let order = Order {
id: Uuid::new_v4(),
price: Default::default(),
trader_id: PublicKey::from_str(
"027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007",
)
.unwrap(),
direction: Direction::Short,
leverage: 1.0,
contract_symbol: ContractSymbol::BtcUsd,
quantity: dec!(200),
order_type: OrderType::Market,
timestamp: OffsetDateTime::now_utc(),
expiry: OffsetDateTime::now_utc() + Duration::minutes(1),
order_state: OrderState::Open,
order_reason: OrderReason::Manual,
stable: false,
};
assert!(match_order(
&order,
all_orders,
Network::Bitcoin,
get_oracle_public_key(),
Decimal::ZERO,
)
.is_err());
}
#[test]
fn given_long_when_needed_short_direction_then_no_match() {
let all_orders = vec![
dummy_long_order(
dec!(20_000),
Uuid::new_v4(),
dec!(100),
Duration::seconds(0),
),
dummy_long_order(
dec!(21_000),
Uuid::new_v4(),
dec!(200),
Duration::seconds(0),
),
dummy_long_order(
dec!(22_000),
Uuid::new_v4(),
dec!(400),
Duration::seconds(0),
),
dummy_long_order(
dec!(20_000),
Uuid::new_v4(),
dec!(300),
Duration::seconds(0),
),
];
let order = Order {
id: Uuid::new_v4(),
price: Default::default(),
trader_id: PublicKey::from_str(
"027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007",
)
.unwrap(),
direction: Direction::Long,
leverage: 1.0,
contract_symbol: ContractSymbol::BtcUsd,
quantity: dec!(200),
order_type: OrderType::Market,
timestamp: OffsetDateTime::now_utc(),
expiry: OffsetDateTime::now_utc() + Duration::minutes(1),
order_state: OrderState::Open,
order_reason: OrderReason::Manual,
stable: false,
};
let matched_orders = match_order(
&order,
all_orders,
Network::Bitcoin,
get_oracle_public_key(),
Decimal::ZERO,
)
.unwrap();
assert!(matched_orders.is_none());
}
fn dummy_long_order(
price: Decimal,
id: Uuid,
quantity: Decimal,
timestamp_delay: Duration,
) -> Order {
Order {
id,
price,
trader_id: PublicKey::from_str(
"027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007",
)
.unwrap(),
direction: Direction::Long,
leverage: 1.0,
contract_symbol: ContractSymbol::BtcUsd,
quantity,
order_type: OrderType::Limit,
timestamp: OffsetDateTime::now_utc() + timestamp_delay,
expiry: OffsetDateTime::now_utc() + Duration::minutes(1),
order_state: OrderState::Open,
order_reason: OrderReason::Manual,
stable: false,
}
}
fn get_oracle_public_key() -> XOnlyPublicKey {
XOnlyPublicKey::from_str("16f88cf7d21e6c0f46bcbc983a4e3b19726c6c98858cc31c83551a88fde171c0")
.unwrap()
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/orderbook/mod.rs | coordinator/src/orderbook/mod.rs | pub mod async_match;
pub mod collaborative_revert;
pub mod db;
pub mod trading;
pub mod websocket;
#[cfg(test)]
mod tests;
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/orderbook/collaborative_revert.rs | coordinator/src/orderbook/collaborative_revert.rs | use crate::db::collaborative_reverts;
use crate::message::NewUserMessage;
use crate::message::OrderbookMessage;
use anyhow::bail;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Address;
use bitcoin::Network;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::Pool;
use diesel::PgConnection;
use futures::future::RemoteHandle;
use futures::FutureExt;
use tokio::sync::broadcast;
use tokio::sync::broadcast::error::RecvError;
use tokio::sync::mpsc;
use tokio::task::spawn_blocking;
use xxi_node::commons::Message;
pub fn monitor(
pool: Pool<ConnectionManager<PgConnection>>,
tx_user_feed: broadcast::Sender<NewUserMessage>,
notifier: mpsc::Sender<OrderbookMessage>,
network: Network,
) -> RemoteHandle<()> {
let mut user_feed = tx_user_feed.subscribe();
let (fut, remote_handle) = async move {
loop {
match user_feed.recv().await {
Ok(new_user_msg) => {
tokio::spawn({
let notifier = notifier.clone();
let pool = pool.clone();
async move {
tracing::debug!(
trader_id=%new_user_msg.new_user,
"Checking if the user needs to be notified about \
collaboratively reverting a channel"
);
if let Err(e) = process_pending_collaborative_revert(
pool,
notifier,
new_user_msg.new_user,
network,
)
.await
{
tracing::error!(
trader_id = %new_user_msg.new_user,
"Failed to process pending collaborative revert. Error: {e:#}"
);
}
}
});
}
Err(RecvError::Closed) => {
tracing::error!("New user message sender died! Channel closed.");
break;
}
Err(RecvError::Lagged(skip)) => tracing::warn!(%skip,
"Lagging behind on new user message."
),
}
}
}
.remote_handle();
tokio::spawn(fut);
remote_handle
}
/// Checks if there are any pending collaborative reverts
async fn process_pending_collaborative_revert(
pool: Pool<ConnectionManager<PgConnection>>,
notifier: mpsc::Sender<OrderbookMessage>,
trader_id: PublicKey,
network: Network,
) -> Result<()> {
let mut conn = spawn_blocking(move || pool.get())
.await
.expect("task to complete")?;
match collaborative_reverts::by_trader_pubkey(
trader_id.to_string().as_str(),
network,
&mut conn,
)? {
None => {
// nothing to revert
}
Some(revert) => {
tracing::debug!(
%trader_id,
channel_id = hex::encode(revert.channel_id),
"Notifying trader about pending collaborative revert"
);
// Sending no optional push notification as this is only executed if the user just
// registered on the websocket. So we can assume that the user is still online.
let msg = OrderbookMessage::TraderMessage {
trader_id,
message: Message::DlcChannelCollaborativeRevert {
channel_id: revert.channel_id,
coordinator_address: Address::new(
revert.coordinator_address.network,
revert.coordinator_address.payload,
),
coordinator_amount: revert.coordinator_amount_sats,
trader_amount: revert.trader_amount_sats,
execution_price: revert.price,
},
notification: None,
};
if let Err(e) = notifier.send(msg).await {
bail!("Failed to send notification. Error: {e:#}");
}
}
}
Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/orderbook/db/matches.rs | coordinator/src/orderbook/db/matches.rs | use crate::orderbook::db::custom_types::MatchState;
use crate::orderbook::trading::TraderMatchParams;
use crate::schema::matches;
use anyhow::ensure;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Amount;
use diesel::ExpressionMethods;
use diesel::Insertable;
use diesel::PgConnection;
use diesel::QueryDsl;
use diesel::QueryResult;
use diesel::Queryable;
use diesel::QueryableByName;
use diesel::RunQueryDsl;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use std::str::FromStr;
use time::OffsetDateTime;
use uuid::Uuid;
use xxi_node::commons;
#[derive(Insertable, QueryableByName, Queryable, Debug, Clone, PartialEq)]
#[diesel(table_name = matches)]
struct Matches {
pub id: Uuid,
pub match_state: MatchState,
pub order_id: Uuid,
pub trader_id: String,
pub match_order_id: Uuid,
pub match_trader_id: String,
pub execution_price: f32,
pub quantity: f32,
pub created_at: OffsetDateTime,
pub updated_at: OffsetDateTime,
pub matching_fee_sats: i64,
}
pub fn insert(conn: &mut PgConnection, match_params: &TraderMatchParams) -> Result<()> {
for record in Matches::new(match_params, MatchState::Pending) {
let affected_rows = diesel::insert_into(matches::table)
.values(record.clone())
.execute(conn)?;
ensure!(affected_rows > 0, "Could not insert matches");
}
Ok(())
}
pub fn set_match_state(
conn: &mut PgConnection,
order_id: Uuid,
match_state: commons::MatchState,
) -> QueryResult<()> {
diesel::update(matches::table)
.filter(matches::order_id.eq(order_id))
.set(matches::match_state.eq(MatchState::from(match_state)))
.execute(conn)?;
Ok(())
}
pub fn get_matches_by_order_id(
conn: &mut PgConnection,
order_id: Uuid,
) -> QueryResult<Vec<commons::Matches>> {
let matches: Vec<Matches> = matches::table
.filter(matches::order_id.eq(order_id))
.load(conn)?;
let matches = matches.into_iter().map(commons::Matches::from).collect();
Ok(matches)
}
pub fn set_match_state_by_order_id(
conn: &mut PgConnection,
order_id: Uuid,
match_state: commons::MatchState,
) -> Result<()> {
let affected_rows = diesel::update(matches::table)
.filter(matches::order_id.eq(order_id))
.set(matches::match_state.eq(MatchState::from(match_state)))
.execute(conn)?;
ensure!(affected_rows > 0, "Could not update matches");
Ok(())
}
impl Matches {
pub fn new(match_params: &TraderMatchParams, match_state: MatchState) -> Vec<Matches> {
let order_id = match_params.filled_with.order_id;
let updated_at = OffsetDateTime::now_utc();
let trader_id = match_params.trader_id;
match_params
.filled_with
.matches
.iter()
.map(|m| Matches {
id: m.id,
match_state,
order_id,
trader_id: trader_id.to_string(),
match_order_id: m.order_id,
match_trader_id: m.pubkey.to_string(),
execution_price: m.execution_price.to_f32().expect("to fit into f32"),
quantity: m.quantity.to_f32().expect("to fit into f32"),
created_at: updated_at,
updated_at,
matching_fee_sats: m.matching_fee.to_sat() as i64,
})
.collect()
}
}
impl From<commons::Matches> for Matches {
fn from(value: commons::Matches) -> Self {
Matches {
id: value.id,
match_state: value.match_state.into(),
order_id: value.order_id,
trader_id: value.trader_id.to_string(),
match_order_id: value.match_order_id,
match_trader_id: value.match_trader_id.to_string(),
execution_price: value.execution_price.to_f32().expect("to fit into f32"),
quantity: value.quantity.to_f32().expect("to fit into f32"),
created_at: OffsetDateTime::now_utc(),
updated_at: OffsetDateTime::now_utc(),
matching_fee_sats: value.matching_fee.to_sat() as i64,
}
}
}
impl From<commons::MatchState> for MatchState {
fn from(value: commons::MatchState) -> Self {
match value {
commons::MatchState::Pending => MatchState::Pending,
commons::MatchState::Filled => MatchState::Filled,
commons::MatchState::Failed => MatchState::Failed,
}
}
}
impl From<Matches> for commons::Matches {
fn from(value: Matches) -> Self {
commons::Matches {
id: value.id,
match_state: value.match_state.into(),
order_id: value.order_id,
trader_id: PublicKey::from_str(&value.trader_id).expect("to be a valid public key"),
match_order_id: value.match_order_id,
match_trader_id: PublicKey::from_str(&value.match_trader_id)
.expect("to be a valid public key"),
execution_price: Decimal::from_f32(value.execution_price).expect("to fit into decimal"),
quantity: Decimal::from_f32(value.quantity).expect("to fit into decimal"),
created_at: OffsetDateTime::now_utc(),
updated_at: OffsetDateTime::now_utc(),
matching_fee: Amount::from_sat(value.matching_fee_sats as u64),
}
}
}
impl From<MatchState> for commons::MatchState {
fn from(value: MatchState) -> Self {
match value {
MatchState::Pending => commons::MatchState::Pending,
MatchState::Filled => commons::MatchState::Filled,
MatchState::Failed => commons::MatchState::Failed,
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/orderbook/db/mod.rs | coordinator/src/orderbook/db/mod.rs | pub mod custom_types;
pub mod matches;
pub mod orders;
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/orderbook/db/custom_types.rs | coordinator/src/orderbook/db/custom_types.rs | use crate::schema::sql_types::DirectionType;
use crate::schema::sql_types::MatchStateType;
use crate::schema::sql_types::OrderReasonType;
use crate::schema::sql_types::OrderStateType;
use crate::schema::sql_types::OrderTypeType;
use diesel::deserialize;
use diesel::deserialize::FromSql;
use diesel::pg::Pg;
use diesel::pg::PgValue;
use diesel::query_builder::QueryId;
use diesel::serialize;
use diesel::serialize::IsNull;
use diesel::serialize::Output;
use diesel::serialize::ToSql;
use diesel::AsExpression;
use diesel::FromSqlRow;
use std::any::TypeId;
use std::io::Write;
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression, Eq)]
#[diesel(sql_type = DirectionType)]
pub enum Direction {
Long,
Short,
}
impl QueryId for DirectionType {
type QueryId = DirectionType;
const HAS_STATIC_QUERY_ID: bool = false;
fn query_id() -> Option<TypeId> {
None
}
}
impl ToSql<DirectionType, Pg> for Direction {
fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
match *self {
Direction::Long => out.write_all(b"long")?,
Direction::Short => out.write_all(b"short")?,
}
Ok(IsNull::No)
}
}
impl FromSql<DirectionType, Pg> for Direction {
fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
match bytes.as_bytes() {
b"long" => Ok(Direction::Long),
b"short" => Ok(Direction::Short),
_ => Err("Unrecognized enum variant".into()),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression, Eq)]
#[diesel(sql_type = OrderTypeType)]
pub enum OrderType {
Market,
Limit,
}
impl QueryId for OrderTypeType {
type QueryId = OrderTypeType;
const HAS_STATIC_QUERY_ID: bool = false;
fn query_id() -> Option<TypeId> {
None
}
}
impl ToSql<OrderTypeType, Pg> for OrderType {
fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
match *self {
OrderType::Market => out.write_all(b"market")?,
OrderType::Limit => out.write_all(b"limit")?,
}
Ok(IsNull::No)
}
}
impl FromSql<OrderTypeType, Pg> for OrderType {
fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
match bytes.as_bytes() {
b"market" => Ok(OrderType::Market),
b"limit" => Ok(OrderType::Limit),
_ => Err("Unrecognized enum variant".into()),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = OrderStateType)]
pub(crate) enum OrderState {
/// The order is waiting for a match.
Open,
/// The order is matched, but the trade has not yet happened.
Matched,
/// The trade has been initiated for that order.
Taken,
/// The order failed.
Failed,
/// The order expired.
Expired,
/// The order was manually deleted by the trader
Deleted,
}
impl QueryId for OrderStateType {
type QueryId = OrderStateType;
const HAS_STATIC_QUERY_ID: bool = false;
fn query_id() -> Option<TypeId> {
None
}
}
impl ToSql<OrderStateType, Pg> for OrderState {
fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
match *self {
OrderState::Open => out.write_all(b"Open")?,
OrderState::Matched => out.write_all(b"Matched")?,
OrderState::Taken => out.write_all(b"Taken")?,
OrderState::Failed => out.write_all(b"Failed")?,
OrderState::Expired => out.write_all(b"Expired")?,
OrderState::Deleted => out.write_all(b"Deleted")?,
}
Ok(IsNull::No)
}
}
impl FromSql<OrderStateType, Pg> for OrderState {
fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
match bytes.as_bytes() {
b"Open" => Ok(OrderState::Open),
b"Matched" => Ok(OrderState::Matched),
b"Taken" => Ok(OrderState::Taken),
b"Failed" => Ok(OrderState::Failed),
b"Expired" => Ok(OrderState::Expired),
b"Deleted" => Ok(OrderState::Deleted),
_ => Err("Unrecognized enum variant".into()),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = OrderReasonType)]
pub(crate) enum OrderReason {
/// The order has been created manually by the user.
Manual,
/// The order has been create automatically as the position expired.
Expired,
/// The order has been created automatically as the position got liquidated.
TraderLiquidated,
CoordinatorLiquidated,
}
impl QueryId for OrderReasonType {
type QueryId = OrderReasonType;
const HAS_STATIC_QUERY_ID: bool = false;
fn query_id() -> Option<TypeId> {
None
}
}
impl ToSql<OrderReasonType, Pg> for OrderReason {
fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
match *self {
OrderReason::Manual => out.write_all(b"Manual")?,
OrderReason::Expired => out.write_all(b"Expired")?,
OrderReason::TraderLiquidated => out.write_all(b"TraderLiquidated")?,
OrderReason::CoordinatorLiquidated => out.write_all(b"CoordinatorLiquidated")?,
}
Ok(IsNull::No)
}
}
impl FromSql<OrderReasonType, Pg> for OrderReason {
fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
match bytes.as_bytes() {
b"Manual" => Ok(OrderReason::Manual),
b"Expired" => Ok(OrderReason::Expired),
b"TraderLiquidated" => Ok(OrderReason::TraderLiquidated),
b"CoordinatorLiquidated" => Ok(OrderReason::CoordinatorLiquidated),
_ => Err("Unrecognized enum variant".into()),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = MatchStateType)]
pub(crate) enum MatchState {
Pending,
Filled,
Failed,
}
impl QueryId for MatchStateType {
type QueryId = MatchStateType;
const HAS_STATIC_QUERY_ID: bool = false;
fn query_id() -> Option<TypeId> {
None
}
}
impl ToSql<MatchStateType, Pg> for MatchState {
fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
match *self {
MatchState::Pending => out.write_all(b"Pending")?,
MatchState::Filled => out.write_all(b"Filled")?,
MatchState::Failed => out.write_all(b"Failed")?,
}
Ok(IsNull::No)
}
}
impl FromSql<MatchStateType, Pg> for MatchState {
fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
match bytes.as_bytes() {
b"Pending" => Ok(MatchState::Pending),
b"Filled" => Ok(MatchState::Filled),
b"Failed" => Ok(MatchState::Failed),
_ => Err("Unrecognized enum variant".into()),
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/orderbook/db/orders.rs | coordinator/src/orderbook/db/orders.rs | use crate::db::positions::ContractSymbol;
use crate::orderbook::db::custom_types::Direction;
use crate::orderbook::db::custom_types::MatchState;
use crate::orderbook::db::custom_types::OrderReason;
use crate::orderbook::db::custom_types::OrderState;
use crate::orderbook::db::custom_types::OrderType;
use crate::schema::matches;
use crate::schema::orders;
use bitcoin::secp256k1::PublicKey;
use diesel::dsl::max;
use diesel::dsl::min;
use diesel::prelude::*;
use diesel::result::QueryResult;
use diesel::PgConnection;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use time::OffsetDateTime;
use uuid::Uuid;
use xxi_node::commons;
use xxi_node::commons::BestPrice;
use xxi_node::commons::Direction as OrderbookDirection;
use xxi_node::commons::NewLimitOrder;
use xxi_node::commons::NewMarketOrder;
use xxi_node::commons::Order as OrderbookOrder;
use xxi_node::commons::OrderReason as OrderBookOrderReason;
use xxi_node::commons::OrderState as OrderBookOrderState;
use xxi_node::commons::OrderType as OrderBookOrderType;
impl From<commons::Direction> for Direction {
fn from(value: commons::Direction) -> Self {
match value {
commons::Direction::Long => Direction::Long,
commons::Direction::Short => Direction::Short,
}
}
}
impl From<Direction> for commons::Direction {
fn from(value: Direction) -> Self {
match value {
Direction::Long => commons::Direction::Long,
Direction::Short => commons::Direction::Short,
}
}
}
impl From<OrderType> for OrderBookOrderType {
fn from(value: OrderType) -> Self {
match value {
OrderType::Market => OrderBookOrderType::Market,
OrderType::Limit => OrderBookOrderType::Limit,
}
}
}
impl From<OrderBookOrderType> for OrderType {
fn from(value: OrderBookOrderType) -> Self {
match value {
OrderBookOrderType::Market => OrderType::Market,
OrderBookOrderType::Limit => OrderType::Limit,
}
}
}
impl From<OrderState> for OrderBookOrderState {
fn from(value: OrderState) -> Self {
match value {
OrderState::Open => OrderBookOrderState::Open,
OrderState::Matched => OrderBookOrderState::Matched,
OrderState::Taken => OrderBookOrderState::Taken,
OrderState::Failed => OrderBookOrderState::Failed,
OrderState::Expired => OrderBookOrderState::Expired,
OrderState::Deleted => OrderBookOrderState::Deleted,
}
}
}
impl From<OrderBookOrderState> for OrderState {
fn from(value: OrderBookOrderState) -> Self {
match value {
OrderBookOrderState::Open => OrderState::Open,
OrderBookOrderState::Matched => OrderState::Matched,
OrderBookOrderState::Taken => OrderState::Taken,
OrderBookOrderState::Failed => OrderState::Failed,
OrderBookOrderState::Expired => OrderState::Expired,
OrderBookOrderState::Deleted => OrderState::Deleted,
}
}
}
#[derive(Queryable, Debug, Clone)]
struct Order {
// this id is only internally but needs to be here or diesel complains
#[allow(dead_code)]
pub id: i32,
pub trader_order_id: Uuid,
pub price: f32,
pub trader_id: String,
pub direction: Direction,
pub quantity: f32,
pub timestamp: OffsetDateTime,
pub order_type: OrderType,
pub expiry: OffsetDateTime,
pub order_state: OrderState,
pub contract_symbol: ContractSymbol,
pub leverage: f32,
pub order_reason: OrderReason,
pub stable: bool,
}
impl From<Order> for OrderbookOrder {
fn from(value: Order) -> Self {
OrderbookOrder {
id: value.trader_order_id,
price: Decimal::from_f32(value.price).expect("To be able to convert f32 to decimal"),
trader_id: value.trader_id.parse().expect("to have a valid pubkey"),
leverage: value.leverage,
contract_symbol: value.contract_symbol.into(),
direction: value.direction.into(),
quantity: Decimal::from_f32(value.quantity)
.expect("To be able to convert f32 to decimal"),
order_type: value.order_type.into(),
timestamp: value.timestamp,
expiry: value.expiry,
order_state: value.order_state.into(),
order_reason: value.order_reason.into(),
stable: value.stable,
}
}
}
impl From<OrderReason> for OrderBookOrderReason {
fn from(value: OrderReason) -> Self {
match value {
OrderReason::Manual => OrderBookOrderReason::Manual,
OrderReason::Expired => OrderBookOrderReason::Expired,
OrderReason::TraderLiquidated => OrderBookOrderReason::TraderLiquidated,
OrderReason::CoordinatorLiquidated => OrderBookOrderReason::CoordinatorLiquidated,
}
}
}
impl From<OrderBookOrderReason> for OrderReason {
fn from(value: OrderBookOrderReason) -> Self {
match value {
OrderBookOrderReason::Manual => OrderReason::Manual,
OrderBookOrderReason::Expired => OrderReason::Expired,
OrderBookOrderReason::TraderLiquidated => OrderReason::TraderLiquidated,
OrderBookOrderReason::CoordinatorLiquidated => OrderReason::CoordinatorLiquidated,
}
}
}
#[derive(Insertable, Debug, PartialEq)]
#[diesel(table_name = orders)]
struct NewOrder {
pub trader_order_id: Uuid,
pub price: f32,
pub trader_id: String,
pub direction: Direction,
pub quantity: f32,
pub order_type: OrderType,
pub expiry: OffsetDateTime,
pub order_reason: OrderReason,
pub contract_symbol: ContractSymbol,
pub leverage: f32,
pub stable: bool,
}
impl From<NewLimitOrder> for NewOrder {
fn from(value: NewLimitOrder) -> Self {
NewOrder {
trader_order_id: value.id,
price: value
.price
.round_dp(2)
.to_f32()
.expect("To be able to convert decimal to f32"),
trader_id: value.trader_id.to_string(),
direction: value.direction.into(),
quantity: value
.quantity
.round_dp(2)
.to_f32()
.expect("To be able to convert decimal to f32"),
order_type: OrderType::Limit,
expiry: value.expiry,
order_reason: OrderReason::Manual,
contract_symbol: value.contract_symbol.into(),
leverage: value
.leverage
.to_f32()
.expect("To be able to convert decimal to f32"),
stable: value.stable,
}
}
}
impl From<NewMarketOrder> for NewOrder {
fn from(value: NewMarketOrder) -> Self {
NewOrder {
trader_order_id: value.id,
// TODO: it would be cool to get rid of this as well
price: 0.0,
trader_id: value.trader_id.to_string(),
direction: value.direction.into(),
quantity: value
.quantity
.round_dp(2)
.to_f32()
.expect("To be able to convert decimal to f32"),
order_type: OrderType::Market,
expiry: value.expiry,
order_reason: OrderReason::Manual,
contract_symbol: value.contract_symbol.into(),
leverage: value
.leverage
.to_f32()
.expect("To be able to convert decimal to f32"),
stable: value.stable,
}
}
}
pub fn all_limit_orders(conn: &mut PgConnection) -> QueryResult<Vec<OrderbookOrder>> {
let orders = orders::table
.filter(orders::order_type.eq(OrderType::Limit))
.filter(orders::expiry.gt(OffsetDateTime::now_utc()))
.filter(orders::order_state.eq(OrderState::Open))
.load::<Order>(conn)?;
Ok(orders.into_iter().map(OrderbookOrder::from).collect())
}
/// Loads all orders by the given order direction and type
pub fn all_by_direction_and_type(
conn: &mut PgConnection,
direction: OrderbookDirection,
order_type: OrderBookOrderType,
filter_expired: bool,
) -> QueryResult<Vec<OrderbookOrder>> {
let filters = orders::table
.filter(orders::direction.eq(Direction::from(direction)))
.filter(orders::order_type.eq(OrderType::from(order_type)))
.filter(orders::order_state.eq(OrderState::Open));
let orders: Vec<Order> = if filter_expired {
filters
.filter(orders::expiry.gt(OffsetDateTime::now_utc()))
.load::<Order>(conn)?
} else {
filters.load::<Order>(conn)?
};
Ok(orders.into_iter().map(OrderbookOrder::from).collect())
}
pub fn get_best_price(
conn: &mut PgConnection,
contract_symbol: commons::ContractSymbol,
) -> QueryResult<BestPrice> {
let best_price = BestPrice {
bid: get_best_bid_price(conn, contract_symbol)?,
ask: get_best_ask_price(conn, contract_symbol)?,
};
Ok(best_price)
}
/// Returns the best price to sell.
pub fn get_best_bid_price(
conn: &mut PgConnection,
contract_symbol: commons::ContractSymbol,
) -> QueryResult<Option<Decimal>> {
let price: Option<f32> = orders::table
.select(max(orders::price))
.filter(orders::order_state.eq(OrderState::Open))
.filter(orders::order_type.eq(OrderType::Limit))
.filter(orders::direction.eq(Direction::Long))
.filter(orders::contract_symbol.eq(ContractSymbol::from(contract_symbol)))
.filter(orders::expiry.gt(OffsetDateTime::now_utc()))
.first::<Option<f32>>(conn)?;
Ok(price.map(|bid| Decimal::try_from(bid).expect("to fit into decimal")))
}
/// Returns the best price to buy.
pub fn get_best_ask_price(
conn: &mut PgConnection,
contract_symbol: commons::ContractSymbol,
) -> QueryResult<Option<Decimal>> {
let price: Option<f32> = orders::table
.select(min(orders::price))
.filter(orders::order_state.eq(OrderState::Open))
.filter(orders::order_type.eq(OrderType::Limit))
.filter(orders::direction.eq(Direction::Short))
.filter(orders::contract_symbol.eq(ContractSymbol::from(contract_symbol)))
.filter(orders::expiry.gt(OffsetDateTime::now_utc()))
.first::<Option<f32>>(conn)?;
Ok(price.map(|ask| Decimal::try_from(ask).expect("to fit into decimal")))
}
pub fn get_all_orders(
conn: &mut PgConnection,
order_type: OrderBookOrderType,
order_state: OrderBookOrderState,
filter_expired: bool,
) -> QueryResult<Vec<OrderbookOrder>> {
let filters = orders::table
.filter(orders::order_state.eq(OrderState::from(order_state)))
.filter(orders::order_type.eq(OrderType::from(order_type)));
let orders: Vec<Order> = if filter_expired {
filters
.filter(orders::expiry.gt(OffsetDateTime::now_utc()))
.load::<Order>(conn)?
} else {
filters.load::<Order>(conn)?
};
Ok(orders.into_iter().map(OrderbookOrder::from).collect())
}
pub fn get_all_matched_market_orders_by_order_reason(
conn: &mut PgConnection,
order_reasons: Vec<commons::OrderReason>,
) -> QueryResult<Vec<OrderbookOrder>> {
let orders: Vec<Order> = orders::table
.filter(orders::order_state.eq(OrderState::Matched))
.filter(
orders::order_reason.eq_any(
order_reasons
.into_iter()
.map(OrderReason::from)
.collect::<Vec<_>>(),
),
)
.filter(orders::order_type.eq(OrderType::Market))
.load::<Order>(conn)?;
Ok(orders.into_iter().map(OrderbookOrder::from).collect())
}
/// Returns the number of affected rows: 1.
pub fn insert_limit_order(
conn: &mut PgConnection,
order: NewLimitOrder,
// TODO: All limit orders are "manual".
order_reason: OrderBookOrderReason,
) -> QueryResult<OrderbookOrder> {
let new_order = NewOrder {
order_reason: OrderReason::from(order_reason),
..NewOrder::from(order)
};
let order: Order = diesel::insert_into(orders::table)
.values(new_order)
.get_result(conn)?;
Ok(OrderbookOrder::from(order))
}
/// Returns the number of affected rows: 1.
pub fn insert_market_order(
conn: &mut PgConnection,
order: NewMarketOrder,
order_reason: OrderBookOrderReason,
) -> QueryResult<OrderbookOrder> {
let new_order = NewOrder {
order_reason: OrderReason::from(order_reason),
..NewOrder::from(order)
};
let order: Order = diesel::insert_into(orders::table)
.values(new_order)
.get_result(conn)?;
Ok(OrderbookOrder::from(order))
}
/// Returns the number of affected rows: 1.
pub fn set_is_taken(
conn: &mut PgConnection,
id: Uuid,
is_taken: bool,
) -> QueryResult<OrderbookOrder> {
if is_taken {
set_order_state(conn, id, commons::OrderState::Taken)
} else {
set_order_state(conn, id, commons::OrderState::Open)
}
}
/// Mark an order as [`OrderState::Deleted`], if it belongs to the given `trader_id`.
pub fn delete_trader_order(
conn: &mut PgConnection,
id: Uuid,
trader_id: PublicKey,
) -> QueryResult<OrderbookOrder> {
let order: Order = diesel::update(orders::table)
.filter(orders::trader_order_id.eq(id))
.filter(orders::trader_id.eq(trader_id.to_string()))
.set(orders::order_state.eq(OrderState::Deleted))
.get_result(conn)?;
Ok(OrderbookOrder::from(order))
}
/// Mark an order as [`OrderState::Deleted`].
pub fn delete(conn: &mut PgConnection, id: Uuid) -> QueryResult<OrderbookOrder> {
set_order_state(conn, id, commons::OrderState::Deleted)
}
/// Returns the number of affected rows: 1.
pub fn set_order_state(
conn: &mut PgConnection,
id: Uuid,
order_state: commons::OrderState,
) -> QueryResult<OrderbookOrder> {
let order: Order = diesel::update(orders::table)
.filter(orders::trader_order_id.eq(id))
.set((orders::order_state.eq(OrderState::from(order_state)),))
.get_result(conn)?;
Ok(OrderbookOrder::from(order))
}
pub fn set_expired_limit_orders_to_expired(
conn: &mut PgConnection,
) -> QueryResult<Vec<OrderbookOrder>> {
let expired_limit_orders: Vec<Order> = diesel::update(orders::table)
.filter(orders::order_state.eq(OrderState::Open))
.filter(orders::order_type.eq(OrderType::Limit))
.filter(orders::expiry.lt(OffsetDateTime::now_utc()))
.set(orders::order_state.eq(OrderState::Expired))
.get_results(conn)?;
Ok(expired_limit_orders
.into_iter()
.map(OrderbookOrder::from)
.collect())
}
/// Returns the order by id
pub fn get_with_id(conn: &mut PgConnection, uid: Uuid) -> QueryResult<Option<OrderbookOrder>> {
let x = orders::table
.filter(orders::trader_order_id.eq(uid))
.load::<Order>(conn)?;
let option = x.first().map(|order| OrderbookOrder::from(order.clone()));
Ok(option)
}
pub fn get_by_trader_id_and_state(
conn: &mut PgConnection,
trader_id: PublicKey,
order_state: commons::OrderState,
) -> QueryResult<Option<OrderbookOrder>> {
orders::table
.filter(orders::trader_id.eq(trader_id.to_string()))
.filter(orders::order_state.eq(OrderState::from(order_state)))
.order_by(orders::timestamp.desc())
.first::<Order>(conn)
.map(OrderbookOrder::from)
.optional()
}
/// Get all the filled matches for all the limit orders generated by `trader_id`.
///
/// This can be used to calculate the implicit position of the maker, assuming that all the filled
/// matches were executed.
pub fn get_all_limit_order_filled_matches(
conn: &mut PgConnection,
trader_id: PublicKey,
) -> QueryResult<Vec<(Uuid, Decimal)>> {
let orders = orders::table
// We use `matches::match_order_id` so that we can verify that the corresponding app trader
// order is in `match_state` _`Filled`_. The maker's match remains in `Pending` (since the
// trade is not actually executed yet), which is not very informative.
.inner_join(matches::table.on(matches::match_order_id.eq(orders::trader_order_id)))
.filter(
orders::trader_id
.eq(trader_id.to_string())
// Looking for `Matched`, `Limit` orders only, corresponding to the maker.
.and(orders::order_type.eq(OrderType::Limit))
.and(orders::order_state.eq(OrderState::Matched))
// The corresponding app trader match is `Filled`.
.and(matches::match_state.eq(MatchState::Filled)),
)
.select((
// We use the order ID of the _match_ so that we get a unique order ID even if the same
// limit order is partially filled more than once.
matches::order_id,
matches::quantity,
orders::direction,
))
.load::<(Uuid, f32, Direction)>(conn)?;
let filled_matches = orders
.into_iter()
.map(|(order_id, quantity, direction_maker)| {
let quantity = Decimal::from_f32(quantity).expect("to fit into Decimal");
let quantity = match direction_maker {
Direction::Long => quantity,
Direction::Short => -quantity,
};
(order_id, quantity)
})
.collect();
Ok(filled_matches)
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/orderbook/tests/sample_test.rs | coordinator/src/orderbook/tests/sample_test.rs | use crate::logger::init_tracing_for_test;
use crate::orderbook::db::orders;
use crate::orderbook::tests::setup_db;
use crate::orderbook::tests::start_postgres;
use bitcoin::secp256k1::PublicKey;
use rust_decimal_macros::dec;
use std::str::FromStr;
use testcontainers::clients::Cli;
use time::Duration;
use time::OffsetDateTime;
use uuid::Uuid;
use xxi_node::commons;
use xxi_node::commons::Direction;
use xxi_node::commons::NewLimitOrder;
use xxi_node::commons::NewMarketOrder;
use xxi_node::commons::OrderReason;
use xxi_node::commons::OrderState;
#[tokio::test]
async fn crud_test() {
init_tracing_for_test();
let docker = Cli::default();
let (_container, conn_spec) = start_postgres(&docker).unwrap();
let mut conn = setup_db(conn_spec);
let order = orders::insert_limit_order(
&mut conn,
dummy_limit_order(OffsetDateTime::now_utc() + Duration::minutes(1)),
OrderReason::Manual,
)
.unwrap();
let order = orders::set_is_taken(&mut conn, order.id, true).unwrap();
assert_eq!(order.order_state, OrderState::Taken);
}
#[tokio::test]
async fn test_all_limit_orders() {
init_tracing_for_test();
let docker = Cli::default();
let (_container, conn_spec) = start_postgres(&docker).unwrap();
let mut conn = setup_db(conn_spec);
let orders = orders::all_limit_orders(&mut conn).unwrap();
assert!(orders.is_empty());
let order_1 = dummy_limit_order(OffsetDateTime::now_utc() + Duration::minutes(1));
orders::insert_limit_order(&mut conn, order_1, OrderReason::Manual).unwrap();
let order_2 = dummy_market_order(OffsetDateTime::now_utc() + Duration::minutes(1));
orders::insert_market_order(&mut conn, order_2, OrderReason::Manual).unwrap();
let order_3 = dummy_limit_order(OffsetDateTime::now_utc() + Duration::minutes(1));
let second_limit_order =
orders::insert_limit_order(&mut conn, order_3, OrderReason::Manual).unwrap();
orders::set_order_state(&mut conn, second_limit_order.id, OrderState::Failed).unwrap();
let orders = orders::all_limit_orders(&mut conn).unwrap();
assert_eq!(orders.len(), 1);
}
fn dummy_market_order(expiry: OffsetDateTime) -> NewMarketOrder {
NewMarketOrder {
id: Uuid::new_v4(),
trader_id: PublicKey::from_str(
"027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007",
)
.unwrap(),
direction: Direction::Long,
quantity: dec!(100.0),
expiry,
contract_symbol: commons::ContractSymbol::BtcUsd,
leverage: dec!(1.0),
stable: false,
}
}
fn dummy_limit_order(expiry: OffsetDateTime) -> NewLimitOrder {
NewLimitOrder {
id: Uuid::new_v4(),
price: dec!(20000.00),
trader_id: PublicKey::from_str(
"027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007",
)
.unwrap(),
direction: Direction::Long,
quantity: dec!(100.0),
expiry,
contract_symbol: commons::ContractSymbol::BtcUsd,
leverage: dec!(1.0),
stable: false,
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/orderbook/tests/mod.rs | coordinator/src/orderbook/tests/mod.rs | mod registration_test;
mod sample_test;
use crate::run_migration;
use anyhow::Result;
use diesel::r2d2;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::PooledConnection;
use diesel::PgConnection;
use testcontainers::clients::Cli;
use testcontainers::core::WaitFor;
use testcontainers::images;
use testcontainers::images::generic::GenericImage;
use testcontainers::Container;
pub fn start_postgres(docker: &Cli) -> Result<(Container<GenericImage>, String)> {
let db = "postgres-db-test";
let user = "postgres-user-test";
let password = "postgres-password-test";
let postgres = images::generic::GenericImage::new("postgres", "15-alpine")
.with_wait_for(WaitFor::message_on_stderr(
"database system is ready to accept connections",
))
.with_env_var("POSTGRES_DB", db)
.with_env_var("POSTGRES_USER", user)
.with_env_var("POSTGRES_PASSWORD", password);
let node = docker.run(postgres);
let connection_string = &format!(
"postgres://{}:{}@127.0.0.1:{}/{}",
user,
password,
node.get_host_port_ipv4(5432),
db
);
Ok((node, connection_string.clone()))
}
pub fn setup_db(db_url: String) -> PooledConnection<ConnectionManager<PgConnection>> {
let manager = ConnectionManager::<PgConnection>::new(db_url);
let pool = r2d2::Pool::builder()
.build(manager)
.expect("Failed to create pool.");
let mut conn = pool.get().unwrap();
run_migration(&mut conn);
conn
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/orderbook/tests/registration_test.rs | coordinator/src/orderbook/tests/registration_test.rs | use crate::db::user;
use crate::logger::init_tracing_for_test;
use crate::orderbook::tests::setup_db;
use crate::orderbook::tests::start_postgres;
use bitcoin::secp256k1::PublicKey;
use std::str::FromStr;
use testcontainers::clients::Cli;
#[tokio::test]
async fn registered_user_is_stored_in_db() {
init_tracing_for_test();
let docker = Cli::default();
let (_container, conn_spec) = start_postgres(&docker).unwrap();
let mut conn = setup_db(conn_spec);
let users = user::all(&mut conn).unwrap();
assert!(users.is_empty());
let dummy_pubkey = dummy_public_key();
let dummy_email = "dummy@user.com".to_string();
let nickname = Some("dummy_user".to_string());
let fcm_token = "just_a_token".to_string();
let version = Some("1.9.0".to_string());
let os = Some("linux".to_string());
let user = user::upsert_user(
&mut conn,
dummy_pubkey,
Some(dummy_email.clone()),
nickname.clone(),
version.clone(),
Some("code1".to_string()),
os.clone(),
)
.unwrap();
assert!(user.id.is_some(), "Id should be filled in by diesel");
user::login_user(
&mut conn,
dummy_pubkey,
fcm_token.clone(),
version.clone(),
os,
)
.unwrap();
let users = user::all(&mut conn).unwrap();
assert_eq!(users.len(), 1);
// We started without the id, so we can't compare the whole user.
assert_eq!(users.first().unwrap().pubkey, dummy_pubkey.to_string());
assert_eq!(users.first().unwrap().contact, dummy_email);
assert_eq!(users.first().unwrap().nickname, nickname);
assert_eq!(users.first().unwrap().fcm_token, fcm_token);
assert_eq!(users.first().unwrap().version, version);
}
fn dummy_public_key() -> PublicKey {
PublicKey::from_str("02bd998ebd176715fe92b7467cf6b1df8023950a4dd911db4c94dfc89cc9f5a655")
.unwrap()
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/trade/websocket.rs | coordinator/src/trade/websocket.rs | use crate::db;
use crate::position::models::Position;
use crate::routes::AppState;
use axum::extract::ws::Message as WebsocketMessage;
use axum::extract::ws::WebSocket;
use axum::extract::State;
use axum::extract::WebSocketUpgrade;
use axum::response::IntoResponse;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::PooledConnection;
use diesel::PgConnection;
use futures::SinkExt;
use futures::StreamExt;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::broadcast::error::RecvError;
use tokio::sync::mpsc;
use xxi_node::commons::create_sign_message;
use xxi_node::commons::Direction;
use xxi_node::commons::PositionMessage;
use xxi_node::commons::PositionMessageRequest;
use xxi_node::commons::AUTH_SIGN_MESSAGE;
#[derive(Clone)]
pub enum InternalPositionUpdateMessage {
NewTrade {
/// As seen from the coordinator, i.e. if quantity is < 0 then coordinator is short, if >
/// 0, then coordinator is long
quantity: f32,
average_entry_price: f32,
},
}
const WEBSOCKET_SEND_TIMEOUT: Duration = Duration::from_secs(5);
pub async fn websocket_handler(
ws: WebSocketUpgrade,
State(state): State<Arc<AppState>>,
) -> impl IntoResponse {
ws.on_upgrade(|socket| websocket_connection(socket, state))
}
// This function deals with a single websocket connection, i.e., a single
// connected client / user, for which we will spawn two independent tasks (for
// receiving / sending messages).
pub async fn websocket_connection(stream: WebSocket, state: Arc<AppState>) {
// By splitting, we can send and receive at the same time.
let (mut sender, mut receiver) = stream.split();
let mut feed = state.tx_position_feed.subscribe();
let (local_sender, mut local_receiver) = mpsc::channel::<PositionMessage>(100);
let mut local_recv_task = tokio::spawn(async move {
while let Some(local_msg) = local_receiver.recv().await {
match serde_json::to_string(&local_msg) {
Ok(msg) => {
if let Err(err) = tokio::time::timeout(
WEBSOCKET_SEND_TIMEOUT,
sender.send(WebsocketMessage::Text(msg.clone())),
)
.await
{
tracing::error!("Could not forward message {msg} : {err:#}");
return;
}
}
Err(error) => {
tracing::warn!("Could not deserialize message {error:#}");
}
}
}
});
// Spawn the first task that will receive broadcast messages and send
// messages over the websocket to our client.
let mut send_task = {
let local_sender = local_sender.clone();
let pool = state.pool.clone();
tokio::spawn(async move {
loop {
match feed.recv().await.clone() {
Ok(position_update) => match position_update {
InternalPositionUpdateMessage::NewTrade {
quantity,
average_entry_price,
} => {
if let Err(error) = {
let mut conn = match pool.get() {
Ok(conn) => conn,
Err(err) => {
tracing::error!(
"Could not get connection to db pool {err:#}"
);
return;
}
};
let (total_average_entry_price, total_quantity) =
calculate_position_stats(&mut conn);
local_sender.send(PositionMessage::NewTrade {
total_quantity,
total_average_entry_price,
new_trade_quantity: quantity,
new_trade_average_entry_price: average_entry_price,
})
}
.await
{
tracing::error!("Could not send message {error:#}");
return;
}
}
},
Err(RecvError::Closed) => {
tracing::error!("position feed sender died! Channel closed.");
break;
}
Err(RecvError::Lagged(skip)) => tracing::warn!(%skip,
"Lagging behind on position feed."
),
}
}
})
};
// Spawn a task that takes messages from the websocket
let local_sender = local_sender.clone();
let pool = state.pool.clone();
let mut recv_task = tokio::spawn(async move {
while let Some(Ok(WebsocketMessage::Text(text))) = receiver.next().await {
match serde_json::from_str(text.as_str()) {
Ok(PositionMessageRequest::Authenticate { signature }) => {
let msg = create_sign_message(AUTH_SIGN_MESSAGE.to_vec());
// TODO(bonomat): in the future we could add authorization as well to only allow
// particular pubkeys get updates
let user_id = signature.pubkey;
let signature = signature.signature;
let mut conn = match pool.get() {
Ok(conn) => conn,
Err(err) => {
tracing::error!("Could not get connection to db pool {err:#}");
return;
}
};
match state.secp.verify_ecdsa(&msg, &signature, &user_id) {
Ok(_) => {
if let Err(e) = local_sender.send(PositionMessage::Authenticated).await
{
tracing::error!(%user_id, "Could not respond to user {e:#}");
return;
}
let (average_entry_price, total_quantity) =
calculate_position_stats(&mut conn);
if let Err(e) = local_sender
.send(PositionMessage::CurrentPosition {
quantity: total_quantity,
average_entry_price,
})
.await
{
tracing::error!(%user_id, "Failed to send all open positions to user {e:#}");
}
}
Err(err) => {
if let Err(er) = local_sender
.send(PositionMessage::InvalidAuthentication(format!(
"Could not authenticate {err:#}"
)))
.await
{
tracing::error!(
%user_id, "Failed to notify user about invalid authentication: {er:#}"
);
return;
}
}
}
}
Err(err) => {
tracing::trace!("Could not deserialize msg: {text} {err:#}");
}
}
}
});
// If any one of the tasks run to completion, we abort the other.
tokio::select! {
_ = (&mut send_task) => {
recv_task.abort();
local_recv_task.abort()
},
_ = (&mut recv_task) => {
send_task.abort();
local_recv_task.abort()
},
_ = (&mut local_recv_task) => {
recv_task.abort();
send_task.abort();
},
};
}
/// Calculates position stats and returns as a tuple (`average_entry_price`,`total_quantity`)
fn calculate_position_stats(
conn: &mut PooledConnection<ConnectionManager<PgConnection>>,
) -> (f32, f32) {
let positions = db::positions::Position::get_all_open_positions(conn).unwrap_or_default();
let average_entry_price = average_entry_price(&positions);
let total_quantity = positions
.iter()
.map(|pos| {
if pos.trader_direction == Direction::Short {
pos.quantity
} else {
// we want to see the quantity as seen from the coordinator
pos.quantity * -1.0
}
})
.sum();
(average_entry_price, total_quantity)
}
/// calculates the average execution price for inverse contracts
///
/// The average execution price follows a simple formula:
/// `total_order_quantity / (quantity_position_0 / execution_price_position_0 + quantity_position_1
/// / execution_price_position_1 )`
pub fn average_entry_price(positions: &[Position]) -> f32 {
if positions.is_empty() {
return 0.0;
}
if positions.len() == 1 {
return positions
.first()
.expect("to be exactly one")
.average_entry_price;
}
let sum_quantity = positions.iter().fold(0.0, |acc, m| acc + m.quantity);
let nominal_prices = positions
.iter()
.fold(0.0, |acc, m| acc + (m.quantity / m.average_entry_price));
sum_quantity / nominal_prices
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/trade/mod.rs | coordinator/src/trade/mod.rs | use crate::compute_relative_contracts;
use crate::db;
use crate::decimal_from_f32;
use crate::dlc_protocol;
use crate::funding_fee::funding_fee_from_funding_fee_events;
use crate::funding_fee::get_outstanding_funding_fee_events;
use crate::message::OrderbookMessage;
use crate::node::Node;
use crate::orderbook::db::matches;
use crate::orderbook::db::orders;
use crate::payout_curve;
use crate::position::models::NewPosition;
use crate::position::models::Position;
use crate::position::models::PositionState;
use anyhow::anyhow;
use anyhow::bail;
use anyhow::ensure;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Amount;
use bitcoin::SignedAmount;
use diesel::Connection;
use diesel::PgConnection;
use dlc_manager::channel::signed_channel::SignedChannel;
use dlc_manager::channel::signed_channel::SignedChannelState;
use dlc_manager::channel::Channel;
use dlc_manager::contract::contract_input::ContractInput;
use dlc_manager::contract::contract_input::ContractInputInfo;
use dlc_manager::contract::contract_input::OracleInput;
use dlc_manager::ContractId;
use dlc_manager::DlcChannelId;
use dlc_messages::channel::Reject;
use lightning::chain::chaininterface::ConfirmationTarget;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use time::OffsetDateTime;
use tokio::sync::mpsc;
use tokio::task::spawn_blocking;
use uuid::Uuid;
use xxi_node::bitcoin_conversion::to_secp_pk_29;
use xxi_node::bitcoin_conversion::to_xonly_pk_29;
use xxi_node::cfd::calculate_long_liquidation_price;
use xxi_node::cfd::calculate_margin;
use xxi_node::cfd::calculate_pnl;
use xxi_node::cfd::calculate_short_liquidation_price;
use xxi_node::commons;
use xxi_node::commons::Direction;
use xxi_node::commons::MatchState;
use xxi_node::commons::Message;
use xxi_node::commons::OrderState;
use xxi_node::commons::TradeAndChannelParams;
use xxi_node::commons::TradeParams;
use xxi_node::message_handler::TenTenOneMessage;
use xxi_node::message_handler::TenTenOneReject;
use xxi_node::node::dlc_channel::estimated_dlc_channel_fee_reserve;
use xxi_node::node::dlc_channel::estimated_funding_transaction_fee;
use xxi_node::node::event::NodeEvent;
use xxi_node::node::signed_channel_state_name;
use xxi_node::node::ProtocolId;
pub mod models;
pub mod websocket;
enum TradeAction {
OpenDlcChannel,
OpenSingleFundedChannel {
external_funding: Amount,
},
OpenPosition {
channel_id: DlcChannelId,
own_payout: Amount,
counter_payout: Amount,
},
ClosePosition {
channel_id: DlcChannelId,
position: Box<Position>,
},
ResizePosition {
channel_id: DlcChannelId,
position: Box<Position>,
resize_action: ResizeAction,
},
}
#[derive(Debug, Clone, Copy)]
enum ResizeAction {
Increase {
/// Absolute number of contracts we increase the position by.
contracts: Decimal,
average_execution_price: Decimal,
},
Decrease {
/// Absolute number of contracts we decrease the position by.
contracts: Decimal,
average_execution_price: Decimal,
},
ChangeDirection {
/// The sign determines the new direction.
contracts_new_direction: Decimal,
average_execution_price: Decimal,
},
}
pub struct TradeExecutor {
node: Node,
notifier: mpsc::Sender<OrderbookMessage>,
}
/// The funds the trader will need to provide to open a DLC channel with the coordinator.
///
/// We can extend this enum with a `ForTradeCost` variant to denote that the trader has to pay for
/// everything except for transaction fees.
enum TraderRequiredLiquidity {
/// Pay for margin, collateral reserve, order-matching fees and transaction fees.
ForTradeCostAndTxFees,
/// Do not pay for anything. The trader has probably paid in a different way e.g. using
/// Lightning.
None,
}
impl TradeExecutor {
pub fn new(node: Node, notifier: mpsc::Sender<OrderbookMessage>) -> Self {
Self { node, notifier }
}
pub async fn execute(&self, params: &TradeAndChannelParams) {
let trader_id = params.trade_params.pubkey;
let order_id = params.trade_params.filled_with.order_id;
match self.execute_internal(params).await {
Ok(()) => {
tracing::info!(
%trader_id,
%order_id,
"Successfully processed match, setting match to Filled"
);
if let Err(e) =
self.update_order_and_match(order_id, MatchState::Filled, OrderState::Taken)
{
tracing::error!(
%trader_id,
%order_id,
"Failed to update order and match state. Error: {e:#}"
);
}
if params.external_funding.is_some() {
// The channel was funded externally. We need to post process the dlc channel
// offer.
if let Err(e) = self.settle_invoice(trader_id, order_id).await {
tracing::error!(%trader_id, %order_id, "Failed to settle invoice with provided pre_image. Cancelling offer. Error: {e:#}");
if let Err(e) = self.cancel_offer(trader_id).await {
tracing::error!(%trader_id, %order_id, "Failed to cancel offer. Error: {e:#}");
}
if let Err(e) = self.cancel_hodl_invoice(order_id).await {
tracing::error!(%trader_id, %order_id, "Failed to cancel hodl invoice. Error: {e:#}");
}
let message = OrderbookMessage::TraderMessage {
trader_id,
message: Message::TradeError {
order_id,
error: e.into(),
},
notification: None,
};
if let Err(e) = self.notifier.send(message).await {
tracing::debug!("Failed to notify trader. Error: {e:#}");
}
return;
}
}
// Everything has been processed successfully, we can safely send the last dlc
// message, that has been stored before.
self.node
.inner
.event_handler
.publish(NodeEvent::SendLastDlcMessage { peer: trader_id });
}
Err(e) => {
tracing::error!(%trader_id, %order_id,"Failed to execute trade. Error: {e:#}");
if params.external_funding.is_some() {
// TODO(holzeis): It might make sense to do this for any failed offer to
// unreserve potentially reserved utxos.
if let Err(e) = self.cancel_offer(trader_id).await {
tracing::error!(%trader_id, %order_id, "Failed to cancel offer. Error: {e:#}");
}
if let Err(e) = self.cancel_hodl_invoice(order_id).await {
tracing::error!(%trader_id, %order_id, "Failed to cancel hodl_invoice. Error: {e:#}");
}
}
if let Err(e) =
self.update_order_and_match(order_id, MatchState::Failed, OrderState::Failed)
{
tracing::error!(%trader_id, %order_id, "Failed to update order and match: {e}");
};
let message = OrderbookMessage::TraderMessage {
trader_id,
message: Message::TradeError {
order_id,
error: e.into(),
},
notification: None,
};
if let Err(e) = self.notifier.send(message).await {
tracing::debug!("Failed to notify trader. Error: {e:#}");
}
}
};
}
/// Settles the accepted invoice for the given trader
async fn settle_invoice(&self, trader: PublicKey, order_id: Uuid) -> Result<()> {
let pre_image = spawn_blocking({
let pool = self.node.pool.clone();
move || {
let mut conn = pool.get()?;
let pre_image = db::hodl_invoice::get_pre_image_by_order_id(&mut conn, order_id)?;
anyhow::Ok(pre_image)
}
})
.await??
.context("Missing pre_image")?;
self.node.lnd_bridge.settle_invoice(pre_image).await?;
tracing::info!(%trader, %order_id, "Settled invoice");
Ok(())
}
/// Cancels a potential pending offer if the proposal failed.
async fn cancel_offer(&self, trader: PublicKey) -> Result<()> {
if let Some(channel) = self
.node
.inner
.get_dlc_channel(|channel| channel.get_counter_party_id() == to_secp_pk_29(trader))?
{
self.node.process_dlc_message(
trader,
&TenTenOneMessage::Reject(TenTenOneReject {
reject: Reject {
channel_id: channel.get_id(),
timestamp: OffsetDateTime::now_utc().unix_timestamp() as u64,
reference_id: None,
},
}),
)?;
spawn_blocking({
let pool = self.node.pool.clone();
move || {
let mut conn = pool.get()?;
db::last_outbound_dlc_message::delete(&mut conn, &trader)?;
anyhow::Ok(())
}
})
.await??;
}
Ok(())
}
pub async fn cancel_hodl_invoice(&self, order_id: Uuid) -> Result<()> {
// if the order was externally funded we need to set the hodl invoice to failed.
let r_hash = spawn_blocking({
let pool = self.node.pool.clone();
move || {
let mut conn = pool.get()?;
let r_hash = db::hodl_invoice::get_r_hash_by_order_id(&mut conn, order_id)?;
anyhow::Ok(r_hash)
}
})
.await??;
self.node.lnd_bridge.cancel_invoice(r_hash).await
}
/// Execute a trade action according to the coordinator's current trading status with the
/// trader.
///
/// We look for a pre-existing position with the trader and execute accordingly:
///
/// 0. If no DLC channel is found, we open a DLC channel (with the position included).
///
/// 1. If a position of equal quantity and opposite direction is found, we close the position.
///
/// 2. If no position is found, we open a position.
///
/// 3. If a position of differing quantity is found, we resize the position.
async fn execute_internal(&self, params: &TradeAndChannelParams) -> Result<()> {
let mut connection = self.node.pool.get()?;
let order_id = params.trade_params.filled_with.order_id;
let trader_id = params.trade_params.pubkey;
let order =
orders::get_with_id(&mut connection, order_id)?.context("Could not find order")?;
let is_stable_order = order.stable;
ensure!(
order.expiry > OffsetDateTime::now_utc(),
"Can't execute a trade on an expired order"
);
ensure!(
order.order_state == OrderState::Matched,
"Can't execute trade with in invalid state {:?}",
order.order_state
);
tracing::info!(%trader_id, %order_id, "Executing match");
let trade_action = self.determine_trade_action(&mut connection, params).await?;
ensure!(
matches!(trade_action, TradeAction::ClosePosition { .. })
|| self.node.settings.read().await.allow_opening_positions,
"Trading is disabled except for closing positions"
);
match trade_action {
TradeAction::OpenDlcChannel => {
let collateral_reserve_coordinator = params
.coordinator_reserve
.context("Missing coordinator collateral reserve")?;
let collateral_reserve_trader = params
.trader_reserve
.context("Missing trader collateral reserve")?;
self.open_dlc_channel(
&mut connection,
¶ms.trade_params,
collateral_reserve_coordinator,
collateral_reserve_trader,
is_stable_order,
TraderRequiredLiquidity::ForTradeCostAndTxFees,
)
.await
.context("Failed to open DLC channel")?;
}
TradeAction::OpenSingleFundedChannel { external_funding } => {
let collateral_reserve_coordinator = params
.coordinator_reserve
.context("Missing coordinator collateral reserve")?;
let order_matching_fee = params.trade_params.order_matching_fee();
let margin_trader = margin_trader(¶ms.trade_params);
let fee_rate = self
.node
.inner
.fee_rate_estimator
.get(ConfirmationTarget::Normal);
// The on chain fees are split evenly between the two parties.
let funding_transaction_fee =
estimated_funding_transaction_fee(fee_rate.as_sat_per_vb() as f64) / 2;
let channel_fee_reserve =
estimated_dlc_channel_fee_reserve(fee_rate.as_sat_per_vb() as f64) / 2;
// If the user funded the channel externally we derive the collateral reserve
// trader from the difference of the trader margin and the
// externally received funds.
//
// TODO(holzeis): Introduce margin orders to directly use the
// external_funding_sats for the position instead of failing here. We need
// to do this though as a malicious actor could otherwise drain us.
//
// Note, we add a min trader reserve to the external funding to ensure that
// minor price movements are covered.
let collateral_reserve_trader = external_funding
.checked_sub(
margin_trader
+ order_matching_fee
+ funding_transaction_fee
+ channel_fee_reserve,
)
.context("Not enough external funds to open position")?;
self.open_dlc_channel(
&mut connection,
¶ms.trade_params,
collateral_reserve_coordinator,
collateral_reserve_trader,
is_stable_order,
TraderRequiredLiquidity::None,
)
.await
.context("Failed to open DLC channel")?;
}
TradeAction::OpenPosition {
channel_id,
own_payout,
counter_payout,
} => self
.open_position(
&mut connection,
channel_id,
¶ms.trade_params,
own_payout,
counter_payout,
is_stable_order,
)
.await
.context("Failed to open new position")?,
TradeAction::ClosePosition {
channel_id,
position,
} => self
.start_closing_position(
&mut connection,
order,
&position,
¶ms.trade_params,
channel_id,
)
.await
.with_context(|| format!("Failed to close position {}", position.id))?,
TradeAction::ResizePosition {
channel_id,
position,
resize_action,
} => self
.resize_position(
&mut connection,
channel_id,
&position,
¶ms.trade_params,
resize_action,
)
.await
.with_context(|| format!("Failed to resize position {}", position.id))?,
};
Ok(())
}
async fn open_dlc_channel(
&self,
conn: &mut PgConnection,
trade_params: &TradeParams,
collateral_reserve_coordinator: Amount,
collateral_reserve_trader: Amount,
stable: bool,
trader_required_utxos: TraderRequiredLiquidity,
) -> Result<()> {
let peer_id = trade_params.pubkey;
let leverage_trader = trade_params.leverage;
let leverage_coordinator = coordinator_leverage_for_trade(&trade_params.pubkey)?;
let margin_trader = margin_trader(trade_params);
let margin_coordinator = margin_coordinator(trade_params, leverage_coordinator);
let order_matching_fee = trade_params.order_matching_fee();
// The coordinator gets the `order_matching_fee` directly in the collateral reserve.
let collateral_reserve_with_fee_coordinator =
collateral_reserve_coordinator + order_matching_fee;
let initial_price = trade_params.filled_with.average_execution_price();
let coordinator_direction = trade_params.direction.opposite();
tracing::info!(
%peer_id,
order_id = %trade_params.filled_with.order_id,
?trade_params,
leverage_coordinator,
%margin_coordinator,
%margin_trader,
%order_matching_fee,
%collateral_reserve_with_fee_coordinator,
%collateral_reserve_trader,
"Opening DLC channel and position"
);
let contract_descriptor = payout_curve::build_contract_descriptor(
initial_price,
margin_coordinator,
margin_trader,
leverage_coordinator,
leverage_trader,
coordinator_direction,
collateral_reserve_with_fee_coordinator,
collateral_reserve_trader,
trade_params.quantity,
trade_params.contract_symbol,
)
.context("Could not build contract descriptor")?;
let contract_symbol = trade_params.contract_symbol.label();
let maturity_time = trade_params.filled_with.expiry_timestamp;
let maturity_time = maturity_time.unix_timestamp();
let sats_per_vbyte = self
.node
.inner
.fee_rate_estimator
.get(ConfirmationTarget::Normal)
.as_sat_per_vb()
.round();
// This fee rate is used to construct the fund and CET transactions.
let fee_rate = Decimal::try_from(sats_per_vbyte)?
.to_u64()
.context("failed to convert to u64")?;
// The contract input to be used for setting up the trade between the trader and the
// coordinator.
let event_id = format!("{contract_symbol}{maturity_time}");
let (offer_collateral, accept_collateral, fee_config) = match trader_required_utxos {
TraderRequiredLiquidity::ForTradeCostAndTxFees => (
(margin_coordinator + collateral_reserve_coordinator).to_sat(),
(margin_trader + collateral_reserve_trader + order_matching_fee).to_sat(),
dlc::FeeConfig::EvenSplit,
),
TraderRequiredLiquidity::None => (
// If the trader doesn't bring their own UTXOs, including the `order_matching_fee`
// is not strictly necessary, but it's simpler to do so.
(margin_coordinator
+ collateral_reserve_coordinator
+ margin_trader
+ collateral_reserve_trader
+ order_matching_fee)
.to_sat(),
0,
dlc::FeeConfig::AllOffer,
),
};
let contract_input = ContractInput {
offer_collateral,
accept_collateral,
fee_rate,
contract_infos: vec![ContractInputInfo {
contract_descriptor,
oracles: OracleInput {
public_keys: vec![to_xonly_pk_29(trade_params.filled_with.oracle_pk)],
event_id: event_id.clone(),
threshold: 1,
},
}],
};
let protocol_id = ProtocolId::new();
tracing::debug!(
%protocol_id,
event_id,
oracle=%trade_params.filled_with.oracle_pk,
"Proposing DLC channel"
);
let (temporary_contract_id, temporary_channel_id) = self
.node
.inner
.propose_dlc_channel(
trade_params.filled_with.clone(),
contract_input,
trade_params.pubkey,
protocol_id,
fee_config,
)
.await
.context("Could not propose DLC channel")?;
let protocol_executor = dlc_protocol::DlcProtocolExecutor::new(self.node.pool.clone());
protocol_executor.start_open_channel_protocol(
protocol_id,
&temporary_contract_id,
&temporary_channel_id,
trade_params,
)?;
// After the DLC channel has been proposed the position can be created. This fixes
// https://github.com/get10101/10101/issues/537, where the position was created before the
// DLC was successfully proposed.
//
// Athough we can still run into inconsistencies (e.g. if `propose_dlc_channel` succeeds,
// but `persist_position_and_trade` doesn't), we are more likely to succeed with the new
// order.
//
// FIXME: We should not create a shadow representation (position) of the DLC struct, but
// rather imply the state from the DLC.
//
// TODO(holzeis): The position should only get created after the dlc protocol has finished
// successfully.
self.persist_position(
conn,
trade_params,
temporary_contract_id,
leverage_coordinator,
stable,
order_matching_fee,
)
.await
}
/// Open a new position by proposing a reopen/resize update on the existing DLC
/// channel between the coordinator and the trader.
///
/// Steps:
/// 1. Compute margins and collateral reserves for both parties and build the
///    payout curve (contract descriptor).
/// 2. Propose the DLC channel update to the trader.
/// 3. Record the started protocol and persist the position.
///
/// # Errors
///
/// Fails if either party's margin (plus the order-matching fee on the trader
/// side) exceeds their collateral in the DLC channel, if the contract
/// descriptor cannot be built, or if proposing the update or persisting the
/// position fails.
async fn open_position(
    &self,
    conn: &mut PgConnection,
    dlc_channel_id: DlcChannelId,
    trade_params: &TradeParams,
    coordinator_dlc_channel_collateral: Amount,
    trader_dlc_channel_collateral: Amount,
    stable: bool,
) -> Result<()> {
    let peer_id = trade_params.pubkey;
    tracing::info!(
        %peer_id,
        order_id = %trade_params.filled_with.order_id,
        channel_id = %hex::encode(dlc_channel_id),
        ?trade_params,
        "Opening position"
    );
    let initial_price = trade_params.filled_with.average_execution_price();
    let leverage_coordinator = coordinator_leverage_for_trade(&trade_params.pubkey)?;
    let leverage_trader = trade_params.leverage;
    let margin_coordinator = margin_coordinator(trade_params, leverage_coordinator);
    let margin_trader = margin_trader(trade_params);
    let order_matching_fee = trade_params.order_matching_fee();
    // The coordinator always takes the opposite side of the trader's position.
    let coordinator_direction = trade_params.direction.opposite();
    // How many coins the coordinator will keep outside of the bet. They still go in the DLC
    // channel, but the payout will be at least this much for the coordinator.
    //
    // The order-matching fee is added on the coordinator's side before subtracting the
    // coordinator margin, since the fee is collected from the trader (see the trader reserve
    // computation below, where it is subtracted).
    //
    // TODO: Do we want to let the coordinator use accrued order-matching fees as margin?
    // Probably not.
    let coordinator_collateral_reserve = (coordinator_dlc_channel_collateral
        + order_matching_fee)
        .checked_sub(margin_coordinator)
        .with_context(|| {
            format!(
                "Coordinator cannot trade with more than their total collateral in the \
                 DLC channel: margin ({}) > collateral ({}) + order_matching_fee ({})",
                margin_coordinator, coordinator_dlc_channel_collateral, order_matching_fee
            )
        })?;
    // How many coins the trader will keep outside of the bet. They still go in the DLC channel,
    // but the payout will be at least this much for the trader.
    let trader_collateral_reserve = trader_dlc_channel_collateral
        .checked_sub(order_matching_fee)
        .and_then(|collateral| collateral.checked_sub(margin_trader))
        .with_context(|| {
            format!(
                "Trader cannot trade with more than their total collateral in the \
                 DLC channel: margin ({}) + order_matching_fee ({}) > collateral ({})",
                margin_trader, order_matching_fee, trader_dlc_channel_collateral
            )
        })?;
    tracing::debug!(
        %peer_id,
        order_id = %trade_params.filled_with.order_id,
        leverage_coordinator,
        margin_coordinator_sat = %margin_coordinator,
        margin_trader_sat = %margin_trader,
        coordinator_collateral_reserve_sat = %coordinator_collateral_reserve,
        trader_collateral_reserve_sat = %trader_collateral_reserve,
        order_matching_fee_sat = %order_matching_fee,
        "DLC channel update parameters"
    );
    let contract_descriptor = payout_curve::build_contract_descriptor(
        initial_price,
        margin_coordinator,
        margin_trader,
        leverage_coordinator,
        leverage_trader,
        coordinator_direction,
        coordinator_collateral_reserve,
        trader_collateral_reserve,
        trade_params.quantity,
        trade_params.contract_symbol,
    )
    .context("Could not build contract descriptor")?;
    let contract_symbol = trade_params.contract_symbol.label();
    let maturity_time = trade_params.filled_with.expiry_timestamp;
    let maturity_time = maturity_time.unix_timestamp();
    let sats_per_vbyte = self
        .node
        .inner
        .fee_rate_estimator
        .get(ConfirmationTarget::Normal)
        .as_sat_per_vb()
        .round();
    // This fee rate is actually ignored since the fee reserve is defined when the channel is
    // first opened.
    let fee_rate = Decimal::try_from(sats_per_vbyte)?
        .to_u64()
        .context("failed to convert to u64")?;
    // The contract input to be used for setting up the trade between the trader and the
    // coordinator. The oracle event ID is the contract symbol label concatenated with the
    // maturity timestamp.
    let event_id = format!("{contract_symbol}{maturity_time}");
    tracing::debug!(
        event_id,
        oracle=%trade_params.filled_with.oracle_pk,
        "Proposing DLC channel update"
    );
    let contract_input = ContractInput {
        offer_collateral: coordinator_dlc_channel_collateral.to_sat(),
        accept_collateral: trader_dlc_channel_collateral.to_sat(),
        fee_rate,
        contract_infos: vec![ContractInputInfo {
            contract_descriptor,
            oracles: OracleInput {
                public_keys: vec![to_xonly_pk_29(trade_params.filled_with.oracle_pk)],
                event_id,
                threshold: 1,
            },
        }],
    };
    let protocol_id = ProtocolId::new();
    let channel = self.node.inner.get_dlc_channel_by_id(&dlc_channel_id)?;
    // NOTE(review): the channel's reference ID appears to point at the protocol that last
    // updated the channel (if any) — confirm against `propose_reopen_or_resize`.
    let previous_protocol_id = match channel.get_reference_id() {
        Some(reference_id) => Some(ProtocolId::try_from(reference_id)?),
        None => None,
    };
    let temporary_contract_id = self
        .node
        .inner
        .propose_reopen_or_resize(
            trade_params.filled_with.clone(),
            &dlc_channel_id,
            contract_input,
            protocol_id,
        )
        .await
        .context("Could not propose reopen DLC channel update")?;
    let protocol_executor = dlc_protocol::DlcProtocolExecutor::new(self.node.pool.clone());
    protocol_executor.start_open_position_protocol(
        protocol_id,
        previous_protocol_id,
        &temporary_contract_id,
        &channel.get_id(),
        trade_params,
    )?;
    // TODO(holzeis): The position should only get created after the dlc protocol has finished
    // successfully.
    self.persist_position(
        conn,
        trade_params,
        temporary_contract_id,
        leverage_coordinator,
        stable,
        order_matching_fee,
    )
    .await
}
async fn resize_position(
&self,
conn: &mut PgConnection,
dlc_channel_id: DlcChannelId,
position: &Position,
trade_params: &TradeParams,
resize_action: ResizeAction,
) -> Result<()> {
if !self
.node
.inner
.check_if_signed_channel_is_confirmed(position.trader)
.await?
{
bail!("Underlying DLC channel not yet confirmed.");
}
let peer_id = trade_params.pubkey;
// Update position based on the outstanding funding fee events _before_ applying resize.
let funding_fee_events =
get_outstanding_funding_fee_events(conn, position.trader, position.id)?;
let funding_fee = funding_fee_from_funding_fee_events(&funding_fee_events);
let maintenance_margin_rate = {
Decimal::try_from(self.node.settings.read().await.maintenance_margin_rate)
.expect("to fit")
};
let position = position.apply_funding_fee(funding_fee, maintenance_margin_rate);
let (collateral_reserve_coordinator, collateral_reserve_trader) = self
.node
.apply_funding_fee_to_channel(dlc_channel_id, funding_fee)?;
tracing::info!(
%peer_id,
order_id = %trade_params.filled_with.order_id,
channel_id = %hex::encode(dlc_channel_id),
?resize_action,
?position,
?trade_params,
?collateral_reserve_coordinator,
?collateral_reserve_trader,
"Resizing position"
);
if !funding_fee_events.is_empty() {
tracing::debug!(
?funding_fee,
?funding_fee_events,
"Resolving funding fee events when resizing position"
);
}
let order_matching_fee = trade_params.order_matching_fee();
// The leverage does not change when we resize a position.
let resized_position = apply_resize_to_position(
resize_action,
&position,
collateral_reserve_coordinator,
collateral_reserve_trader,
order_matching_fee,
maintenance_margin_rate,
)?;
let leverage_coordinator = position.coordinator_leverage;
let leverage_trader = position.trader_leverage;
tracing::debug!(
%peer_id,
order_id = %trade_params.filled_with.order_id,
leverage_coordinator,
leverage_trader,
%order_matching_fee,
?resized_position,
"DLC channel update parameters"
);
let ResizedPosition {
contracts,
coordinator_direction,
average_execution_price,
coordinator_liquidation_price,
trader_liquidation_price,
margin_coordinator,
margin_trader,
collateral_reserve_coordinator,
collateral_reserve_trader,
realized_pnl,
} = resized_position;
let contract_descriptor = payout_curve::build_contract_descriptor(
average_execution_price,
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | true |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/trade/models.rs | coordinator/src/trade/models.rs | use bitcoin::secp256k1::PublicKey;
use bitcoin::Amount;
use time::OffsetDateTime;
use xxi_node::commons::ContractSymbol;
use xxi_node::commons::Direction;
/// Data required to insert a new trade record into the database.
#[derive(Debug)]
pub struct NewTrade {
    /// ID of the position this trade belongs to.
    pub position_id: i32,
    pub contract_symbol: ContractSymbol,
    /// Public key identifying the trader.
    pub trader_pubkey: PublicKey,
    /// Quantity traded.
    pub quantity: f32,
    pub trader_leverage: f32,
    pub trader_direction: Direction,
    /// Average execution price of the trade.
    pub average_price: f32,
    /// Fee charged for matching the order.
    pub order_matching_fee: Amount,
    /// Realized PnL of the trader in sats, if any.
    pub trader_realized_pnl_sat: Option<i64>,
}
/// A trade record as stored in the database.
#[derive(Debug)]
pub struct Trade {
    /// Database ID of the trade.
    pub id: i32,
    /// ID of the position this trade belongs to.
    pub position_id: i32,
    pub contract_symbol: ContractSymbol,
    /// Public key identifying the trader.
    pub trader_pubkey: PublicKey,
    /// Quantity traded.
    pub quantity: f32,
    pub trader_leverage: f32,
    pub direction: Direction,
    /// Average execution price of the trade.
    pub average_price: f32,
    /// When the trade was recorded.
    pub timestamp: OffsetDateTime,
    /// Fee charged for matching the order.
    pub order_matching_fee: Amount,
    /// Realized PnL of the trader in sats, if any.
    pub trader_realized_pnl_sat: Option<i64>,
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/funding_fee/db.rs | coordinator/src/funding_fee/db.rs | pub mod funding_fee_events;
pub mod funding_rates;
pub mod protocol_funding_fee_events;
pub use funding_fee_events::*;
pub use funding_rates::*;
pub use protocol_funding_fee_events::*;
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/funding_fee/db/protocol_funding_fee_events.rs | coordinator/src/funding_fee/db/protocol_funding_fee_events.rs | //! The `protocol_funding_fee_events` table defines the relationship between funding fee events and
//! the DLC protocol that will resolve them.
use crate::schema::protocol_funding_fee_events;
use diesel::prelude::*;
use xxi_node::node::ProtocolId;
/// Link the funding fee events that will be resolved by the given DLC protocol.
///
/// Inserting an empty set of event IDs is a no-op.
pub fn insert_protocol_funding_fee_event(
    conn: &mut PgConnection,
    protocol_id: ProtocolId,
    funding_fee_event_ids: &[i32],
) -> QueryResult<()> {
    // Nothing to link for this protocol.
    if funding_fee_event_ids.is_empty() {
        tracing::debug!(
            %protocol_id,
            "Protocol without outstanding funding fee events"
        );
        return Ok(());
    }
    // One link row per funding fee event, all pointing at the same protocol.
    let rows: Vec<_> = funding_fee_event_ids
        .iter()
        .copied()
        .map(|event_id| {
            (
                protocol_funding_fee_events::protocol_id.eq(protocol_id.to_uuid()),
                protocol_funding_fee_events::funding_fee_event_id.eq(event_id),
            )
        })
        .collect();
    let inserted = diesel::insert_into(protocol_funding_fee_events::table)
        .values(rows)
        .execute(conn)?;
    // A non-empty insert must affect at least one row.
    if inserted == 0 {
        return Err(diesel::result::Error::NotFound);
    }
    Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/funding_fee/db/funding_rates.rs | coordinator/src/funding_fee/db/funding_rates.rs | use crate::schema::funding_rates;
use anyhow::Result;
use diesel::prelude::*;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use time::OffsetDateTime;
use xxi_node::commons::to_nearest_hour_in_the_past;
/// Insertable row for the `funding_rates` table.
#[derive(Insertable, Debug)]
#[diesel(table_name = funding_rates)]
struct NewFundingRate {
    /// Start of the funding period.
    start_date: OffsetDateTime,
    /// End of the funding period; used as the conflict target on insert, so it
    /// uniquely identifies a funding rate.
    end_date: OffsetDateTime,
    rate: f32,
}
/// Queryable row of the `funding_rates` table.
#[derive(Queryable, Debug)]
struct FundingRate {
    /// Row ID; not read in code (hence the leading underscore).
    #[diesel(column_name = "id")]
    _id: i32,
    /// Start of the funding period.
    start_date: OffsetDateTime,
    /// End of the funding period.
    end_date: OffsetDateTime,
    rate: f32,
    /// Row timestamp; not read in code.
    #[diesel(column_name = "timestamp")]
    _timestamp: OffsetDateTime,
}
pub fn insert_funding_rates(
conn: &mut PgConnection,
funding_rates: &[xxi_node::commons::FundingRate],
) -> Result<()> {
let funding_rates = funding_rates
.iter()
.copied()
.map(NewFundingRate::from)
.collect::<Vec<_>>();
diesel::insert_into(funding_rates::table)
.values(funding_rates)
.on_conflict(funding_rates::end_date)
.do_nothing()
.execute(conn)?;
Ok(())
}
/// Get the funding rate with the latest end date, if any exists.
pub fn get_next_funding_rate(
    conn: &mut PgConnection,
) -> QueryResult<Option<xxi_node::commons::FundingRate>> {
    let row = funding_rates::table
        .order(funding_rates::end_date.desc())
        .first::<FundingRate>(conn)
        .optional()?;
    Ok(row.map(xxi_node::commons::FundingRate::from))
}
/// Get the funding rate with an end date that is equal to the current date to the nearest hour.
pub fn get_funding_rate_charged_in_the_last_hour(
    conn: &mut PgConnection,
) -> QueryResult<Option<xxi_node::commons::FundingRate>> {
    // Round down to the last full hour; funding rates end on hour boundaries.
    let last_full_hour = to_nearest_hour_in_the_past(OffsetDateTime::now_utc());
    let row = funding_rates::table
        .filter(funding_rates::end_date.eq(last_full_hour))
        .first::<FundingRate>(conn)
        .optional()?;
    Ok(row.map(xxi_node::commons::FundingRate::from))
}
// Map a DB row onto the API-level funding rate type.
impl From<FundingRate> for xxi_node::commons::FundingRate {
    fn from(row: FundingRate) -> Self {
        let rate = Decimal::from_f32(row.rate).expect("to fit");
        xxi_node::commons::FundingRate::new(rate, row.start_date, row.end_date)
    }
}
impl From<xxi_node::commons::FundingRate> for NewFundingRate {
fn from(value: xxi_node::commons::FundingRate) -> Self {
Self {
start_date: value.start_date(),
end_date: value.end_date(),
rate: value.rate().to_f32().expect("to fit"),
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/funding_fee/db/funding_fee_events.rs | coordinator/src/funding_fee/db/funding_fee_events.rs | use crate::db::positions::Position;
use crate::db::positions::PositionState;
use crate::decimal_from_f32;
use crate::f32_from_decimal;
use crate::funding_fee;
use crate::schema::funding_fee_events;
use crate::schema::positions;
use crate::schema::protocol_funding_fee_events;
use bitcoin::secp256k1::PublicKey;
use bitcoin::SignedAmount;
use diesel::prelude::*;
use rust_decimal::Decimal;
use std::str::FromStr;
use time::OffsetDateTime;
use xxi_node::node::ProtocolId;
/// Queryable row of the `funding_fee_events` table.
#[derive(Queryable, Debug)]
struct FundingFeeEvent {
    id: i32,
    /// A positive amount indicates that the trader pays the coordinator; a negative amount
    /// indicates that the coordinator pays the trader.
    amount_sats: i64,
    trader_pubkey: String,
    /// ID of the position this funding fee event belongs to.
    position_id: i32,
    due_date: OffsetDateTime,
    price: f32,
    funding_rate: f32,
    /// Set once the fee has been paid; `None` means the event is outstanding.
    paid_date: Option<OffsetDateTime>,
    /// Row timestamp; not read in code (hence the leading underscore).
    #[diesel(column_name = "timestamp")]
    _timestamp: OffsetDateTime,
}
/// Insert a new funding fee event for the given trader position.
///
/// A positive `amount` means the trader pays the coordinator; a negative
/// amount means the coordinator pays the trader.
///
/// # Returns
///
/// - `Ok(Some(event))` if the row was inserted.
/// - `Ok(None)` if an equivalent event already exists (unique constraint
///   violation), making the insert idempotent.
pub fn insert(
    conn: &mut PgConnection,
    amount: SignedAmount,
    trader_pubkey: PublicKey,
    position_id: i32,
    due_date: OffsetDateTime,
    price: Decimal,
    funding_rate: Decimal,
) -> QueryResult<Option<funding_fee::FundingFeeEvent>> {
    let res = diesel::insert_into(funding_fee_events::table)
        .values(&(
            funding_fee_events::amount_sats.eq(amount.to_sat()),
            funding_fee_events::trader_pubkey.eq(trader_pubkey.to_string()),
            funding_fee_events::position_id.eq(position_id),
            funding_fee_events::due_date.eq(due_date),
            funding_fee_events::price.eq(f32_from_decimal(price)),
            funding_fee_events::funding_rate.eq(f32_from_decimal(funding_rate)),
        ))
        .get_result::<FundingFeeEvent>(conn);
    match res {
        Ok(funding_fee_event) => Ok(Some(funding_fee::FundingFeeEvent::from(funding_fee_event))),
        // A unique violation means this exact event was already recorded;
        // treat it as a no-op instead of an error.
        Err(diesel::result::Error::DatabaseError(
            diesel::result::DatabaseErrorKind::UniqueViolation,
            _,
        )) => {
            tracing::trace!(
                position_id,
                %trader_pubkey,
                %due_date,
                ?amount,
                "Funding fee event already exists in funding_fee_events table"
            );
            Ok(None)
        }
        Err(e) => Err(e),
    }
}
/// Get all [`funding_fee::FundingFeeEvent`]s for the active positions of a given trader.
///
/// A trader may miss multiple funding fee events, particularly when they go offline. This function
/// allows the coordinator to catch them up on reconnect.
///
/// # Returns
///
/// A list of [`xxi_node::FundingFeeEvent`]s, since these are to be sent to the trader via the
/// `xxi_node::Message::AllFundingFeeEvents` message.
pub fn get_funding_fee_events_for_active_trader_positions(
    conn: &mut PgConnection,
    trader_pubkey: PublicKey,
) -> QueryResult<Vec<xxi_node::FundingFeeEvent>> {
    // Join each funding fee event with its position, so that the position's
    // contract data can be included in the outgoing message.
    let funding_fee_events: Vec<(FundingFeeEvent, Position)> = funding_fee_events::table
        .filter(funding_fee_events::trader_pubkey.eq(trader_pubkey.to_string()))
        .inner_join(positions::table.on(positions::id.eq(funding_fee_events::position_id)))
        // Only consider positions that are currently active: open, resizing or
        // rolling over.
        .filter(
            positions::position_state
                .eq(PositionState::Open)
                .or(positions::position_state.eq(PositionState::Resizing))
                .or(positions::position_state.eq(PositionState::Rollover)),
        )
        .load(conn)?;
    let funding_fee_events = funding_fee_events
        .into_iter()
        .map(|(e, p)| xxi_node::FundingFeeEvent {
            contract_symbol: p.contract_symbol.into(),
            contracts: decimal_from_f32(p.quantity),
            direction: p.trader_direction.into(),
            price: decimal_from_f32(e.price),
            fee: SignedAmount::from_sat(e.amount_sats),
            due_date: e.due_date,
        })
        .collect();
    Ok(funding_fee_events)
}
/// Get the unpaid [`funding_fee::FundingFeeEvent`]s for a trader position.
pub fn get_outstanding_funding_fee_events(
    conn: &mut PgConnection,
    trader_pubkey: PublicKey,
    position_id: i32,
) -> QueryResult<Vec<funding_fee::FundingFeeEvent>> {
    // Chained `filter` calls are combined with AND. An unset `paid_date` means
    // the funding fee has not been paid yet.
    let rows: Vec<FundingFeeEvent> = funding_fee_events::table
        .filter(funding_fee_events::trader_pubkey.eq(trader_pubkey.to_string()))
        .filter(funding_fee_events::position_id.eq(position_id))
        .filter(funding_fee_events::paid_date.is_null())
        .load(conn)?;
    let events = rows
        .into_iter()
        .map(funding_fee::FundingFeeEvent::from)
        .collect();
    Ok(events)
}
/// Mark all funding fee events resolved by the given protocol as paid and
/// remove their link rows from the `protocol_funding_fee_events` table.
///
/// Runs in a single transaction so that the update and the cleanup either both
/// happen or neither does. A protocol without linked events is a no-op.
pub fn mark_funding_fee_event_as_paid(
    conn: &mut PgConnection,
    protocol_id: ProtocolId,
) -> QueryResult<()> {
    conn.transaction(|conn| {
        // Find all funding fee event IDs that were just paid by this protocol.
        let funding_fee_event_ids: Vec<i32> = protocol_funding_fee_events::table
            .select(protocol_funding_fee_events::funding_fee_event_id)
            .filter(protocol_funding_fee_events::protocol_id.eq(protocol_id.to_uuid()))
            .load(conn)?;
        if funding_fee_event_ids.is_empty() {
            tracing::debug!(%protocol_id, "No funding fee events paid by protocol");
            return QueryResult::Ok(());
        }
        let now = OffsetDateTime::now_utc();
        // Mark funding fee events as paid.
        diesel::update(
            funding_fee_events::table.filter(funding_fee_events::id.eq_any(&funding_fee_event_ids)),
        )
        .set(funding_fee_events::paid_date.eq(now))
        .execute(conn)?;
        // Delete the resolved link rows from the `protocol_funding_fee_events` table.
        //
        // Fix: this previously filtered on `protocol_funding_fee_events::id` (the
        // table's own primary key) using funding fee event IDs, which would delete
        // unrelated rows or none at all. Deleting by `protocol_id` removes exactly
        // the link rows loaded above.
        diesel::delete(
            protocol_funding_fee_events::table
                .filter(protocol_funding_fee_events::protocol_id.eq(protocol_id.to_uuid())),
        )
        .execute(conn)?;
        QueryResult::Ok(())
    })?;
    Ok(())
}
/// Convert a borrowed DB row into the domain-level funding fee event.
impl From<&FundingFeeEvent> for funding_fee::FundingFeeEvent {
    fn from(value: &FundingFeeEvent) -> Self {
        Self {
            id: value.id,
            amount: SignedAmount::from_sat(value.amount_sats),
            // The DB stores the pubkey as text; a malformed value would be a
            // data-integrity bug, hence the `expect`.
            trader_pubkey: PublicKey::from_str(value.trader_pubkey.as_str())
                .expect("to be valid pk"),
            position_id: value.position_id,
            due_date: value.due_date,
            price: decimal_from_f32(value.price),
            funding_rate: decimal_from_f32(value.funding_rate),
            paid_date: value.paid_date,
        }
    }
}
/// Convert an owned DB row into the domain-level funding fee event.
impl From<FundingFeeEvent> for funding_fee::FundingFeeEvent {
    fn from(value: FundingFeeEvent) -> Self {
        // Delegate to the by-reference conversion to avoid duplicating the
        // field mapping in two impls.
        Self::from(&value)
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
ctsrc/Pgen | https://github.com/ctsrc/Pgen/blob/7f54478e9f5947b5b146d6e5eab4c3bd224fa566/crates/pgen/src/bip39_algorithm.rs | crates/pgen/src/bip39_algorithm.rs | /*
* Copyright (c) 2024 Erik Nordstrøm <erik@nordstroem.no>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#![forbid(unsafe_code)]
use bip39_lexical_data::WL_BIP39;
use sha2::{Digest, Sha256};
/// Calculate BIP39 checksum (CS) bits given entropy bits.
fn calculate_cs_bits(ent: &[u8]) -> u8 {
    // BIP39 defines CS = ENT / 32, i.e. one checksum bit per four bytes of
    // entropy: 16 bytes -> 4 bits, 20 -> 5, 24 -> 6, 28 -> 7, 32 -> 8.
    //
    // No other entropy length is valid. Since this function is internal to the
    // program and only ever called with supported lengths, any other length is
    // a fatal coding error, so we panic via `unreachable!()` instead of
    // returning an error.
    let checksum_bits = match ent.len() {
        len @ (16 | 20 | 24 | 28 | 32) => len / 4,
        _ => unreachable!(),
    };
    let mut hasher = Sha256::new();
    hasher.update(ent);
    let digest = hasher.finalize();
    // The checksum is the top `checksum_bits` bits of the first digest byte.
    digest[0] >> (8 - checksum_bits)
}
/// Get BIP39 English word from 11 bits.
fn get_word_from_11_bits(value: u16) -> &'static str {
    // The wordlist has exactly 2048 (2^11) entries, so the caller must ensure
    // that only the lower 11 bits are set; anything larger is a caller bug.
    const MAX_ACCEPTABLE_VALUE: u16 = 0b11111111111;
    match value {
        0..=MAX_ACCEPTABLE_VALUE => WL_BIP39[usize::from(value)],
        _ => unreachable!(),
    }
}
/// Extract 11 bit chunks from entropy bytes. Alternate implementation.
///
/// Returns a `Vec<u16>` of 11 bit chunks, along with an `usize` specifying
/// the number of bits that are left over for checksum in the last `u16` element of the `Vec`.
///
/// NOTE(review): work in progress — ends in `todo!()`. Only the widening of
/// input bytes into `u128` groups is implemented so far.
fn chunk_to_11_bit_groups_alt_via_u128(ent: &[u8]) -> (Vec<u16>, usize) {
    // This function pads the last `u16` of output with zeros, leaving space for checksum.
    // The checksum bits can then be added to the result elsewhere. Adding checksum is not
    // a responsibility of this function.
    let (chunk_size, checksum_num_bits): (usize, usize) = match ent.len() {
        16 => (16, 4), // one full u128
        20 => (4, 5), // five u128 with 32 bits used each
        24 => (8, 6), // two u128 with 64 bits used each
        28 => (4, 7), // seven u128 with 32 bits used each
        32 => (16, 8), // two full u128
        // Caller is responsible for ensuring that array length matches one of the BIP39
        // valid number of entropy bytes, available above. Since the chunk function is crate internal,
        // we can assume that this is taken into account, and we can simply panic if it's not.
        // No point in returning an error as the situation would be unrecoverable anyway.
        _ => unreachable!(),
    };
    eprintln!("u128 has size {}", size_of::<u128>());
    // Widen each input chunk into the most significant bits of a u128. The
    // byte counts match the `try_into` target sizes: 16 bytes -> u128,
    // 8 bytes -> u64 shifted up, 4 bytes -> u32 shifted up.
    let groups_128 = ent
        .chunks(chunk_size)
        .map(|c| match ent.len() {
            16 | 32 => u128::from_be_bytes(c.try_into().unwrap()),
            24 => (u64::from_be_bytes(c.try_into().unwrap()) as u128) << 64,
            _ => (u32::from_be_bytes(c.try_into().unwrap()) as u128) << 96,
        })
        .collect::<Vec<_>>();
    for group_128 in groups_128 {
        eprintln!("Group {group_128:#0128b}");
    }
    // TODO: Continue implementation of this function.
    todo!();
}
/// Extract 11 bit chunks from entropy bytes.
///
/// Returns a `Vec<u16>` of 11 bit chunks, along with an `usize` specifying
/// the number of bits that are left over for checksum in the last `u16` element of the `Vec`.
///
/// The final partial chunk (if any) is left-shifted so that its low bits are
/// zero; those low bits are where the BIP39 checksum can later be placed.
fn chunk_to_11_bit_groups(ent: &[u8]) -> (Vec<u16>, usize) {
    let mut chunks = vec![];
    // The chunk currently being filled, and how many bits it still needs to be
    // a complete 11-bit group.
    let mut current_chunk = 0u16;
    let mut bits_missing = 11;
    for &input_byte in ent.iter() {
        // Number of bits of the current input byte not yet consumed.
        let mut bits_left = 8;
        // Consume the whole input byte, possibly across chunk boundaries.
        while bits_left != 0 {
            // Take as many bits as the chunk still needs, capped by what is
            // left in the input byte.
            let take_n_bits = if bits_missing >= bits_left {
                bits_left
            } else {
                bits_missing
            };
            // Build a mask selecting the `take_n_bits` highest of the
            // not-yet-consumed bits of the input byte.
            let mask_take_bits = (0xffu16 << (8 - take_n_bits)) as u8;
            let mask_take_bits = mask_take_bits >> (8 - bits_left);
            let mut bits_taken = input_byte & mask_take_bits;
            bits_missing -= take_n_bits;
            bits_left -= take_n_bits;
            // Make room in the chunk, right-align the taken bits and append
            // them.
            current_chunk <<= take_n_bits;
            bits_taken >>= bits_left;
            current_chunk ^= bits_taken as u16;
            // Chunk complete: store it and start a fresh one.
            if bits_missing == 0 {
                chunks.push(current_chunk);
                current_chunk = 0;
                bits_missing = 11;
            }
        }
    }
    if bits_missing != 11 {
        // Left-align the partial chunk; the low `bits_missing` zero bits are
        // reserved for the checksum.
        current_chunk <<= bits_missing;
        chunks.push(current_chunk);
        (chunks, bits_missing)
    } else {
        // The entropy filled the chunks exactly; no checksum space left over.
        (chunks, 0)
    }
}
#[cfg(test)]
mod test {
use crate::bip39_algorithm::{
calculate_cs_bits, chunk_to_11_bit_groups, get_word_from_11_bits,
};
use test_case::test_case;
// From <https://github.com/trezor/python-mnemonic/blob/b57a5ad77a981e743f4167ab2f7927a55c1e82a8/vectors.json#L3-L8>:
//
// ```json
// [
// "00000000000000000000000000000000",
// "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about",
// "c55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04",
// "xprv9s21ZrQH143K3h3fDYiay8mocZ3afhfULfb5GX8kCBdno77K4HiA15Tg23wpbeF1pLfs1c5SPmYHrEpTuuRhxMwvKDwqdKiGJS9XFKzUsAF"
// ],
// ```
//
// - 128 bits of "entropy" (all zero in this case).
// - The 12th word in the mnemonic sentence is the 4th word (index 3) in the BIP39 English wordlist.
#[test_case(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], 3; "with 128 bits of input of all zeros")]
// From <https://github.com/trezor/python-mnemonic/blob/b57a5ad77a981e743f4167ab2f7927a55c1e82a8/vectors.json#L27-L32>:
//
// ```json
// [
// "000000000000000000000000000000000000000000000000",
// "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon agent",
// "035895f2f481b1b0f01fcf8c289c794660b289981a78f8106447707fdd9666ca06da5a9a565181599b79f53b844d8a71dd9f439c52a3d7b3e8a79c906ac845fa",
// "xprv9s21ZrQH143K3mEDrypcZ2usWqFgzKB6jBBx9B6GfC7fu26X6hPRzVjzkqkPvDqp6g5eypdk6cyhGnBngbjeHTe4LsuLG1cCmKJka5SMkmU"
// ],
// ```
//
// - 192 bits of "entropy" (all zero in this case).
// - The 18th word in the mnemonic sentence is the 40th word (index 39) in the BIP39 English wordlist.
#[test_case(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], 39; "with 192 bits of input of all zeros")]
// From <https://github.com/trezor/python-mnemonic/blob/b57a5ad77a981e743f4167ab2f7927a55c1e82a8/vectors.json#L51-L56>:
//
// ```json
// [
// "0000000000000000000000000000000000000000000000000000000000000000",
// "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art",
// "bda85446c68413707090a52022edd26a1c9462295029f2e60cd7c4f2bbd3097170af7a4d73245cafa9c3cca8d561a7c3de6f5d4a10be8ed2a5e608d68f92fcc8",
// "xprv9s21ZrQH143K32qBagUJAMU2LsHg3ka7jqMcV98Y7gVeVyNStwYS3U7yVVoDZ4btbRNf4h6ibWpY22iRmXq35qgLs79f312g2kj5539ebPM"
// ],
// ```
//
// - 256 bits of "entropy" (all zero in this case).
// - The 24th word in the mnemonic sentence is the 103rd word (index 102) in the BIP39 English wordlist.
#[test_case(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], 102; "with 256 bits of input of all zeros")]
fn calculates_cs_bits_correctly(ent: &[u8], cs_expected: u8) {
let cs_actual = calculate_cs_bits(ent);
assert_eq!(cs_expected, cs_actual);
}
#[test_case(0, "abandon"; "first word in list (index 0)")]
#[test_case(3, "about")]
#[test_case(102, "art")]
#[test_case(2047, "zoo"; "last word in list (index 2047)")]
fn gets_correct_word_from_11_bits(value: u16, expected_word: &str) {
let actual_word = get_word_from_11_bits(value);
assert_eq!(expected_word, actual_word);
}
#[test]
#[should_panic]
fn get_word_should_panic_when_more_than_11_bits_are_set() {
let value = 2048u16;
let _ = get_word_from_11_bits(value);
}
// 128 bits of input should have 12 chunks of output, with 4 bits left in last byte for checksum, according to BIP39.
#[test_case(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], &[0,0,0,0,0,0,0,0,0,0,0,0], 4; "with 128 bits of input of all zeros")]
#[test_case(&[0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff], &[0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0b11111110000], 4; "with 128 bits of input of all ones")]
#[test_case(&[0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0], &[2040,63,1537,2032,127,1027,2016,255,7,1984,510,0], 4; "with 128 bits of input alternating between bytes all one and all zero")]
#[test_case(&[0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff], &[7,1984,510,15,1920,1020,31,1792,2040,63,1537,2032], 4; "with 128 bits of input alternating between bytes all zero and all one")]
#[test_case(&[0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa], &[1365,682,1365,682,1365,682,1365,682,1365,682,1365,672], 4; "with 128 bits of input alternating bits between one and zero")]
#[test_case(&[0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55], &[682,1365,682,1365,682,1365,682,1365,682,1365,682,1360], 4; "with 128 bits of input alternating bits between zero and one")]
#[test_case(&[0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99], &[1228,1638,819,409,1228,1638,819,409,1228,1638,819,400], 4; "with 128 bits of input repeating 0b10011001 pattern")]
#[test_case(&[0x2,0x4,0x8,0x10,0x20,0x40,0x81,0x2,0x4,0x8,0x10,0x20,0x40,0x81,0x2,0x4], &[16,258,32,516,64,1032,129,16,258,32,516,64], 4; "with 128 bits of input having every seventh bit set")]
#[test_case(&[0,0x20,0x4,0,0x80,0x10,0x2,0,0x40,0x8,0x1,0,0x20,0x4,0,0x80], &[1,1,1,1,1,1,1,1,1,1,1,0], 4; "with 128 bits of input having every eleventh bit set")]
#[test_case(&[0xff,0xdf,0xfb,0xff,0x7f,0xef,0xfd,0xff,0xbf,0xf7,0xfe,0xff,0xdf,0xfb,0xff,0x7f], &[2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2032], 4; "with 128 bits of input having all but every eleventh bit set")]
#[test_case(&[0,0,0x20,0,0x4,0,0,0x80,0,0x10,0,0x2,0,0,0x40,0], &[0,8,0,64,0,512,2,0,16,0,128,0], 4; "with 128 bits of input having every nineteenth bit set")]
#[test_case(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1], &[0,0,0,0,0,0,0,0,0,0,0,16], 4; "with 128 bits of input, having only the very last bit set")]
// 160 bits of input should have 15 chunks of output, with 5 bits left in last byte for checksum, according to BIP39.
#[test_case(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], &[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], 5; "with 160 bits of input of all zeros")]
#[test_case(&[0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff], &[0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0b11111100000], 5; "with 160 bits of input of all ones")]
#[test_case(&[0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0], &[2040,63,1537,2032,127,1027,2016,255,7,1984,510,15,1920,1020,0], 5; "with 160 bits of input alternating between bytes all one and all zero")]
#[test_case(&[0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff], &[7,1984,510,15,1920,1020,31,1792,2040,63,1537,2032,127,1027,2016], 5; "with 160 bits of input alternating between bytes all zero and all one")]
#[test_case(&[0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa], &[1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1344], 5; "with 160 bits of input alternating bits between one and zero")]
#[test_case(&[0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55], &[682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,672], 5; "with 160 bits of input alternating bits between zero and one")]
#[test_case(&[0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99], &[1228,1638,819,409,1228,1638,819,409,1228,1638,819,409,1228,1638,800], 5; "with 160 bits of input repeating 0b10011001 pattern")]
#[test_case(&[0x2,0x4,0x8,0x10,0x20,0x40,0x81,0x2,0x4,0x8,0x10,0x20,0x40,0x81,0x2,0x4,0x8,0x10,0x20,0x40], &[16,258,32,516,64,1032,129,16,258,32,516,64,1032,129,0], 5; "with 160 bits of input having every seventh bit set")]
#[test_case(&[0,0x20,0x4,0,0x80,0x10,0x2,0,0x40,0x8,0x1,0,0x20,0x4,0,0x80,0x10,0x2,0,0x40], &[1,1,1,1,1,1,1,1,1,1,1,1,1,1,0], 5; "with 160 bits of input having every eleventh bit set")]
#[test_case(&[0xff,0xdf,0xfb,0xff,0x7f,0xef,0xfd,0xff,0xbf,0xf7,0xfe,0xff,0xdf,0xfb,0xff,0x7f,0xef,0xfd,0xff,0xbf], &[2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2016], 5; "with 160 bits of input having all but every eleventh bit set")]
#[test_case(&[0,0,0x20,0,0x4,0,0,0x80,0,0x10,0,0x2,0,0,0x40,0,0x8,0,0x1,0], &[0,8,0,64,0,512,2,0,16,0,128,0,1024,4,0], 5; "with 160 bits of input having every nineteenth bit set")]
#[test_case(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1], &[0,0,0,0,0,0,0,0,0,0,0,0,0,0,32], 5; "with 160 bits of input, having only the very last bit set")]
// 192 bits of input should have 18 chunks of output, with 6 bits left in last byte for checksum, according to BIP39.
#[test_case(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], &[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], 6; "with 192 bits of input of all zeros")]
#[test_case(&[0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff], &[0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0b11111000000], 6; "with 192 bits of input of all ones")]
#[test_case(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1], &[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,64], 6; "with 192 bits of input, having only the very last bit set")]
// 224 bits of input should have 21 chunks of output, with 7 bits left in last byte for checksum, according to BIP39.
#[test_case(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], &[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], 7; "with 224 bits of input of all zeros")]
#[test_case(&[0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff], &[0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0b11110000000], 7; "with 224 bits of input of all ones")]
#[test_case(&[0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0], &[2040,63,1537,2032,127,1027,2016,255,7,1984,510,15,1920,1020,31,1792,2040,63,1537,2032,0], 7; "with 224 bits of input alternating between bytes all one and all zero")]
#[test_case(&[0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff], &[7,1984,510,15,1920,1020,31,1792,2040,63,1537,2032,127,1027,2016,255,7,1984,510,15,0b11110000000], 7; "with 224 bits of input alternating between bytes all zero and all one")]
#[test_case(&[0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa], &[1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1280], 7; "with 224 bits of input alternating bits between one and zero")]
#[test_case(&[0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55], &[682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,640], 7; "with 224 bits of input alternating bits between zero and one")]
#[test_case(&[0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99], &[1228,1638,819,409,1228,1638,819,409,1228,1638,819,409,1228,1638,819,409,1228,1638,819,409,1152], 7; "with 224 bits of input repeating 0b10011001 pattern")]
#[test_case(&[0x2,0x4,0x8,0x10,0x20,0x40,0x81,0x2,0x4,0x8,0x10,0x20,0x40,0x81,0x2,0x4,0x8,0x10,0x20,0x40,0x81,0x2,0x4,0x8,0x10,0x20,0x40,0x81], &[16,258,32,516,64,1032,129,16,258,32,516,64,1032,129,16,258,32,516,64,1032,128], 7; "with 224 bits of input having every seventh bit set")]
#[test_case(&[0,0x20,0x4,0,0x80,0x10,0x2,0,0x40,0x8,0x1,0,0x20,0x4,0,0x80,0x10,0x2,0,0x40,0x8,0x1,0,0x20,0x4,0,0x80,0x10], &[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0], 7; "with 224 bits of input having every eleventh bit set")]
#[test_case(&[0xff,0xdf,0xfb,0xff,0x7f,0xef,0xfd,0xff,0xbf,0xf7,0xfe,0xff,0xdf,0xfb,0xff,0x7f,0xef,0xfd,0xff,0xbf,0xf7,0xfe,0xff,0xdf,0xfb,0xff,0x7f,0xef], &[2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,1920], 7; "with 224 bits of input having all but every eleventh bit set")]
#[test_case(&[0,0,0x20,0,0x4,0,0,0x80,0,0x10,0,0x2,0,0,0x40,0,0x8,0,0x1,0,0,0x20,0,0x4,0,0,0x80,0], &[0,8,0,64,0,512,2,0,16,0,128,0,1024,4,0,32,0,256,1,0,0], 7; "with 224 bits of input having every nineteenth bit set")]
#[test_case(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1], &[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,128], 7; "with 224 bits of input, having only the very last bit set")]
// 256 bits of input should have 24 chunks of output, with 8 bits left in last byte for checksum, according to BIP39.
#[test_case(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], &[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], 8; "with 256 bits of input of all zeros")]
#[test_case(&[0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff], &[0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0x7ff,0b11100000000], 8; "with 256 bits of input of all ones")]
#[test_case(&[0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0], &[2040,63,1537,2032,127,1027,2016,255,7,1984,510,15,1920,1020,31,1792,2040,63,1537,2032,127,1027,2016,0], 8; "with 256 bits of input alternating between bytes all one and all zero")]
#[test_case(&[0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff,0,0xff], &[7,1984,510,15,1920,1020,31,1792,2040,63,1537,2032,127,1027,2016,255,7,1984,510,15,1920,1020,31,0b11100000000], 8; "with 256 bits of input alternating between bytes all zero and all one")]
#[test_case(&[0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa,0xaa], &[1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,512], 8; "with 256 bits of input alternating bits between one and zero")]
#[test_case(&[0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55], &[682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1365,682,1280], 8; "with 256 bits of input alternating bits between zero and one")]
#[test_case(&[0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99,0x99], &[1228,1638,819,409,1228,1638,819,409,1228,1638,819,409,1228,1638,819,409,1228,1638,819,409,1228,1638,819,256], 8; "with 256 bits of input repeating 0b10011001 pattern")]
#[test_case(&[0x2,0x4,0x8,0x10,0x20,0x40,0x81,0x2,0x4,0x8,0x10,0x20,0x40,0x81,0x2,0x4,0x8,0x10,0x20,0x40,0x81,0x2,0x4,0x8,0x10,0x20,0x40,0x81,0x2,0x4,0x8,0x10], &[16,258,32,516,64,1032,129,16,258,32,516,64,1032,129,16,258,32,516,64,1032,129,16,258,0], 8; "with 256 bits of input having every seventh bit set")]
#[test_case(&[0,0x20,0x4,0,0x80,0x10,0x2,0,0x40,0x8,0x1,0,0x20,0x4,0,0x80,0x10,0x2,0,0x40,0x8,0x1,0,0x20,0x4,0,0x80,0x10,0x2,0,0x40,0x8], &[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0], 8; "with 256 bits of input having every eleventh bit set")]
#[test_case(&[0xff,0xdf,0xfb,0xff,0x7f,0xef,0xfd,0xff,0xbf,0xf7,0xfe,0xff,0xdf,0xfb,0xff,0x7f,0xef,0xfd,0xff,0xbf,0xf7,0xfe,0xff,0xdf,0xfb,0xff,0x7f,0xef,0xfd,0xff,0xbf,0xf7], &[2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,2046,1792], 8; "with 256 bits of input having all but every eleventh bit set")]
#[test_case(&[0,0,0x20,0,0x4,0,0,0x80,0,0x10,0,0x2,0,0,0x40,0,0x8,0,0x1,0,0,0x20,0,0x4,0,0,0x80,0,0x10,0,0x2,0], &[0,8,0,64,0,512,2,0,16,0,128,0,1024,4,0,32,0,256,1,0,8,0,64,0], 8; "with 256 bits of input having every nineteenth bit set")]
#[test_case(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1], &[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,256], 8; "with 256 bits of input, having only the very last bit set")]
fn chunks_correctly_to_11_bit_groups(
input_ent: &[u8],
expected_chunks: &[u16],
expected_n_cs: usize,
) {
let (actual_chunks, actual_n_cs) = chunk_to_11_bit_groups(input_ent);
// The output chunks should be as we think they should be.
assert_eq!(expected_chunks, actual_chunks);
// The number of lower bits left for checksum in the last output chunk should be as we think it should.
assert_eq!(expected_n_cs, actual_n_cs);
// Only the lower 11 bits should be set in each output chunk.
for actual_chunk in actual_chunks {
assert_eq!(actual_chunk, actual_chunk & 0b11111111111);
}
}
}
| rust | ISC | 7f54478e9f5947b5b146d6e5eab4c3bd224fa566 | 2026-01-04T20:11:15.304186Z | false |
ctsrc/Pgen | https://github.com/ctsrc/Pgen/blob/7f54478e9f5947b5b146d6e5eab4c3bd224fa566/crates/pgen/src/lib.rs | crates/pgen/src/lib.rs | mod bip39_algorithm;
| rust | ISC | 7f54478e9f5947b5b146d6e5eab4c3bd224fa566 | 2026-01-04T20:11:15.304186Z | false |
ctsrc/Pgen | https://github.com/ctsrc/Pgen/blob/7f54478e9f5947b5b146d6e5eab4c3bd224fa566/crates/pgen/src/main.rs | crates/pgen/src/main.rs | /*
* Copyright (c) 2018, 2019, 2023, 2024 Erik Nordstrøm <erik@nordstroem.no>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#![forbid(unsafe_code)]
use bip39_lexical_data::WL_BIP39;
use clap::{Parser, ValueEnum};
use eff_lexical_data::{WL_AUTOCOMPLETE, WL_LONG, WL_SHORT};
use rand::thread_rng;
use rand::Rng;
use std::io::{stdin, stdout, Write};
use thiserror::Error;
// NOTE: the `///` doc comments on this struct and on each field double as the
// clap-generated `--help` text, so changing their wording changes the CLI's
// runtime output.
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
struct Cli {
    /// Use physical six-sided dice instead of letting the computer pick words
    #[arg(short = 'd', long = "dice")]
    use_physical_dice: bool,
    /// Select wordlist to use
    #[arg(short = 'w', long = "wordlist", value_enum, default_value_t)]
    use_wlist: WordlistChoice,
    /// Specify the number of passphrases to generate k
    #[arg(short, default_value_t = 1, value_name = "k")]
    k: u32,
    /// Specify the number of words to use
    // None means "use the per-wordlist default" (10 for EFF long, 12 otherwise;
    // see the resolution logic in main).
    #[arg(short, value_name = "n")]
    n: Option<usize>,
    /// Calculate and print the entropy for the passphrase(s) that would be generated with the given settings
    #[arg(short = 'e')]
    calculate_entropy: bool,
}
#[derive(Eq, PartialEq, Copy, Clone, Debug, ValueEnum)]
enum WordlistChoice {
/// EFF's Short Wordlist #2
///
/// Features:
/// - Each word has a unique three-character prefix. This means that software could
/// auto-complete words in the passphrase after the user has typed the first three characters.
/// - All words are at least an edit distance of 3 apart. This means that software could
/// correct any single typo in the user's passphrase (and in many cases more than one typo).
///
/// Details:
/// - <https://www.eff.org/deeplinks/2016/07/new-wordlists-random-passphrases>
/// - <https://www.eff.org/dice>
EffAutocomplete,
/// EFF's Long Wordlist
///
/// Features:
/// - Contains words that are easy to type and remember.
/// - Built from a list of words that prioritizes the most recognized words
/// and then the most concrete words.
/// - Manually checked by EFF and attempted to remove as many profane, insulting, sensitive,
/// or emotionally-charged words as possible, and also filtered based on several public
/// lists of vulgar English words.
///
/// Details:
/// - <https://www.eff.org/deeplinks/2016/07/new-wordlists-random-passphrases>
/// - <https://www.eff.org/dice>
EffLong,
/// EFF's Short Wordlist #1
///
/// Features:
/// - Designed to include the 1,296 most memorable and distinct words.
///
/// Details:
/// - <https://www.eff.org/deeplinks/2016/07/new-wordlists-random-passphrases>
/// - <https://www.eff.org/dice>
EffShort,
/// BIP39 wordlist
///
/// Details:
/// - <https://en.bitcoin.it/wiki/BIP_0039>
/// - <https://en.bitcoin.it/wiki/Seed_phrase>
Bip39,
}
impl Default for WordlistChoice {
fn default() -> Self {
Self::EffAutocomplete
}
}
// CLI error type, surfaced to the user through the anyhow::Result returned by main.
#[derive(Debug, Error)]
enum Error {
    // Raised when `-n` is combined with the BIP39 wordlist but is not one of
    // the mnemonic sentence lengths BIP39 allows (12, 15, 18, 21 or 24 words).
    #[error("Invalid number of words for BIP39: {0}")]
    Bip39MSLenInvalid(usize),
}
/// Entry point: parses the CLI arguments, then either reports the entropy of
/// the configured passphrase scheme (`-e`) or generates `k` passphrases from
/// the selected wordlist, using either the thread RNG or physical dice input.
fn main() -> anyhow::Result<()> {
    let cli = Cli::parse();

    let wordlist = match cli.use_wlist {
        WordlistChoice::EffAutocomplete => WL_AUTOCOMPLETE,
        WordlistChoice::EffLong => WL_LONG,
        WordlistChoice::EffShort => WL_SHORT,
        WordlistChoice::Bip39 => WL_BIP39,
    };

    // the EFF wordlists have lengths that are an exact power of 6,
    // whereas the bip39 wordlist does not
    let num_dice: u32 = if cli.use_wlist == WordlistChoice::Bip39 {
        // bip39 has 2048 words, which is a power of 2.
        // we need 11 dice because 6**11 / 3**11 = 2048,
        // i.e. we use 11 dice because it leads to a multiple
        // of the wordlist length
        11
    } else if cli.use_wlist == WordlistChoice::EffLong {
        // EFF long wordlist has 6**5 = 7776 words
        5
    } else {
        // Other EFF wordlists have 6**4 = 1296 words
        4
    };

    let num_passphrases = cli.k;

    let num_words = match cli.n {
        Some(n) => {
            // BIP39 has specific allowable lengths of the generated mnemonic sentence (MS) in words.
            // See <https://en.bitcoin.it/wiki/BIP_0039#Generating_the_mnemonic> for details.
            let bip39_allowable_mnemonic_sentence_lengths: [usize; 5] = [12, 15, 18, 21, 24];
            if cli.use_wlist == WordlistChoice::Bip39
                && !bip39_allowable_mnemonic_sentence_lengths.contains(&n)
            {
                eprintln!("When BIP39 wordlist is used, number of words to use must be one of: {bip39_allowable_mnemonic_sentence_lengths:?}");
                return Err(Error::Bip39MSLenInvalid(n).into());
            }
            n
        }
        None => {
            if cli.use_wlist == WordlistChoice::EffLong {
                10
            } else {
                12
            }
        }
    };

    let stdout = stdout();
    let mut handle = stdout.lock();

    if cli.calculate_entropy {
        // Entropy per word is log2(wordlist length); totals scale linearly
        // with the number of words.
        handle.write_fmt(format_args!(
            "Current settings will create passphrases with {:.2} bits of entropy.\n",
            (num_words as f64) * (wordlist.len() as f64).log2()
        ))?;
    } else {
        // Acquire the RNG handle once, rather than once per passphrase.
        let mut rng = thread_rng();
        for _ in 0..num_passphrases {
            if cli.use_physical_dice {
                let mut word_idx = vec![0usize; num_words];
                let width = format!("{num_words}").len();
                for (i, item) in word_idx.iter_mut().enumerate().take(num_words) {
                    eprint!("Word {:>w$} / {}. ", i + 1, num_words, w = width);
                    // For the sake of the bip39 wordlist, we modulo index by the wordlist length,
                    // because the range of the possible values is a multiple of the wordlist length.
                    //
                    // With the EFF wordlists, the wordlist lengths match the range
                    // of the numbers we get from the dice, so for EFF wordlists
                    // this modulo does not change anything.
                    *item = read_dice(num_dice) % wordlist.len();
                }
                for i in 0..num_words {
                    handle.write_all(wordlist[word_idx[i]].as_bytes())?;
                    if i < (num_words - 1) {
                        handle.write_all(b" ")?;
                    }
                }
            } else {
                for i in 0..num_words {
                    // gen_range over a usize range already yields usize;
                    // the previous `as usize` cast was a no-op.
                    handle.write_all(wordlist[rng.gen_range(0..wordlist.len())].as_bytes())?;
                    if i < (num_words - 1) {
                        handle.write_all(b" ")?;
                    }
                }
            }
            handle.write_all(b"\n")?;
        }
    }

    Ok(())
}
/// Prompts the operator to throw `n` six-sided dice and type in the results.
///
/// Digits `1`-`6` read from stdin are taken as dice rolls (all other
/// characters, including whitespace, are ignored, so rolls may be separated
/// freely). The `n` rolls are interpreted as a base-6 number, yielding a
/// value in `0..6^n`.
///
/// # Panics
///
/// Panics if stdin cannot be read, or if it reaches end-of-file before `n`
/// valid rolls have been entered. (Previously EOF made this loop forever,
/// because `read_line` keeps returning `Ok(0)` with an empty buffer.)
fn read_dice(n: u32) -> usize {
    eprint!("Throw {n} dice and enter the number of eyes shown on each: ");
    let mut result = 0;
    let mut i = 0;
    while i < n {
        let mut input = String::new();
        let bytes_read = stdin().read_line(&mut input).unwrap();
        // read_line returns Ok(0) at EOF; bail out instead of spinning forever.
        if bytes_read == 0 {
            panic!("unexpected end of input while reading dice rolls");
        }
        for c in input.chars() {
            match c {
                '1' | '2' | '3' | '4' | '5' | '6' => {
                    // Each die contributes (eyes - 1) * 6^(position from the right).
                    result += (c.to_digit(10).unwrap() - 1) * (6u32).pow(n - i - 1);
                    i += 1;
                }
                _ => {}
            }
            if i == n {
                break;
            }
        }
    }
    result as usize
}
| rust | ISC | 7f54478e9f5947b5b146d6e5eab4c3bd224fa566 | 2026-01-04T20:11:15.304186Z | false |
ctsrc/Pgen | https://github.com/ctsrc/Pgen/blob/7f54478e9f5947b5b146d6e5eab4c3bd224fa566/crates/bip39-lexical-data/build.rs | crates/bip39-lexical-data/build.rs | use std::env;
use std::fs::File;
use std::io::{BufRead, BufReader, Write};
use std::path::Path;
// https://doc.rust-lang.org/cargo/reference/build-scripts.html#case-study-code-generation
/// Extract words from a simple wordlist (one word per line) and write them
/// into `f_dest` as a `pub const <const_name>: &[&str]` slice literal.
///
/// Each line is trimmed of surrounding whitespace before being emitted.
///
/// # Panics
///
/// Panics if the source file cannot be opened, a line cannot be read, or
/// writing to `f_dest` fails. This is a build script helper, so aborting the
/// build on any error is the desired behavior.
fn words_simple(mut f_dest: &File, const_name: &str, fname_src: &str) {
    write!(f_dest, "pub const {const_name}: &[&str] = &[").unwrap();
    let f_src = BufReader::new(File::open(fname_src).unwrap());
    for line in f_src.lines() {
        // Idiomatic expect() instead of match + panic! on the io::Result;
        // the underlying io::Error now also appears in the panic message.
        let line = line.expect("Unable to read line from internal file");
        let word = line.trim();
        write!(f_dest, "\"{word}\",").unwrap();
    }
    f_dest.write_all(b"];").unwrap();
}
/// Build script entry point: writes `$OUT_DIR/wordlists.rs` containing the
/// BIP39 English wordlist as a `pub const` string slice.
fn main() {
    let dest_path = Path::new(&env::var("OUT_DIR").unwrap()).join("wordlists.rs");
    let dest_file = File::create(dest_path).unwrap();
    words_simple(&dest_file, "WL_BIP39", "data/bip39_en_wordlist.txt");
}
| rust | ISC | 7f54478e9f5947b5b146d6e5eab4c3bd224fa566 | 2026-01-04T20:11:15.304186Z | false |
ctsrc/Pgen | https://github.com/ctsrc/Pgen/blob/7f54478e9f5947b5b146d6e5eab4c3bd224fa566/crates/bip39-lexical-data/src/lib.rs | crates/bip39-lexical-data/src/lib.rs | #![no_std]
#![forbid(unsafe_code)]
// https://doc.rust-lang.org/cargo/reference/build-scripts.html#case-study-code-generation
include!(concat!(env!("OUT_DIR"), "/wordlists.rs"));
| rust | ISC | 7f54478e9f5947b5b146d6e5eab4c3bd224fa566 | 2026-01-04T20:11:15.304186Z | false |
ctsrc/Pgen | https://github.com/ctsrc/Pgen/blob/7f54478e9f5947b5b146d6e5eab4c3bd224fa566/crates/eff-lexical-data/build.rs | crates/eff-lexical-data/build.rs | use std::env;
use std::fs::File;
use std::io::{BufRead, BufReader, Write};
use std::path::Path;
// https://doc.rust-lang.org/cargo/reference/build-scripts.html#case-study-code-generation
/// Extract words from an EFF diceware wordlist (each line is a dice index,
/// a tab, then the word) and write them into `f_dest` as a
/// `pub const <const_name>: &[&str]` slice literal.
///
/// # Panics
///
/// Panics if the source file cannot be opened, a line cannot be read, a line
/// lacks its tab-separated word column, or writing to `f_dest` fails. This is
/// a build script helper, so aborting the build on any error is desired.
fn words_eff(mut f_dest: &File, const_name: &str, fname_src: &str) {
    write!(f_dest, "pub const {const_name}: &[&str] = &[").unwrap();
    let f_src = BufReader::new(File::open(fname_src).unwrap());
    for line in f_src.lines() {
        let line = line.expect("Unable to read line from internal file");
        // The word is the second tab-separated column; the first is the dice index.
        // A descriptive expect() replaces the bare unwrap() so malformed data
        // fails the build with an actionable message.
        let word = line
            .split('\t')
            .nth(1)
            .expect("wordlist line is missing its tab-separated word column");
        write!(f_dest, "\"{word}\",").unwrap();
    }
    f_dest.write_all(b"];").unwrap();
}
fn main() {
let out_dir = env::var("OUT_DIR").unwrap();
let dest_path = Path::new(&out_dir).join("wordlists.rs");
let f = File::create(dest_path).unwrap();
words_eff(&f, "WL_AUTOCOMPLETE", "data/eff_short_wordlist_2_0.txt");
words_eff(&f, "WL_LONG", "data/eff_large_wordlist.txt");
words_eff(&f, "WL_SHORT", "data/eff_short_wordlist_1.txt");
}
| rust | ISC | 7f54478e9f5947b5b146d6e5eab4c3bd224fa566 | 2026-01-04T20:11:15.304186Z | false |
ctsrc/Pgen | https://github.com/ctsrc/Pgen/blob/7f54478e9f5947b5b146d6e5eab4c3bd224fa566/crates/eff-lexical-data/src/lib.rs | crates/eff-lexical-data/src/lib.rs | #![no_std]
#![forbid(unsafe_code)]
// https://doc.rust-lang.org/cargo/reference/build-scripts.html#case-study-code-generation
include!(concat!(env!("OUT_DIR"), "/wordlists.rs"));
| rust | ISC | 7f54478e9f5947b5b146d6e5eab4c3bd224fa566 | 2026-01-04T20:11:15.304186Z | false |
AnneKitsune/planck_ecs | https://github.com/AnneKitsune/planck_ecs/blob/b0b496643541d868cd404d168570890d579bf01c/src/lib.rs | src/lib.rs | //! A minimalist, safe and fast ECS.
//! Composed of two libraries:
//! * entity_component
//! * world_dispatcher
//!
//! Planck ECS is a library that brings those two smaller parts together.
//! It adds the `maintain` function to world, which takes care of cleaning up
//! dead entities after running systems.
pub use entity_component::*;
pub use world_dispatcher::*;
use atomic_refcell_try::AtomicRefMut;
use std::any::Any;
/// Extension to the `World` struct that adds a maintain() method.
pub trait WorldExt {
    /// Removes entities killed since the last maintain from all registered
    /// component storages, then clears the kill list. Intended to be called
    /// after running systems (see the crate-level docs).
    fn maintain(&mut self);
}
impl WorldExt for World {
    /// Removes dead entities from all the registered storages.
    fn maintain(&mut self) {
        // If the world has no Entities resource (or it is already borrowed),
        // there is nothing to clean up.
        if let Ok(mut entities) = self.get_mut::<Entities>() {
            // Each registry entry pairs a component TypeId with a function
            // that strips the killed entities out of that component storage.
            for (typeid, func) in COMPONENT_REGISTRY.lock().unwrap().iter() {
                // Registered component types that were never inserted into
                // this particular world are simply skipped.
                if let Ok(any) = self.get_by_typeid(typeid) {
                    // Narrow the storage borrow to `dyn Any` so the type-erased
                    // cleanup function can downcast it.
                    let any: AtomicRefMut<dyn Any> = AtomicRefMut::map(any, |j| j.as_any_mut());
                    func(any, entities.killed());
                }
            }
            // Reset the kill list only after every storage has been purged.
            entities.clear_killed();
        }
    }
}
| rust | Apache-2.0 | b0b496643541d868cd404d168570890d579bf01c | 2026-01-04T20:18:29.373186Z | false |
AnneKitsune/planck_ecs | https://github.com/AnneKitsune/planck_ecs/blob/b0b496643541d868cd404d168570890d579bf01c/examples/serde.rs | examples/serde.rs | use planck_ecs::*;
use serde::Serialize;
// Tag component marking an entity as an animal; carries no data.
struct Animal;
// Clone lets positions be copied out of the ECS storage; Serialize lets them
// be emitted as JSON.
#[derive(Clone, Serialize)]
struct Position {
    x: f32,
    y: f32,
}
// Shape of the JSON document this example prints.
#[derive(Serialize)]
struct SerializeMe {
    t: &'static str,
    positions: Vec<Position>,
}
// Example: spawn ten Animal entities with positions, then serialize the
// positions to pretty-printed JSON with serde.
fn main() {
    let mut world = World::default();
    // System that spawns 10 entities, each tagged Animal and given a
    // Position of (i, i).
    let create_animals = (|animals: &mut Components<Animal>,
                           positions: &mut Components<Position>,
                           entities: &mut Entities| {
        for i in 0..10 {
            let entity = entities.create();
            animals.insert(entity, Animal);
            positions.insert(
                entity,
                Position {
                    x: i as f32,
                    y: i as f32,
                },
            );
        }
        Ok(())
    })
    .system();
    let mut setup = DispatcherBuilder::new()
        .add_system(create_animals)
        .build(&mut world);
    setup.run_seq(&world).unwrap();
    let animals = world.get::<Components<Animal>>().unwrap();
    let positions = world.get::<Components<Position>>().unwrap();
    // Clone the positions out of the joined storages so the world borrows can
    // end before serialization. NOTE(review): this presumably yields only
    // entities present in both storages — confirm against planck_ecs's join!
    // docs.
    let cloned_positions = join!(&animals && &positions)
        .filter_map(|(_, position)| position)
        .cloned()
        .collect::<Vec<_>>();
    let serialize_me = SerializeMe {
        t: "Animals",
        positions: cloned_positions,
    };
    let serialized = serde_json::to_string_pretty(&serialize_me).unwrap();
    println!("serialized = {}", serialized);
}
| rust | Apache-2.0 | b0b496643541d868cd404d168570890d579bf01c | 2026-01-04T20:18:29.373186Z | false |
basro/stylance-rs | https://github.com/basro/stylance-rs/blob/74321d41b1931829b93cdb6c8ae4cec44fa4cc0d/stylance/src/lib.rs | stylance/src/lib.rs | //! # About stylance
//!
//! Stylance is a scoped CSS library for rust.
//!
//! Use it in conjunction with [stylance-cli](https://crates.io/crates/stylance-cli).
//!
//! # Usage
//!
//! Create a .module.css file inside your rust source directory
//! ```scss
//! // src/component1/style.module.css
//!
//! .header {
//! color: red;
//! }
//!
//! .contents {
//! border: 1px solid black;
//! }
//! ```
//!
//! Then import that file from your rust code:
//! ```rust
//! stylance::import_crate_style!(style, "src/component1/style.module.css");
//! stylance::import_style!(style2, "style2.module.css");
//!
//! fn use_style() {
//! println!("{} {}", style::header, style2::content);
//! }
//! ```
//!
//! ### Accessing non-scoped global class names with `:global(.class)`
//!
//! Sometimes you may want to use an external classname in your .module.css file.
//!
//! For this you can wrap the global class name with `:global()`, this instructs stylance to leave that class name alone.
//!
//! ```css
//! .contents :global(.paragraph) {
//! color: blue;
//! }
//! ```
//!
//! This will expand to
//! ```css
//! .contents-539306b .paragraph {
//! color: blue;
//! }
//! ```
//!
//! # Transforming and bundling your .module.css files
//!
//! To transform your .module.css and .module.scss into a bundled css file use [stylance-cli](https://crates.io/crates/stylance-cli).
//!
//!
#![cfg_attr(docsrs, feature(doc_cfg))]
#[doc(hidden)]
pub mod internal {
    /// Wraps an `Option<&str>` and implements `From` for the types accepted
    /// by `JoinClasses` and the `classes!` macro (`&str`, `&String`, and
    /// `Option`s of string-like values).
    pub struct MaybeStr<'a>(Option<&'a str>);

    pub use stylance_macros::*;

    /// Joins the strings yielded by `iter` with single spaces, sizing the
    /// output buffer to its exact final length up front.
    fn join_opt_str_iter<'a, Iter>(iter: &mut Iter) -> String
    where
        Iter: Iterator<Item = &'a str> + Clone,
    {
        let head = match iter.next() {
            Some(head) => head,
            None => return String::new(),
        };

        // One pass over a clone of the iterator computes the exact length:
        // each remaining element costs its own length plus one separator.
        let total = head.len() + iter.clone().map(|s| s.len() + 1).sum::<usize>();

        let mut joined = String::with_capacity(total);
        joined.push_str(head);
        for s in iter {
            joined.push(' ');
            joined.push_str(s);
        }

        debug_assert_eq!(joined.len(), total);
        joined
    }

    /// Joins all `Some` entries of `slice` with single spaces; `None`s are skipped.
    pub fn join_maybe_str_slice(slice: &[MaybeStr<'_>]) -> String {
        join_opt_str_iter(&mut slice.iter().flat_map(|c| c.0))
    }

    impl<'a> From<&'a str> for MaybeStr<'a> {
        fn from(value: &'a str) -> Self {
            Self(Some(value))
        }
    }

    impl<'a> From<&'a String> for MaybeStr<'a> {
        fn from(value: &'a String) -> Self {
            Self(Some(value.as_str()))
        }
    }

    impl<'a, T> From<Option<&'a T>> for MaybeStr<'a>
    where
        T: AsRef<str> + ?Sized,
    {
        fn from(value: Option<&'a T>) -> Self {
            Self(value.map(AsRef::as_ref))
        }
    }

    impl<'a, T> From<&'a Option<T>> for MaybeStr<'a>
    where
        T: AsRef<str>,
    {
        fn from(value: &'a Option<T>) -> Self {
            Self(value.as_ref().map(AsRef::as_ref))
        }
    }
}
/// Reads a css file at compile time and generates a module containing the classnames found inside that css file.
/// The path is resolved relative to the file that invoked the macro.
///
/// ### Syntax
/// ```rust
/// import_style!([#[attribute]] [pub] module_identifier, style_path);
/// ```
/// - Optional attributes are applied to the generated module; a common one is
///   `#[allow(dead_code)]` to silence warnings from unused class names.
/// - An optional `pub` before `module_identifier` makes the generated module public.
/// - `module_identifier`: name of the module generated by this macro.
/// - `style_path`: string literal path to a css file inside your rust crate,
///   relative to the file where this macro was called from.
///
/// ### Example
/// ```rust
/// // style.css is located in the same directory as this rust file.
/// stylance::import_style!(#[allow(dead_code)] pub style, "style.css");
///
/// fn use_style() {
///     println!("{}", style::header);
/// }
/// ```
///
/// ### Expands into
///
/// ```rust
/// pub mod style {
///     pub const header: &str = "header-539306b";
///     pub const contents: &str = "contents-539306b";
/// }
/// ```
#[macro_export]
macro_rules! import_style {
    ($(#[$attr:meta])* $visibility:vis $module_name:ident, $path:expr) => {
        $(#[$attr])* $visibility mod $module_name {
            ::stylance::internal::import_style_classes_rel!($path);
        }
    };
}
/// Reads a css file at compile time and generates a module containing the classnames found inside that css file.
/// The path is resolved relative to the cargo manifest directory.
///
/// ### Syntax
/// ```rust
/// import_crate_style!([#[attribute]] [pub] module_identifier, style_path);
/// ```
/// - Optional attributes are applied to the generated module; a common one is
///   `#[allow(dead_code)]` to silence warnings from unused class names.
/// - An optional `pub` before `module_identifier` makes the generated module public.
/// - `module_identifier`: name of the module generated by this macro.
/// - `style_path`: string literal path to a css file inside your rust crate,
///   relative to the cargo manifest directory (the directory that has Cargo.toml).
///
/// ### Example
/// ```rust
/// stylance::import_crate_style!(pub style, "path/from/manifest_dir/to/style.css");
///
/// fn use_style() {
///     println!("{}", style::header);
/// }
/// ```
///
/// ### Expands into
///
/// ```rust
/// pub mod style {
///     pub const header: &str = "header-539306b";
///     pub const contents: &str = "contents-539306b";
/// }
/// ```
#[macro_export]
macro_rules! import_crate_style {
    ($(#[$attr:meta])* $visibility:vis $module_name:ident, $path:expr) => {
        $(#[$attr])* $visibility mod $module_name {
            ::stylance::internal::import_style_classes!($path);
        }
    };
}
/// Utility trait for combining tuples of class names into a single string.
///
/// Implemented for `&[internal::MaybeStr]` and for tuples of 2 up to 17
/// elements whose members convert into `internal::MaybeStr`.
pub trait JoinClasses {
    /// Join all elements of the tuple into a single string separating them with a single space character.
    ///
    /// Option elements of the tuple will be skipped if they are None.
    ///
    /// ### Example
    ///
    /// ```rust
    /// import_crate_style!(style, "tests/style.module.scss");
    /// let current_page = 10; // Some variable to use in the condition
    ///
    /// let class_name = (
    ///     "header", // Global classname
    ///     style::style1, // Stylance scoped classname
    ///     if current_page == 10 { // Conditional class
    ///         Some("active1")
    ///     } else {
    ///         None
    ///     },
    ///     (current_page == 11).then_some("active2"), // Same as above but much nicer
    /// )
    ///     .join_classes();
    ///
    /// // class_name is "header style1-a331da9 active1"
    /// ```
    fn join_classes(self) -> String;
}
// Slice form, used by the `classes!` macro; defers to the internal joiner,
// which skips `None` entries and separates the rest with single spaces.
impl JoinClasses for &[internal::MaybeStr<'_>] {
    fn join_classes(self) -> String {
        internal::join_maybe_str_slice(self)
    }
}
/// Generates a `JoinClasses` impl for a tuple: the first list names one
/// generic parameter per element, the second list gives the matching tuple
/// indices used to convert each element into a `MaybeStr`.
macro_rules! impl_join_classes_for_tuples {
    (($($gen:ident),*), ($($index:tt),*)) => {
        impl<'a, $($gen),*> JoinClasses for ($($gen,)*)
        where
            $($gen: Into<internal::MaybeStr<'a>>),*
        {
            fn join_classes(self) -> String {
                internal::join_maybe_str_slice(
                    [$((self.$index).into()),*].as_slice(),
                )
            }
        }
    };
}
impl_join_classes_for_tuples!(
(T1, T2), //
(0, 1)
);
impl_join_classes_for_tuples!(
(T1, T2, T3), //
(0, 1, 2)
);
impl_join_classes_for_tuples!(
(T1, T2, T3, T4), //
(0, 1, 2, 3)
);
impl_join_classes_for_tuples!(
(T1, T2, T3, T4, T5), //
(0, 1, 2, 3, 4)
);
impl_join_classes_for_tuples!(
(T1, T2, T3, T4, T5, T6), //
(0, 1, 2, 3, 4, 5)
);
impl_join_classes_for_tuples!(
(T1, T2, T3, T4, T5, T6, T7), //
(0, 1, 2, 3, 4, 5, 6)
);
impl_join_classes_for_tuples!(
(T1, T2, T3, T4, T5, T6, T7, T8), //
(0, 1, 2, 3, 4, 5, 6, 7)
);
impl_join_classes_for_tuples!(
(T1, T2, T3, T4, T5, T6, T7, T8, T9),
(0, 1, 2, 3, 4, 5, 6, 7, 8)
);
impl_join_classes_for_tuples!(
(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10),
(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
);
impl_join_classes_for_tuples!(
(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11),
(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
);
impl_join_classes_for_tuples!(
(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12),
(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)
);
impl_join_classes_for_tuples!(
(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13),
(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)
);
impl_join_classes_for_tuples!(
(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14),
(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13)
);
impl_join_classes_for_tuples!(
(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15),
(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)
);
impl_join_classes_for_tuples!(
(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16),
(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
);
impl_join_classes_for_tuples!(
(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17),
(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
);
/// Utility macro for joining multiple class names.
///
/// The macro accepts `&str` `&String` and any refs of `T` where `T` implements `AsRef<str>`
///
/// It also accepts `Option` of those types, `None` values will be filtered from the list.
///
/// Example
///
/// ```rust
/// let active_tab = 0; // set to 1 to disable the active class!
/// let classes_string = classes!(
/// "some-global-class",
/// my_style::header,
/// module_style::header,
/// // conditionally activate a global style
/// if active_tab == 0 { Some(my_style::active) } else { None }
/// // The same can be expressed with then_some:
/// (active_tab == 0).then_some(my_style::active)
/// );
/// ```
#[macro_export]
macro_rules! classes {
() => { "" };
($($exp:expr),+$(,)?) => {
::stylance::JoinClasses::join_classes([$($exp.into()),*].as_slice())
};
}
| rust | MIT | 74321d41b1931829b93cdb6c8ae4cec44fa4cc0d | 2026-01-04T20:18:25.534893Z | false |
basro/stylance-rs | https://github.com/basro/stylance-rs/blob/74321d41b1931829b93cdb6c8ae4cec44fa4cc0d/stylance/tests/test_import_styles.rs | stylance/tests/test_import_styles.rs | use stylance::*;
#[test]
fn test_import_crate_style() {
import_crate_style!(style, "tests/style.module.scss");
assert_eq!(style::style1, "style1-a331da9");
assert_eq!(style::style2, "style2-a331da9");
assert_eq!(style::style3, "style3-a331da9");
assert_eq!(style::style4, "style4-a331da9");
assert_eq!(style::style5, "style5-a331da9");
assert_eq!(style::style6, "style6-a331da9");
assert_eq!(style::style7, "style7-a331da9");
assert_eq!(style::style8, "style8-a331da9");
assert_eq!(style::style9, "style9-a331da9");
assert_eq!(style::style_with_dashes, "style-with-dashes-a331da9");
assert_eq!(style::nested_style, "nested-style-a331da9");
mod some_module {
stylance::import_crate_style!(#[allow(dead_code)] pub style, "tests/style.module.scss");
}
assert_eq!(some_module::style::style1, "style1-a331da9");
import_crate_style!(style2, "tests/style2.module.scss");
assert_eq!(style2::style1, "style1-58ea9e3");
assert_eq!(style2::different_style, "different-style-58ea9e3");
}
#[test]
fn test_import_style() {
import_style!(style, "style.module.scss");
assert_eq!(style::style1, "style1-a331da9");
assert_eq!(style::style2, "style2-a331da9");
assert_eq!(style::style3, "style3-a331da9");
assert_eq!(style::style4, "style4-a331da9");
assert_eq!(style::style5, "style5-a331da9");
assert_eq!(style::style6, "style6-a331da9");
assert_eq!(style::style7, "style7-a331da9");
assert_eq!(style::style8, "style8-a331da9");
assert_eq!(style::style9, "style9-a331da9");
assert_eq!(style::style_with_dashes, "style-with-dashes-a331da9");
assert_eq!(style::nested_style, "nested-style-a331da9");
mod some_module {
stylance::import_style!(#[allow(dead_code)] pub style, "style.module.scss");
}
assert_eq!(some_module::style::style1, "style1-a331da9");
import_style!(style2, "style2.module.scss");
assert_eq!(style2::style1, "style1-58ea9e3");
assert_eq!(style2::different_style, "different-style-58ea9e3");
}
| rust | MIT | 74321d41b1931829b93cdb6c8ae4cec44fa4cc0d | 2026-01-04T20:18:25.534893Z | false |
basro/stylance-rs | https://github.com/basro/stylance-rs/blob/74321d41b1931829b93cdb6c8ae4cec44fa4cc0d/stylance/tests/test_classes.rs | stylance/tests/test_classes.rs | #[test]
fn test_join_classes() {
use stylance::JoinClasses;
assert_eq!(
(
"one",
Some("two"),
false.then_some("three"),
true.then_some("four"),
&String::from("five"),
Some(&String::from("six")),
&("seven", "eight").join_classes()
)
.join_classes(),
"one two four five six seven eight"
);
}
#[test]
fn test_classes_macro_none() {
use stylance::classes;
assert_eq!(classes!(), "");
}
#[test]
fn test_classes_macro_one() {
use stylance::classes;
assert_eq!(classes!("one"), "one");
assert_eq!(classes!(Some("one")), "one");
assert_eq!(classes!(false.then_some("one")), "");
}
#[test]
fn test_classes_macro_many() {
use stylance::classes;
assert_eq!(
classes!(
"one",
Some("two"),
false.then_some("three"),
true.then_some("four"),
&String::from("five"),
Some(&String::from("six")),
&classes!("seven", "eight")
),
"one two four five six seven eight"
);
}
#[test]
fn test_classes_macro_trailing_comma() {
use stylance::classes;
assert_eq!(classes!("one", "two", "three",), "one two three");
}
| rust | MIT | 74321d41b1931829b93cdb6c8ae4cec44fa4cc0d | 2026-01-04T20:18:25.534893Z | false |
basro/stylance-rs | https://github.com/basro/stylance-rs/blob/74321d41b1931829b93cdb6c8ae4cec44fa4cc0d/stylance/examples/usage/module.rs | stylance/examples/usage/module.rs | use stylance::import_crate_style;
// Add pub if you your style module definition to be public.
import_crate_style!(pub style, "examples/usage/style2.module.scss");
| rust | MIT | 74321d41b1931829b93cdb6c8ae4cec44fa4cc0d | 2026-01-04T20:18:25.534893Z | false |
basro/stylance-rs | https://github.com/basro/stylance-rs/blob/74321d41b1931829b93cdb6c8ae4cec44fa4cc0d/stylance/examples/usage/main.rs | stylance/examples/usage/main.rs | mod module;
use stylance::{classes, import_crate_style};
use module::style as module_style;
import_crate_style!(my_style, "examples/usage/style1.module.scss");
fn main() {
println!(
"my_style 'examples/usage/style1.module.scss' \nheader: {}",
my_style::header
);
println!(
"module_style 'examples/usage/style2.module.scss' \nheader: {}",
module_style::header
);
// Easily combine two or more classes using the classes! macro
let active_tab = 0; // set to 1 to disable the active class!
println!(
"The two classes combined: '{}'",
classes!(
"some-global-class",
my_style::header,
my_style::contents,
module_style::header,
(active_tab == 0).then_some(my_style::active) // conditionally activate a global style
),
);
stylance::import_style!(
#[allow(dead_code)]
rel_path_style,
"style1.module.scss"
);
println!(
"rel_path_style 'style1.module.scss' \nheader: {}",
rel_path_style::header
);
}
| rust | MIT | 74321d41b1931829b93cdb6c8ae4cec44fa4cc0d | 2026-01-04T20:18:25.534893Z | false |
basro/stylance-rs | https://github.com/basro/stylance-rs/blob/74321d41b1931829b93cdb6c8ae4cec44fa4cc0d/internal/stylance-macros/src/lib.rs | internal/stylance-macros/src/lib.rs | use std::{env, path::Path};
use anyhow::Context as _;
use proc_macro::TokenStream;
use proc_macro2::{Ident, Span};
use quote::{quote, quote_spanned};
use syn::{parse_macro_input, LitStr};
fn try_import_style_classes_with_path(
manifest_path: &Path,
file_path: &Path,
identifier_span: Span,
) -> anyhow::Result<TokenStream> {
let config = stylance_core::load_config(manifest_path)?;
let (_, classes) = stylance_core::get_classes(manifest_path, file_path, &config)?;
let binding = file_path.canonicalize().unwrap();
let full_path = binding.to_string_lossy();
let identifiers = classes
.iter()
.map(|class| Ident::new(&class.original_name.replace('-', "_"), identifier_span))
.collect::<Vec<_>>();
let output_fields = classes.iter().zip(identifiers).map(|(class, class_ident)| {
let class_str = &class.hashed_name;
quote_spanned!(identifier_span =>
#[allow(non_upper_case_globals)]
pub const #class_ident: &str = #class_str;
)
});
Ok(quote! {
const _ : &[u8] = include_bytes!(#full_path);
#(#output_fields )*
}
.into())
}
fn try_import_style_classes(input: &LitStr) -> anyhow::Result<TokenStream> {
let manifest_dir_env =
env::var_os("CARGO_MANIFEST_DIR").context("CARGO_MANIFEST_DIR env var not found")?;
let manifest_path = Path::new(&manifest_dir_env);
let file_path = manifest_path.join(Path::new(&input.value()));
try_import_style_classes_with_path(manifest_path, &file_path, input.span())
}
#[proc_macro]
pub fn import_style_classes(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as LitStr);
match try_import_style_classes(&input) {
Ok(ts) => ts,
Err(err) => syn::Error::new_spanned(&input, err.to_string())
.to_compile_error()
.into(),
}
}
fn try_import_style_classes_rel(input: &LitStr) -> anyhow::Result<TokenStream> {
let manifest_dir_env =
env::var_os("CARGO_MANIFEST_DIR").context("CARGO_MANIFEST_DIR env var not found")?;
let manifest_path = Path::new(&manifest_dir_env);
let Some(source_path) = input.span().unwrap().local_file() else {
// It would make sense to error here but currently rust analyzer is returning None when
// the normal build would return the path.
// For this reason we bail silently creating no code.
return Ok(TokenStream::new());
};
let css_path = source_path
.parent()
.expect("Macro source path should have a parent dir")
.join(input.value());
try_import_style_classes_with_path(manifest_path, &css_path, input.span())
}
#[proc_macro]
pub fn import_style_classes_rel(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as LitStr);
match try_import_style_classes_rel(&input) {
Ok(ts) => ts,
Err(err) => syn::Error::new_spanned(&input, err.to_string())
.to_compile_error()
.into(),
}
}
| rust | MIT | 74321d41b1931829b93cdb6c8ae4cec44fa4cc0d | 2026-01-04T20:18:25.534893Z | false |
basro/stylance-rs | https://github.com/basro/stylance-rs/blob/74321d41b1931829b93cdb6c8ae4cec44fa4cc0d/internal/stylance-core/src/lib.rs | internal/stylance-core/src/lib.rs | mod class_name_pattern;
mod parse;
use std::{
borrow::Cow,
fs,
hash::{Hash as _, Hasher as _},
path::{Path, PathBuf},
str::FromStr,
};
use anyhow::{anyhow, bail, Context};
use class_name_pattern::ClassNamePattern;
use parse::{CssFragment, Global};
use serde::Deserialize;
use siphasher::sip::SipHasher13;
fn default_extensions() -> Vec<String> {
vec![".module.css".to_owned(), ".module.scss".to_owned()]
}
fn default_folders() -> Vec<PathBuf> {
vec![PathBuf::from_str("./src/").expect("path is valid")]
}
fn default_hash_len() -> usize {
7
}
#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct Config {
pub output_file: Option<PathBuf>,
pub output_dir: Option<PathBuf>,
#[serde(default = "default_extensions")]
pub extensions: Vec<String>,
#[serde(default = "default_folders")]
pub folders: Vec<PathBuf>,
pub scss_prelude: Option<String>,
#[serde(default = "default_hash_len")]
pub hash_len: usize,
#[serde(default)]
pub class_name_pattern: ClassNamePattern,
}
impl Default for Config {
fn default() -> Self {
Self {
output_file: None,
output_dir: None,
extensions: default_extensions(),
folders: default_folders(),
scss_prelude: None,
hash_len: default_hash_len(),
class_name_pattern: Default::default(),
}
}
}
#[derive(Deserialize)]
pub struct CargoToml {
package: Option<CargoTomlPackage>,
}
#[derive(Deserialize)]
pub struct CargoTomlPackage {
metadata: Option<CargoTomlPackageMetadata>,
}
#[derive(Deserialize)]
pub struct CargoTomlPackageMetadata {
stylance: Option<Config>,
}
pub fn hash_string(input: &str) -> u64 {
let mut hasher = SipHasher13::new();
input.hash(&mut hasher);
hasher.finish()
}
pub struct Class {
pub original_name: String,
pub hashed_name: String,
}
pub fn load_config(manifest_dir: &Path) -> anyhow::Result<Config> {
let cargo_toml_contents =
fs::read_to_string(manifest_dir.join("Cargo.toml")).context("Failed to read Cargo.toml")?;
let cargo_toml: CargoToml = toml::from_str(&cargo_toml_contents)?;
let config = match cargo_toml.package {
Some(CargoTomlPackage {
metadata:
Some(CargoTomlPackageMetadata {
stylance: Some(config),
}),
}) => config,
_ => Config::default(),
};
if config.extensions.iter().any(|e| e.is_empty()) {
bail!("Stylance config extensions can't be empty strings");
}
Ok(config)
}
fn normalized_relative_path(base: &Path, subpath: &Path) -> anyhow::Result<String> {
let base = base.canonicalize()?;
let subpath = subpath.canonicalize()?;
let relative_path_str: String = subpath
.strip_prefix(base)
.context("css file should be inside manifest_dir")?
.to_string_lossy()
.into();
#[cfg(target_os = "windows")]
let relative_path_str = relative_path_str.replace('\\', "/");
Ok(relative_path_str)
}
fn make_hash(manifest_dir: &Path, css_file: &Path, hash_len: usize) -> anyhow::Result<String> {
let relative_path_str = normalized_relative_path(manifest_dir, css_file)?;
let hash = hash_string(&relative_path_str);
let mut hash_str = format!("{hash:x}");
hash_str.truncate(hash_len);
Ok(hash_str)
}
pub struct ModifyCssResult {
pub path: PathBuf,
pub normalized_path_str: String,
pub hash: String,
pub contents: String,
}
pub fn load_and_modify_css(
manifest_dir: &Path,
css_file: &Path,
config: &Config,
) -> anyhow::Result<ModifyCssResult> {
let hash_str = make_hash(manifest_dir, css_file, config.hash_len)?;
let css_file_contents = fs::read_to_string(css_file)?;
let fragments = parse::parse_css(&css_file_contents).map_err(|e| anyhow!("{e}"))?;
let mut new_file = String::with_capacity(css_file_contents.len() * 2);
let mut cursor = css_file_contents.as_str();
for fragment in fragments {
let (span, replace) = match fragment {
CssFragment::Class(class) => (
class,
Cow::Owned(config.class_name_pattern.apply(class, &hash_str)),
),
CssFragment::Global(Global { inner, outer }) => (outer, Cow::Borrowed(inner)),
};
let (before, after) = cursor.split_at(span.as_ptr() as usize - cursor.as_ptr() as usize);
cursor = &after[span.len()..];
new_file.push_str(before);
new_file.push_str(&replace);
}
new_file.push_str(cursor);
Ok(ModifyCssResult {
path: css_file.to_owned(),
normalized_path_str: normalized_relative_path(manifest_dir, css_file)?,
hash: hash_str,
contents: new_file,
})
}
pub fn get_classes(
manifest_dir: &Path,
css_file: &Path,
config: &Config,
) -> anyhow::Result<(String, Vec<Class>)> {
let hash_str = make_hash(manifest_dir, css_file, config.hash_len)?;
let css_file_contents = fs::read_to_string(css_file)?;
let mut classes = parse::parse_css(&css_file_contents)
.map_err(|e| anyhow!("{e}"))?
.into_iter()
.filter_map(|c| {
if let CssFragment::Class(c) = c {
Some(c)
} else {
None
}
})
.collect::<Vec<_>>();
classes.sort();
classes.dedup();
Ok((
hash_str.clone(),
classes
.into_iter()
.map(|class| Class {
original_name: class.to_owned(),
hashed_name: config.class_name_pattern.apply(class, &hash_str),
})
.collect(),
))
}
| rust | MIT | 74321d41b1931829b93cdb6c8ae4cec44fa4cc0d | 2026-01-04T20:18:25.534893Z | false |
basro/stylance-rs | https://github.com/basro/stylance-rs/blob/74321d41b1931829b93cdb6c8ae4cec44fa4cc0d/internal/stylance-core/src/parse.rs | internal/stylance-core/src/parse.rs | use winnow::{
combinator::{alt, cut_err, delimited, fold_repeat, opt, peek, preceded, terminated},
error::{ContextError, ParseError},
stream::{AsChar, ContainsToken, Range},
token::{none_of, one_of, tag, take_till, take_until0, take_while},
PResult, Parser,
};
/// ```text
/// v----v inner span
/// :global(.class)
/// ^-------------^ outer span
/// ```
#[derive(Debug, PartialEq)]
pub struct Global<'s> {
pub inner: &'s str,
pub outer: &'s str,
}
#[derive(Debug, PartialEq)]
pub enum CssFragment<'s> {
Class(&'s str),
Global(Global<'s>),
}
pub fn parse_css(input: &str) -> Result<Vec<CssFragment<'_>>, ParseError<&str, ContextError>> {
style_rule_block_contents.parse(input)
}
pub fn recognize_repeat<'s, O>(
range: impl Into<Range>,
f: impl Parser<&'s str, O, ContextError>,
) -> impl Parser<&'s str, &'s str, ContextError> {
fold_repeat(range, f, || (), |_, _| ()).recognize()
}
fn ws<'s>(input: &mut &'s str) -> PResult<&'s str> {
recognize_repeat(
0..,
alt((
line_comment,
block_comment,
take_while(1.., (AsChar::is_space, '\n', '\r')),
)),
)
.parse_next(input)
}
fn line_comment<'s>(input: &mut &'s str) -> PResult<&'s str> {
("//", take_while(0.., |c| c != '\n'))
.recognize()
.parse_next(input)
}
fn block_comment<'s>(input: &mut &'s str) -> PResult<&'s str> {
("/*", cut_err(terminated(take_until0("*/"), "*/")))
.recognize()
.parse_next(input)
}
// matches a sass interpolation of the form #{...}
fn sass_interpolation<'s>(input: &mut &'s str) -> PResult<&'s str> {
(
"#{",
cut_err(terminated(take_till(1.., ('{', '}', '\n')), '}')),
)
.recognize()
.parse_next(input)
}
fn identifier<'s>(input: &mut &'s str) -> PResult<&'s str> {
(
one_of(('_', '-', AsChar::is_alpha)),
take_while(0.., ('_', '-', AsChar::is_alphanum)),
)
.recognize()
.parse_next(input)
}
fn class<'s>(input: &mut &'s str) -> PResult<&'s str> {
preceded('.', identifier).parse_next(input)
}
fn global<'s>(input: &mut &'s str) -> PResult<Global<'s>> {
let (inner, outer) = preceded(
":global(",
cut_err(terminated(
stuff_till(0.., (')', '(', '{')), // inner
')',
)),
)
.with_recognized() // outer
.parse_next(input)?;
Ok(Global { inner, outer })
}
fn string_dq<'s>(input: &mut &'s str) -> PResult<&'s str> {
let str_char = alt((none_of(['"']).void(), tag("\\\"").void()));
let str_chars = recognize_repeat(0.., str_char);
preceded('"', cut_err(terminated(str_chars, '"'))).parse_next(input)
}
fn string_sq<'s>(input: &mut &'s str) -> PResult<&'s str> {
let str_char = alt((none_of(['\'']).void(), tag("\\'").void()));
let str_chars = recognize_repeat(0.., str_char);
preceded('\'', cut_err(terminated(str_chars, '\''))).parse_next(input)
}
fn string<'s>(input: &mut &'s str) -> PResult<&'s str> {
alt((string_dq, string_sq)).parse_next(input)
}
/// Behaves like take_till except it finds and parses strings and
/// comments (allowing those to contain the end condition characters).
pub fn stuff_till<'s>(
range: impl Into<Range>,
list: impl ContainsToken<char>,
) -> impl Parser<&'s str, &'s str, ContextError> {
recognize_repeat(
range,
alt((
string.void(),
block_comment.void(),
line_comment.void(),
sass_interpolation.void(),
'/'.void(),
'#'.void(),
take_till(1.., ('\'', '"', '/', '#', list)).void(),
)),
)
}
fn selector<'s>(input: &mut &'s str) -> PResult<Vec<CssFragment<'s>>> {
fold_repeat(
1..,
alt((
class.map(|c| Some(CssFragment::Class(c))),
global.map(|g| Some(CssFragment::Global(g))),
':'.map(|_| None),
stuff_till(1.., ('.', ';', '{', '}', ':')).map(|_| None),
)),
Vec::new,
|mut acc, item| {
if let Some(item) = item {
acc.push(item);
}
acc
},
)
.parse_next(input)
}
fn declaration<'s>(input: &mut &'s str) -> PResult<&'s str> {
(
(opt('$'), identifier),
ws,
':',
terminated(
stuff_till(1.., (';', '{', '}')),
alt((';', peek('}'))), // semicolon is optional if it's the last element in a rule block
),
)
.recognize()
.parse_next(input)
}
fn style_rule_block_statement<'s>(input: &mut &'s str) -> PResult<Vec<CssFragment<'s>>> {
let content = alt((
declaration.map(|_| Vec::new()), //
at_rule,
style_rule,
));
delimited(ws, content, ws).parse_next(input)
}
fn style_rule_block_contents<'s>(input: &mut &'s str) -> PResult<Vec<CssFragment<'s>>> {
fold_repeat(
0..,
style_rule_block_statement,
Vec::new,
|mut acc, mut item| {
acc.append(&mut item);
acc
},
)
.parse_next(input)
}
fn style_rule_block<'s>(input: &mut &'s str) -> PResult<Vec<CssFragment<'s>>> {
preceded(
'{',
cut_err(terminated(style_rule_block_contents, (ws, '}'))),
)
.parse_next(input)
}
fn style_rule<'s>(input: &mut &'s str) -> PResult<Vec<CssFragment<'s>>> {
let (mut classes, mut nested_classes) = (selector, style_rule_block).parse_next(input)?;
classes.append(&mut nested_classes);
Ok(classes)
}
fn at_rule<'s>(input: &mut &'s str) -> PResult<Vec<CssFragment<'s>>> {
let (identifier, char) = preceded(
'@',
cut_err((
terminated(identifier, stuff_till(0.., ('{', '}', ';'))),
alt(('{', ';', peek('}'))),
)),
)
.parse_next(input)?;
if char != '{' {
return Ok(vec![]);
}
match identifier {
"media" | "layer" | "container" | "include" => {
cut_err(terminated(style_rule_block_contents, '}')).parse_next(input)
}
_ => {
cut_err(terminated(unknown_block_contents, '}')).parse_next(input)?;
Ok(vec![])
}
}
// if identifier == "media" {
// cut_err(terminated(style_rule_block_contents, '}')).parse_next(input)
// } else {
// cut_err(terminated(unknown_block_contents, '}')).parse_next(input)?;
// Ok(vec![])
// }
}
fn unknown_block_contents<'s>(input: &mut &'s str) -> PResult<&'s str> {
recognize_repeat(
0..,
alt((
stuff_till(1.., ('{', '}')).void(),
('{', cut_err((unknown_block_contents, '}'))).void(),
)),
)
.parse_next(input)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_class() {
let mut input = "._x1a2b Hello";
let r = class.parse_next(&mut input);
assert_eq!(r, Ok("_x1a2b"));
}
#[test]
fn test_selector() {
let mut input = ".foo.bar [value=\"fa.sdasd\"] /* .banana */ // .apple \n \t .cry {";
let r = selector.parse_next(&mut input);
assert_eq!(
r,
Ok(vec![
CssFragment::Class("foo"),
CssFragment::Class("bar"),
CssFragment::Class("cry")
])
);
let mut input = "{";
let r = selector.recognize().parse_next(&mut input);
assert!(r.is_err());
}
#[test]
fn test_declaration() {
let mut input = "background-color \t : red;";
let r = declaration.parse_next(&mut input);
assert_eq!(r, Ok("background-color \t : red;"));
let r = declaration.parse_next(&mut input);
assert!(r.is_err());
}
#[test]
fn test_style_rule() {
let mut input = ".foo.bar {
background-color: red;
.baz {
color: blue;
}
$some-scss-var: 10px;
@some-at-rule blah blah;
@media blah .blah {
.moo {
color: red;
}
}
@container (width > 700px) {
.zoo {
color: blue;
}
}
}END";
let r = style_rule.parse_next(&mut input);
assert_eq!(
r,
Ok(vec![
CssFragment::Class("foo"),
CssFragment::Class("bar"),
CssFragment::Class("baz"),
CssFragment::Class("moo"),
CssFragment::Class("zoo")
])
);
assert_eq!(input, "END");
}
#[test]
fn test_at_rule_simple() {
let mut input = "@simple-rule blah \"asd;asd\" blah;";
let r = at_rule.parse_next(&mut input);
assert_eq!(r, Ok(vec![]));
assert!(input.is_empty());
}
#[test]
fn test_at_rule_unknown() {
let mut input = "@unknown blah \"asdasd\" blah {
bunch of stuff {
// things inside {
blah
' { '
}
.bar {
color: blue;
.baz {
color: green;
}
}
}";
let r = at_rule.parse_next(&mut input);
assert_eq!(r, Ok(vec![]));
assert!(input.is_empty());
}
#[test]
fn test_at_rule_media() {
let mut input = "@media blah \"asdasd\" blah {
.foo {
background-color: red;
}
.bar {
color: blue;
.baz {
color: green;
}
}
}";
let r = at_rule.parse_next(&mut input);
assert_eq!(
r,
Ok(vec![
CssFragment::Class("foo"),
CssFragment::Class("bar"),
CssFragment::Class("baz")
])
);
assert!(input.is_empty());
}
#[test]
fn test_at_rule_layer() {
let mut input = "@layer test {
.foo {
background-color: red;
}
.bar {
color: blue;
.baz {
color: green;
}
}
}";
let r = at_rule.parse_next(&mut input);
assert_eq!(
r,
Ok(vec![
CssFragment::Class("foo"),
CssFragment::Class("bar"),
CssFragment::Class("baz")
])
);
assert!(input.is_empty());
}
#[test]
fn test_top_level() {
let mut input = "// tool.module.scss
.default_border {
border-color: lch(100% 10 10);
border-style: dashed double;
border-radius: 30px;
}
@media testing {
.media-foo {
color: red;
}
}
@layer {
.layer-foo {
color: blue;
}
}
@include mixin {
border: none;
.include-foo {
color: green;
}
}
@layer foo;
@debug 1+2 * 3==1+(2 * 3); // true
.container {
padding: 1em;
border: 2px solid;
border-color: lch(100% 10 10);
border-style: dashed double;
border-radius: 30px;
margin: 1em;
background-color: lch(45% 9.5 140.4);
.bar {
color: red;
}
}
@debug 1+2 * 3==1+(2 * 3); // true
";
let r = style_rule_block_contents.parse_next(&mut input);
assert_eq!(
r,
Ok(vec![
CssFragment::Class("default_border"),
CssFragment::Class("media-foo"),
CssFragment::Class("layer-foo"),
CssFragment::Class("include-foo"),
CssFragment::Class("container"),
CssFragment::Class("bar"),
])
);
println!("{input}");
assert!(input.is_empty());
}
#[test]
fn test_sass_interpolation() {
let mut input = "#{$test-test}END";
let r = sass_interpolation.parse_next(&mut input);
assert_eq!(r, Ok("#{$test-test}"));
assert_eq!(input, "END");
let mut input = "#{$test-test
}END";
let r = sass_interpolation.parse_next(&mut input);
assert!(r.is_err());
let mut input = "#{$test-test";
let r = sass_interpolation.parse_next(&mut input);
assert!(r.is_err());
let mut input = "#{$test-te{st}";
let r = sass_interpolation.parse_next(&mut input);
assert!(r.is_err());
}
}
| rust | MIT | 74321d41b1931829b93cdb6c8ae4cec44fa4cc0d | 2026-01-04T20:18:25.534893Z | false |
basro/stylance-rs | https://github.com/basro/stylance-rs/blob/74321d41b1931829b93cdb6c8ae4cec44fa4cc0d/internal/stylance-core/src/class_name_pattern.rs | internal/stylance-core/src/class_name_pattern.rs | use std::borrow::Cow;
use serde::{Deserialize, Deserializer};
#[derive(Debug, Clone, PartialEq)]
pub enum Fragment {
Str(String),
Name,
Hash,
}
#[derive(Debug, Clone, PartialEq)]
pub struct ClassNamePattern(Vec<Fragment>);
impl ClassNamePattern {
pub fn apply(&self, classname: &str, hash: &str) -> String {
self.0
.iter()
.map(|v| match v {
Fragment::Str(s) => s,
Fragment::Name => classname,
Fragment::Hash => hash,
})
.collect::<Vec<_>>()
.join("")
}
}
impl Default for ClassNamePattern {
fn default() -> Self {
Self(vec![
Fragment::Name,
Fragment::Str("-".into()),
Fragment::Hash,
])
}
}
impl<'de> Deserialize<'de> for ClassNamePattern {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let s: Cow<str> = Deserialize::deserialize(deserializer)?;
match parse::parse_pattern(&s) {
Ok(v) => Ok(v),
Err(e) => Err(serde::de::Error::custom(e)),
}
}
}
mod parse {
use super::*;
use winnow::{
combinator::{alt, repeat},
error::{ContextError, ParseError},
token::take_till,
PResult, Parser,
};
fn fragment(input: &mut &str) -> PResult<Fragment> {
alt((
"[name]".value(Fragment::Name),
"[hash]".value(Fragment::Hash),
take_till(1.., '[').map(|s: &str| Fragment::Str(s.into())),
))
.parse_next(input)
}
fn pattern(input: &mut &str) -> PResult<Vec<Fragment>> {
repeat(0.., fragment).parse_next(input)
}
pub fn parse_pattern(input: &str) -> Result<ClassNamePattern, ParseError<&str, ContextError>> {
Ok(ClassNamePattern(pattern.parse(input)?))
}
}
#[cfg(test)]
mod test {
use crate::class_name_pattern::ClassNamePattern;
#[test]
fn test_pattern_deserialize() {
let pattern: ClassNamePattern =
serde_json::from_str("\"test-[name]-[hash]\"").expect("should deserialize");
assert_eq!("test-my-class-12345", pattern.apply("my-class", "12345"));
}
}
| rust | MIT | 74321d41b1931829b93cdb6c8ae4cec44fa4cc0d | 2026-01-04T20:18:25.534893Z | false |
basro/stylance-rs | https://github.com/basro/stylance-rs/blob/74321d41b1931829b93cdb6c8ae4cec44fa4cc0d/stylance-cli/src/lib.rs | stylance-cli/src/lib.rs | use std::{
borrow::Cow,
collections::HashMap,
fs::{self, File},
io::{BufWriter, Write},
path::Path,
};
use anyhow::bail;
pub use stylance_core::Config;
use stylance_core::ModifyCssResult;
use walkdir::WalkDir;
pub fn run(manifest_dir: &Path, config: &Config) -> anyhow::Result<()> {
println!("Running stylance");
run_silent(manifest_dir, config, |file_path| {
println!("{}", file_path.display())
})
}
pub fn run_silent(
manifest_dir: &Path,
config: &Config,
mut file_visit_callback: impl FnMut(&Path),
) -> anyhow::Result<()> {
let mut modified_css_files = Vec::new();
for folder in &config.folders {
for (entry, meta) in WalkDir::new(manifest_dir.join(folder))
.into_iter()
.filter_map(|e| e.ok())
.filter_map(|entry| entry.metadata().ok().map(|meta| (entry, meta)))
{
if meta.is_file() {
let path_str = entry.path().to_string_lossy();
if config.extensions.iter().any(|ext| path_str.ends_with(ext)) {
file_visit_callback(entry.path());
modified_css_files.push(stylance_core::load_and_modify_css(
manifest_dir,
entry.path(),
config,
)?);
}
}
}
}
{
// Verify that there are no hash collisions
let mut map = HashMap::new();
for file in modified_css_files.iter() {
if let Some(previous_file) = map.insert(&file.hash, file) {
bail!(
"The following files had a hash collision:\n{}\n{}\nConsider increasing the hash_len setting.",
file.path.to_string_lossy(),
previous_file.path.to_string_lossy()
);
}
}
}
{
// sort by (filename, path)
fn key(a: &ModifyCssResult) -> (&std::ffi::OsStr, &String) {
(
a.path.file_name().expect("should be a file"),
&a.normalized_path_str,
)
}
modified_css_files.sort_unstable_by(|a, b| key(a).cmp(&key(b)));
}
if let Some(output_file) = &config.output_file {
if let Some(parent) = output_file.parent() {
fs::create_dir_all(parent)?;
}
let mut file = BufWriter::new(File::create(output_file)?);
if let Some(scss_prelude) = &config.scss_prelude {
if output_file
.extension()
.filter(|ext| ext.to_string_lossy() == "scss")
.is_some()
{
file.write_all(scss_prelude.as_bytes())?;
file.write_all(b"\n\n")?;
}
}
file.write_all(
modified_css_files
.iter()
.map(|r| r.contents.as_ref())
.collect::<Vec<_>>()
.join("\n\n")
.as_bytes(),
)?;
}
if let Some(output_dir) = &config.output_dir {
let output_dir = output_dir.join("stylance");
fs::create_dir_all(&output_dir)?;
let entries = fs::read_dir(&output_dir)?;
for entry in entries {
let entry = entry?;
let file_type = entry.file_type()?;
if file_type.is_file() {
fs::remove_file(entry.path())?;
}
}
let mut new_files = Vec::new();
for modified_css in modified_css_files {
let extension = modified_css
.path
.extension()
.map(|e| e.to_string_lossy())
.filter(|e| e == "css")
.unwrap_or(Cow::from("scss"));
let new_file_name = format!(
"{}-{}.{extension}",
modified_css
.path
.file_stem()
.expect("This path should be a file")
.to_string_lossy(),
modified_css.hash
);
new_files.push(new_file_name.clone());
let file_path = output_dir.join(new_file_name);
let mut file = BufWriter::new(File::create(file_path)?);
if let Some(scss_prelude) = &config.scss_prelude {
if extension == "scss" {
file.write_all(scss_prelude.as_bytes())?;
file.write_all(b"\n\n")?;
}
}
file.write_all(modified_css.contents.as_bytes())?;
}
let mut file = File::create(output_dir.join("_index.scss"))?;
file.write_all(
new_files
.iter()
.map(|f| format!("@use \"{f}\";\n"))
.collect::<Vec<_>>()
.join("")
.as_bytes(),
)?;
}
Ok(())
}
| rust | MIT | 74321d41b1931829b93cdb6c8ae4cec44fa4cc0d | 2026-01-04T20:18:25.534893Z | false |
basro/stylance-rs | https://github.com/basro/stylance-rs/blob/74321d41b1931829b93cdb6c8ae4cec44fa4cc0d/stylance-cli/src/main.rs | stylance-cli/src/main.rs | use std::{
path::{Path, PathBuf},
sync::Arc,
time::Duration,
};
use stylance_cli::run;
use stylance_core::{load_config, Config};
use clap::Parser;
use notify::{Event, RecursiveMode, Watcher};
use tokio::{sync::mpsc, task::spawn_blocking};
use tokio_stream::{Stream, StreamExt};
#[derive(Parser)]
#[command(author, version, about, long_about = None, arg_required_else_help = true)]
struct Cli {
/// The path where your crate's Cargo toml is located
manifest_dir: PathBuf,
/// Generate a file with all css modules concatenated
#[arg(long)]
output_file: Option<PathBuf>,
/// Generate a "stylance" directory in this path with all css modules inside
#[arg(long)]
output_dir: Option<PathBuf>,
/// The folders in your crate where stylance will look for css modules
///
/// The paths are relative to the manifest_dir and must not land outside of manifest_dir.
#[arg(short, long, num_args(1))]
folder: Vec<PathBuf>,
/// Watch the fylesystem for changes to the css module files
#[arg(short, long)]
watch: bool,
}
struct RunParams {
manifest_dir: PathBuf,
config: Config,
}
#[tokio::main(flavor = "current_thread")]
async fn main() -> anyhow::Result<()> {
let cli = Cli::parse();
let run_params = make_run_params(&cli).await?;
run(&run_params.manifest_dir, &run_params.config)?;
if cli.watch {
watch(cli, run_params).await?;
}
Ok(())
}
async fn make_run_params(cli: &Cli) -> anyhow::Result<RunParams> {
let manifest_dir = cli.manifest_dir.clone();
let mut config = spawn_blocking(move || load_config(&manifest_dir)).await??;
config.output_file = cli.output_file.clone().or_else(|| {
config
.output_file
.as_ref()
.map(|p| cli.manifest_dir.join(p))
});
config.output_dir = cli
.output_dir
.clone()
.or_else(|| config.output_dir.as_ref().map(|p| cli.manifest_dir.join(p)));
if !cli.folder.is_empty() {
config.folders.clone_from(&cli.folder);
}
Ok(RunParams {
manifest_dir: cli.manifest_dir.clone(),
config,
})
}
fn watch_file(path: &Path) -> anyhow::Result<mpsc::UnboundedReceiver<()>> {
let (events_tx, events_rx) = mpsc::unbounded_channel();
let mut watcher = notify::recommended_watcher({
let events_tx = events_tx.clone();
move |e: notify::Result<Event>| {
let Ok(e) = e else {
return;
};
// Ignore access events
if matches!(e.kind, notify::EventKind::Access(_)) {
return;
}
let _ = events_tx.send(());
}
})?;
watcher.watch(path, RecursiveMode::NonRecursive)?;
tokio::spawn(async move {
events_tx.closed().await;
drop(watcher);
});
Ok(events_rx)
}
/// Watches a set of folders recursively and yields the path of every
/// non-access filesystem event. The watcher stays alive until the returned
/// receiver is dropped.
// Keeping `&Vec<PathBuf>` (not `&[PathBuf]`): the call site passes
// `&iter.collect()` and relies on this concrete type for inference.
#[allow(clippy::ptr_arg)]
fn watch_folders(paths: &Vec<PathBuf>) -> anyhow::Result<mpsc::UnboundedReceiver<PathBuf>> {
    let (tx, rx) = mpsc::unbounded_channel();
    let watcher_tx = tx.clone();
    let mut watcher = notify::recommended_watcher(move |event: notify::Result<Event>| {
        let Ok(event) = event else { return };
        // Access events carry no content change — skip them.
        if matches!(event.kind, notify::EventKind::Access(_)) {
            return;
        }
        for changed in event.paths {
            if watcher_tx.send(changed).is_err() {
                break;
            }
        }
    })?;
    for folder in paths {
        watcher.watch(folder, RecursiveMode::Recursive)?;
    }
    // Tie the watcher's lifetime to the receiver: once every receiver is
    // gone, `closed()` resolves and the watcher is dropped.
    tokio::spawn(async move {
        tx.closed().await;
        drop(watcher);
    });
    Ok(rx)
}
/// Waits for the next *burst* of events on `s`, debounced.
///
/// Blocks until one event arrives, then keeps draining until the stream has
/// been quiet for 50ms. Returns `Some(())` once the burst settles, or `None`
/// when the stream has ended.
async fn debounced_next(s: &mut (impl Stream<Item = ()> + Unpin)) -> Option<()> {
    // Wait for the first event. Its value is intentionally ignored: an
    // already-ended stream is detected by the loop below (Ok(None)).
    s.next().await;
    loop {
        let result = tokio::time::timeout(Duration::from_millis(50), s.next()).await;
        match result {
            // Another event inside the debounce window — keep draining.
            Ok(Some(_)) => {}
            // Stream closed — signal the caller to stop.
            Ok(None) => return None,
            // 50ms of silence — the burst is over.
            Err(_) => return Some(()),
        }
    }
}
/// Watch mode: keeps `run_params` fresh by reloading Cargo.toml, watches the
/// configured folders, and re-runs stylance (debounced) whenever a file with
/// a configured extension changes. Only returns through an error.
async fn watch(cli: Cli, run_params: RunParams) -> anyhow::Result<()> {
    // Latest parameters, shared with background tasks via a watch channel.
    let (run_params_tx, mut run_params) = tokio::sync::watch::channel(Arc::new(run_params));
    let manifest_dir = cli.manifest_dir.clone();
    // Watch Cargo.toml to update the current run_params.
    let cargo_toml_events = watch_file(&manifest_dir.join("Cargo.toml").canonicalize()?)?;
    tokio::spawn(async move {
        let mut stream = tokio_stream::wrappers::UnboundedReceiverStream::new(cargo_toml_events);
        while debounced_next(&mut stream).await.is_some() {
            match make_run_params(&cli).await {
                Ok(new_params) => {
                    // Send fails only when all receivers are gone — then this
                    // task has nothing left to do.
                    if run_params_tx.send(Arc::new(new_params)).is_err() {
                        return;
                    };
                }
                Err(e) => {
                    eprintln!("{e}");
                }
            }
        }
    });
    // Wait for run_events to run the stylance process.
    // Capacity 1 plus `try_send` below coalesces triggers while a run is pending.
    let (run_events_tx, run_events) = mpsc::channel(1);
    tokio::spawn({
        let run_params = run_params.clone();
        async move {
            let mut stream = tokio_stream::wrappers::ReceiverStream::new(run_events);
            while (debounced_next(&mut stream).await).is_some() {
                let run_params = run_params.borrow().clone();
                // `run` does blocking filesystem work — keep it off the async thread.
                if let Ok(Err(e)) =
                    spawn_blocking(move || run(&run_params.manifest_dir, &run_params.config)).await
                {
                    eprintln!("{e}");
                }
            }
        }
    });
    loop {
        // Watch the folders from the current run_params
        let mut events = watch_folders(
            &run_params
                .borrow()
                .config
                .folders
                .iter()
                .map(|f| manifest_dir.join(f))
                .collect(),
        )?;
        // With the events from the watched folder trigger run_events if they match the extensions of the config.
        let watch_folders = {
            let run_params = run_params.borrow().clone();
            let run_events_tx = run_events_tx.clone();
            async move {
                while let Some(path) = events.recv().await {
                    let str_path = path.to_string_lossy();
                    if run_params
                        .config
                        .extensions
                        .iter()
                        .any(|ext| str_path.ends_with(ext))
                    {
                        // One trigger is enough: finish this future so the
                        // watcher set is rebuilt on the next loop pass.
                        let _ = run_events_tx.try_send(());
                        break;
                    }
                }
            }
        };
        // Run until the config has changed
        tokio::select! {
            _ = watch_folders => {},
            _ = run_params.changed() => {
                let _ = run_events_tx.try_send(()); // Config changed so lets trigger a run
            },
        }
    }
}
| rust | MIT | 74321d41b1931829b93cdb6c8ae4cec44fa4cc0d | 2026-01-04T20:18:25.534893Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/exception.rs | src/exception.rs | //! Exception handling support (stubbed for now).
use crate::diag::abort::{AbortLevel, abort};
use crate::result::ResultCode;
use crate::svc;
#[unsafe(no_mangle)]
// Weak linkage lets a downstream crate provide its own strongly-linked
// `__nx_exception_dispatch` that overrides this default.
#[linkage = "weak"]
pub(crate) unsafe extern "C" fn __nx_exception_dispatch(
    _reason: svc::ExceptionType,
    _stack_top: *mut u8,
) -> ! {
    // Default handler: immediately exit if a crate consumer hasn't defined
    // their own exception handler.
    abort(
        AbortLevel::ProcessExit(),
        ResultCode::new(0x6C01 /* StopProcessingException */),
    );
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/applet.rs | src/applet.rs | //! AppletAE/AppletOE Support
use crate::hbl::{AppletType, get_applet_type};
use crate::ipc::sf;
use crate::result::*;
use crate::service;
use crate::svc;
use crate::sync::{ReadGuard, RwLock};
use crate::version::{Version, get_version};
use core::sync::atomic::AtomicU64;
pub use crate::service::applet::*;
static ALL_SYSTEM_APPLET_PROXY_SERVICE: RwLock<Option<AllSystemAppletProxiesService>> =
RwLock::new(None);
static LIBRARY_APPLET_PROXY: RwLock<Option<AppletProxy>> = RwLock::new(None);
static WINDOW_CONTROLLER: RwLock<Option<WindowController>> = RwLock::new(None);
/// Global AppletResourceUserID.
/// Stored as part of `applet::initialize()`
pub static GLOBAL_ARUID: AtomicU64 = AtomicU64::new(0);
/// Proxy type to avoid passing boxed trait objects for Applet Proxy actions.
///
/// One variant per [`AppletType`] flavour opened during [`initialize`].
pub enum AppletProxy {
    /// AppletType::Application | AppletType::Default
    Application(ApplicationProxy),
    /// AppletType::SystemApplet
    SystemApplet(SystemAppletProxy),
    /// AppletType::LibraryApplet
    LibraryApplet(LibraryAppletProxy),
    /// AppletType::OverlayApplet
    OverlayApplet(OverlayAppletProxy),
    /// AppletType::SystemApplication
    SystemApplication(SystemApplicationProxy),
}
// Dispatches `$func` on the concrete proxy client held by the enum variant,
// disambiguating through the matching `I*ProxyClient` trait.
macro_rules! applet_proxy_match_to_fn {
    ($self:ident, $func:ident) => {
        match $self {
            AppletProxy::Application(p) => IApplicationProxyClient::$func(p),
            AppletProxy::SystemApplet(p) => ISystemAppletProxyClient::$func(p),
            AppletProxy::LibraryApplet(p) => ILibraryAppletProxyClient::$func(p),
            AppletProxy::OverlayApplet(p) => IOverlayAppletProxyClient::$func(p),
            AppletProxy::SystemApplication(p) => ISystemApplicationProxyClient::$func(p),
        }
    };
}
// Every proxy flavour exposes the same controller accessors, so each
// `ProxyCommon` method simply forwards to the wrapped client via the macro.
impl ProxyCommon for AppletProxy {
    fn get_common_state_getter(&self) -> Result<CommonStateGetter> {
        applet_proxy_match_to_fn!(self, get_common_state_getter)
    }
    fn get_self_controller(&self) -> Result<SelfController> {
        applet_proxy_match_to_fn!(self, get_self_controller)
    }
    fn get_window_controller(&self) -> Result<WindowController> {
        applet_proxy_match_to_fn!(self, get_window_controller)
    }
    fn get_audio_controller(&self) -> Result<AudioController> {
        applet_proxy_match_to_fn!(self, get_audio_controller)
    }
    fn get_display_controller(&self) -> Result<DisplayController> {
        applet_proxy_match_to_fn!(self, get_display_controller)
    }
    fn get_process_winding_controller(&self) -> Result<ProcessWindingController> {
        applet_proxy_match_to_fn!(self, get_process_winding_controller)
    }
    fn get_library_applet_creator(&self) -> Result<LibraryAppletCreator> {
        applet_proxy_match_to_fn!(self, get_library_applet_creator)
    }
}
/// global AppletAttribute used for opening the applet proxy for the program
///
/// TODO - make a better way to override this value
#[linkage = "weak"]
#[unsafe(export_name = "__nx_applet_attribute")]
pub static APPLET_ATTRIBUTE: AppletAttribute = AppletAttribute::zero();
/// Attempts to initialize the module, or returns if the module has already been initialized.
///
/// Opens the applet proxy matching the current [`AppletType`], caches the
/// proxy service, the proxy, its [`WindowController`] and the
/// AppletResourceUserID in the module globals, and publishes the ARUID via
/// [`GLOBAL_ARUID`].
#[inline]
pub fn initialize() -> Result<()> {
    // The write guard is held for the whole function: concurrent callers
    // serialize and observe either "already initialized" or a complete init.
    let mut app_proxy_service_guard = ALL_SYSTEM_APPLET_PROXY_SERVICE.write();
    if app_proxy_service_guard.is_some() {
        //already initialized
        return Ok(());
    }
    let app_proxy_service = service::new_service_object::<AllSystemAppletProxiesService>()?;
    // Open the proxy flavour for our applet type, retrying while the applet
    // manager reports busy (see the 0x19280 arm below).
    let app_proxy = loop {
        let proxy_result: Result<AppletProxy> = try {
            match get_applet_type() {
                AppletType::Application | AppletType::Default => {
                    AppletProxy::Application(app_proxy_service.open_application_proxy(
                        sf::ProcessId::new(),
                        sf::CopyHandle::from(svc::CURRENT_PROCESS_PSEUDO_HANDLE),
                    )?)
                }
                AppletType::OverlayApplet => {
                    AppletProxy::OverlayApplet(app_proxy_service.open_overlay_applet_proxy(
                        sf::ProcessId::new(),
                        sf::CopyHandle::from(svc::CURRENT_PROCESS_PSEUDO_HANDLE),
                    )?)
                }
                AppletType::SystemApplet => {
                    AppletProxy::SystemApplet(app_proxy_service.open_system_applet_proxy(
                        sf::ProcessId::new(),
                        sf::CopyHandle::from(svc::CURRENT_PROCESS_PSEUDO_HANDLE),
                    )?)
                }
                // 3.0.0+ takes an AppletAttribute buffer; older firmware uses
                // the `_old` command without it.
                AppletType::LibraryApplet if get_version() >= Version::new(3, 0, 0) => {
                    AppletProxy::LibraryApplet(app_proxy_service.open_library_applet_proxy(
                        sf::ProcessId::new(),
                        sf::CopyHandle::from(svc::CURRENT_PROCESS_PSEUDO_HANDLE),
                        sf::InMapAliasBuffer::from_var(&APPLET_ATTRIBUTE),
                    )?)
                }
                AppletType::LibraryApplet => {
                    AppletProxy::LibraryApplet(app_proxy_service.open_library_applet_proxy_old(
                        sf::ProcessId::new(),
                        sf::CopyHandle::from(svc::CURRENT_PROCESS_PSEUDO_HANDLE),
                    )?)
                }
                AppletType::SystemApplication => AppletProxy::SystemApplication(
                    app_proxy_service.open_system_application_proxy(
                        sf::ProcessId::new(),
                        sf::CopyHandle::from(svc::CURRENT_PROCESS_PSEUDO_HANDLE),
                    )?,
                ),
                AppletType::None => {
                    panic!(
                        "Initialized applet service with applet type disabled (`None` applet type)."
                    )
                }
            }
        };
        match proxy_result {
            Ok(p) => break Ok(p),
            // NOTE(review): 0x19280 is presumably the AM "busy" result —
            // confirm against the result-code tables.
            Err(rc) if rc.get_value() == 0x19280 => {
                // behaviour from libnx, though we don't check for a global timeout
                // (sleep 100_000_000 ns = 100ms between attempts)
                let _ = svc::sleep_thread(100000000);
                continue;
            }
            Err(rc) => break Err(rc),
        }
    }?;
    let window_controller = app_proxy.get_window_controller()?;
    let aruid = window_controller.get_applet_resource_user_id()?;
    // Publish everything; the ARUID store uses Release so readers that
    // Acquire-load it observe the fully initialized globals.
    *app_proxy_service_guard = Some(app_proxy_service);
    *LIBRARY_APPLET_PROXY.write() = Some(app_proxy);
    *WINDOW_CONTROLLER.write() = Some(window_controller);
    GLOBAL_ARUID.store(aruid, core::sync::atomic::Ordering::Release);
    Ok(())
}
/// Returns whether the module has been successfully initialized.
///
/// True once [`initialize`] has stored the proxy service; false again after
/// `finalize` has run.
pub fn is_initialized() -> bool {
    ALL_SYSTEM_APPLET_PROXY_SERVICE.read().is_some()
}
/// Finalizes library applet support, dropping the shared resources. pub(crate) as it should only run in rrt0.rs
pub(crate) fn finalize() {
    // Take the service write lock first so a concurrent `initialize` (which
    // grabs the same lock) cannot interleave; then drop the dependents
    // before the proxy service itself.
    let mut app_proxy_service_guard = ALL_SYSTEM_APPLET_PROXY_SERVICE.write();
    *WINDOW_CONTROLLER.write() = None;
    *LIBRARY_APPLET_PROXY.write() = None;
    *app_proxy_service_guard = None;
}
/// Gets the registered global Window Controller
///
/// The guarded value is `None` until [`initialize`] succeeds. Drop the read
/// guard promptly — holding it blocks writers such as `finalize`.
pub fn get_window_controller<'a>() -> ReadGuard<'a, Option<WindowController>> {
    WINDOW_CONTROLLER.read()
}
/// Gets the registered global AppletProxy
///
/// The guarded value is `None` until [`initialize`] succeeds. Drop the read
/// guard promptly — holding it blocks writers such as `finalize`.
pub fn get_applet_proxy<'a>() -> ReadGuard<'a, Option<AppletProxy>> {
    LIBRARY_APPLET_PROXY.read()
}
/// Gets the registered global System Proxy Service
///
/// The guarded value is `None` until [`initialize`] succeeds. Drop the read
/// guard promptly — holding it blocks writers such as `finalize`.
pub fn get_system_proxy_service<'a>() -> ReadGuard<'a, Option<AllSystemAppletProxiesService>> {
    ALL_SYSTEM_APPLET_PROXY_SERVICE.read()
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/arm.rs | src/arm.rs | //! ARM support and utils
use core::arch::asm;
/// A general-purpose CPU register value, readable/writable as an `X`
/// (64-bit), `W` (low 32 bits) or `R` (low 32 bits) value depending on the
/// context/arch.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct CpuRegister {
    /// The raw 64-bit register value.
    pub reg: u64,
}

impl CpuRegister {
    /// Reads the register as a 64-bit `X` value.
    #[inline]
    pub const fn get_x(&self) -> u64 {
        self.reg
    }

    /// Writes the register from a 64-bit `X` value.
    #[inline]
    pub fn set_x(&mut self, x: u64) {
        self.reg = x;
    }

    /// Reads the low 32 bits as a `W` value.
    #[inline]
    pub const fn get_w(&self) -> u32 {
        self.reg as u32
    }

    /// Writes the register from a `W` value (upper 32 bits cleared).
    #[inline]
    pub fn set_w(&mut self, w: u32) {
        self.reg = u64::from(w);
    }

    /// Reads the low 32 bits as an `R` value.
    #[inline]
    pub const fn get_r(&self) -> u32 {
        self.reg as u32
    }

    /// Writes the register from an `R` value (upper 32 bits cleared).
    #[inline]
    pub fn set_r(&mut self, r: u32) {
        self.reg = u64::from(r);
    }
}
/// Represents a FPU register value (`V`, `D` or `S` value depending on the context/arch).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct FpuRegister {
    /// The raw 128-bit register value.
    pub reg: u128,
}

impl FpuRegister {
    /// Gets the [`FpuRegister`] as an `V` value.
    #[inline]
    pub const fn get_v(&self) -> u128 {
        self.reg
    }

    /// Sets the [`FpuRegister`] from an `V` value.
    ///
    /// # Arguments:
    ///
    /// * `v`: The value to set.
    #[inline]
    pub fn set_v(&mut self, v: u128) {
        self.reg = v;
    }

    /// Gets the [`FpuRegister`] as an `D` value.
    ///
    /// Reinterprets the low 64 bits as an IEEE-754 double, mirroring
    /// [`Self::set_d`] (which stores `d.to_bits()`).
    #[inline]
    pub const fn get_d(&self) -> f64 {
        // BUGFIX: was `self.reg as f64` — a *numeric* conversion of the raw
        // integer, not a bit reinterpretation — so set_d/get_d did not
        // round-trip (compare get_s, which already used from_bits).
        f64::from_bits(self.reg as u64)
    }

    /// Sets the [`FpuRegister`] from an `D` value.
    ///
    /// # Arguments:
    ///
    /// * `d`: The value to set.
    #[inline]
    pub fn set_d(&mut self, d: f64) {
        self.reg = d.to_bits() as u128;
    }

    /// Gets the [`FpuRegister`] as an `S` value.
    ///
    /// Reinterprets the low 32 bits as an IEEE-754 single.
    #[inline]
    pub const fn get_s(&self) -> f32 {
        f32::from_bits(self.reg as u32)
    }

    /// Sets the [`FpuRegister`] from an `S` value.
    ///
    /// # Arguments:
    ///
    /// * `s`: The value to set.
    #[inline]
    pub fn set_s(&mut self, s: f32) {
        self.reg = s.to_bits() as u128;
    }
}
// NOTE(review): `define_bit_set!`/`bit!` are project macros defined
// elsewhere; each entry presumably becomes a single-bit flag constant on
// the generated `RegisterGroup` set type — confirm against the macro.
define_bit_set! {
    /// Represents flags of different register kinds/groups.
    RegisterGroup (u32) {
        CpuGprs = bit!(0),
        CpuSprs = bit!(1),
        FpuGprs = bit!(2),
        FpuSprs = bit!(3)
    }
}
/// Represents a thread context usable with [`svc`][`crate::svc`]s.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct ThreadContext {
    /// The general-purpose CPU registers.
    // NOTE(review): named `gpu_gprs` but holds 29 `CpuRegister`s (x0-x28,
    // with fp/lr/sp split out below) — presumably a typo for `cpu_gprs`;
    // renaming the public field would break the API.
    pub gpu_gprs: [CpuRegister; 29],
    /// The FP register.
    pub fp: u64,
    /// The LR register.
    pub lr: u64,
    /// The SP register.
    pub sp: u64,
    /// The PC register.
    pub pc: CpuRegister,
    /// The PSR value.
    pub psr: u32,
    /// The general-purpose FPU registers.
    pub fpu_gprs: [FpuRegister; 32],
    /// The FPCR value.
    pub fpcr: u32,
    /// The FPSR value.
    pub fpsr: u32,
    /// The TPIDR value.
    pub tpidr: u64,
}
/// Flushes (clean + invalidate) memory cache at a certain memory location.
///
/// The start and end address are rounded to cache line boundaries read from the `CTR_EL0` register.
///
/// # Arguments:
///
/// * `address`: Memory address.
/// * `size`: Memory size.
#[inline(always)]
#[allow(clippy::not_unsafe_ptr_arg_deref)]
pub fn cache_flush(address: *mut u8, size: usize) {
    // Equivalent to `cache_flush2` commented out below, but ends up being better hand-written
    // than compiler optimised.
    //
    // Register use in the asm: x1 = end address, x9 = cache line size
    // (from CTR_EL0), x8 = current line, x10 = end; x0 is reloaded with the
    // thread-local region pointer (tpidrro_el0) and the byte at TLR+0x104
    // flags in-progress cache maintenance for the kernel.
    #[unsafe(naked)]
    unsafe extern "C" fn __nx_arm_cache_flush(address: *mut u8, size: usize) {
        core::arch::naked_asm!(
            crate::macros::util::maybe_cfi!(".cfi_startproc"),
            "add x1, x1, x0",
            "mrs x8, CTR_EL0",
            "lsr x8, x8, #16",
            "and x8, x8, #0xf",
            "mov x9, #4",
            "lsl x9, x9, x8",
            "sub x10, x9, #1",
            "bic x8, x0, x10",
            "mov x10, x1",
            "mov w1, #1",
            "mrs x0, tpidrro_el0",
            "strb w1, [x0, #0x104] ", // Set flag at TLR[0x104] for kernel
            "2:",
            "dc civac, x8",
            "add x8, x8, x9",
            "cmp x8, x10",
            "bcc 2b",
            "dsb sy",
            "strb wzr, [x0, #0x104]", // Unset flag at TLR[0x104] for kernel
            "ret",
            crate::macros::util::maybe_cfi!(".cfi_endproc")
        );
    }
    // SAFETY: the asm only performs `dc civac` over the cache lines covering
    // `address..address+size` and toggles the thread-local maintenance flag.
    // NOTE(review): it assumes the whole range is mapped — `dc civac` on an
    // unmapped address would fault; confirm the caller contract.
    unsafe {
        __nx_arm_cache_flush(address, size);
    }
}
/*
pub fn cache_flush2(address: *mut u8, size: usize) {
let address = address.expose_provenance();
let mut ctr_el0: u64;
unsafe {
asm!("mrs {}, CTR_EL0", out(reg) ctr_el0);
}
let cache_line_size = 4usize << (ctr_el0 as usize >> 16 & 0xF);
let cache_line_mask = !(cache_line_size - 1);
let last_address = address.saturating_add(size) & cache_line_mask;
let mut address = address & cache_line_mask;
unsafe {
let tlr = nx::thread::get_thread_local_region();
(*tlr).cache_maintenance_flag = true;
while address <= last_address {
asm!("dc civac, {}", in(reg) address);
address = address.saturating_add(cache_line_size);
}
asm!("dsb sy");
(*tlr).cache_maintenance_flag = false;
}
}
*/
/// Gets the system tick.
///
/// Reads the ARMv8 generic-timer counter register (`cntpct_el0`).
#[inline(always)]
pub fn get_system_tick() -> u64 {
    let system_tick: u64;
    // SAFETY: reading cntpct_el0 has no side effects; EL0 access is
    // presumably granted by the kernel's counter-timer controls — confirm.
    unsafe {
        asm!(
            "mrs {}, cntpct_el0",
            out(reg) system_tick
        );
    }
    system_tick
}
/// Gets the system tick time as nanoseconds.
///
/// Converts the current `cntpct_el0` counter value using the frequency
/// reported by `cntfrq_el0`.
#[inline(always)]
pub fn get_system_tick_as_nanos() -> u64 {
    // BUGFIX: the previous form divided by `freq / 1_000_000_000`, which is
    // integer-zero for any counter frequency below 1 GHz (this crate's own
    // 625/12 ratio in `ticks_to_nanoseconds` implies a 19.2 MHz counter),
    // causing a divide-by-zero. Scale in 128 bits: no overflow on
    // `ticks * 1e9` and no precision loss from an intermediate division.
    let ticks = get_system_tick() as u128;
    let freq = get_system_tick_frequency() as u128;
    ((ticks * 1_000_000_000u128) / freq) as u64
}
/// Gets the system tick frequency.
///
/// Reads the ARMv8 generic-timer frequency register (`cntfrq_el0`), in Hz.
#[inline(always)]
pub fn get_system_tick_frequency() -> u64 {
    let system_tick_freq: u64;
    // SAFETY: reading cntfrq_el0 has no side effects; EL0 access is
    // presumably granted by the kernel's counter-timer controls — confirm.
    unsafe {
        asm!(
            "mrs {}, cntfrq_el0",
            out(reg) system_tick_freq
        );
    }
    system_tick_freq
}
/// Converts ticks to nanoseconds.
///
/// Uses the exact 19.2 MHz counter ratio: `1e9 / 19_200_000 = 625 / 12`.
///
/// # Arguments:
///
/// * `ticks`: Ticks to convert.
#[inline]
pub const fn ticks_to_nanoseconds(ticks: u64) -> u64 {
    // Multiply first so the truncating division loses at most 1ns.
    ticks * 625 / 12
}
/// Converts nanoseconds to ticks.
///
/// Inverse of [`ticks_to_nanoseconds`]: `19_200_000 / 1e9 = 12 / 625`.
///
/// # Arguments:
///
/// * `ns`: Nanoseconds to convert.
#[inline]
pub const fn nanoseconds_to_ticks(ns: u64) -> u64 {
    // Multiply first so the truncating division loses at most 1 tick.
    ns * 12 / 625
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/lib.rs | src/lib.rs | //! Userland library for Nintendo Switch homebrew (and other potential purposes), written in pure Rust and some assembly bits
//!
//! # Features
//!
//! This library covers a lot of different modules, wrappers, etc. so some of them (essentially those which can be opt-in) are separated as optional features:
//!
//! - `services`: Enables custom client-IPC service implementations, AKA the `nx::service` module
//!
//! - `smc`: Enables secure-monitor support, AKA the `nx::smc` module
//!
//! - `gpu`: Enables graphics support, AKA the `nx::gpu` module (also enables `services`)
//!
//! - `console`: Enables console support, AKA the `nx::console` module (also enables `canvas` and the `font8x8` dependency)
//!
//! - `vty`: Enables virtual tty support, AKA the `nx::console::vty` module (also enables `console` as well as the dependencies `embedded-term` and `embedded-graphics-core`)
//!
//! - `fs`: Enables support for this library's FS implementation, aka the `nx::fs` module (also enables `services`)
//!
//! - `input`: Enables input support, AKA the `nx::input` module (also enables `services`)
//!
//! - `la`: Enables library applet support, AKA the `nx::la` module (also enables `services`)
//!
//! - `rand`: Enables pseudo-RNG support, AKA the `nx::rand` module (also enables `services`)
//!
//! - `socket` : Enables std-like network support, AKA the `nx::socket` module (also enables `services`)
//!
//! - `applet` : Enables applet service support, AKA the `nx::applet` module (also enables `services`)
//!
//! - `mii` : Enables mii support, AKA the `nx::mii` module (also enables `services`)
//!
//! Note that most of these features/modules are just simplified and easy-to-use wrappers around IPC/raw system features, so not using them doesn't fully block those features (for instance, you could use services using IPC commands more directly without the `services` feature).
//!
//! # Contributing
//!
//! You can always contribute to these libraries, report bugs, etc. at their [repository](https://github.com/aarch64-switch-rs/nx)
//!
//! # Examples
//!
//! Library examples are located at this other [repository](https://github.com/aarch64-switch-rs/examples)
#![no_std]
// needed to implement the APIs for collection types with custom allocators, and doing raw allocations
#![feature(allocator_api)]
// needed to specify weak linkage on some items
#![feature(linkage)]
// needed for the implementation of the threads module
#![feature(get_mut_unchecked)]
// get rid of mangled error handling in applet::initialize
#![feature(try_blocks)]
// used for ergonomics reading UTF16 strings
#![feature(str_from_utf16_endian)]
//#![warn(missing_docs)]
#![macro_use]
use core::arch::global_asm;
// Required assembly bits (those which essentially cannot/shouldn't be inlined)
global_asm!(include_str!("rrt0.s"));
global_asm!(include_str!("mod0.s"));
//global_asm!(include_str!("exception.s"));
extern crate self as nx;
#[macro_use]
extern crate alloc;
#[macro_use]
extern crate static_assertions;
#[macro_use]
pub mod macros;
#[macro_use]
pub mod result;
pub mod rc;
#[macro_use]
pub mod util;
pub mod mem;
pub mod elf;
pub mod exception;
pub mod sync;
pub mod thread;
pub mod hbl;
#[macro_use]
pub mod rrt0;
// We're going to allow this just because EVERYTHING in there is potentially unsafe in some way,
// even if it's not necessarily memory safety.
#[allow(clippy::missing_safety_doc)]
pub mod svc;
#[macro_use]
pub mod ipc;
#[macro_use]
pub mod diag;
#[cfg(feature = "input")]
pub mod input;
pub mod vmem;
pub mod arm;
pub mod wait;
pub mod version;
#[cfg(feature = "applet")]
pub mod applet;
#[cfg(feature = "services")]
pub mod service;
#[cfg(feature = "gpu")]
pub mod gpu;
#[cfg(feature = "smc")]
pub mod smc;
#[cfg(feature = "fs")]
pub mod fs;
#[cfg(feature = "rand")]
pub mod rand;
#[cfg(feature = "la")]
pub mod la;
#[cfg(any(feature = "console", feature = "vty"))]
#[macro_use]
pub mod console;
#[cfg(feature = "socket")]
pub mod socket;
#[cfg(feature = "mii")]
pub mod mii;
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/gpu.rs | src/gpu.rs | //! Graphics and GPU support and utils
use ::alloc::sync::Arc;
use crate::ipc::sf;
use crate::mem::{alloc, wait_for_permission};
use crate::result::*;
use crate::service;
use crate::service::applet;
use crate::service::dispdrv;
use crate::service::nv;
use crate::service::nv::{ErrorCode, Fd, INvDrvClient, IoctlId};
use crate::service::vi;
use crate::service::vi::{
ApplicationDisplay, ApplicationDisplayRootService, IApplicationDisplayClient,
ManagerDisplayRootService, SystemDisplayRootService,
};
use crate::svc;
use crate::svc::MemoryPermission;
use crate::sync::RwLock;
pub mod rc;
pub mod parcel;
pub mod binder;
pub mod ioctl;
pub mod surface;
#[cfg(feature = "canvas")]
pub mod canvas;
/// Represents layout types
///
/// How buffer contents are arranged in memory (pitch-linear, tiled or
/// block-linear).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(u32)]
#[allow(missing_docs)]
pub enum Layout {
    #[default]
    Invalid = 0,
    Pitch = 1,
    Tiled = 2,
    BlockLinear = 3,
}
/// Represents display scan format types
///
/// Progressive vs. interlaced scan-out.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(u32)]
#[allow(missing_docs)]
pub enum DisplayScanFormat {
    #[default]
    Progressive = 0,
    Interlaced = 1,
}
/// Represents kinds
#[allow(non_camel_case_types)]
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(u32)]
#[allow(missing_docs)]
pub enum Kind {
#[default]
Pitch = 0x0,
Z16 = 0x1,
Z16_2C = 0x2,
Z16_MS2_2C = 0x3,
Z16_MS4_2C = 0x4,
Z16_MS8_2C = 0x5,
Z16_MS16_2C = 0x6,
Z16_2Z = 0x7,
Z16_MS2_2Z = 0x8,
Z16_MS4_2Z = 0x9,
Z16_MS8_2Z = 0xa,
Z16_MS16_2Z = 0xb,
Z16_4CZ = 0xc,
Z16_MS2_4CZ = 0xd,
Z16_MS4_4CZ = 0xe,
Z16_MS8_4CZ = 0xf,
Z16_MS16_4CZ = 0x10,
S8Z24 = 0x11,
S8Z24_1Z = 0x12,
S8Z24_MS2_1Z = 0x13,
S8Z24_MS4_1Z = 0x14,
S8Z24_MS8_1Z = 0x15,
S8Z24_MS16_1Z = 0x16,
S8Z24_2CZ = 0x17,
S8Z24_MS2_2CZ = 0x18,
S8Z24_MS4_2CZ = 0x19,
S8Z24_MS8_2CZ = 0x1a,
S8Z24_MS16_2CZ = 0x1b,
S8Z24_2CS = 0x1C,
S8Z24_MS2_2CS = 0x1d,
S8Z24_MS4_2CS = 0x1e,
S8Z24_MS8_2CS = 0x1f,
S8Z24_MS16_2CS = 0x20,
S8Z24_4CSZV = 0x21,
S8Z24_MS2_4CSZV = 0x22,
S8Z24_MS4_4CSZV = 0x23,
S8Z24_MS8_4CSZV = 0x24,
S8Z24_MS16_4CSZV = 0x25,
V8Z24_MS4_VC12 = 0x26,
V8Z24_MS4_VC4 = 0x27,
V8Z24_MS8_VC8 = 0x28,
V8Z24_MS8_VC24 = 0x29,
S8 = 0x2a,
S8_2S = 0x2b,
V8Z24_MS4_VC12_1ZV = 0x2e,
V8Z24_MS4_VC4_1ZV = 0x2f,
V8Z24_MS8_VC8_1ZV = 0x30,
V8Z24_MS8_VC24_1ZV = 0x31,
V8Z24_MS4_VC12_2CS = 0x32,
V8Z24_MS4_VC4_2CS = 0x33,
V8Z24_MS8_VC8_2CS = 0x34,
V8Z24_MS8_VC24_2CS = 0x35,
V8Z24_MS4_VC12_2CZV = 0x3a,
V8Z24_MS4_VC4_2CZV = 0x3b,
V8Z24_MS8_VC8_2CZV = 0x3c,
V8Z24_MS8_VC24_2CZV = 0x3d,
V8Z24_MS4_VC12_2ZV = 0x3e,
V8Z24_MS4_VC4_2ZV = 0x3f,
V8Z24_MS8_VC8_2ZV = 0x40,
V8Z24_MS8_VC24_2ZV = 0x41,
V8Z24_MS4_VC12_4CSZV = 0x42,
V8Z24_MS4_VC4_4CSZV = 0x43,
V8Z24_MS8_VC8_4CSZV = 0x44,
V8Z24_MS8_VC24_4CSZV = 0x45,
Z24S8 = 0x46,
Z24S8_1Z = 0x47,
Z24S8_MS2_1Z = 0x48,
Z24S8_MS4_1Z = 0x49,
Z24S8_MS8_1Z = 0x4a,
Z24S8_MS16_1Z = 0x4b,
Z24S8_2CS = 0x4c,
Z24S8_MS2_2CS = 0x4d,
Z24S8_MS4_2CS = 0x4e,
Z24S8_MS8_2CS = 0x4f,
Z24S8_MS16_2CS = 0x50,
Z24S8_2CZ = 0x51,
Z24S8_MS2_2CZ = 0x52,
Z24S8_MS4_2CZ = 0x53,
Z24S8_MS8_2CZ = 0x54,
Z24S8_MS16_2CZ = 0x55,
Z24S8_4CSZV = 0x56,
Z24S8_MS2_4CSZV = 0x57,
Z24S8_MS4_4CSZV = 0x58,
Z24S8_MS8_4CSZV = 0x59,
Z24S8_MS16_4CSZV = 0x5a,
Z24V8_MS4_VC12 = 0x5b,
Z24V8_MS4_VC4 = 0x5C,
Z24V8_MS8_VC8 = 0x5d,
Z24V8_MS8_VC24 = 0x5e,
Z24V8_MS4_VC12_1ZV = 0x63,
Z24V8_MS4_VC4_1ZV = 0x64,
Z24V8_MS8_VC8_1ZV = 0x65,
Z24V8_MS8_VC24_1ZV = 0x66,
Z24V8_MS4_VC12_2CS = 0x67,
Z24V8_MS4_VC4_2CS = 0x68,
Z24V8_MS8_VC8_2CS = 0x69,
Z24V8_MS8_VC24_2CS = 0x6a,
Z24V8_MS4_VC12_2CZV = 0x6f,
Z24V8_MS4_VC4_2CZV = 0x70,
Z24V8_MS8_VC8_2CZV = 0x71,
Z24V8_MS8_VC24_2CZV = 0x72,
Z24V8_MS4_VC12_2ZV = 0x73,
Z24V8_MS4_VC4_2ZV = 0x74,
Z24V8_MS8_VC8_2ZV = 0x75,
Z24V8_MS8_VC24_2ZV = 0x76,
Z24V8_MS4_VC12_4CSZV = 0x77,
Z24V8_MS4_VC4_4CSZV = 0x78,
Z24V8_MS8_VC8_4CSZV = 0x79,
Z24V8_MS8_VC24_4CSZV = 0x7a,
ZF32 = 0x7b,
ZF32_1Z = 0x7C,
ZF32_MS2_1Z = 0x7d,
ZF32_MS4_1Z = 0x7e,
ZF32_MS8_1Z = 0x7f,
ZF32_MS16_1Z = 0x80,
ZF32_2CS = 0x81,
ZF32_MS2_2CS = 0x82,
ZF32_MS4_2CS = 0x83,
ZF32_MS8_2CS = 0x84,
ZF32_MS16_2CS = 0x85,
ZF32_2CZ = 0x86,
ZF32_MS2_2CZ = 0x87,
ZF32_MS4_2CZ = 0x88,
ZF32_MS8_2CZ = 0x89,
ZF32_MS16_2CZ = 0x8a,
X8Z24_X16V8S8_MS4_VC12 = 0x8b,
X8Z24_X16V8S8_MS4_VC4 = 0x8c,
X8Z24_X16V8S8_MS8_VC8 = 0x8d,
X8Z24_X16V8S8_MS8_VC24 = 0x8e,
X8Z24_X16V8S8_MS4_VC12_1CS = 0x8f,
X8Z24_X16V8S8_MS4_VC4_1CS = 0x90,
X8Z24_X16V8S8_MS8_VC8_1CS = 0x91,
X8Z24_X16V8S8_MS8_VC24_1CS = 0x92,
X8Z24_X16V8S8_MS4_VC12_1ZV = 0x97,
X8Z24_X16V8S8_MS4_VC4_1ZV = 0x98,
X8Z24_X16V8S8_MS8_VC8_1ZV = 0x99,
X8Z24_X16V8S8_MS8_VC24_1ZV = 0x9a,
X8Z24_X16V8S8_MS4_VC12_1CZV = 0x9b,
X8Z24_X16V8S8_MS4_VC4_1CZV = 0x9c,
X8Z24_X16V8S8_MS8_VC8_1CZV = 0x9d,
X8Z24_X16V8S8_MS8_VC24_1CZV = 0x9e,
X8Z24_X16V8S8_MS4_VC12_2CS = 0x9f,
X8Z24_X16V8S8_MS4_VC4_2CS = 0xa0,
X8Z24_X16V8S8_MS8_VC8_2CS = 0xa1,
X8Z24_X16V8S8_MS8_VC24_2CS = 0xa2,
X8Z24_X16V8S8_MS4_VC12_2CSZV = 0xa3,
X8Z24_X16V8S8_MS4_VC4_2CSZV = 0xa4,
X8Z24_X16V8S8_MS8_VC8_2CSZV = 0xa5,
X8Z24_X16V8S8_MS8_VC24_2CSZV = 0xa6,
ZF32_X16V8S8_MS4_VC12 = 0xa7,
ZF32_X16V8S8_MS4_VC4 = 0xa8,
ZF32_X16V8S8_MS8_VC8 = 0xa9,
ZF32_X16V8S8_MS8_VC24 = 0xaa,
ZF32_X16V8S8_MS4_VC12_1CS = 0xab,
ZF32_X16V8S8_MS4_VC4_1CS = 0xac,
ZF32_X16V8S8_MS8_VC8_1CS = 0xad,
ZF32_X16V8S8_MS8_VC24_1CS = 0xae,
ZF32_X16V8S8_MS4_VC12_1ZV = 0xb3,
ZF32_X16V8S8_MS4_VC4_1ZV = 0xb4,
ZF32_X16V8S8_MS8_VC8_1ZV = 0xb5,
ZF32_X16V8S8_MS8_VC24_1ZV = 0xb6,
ZF32_X16V8S8_MS4_VC12_1CZV = 0xb7,
ZF32_X16V8S8_MS4_VC4_1CZV = 0xb8,
ZF32_X16V8S8_MS8_VC8_1CZV = 0xb9,
ZF32_X16V8S8_MS8_VC24_1CZV = 0xba,
ZF32_X16V8S8_MS4_VC12_2CS = 0xbb,
ZF32_X16V8S8_MS4_VC4_2CS = 0xbc,
ZF32_X16V8S8_MS8_VC8_2CS = 0xbd,
ZF32_X16V8S8_MS8_VC24_2CS = 0xbe,
ZF32_X16V8S8_MS4_VC12_2CSZV = 0xbf,
ZF32_X16V8S8_MS4_VC4_2CSZV = 0xc0,
ZF32_X16V8S8_MS8_VC8_2CSZV = 0xc1,
ZF32_X16V8S8_MS8_VC24_2CSZV = 0xc2,
ZF32_X24S8 = 0xc3,
ZF32_X24S8_1CS = 0xc4,
ZF32_X24S8_MS2_1CS = 0xc5,
ZF32_X24S8_MS4_1CS = 0xc6,
ZF32_X24S8_MS8_1CS = 0xc7,
ZF32_X24S8_MS16_1CS = 0xc8,
SmskedMessage = 0xca,
SmhostMessage = 0xcb,
C64_MS2_2CRA = 0xcd,
ZF32_X24S8_2CSZV = 0xce,
ZF32_X24S8_MS2_2CSZV = 0xcf,
ZF32_X24S8_MS4_2CSZV = 0xd0,
ZF32_X24S8_MS8_2CSZV = 0xd1,
ZF32_X24S8_MS16_2CSZV = 0xd2,
ZF32_X24S8_2CS = 0xd3,
ZF32_X24S8_MS2_2CS = 0xd4,
ZF32_X24S8_MS4_2CS = 0xd5,
ZF32_X24S8_MS8_2CS = 0xd6,
ZF32_X24S8_MS16_2CS = 0xd7,
C32_2C = 0xd8,
C32_2CBR = 0xd9,
C32_2CBA = 0xda,
C32_2CRA = 0xdb,
C32_2BRA = 0xdc,
C32_MS2_2C = 0xdd,
C32_MS2_2CBR = 0xde,
C32_MS2_2CRA = 0xcc,
C32_MS4_2C = 0xdf,
C32_MS4_2CBR = 0xe0,
C32_MS4_2CBA = 0xe1,
C32_MS4_2CRA = 0xe2,
C32_MS4_2BRA = 0xe3,
C32_MS8_MS16_2C = 0xe4,
C32_MS8_MS16_2CRA = 0xe5,
C64_2C = 0xe6,
C64_2CBR = 0xe7,
C64_2CBA = 0xe8,
C64_2CRA = 0xe9,
C64_2BRA = 0xea,
C64_MS2_2C = 0xeb,
C64_MS2_2CBR = 0xec,
C64_MS4_2C = 0xed,
C64_MS4_2CBR = 0xee,
C64_MS4_2CBA = 0xef,
C64_MS4_2CRA = 0xf0,
C64_MS4_2BRA = 0xf1,
C64_MS8_MS16_2C = 0xf2,
C64_MS8_MS16_2CRA = 0xf3,
C128_2C = 0xf4,
C128_2CR = 0xf5,
C128_MS2_2C = 0xf6,
C128_MS2_2CR = 0xf7,
C128_MS4_2C = 0xf8,
C128_MS4_2CR = 0xf9,
C128_MS8_MS16_2C = 0xfa,
C128_MS8_MS16_2CR = 0xfb,
X8C24 = 0xfc,
PitchNoSwizzle = 0xfd,
Generic_16BX2 = 0xfe,
Invalid = 0xff,
}
/// Represents supported color formats
#[allow(non_camel_case_types)]
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(u64)]
#[allow(missing_docs)]
pub enum ColorFormat {
#[default]
Unspecified = 0,
NonColor8 = 0x0009200408,
NonColor16 = 0x0009200A10,
NonColor24 = 0x0009201A18,
NonColor32 = 0x0009201C20,
X4C4 = 0x0009210508,
A4L4 = 0x0100490508,
A8L8 = 0x0100490E10,
Float_A16L16 = 0x0100495D20,
A1B5G5R5 = 0x0100531410,
A4B4G4R4 = 0x0100531510,
A5B5G5R1 = 0x0100531810,
A2B10G10R10 = 0x0100532020,
A8B8G8R8 = 0x0100532120,
A16B16G16R16 = 0x0100532740,
Float_A16B16G16R16 = 0x0100536740,
A1R5G5B5 = 0x0100D11410,
A4R4G4B4 = 0x0100D11510,
A5R1G5B5 = 0x0100D11610,
A2R10G10B10 = 0x0100D12020,
A8R8G8B8 = 0x0100D12120,
A1 = 0x0101240101,
A2 = 0x0101240202,
A4 = 0x0101240304,
A8 = 0x0101240408,
A16 = 0x0101240A10,
A32 = 0x0101241C20,
Float_A16 = 0x0101244A10,
L4A4 = 0x0102000508,
L8A8 = 0x0102000E10,
B4G4R4A4 = 0x01060A1510,
B5G5R1A5 = 0x01060A1710,
B5G5R5A1 = 0x01060A1810,
B8G8R8A8 = 0x01060A2120,
B10G10R10A2 = 0x01060A2320,
R1G5B5A5 = 0x0106881410,
R4G4B4A4 = 0x0106881510,
R5G5B5A1 = 0x0106881810,
R8G8B8A8 = 0x0106882120,
R10G10B10A2 = 0x0106882320,
L1 = 0x010A000101,
L2 = 0x010A000202,
L4 = 0x010A000304,
L8 = 0x010A000408,
L16 = 0x010A000A10,
L32 = 0x010A001C20,
Float_L16 = 0x010A004A10,
B5G6R5 = 0x010A0A1210,
B6G5R5 = 0x010A0A1310,
B5G5R5X1 = 0x010A0A1810,
B8_G8_R8 = 0x010A0A1918,
B8G8R8X8 = 0x010A0A2120,
Float_B10G11R11 = 0x010A0A5E20,
X1B5G5R5 = 0x010A531410,
X8B8G8R8 = 0x010A532120,
X16B16G16R16 = 0x010A532740,
Float_X16B16G16R16 = 0x010A536740,
R3G3B2 = 0x010A880608,
R5G5B6 = 0x010A881110,
R5G6B5 = 0x010A881210,
R5G5B5X1 = 0x010A881810,
R8_G8_B8 = 0x010A881918,
R8G8B8X8 = 0x010A882120,
X1R5G5B5 = 0x010AD11410,
X8R8G8B8 = 0x010AD12120,
RG8 = 0x010B080E10,
R16G16 = 0x010B081D20,
Float_R16G16 = 0x010B085D20,
R8 = 0x010B200408,
R16 = 0x010B200A10,
Float_R16 = 0x010B204A10,
A2B10G10R10_sRGB = 0x0200532020,
A8B8G8R8_sRGB = 0x0200532120,
A16B16G16R16_sRGB = 0x0200532740,
A2R10G10B10_sRGB = 0x0200D12020,
B10G10R10A2_sRGB = 0x02060A2320,
R10G10B10A2_sRGB = 0x0206882320,
X8B8G8R8_sRGB = 0x020A532120,
X16B16G16R16_sRGB = 0x020A532740,
A2B10G10R10_709 = 0x0300532020,
A8B8G8R8_709 = 0x0300532120,
A16B16G16R16_709 = 0x0300532740,
A2R10G10B10_709 = 0x0300D12020,
B10G10R10A2_709 = 0x03060A2320,
R10G10B10A2_709 = 0x0306882320,
X8B8G8R8_709 = 0x030A532120,
X16B16G16R16_709 = 0x030A532740,
A2B10G10R10_709_Linear = 0x0400532020,
A8B8G8R8_709_Linear = 0x0400532120,
A16B16G16R16_709_Linear = 0x0400532740,
A2R10G10B10_709_Linear = 0x0400D12020,
B10G10R10A2_709_Linear = 0x04060A2320,
R10G10B10A2_709_Linear = 0x0406882320,
X8B8G8R8_709_Linear = 0x040A532120,
X16B16G16R16_709_Linear = 0x040A532740,
Float_A16B16G16R16_scRGB_Linear = 0x0500536740,
A2B10G10R10_2020 = 0x0600532020,
A8B8G8R8_2020 = 0x0600532120,
A16B16G16R16_2020 = 0x0600532740,
A2R10G10B10_2020 = 0x0600D12020,
B10G10R10A2_2020 = 0x06060A2320,
R10G10B10A2_2020 = 0x0606882320,
X8B8G8R8_2020 = 0x060A532120,
X16B16G16R16_2020 = 0x060A532740,
A2B10G10R10_2020_Linear = 0x0700532020,
A8B8G8R8_2020_Linear = 0x0700532120,
A16B16G16R16_2020_Linear = 0x0700532740,
Float_A16B16G16R16_2020_Linear = 0x0700536740,
A2R10G10B10_2020_Linear = 0x0700D12020,
B10G10R10A2_2020_Linear = 0x07060A2320,
R10G10B10A2_2020_Linear = 0x0706882320,
X8B8G8R8_2020_Linear = 0x070A532120,
X16B16G16R16_2020_Linear = 0x070A532740,
Float_A16B16G16R16_2020_PQ = 0x0800536740,
A4I4 = 0x0901210508,
A8I8 = 0x0901210E10,
I4A4 = 0x0903200508,
I8A8 = 0x0903200E10,
I1 = 0x0909200101,
I2 = 0x0909200202,
I4 = 0x0909200304,
I8 = 0x0909200408,
A8Y8U8V8 = 0x0A00D12120,
A16Y16U16V16 = 0x0A00D12740,
Y8U8V8A8 = 0x0A06882120,
V8_U8 = 0x0A080C0710,
V8U8 = 0x0A080C0E10,
V10U10 = 0x0A08142220,
V12U12 = 0x0A08142420,
V8 = 0x0A08240408,
V10 = 0x0A08240F10,
V12 = 0x0A08241010,
U8_V8 = 0x0A08440710,
U8V8 = 0x0A08440E10,
U10V10 = 0x0A08842220,
U12V12 = 0x0A08842420,
U8 = 0x0A09040408,
U10 = 0x0A09040F10,
U12 = 0x0A09041010,
Y8 = 0x0A09200408,
Y10 = 0x0A09200F10,
Y12 = 0x0A09201010,
YVYU = 0x0A0A500810,
VYUY = 0x0A0A500910,
YUYV = 0x0A0A880810,
UYVY = 0x0A0A880910,
Y8_U8_V8 = 0x0A0A881918,
V8_U8_RR = 0x0B080C0710,
V8U8_RR = 0x0B080C0E10,
V8_RR = 0x0B08240408,
U8_V8_RR = 0x0B08440710,
U8V8_RR = 0x0B08440E10,
U8_RR = 0x0B09040408,
Y8_RR = 0x0B09200408,
V8_U8_ER = 0x0C080C0710,
V8U8_ER = 0x0C080C0E10,
V8_ER = 0x0C08240408,
U8_V8_ER = 0x0C08440710,
U8V8_ER = 0x0C08440E10,
U8_ER = 0x0C09040408,
Y8_ER = 0x0C09200408,
V8_U8_709 = 0x0D080C0710,
V8U8_709 = 0x0D080C0E10,
V10U10_709 = 0x0D08142220,
V12U12_709 = 0x0D08142420,
V8_709 = 0x0D08240408,
V10_709 = 0x0D08240F10,
V12_709 = 0x0D08241010,
U8_V8_709 = 0x0D08440710,
U8V8_709 = 0x0D08440E10,
U10V10_709 = 0x0D08842220,
U12V12_709 = 0x0D08842420,
U8_709 = 0x0D09040408,
U10_709 = 0x0D09040F10,
U12_709 = 0x0D09041010,
Y8_709 = 0x0D09200408,
Y10_709 = 0x0D09200F10,
Y12_709 = 0x0D09201010,
V8_U8_709_ER = 0x0E080C0710,
V8U8_709_ER = 0x0E080C0E10,
V10U10_709_ER = 0x0E08142220,
V12U12_709_ER = 0x0E08142420,
V8_709_ER = 0x0E08240408,
V10_709_ER = 0x0E08240F10,
V12_709_ER = 0x0E08241010,
U8_V8_709_ER = 0x0E08440710,
U8V8_709_ER = 0x0E08440E10,
U10V10_709_ER = 0x0E08842220,
U12V12_709_ER = 0x0E08842420,
U8_709_ER = 0x0E09040408,
U10_709_ER = 0x0E09040F10,
U12_709_ER = 0x0E09041010,
Y8_709_ER = 0x0E09200408,
Y10_709_ER = 0x0E09200F10,
Y12_709_ER = 0x0E09201010,
V10U10_2020 = 0x0F08142220,
V12U12_2020 = 0x0F08142420,
V10_2020 = 0x0F08240F10,
V12_2020 = 0x0F08241010,
U10V10_2020 = 0x0F08842220,
U12V12_2020 = 0x0F08842420,
U10_2020 = 0x0F09040F10,
U12_2020 = 0x0F09041010,
Y10_2020 = 0x0F09200F10,
Y12_2020 = 0x0F09201010,
Bayer8RGGB = 0x1009200408,
Bayer16RGGB = 0x1009200A10,
BayerS16RGGB = 0x1009208A10,
X2Bayer14RGGB = 0x1009210B10,
X4Bayer12RGGB = 0x1009210C10,
X6Bayer10RGGB = 0x1009210D10,
Bayer8BGGR = 0x1109200408,
Bayer16BGGR = 0x1109200A10,
BayerS16BGGR = 0x1109208A10,
X2Bayer14BGGR = 0x1109210B10,
X4Bayer12BGGR = 0x1109210C10,
X6Bayer10BGGR = 0x1109210D10,
Bayer8GRBG = 0x1209200408,
Bayer16GRBG = 0x1209200A10,
BayerS16GRBG = 0x1209208A10,
X2Bayer14GRBG = 0x1209210B10,
X4Bayer12GRBG = 0x1209210C10,
X6Bayer10GRBG = 0x1209210D10,
Bayer8GBRG = 0x1309200408,
Bayer16GBRG = 0x1309200A10,
BayerS16GBRG = 0x1309208A10,
X2Bayer14GBRG = 0x1309210B10,
X4Bayer12GBRG = 0x1309210C10,
X6Bayer10GBRG = 0x1309210D10,
XYZ = 0x140A886640,
}
impl ColorFormat {
    /// Returns the bytes-per-pixel (`bpp`) of this [`ColorFormat`].
    ///
    /// The value is the 5-bit field stored at bits 3..=7 of the raw format value.
    #[inline(always)]
    pub const fn bytes_per_pixel(&self) -> u32 {
        let raw = *self as u64;
        ((raw >> 3) & 0x1F) as u32
    }
}
/// Represents supported pixel formats. Defined in [AOSP's](https://android.googlesource.com) [graphics-base-v1.0.h](https://android.googlesource.com/platform/system/core/+/8186c6362183e88bc5254af457baa662b20ca1e8/libsystem/include/system/graphics-base-v1.0.h#12)
#[allow(non_camel_case_types)]
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(u32)]
#[allow(missing_docs)]
pub enum PixelFormat {
    #[default]
    Invalid = 0,
    RGBA_8888 = 1,
    RGBX_8888 = 2,
    RGB_888 = 3,
    RGB_565 = 4,
    BGRA_8888 = 5,
    RGBA_5551 = 6,
    RGBA_4444 = 7,
    YCRB_420_SP = 17,
    Raw16 = 32,
    Blob = 33,
    ImplementationDefined = 34,
    YCBCR_420_888 = 35,
    // The following are FourCC-style codes: the little-endian bytes spell
    // "Y8  ", "Y16 " and "YV12" respectively.
    Y8 = 0x20203859,
    Y16 = 0x20363159,
    YV12 = 0x32315659,
}
define_bit_set! {
    /// Represents allocator usage flags
    GraphicsAllocatorUsage (u32) {
        // The `*Never` entries are zero: they stand for the absence of the
        // corresponding read/write bits rather than being flags of their own.
        SoftwareReadNever = 0,
        SoftwareReadRarely = 0x2,
        SoftwareReadOften = 0x3,
        // Mask covering all software-read bits.
        SoftwareReadMask = 0xF,
        SoftwareWriteNever = 0,
        SoftwareWriteRarely = 0x20,
        SoftwareWriteOften = 0x30,
        // Mask covering all software-write bits.
        SoftwareWriteMask = 0xF0,
        HardwareTexture = 0x100,
        HardwareRender = 0x200,
        Hardware2d = 0x400,
        HardwareComposer = 0x800,
        HardwareFramebuffer = 0x1000,
        HardwareExternalDisplay = 0x2000,
        HardwareProtected = 0x4000,
        HardwareCursor = 0x8000,
        HardwareVideoEncoder = 0x10000,
        HardwareCameraWrite = 0x20000,
        // Note: `HardwareCameraZSL` and `HardwareCameraMask` share the value
        // 0x60000 (camera read | camera write).
        HardwareCameraRead = 0x40000,
        HardwareCameraZSL = 0x60000,
        HardwareCameraMask = 0x60000,
        HardwareMask = 0x71F00,
        RenderScript = 0x100000
    }
}
/// Represents connection APIs
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(i32)]
pub enum ConnectionApi {
    /// Marker for invalid API values.
    #[default]
    Invalid = 0,
    /// Buffers will be queued by EGL via eglSwapBuffers after being filled using OpenGL ES.
    EGL = 1,
    /// Buffers will be queued after being filled using the CPU.
    Cpu = 2,
    /// Buffers will be queued by Stagefright after being filled by a video decoder.
    /// The video decoder can either be a software or hardware decoder.
    Media = 3,
    /// Buffers will be queued by the camera HAL.
    Camera = 4,
}
/// Represents disconnect modes
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(u32)]
pub enum DisconnectMode {
    /// Disconnect only the specified API.
    #[default]
    Api,
    /// Disconnect any API originally connected from the process calling disconnect.
    AllLocal,
}
/// Represents a queue buffer output layout
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct QueueBufferOutput {
    /// The width
    pub width: u32,
    /// The height
    pub height: u32,
    /// The transform hint
    pub transform_hint: u32,
    /// The pending buffer count
    pub pending_buffer_count: u32,
}
impl QueueBufferOutput {
    /// Creates a [`QueueBufferOutput`] with every field zeroed.
    ///
    /// Equivalent to [`Default::default`], but usable in `const` contexts.
    pub const fn new() -> Self {
        Self {
            pending_buffer_count: 0,
            transform_hint: 0,
            height: 0,
            width: 0,
        }
    }
}
/// Represents a plane layout
///
/// `#[repr(C)]`: the field order/layout is ABI — do not reorder fields.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct Plane {
    /// The width
    pub width: u32,
    /// The height
    pub height: u32,
    /// The color format
    pub color_format: ColorFormat,
    /// The layout
    pub layout: Layout,
    /// The pitch
    pub pitch: u32,
    /// The map handle
    pub map_handle: u32,
    /// The offset
    pub offset: u32,
    /// The kind
    pub kind: Kind,
    /// The base-2 log of the block height
    pub block_height_log2: BlockLinearHeights,
    /// The display scan format
    pub display_scan_format: DisplayScanFormat,
    /// The second field offset
    pub second_field_offset: u32,
    /// The flags
    pub flags: u64,
    /// The size
    pub size: usize,
    /// Unknown/unused
    pub unk: [u32; 6],
}
/// Represents a graphic buffer header layout
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct GraphicBufferHeader {
    /// The magic
    pub magic: u32,
    /// The width
    pub width: u32,
    /// The height
    pub height: u32,
    /// The stride
    pub stride: u32,
    /// The pixel format
    pub pixel_format: PixelFormat,
    /// The allocator usage
    pub gfx_alloc_usage: GraphicsAllocatorUsage,
    /// The PID
    pub pid: u32,
    /// The reference count
    pub refcount: u32,
    /// The FD count
    pub fd_count: u32,
    /// The buffer size
    pub buffer_size: u32,
}
impl GraphicBufferHeader {
    /// Represents the magic value of this layout (the big-endian bytes spell `"GBFR"`)
    pub const MAGIC: u32 = u32::from_be_bytes(*b"GBFR");
}
/// Represents a graphic buffer layout
///
/// NOTE: `repr(packed)` — fields may be unaligned, so avoid taking references
/// to them (read/copy by value instead).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
#[repr(packed)]
pub struct GraphicBuffer {
    /// The header
    pub header: GraphicBufferHeader,
    /// Empty value
    pub unknown: i32,
    /// The map ID
    pub map_id: u32,
    /// Empty value
    pub zero: u32,
    /// The magic
    pub magic: u32,
    /// The PID
    pub pid: u32,
    /// The buffer type
    pub buffer_type: u32,
    /// The allocator usage
    pub gfx_alloc_usage: GraphicsAllocatorUsage,
    /// The pixel format
    pub pixel_format: PixelFormat,
    /// The external pixel format
    pub external_pixel_format: PixelFormat,
    /// The stride
    pub stride: u32,
    /// The full size
    pub full_size: u32,
    /// The plane count
    pub plane_count: u32,
    /// Empty value
    pub unk2: u32,
    /// The planes
    pub planes: [Plane; 3],
    /// Unused
    pub unused: u64,
}
impl GraphicBuffer {
    /// Represents the magic value of this layout (`0xDAFFCAFF`)
    pub const MAGIC: u32 = 0xDAFFCAFF;
}
/// Represents a fence layout
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct Fence {
    // NOTE(review): presumably an nvhost syncpoint id/threshold pair — confirm
    // against the code that produces these.
    id: u32,
    value: u32,
}
/// Represents a multiple fence layout
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct MultiFence {
    // Presumably the number of valid entries in `fences` (which holds up to 4)
    // — TODO confirm.
    fence_count: u32,
    fences: [Fence; 4],
}
/// Represents a rectangle layout
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct Rect {
    // Edge coordinates, matching the field layout of Android's `ARect`.
    left: i32,
    top: i32,
    right: i32,
    bottom: i32,
}
/// Represents a transform type
///
/// The values are bit flags: `Rotate180 == FlipH | FlipV` (3) and
/// `Rotate270 == FlipH | FlipV | Rotate90` (7).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(u32)]
#[allow(missing_docs)]
pub enum Transform {
    #[default]
    Invalid = 0,
    FlipH = 1,
    FlipV = 2,
    Rotate90 = 4,
    Rotate180 = 3,
    Rotate270 = 7,
}
/// Represents a queue buffer input layout
///
/// NOTE: `repr(packed)` — fields may be unaligned; avoid taking references to them.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
#[repr(packed)]
pub struct QueueBufferInput {
    timestamp: i64,
    is_auto_timestamp: i32,
    crop: Rect,
    scaling_mode: i32,
    transform: Transform,
    sticky_transform: u32,
    // Purpose unknown — kept for layout compatibility.
    unk: u32,
    swap_interval: u32,
    fences: MultiFence,
}
/// Block heights for block-linear surfaces, stored as the base-2 logarithm of
/// the height in GOBs.
#[derive(Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Debug)]
#[repr(u32)]
pub enum BlockLinearHeights {
    /// 1 GOB
    OneGob,
    /// 2 GOBs
    TwoGobs,
    /// 4 GOBs
    FourGobs,
    /// 8 GOBs
    EightGobs,
    /// 16 GOBs (the default)
    #[default]
    SixteenGobs,
    /// 32 GOBs
    ThirtyTwoGobs,
}
impl BlockLinearHeights {
    /// Returns the base-2 logarithm of the block height (the raw discriminant).
    #[inline]
    pub const fn block_height_log2(self) -> u32 {
        self as u32
    }
    /// Returns `8 * block_height()`.
    #[inline]
    pub const fn block_height_bytes(self) -> u32 {
        self.block_height() * 8
    }
    /// Returns the block height in GOBs, i.e. `2^block_height_log2()`.
    #[inline]
    pub const fn block_height(self) -> u32 {
        1u32 << (self as u32)
    }
}
// Device node paths; NUL-terminated since they are presumably handed to the
// nvdrv `open` IPC call as raw byte buffers — confirm at the use sites.
const NVHOST_AS_GPU_PATH: &str = "/dev/nvhost-as-gpu\0";
const NVMAP_PATH: &str = "/dev/nvmap\0";
const NVHOST_CTRL_PATH: &str = "/dev/nvhost-ctrl\0";
/// Represents the screen width
pub const SCREEN_WIDTH: u32 = 1280;
/// Represents the screen height
pub const SCREEN_HEIGHT: u32 = 720;
//const SIZE_FACTOR: f32 = (SCREEN_WIDTH as f32) / (SCREEN_HEIGHT as f32);
/// Represents a layer Z value
///
/// This can contain the minimum/maximum possible values, or a custom Z value
pub enum LayerZ {
    /// Always inserts at the front
    Max,
    /// Always inserts at the back
    Min,
    /// Inserts with a specified Z value
    Value(i64),
}
/// Represents `nvdrv:*` service kinds
pub enum NvDrvServiceKind {
    /// "nvdrv"
    Application,
    /// "nvdrv:a"
    Applet,
    /// "nvdrv:s"
    System,
}
/// Represents `vi:*` service kinds
pub enum ViServiceKind {
    /// "vi:u"
    Application,
    /// "vi:s"
    System,
    /// "vi:m"
    Manager,
}
/// Converts [`ErrorCode`][`nv::ErrorCode`] to a regular [`Result`]
///
/// Each NV error code is mapped one-to-one to its corresponding library
/// result value; `Success` maps to `Ok(())`.
///
/// # Arguments
///
/// * `err`: The [`ErrorCode`][`nv::ErrorCode`]
#[allow(unreachable_patterns)]
pub fn convert_nv_error_code(err: nv::ErrorCode) -> Result<()> {
    match err {
        nv::ErrorCode::Success => Ok(()),
        nv::ErrorCode::NotImplemented => rc::ResultNvErrorCodeNotImplemented::make_err(),
        nv::ErrorCode::NotSupported => rc::ResultNvErrorCodeNotSupported::make_err(),
        nv::ErrorCode::NotInitialized => rc::ResultNvErrorCodeNotInitialized::make_err(),
        nv::ErrorCode::InvalidParameter => rc::ResultNvErrorCodeInvalidParameter::make_err(),
        nv::ErrorCode::TimeOut => rc::ResultNvErrorCodeTimeOut::make_err(),
        nv::ErrorCode::InsufficientMemory => rc::ResultNvErrorCodeInsufficientMemory::make_err(),
        nv::ErrorCode::ReadOnlyAttribute => rc::ResultNvErrorCodeReadOnlyAttribute::make_err(),
        nv::ErrorCode::InvalidState => rc::ResultNvErrorCodeInvalidState::make_err(),
        nv::ErrorCode::InvalidAddress => rc::ResultNvErrorCodeInvalidAddress::make_err(),
        nv::ErrorCode::InvalidSize => rc::ResultNvErrorCodeInvalidSize::make_err(),
        nv::ErrorCode::InvalidValue => rc::ResultNvErrorCodeInvalidValue::make_err(),
        nv::ErrorCode::AlreadyAllocated => rc::ResultNvErrorCodeAlreadyAllocated::make_err(),
        nv::ErrorCode::Busy => rc::ResultNvErrorCodeBusy::make_err(),
        nv::ErrorCode::ResourceError => rc::ResultNvErrorCodeResourceError::make_err(),
        nv::ErrorCode::CountMismatch => rc::ResultNvErrorCodeCountMismatch::make_err(),
        nv::ErrorCode::SharedMemoryTooSmall => {
            rc::ResultNvErrorCodeSharedMemoryTooSmall::make_err()
        }
        nv::ErrorCode::FileOperationFailed => rc::ResultNvErrorCodeFileOperationFailed::make_err(),
        nv::ErrorCode::IoctlFailed => rc::ResultNvErrorCodeIoctlFailed::make_err(),
        // Fallback for any codes without a dedicated result value; the
        // `#[allow(unreachable_patterns)]` above silences the lint when the
        // match is already exhaustive.
        _ => rc::ResultNvErrorCodeInvalid::make_err(),
    }
}
/// A holder for our `*RootService` objects, just to keep them alive for the lifetime of the `Context`
pub enum RootServiceHolder {
    /// Application Service
    Application(ApplicationDisplayRootService),
    /// Manager Service
    Manager(ManagerDisplayRootService),
    /// System Service
    System(SystemDisplayRootService),
}
/// A holder for our `nvdrv` service objects
///
/// All flavors expose the same client calls; see the dispatching `impl` below.
pub enum NvDrvServiceHolder {
    /// Application Service
    Application(nv::ApplicationNvDrvService),
    /// Applet Service
    Applet(nv::AppletNvDrvService),
    /// System Service
    System(nv::SystemNvDrvService),
}
impl NvDrvServiceHolder {
    /// Dispatches `open` to whichever service flavor is held.
    fn open(&self, path: sf::InMapAliasBuffer<'_, u8>) -> Result<(Fd, ErrorCode)> {
        match self {
            Self::Application(s) => s.open(path),
            Self::Applet(s) => s.open(path),
            Self::System(s) => s.open(path),
        }
    }
    /// Dispatches `ioctl` to whichever service flavor is held.
    fn ioctl(
        &self,
        fd: Fd,
        id: IoctlId,
        in_buf: sf::InOutAutoSelectBuffer<'_, u8>,
    ) -> Result<ErrorCode> {
        match self {
            Self::Application(s) => s.ioctl(fd, id, in_buf),
            Self::Applet(s) => s.ioctl(fd, id, in_buf),
            Self::System(s) => s.ioctl(fd, id, in_buf),
        }
    }
    /// Dispatches `close` (of an opened fd) to whichever service flavor is held.
    fn close(&self, fd: Fd) -> Result<ErrorCode> {
        match self {
            Self::Application(s) => s.close(fd),
            Self::Applet(s) => s.close(fd),
            Self::System(s) => s.close(fd),
        }
    }
    /// Dispatches `initialize` (handing over transfer memory) to whichever service flavor is held.
    fn initialize(
        &self,
        transfer_mem_size: u32,
        self_process_handle: sf::CopyHandle,
        transfer_mem_handle: sf::CopyHandle,
    ) -> Result<ErrorCode> {
        match self {
            Self::Application(s) => {
                s.initialize(transfer_mem_size, self_process_handle, transfer_mem_handle)
            }
            Self::Applet(s) => {
                s.initialize(transfer_mem_size, self_process_handle, transfer_mem_handle)
            }
            Self::System(s) => {
                s.initialize(transfer_mem_size, self_process_handle, transfer_mem_handle)
            }
        }
    }
    /// Closes the underlying IPC session of the held service object.
    fn close_self(&mut self) {
        use crate::ipc::client::IClientObject;
        match self {
            Self::Application(s) => s.get_session_mut().close(),
            Self::Applet(s) => s.get_session_mut().close(),
            Self::System(s) => s.get_session_mut().close(),
        }
    }
}
/// Represents a graphics context
#[allow(dead_code)]
pub struct Context {
    // Root VI service object, held to keep the session alive for the context's lifetime.
    vi_service: RootServiceHolder,
    // nvdrv service object (see `NvDrvServiceHolder`).
    nvdrv_service: NvDrvServiceHolder,
    application_display_service: ApplicationDisplay,
    hos_binder_driver: Arc<dispdrv::HOSBinderDriver>,
    // Backing buffer for the transfer memory handed to nvdrv on initialize.
    transfer_mem: alloc::Buffer<u8>,
    transfer_mem_handle: svc::Handle,
    // NOTE(review): typed `svc::Handle` but named like nvdrv file descriptors —
    // presumably results of `nvdrv` opens of the /dev/nvhost* paths above;
    // confirm against the initialization code.
    nvhost_fd: svc::Handle,
    nvmap_fd: svc::Handle,
    nvhostctrl_fd: svc::Handle,
}
impl Context {
/// Creates a new [`Context`]
///
/// This automatically accesses VI and NV [`INvDrvClient`] services (of the specified kinds) and creates NV transfer memory
///
/// # Arguments
///
/// * `nv_kind`: The [`NvDrvServiceKind`]
/// * `vi_kind`: The [`ViServiceKind`]
/// * `transfer_mem_size`: The transfer memory size to use
pub fn new(
nv_kind: NvDrvServiceKind,
vi_kind: ViServiceKind,
transfer_mem_size: usize,
) -> Result<Self> {
let (vi_srv, application_display_srv) = match vi_kind {
ViServiceKind::Manager => {
use vi::IManagerDisplayRootClient;
let vi_srv = service::new_service_object::<ManagerDisplayRootService>()?;
let app_disp_srv =
vi_srv.get_display_service(vi::DisplayServiceMode::Privileged)?;
(RootServiceHolder::Manager(vi_srv), app_disp_srv)
}
ViServiceKind::System => {
use vi::ISystemDisplayRootClient;
let vi_srv = service::new_service_object::<SystemDisplayRootService>()?;
let app_disp_srv =
vi_srv.get_display_service(vi::DisplayServiceMode::Privileged)?;
(RootServiceHolder::System(vi_srv), app_disp_srv)
}
ViServiceKind::Application => {
use vi::IApplicationDisplayRootClient;
let vi_srv = service::new_service_object::<ApplicationDisplayRootService>()?;
let app_disp_srv = vi_srv.get_display_service(vi::DisplayServiceMode::User)?;
(RootServiceHolder::Application(vi_srv), app_disp_srv)
}
};
let nvdrv_srv = match nv_kind {
NvDrvServiceKind::Application => {
NvDrvServiceHolder::Application(service::new_service_object::<
nv::ApplicationNvDrvService,
>()?)
}
NvDrvServiceKind::Applet => {
NvDrvServiceHolder::Applet(service::new_service_object::<nv::AppletNvDrvService>()?)
}
NvDrvServiceKind::System => {
NvDrvServiceHolder::System(service::new_service_object::<nv::SystemNvDrvService>()?)
}
};
Self::from(
vi_srv,
application_display_srv,
nvdrv_srv,
transfer_mem_size,
!matches!(nv_kind, NvDrvServiceKind::System),
)
}
/// Creates a new [`Context`] with already existing service objects
///
/// This automatically creates NV transfer memory
///
/// # Arguments
///
/// * `vi_srv`: The VI service object
/// * `application_display_srv`: The vi [`IApplicationDisplayClient`] interface object
/// * `nvdrv_srv`: The NV [`INvDrvClient`] service object
/// * `transfer_mem_size`: The transfer memory size to use
/// * `nv_host_as_gpu`: Flag whether to open a handle to the GPU for hardware accelerated rendering.
fn from(
vi_srv: RootServiceHolder,
application_display_srv: ApplicationDisplay,
mut nvdrv_srv: NvDrvServiceHolder,
transfer_mem_size: usize,
nv_host_as_gpu: bool,
) -> Result<Self> {
let transfer_mem = alloc::Buffer::new(alloc::PAGE_ALIGNMENT, transfer_mem_size)?;
let transfer_mem_handle = svc::create_transfer_memory(
transfer_mem.ptr,
transfer_mem_size,
svc::MemoryPermission::None(),
)?;
if let Err(rc) = nvdrv_srv.initialize(
transfer_mem_size as u32,
sf::Handle::from(svc::CURRENT_PROCESS_PSEUDO_HANDLE),
sf::Handle::from(transfer_mem_handle),
) {
let _ = svc::close_handle(transfer_mem_handle);
let _ = wait_for_permission(transfer_mem.ptr, MemoryPermission::Write(), None);
return Err(rc);
};
// wrap this up in a try block so we don't need to call into a function for `?` flow control -
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | true |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/version.rs | src/version.rs | //! System version utils
use crate::sync;
use core::cmp;
use core::fmt;
/// Represents a version with major, minor and micro components
#[derive(Copy, Clone, PartialEq, Eq)]
pub struct Version {
/// The major component
pub major: u8,
/// The minor component
pub minor: u8,
/// The micro component
pub micro: u8,
}
unsafe impl Sync for Version {}
impl Version {
/// Creates an empty [`Version`] (with value `0.0.0`)
#[inline]
pub const fn empty() -> Self {
Self {
major: 0,
minor: 0,
micro: 0,
}
}
/// Creates a [`Version`] with the supplied components
///
/// # Arguments
///
/// * `major`: The major component
/// * `minor`: The minor component
/// * `micro`: The micro component
#[inline]
pub const fn new(major: u8, minor: u8, micro: u8) -> Self {
Self {
major,
minor,
micro,
}
}
}
impl Ord for Version {
fn cmp(&self, other: &Self) -> cmp::Ordering {
match self.major.cmp(&other.major) {
cmp::Ordering::Equal => {}
other => return other,
};
match self.minor.cmp(&other.minor) {
cmp::Ordering::Equal => {}
other => return other,
};
self.micro.cmp(&other.micro)
}
}
impl PartialOrd for Version {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl fmt::Display for Version {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}.{}.{}", self.major, self.minor, self.micro)
}
}
/// Represents an interval between versions, being optionally limited on both sides
///
/// An interval limited on both sides is, for example, `1.0.0-5.1.0` (inclusive)
///
/// An interval limited on one side is, for example, `*-3.0.0` (any version lower or equal to `3.0.0`) or `2.3.0-*` (any version higher or equal to `2.3.0`)
pub struct VersionInterval {
min: Option<Version>,
max: Option<Version>,
}
impl VersionInterval {
/// Creates a non-limited [`VersionInterval`], essentially an interval allowing any versions
#[inline]
pub const fn all() -> Self {
Self {
min: None,
max: None,
}
}
/// Creates a left-limited [`VersionInterval`], including any version higher or equal to `min`
///
/// # Arguments
///
/// * `min`: The minimum [`Version`] limiting the interval
#[inline]
pub const fn from(min: Version) -> Self {
Self {
min: Some(min),
max: None,
}
}
/// Creates a right-limited [`VersionInterval`], including any version lower or equal to `max`
///
/// # Arguments
///
/// * `max`: The maximum [`Version`] limiting the interval
#[inline]
pub const fn to(max: Version) -> Self {
Self {
min: None,
max: Some(max),
}
}
/// Creates a limited [`VersionInterval`], including any version between `min` and `max` (inclusive)
///
/// # Arguments
///
/// * `min`: The minimum [`Version`] limiting the interval
/// * `max`: The maximum [`Version`] limiting the interval
#[inline]
pub const fn from_to(min: Version, max: Version) -> Self {
Self {
min: Some(min),
max: Some(max),
}
}
/// Returns whether `ver` is contained in the interval
///
/// # Arguments
///
/// * `ver`: The [`Version`] to check
pub fn contains(&self, ver: Version) -> bool {
if let Some(min_v) = self.min
&& ver < min_v
{
return false;
}
if let Some(max_v) = self.max
&& ver > max_v
{
return false;
}
true
}
}
// Global system version; stays at `0.0.0` until the runtime calls `set_version`.
static G_VERSION: sync::Mutex<Version> = sync::Mutex::new(Version::empty());
/// Sets the global [`Version`], used in the library as the system [`Version`]
///
/// This is used on [`rrt0`][`crate::rrt0`] to set the actual system version, and shouldn't be used for other purposes unless you really know what you're doing
///
/// # Arguments
///
/// * `ver`: The system [`Version`] to set globally for the library
///
/// # Safety
///
/// This is automatically called in the runtime set up, and should not be modified by consuming crates.
pub unsafe fn set_version(ver: Version) {
    G_VERSION.set(ver);
}
/// Gets the global library value for the system [`Version`]
///
/// This value is set on [`rrt0`][`crate::rrt0`] to the actual system version
pub fn get_version() -> Version {
    G_VERSION.get_val()
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/sync.rs | src/sync.rs | //! Synchronization support and utils
use core::cell::UnsafeCell;
pub mod sys;
use sys::mutex::Mutex as RawMutex;
use sys::rwlock::RwLock as RawRwLock;
/// Emits an acquire memory fence
///
/// NOTE(review): the `$x` argument is accepted but never used, and the
/// expansion relies on `atomic` and `Acquire` already being in scope at the
/// call site — confirm this is intentional.
#[macro_export]
macro_rules! acquire {
    ($x:expr) => {
        atomic::fence(Acquire)
    };
}
/// Represents a type which will lock a given [`Mutex`] on creation and unlock it on destruction, effectively guarding it
pub struct ScopedLock<'a> {
    lock: &'a mut RawMutex,
}
impl<'a> ScopedLock<'a> {
    /// Creates a new [`ScopedLock`] for a given [`Mutex`]
    ///
    /// The raw mutex is locked here and stays locked until the guard is dropped.
    ///
    /// # Arguments
    ///
    /// * `lock`: The [`Mutex`] to guard
    pub fn new(lock: &'a mut RawMutex) -> Self {
        lock.lock();
        Self { lock }
    }
}
impl Drop for ScopedLock<'_> {
    /// Unlocks the [`Mutex`] as the [`ScopedLock`] is destroyed (likely out of scope)
    fn drop(&mut self) {
        // SAFETY: invariant upheld — `new` locked the mutex, so it is locked here.
        unsafe { self.lock.unlock() };
    }
}
//////////// MUTEX
/// Represents a value whose access is controlled by an inner [`Mutex`]
pub struct Mutex<T: ?Sized> {
    // Raw lock (from `sys::mutex`) guarding `object_cell`.
    pub(self) raw_lock: RawMutex,
    // The guarded value; all access goes through `raw_lock`.
    pub(self) object_cell: UnsafeCell<T>,
}
impl<T> Mutex<T> {
    /// Returns whether the inner raw lock is currently held
    pub fn is_locked(&self) -> bool {
        self.raw_lock.is_locked()
    }
    /// Creates a new [`Mutex`] wrapping a value
    ///
    /// # Arguments
    ///
    /// * `t`: The value to store
    #[inline]
    pub const fn new(t: T) -> Self {
        Self {
            raw_lock: RawMutex::new(),
            object_cell: UnsafeCell::new(t),
        }
    }
    /// Sets a value, doing a lock-unlock operation in the process
    pub fn set(&self, t: T) {
        unsafe {
            self.raw_lock.lock();
            // Bind the old value so it is dropped at the end of this scope —
            // i.e. after `unlock` below — rather than while the lock is held.
            let _to_drop = core::mem::replace(self.object_cell.get().as_mut().unwrap(), t);
            self.raw_lock.unlock();
        }
    }
}
impl<T: ?Sized> Mutex<T> {
    /// Locks the Mutex and returns a guarded reference to the inner value
    pub fn lock(&self) -> MutexGuard<'_, T> {
        self.raw_lock.lock();
        MutexGuard { lock: self }
    }
    /// Attempts to lock without blocking, returning `None` when the raw `try_lock` fails
    pub fn try_lock(&self) -> Option<MutexGuard<'_, T>> {
        if self.raw_lock.try_lock() {
            Some(MutexGuard { lock: self })
        } else {
            None
        }
    }
}
impl<T: Copy> Mutex<T> {
    /// Gets a copy of the value, doing a lock-unlock operation in the process
    pub fn get_val(&self) -> T {
        unsafe {
            self.raw_lock.lock();
            // Copy out while the lock is held, release, then return the copy.
            let obj_copy = *self.object_cell.get();
            self.raw_lock.unlock();
            obj_copy
        }
    }
}
// we only have a bound on Sync instead of Send, because we don't implement into_inner
// NOTE(review): std's `Mutex<T>` is `Send`/`Sync` iff `T: Send`, since locking
// hands out `&mut T` on whichever thread holds the guard. Bounding both on
// `T: Sync` here admits `Sync + !Send` payloads — confirm soundness.
unsafe impl<T: ?Sized + Sync> Sync for Mutex<T> {}
unsafe impl<T: ?Sized + Sync> Send for Mutex<T> {}
/// RAII guard returned by [`Mutex::lock`]; releases the lock on drop (see the `Drop` impl)
pub struct MutexGuard<'borrow, T: ?Sized> {
    pub(self) lock: &'borrow Mutex<T>,
}
unsafe impl<T: ?Sized + Sync> Sync for MutexGuard<'_, T> {}
impl<'borrow, T: ?Sized> MutexGuard<'borrow, T> {
    /// Locks the given [`Mutex`] and returns a guard for it (same effect as [`Mutex::lock`])
    pub fn new(lock: &'borrow Mutex<T>) -> Self {
        lock.raw_lock.lock();
        Self { lock }
    }
}
impl<T: ?Sized> core::ops::Deref for MutexGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // SAFETY: the pointer from `UnsafeCell::get` is always valid and
        // non-null while the owning `Mutex` is alive, and the guard holds
        // the lock for as long as this borrow can exist.
        unsafe { &*self.lock.object_cell.get() }
    }
}
impl<T: ?Sized> core::ops::DerefMut for MutexGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: the cell pointer is valid while the parent `Mutex` is
        // alive, and `&mut self` plus the held lock guarantee exclusivity.
        unsafe { &mut *self.lock.object_cell.get() }
    }
}
impl<T: ?Sized> Drop for MutexGuard<'_, T> {
    fn drop(&mut self) {
        // SAFETY: the guard was created by locking `raw_lock`, so unlocking
        // here is balanced.
        unsafe { self.lock.raw_lock.unlock() };
    }
}
//////////// MUTEX
//////////// RWLOCK
/// Represents a value whose access is controlled by an inner reader-writer lock
pub struct RwLock<T: ?Sized> {
    // Raw reader-writer lock (from `sys::rwlock`) guarding `object_cell`.
    pub(self) raw_lock: RawRwLock,
    // The guarded value; all access goes through `raw_lock`.
    pub(self) object_cell: UnsafeCell<T>,
}
impl<T: ?Sized> core::fmt::Debug for RwLock<T> {
    /// Renders as `RwLock<TypeName>(...)` without touching the guarded value.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "RwLock<{}>(...)", core::any::type_name::<T>())
    }
}
impl<T> RwLock<T> {
    /// Creates a new [`RwLock`] with a value
    ///
    /// # Arguments
    ///
    /// * `t`: The value to store
    #[inline]
    pub const fn new(t: T) -> Self {
        Self {
            raw_lock: RawRwLock::new(),
            object_cell: UnsafeCell::new(t),
        }
    }
    /// Sets a value, doing a lock-unlock operation in the process
    ///
    /// NOTE(review): `&mut self` already guarantees exclusive access, so the
    /// raw write lock taken here looks redundant — confirm before changing.
    pub fn set(&mut self, t: T) {
        unsafe {
            self.raw_lock.write();
            self.object_cell = UnsafeCell::new(t);
            self.raw_lock.write_unlock();
        }
    }
    /// Locks the value for writing and returns a guarded reference to the inner value
    pub fn write(&self) -> WriteGuard<'_, T> {
        self.raw_lock.write();
        WriteGuard { lock: self }
    }
    /// Locks the value for reading and returns a guarded reference to the inner value
    pub fn read(&self) -> ReadGuard<'_, T> {
        self.raw_lock.read();
        ReadGuard { lock: self }
    }
}
impl<T: Copy> RwLock<T> {
    /// Gets a copy of the value, doing a lock-unlock operation in the process
    pub fn get_val(&self) -> T {
        unsafe {
            self.raw_lock.read();
            // Copy out under the read lock, release, then return the copy.
            let obj_copy = *self.object_cell.get();
            self.raw_lock.read_unlock();
            obj_copy
        }
    }
}
// NOTE(review): std requires `T: Send + Sync` for `RwLock<T>: Sync`, because
// concurrent readers share `&T` across threads; bounding only on `T: Send`
// here admits `!Sync` payloads — confirm soundness.
unsafe impl<T: ?Sized + Send> Sync for RwLock<T> {}
unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
/// RAII guard for shared (read) access; releases the read lock on drop
pub struct ReadGuard<'borrow, T: ?Sized> {
    pub(self) lock: &'borrow RwLock<T>,
}
/// RAII guard for exclusive (write) access; releases the write lock on drop
pub struct WriteGuard<'borrow, T: ?Sized> {
    pub(self) lock: &'borrow RwLock<T>,
}
unsafe impl<T: ?Sized + Sync> Sync for ReadGuard<'_, T> {}
unsafe impl<T: ?Sized + Sync> Sync for WriteGuard<'_, T> {}
impl<'borrow, T: ?Sized> ReadGuard<'borrow, T> {
    /// Acquires the read lock and returns a guard for it (same effect as [`RwLock::read`])
    pub fn new(lock: &'borrow RwLock<T>) -> Self {
        lock.raw_lock.read();
        Self { lock }
    }
}
impl<T> core::ops::Deref for ReadGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // SAFETY: the guard holds the read lock, so shared access is valid.
        unsafe { &*self.lock.object_cell.get() }
    }
}
impl<T> core::ops::Deref for WriteGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // SAFETY: the guard holds the write lock, so access is exclusive.
        unsafe { &*self.lock.object_cell.get() }
    }
}
impl<T> core::ops::DerefMut for WriteGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: the held write lock plus `&mut self` give exclusive access.
        unsafe { &mut *self.lock.object_cell.get() }
    }
}
impl<T: ?Sized> Drop for ReadGuard<'_, T> {
    fn drop(&mut self) {
        // SAFETY: this guard acquired the read lock, so releasing it here is balanced.
        unsafe { self.lock.raw_lock.read_unlock() };
    }
}
impl<T: ?Sized> Drop for WriteGuard<'_, T> {
    fn drop(&mut self) {
        // SAFETY: this guard acquired the write lock, so releasing it here is balanced.
        unsafe { self.lock.raw_lock.write_unlock() };
    }
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/fs.rs | src/fs.rs | //! FileSystem support
use crate::ipc::sf as ipc_sf;
use crate::ipc::sf::fsp::IDirectoryClient;
use crate::ipc::sf::fsp::IFileClient;
use crate::ipc::sf::fsp::IFileSystemClient;
use crate::result::*;
use crate::service;
use crate::service::fsp;
use crate::service::fsp::srv::IFileSystemProxyClient;
use crate::sync::RwLock;
use alloc::boxed::Box;
use alloc::string::String;
use alloc::sync::Arc;
use alloc::vec::Vec;
use core::mem as cmem;
use core::ops::DerefMut;
use embedded_io::ErrorType;
pub use embedded_io::SeekFrom;
pub use embedded_io::Write;
pub mod rc;
// TODO: define this types here and alias them in fsp-srv?
pub use fsp::fsp_sf::DirectoryEntry;
pub use fsp::fsp_sf::DirectoryEntryType;
pub use fsp::fsp_sf::DirectoryOpenMode;
pub use fsp::fsp_sf::FileAttribute;
pub use fsp::fsp_sf::FileOpenMode;
pub use fsp::fsp_sf::FileQueryRangeInfo;
pub use fsp::fsp_sf::FileReadOption;
pub use fsp::fsp_sf::FileTimeStampRaw;
pub use fsp::fsp_sf::FileWriteOption;
pub use fsp::fsp_sf::OperationId;
pub use fsp::fsp_sf::QueryId;
/// Represents a file, abstracted from the IPC client API.
pub trait File: Sync {
    /// Reads data from the file, returning the actual read size.
    ///
    /// # Arguments:
    ///
    /// * `offset`: The absolute offset.
    /// * `out_buf`: The output slice to fill.
    /// * `option`: [`FileReadOption`] for file reading flags.
    fn read(&mut self, offset: usize, out_buf: &mut [u8], option: FileReadOption) -> Result<usize>;
    /// Writes data to a file (the underlying IPC interface does not report the actual written size).
    ///
    /// # Arguments:
    ///
    /// * `offset`: The absolute offset.
    /// * `buf`: The input data to write into the file.
    /// * `option`: [`FileWriteOption`] value.
    fn write(&mut self, offset: usize, buf: &[u8], option: FileWriteOption) -> Result<()>;
    /// Flushes the pending file writes.
    fn flush(&self) -> Result<()>;
    /// Sets the file size.
    ///
    /// This effectively truncates the file.
    ///
    /// # Arguments:
    ///
    /// * `size`: The new file size.
    fn set_size(&mut self, size: usize) -> Result<()>;
    /// Gets the current file size.
    fn get_size(&mut self) -> Result<usize>;
    /// Performs a range-operation on the file, returning corresponding result data.
    ///
    /// # Arguments:
    ///
    /// * `operation_id`: The ID of the file operation to perform on the specified range.
    /// * `offset`: The absolute offset.
    /// * `size`: The file data size in which to operate. i.e. we are operating in the range `[offset, offset+size)`.
    fn operate_range(
        &mut self,
        operation_id: OperationId,
        offset: usize,
        size: usize,
    ) -> Result<FileQueryRangeInfo>;
    /// Performs a range-operation on the file with custom input/output data.
    ///
    /// # Arguments:
    ///
    /// * `operation_id`: The ID of the file operation to perform on the specified range.
    /// * `offset`: The absolute offset.
    /// * `size`: The file data size in which to operate. i.e. we are operating in the range `[offset, offset+size)`.
    /// * `in_buf`: Input data buffer.
    /// * `out_buf`: Output data buffer.
    fn operate_range_with_buffer(
        &mut self,
        operation_id: OperationId,
        offset: usize,
        size: usize,
        in_buf: &[u8],
        out_buf: &mut [u8],
    ) -> Result<()>;
}
/// Represents a directory.
pub trait Directory: Sync {
    /// Reads existing entries, returning the actual number of read entries.
    ///
    /// The max number of entries to read is determined by the output slice size and the actually existing entry count.
    ///
    /// # Arguments:
    ///
    /// * `out_entries`: The out [`DirectoryEntry`] slice to fill.
    fn read(&self, out_entries: &mut [DirectoryEntry]) -> Result<usize>;
    /// Gets the [`Directory`]'s total entry count (independent of prior `read` calls — TODO confirm).
    fn get_entry_count(&self) -> Result<u64>;
}
/// Represents a filesystem.
pub trait FileSystem: Sync {
    /// Creates a file.
    ///
    /// # Arguments:
    ///
    /// * `path`: The file path to create.
    /// * `attribute`: The file attribute flags.
    /// * `size`: The initial file size.
    fn create_file(&self, path: &str, attribute: FileAttribute, size: usize) -> Result<()>;
    /// Deletes a file.
    ///
    /// # Arguments:
    ///
    /// * `path`: The file path to delete.
    fn remove_file(&self, path: &str) -> Result<()>;
    /// Creates a directory.
    ///
    /// # Arguments:
    ///
    /// * `path`: The directory path to create.
    fn create_directory(&self, path: &str) -> Result<()>;
    /// Deletes a directory.
    ///
    /// # Arguments:
    ///
    /// * `path`: The directory path to delete.
    fn remove_dir(&self, path: &str) -> Result<()>;
    /// Deletes a directory and all its children files/directories.
    ///
    /// # Arguments:
    ///
    /// * `path`: The directory to recursively remove.
    fn remove_dir_all(&self, path: &str) -> Result<()>;
    /// Renames a file.
    ///
    /// # Arguments:
    ///
    /// * `old_path`: The current file name/path.
    /// * `new_path`: The new file name/path.
    fn rename_file(&self, old_path: &str, new_path: &str) -> Result<()>;
    /// Renames a directory.
    ///
    /// # Arguments:
    ///
    /// * `old_path`: The current directory path.
    /// * `new_path`: The new directory path.
    fn rename_directory(&self, old_path: &str, new_path: &str) -> Result<()>;
    /// Gets a path's [`DirectoryEntryType`].
    ///
    /// # Arguments:
    ///
    /// * `path`: The path we are checking the entity type of.
    fn get_entry_type(&self, path: &str) -> Result<DirectoryEntryType>;
    /// Opens a [`File`].
    ///
    /// # Arguments:
    ///
    /// * `path`: The file path to open.
    /// * `mode`: The open mode.
    fn open_file(&self, path: &str, mode: FileOpenMode) -> Result<Box<dyn File>>;
    /// Opens a [`Directory`].
    ///
    /// # Arguments:
    ///
    /// * `path`: The directory path to open.
    /// * `mode`: The open mode.
    fn open_directory(&self, path: &str, mode: DirectoryOpenMode) -> Result<Box<dyn Directory>>;
    /// Commits the filesystem, flushing pending writes.
    fn commit(&self) -> Result<()>;
    /// Gets the free space size at a given path.
    ///
    /// # Arguments:
    ///
    /// * `path`: The path to check.
    fn get_free_space_size(&self, path: &str) -> Result<usize>;
    /// Gets the total space size at a given path.
    ///
    /// # Arguments:
    ///
    /// * `path`: The path to use.
    fn get_total_space_size(&self, path: &str) -> Result<usize>;
    /// Deletes all the children files/directories inside a directory.
    ///
    /// # Arguments:
    ///
    /// * `path`: The path to use.
    fn remove_children_all(&self, path: &str) -> Result<()>;
    /// Gets the [`FileTimeStampRaw`] of a file.
    ///
    /// # Arguments:
    ///
    /// * `path`: The path to use.
    fn get_file_time_stamp_raw(&self, path: &str) -> Result<FileTimeStampRaw>;
    /// Queries on a path.
    ///
    /// # Arguments:
    ///
    /// * `path`: The path to query on.
    /// * `query_id`: The [`QueryId`].
    /// * `in_buf`: Input data.
    /// * `out_buf`: Output data.
    fn query_entry(
        &self,
        path: &str,
        query_id: QueryId,
        in_buf: &[u8],
        out_buf: &mut [u8],
    ) -> Result<()>;
}
/// Represents a wrapper [`File`] implementation to translate IPC [`IFileClient`] objects to [`File`] objects.
pub struct ProxyFile {
    file_obj: Box<dyn IFileClient>,
}
// TODO: Remove. This fixes a problem in emuiibo but this whole construct is probably not needed.
unsafe impl Sync for ProxyFile {}
unsafe impl Send for ProxyFile {}
impl ProxyFile {
    /// Creates a new [`ProxyFile`] from a [`IFileClient`] object.
    ///
    /// # Arguments:
    ///
    /// * `file`: The IPC [`IFileClient`] implementation to wrap (boxed internally).
    pub fn new(file: impl IFileClient + 'static) -> Self {
        Self {
            file_obj: Box::new(file),
        }
    }
}
impl From<Box<dyn IFileClient>> for ProxyFile {
    /// Wraps an already-boxed [`IFileClient`] without re-boxing.
    fn from(value: Box<dyn IFileClient>) -> Self {
        Self { file_obj: value }
    }
}
// [`File`] implementation that forwards every call to the wrapped IPC object,
// converting plain slices into IPC buffer descriptors where needed.
impl File for ProxyFile {
    /// Reads from the wrapped IPC file at `offset` into `out_buf`, returning the read byte count.
    fn read(&mut self, offset: usize, out_buf: &mut [u8], option: FileReadOption) -> Result<usize> {
        let max_size = out_buf.len();
        let out = ipc_sf::Buffer::from_mut_array(out_buf);
        self.file_obj.read(option, offset, max_size, out)
    }
    /// Writes `buf` to the wrapped IPC file at `offset`.
    fn write(&mut self, offset: usize, buf: &[u8], option: FileWriteOption) -> Result<()> {
        let size = buf.len();
        let input = ipc_sf::Buffer::from_array(buf);
        self.file_obj.write(option, offset, size, input)
    }
    /// Flushes the wrapped IPC file.
    fn flush(&self) -> Result<()> {
        self.file_obj.flush()
    }
    /// Resizes the wrapped IPC file.
    fn set_size(&mut self, size: usize) -> Result<()> {
        self.file_obj.set_size(size)
    }
    /// Queries the wrapped IPC file's size.
    fn get_size(&mut self) -> Result<usize> {
        self.file_obj.get_size()
    }
    /// Forwards an `operate_range` request to the wrapped IPC file.
    fn operate_range(
        &mut self,
        operation_id: OperationId,
        offset: usize,
        size: usize,
    ) -> Result<FileQueryRangeInfo> {
        self.file_obj.operate_range(operation_id, offset, size)
    }
    /// Forwards an `operate_range_with_buffer` request, translating both slices into IPC buffers.
    fn operate_range_with_buffer(
        &mut self,
        operation_id: OperationId,
        offset: usize,
        size: usize,
        in_buf: &[u8],
        out_buf: &mut [u8],
    ) -> Result<()> {
        let input = ipc_sf::Buffer::from_array(in_buf);
        let output = ipc_sf::Buffer::from_mut_array(out_buf);
        self.file_obj
            .operate_range_with_buffer(operation_id, offset, size, input, output)
    }
}
/// Represents a wrapper [`Directory`] implementation to translate IPC [`IDirectoryClient`] objects to [`Directory`] objects.
#[derive(Clone)]
pub struct ProxyDirectory {
    // The wrapped IPC directory object; `Clone` only bumps the `Arc` refcount.
    dir_obj: Arc<dyn IDirectoryClient>,
}
// TODO: Remove because we don't actually have a guarantee that IDirectoryClient is Sync
unsafe impl Sync for ProxyDirectory {}
unsafe impl Send for ProxyDirectory {}
impl From<Arc<dyn IDirectoryClient>> for ProxyDirectory {
    // Wraps an already-shared IPC directory object without a new allocation.
    fn from(value: Arc<dyn IDirectoryClient>) -> Self {
        Self { dir_obj: value }
    }
}
impl ProxyDirectory {
    /// Creates a new [`ProxyDirectory`] from a [`IDirectoryClient`] shared object
    ///
    /// # Arguments
    ///
    /// * `dir`: The IPC [`IDirectoryClient`] object to wrap (moved into a fresh `Arc`)
    pub fn new(dir: impl IDirectoryClient + 'static) -> Self {
        Self {
            dir_obj: Arc::new(dir),
        }
    }
}
// [`Directory`] implementation that forwards to the wrapped IPC object.
impl Directory for ProxyDirectory {
    /// Reads directory entries into `out_entries`, returning how many entries were actually read.
    fn read(&self, out_entries: &mut [DirectoryEntry]) -> Result<usize> {
        let entry_buf = ipc_sf::Buffer::from_mut_array(out_entries);
        let read_count = self.dir_obj.read(entry_buf)?;
        Ok(read_count as usize)
    }
    /// Queries the directory's total entry count through the wrapped IPC object.
    fn get_entry_count(&self) -> Result<u64> {
        self.dir_obj.get_entry_count()
    }
}
/// Represents a wrapper [`FileSystem`] implementation to translate IPC [`IFileSystemClient`] objects to [`FileSystem`] objects
#[derive(Clone)]
pub struct ProxyFileSystem {
    // The wrapped IPC filesystem object; `Clone` only bumps the `Arc` refcount.
    fs_obj: Arc<dyn IFileSystemClient>,
}
// NOTE(review): asserts thread-safety the compiler cannot verify for
// `Arc<dyn IFileSystemClient>` — confirm the IPC client objects are actually safe to share/send.
unsafe impl Send for ProxyFileSystem {}
unsafe impl Sync for ProxyFileSystem {}
impl ProxyFileSystem {
    /// Creates a new [`ProxyFileSystem`] from a [`IFileSystemClient`] shared object
    ///
    /// # Arguments
    ///
    /// * `fs_obj`: The IPC [`IFileSystemClient`] object to wrap
    pub fn new(fs_obj: Arc<dyn IFileSystemClient>) -> Self {
        Self { fs_obj }
    }
}
// Each method converts the `&str` path into the fixed-size IPC path type (`fsp_sf::Path`)
// and forwards the call. Note that some trait method names differ from the IPC command
// names (e.g. `remove_file` -> `delete_file`, `remove_children_all` -> `clean_directory_recursively`).
impl FileSystem for ProxyFileSystem {
    fn create_file(&self, path: &str, attribute: FileAttribute, size: usize) -> Result<()> {
        let sf_path = fsp::fsp_sf::Path::from_str(path);
        self.fs_obj
            .create_file(attribute, size, ipc_sf::Buffer::from_var(&sf_path))
    }
    fn remove_file(&self, path: &str) -> Result<()> {
        let sf_path = fsp::fsp_sf::Path::from_str(path);
        self.fs_obj.delete_file(ipc_sf::Buffer::from_var(&sf_path))
    }
    fn create_directory(&self, path: &str) -> Result<()> {
        let sf_path = fsp::fsp_sf::Path::from_str(path);
        self.fs_obj
            .create_directory(ipc_sf::Buffer::from_var(&sf_path))
    }
    fn remove_dir(&self, path: &str) -> Result<()> {
        let sf_path = fsp::fsp_sf::Path::from_str(path);
        self.fs_obj
            .delete_directory(ipc_sf::Buffer::from_var(&sf_path))
    }
    fn remove_dir_all(&self, path: &str) -> Result<()> {
        let sf_path = fsp::fsp_sf::Path::from_str(path);
        self.fs_obj
            .delete_directory_recursively(ipc_sf::Buffer::from_var(&sf_path))
    }
    fn get_entry_type(&self, path: &str) -> Result<DirectoryEntryType> {
        let sf_path = fsp::fsp_sf::Path::from_str(path);
        self.fs_obj
            .get_entry_type(ipc_sf::Buffer::from_var(&sf_path))
    }
    fn rename_file(&self, old_path: &str, new_path: &str) -> Result<()> {
        let sf_old_path = fsp::fsp_sf::Path::from_str(old_path);
        let sf_new_path = fsp::fsp_sf::Path::from_str(new_path);
        self.fs_obj.rename_file(
            ipc_sf::Buffer::from_var(&sf_old_path),
            ipc_sf::Buffer::from_var(&sf_new_path),
        )
    }
    fn rename_directory(&self, old_path: &str, new_path: &str) -> Result<()> {
        let sf_old_path = fsp::fsp_sf::Path::from_str(old_path);
        let sf_new_path = fsp::fsp_sf::Path::from_str(new_path);
        self.fs_obj.rename_directory(
            ipc_sf::Buffer::from_var(&sf_old_path),
            ipc_sf::Buffer::from_var(&sf_new_path),
        )
    }
    // Wraps the returned IPC file object in a ProxyFile so it satisfies the File trait.
    fn open_file(&self, path: &str, mode: FileOpenMode) -> Result<Box<dyn File>> {
        let sf_path = fsp::fsp_sf::Path::from_str(path);
        let file_obj = self
            .fs_obj
            .open_file(mode, ipc_sf::Buffer::from_var(&sf_path))?;
        Ok(Box::new(ProxyFile::new(file_obj)))
    }
    // Wraps the returned IPC directory object in a ProxyDirectory so it satisfies the Directory trait.
    fn open_directory(&self, path: &str, mode: DirectoryOpenMode) -> Result<Box<dyn Directory>> {
        let sf_path = fsp::fsp_sf::Path::from_str(path);
        let dir_obj = self
            .fs_obj
            .open_directory(mode, ipc_sf::Buffer::from_var(&sf_path))?;
        Ok(Box::new(ProxyDirectory::new(dir_obj)))
    }
    fn commit(&self) -> Result<()> {
        self.fs_obj.commit()
    }
    fn get_free_space_size(&self, path: &str) -> Result<usize> {
        let sf_path = fsp::fsp_sf::Path::from_str(path);
        self.fs_obj
            .get_free_space_size(ipc_sf::Buffer::from_var(&sf_path))
    }
    fn get_total_space_size(&self, path: &str) -> Result<usize> {
        let sf_path = fsp::fsp_sf::Path::from_str(path);
        self.fs_obj
            .get_total_space_size(ipc_sf::Buffer::from_var(&sf_path))
    }
    fn remove_children_all(&self, path: &str) -> Result<()> {
        let sf_path = fsp::fsp_sf::Path::from_str(path);
        self.fs_obj
            .clean_directory_recursively(ipc_sf::Buffer::from_var(&sf_path))
    }
    fn get_file_time_stamp_raw(&self, path: &str) -> Result<FileTimeStampRaw> {
        let sf_path = fsp::fsp_sf::Path::from_str(path);
        self.fs_obj
            .get_file_time_stamp_raw(ipc_sf::Buffer::from_var(&sf_path))
    }
    fn query_entry(
        &self,
        path: &str,
        query_id: QueryId,
        in_buf: &[u8],
        out_buf: &mut [u8],
    ) -> Result<()> {
        let sf_path = fsp::fsp_sf::Path::from_str(path);
        self.fs_obj.query_entry(
            ipc_sf::Buffer::from_var(&sf_path),
            query_id,
            ipc_sf::Buffer::from_array(in_buf),
            ipc_sf::Buffer::from_mut_array(out_buf),
        )
    }
}
/// Represents a wrapper type to simplify file access, tracking the currently seek-ed location in the file.
pub struct FileAccessor {
    // The underlying file implementation.
    file: Box<dyn File>,
    // Current byte offset used for all reads/writes; advanced after each operation.
    offset: usize,
}
// we can do this because we never leak the `file` field, which would require us to also require `Send` on the trait `File`
unsafe impl Sync for FileAccessor {}
unsafe impl Send for FileAccessor {}
impl From<Box<dyn File>> for FileAccessor {
    // Wraps an already-boxed file, starting at offset 0.
    fn from(value: Box<dyn File>) -> Self {
        Self {
            file: value,
            offset: 0,
        }
    }
}
impl FileAccessor {
    /// Creates a new [`FileAccessor`] from a given [`File`] shared object, starting at offset 0.
    ///
    /// # Arguments
    ///
    /// * `file`: The `File` implementor.
    pub fn new(file: impl File + 'static) -> Self {
        Self {
            file: Box::new(file),
            offset: 0,
        }
    }
    /// Gets the file size.
    pub fn get_size(&mut self) -> Result<usize> {
        self.file.get_size()
    }
    /// Seeks in the file to a certain offset.
    ///
    /// # Arguments
    ///
    /// * `pos`: Where to seek to (absolute, relative to current offset, or relative to the end).
    ///   Relative positions are saturating, so the offset cannot underflow below 0.
    pub fn seek(&mut self, pos: SeekFrom) -> Result<()> {
        match pos {
            SeekFrom::Start(offset) => self.offset = offset as _,
            SeekFrom::Current(offset) => {
                self.offset = self.offset.saturating_add_signed(offset as _)
            }
            SeekFrom::End(offset) => {
                // Seeking from the end needs the current size, which can itself fail.
                let size = self.get_size()?;
                self.offset = size.saturating_add_signed(offset as _);
            }
        };
        Ok(())
    }
    /// Reads data into the given array, advancing the tracked offset by the read byte count.
    ///
    /// Returns the number of bytes (not elements) read.
    ///
    /// # Arguments
    ///
    /// * `out_arr`: The output array.
    pub fn read_array<T: Copy>(&mut self, out_arr: &mut [T]) -> Result<usize> {
        // SAFETY: viewing a valid `&mut [T]` (T: Copy, so no drop/ownership concerns) as a
        // byte slice of the same total size is sound; the pointer and length come from the slice itself.
        let read_size = self.file.read(
            self.offset,
            unsafe {
                core::slice::from_raw_parts_mut(
                    out_arr.as_mut_ptr() as _,
                    cmem::size_of_val(out_arr),
                )
            },
            FileReadOption::None(),
        )?;
        self.offset += read_size;
        Ok(read_size)
    }
    /// Reads a value, advancing the tracked offset by the read byte count.
    ///
    /// NOTE(review): if the underlying read returns fewer than `size_of::<T>()` bytes, the
    /// remaining bytes of the returned value stay zeroed — confirm callers treat short reads as errors.
    pub fn read_val<T: Copy>(&mut self) -> Result<T> {
        // SAFETY(review): assumes all-zero bytes are a valid `T`; `T: Copy` alone does not
        // guarantee this (e.g. references are Copy) — confirm this is only used with plain-data types.
        let mut t = unsafe { cmem::zeroed::<T>() };
        // SAFETY: `t` is a live local, so viewing it as `size_of::<T>()` writable bytes is sound.
        let read_size = self.file.read(
            self.offset,
            unsafe { core::slice::from_raw_parts_mut(&mut t as *mut T as _, cmem::size_of::<T>()) },
            FileReadOption::None(),
        )?;
        self.offset += read_size;
        Ok(t)
    }
    /// Writes data from the given array, advancing the tracked offset by the written byte count.
    ///
    /// The `FLUSH` const parameter selects whether the write is sent with the `Flush` option.
    ///
    /// # Arguments
    ///
    /// * `arr`: The input array
    pub fn write_array<T: Copy, const FLUSH: bool>(&mut self, arr: &[T]) -> Result<()> {
        // SAFETY: viewing a valid `&[T]` (T: Copy) as a read-only byte slice of the same total size is sound.
        let transmuted: &[u8] =
            unsafe { core::slice::from_raw_parts(arr.as_ptr() as _, core::mem::size_of_val(arr)) };
        self.file.write(
            self.offset,
            transmuted,
            if FLUSH {
                FileWriteOption::Flush()
            } else {
                FileWriteOption::None()
            },
        )?;
        self.offset += transmuted.len();
        Ok(())
    }
    /// Writes a value, advancing the tracked offset by its byte size.
    ///
    /// The `FLUSH` const parameter selects whether the write is sent with the `Flush` option.
    ///
    /// # Arguments
    ///
    /// * `t`: The value to write
    pub fn write_val<T: Copy, const FLUSH: bool>(&mut self, t: &T) -> Result<()> {
        // SAFETY: `t` is a valid reference, so viewing it as `size_of::<T>()` read-only bytes is sound.
        let transmuted = unsafe {
            core::slice::from_raw_parts(t as *const T as *const u8, cmem::size_of::<T>())
        };
        self.file.write(
            self.offset,
            transmuted,
            if FLUSH {
                FileWriteOption::Flush()
            } else {
                FileWriteOption::None()
            },
        )?;
        self.offset += transmuted.len();
        Ok(())
    }
    /// Flushes reads/writes to the underlying file.
    ///
    /// It is provided, but it is not currently required as all writes to [`FileAccessor`] send the `Flush` flag.
    pub fn flush(&self) -> Result<()> {
        self.file.flush()
    }
}
// embedded-io / core::fmt integration, allowing a FileAccessor to be used with
// generic readers/writers and with `write!`-style formatting.
impl ErrorType for FileAccessor {
    /// Errors surface as raw [`ResultCode`]s from the underlying [`File`].
    type Error = ResultCode;
}
impl embedded_io::Read for FileAccessor {
    /// Reads into `buf` at the current offset; an empty buffer short-circuits to `Ok(0)`.
    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
        // is_empty() over `len() == 0` (clippy::len_zero).
        if buf.is_empty() {
            return Ok(0);
        }
        self.read_array(buf)
    }
}
impl embedded_io::Write for FileAccessor {
    /// Writes `buf` at the current offset without flushing; reports the full length on success.
    fn write(&mut self, buf: &[u8]) -> Result<usize> {
        if buf.is_empty() {
            return Ok(0);
        }
        self.write_array::<u8, false>(buf).map(|_| buf.len())
    }
    /// Flushes pending writes; delegates to the inherent [`FileAccessor::flush`]
    /// (named explicitly to avoid recursing into this trait method).
    fn flush(&mut self) -> Result<()> {
        // `&mut self` coerces to `&self`; no explicit reborrow needed.
        FileAccessor::flush(self)
    }
}
impl core::fmt::Write for FileAccessor {
    /// Writes UTF-8 text with an immediate flush, mapping any I/O error to `fmt::Error`.
    fn write_str(&mut self, s: &str) -> core::fmt::Result {
        self.write_array::<u8, true>(s.as_bytes())
            .map_err(|_| core::fmt::Error)
    }
}
/// Represents a wrapper type to simplify directory access
pub struct DirectoryAccessor {
    // The underlying directory implementation, shared so get_object() can hand out clones.
    dir: Arc<dyn Directory>,
}
impl From<Arc<dyn Directory>> for DirectoryAccessor {
    // Wraps an already-shared directory object without re-allocating.
    fn from(value: Arc<dyn Directory>) -> Self {
        Self { dir: value }
    }
}
impl DirectoryAccessor {
    /// Creates a new [`DirectoryAccessor`] from a given [`Directory`] shared object
    ///
    /// # Arguments
    ///
    /// * `dir`: The shared object
    pub fn new(dir: impl Directory + 'static) -> Self {
        Self { dir: Arc::new(dir) }
    }
    /// Gets the directory entry count.
    ///
    /// Takes `&self` since [`Directory::get_entry_count`] only needs a shared borrow
    /// (relaxed from `&mut self`; backward-compatible for all callers).
    pub fn get_entry_count(&self) -> Result<u64> {
        self.dir.get_entry_count()
    }
    /// Gets the underlying [`Directory`] shared object.
    pub fn get_object(&self) -> Arc<dyn Directory> {
        self.dir.clone()
    }
    /// Tries to read the next entry.
    ///
    /// Note that if the end is reached this will return `Ok(None)`, the result reflects other possible inner I/O errors.
    pub fn read_next(&mut self) -> Result<Option<DirectoryEntry>> {
        // Read a single entry; a read count of 0 means the directory is exhausted.
        let mut entries: [DirectoryEntry; 1] = Default::default();
        let read_count = self.dir.read(&mut entries)?;
        if read_count == 1 {
            Ok(Some(entries[0]))
        } else {
            Ok(None)
        }
    }
}
/// Associates a mount name (the `<name>:` path prefix) with the [`FileSystem`] serving it.
pub(crate) struct FileSystemDevice {
    // Mount name used as the path prefix for lookups.
    mount_name: String,
    // The filesystem backing this mount.
    fs: Arc<dyn FileSystem>,
}
impl FileSystemDevice {
    /// Creates a device entry for the given mount name and filesystem.
    pub fn new(mount_name: String, fs: Arc<dyn FileSystem>) -> Self {
        Self { mount_name, fs }
    }
}
// NOTE(review): asserts that `Arc<dyn FileSystem>` is safe to share/send across threads —
// confirm every FileSystem implementor actually upholds this.
unsafe impl Sync for FileSystemDevice {}
unsafe impl Send for FileSystemDevice {}
/// Global registry of mounted filesystem devices, searched linearly by mount name.
pub(crate) static G_DEVICES: RwLock<Vec<FileSystemDevice>> = RwLock::new(Vec::new());
/// Looks up a mounted [`FileSystem`] by mount name, returning a clone of its shared handle.
///
/// Fails with [`rc::ResultDeviceNotFound`] when no filesystem is mounted under `name`.
fn find_device_by_name(name: &str) -> Result<Arc<dyn FileSystem>> {
    // Iterator find/map/ok_or instead of the manual loop (clippy::manual_find);
    // the read guard lives for the whole expression, and the Arc is cloned before it drops.
    G_DEVICES
        .read()
        .iter()
        .find(|device| device.mount_name.as_str() == name)
        .map(|device| device.fs.clone())
        .ok_or(rc::ResultDeviceNotFound::make())
}
// Lazily-initialized global fsp-srv session; None until initialize_fspsrv_session() runs.
static G_FSPSRV_SESSION: RwLock<Option<Arc<fsp::srv::FileSystemProxyService>>> = RwLock::new(None);
// Bitflag set describing how to open files with open_file(); note that `Create` is handled
// by open_file() itself and has no counterpart in FileOpenMode (see the convert fns below).
define_bit_set! {
    /// Represents options for opening files
    FileOpenOption (u32) {
        None = 0,
        Create = bit!(0),
        Read = bit!(1),
        Write = bit!(2),
        Append = bit!(3)
    }
}
/// Initializes `fsp-srv` support instantiating a [`FileSystemProxyService`][`fsp::srv::FileSystemProxyService`] shared object
///
/// NOTE(review): double initialization is only caught by the `debug_assert!` below — in
/// release builds a second call silently replaces the existing session. Confirm this is intended.
#[inline]
pub fn initialize_fspsrv_session() -> Result<()> {
    let mut guard = G_FSPSRV_SESSION.write();
    debug_assert!(guard.is_none(), "Double initializing FSP session");
    *guard = Some(Arc::new(service::new_service_object::<
        fsp::srv::FileSystemProxyService,
    >()?));
    Ok(())
}
/// Gets whether `fsp-srv` support was initialized
#[inline]
pub fn is_fspsrv_session_initialized() -> bool {
    G_FSPSRV_SESSION.read().is_some()
}
/// Finalizes `fsp-srv` support
#[inline]
pub(crate) fn finalize_fspsrv_session() {
    // Dropping the taken Arc releases this module's reference to the session.
    G_FSPSRV_SESSION.write().take();
}
/// Gets the global [`IFileSystemProxyClient`] shared object used for `fsp-srv` support
///
/// Fails with `ResultNotInitialized` if [`initialize_fspsrv_session`] has not been called.
#[inline]
pub fn get_fspsrv_session() -> Result<Arc<fsp::srv::FileSystemProxyService>> {
    G_FSPSRV_SESSION
        .read()
        .as_ref()
        .map(Clone::clone)
        .ok_or(super::rc::ResultNotInitialized::make())
}
/// Mounts a [`FileSystem`]
///
/// Paths inside the filesystem will be accessible as `<name>:/<path>` with fns like [`open_file`], etc.
///
/// # Arguments
///
/// * `name`: The mount name
/// * `fs`: The [`FileSystem`] shared object
pub fn mount(name: &str, fs: Arc<dyn FileSystem>) {
    // No uniqueness check is performed; lookups return the first device with a matching name.
    G_DEVICES
        .write()
        .push(FileSystemDevice::new(String::from(name), fs));
}
/// Mounts an IPC [`IFileSystemClient`]
///
/// Essentially creates a [`ProxyFileSystem`] and [`mount`]s it
///
/// # Arguments
///
/// * `name`: The mount name
/// * `fs_obj`: The [`IFileSystemClient`] shared object
pub fn mount_fsp_filesystem(name: &str, fs_obj: Arc<dyn IFileSystemClient>) {
    let proxy_fs = Arc::new(ProxyFileSystem::new(fs_obj));
    mount(name, proxy_fs);
}
/// Mounts the system's SD card using `fsp-srv` support
///
/// This will fail with [`ResultNotInitialized`][`super::rc::ResultNotInitialized`] if `fsp-srv` support isn't initialized
///
/// # Arguments
///
/// * `name`: The name of the mount that we store for the sdcard
pub fn mount_sd_card(name: &str) -> Result<()> {
    let sd_fs_obj = get_fspsrv_session()?.open_sd_card_filesystem()?;
    mount_fsp_filesystem(name, Arc::new(sd_fs_obj));
    Ok(())
}
/// Unmounts a mounted filesystem
///
/// Note that this does nothing if there is no mounted filesystem with the given name
///
/// # Arguments
///
/// * `mount_name`: The mount name
pub fn unmount(mount_name: &str) {
    // The write guard auto-derefs to the device Vec; the explicit `.deref_mut()` the
    // original used is redundant (clippy::explicit_deref_methods).
    G_DEVICES
        .write()
        .retain(|dev| dev.mount_name.as_str() != mount_name);
}
/// Unmounts all filesystems
pub fn unmount_all() {
    G_DEVICES.write().clear();
}
/// Returns the [`FileSystem`] corresponding to a given path
///
/// If there is a filesystem mounted as `demo`, calling this with `"demo:/anything"` will return an instance to that mounted filesystem
///
/// # Arguments
///
/// * `path`: The path to use
pub fn get_path_filesystem(path: &str) -> Result<Arc<dyn FileSystem>> {
    // Delegate to format_path() so the "<mount>:<path>" parsing lives in a single place.
    let (fs, _) = format_path(path)?;
    Ok(fs)
}
/// Returns the [`FileSystem`] and the processed path corresponding to a given path
///
/// If there is a filesystem mounted as `demo`, calling this with `"demo:/anything"` will return an instance to that mounted filesystem and `"anything"` as the processed path
///
/// # Arguments
///
/// * `path`: The path to use
pub fn format_path(path: &str) -> Result<(Arc<dyn FileSystem>, &str)> {
    // Everything before the first ':' is the mount name; everything after it is the
    // in-filesystem path. A path without ':' cannot name a mounted device.
    let split = path.find(':').ok_or(rc::ResultDeviceNotFound::make())?;
    let fs = find_device_by_name(&path[..split])?;
    Ok((fs, &path[split + 1..]))
}
/// Creates a file
///
/// # Arguments
///
/// * `path`: The path to use
/// * `size`: The initial file size, default/IPC behavior is to fill the file with zeros
/// * `attribute`: The file attribute, default/IPC behavior uses this to allow creating "concatenation files" (allowing 32GB+ files in FAT32 filesystems)
pub fn create_file(path: &str, size: usize, attribute: FileAttribute) -> Result<()> {
    let (fs, processed_path) = format_path(path)?;
    // Note: the FileSystem trait takes (path, attribute, size) while this fn takes (path, size, attribute).
    fs.create_file(processed_path, attribute, size)
}
/// Deletes a file
///
/// # Arguments
///
/// * `path`: The path to use
pub fn remove_file(path: &str) -> Result<()> {
    let (fs, processed_path) = format_path(path)?;
    fs.remove_file(processed_path)
}
/// Creates a directory
///
/// # Arguments
///
/// * `path`: The path to use
pub fn create_directory(path: &str) -> Result<()> {
    let (fs, processed_path) = format_path(path)?;
    fs.create_directory(processed_path)
}
/// Deletes a directory
///
/// Note that (in default/IPC behavior) this won't succeed unless the directory is empty (see [`remove_dir_all`])
///
/// # Arguments
///
/// * `path`: The path to use
pub fn remove_dir(path: &str) -> Result<()> {
    let (fs, processed_path) = format_path(path)?;
    fs.remove_dir(processed_path)
}
/// Deletes a directory and all its children files/directories
///
/// # Arguments
///
/// * `path`: The path to use
pub fn remove_dir_all(path: &str) -> Result<()> {
    let (fs, processed_path) = format_path(path)?;
    fs.remove_dir_all(processed_path)
}
/// Deletes all the children files/directories inside a directory
///
/// # Arguments
///
/// * `path`: The path to use
pub fn remove_children_all(path: &str) -> Result<()> {
    let (fs, processed_path) = format_path(path)?;
    fs.remove_children_all(processed_path)
}
/// Gets a path's [`DirectoryEntryType`]
///
/// This can be used to easily check if a file/directory exists, or whether they actually are a file or a directory
///
/// # Arguments
///
/// * `path`: The path to use
pub fn get_entry_type(path: &str) -> Result<DirectoryEntryType> {
    let (fs, processed_path) = format_path(path)?;
    fs.get_entry_type(processed_path)
}
/// Converts a [`FileOpenOption`] to a [`FileOpenMode`]
///
/// Note: `FileOpenOption::Create` has no `FileOpenMode` counterpart and is intentionally
/// not mapped here — file creation is handled separately (see [`open_file`]).
///
/// # Arguments
///
/// * `option`: Input option
pub fn convert_file_open_option_to_mode(option: FileOpenOption) -> FileOpenMode {
    let mut mode = FileOpenMode::None();
    if option.contains(FileOpenOption::Read()) {
        mode |= FileOpenMode::Read();
    }
    if option.contains(FileOpenOption::Write()) {
        mode |= FileOpenMode::Write();
    }
    if option.contains(FileOpenOption::Append()) {
        mode |= FileOpenMode::Append();
    }
    mode
}
/// Converts a [`FileOpenMode`] to a [`FileOpenOption`]
///
/// Inverse of [`convert_file_open_option_to_mode`]; the resulting option never has `Create` set.
///
/// # Arguments
///
/// * `mode`: Input mode
pub fn convert_file_open_mode_to_option(mode: FileOpenMode) -> FileOpenOption {
    let mut option = FileOpenOption::None();
    if mode.contains(FileOpenMode::Read()) {
        option |= FileOpenOption::Read();
    }
    if mode.contains(FileOpenMode::Write()) {
        option |= FileOpenOption::Write();
    }
    if mode.contains(FileOpenMode::Append()) {
        option |= FileOpenOption::Append();
    }
    option
}
/// Renames a file
///
/// # Arguments
///
/// * `old_path`: The old path to use
/// * `new_path`: The new path to use
pub fn rename_file(old_path: &str, new_path: &str) -> Result<()> {
let (old_fs, processed_old_path) = format_path(old_path)?;
let (new_fs, processed_new_path) = format_path(new_path)?;
result_return_unless!(
Arc::<dyn FileSystem>::ptr_eq(&old_fs, &new_fs),
rc::ResultNotInSameFileSystem
);
old_fs.rename_file(processed_old_path, processed_new_path)
}
/// Renames a directory
///
/// # Arguments
///
/// * `old_path`: The old path to use
/// * `new_path`: The new path to use
pub fn rename_directory(old_path: &str, new_path: &str) -> Result<()> {
let (old_fs, processed_old_path) = format_path(old_path)?;
let (new_fs, processed_new_path) = format_path(new_path)?;
result_return_unless!(
Arc::<dyn FileSystem>::ptr_eq(&old_fs, &new_fs),
rc::ResultNotInSameFileSystem
);
old_fs.rename_directory(processed_old_path, processed_new_path)
}
/// Renames a file/directory
///
/// Essentially is a wrapper for checking the entry type and calling [`rename_file`] or [`rename_directory`] according to that
///
/// Note that, to minimize overhead, this should only be used if the entry type isn't known beforehand
///
/// # Arguments
///
/// * `old_path`: The old path to use
/// * `new_path`: The new path to use
pub fn rename(old_path: &str, new_path: &str) -> Result<()> {
let (old_fs, processed_old_path) = format_path(old_path)?;
let (new_fs, processed_new_path) = format_path(new_path)?;
result_return_unless!(
Arc::<dyn FileSystem>::ptr_eq(&old_fs, &new_fs),
rc::ResultNotInSameFileSystem
);
match old_fs.get_entry_type(processed_old_path)? {
DirectoryEntryType::Directory => {
old_fs.rename_directory(processed_old_path, processed_new_path)
}
DirectoryEntryType::File => old_fs.rename_file(processed_old_path, processed_new_path),
}
}
/// Opens a file as a [`FileAccessor`]
///
/// # Arguments
///
/// * `path`: The path to use
/// * `option`: The open option
pub fn open_file(path: &str, option: FileOpenOption) -> Result<FileAccessor> {
let (fs, processed_path) = format_path(path)?;
let mode = convert_file_open_option_to_mode(option);
let mut file = match fs.open_file(processed_path, mode) {
Ok(file) => file,
Err(rc) => {
if fsp::fsp_sf::rc::ResultPathNotFound::matches(rc)
&& option.contains(FileOpenOption::Create())
{
// Create the file if it doesn't exist and we were told to do so
fs.create_file(processed_path, FileAttribute::None(), 0)?;
fs.open_file(processed_path, mode)?
} else {
return Err(rc);
}
}
};
let offset: u64 = match option.contains(FileOpenOption::Append()) {
true => file.get_size().unwrap_or(0) as _,
false => 0,
};
// convert the Boxed file to a FileAccessor
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | true |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/vmem.rs | src/vmem.rs | //! Virtual memory support
use core::ptr::null_mut;
use core::sync::atomic::AtomicPtr;
use crate::sync::RwLock;
use crate::result::*;
use crate::svc;
/// Represents a virtual region of memory, represented as pointer-sized uints. i.e. [start, end)
#[derive(Copy, Clone, Default, Debug)]
pub struct VirtualRegion {
    /// The start address of the region
    pub start: usize,
    /// The (non inclusive) end address of the region
    pub end: usize,
}
impl VirtualRegion {
    /// Creates an empty [`VirtualRegion`] with invalid address values
    #[inline]
    pub const fn null() -> Self {
        Self { start: 0, end: 0 }
    }
    /// Returns whether `address` falls inside the half-open `[start, end)` range
    ///
    /// # Arguments
    ///
    /// * `address` - The address to check
    #[inline]
    pub const fn contains_addr(&self, address: usize) -> bool {
        self.start <= address && address < self.end
    }
    /// Returns whether `other` lies entirely within this region
    ///
    /// # Arguments
    ///
    /// * `other` - The region to check
    #[inline]
    pub const fn contains(&self, other: Self) -> bool {
        other.start >= self.start && other.end <= self.end
    }
    /// Returns whether this region and `other` share at least one address
    ///
    /// # Arguments
    ///
    /// * `other` - The other region to check
    #[inline]
    pub const fn overlaps(&self, other: Self) -> bool {
        // Two half-open intervals intersect iff each one starts before the other ends.
        self.start < other.end && other.start < self.end
    }
}
/// The standard memory regions for NX processes
#[derive(Copy, Clone, Debug, Default)]
pub struct StandardRegions {
    // Stack region of the current process.
    stack: VirtualRegion,
    // Heap region of the current process.
    heap: VirtualRegion,
    // Legacy alias region of the current process.
    legacy_alias: VirtualRegion,
    // Full (ASLR) address space of the current process.
    global_address_space: VirtualRegion,
}
impl StandardRegions {
    // Const constructor with all-null regions; needed (in addition to the Default derive)
    // because Default::default() cannot be used to initialize the static below in a const context.
    const fn new() -> Self {
        Self {
            stack: VirtualRegion::null(),
            heap: VirtualRegion::null(),
            legacy_alias: VirtualRegion::null(),
            global_address_space: VirtualRegion::null(),
        }
    }
}
// Populated by initialize(); all regions are null until then, so the getters below
// return null regions before initialization.
static STANDARD_VMEM_REGIONS: RwLock<StandardRegions> = RwLock::new(StandardRegions::new());
// Search hint for allocate(): the last address published by a successful allocation.
static NEXT_FREE_PTR: AtomicPtr<u8> = AtomicPtr::new(null_mut());
/// Gets the current process's address space [`VirtualRegion`]
///
/// Note that [`initialize()`] must have been called before for the region to be valid (although it's automatically called on [`rrt0`][`crate::rrt0`])
pub fn get_address_space() -> VirtualRegion {
    STANDARD_VMEM_REGIONS.read().global_address_space
}
/// Gets the current process's stack [`VirtualRegion`]
///
/// Note that [`initialize()`] must have been called before for the region to be valid (although it's automatically called on [`rrt0`][`crate::rrt0`])
pub fn get_stack_region() -> VirtualRegion {
    STANDARD_VMEM_REGIONS.read().stack
}
/// Gets the current process's heap [`VirtualRegion`]
///
/// Note that [`initialize()`] must have been called before for the region to be valid (although it's automatically called on [`rrt0`][`crate::rrt0`])
pub fn get_heap_region() -> VirtualRegion {
    STANDARD_VMEM_REGIONS.read().heap
}
/// Gets the current process's legacy alias [`VirtualRegion`]
///
/// Note that [`initialize()`] must have been called before for the region to be valid (although it's automatically called on [`rrt0`][`crate::rrt0`])
pub fn get_legacy_alias_region() -> VirtualRegion {
    STANDARD_VMEM_REGIONS.read().legacy_alias
}
/// Queries the kernel for one standard region of the current process, given the
/// info IDs for its base address and its size.
fn read_region_info(
    address_info_id: svc::InfoId,
    size_info_id: svc::InfoId,
) -> Result<VirtualRegion> {
    let handle = svc::CURRENT_PROCESS_PSEUDO_HANDLE;
    let start = svc::get_info(address_info_id, handle, 0)? as usize;
    let end = start + svc::get_info(size_info_id, handle, 0)? as usize;
    Ok(VirtualRegion { start, end })
}
/// Initializes virtual memory support
///
/// This internally retrieves all the current process's memory [`VirtualRegion`]s
///
/// This is automatically called on [`rrt0`][`crate::rrt0`]
pub fn initialize() -> Result<()> {
    use svc::InfoId::*;
    // Query every standard region first; only take the write lock once all queries succeeded.
    let regions = StandardRegions {
        global_address_space: read_region_info(AslrRegionAddress, AslrRegionSize)?,
        stack: read_region_info(StackRegionAddress, StackRegionSize)?,
        heap: read_region_info(HeapRegionAddress, HeapRegionSize)?,
        legacy_alias: read_region_info(AliasRegionAddress, AliasRegionSize)?,
    };
    *STANDARD_VMEM_REGIONS.write() = regions;
    Ok(())
}
/// Finds available virtual memory for the specified size, returning it's address
///
/// Note that [`initialize()`] must have been called before for this to succeed (although it's automatically called on [`rrt0`][`crate::rrt0`])
///
/// Linearly probes the address space starting from the last published candidate
/// ([`NEXT_FREE_PTR`]), skipping the stack/heap/legacy-alias regions and any range the
/// kernel reports as non-free. This fn does not map or reserve memory itself — it only
/// publishes the chosen address via a CAS on [`NEXT_FREE_PTR`].
///
/// # Arguments
///
/// * `size`: The size of the virtual memory to allocate
pub fn allocate(size: usize) -> Result<*mut u8> {
    use core::sync::atomic::Ordering::*;
    let vmem_regions = *STANDARD_VMEM_REGIONS.read();
    // Relaxed is acceptable here: this is only a starting hint, and the CAS below
    // re-validates against the then-current value.
    let original_free_ptr = NEXT_FREE_PTR.load(Relaxed);
    let mut attempt_addr = original_free_ptr as usize;
    loop {
        // Wrap back to the start of the address space when the candidate falls outside it
        // (also covers the initial null hint before the first allocation).
        if !vmem_regions
            .global_address_space
            .contains_addr(attempt_addr)
        {
            attempt_addr = vmem_regions.global_address_space.start;
        }
        // NOTE(review): `attempt_addr + size` can overflow / run past the address space end
        // for huge sizes — confirm callers keep `size` within the address space.
        let attempt_region = VirtualRegion {
            start: attempt_addr,
            end: attempt_addr + size,
        };
        // Skip past any predefined region the candidate overlaps.
        if vmem_regions.stack.overlaps(attempt_region) {
            attempt_addr = vmem_regions.stack.end;
            continue;
        }
        if vmem_regions.heap.overlaps(attempt_region) {
            attempt_addr = vmem_regions.heap.end;
            continue;
        }
        if vmem_regions.legacy_alias.overlaps(attempt_region) {
            attempt_addr = vmem_regions.legacy_alias.end;
            continue;
        }
        // we have an address that isn't in a predefined region. So now we're going to just check if it's already mapped for something
        match svc::query_memory(attempt_addr as *mut u8)? {
            (memory_info, _) if memory_info.state == svc::MemoryState::Free => {
                // Publish the candidate only if no other thread advanced the hint meanwhile;
                // on contention, restart the probe from the other thread's published value.
                match NEXT_FREE_PTR.compare_exchange(
                    original_free_ptr,
                    attempt_addr as *mut u8,
                    SeqCst,
                    SeqCst,
                ) {
                    Ok(_) => {
                        return Ok(attempt_addr as *mut u8);
                    }
                    Err(new_attempt_addr) => {
                        attempt_addr = new_attempt_addr as usize;
                        continue;
                    }
                }
            }
            (memory_info, _) => {
                // Candidate range is in use: jump past the whole queried memory block.
                attempt_addr = memory_info.base_address + memory_info.size;
                continue;
            }
        }
    }
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service.rs | src/service.rs | //! Base service/named port support and wrappers
use core::ffi::CStr;
use sm::IUserInterfaceClient;
use crate::ipc::client;
use crate::ipc::sf;
use crate::result::*;
use crate::svc;
pub mod sm;
/// Represents a named port interface
///
/// Interfaces which wrap named ports (see [`manage_named_port`][`svc::manage_named_port`] or [`connect_to_named_port`][`svc::connect_to_named_port`]) must implement this trait
pub trait INamedPort: client::IClientObject {
    /// Gets the name to be used to connect to the named port (via [`connect_to_named_port`][`svc::connect_to_named_port`])
    fn get_name() -> &'static CStr;
    /// This will get executed after connecting to the named port in [`new_named_port_object`], allowing for extra initialization
    ///
    /// Some interfaces may have initialization commands (check [SM's case][`sm::UserInterface::register_client`]) which can be automatically called this way
    fn post_initialize(&mut self) -> Result<()>;
}
/// Represents a service interface
///
/// Interfaces which wrap services (see [SM][`sm::UserInterface`]) must implement this trait
pub trait IService: client::IClientObject {
    /// Gets the service's name
    fn get_name() -> sm::ServiceName;
    /// Gets whether the service should be used as a domain
    ///
    /// If this is [`true`], the service will be converted to a domain after being accessed (see [`convert_to_domain`][`sf::Session::convert_to_domain`]) in [`new_service_object`]
    fn as_domain() -> bool;
    /// This will get executed after accessing the service in [`new_service_object`], allowing for extra initialization
    ///
    /// Some interfaces may have initialization commands (check [SM's case][`sm::UserInterface::register_client`]) which can be automatically called this way
    fn post_initialize(&mut self) -> Result<()>;
}
/// Wrapper for connecting to a named port and instantiating the wrapper interface over the specified named port
///
/// For more information about this, check [`INamedPort`]
pub fn new_named_port_object<T: INamedPort + 'static>() -> Result<T> {
    // SAFETY(review): `T::get_name()` yields a valid &'static CStr for the port name —
    // presumably that satisfies svc::connect_to_named_port's contract; confirm its requirements.
    let handle = unsafe { svc::connect_to_named_port(T::get_name()) }?;
    let mut object = T::new(sf::Session::from_handle(handle));
    object.post_initialize()?;
    Ok(object)
}
/// Wrapper for accessing a service and instantiating the wrapper interface over the specified service
///
/// For more information about this, check [`IService`]
pub fn new_service_object<T: IService>() -> Result<T> {
    let sm = new_named_port_object::<sm::UserInterface>()?;
    let session_handle = sm.get_service_handle(T::get_name())?;
    // NOTE(review): the SM client is detached immediately after obtaining the service
    // handle, before the SM session is dropped — confirm this matches SM's expected flow.
    sm.detach_client(sf::ProcessId::new())?;
    let mut object = T::new(sf::Session::from_handle(session_handle.handle));
    if T::as_domain() {
        object.convert_to_domain()?;
    }
    object.post_initialize()?;
    Ok(object)
}
/// "psm" service definitions.
pub mod psm;
/// "fsp-srv" service definitions.
pub mod fsp;
/// "lm" service definitions.
pub mod lm;
/// "vi:*" service definitions.
pub mod vi;
/// "nvdrv" and "nvdrv:*" service definitions.
pub mod nv;
/// "dispdrv" service definitions.
pub mod dispdrv;
/// "fatal:u" service definitions.
pub mod fatal;
/// "hid" service definitions.
pub mod hid;
/// "appletAE" service definitions.
pub mod applet;
/// "psc:m" service definitions.
pub mod psc;
/// "pm:*" service definitions.
pub mod pm;
/// "set:sys" service definitions.
pub mod set;
/// "mii:e" service definitions.
pub mod mii;
/// "csrng" service definitions.
pub mod spl;
/// "usb:hs" service definitions.
pub mod usb;
/// "ldr:shel" service definitions.
pub mod ldr;
/// "nfp:*" service definitions.
pub mod nfp;
/// "ncm" service definitions.
pub mod ncm;
/// "lr" service definitions.
pub mod lr;
/// "bsd" socket service definitions
pub mod bsd;
/// "aud*" audio service definitions
pub mod audio;
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/smc.rs | src/smc.rs | //! Secure monitor support and wrappers
use crate::svc;
use core::mem as cmem;
/// Represents the secure monitor function IDs
///
/// Note that only those supported by this libraries are present in the enum...
/// See [SwitchBrew](https://switchbrew.org/wiki/SMC) for all options.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(u64)]
pub enum FunctionId {
    // Placeholder so the enum has a defined zero value; not a real SMC function.
    Invalid = 0,
    //SetConfig = 0xC3000401,
    //GetConfig = 0xC3000002,
    //GetResult = 0xC3000003,
    //GetResultData = 0xC3000404,
    //MudularExponentiate = 0xC3000E05,
    GenerateRandomBytes = 0xC3000006,
    //GenerateAesKek = 0xC3000007,
    //LoadAesKey = 0xC3000008,
    //ComputeAes = 0xC3000009,
    //GenerateSpecificAesKey = 0xC300000A,
    //ComputeCmac = 0xC300040B,
    //DecryptAndImportEsDeviceKey = 0xC300100C,
    //ReencryptDeviceUniqueData = 0xC300D60C,
    //DecryptDeviceUniqueData = 0xC300100D,
    //DecryptAndImportLotusKey = 0xC300100E,
    //ModularExponentiateByStorageKey = 0xC300060F,
    //PrepareEsDeviceUniqueKey = 0xC3000610,
    //LoadPreparedAesKey = 0xC3000011,
    //PrepareEsCommonKey = 0xC3000012,
}
/// Represents the raw argument layout used in secure monitor calls
pub type Arguments = [u64; 8];
/// Represents the secure monitor call input layout (special case of [`Arguments`] for input)
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(C)]
pub struct Input {
    /// The calling function ID
    pub fn_id: FunctionId,
    /// The function-specific arguments
    pub args: [u64; 7],
}
// Layout guard: Input must match Arguments' 0x40 bytes (8 * u64) for to_args' transmute below.
const_assert!(cmem::size_of::<Input>() == 0x40);
impl Input {
    /// Creates a new, empty call [`Input`] with a certain function ID
    ///
    /// # Arguments
    ///
    /// * `fn_id`: Function ID value
    #[inline]
    pub const fn new(fn_id: FunctionId) -> Self {
        Self {
            fn_id,
            args: [0; 7],
        }
    }
    /// Converts this [`Input`] to the more generic [`Arguments`] layout
    #[inline]
    pub fn to_args(self) -> Arguments {
        // SAFETY: Input is #[repr(C)] (a #[repr(u64)] enum followed by [u64; 7]); the
        // const_assert above pins its size to 0x40 bytes, and every bit pattern is a valid [u64; 8].
        unsafe { cmem::transmute(self) }
    }
}
/// Represents the result values returned on secure monitor call responses
// NOTE(review): values presumably mirror the secure monitor's return codes (see SwitchBrew SMC docs) — confirm.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(u64)]
pub enum Result {
    #[default]
    Success = 0,
    NotImplemented = 1,
    InvalidArgument = 2,
    InProgress = 3,
    NoAsyncOperation = 4,
    InvalidAsyncOperation = 5,
    NotPermitted = 6,
}
/// Represents the secure monitor call output layout (special case of [`Arguments`] for output)
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(C)]
pub struct Output {
/// The response result
pub result: Result,
/// The response parameters
pub params: [u64; 7],
}
const_assert!(cmem::size_of::<Output>() == cmem::size_of::<Input>());
impl Output {
/// Creates an [`Output`] layout from a more generic [`Arguments`] layout
///
/// # Arguments
///
/// * `args`: The layout to create from
#[inline]
pub fn from_args(args: Arguments) -> Self {
unsafe { cmem::transmute(args) }
}
}
/// Represents the maximum size of the random bytes one can get in the `generate_random_bytes` SMC
///
/// This value is equivalent to the size of [`Output::params`]
pub const GENERATE_RANDOM_BYTES_MAX_SIZE: usize = 0x38;
/// Secure monitor call which generates random bytes
///
/// Note that the process needs to be running in processor 3 in order to be able to execute secure monitor calls
///
/// # Arguments
///
/// * `out_bytes`: Array to fill with random bytes, whose size mustn't exceed [`GENERATE_RANDOM_BYTES_MAX_SIZE`]
pub fn generate_random_bytes(out_bytes: &mut [u8]) -> Result {
let mut input = Input::new(FunctionId::GenerateRandomBytes);
input.args[0] = out_bytes.len() as u64;
let output = Output::from_args(svc::call_secure_monitor(input.to_args()));
if output.result == Result::Success {
unsafe {
core::ptr::copy(
output.params.as_ptr() as *const u8,
out_bytes.as_mut_ptr(),
out_bytes.len(),
);
}
}
output.result
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/result.rs | src/result.rs | //! Common result support
use core::fmt;
use core::result;
use nx_derive::{Request, Response};
const MODULE_BITS: u32 = 9;
const DESCRIPTION_BITS: u32 = 13;
const DEFAULT_VALUE: u32 = 0;
const SUCCESS_VALUE: u32 = DEFAULT_VALUE;

/// Packs a module and a description into a raw result value: the module occupies the
/// low `MODULE_BITS` bits, the description sits directly above them.
#[inline]
pub(crate) const fn pack_value(module: u32, description: u32) -> u32 {
    (description << MODULE_BITS) | module
}

/// Extracts the module field (the low `MODULE_BITS` bits) of a raw result value.
#[inline]
pub(crate) const fn unpack_module(value: u32) -> u32 {
    value & ((1u32 << MODULE_BITS) - 1)
}

/// Extracts the description field (`DESCRIPTION_BITS` bits above the module) of a raw
/// result value.
#[inline]
pub(crate) const fn unpack_description(value: u32) -> u32 {
    (value >> MODULE_BITS) & ((1u32 << DESCRIPTION_BITS) - 1)
}
/// Represents a (raw) result value used all over the OS
///
/// These are referred as `Result` on docs/official code, but we intentionally name it as [`ResultCode`] to distinguish it from the [`Result`] enum type
///
/// Results are often displayed/shown, for example, like `2168-0002`, which corresponds to `<2000 + module>-<description>`
///
/// [`Debug`][`fmt::Debug`] formatting formats the results as a hex-value (`0x4A8`), while [`Display`][`fmt::Display`] formatting formats the result in the format described above (`2168-0002`)
#[derive(Request, Response, Copy, Clone, PartialEq, Eq, Default)]
#[repr(C)]
pub struct ResultCode {
    // Raw packed value: module in the low bits, description above (see pack_value)
    value: u32,
}

impl ResultCode {
    /// Creates a [`ResultCode`] from a raw value
    ///
    /// # Arguments
    ///
    /// * `value`: The raw value
    #[inline]
    pub const fn new(value: u32) -> Self {
        Self { value }
    }

    /// Wrapper for creating a new [`Result::Err`] value with the following raw result
    ///
    /// # Arguments
    ///
    /// * `value`: The raw value, note that it mustn't be `0`/success (that would be undefined behavior)
    #[inline]
    pub const fn new_err<T>(value: u32) -> Result<T> {
        Err(Self::new(value))
    }

    /// Returns whether the [`ResultCode`] is successful
    ///
    /// A result value of `0` is a successful value, this essentially checks that
    #[inline]
    pub const fn is_success(&self) -> bool {
        self.value == SUCCESS_VALUE
    }

    /// Returns whether the [`ResultCode`] is not successful
    ///
    /// This is the exact opposite of [`is_success`][`ResultCode::is_success`]
    #[inline]
    pub const fn is_failure(&self) -> bool {
        !self.is_success()
    }

    /// Gets the raw value of the [`ResultCode`]
    #[inline]
    pub const fn get_value(&self) -> u32 {
        self.value
    }

    /// Gets the module of the [`ResultCode`]
    #[inline]
    pub const fn get_module(&self) -> u32 {
        unpack_module(self.value)
    }

    /// Gets the description of the [`ResultCode`]
    #[inline]
    pub const fn get_description(&self) -> u32 {
        unpack_description(self.value)
    }
}

impl fmt::Debug for ResultCode {
    // Hex formatting of the raw value, e.g. `0x4A8`
    fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
        write!(fmt, "{:#X}", self.value)
    }
}

impl fmt::Display for ResultCode {
    // User-facing `<2000 + module>-<description>` formatting, e.g. `2168-0002`
    fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
        write!(
            fmt,
            "{:0>4}-{:0>4}",
            2000 + self.get_module(),
            self.get_description()
        )
    }
}
/// Represents a result holding a certain value or a [`ResultCode`] as an indication of failure
pub type Result<T> = result::Result<T, ResultCode>;

/// Builds a [`Result`] from a [`ResultCode`] and a success value: yields `Ok(value)`
/// when the code is successful and `Err(rc)` otherwise
///
/// # Arguments
///
/// * `rc`: The [`ResultCode`] to inspect
/// * `value`: The value wrapped on success
#[inline(always)]
pub fn pack<T>(rc: ResultCode, value: T) -> Result<T> {
    match rc.is_success() {
        true => Ok(value),
        false => Err(rc),
    }
}

/// Extracts the [`ResultCode`] behind a packed [`Result`], mapping `Ok` to the success code
///
/// # Arguments
///
/// * `rc`: The [`Result`] to unpack
#[inline(always)]
pub fn unpack<T>(rc: &Result<T>) -> ResultCode {
    rc.as_ref().err().copied().unwrap_or(ResultSuccess::make())
}
/// Represents a base trait for result value definitions to follow
pub trait ResultBase {
    /// Gets the result definition's module
    fn get_module() -> u32;
    /// Gets the result definition's description
    fn get_description() -> u32;

    /// Gets the result definition's raw (packed) value
    #[inline(always)]
    fn get_value() -> u32 {
        pack_value(Self::get_module(), Self::get_description())
    }

    /// Produces a [`ResultCode`] from this result definition
    #[inline(always)]
    fn make() -> ResultCode {
        ResultCode::new(Self::get_value())
    }

    /// Produces a [`Result::Err`] value from this result definition
    #[inline(always)]
    fn make_err<T>() -> Result<T> {
        Err(Self::make())
    }

    /// Returns whether the given [`ResultCode`] matches this result definition
    ///
    /// # Arguments
    ///
    /// * `rc`: The [`ResultCode`] to check
    #[inline(always)]
    fn matches(rc: ResultCode) -> bool {
        Self::get_value() == rc.get_value()
    }
}
// TODO: document all results? are the names not explicit enough?
result_define! {
    Success: 0, 0
}

// Bridges OS result codes into the `embedded_io` error model (used by the fs/socket
// adapters), mapping known module/description pairs to the closest `ErrorKind`
#[cfg(any(feature = "fs", feature = "socket"))]
impl embedded_io::Error for ResultCode {
    fn kind(&self) -> embedded_io::ErrorKind {
        use embedded_io::ErrorKind;
        match (self.get_module(), self.get_description()) {
            #[cfg(feature = "socket")]
            // Socket results: description is an errno-like value
            (crate::socket::rc::RESULT_MODULE, errno) => match errno {
                1004 => ErrorKind::Interrupted,
                1005 => ErrorKind::WriteZero,
                1011 => ErrorKind::TimedOut,
                1032 => ErrorKind::BrokenPipe,
                _ => ErrorKind::Other,
            },
            #[cfg(feature = "fs")]
            // NOTE(review): this arm matches on `RESULT_SUBMODULE` while the socket arm
            // uses `RESULT_MODULE` — confirm the fs constant is indeed the module value
            (crate::fs::rc::RESULT_SUBMODULE, errno) => match errno {
                4000..=4999 => ErrorKind::InvalidData,
                6003..=6199 => ErrorKind::InvalidInput,
                6602 | 6603 => ErrorKind::NotFound,
                6300..=6399 => ErrorKind::Unsupported,
                6400..=6449 => ErrorKind::PermissionDenied,
                _ => ErrorKind::Other,
            },
            _ => ErrorKind::Other,
        }
    }
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/util.rs | src/util.rs | //! Common library utilities
use crate::diag::abort;
use crate::diag::log;
use crate::diag::log::Logger;
use crate::result::*;
use crate::thread;
use alloc::string::String;
use alloc::string::ToString;
use core::fmt;
use core::panic;
use core::ptr;
use core::str;
use nx_derive::{Request, Response};
pub mod rc;

// Internal conversion helpers (similar in spirit to std's private AsInner/IntoInner
// pattern): expose a wrapper type's inner representation to other crate modules.
#[doc(hidden)]
pub trait AsInner<Inner: ?Sized> {
    /// Borrows the inner representation
    fn as_inner(&self) -> &Inner;
}

#[doc(hidden)]
#[allow(dead_code)] // not used on all platforms
pub trait AsInnerMut<Inner: ?Sized> {
    /// Mutably borrows the inner representation
    fn as_inner_mut(&mut self) -> &mut Inner;
}

#[doc(hidden)]
pub trait IntoInner<Inner> {
    /// Consumes `self`, yielding the inner representation
    fn into_inner(self) -> Inner;
}
/// Represents a 16-byte UUID
#[derive(Request, Response, Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct Uuid {
    /// The UUID byte array
    pub uuid: [u8; 0x10],
}

#[cfg(feature = "rand")]
impl Uuid {
    /// Creates a [`Uuid`] filled by the system "csrng" random service
    pub fn random() -> Result<Self> {
        use crate::ipc::sf::Buffer;
        use crate::service::spl::{IRandomClient, RandomService};
        // Initial fill value is irrelevant — the buffer is overwritten by the
        // service call below (assuming the service fills the whole buffer)
        let mut uuid = [8; 16];
        crate::service::new_service_object::<RandomService>()?
            .generate_random_bytes(Buffer::from_mut_array(&mut uuid))?;
        Ok(Self { uuid })
    }

    /// Creates a [`Uuid`] filled by the provided RNG
    // NOTE(review): the `nx::` path relies on the crate aliasing itself as `nx`
    // (the rest of this file uses `crate::` paths) — confirm intentional
    pub fn from_rng(rng: &mut impl nx::rand::RngCore) -> Result<Self> {
        let mut uuid = [8; 16];
        rng.fill_bytes(&mut uuid);
        Ok(Self { uuid })
    }
}
/// Represents a pair of a mutable pointer and a size
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(C)]
pub struct PointerAndSize {
    /// The pointer address
    pub address: *mut u8,
    /// The pointer size
    pub size: usize,
}

impl PointerAndSize {
    /// Creates an empty, thus invalid [`PointerAndSize`] (null pointer, size `0`)
    #[inline]
    pub const fn empty() -> Self {
        Self::new(ptr::null_mut(), 0)
    }

    /// Creates a [`PointerAndSize`] from an address and a size
    ///
    /// # Arguments
    ///
    /// * `address`: The address
    /// * `size`: The size
    #[inline]
    pub const fn new(address: *mut u8, size: usize) -> Self {
        Self { address, size }
    }

    /// Checks whether the [`PointerAndSize`] is valid: the pointer must be non-null
    /// and the size non-zero
    #[inline]
    pub fn is_valid(&self) -> bool {
        !(self.address.is_null() || self.size == 0)
    }
}
/// Represents a pair of a read-only pointer and a size
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(C)]
pub struct ConstPointerAndSize {
    /// The pointer address
    pub address: *const u8,
    /// The pointer size
    pub size: usize,
}

impl ConstPointerAndSize {
    /// Creates an empty, thus invalid [`ConstPointerAndSize`] (null pointer, size `0`)
    #[inline]
    pub const fn empty() -> Self {
        Self::new(ptr::null(), 0)
    }

    /// Creates a [`ConstPointerAndSize`] from an address and a size
    ///
    /// # Arguments
    ///
    /// * `address`: The address
    /// * `size`: The size
    #[inline]
    pub const fn new(address: *const u8, size: usize) -> Self {
        Self { address, size }
    }

    /// Checks whether the [`ConstPointerAndSize`] is valid: the pointer must be
    /// non-null and the size non-zero
    #[inline]
    pub fn is_valid(&self) -> bool {
        !(self.address.is_null() || self.size == 0)
    }
}
/// Returns the smaller of two `usize` values (usable in `const` contexts, where the
/// `Ord` trait methods are not available).
pub(crate) const fn const_usize_min(a: usize, b: usize) -> usize {
    if a <= b { a } else { b }
}

/// Returns the larger of two `usize` values (usable in `const` contexts).
#[allow(dead_code)]
pub(crate) const fn const_usize_max(a: usize, b: usize) -> usize {
    if a >= b { a } else { b }
}
/// Represents a C-like string of a given size (mostly like a C `char[S]` array)
///
/// Note that `char` is 4-bytes in Rust for encoding reasons, thus we must stick to `u8` arrays
#[derive(Copy, Clone)]
#[repr(C)]
pub struct ArrayString<const S: usize> {
    /// The actual array (like `char[S]` in C)
    c_str: [u8; S],
}

// IPC server glue: read an `ArrayString` out of a request's raw data section
impl<const S: usize> crate::ipc::server::RequestCommandParameter<'_, ArrayString<S>>
    for ArrayString<S>
{
    fn after_request_read(ctx: &mut crate::ipc::server::ServerContext) -> Result<Self> {
        Ok(ctx.raw_data_walker.advance_get())
    }
}

// IPC server glue: reserve space for, then store, an `ArrayString` in a response
impl<const S: usize> crate::ipc::server::ResponseCommandParameter for ArrayString<S> {
    type CarryState = ();
    // Advances the walker without writing (appears to be a sizing pass)
    fn before_response_write(
        _raw: &Self,
        ctx: &mut crate::ipc::server::ServerContext,
    ) -> Result<()> {
        ctx.raw_data_walker.advance::<Self>();
        Ok(())
    }
    // Stores the value at the walker's current position
    fn after_response_write(
        raw: Self,
        _carry_state: (),
        ctx: &mut crate::ipc::server::ServerContext,
    ) -> Result<()> {
        ctx.raw_data_walker.advance_set(raw);
        Ok(())
    }
}

// IPC client glue: reserve space for, then store, an `ArrayString` in a request
impl<const S: usize> crate::ipc::client::RequestCommandParameter for ArrayString<S> {
    fn before_request_write(
        _raw: &Self,
        walker: &mut crate::ipc::DataWalker,
        _ctx: &mut crate::ipc::CommandContext,
    ) -> crate::result::Result<()> {
        walker.advance::<Self>();
        Ok(())
    }
    fn before_send_sync_request(
        raw: &Self,
        walker: &mut crate::ipc::DataWalker,
        _ctx: &mut crate::ipc::CommandContext,
    ) -> crate::result::Result<()> {
        walker.advance_set(*raw);
        Ok(())
    }
}

// IPC client glue: read an `ArrayString` out of an incoming response
impl<const S: usize> crate::ipc::client::ResponseCommandParameter<ArrayString<S>>
    for ArrayString<S>
{
    fn after_response_read(
        walker: &mut crate::ipc::DataWalker,
        _ctx: &mut crate::ipc::CommandContext,
    ) -> crate::result::Result<Self> {
        Ok(walker.advance_get())
    }
}
impl<const S: usize> fmt::Debug for ArrayString<S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let str_data = self.get_str().unwrap_or("<invalid-str>");
write!(f, "{str_data}")
}
}
impl<const S: usize> PartialEq for ArrayString<S> {
fn eq(&self, other: &Self) -> bool {
if let Ok(self_str) = self.get_str()
&& let Ok(other_str) = other.get_str()
{
return self_str == other_str;
}
false
}
}
impl<const S: usize> Eq for ArrayString<S> {}
impl<const S: usize> Default for ArrayString<S> {
fn default() -> Self {
Self::new()
}
}
impl<const S: usize> ArrayString<S> {
    /// Creates an empty [`ArrayString`]
    pub const fn new() -> Self {
        Self { c_str: [0; S] }
    }

    /// Creates a [`ArrayString`] from a given byte array
    ///
    /// # Arguments
    ///
    /// * `raw_bytes`: Byte array to use
    // NOTE(review): this trusts `raw_bytes` to contain at least one NUL byte;
    // `len()` panics otherwise — confirm all callers guarantee this
    pub const fn from_raw(raw_bytes: [u8; S]) -> Self {
        Self { c_str: raw_bytes }
    }

    /// Creates a [`ArrayString`] from a given `&str`
    ///
    /// This creates an empty [`ArrayString`] and initializes it with the provided string.
    /// This will copy at max `S - 1` bytes/chars in order to ensure that the string is NUL-terminated.
    /// This will truncate the string at the first null, so we can unconditionally return and keep it const.
    ///
    /// # Arguments
    ///
    /// * `string`: The `&str` to use
    pub const fn from_str_truncate_null(string: &str) -> Self {
        let mut out = Self::new();
        let string = string.as_bytes();
        let len = const_usize_min(S - 1, string.len());
        let mut offset = 0;
        // truncate at nuls since we're writing a cstr
        // (manual `while` loop: iterators are not usable in a `const fn`)
        while offset < len && string[offset] != 0 {
            out.c_str[offset] = string[offset];
            offset += 1;
        }
        out
    }

    /// Creates a [`ArrayString`] from a given `&str`
    ///
    /// This creates an empty [`ArrayString`] and calls [`ArrayString::set_str`] on it
    ///
    /// # Arguments
    ///
    /// * `string`: The `&str` to use
    #[allow(clippy::should_implement_trait)] // We don't implement the trait as we do the conversion infallibly
    pub fn from_str(string: &str) -> Self {
        let mut cstr = Self::new();
        // the only error case (interior NULs) is deliberately ignored here,
        // leaving the string empty in that case
        let _ = cstr.set_str(string);
        cstr
    }

    /// Creates a [`ArrayString`] from a given `String`
    ///
    /// This creates an empty [`ArrayString`] and calls [`ArrayString::set_string`] on it
    ///
    /// # Arguments
    ///
    /// * `string`: The `String` to use
    pub fn from_string(string: &String) -> Self {
        let mut cstr = Self::new();
        let _ = cstr.set_string(string);
        cstr
    }

    /// Returns the length of the [`ArrayString`]
    ///
    /// This is similar to C's `strlen()` function, thus taking into account the string's NUL-termination
    pub fn len(&self) -> usize {
        self.c_str.iter().position(|byte| *byte == 0).expect("We should always have at least one null as we always make sure to keep the last index null")
    }

    /// Returns whether this [`ArrayString`] is empty
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Sets a `&str` as the contents of this [`ArrayString`]
    ///
    /// This will copy at max `S - 1` bytes/chars in order to ensure that the string is NUL-terminated
    /// Returns and error when the string has internal nulls. Truncates the written strings over `S-1` bytes in length.
    ///
    /// # Arguments
    ///
    /// * `string`: The `&str` to set
    pub fn set_str(&mut self, string: &str) -> Result<()> {
        // we're writing a c-string, so we can't have internal nuls
        result_return_if!(string.find('\0').is_some(), rc::ResultInvalidUtf8Conversion);
        // clearing the whole buffer also guarantees the trailing NUL terminator
        self.c_str = [0; S];
        let string = string.as_bytes();
        let len = const_usize_min(S - 1, string.len());
        let mut offset = 0;
        while offset < len {
            self.c_str[offset] = string[offset];
            offset += 1;
        }
        Ok(())
    }

    /// Sets a string as the contents of this [`ArrayString`]
    ///
    /// This will copy at max `S - 1` bytes/chars in order to ensure that the string is NUL-terminated
    ///
    /// # Arguments
    ///
    /// * `string`: The content to set
    #[inline(always)]
    #[allow(clippy::ptr_arg)]
    pub fn set_string(&mut self, string: &String) -> Result<()> {
        self.set_str(string.as_str())
    }

    /// Gets a `&str` corresponding to this [`ArrayString`]
    ///
    /// Fails with `ResultInvalidUtf8Conversion` when the bytes are not valid UTF-8
    pub fn get_str(&self) -> Result<&str> {
        core::ffi::CStr::from_bytes_until_nul(&self.c_str)
            .expect("We should never error as we always keep a null at the last index")
            .to_str()
            .map_err(|_| rc::ResultInvalidUtf8Conversion::make())
    }

    /// Gets a `String` corresponding to this [`ArrayString`]
    pub fn get_string(&self) -> Result<String> {
        self.get_str().map(Into::into)
    }

    /// Borrows a view into the whole array
    pub fn as_buffer(&self) -> &[u8; S] {
        &self.c_str
    }

    /// Borrows only the initialized bytes (including the null terminator)
    pub fn as_bytes(&self) -> &[u8] {
        &self.c_str[..(self.len() + 1)]
    }
}
impl<S: AsRef<str>, const LEN: usize> From<S> for ArrayString<LEN> {
    /// Converts any string-like value into an [`ArrayString`], truncating as needed
    fn from(value: S) -> Self {
        Self::from_str(value.as_ref())
    }
}
/// Represents a C-like 16-bit string of a given size (mostly like a C `char16_t[S]` array)
///
/// Note that `char` is 4-bytes in Rust for encoding reasons, thus we must stick to `u16` arrays
#[derive(Copy, Clone)]
#[repr(C)]
pub struct ArrayWideString<const S: usize> {
    /// The actual array (like `char16_t[S]` in C)
    c_wstr: [u16; S],
}

// IPC server glue: read an `ArrayWideString` out of a request's raw data section
impl<const S: usize> crate::ipc::server::RequestCommandParameter<'_, ArrayWideString<S>>
    for ArrayWideString<S>
{
    fn after_request_read(ctx: &mut crate::ipc::server::ServerContext) -> Result<Self> {
        Ok(ctx.raw_data_walker.advance_get())
    }
}

// IPC server glue: reserve space for, then store, an `ArrayWideString` in a response
impl<const S: usize> crate::ipc::server::ResponseCommandParameter for ArrayWideString<S> {
    type CarryState = ();
    // Advances the walker without writing (appears to be a sizing pass)
    fn before_response_write(
        _raw: &Self,
        ctx: &mut crate::ipc::server::ServerContext,
    ) -> Result<()> {
        ctx.raw_data_walker.advance::<Self>();
        Ok(())
    }
    // Stores the value at the walker's current position
    fn after_response_write(
        raw: Self,
        _carry_state: (),
        ctx: &mut crate::ipc::server::ServerContext,
    ) -> Result<()> {
        ctx.raw_data_walker.advance_set(raw);
        Ok(())
    }
}

// IPC client glue: reserve space for, then store, an `ArrayWideString` in a request
impl<const S: usize> crate::ipc::client::RequestCommandParameter for ArrayWideString<S> {
    fn before_request_write(
        _raw: &Self,
        walker: &mut crate::ipc::DataWalker,
        _ctx: &mut crate::ipc::CommandContext,
    ) -> crate::result::Result<()> {
        walker.advance::<Self>();
        Ok(())
    }
    fn before_send_sync_request(
        raw: &Self,
        walker: &mut crate::ipc::DataWalker,
        _ctx: &mut crate::ipc::CommandContext,
    ) -> crate::result::Result<()> {
        walker.advance_set(*raw);
        Ok(())
    }
}

// IPC client glue: read an `ArrayWideString` out of an incoming response
impl<const S: usize> crate::ipc::client::ResponseCommandParameter<ArrayWideString<S>>
    for ArrayWideString<S>
{
    fn after_response_read(
        walker: &mut crate::ipc::DataWalker,
        _ctx: &mut crate::ipc::CommandContext,
    ) -> crate::result::Result<Self> {
        Ok(walker.advance_get())
    }
}
impl<const S: usize> fmt::Debug for ArrayWideString<S> {
    /// Formats the decoded string, falling back to a placeholder when the contents
    /// are not valid UTF-16
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.get_string() {
            Ok(text) => write!(f, "{text}"),
            Err(_) => write!(f, "<invalid-str>"),
        }
    }
}

impl<const S: usize> PartialEq for ArrayWideString<S> {
    /// Raw element-wise comparison of the backing arrays
    fn eq(&self, other: &Self) -> bool {
        self.c_wstr == other.c_wstr
    }
}

impl<const S: usize> Eq for ArrayWideString<S> {}

impl<const S: usize> Default for ArrayWideString<S> {
    /// Defaults to the empty (all-NUL) string
    fn default() -> Self {
        Self::new()
    }
}
impl<const S: usize> ArrayWideString<S> {
    /// Creates an empty [`ArrayWideString`]
    pub const fn new() -> Self {
        Self { c_wstr: [0; S] }
    }

    /// Creates a [`ArrayWideString`] from a given `u16` array
    ///
    /// # Arguments
    ///
    /// * `raw_bytes`: Array to use; it should contain at least one NUL (`0`) element,
    ///   as [`ArrayWideString::len`] relies on one being present
    pub const fn from_raw(raw_bytes: [u16; S]) -> Self {
        Self { c_wstr: raw_bytes }
    }

    /// Creates a [`ArrayWideString`] from a given `String`
    ///
    /// This creates an empty [`ArrayWideString`] and calls [`ArrayWideString::set_string`] on it
    ///
    /// # Arguments
    ///
    /// * `string`: The `String` to use
    pub fn from_string(string: String) -> Self {
        let mut cstr = Self::new();
        cstr.set_string(string);
        cstr
    }

    /// Returns the length of the [`ArrayWideString`]
    ///
    /// This is similar to C's `strlen()` function, thus taking into account the string's NUL-termination
    pub fn len(&self) -> usize {
        self.c_wstr
            .iter()
            .position(|word| *word == 0)
            .expect("We will have at least one null as we always keep the last index null")
    }

    /// Returns if this [`ArrayWideString`] is empty
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Sets a `&str` as the contents of this [`ArrayWideString`]
    ///
    /// This will copy at max `S - 1` code units in order to ensure that the string is NUL-terminated
    ///
    /// # Arguments
    ///
    /// * `string`: The `&str` to set
    pub fn set_str(&mut self, string: &str) {
        // Clear the previous contents first, matching `ArrayString::set_str`. Without
        // this, writing a shorter string over a longer one left the old string's tail
        // in place, making `len()`/`get_string()` report stale data. Clearing also
        // guarantees NUL-termination.
        self.c_wstr = [0; S];
        // Leave the last element untouched so the string stays NUL-terminated
        let mut c_str = &mut self.c_wstr[..S - 1];
        let mut char_buf = [0u16; 2];
        for char in string.chars() {
            let encoded_char = char.encode_utf16(&mut char_buf);
            // we can't write any u16s if there aren't enough for surrogate pairs,
            // so we bail out early
            if encoded_char.len() > c_str.len() {
                break;
            }
            // a character will always be at least one u16
            c_str[0] = encoded_char[0].to_be();
            // check if the character required 4-byte (surrogate pair) encoding
            if encoded_char.len() == 2 {
                c_str[1] = encoded_char[1].to_be();
            }
            // advance the window by the length of the written u16 buffer
            c_str = &mut c_str[encoded_char.len()..]
        }
    }

    /// Sets a string as the contents of this [`ArrayWideString`]
    ///
    /// This will copy at max `S - 1` code units in order to ensure that the string is NUL-terminated
    ///
    /// # Arguments
    ///
    /// * `string`: The content to set
    pub fn set_string(&mut self, string: impl AsRef<str>) {
        self.set_str(string.as_ref())
    }

    /// Gets a `String` corresponding to this [`ArrayWideString`]
    ///
    /// Fails with `ResultInvalidUtf16Conversion` on invalid UTF-16 contents
    pub fn get_string(&self) -> Result<String> {
        // Copy the buffer and convert the used portion back from the stored
        // big-endian form (`set_str` stores code units with `to_be`)
        let mut tmp = self.c_wstr;
        for place in tmp[..self.len()].iter_mut() {
            *place = u16::from_be(*place);
        }
        String::from_utf16(&tmp[..self.len()])
            .map_err(|_| rc::ResultInvalidUtf16Conversion::make())
    }

    /// Borrows a view into the whole array
    pub fn as_buffer(&self) -> &[u16; S] {
        &self.c_wstr
    }

    /// Borrows only the initialized elements (including the NUL terminator)
    pub fn as_u16s(&self) -> &[u16] {
        &self.c_wstr[..(self.len() + 1)]
    }
}
impl<const S: usize> core::str::FromStr for ArrayWideString<S> {
    type Err = ResultCode;

    /// Infallibly builds an [`ArrayWideString`] from `s` (truncating if needed);
    /// always returns `Ok`
    fn from_str(s: &str) -> core::result::Result<Self, Self::Err> {
        let mut out = Self::new();
        out.set_str(s);
        Ok(out)
    }
}
/// Same as C's `strlen()`: counts the bytes before the NUL terminator
///
/// # Arguments
///
/// * `str_ptr`: The `const char*`-like ptr to use
///
/// # Safety
///
/// Delegates to [`core::ffi::CStr::from_ptr`], so its safety requirements apply:
/// `str_ptr` must point to a valid, NUL-terminated string.
#[inline(always)]
pub unsafe fn str_ptr_len(str_ptr: *const u8) -> usize {
    // SAFETY: forwarded to the caller per this function's safety contract
    let c_str = unsafe { core::ffi::CStr::from_ptr(str_ptr.cast()) };
    c_str.to_bytes().len()
}
/// Simplified panic handler using a provided [`Logger`] type, available as a helpful default panic handler
///
/// This handler does the following:
/// * Logs the panic information via [`diag_log!`] macro and the provided [`Logger`] type
/// * Aborts with [`ResultPanicked`][`super::rc::ResultPanicked`] and the specified desired [`AbortLevel`][`abort::AbortLevel`]
///
/// # Arguments
///
/// * `info`: `PanicInfo` object got from the actual panic handler
/// * `desired_level`: Desired [`AbortLevel`][`abort::AbortLevel`] to abort with
pub fn simple_panic_handler<L: Logger>(
    info: &panic::PanicInfo,
    desired_level: abort::AbortLevel,
) -> ! {
    // Best-effort thread-name lookup: falls back to a placeholder when the
    // current-thread pointer is null or the name bytes are not valid UTF-8
    let thread_name = match unsafe { thread::current().as_ref() }.map(|t| t.name.get_str()) {
        Some(Ok(name)) => name,
        _ => "<unknown>",
    };
    diag_log!(L { log::LogSeverity::Fatal, true } => "Panic! at thread '{}' -> {}\n", thread_name, info);
    abort::abort(desired_level, super::rc::ResultPanicked::make())
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/diag.rs | src/diag.rs | //! Diagnostics (logging and aborting) support
/// Diagnostics result definitions.
pub mod rc;
/// Abort support.
pub mod abort;
/// Logging support.
pub mod log;
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/rand.rs | src/rand.rs | //! Pseudo-RNG support
use alloc::sync::Arc;
pub use rand::{Rng, RngCore};
/// Represents a pseudo-RNG
use crate::ipc::sf::Buffer;
use crate::result::*;
use crate::service;
pub use crate::service::spl::{IRandomClient, RandomService};
use crate::sync::Mutex;
// Lets the "csrng" service client be used directly wherever a `RngCore` is expected
impl RngCore for RandomService {
    /// Draws 4 service-provided random bytes as a native-endian `u32`
    fn next_u32(&mut self) -> u32 {
        let mut data = [0; 4];
        self.generate_random_bytes(Buffer::from_mut_array(&mut data))
            .expect("Generating rand bytes should never fail");
        u32::from_ne_bytes(data)
    }

    /// Draws 8 service-provided random bytes as a native-endian `u64`
    fn next_u64(&mut self) -> u64 {
        let mut data = [0; 8];
        self.generate_random_bytes(Buffer::from_mut_array(&mut data))
            .expect("Generating rand bytes should never fail");
        u64::from_ne_bytes(data)
    }

    /// Requests random bytes for the whole of `dst` from the service
    fn fill_bytes(&mut self, dst: &mut [u8]) {
        self.generate_random_bytes(Buffer::from_mut_array(dst))
            .expect("Generating rand bytes should never fail");
    }
}
// Global RNG source
static G_RNG: Mutex<Option<spl::SplCsrngGenerator>> = Mutex::new(None);
pub fn initialize() -> Result<()> {
let mut guard = G_RNG.lock();
if guard.is_none() {
*guard = Some(spl::SplCsrngGenerator::new()?);
}
Ok(())
}
pub fn finalize() {
*G_RNG.lock() = None;
}
#[inline]
pub fn get_rng() -> Result<spl::SplCsrngGenerator> {
G_RNG
.lock()
.clone()
.ok_or(nx::rc::ResultNotInitialized::make())
}
mod spl {
    use super::*;

    /// Represents a pseudo-RNG using [`spl`][`crate::service::spl`]'s [`RandomService`] interface
    ///
    /// Cloning is cheap: all clones share the same underlying service session via `Arc`
    #[derive(Clone)]
    pub struct SplCsrngGenerator {
        csrng: Arc<RandomService>,
    }

    impl SplCsrngGenerator {
        /// Creates a new [`SplCsrngGenerator`], opening a session to the random service
        pub fn new() -> Result<Self> {
            Ok(Self {
                csrng: Arc::new(service::new_service_object::<RandomService>()?),
            })
        }
    }

    impl RngCore for SplCsrngGenerator {
        /// Draws 4 service-provided random bytes as a native-endian `u32`
        fn next_u32(&mut self) -> u32 {
            let mut data = [0; 4];
            self.csrng
                .generate_random_bytes(Buffer::from_mut_array(&mut data))
                .expect("Generating rand bytes should never fail");
            u32::from_ne_bytes(data)
        }

        /// Draws 8 service-provided random bytes as a native-endian `u64`
        fn next_u64(&mut self) -> u64 {
            let mut data = [0; 8];
            self.csrng
                .generate_random_bytes(Buffer::from_mut_array(&mut data))
                .expect("Generating rand bytes should never fail");
            u64::from_ne_bytes(data)
        }

        /// Requests random bytes for the whole of `dst` from the service
        fn fill_bytes(&mut self, dst: &mut [u8]) {
            self.csrng
                .generate_random_bytes(Buffer::from_mut_array(dst))
                .expect("Generating rand bytes should never fail");
        }
    }
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/rrt0.rs | src/rrt0.rs | //! Initial code/entrypoint support and utils
//!
//! # Custom entrypoint
//!
//! If you wish to define your custom entrypoint, you can do so by redefining the `__nx_rrt0_entry` weak fn.
//!
//! Example (check [here](https://switchbrew.org/wiki/Homebrew_ABI#Entrypoint_Arguments) for more entrypoint details):
//! ```
//! #[unsafe(no_mangle)]
//! unsafe extern "C" fn __nx_rrt0_entry(arg0: usize, arg1: usize) {
//! // ...
//! }
//! ```
//!
//! # Custom version setup
//!
//! On the default entrypoint routine, the internal system version (see [`get_version`][`version::get_version`] and [`set_version`][`version::set_version`]) gets set the following way:
//! * If the process was launched through HBL, use the "HOS version" value we got from it
//! * Otherwise (and if using the `services` feature), use settings services ([`SystemSettingsServer`][`crate::service::set::ISystemSettingsServer`]) to get it
//!
//! If you wish to define your custom version setup (for instance, in contexts in which you wish to avoid executing the aforementioned setup), you can do so by redefining the `initialize_version` weak fn.
//!
//! Example:
//! ```
//! #[unsafe(no_mangle)]
//! fn initialize_version(hbl_hos_version_opt: Option<hbl::Version>) {
//! // ...
//! }
//! ```
use crate::elf;
use crate::hbl;
use crate::hbl::AbiConfigEntry;
use crate::mem::alloc;
use crate::result::*;
use crate::svc;
use crate::svc::Handle;
use crate::sync;
use crate::thread;
use crate::thread::get_thread_local_region;
use crate::util;
use crate::version;
use crate::vmem;
#[cfg(feature = "services")]
use crate::{ipc::sf, service, service::set};
use core::arch::asm;
use core::mem;
use core::ptr;
use core::sync::atomic::AtomicUsize;
use atomic_enum::atomic_enum;
// These functions must be implemented by any binary using this crate
unsafe extern "Rust" {
    /// The binary's entrypoint function
    fn main();
    /// Heap setup hook: receives the HBL-provided heap region and returns the heap
    /// region to use (NOTE(review): exact contract defined by the entry code, which
    /// lies outside this chunk — confirm there)
    fn initialize_heap(hbl_heap: util::PointerAndSize) -> util::PointerAndSize;
}

/// Represents the fn pointer used for exiting
pub type ExitFn = unsafe extern "C" fn(ResultCode) -> !;

#[atomic_enum]
/// Represents the executable type of the current process
#[derive(PartialEq, Eq, Default)]
pub enum ExecutableType {
    #[default]
    None,
    Nso,
    Nro,
}

// Process-wide executable type, set during startup and read-only afterwards
// (NOTE(review): stores use SeqCst while loads use Relaxed — presumably fine for a
// write-once flag, but confirm)
static G_EXECUTABLE_TYPE: AtomicExecutableType = AtomicExecutableType::new(ExecutableType::None);

// Records the executable type detected during process startup
pub(crate) fn set_executable_type(exec_type: ExecutableType) {
    G_EXECUTABLE_TYPE.store(exec_type, core::sync::atomic::Ordering::SeqCst);
}

/// Gets the current process's executable type
///
/// Note that this can be used to determine if this process was launched through HBL or not (if so, we would be a homebrew NRO and this would return [`ExecutableType::Nro`])
pub fn get_executable_type() -> ExecutableType {
    G_EXECUTABLE_TYPE.load(core::sync::atomic::Ordering::Relaxed)
}
/// Represents the process module format used by processes
///
/// This layout has to be present at the start of the process's `.rodata` section, containing its module name
#[derive(Copy, Clone)]
#[repr(C)]
pub struct ModulePath {
    /// Unused value
    _zero: u32,
    /// The length of the module name
    path_len: u32,
    /// The module name string
    path: util::ArrayString<0x200>,
}

impl ModulePath {
    /// Creates a [`ModulePath`] with the given module name
    ///
    /// # Arguments
    ///
    /// * `name`: The module name (truncated to `0x1FF` bytes plus the NUL terminator)
    #[inline]
    pub const fn new(name: &str) -> Self {
        Self {
            _zero: 0,
            path_len: util::const_usize_min(name.len(), 0x200 - 1) as u32,
            path: util::ArrayString::from_str_truncate_null(name),
        }
    }

    /// Replaces the module name
    ///
    /// # Arguments
    ///
    /// * `new_name`: The new module name (truncated like [`ModulePath::new`])
    pub fn set_name(&mut self, new_name: &str) {
        self.path = util::ArrayString::from_str(new_name);
        // Clamp the recorded length exactly like `new` does: `ArrayString::from_str`
        // stores at most `0x200 - 1` bytes, so `path_len` must not claim more (the
        // previous code stored `new_name.len()` unclamped).
        self.path_len = util::const_usize_min(new_name.len(), 0x200 - 1) as u32
    }

    /// Gets the module name
    pub fn get_name(&self) -> util::ArrayString<0x200> {
        self.path
    }
}
#[used]
#[linkage = "weak"]
#[unsafe(link_section = ".module_name")]
#[unsafe(export_name = "__nx_rrt0_module_name")]
static G_MODULE_NAME: ModulePath = ModulePath::new("aarch64-switch-rs (unknown module)");
/// Gets this process's module name
///
/// The module name is `aarch64-switch-rs (unknown module)` by default, but it can be set to a custom one with [`rrt0_define_module_name`] or [`rrt0_define_default_module_name`] macros
pub fn get_module_name() -> ModulePath {
G_MODULE_NAME
}
static G_EXIT_FN: sync::Mutex<Option<ExitFn>> = sync::Mutex::new(None);
static G_MAIN_THREAD: sync::Mutex<Option<thread::Thread>> = sync::Mutex::new(None);
pub(crate) static TEXT_BASE_ADDRESS: AtomicUsize = AtomicUsize::new(0);
static EH_FRAME_HDR_SECTION: elf::EhFrameHdrPtr = elf::EhFrameHdrPtr::new();
/// Exits the current process
///
/// This will call the HBL-specific exit fn if running as a homebrew NRO, or [`exit_process`][`svc::exit_process`] otherwise
pub fn exit(rc: ResultCode) -> ! {
    // Note: the mutex guard lives for the whole `match`, but neither arm returns, so it is
    // never unlocked — acceptable since the process is terminating.
    match *G_EXIT_FN.lock() {
        // SAFETY: the stored pointer was captured from the loader at process entry.
        Some(exit_fn) => unsafe { (exit_fn)(rc) },
        None => svc::exit_process(),
    }
}

/// Initializes version support from the HBL-reported OS version, if one was provided.
///
/// Weakly linked (`__nx__rrt0_initialize_version`) so binaries can override it. Without an
/// HBL-provided version (and with the `services` feature enabled), the firmware version is
/// queried from the system-settings service instead; that path panics (`unwrap`) if the
/// service object or the IPC call fails.
#[linkage = "weak"]
#[unsafe(export_name = "__nx__rrt0_initialize_version")]
pub fn initialize_version(hbl_hos_version_opt: Option<hbl::Version>) {
    if let Some(hbl_hos_version) = hbl_hos_version_opt {
        unsafe { version::set_version(hbl_hos_version.to_version()) };
    } else {
        #[cfg(feature = "services")]
        {
            use crate::service::set::{ISystemSettingsClient, SystemSettingsService};
            let set_sys = service::new_service_object::<SystemSettingsService>().unwrap();
            let mut fw_version: set::FirmwareVersion = Default::default();
            set_sys
                .get_firmware_version(sf::Buffer::from_mut_var(&mut fw_version))
                .unwrap();
            let version =
                version::Version::new(fw_version.major, fw_version.minor, fw_version.micro);
            unsafe { version::set_version(version) };
        }
    }
}
// Statically-allocated record for the main thread; referenced from the thread-local region below.
static mut MAIN_THREAD: thread::imp::Thread = thread::imp::Thread::empty();

/// Prepares the main thread's ThreadLocalRegion: stores the thread handle plus the
/// libnx-style magic and thread-ref so code relying on that TLR layout can locate the
/// main thread record.
///
/// # Safety
///
/// Must be called exactly once, from the entry path, before anything else reads the TLR
/// or `MAIN_THREAD`.
#[inline]
unsafe fn set_main_thread_tlr(handle: svc::Handle) {
    unsafe {
        let tlr_raw = get_thread_local_region();
        (*tlr_raw).nx_thread_vars.handle = handle;
        (*tlr_raw).nx_thread_vars.magic = thread::imp::LibNxThreadVars::MAGIC;
        MAIN_THREAD.__nx_thread.handle = handle;
        (*tlr_raw).nx_thread_vars.thread_ref = &raw mut MAIN_THREAD;
    }
}
/// Common runtime setup for both NSO and NRO entry.
///
/// Decodes the loader configuration (NRO only), sets up the main thread's TLR, virtual
/// memory, heap/allocator and version support, runs the binary's `main`, tears down the
/// feature-gated subsystems, and finally exits the process.
#[allow(unsafe_op_in_unsafe_fn)]
unsafe fn normal_entry(loader_mode: LoaderMode, exit_config: Option<ExitFn>) -> ! {
    let mut main_thread_handle: svc::Handle = 0;
    // Default heap request: no fixed address, default size (may be overridden by OverrideHeap below).
    let mut heap = util::PointerAndSize::new(ptr::null_mut(), crate::mem::alloc::DEFAULT_HEAP_SIZE);
    let mut hos_version_opt: Option<hbl::Version> = None;
    match loader_mode {
        LoaderMode::Nso(thread_handle) => {
            main_thread_handle = thread_handle;
            set_executable_type(ExecutableType::Nso);
        }
        LoaderMode::Nro(mut abi_entry) => {
            set_executable_type(ExecutableType::Nro);
            // Walk the HBL ABI configuration entry list until the EndOfList terminator.
            unsafe {
                loop {
                    match (*abi_entry).key {
                        hbl::AbiConfigEntryKey::EndOfList => {
                            // The terminator's values carry the loader-info string, if present.
                            let loader_info_data = (*abi_entry).value[0] as *mut u8;
                            let loader_info_data_len = (*abi_entry).value[1] as usize;
                            if loader_info_data_len > 0 {
                                let loader_info_slice = core::slice::from_raw_parts(
                                    loader_info_data,
                                    loader_info_data_len,
                                );
                                if let Ok(loader_info) = core::str::from_utf8(loader_info_slice) {
                                    hbl::set_loader_info(loader_info);
                                }
                            }
                            break;
                        }
                        hbl::AbiConfigEntryKey::MainThreadHandle => {
                            main_thread_handle = (*abi_entry).value[0] as svc::Handle;
                        }
                        hbl::AbiConfigEntryKey::NextLoadPath => {
                            // lengths from nx-hbloader:source/main.c
                            // https://github.com/switchbrew/nx-hbloader/blob/cd6a723acbeabffd827a8bdc40563066f5401fb7/source/main.c#L13-L14
                            let next_load_path =
                                core::ptr::with_exposed_provenance_mut::<util::ArrayString<512>>(
                                    (*abi_entry).value[0] as usize,
                                )
                                .as_mut();
                            let next_load_argv =
                                core::ptr::with_exposed_provenance_mut::<util::ArrayString<2048>>(
                                    (*abi_entry).value[1] as usize,
                                )
                                .as_mut();
                            hbl::set_next_load_entry_ptr(next_load_path, next_load_argv);
                        }
                        hbl::AbiConfigEntryKey::OverrideHeap => {
                            heap.address = (*abi_entry).value[0] as *mut u8;
                            heap.size = (*abi_entry).value[1] as usize;
                        }
                        hbl::AbiConfigEntryKey::OverrideService => {
                            // todo!("OverrideService");
                        }
                        hbl::AbiConfigEntryKey::Argv => {
                            // todo!("Argv");
                        }
                        hbl::AbiConfigEntryKey::SyscallAvailableHint => {
                            // todo!("SyscallAvailableHint");
                        }
                        hbl::AbiConfigEntryKey::AppletType => {
                            // NOTE(review): assumes the loader passes a valid AppletType
                            // discriminant — the transmute is unchecked.
                            let applet_type: hbl::AppletType =
                                mem::transmute((*abi_entry).value[0] as u32);
                            hbl::set_applet_type(applet_type);
                        }
                        hbl::AbiConfigEntryKey::ProcessHandle => {
                            let proc_handle = (*abi_entry).value[0] as Handle;
                            hbl::set_process_handle(proc_handle);
                        }
                        hbl::AbiConfigEntryKey::LastLoadResult => {
                            let last_load_rc = ResultCode::new((*abi_entry).value[0] as u32);
                            hbl::set_last_load_result(last_load_rc);
                        }
                        hbl::AbiConfigEntryKey::RandomSeed => {
                            let random_seed = ((*abi_entry).value[0], (*abi_entry).value[1]);
                            hbl::set_random_seed(random_seed);
                        }
                        hbl::AbiConfigEntryKey::UserIdStorage => {
                            // todo!("UserIdStorage");
                        }
                        hbl::AbiConfigEntryKey::HosVersion => {
                            let hos_version_v = (*abi_entry).value[0] as u32;
                            let os_impl_magic = (*abi_entry).value[1];
                            hos_version_opt = Some(hbl::Version::new(hos_version_v, os_impl_magic));
                        }
                        _ => {
                            // TODO: invalid config entries?
                        }
                    }
                    abi_entry = abi_entry.add(1);
                }
            }
        }
    }
    // we need to set up our own ThreadLocalRegion
    unsafe { set_main_thread_tlr(main_thread_handle) };
    // Initialize virtual memory
    vmem::initialize().unwrap();
    // set the exit_fn
    *G_EXIT_FN.lock() = exit_config;
    // Initialize heap and memory allocation (the binary's initialize_heap may adjust the region)
    heap = initialize_heap(heap);
    alloc::initialize(heap);
    let main_thread: thread::Thread = thread::Thread::new_remote("MainThread", main_thread_handle);
    G_MAIN_THREAD.set(Some(main_thread));
    // Initialize version support
    initialize_version(hos_version_opt);
    unsafe { main() };
    // unmount fs devices
    #[cfg(feature = "fs")]
    {
        // clears any globally held fs dev handles
        crate::fs::unmount_all();
        // clears the global fsp_srv session.
        crate::fs::finalize_fspsrv_session();
    }
    #[cfg(feature = "applet")]
    {
        crate::applet::finalize();
    }
    #[cfg(feature = "mii")]
    {
        crate::mii::finalize();
    }
    #[cfg(feature = "la")]
    {
        crate::la::finalize()
    }
    #[cfg(feature = "rand")]
    {
        crate::rand::finalize();
    }
    #[cfg(feature = "socket")]
    {
        crate::socket::finalize();
    }
    // Successful exit by default
    exit(ResultSuccess::make());
}
/// How the process was entered, as decoded from the raw entry arguments.
enum LoaderMode {
    /// Launched as an NSO; payload is the main thread handle.
    Nso(u32),
    /// Launched as an NRO through HBL; payload points at the ABI config entry list.
    Nro(*const AbiConfigEntry),
}
/// Raw process entry point.
///
/// Captures the loader-provided exit routine (from `lr`), self-relocates the binary,
/// zeroes `.bss`, registers unwind info, then dispatches either to [`normal_entry`] or
/// to the exception handler depending on the raw argument pattern.
#[unsafe(no_mangle)]
#[allow(unsafe_op_in_unsafe_fn)]
unsafe extern "C" fn __nx_rrt0_entry(arg0: usize, arg1: usize) -> ! {
    // Since we're using the `b` instruction instead of `bl` in `rrt0.s`, the `lr` register will still have the passed in value.
    // This will be null for NSOs that are directly executed, but has the loader's return pointer for hbl/ovll loaded NROs.
    let lr_raw: usize;
    asm!(
        "mov {}, lr",
        out(reg) lr_raw
    );
    let lr_exit_fn: Option<ExitFn> = match lr_raw {
        0 => None,
        ptr => unsafe { Some(core::mem::transmute::<usize, ExitFn>(ptr)) },
    };
    // We actually want `_start` which is at the start of the .text region, but we don't know if
    // it will be close enough to support lookup via `adr`.
    // Since this function is in `.text` anyway, use QueryMemory SVC to find the actual start
    // This is also a contender to let the compiler decide how to do this by just getting a function pointer (e.g. just get `__nx_rrt0_entry` as a `fn() -> !` type)
    let self_base_address: *mut u8;
    asm!(
        "adr {}, __nx_rrt0_entry",
        out(reg) self_base_address
    );
    let (info, _) = svc::query_memory(self_base_address).unwrap();
    // Use the strict provenance API to convert the usize to a *mut u8 with copied pointer metadata.
    let aslr_base_address = self_base_address.with_addr(info.base_address);
    // assume that the MOD0 structure is at the start of .text
    let mod0 = elf::mod0::Header::from_text_start_addr(aslr_base_address);
    let start_dyn = mod0.get_dyn_start();
    // Apply our own dynamic relocations and zero `.bss` before any statics are touched.
    elf::relocate_with_dyn(aslr_base_address, start_dyn);
    mod0.zero_bss_section();
    let eh_hdr_ptr_start = mod0.get_eh_frame_header_start();
    TEXT_BASE_ADDRESS.store(
        self_base_address.expose_provenance(),
        core::sync::atomic::Ordering::Relaxed,
    );
    EH_FRAME_HDR_SECTION.set(eh_hdr_ptr_start);
    // Register `.eh_frame_hdr` with the unwinder; best-effort, the result is ignored.
    let _ = unwinding::custom_eh_frame_finder::set_custom_eh_frame_finder(&EH_FRAME_HDR_SECTION);
    // make sure that the writes are complete before there are any accesses
    core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
    /*
    Possible entry arguments:
     - NSO/KIP: x0 = 0, x1 = <main-thread-handle>
     - NRO (hbl): x0 = <abi-config-entries-ptr>, x1 = usize::MAX
     - Exception: x0 = <exception-type>, x1 = <stack-top>
    */
    let loader_mode = match (arg0, arg1) {
        (0, main_thread_handle) => LoaderMode::Nso(main_thread_handle as u32),
        (config_pointer, usize::MAX) => {
            LoaderMode::Nro(aslr_base_address.with_addr(config_pointer) as _)
        }
        (exception_type, exception_stack_top) => {
            // NOTE(review): assumes the kernel passes a valid ExceptionType discriminant —
            // the transmute is unchecked.
            let exception_type: svc::ExceptionType = mem::transmute(exception_type as u32);
            crate::exception::__nx_exception_dispatch(
                exception_type,
                core::ptr::with_exposed_provenance_mut(exception_stack_top),
            );
        }
    };
    normal_entry(loader_mode, lr_exit_fn);
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/svc.rs | src/svc.rs | //! This module wraps svc calls provided by `asm.rs`.
//! There is generally no function-level Safety docs, but the core requirement is that all raw pointers provided must be
//! validated by the caller.
use crate::arm;
use crate::ipc::sf::ncm;
use crate::result::*;
use crate::util;
use crate::util::ArrayString;
use core::mem;
use core::ptr;
pub mod asm;
pub mod rc;
/// Arbitration operations used by the address-arbitration SVCs.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(u32)]
pub enum ArbitrationType {
    WaitIfLessThan = 0,
    DecrementAndWaitIfLessThan = 1,
    WaitIfEqual = 2,
}

/// Signaling strategies used by the address-signaling SVCs.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(u32)]
pub enum SignalType {
    Signal = 0,
    SignalAndIncrementIfEqual = 1,
    SignalAndModifyBasedOnWaitingThreadCountIfEqual = 2,
}

/// Reasons passed to the kernel via [`break`][`r#break`].
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(u32)]
pub enum BreakReason {
    Panic = 0,
    Assert = 1,
    User = 2,
    PreLoadDll = 3,
    PostLoadDll = 4,
    PreUnloadDll = 5,
    PostUnloadDll = 6,
    CppException = 7,
    /// Flag bit (top bit), not a standalone reason — marks the break as notification-only.
    NotificationOnlyFlag = 0x80000000,
}

/// Operations for mapping/unmapping process code memory.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(u32)]
pub enum CodeMapOperation {
    MapOwner = 0,
    // Implicit discriminants continue: MapSlave = 1, UnmapOwner = 2, UnmapSlave = 3.
    MapSlave,
    UnmapOwner,
    UnmapSlave,
}

/// Kernel memory-state values, reported in [`MemoryInfo::state`] by [`query_memory`].
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(u32)]
pub enum MemoryState {
    #[default]
    Free = 0x0,
    Io = 0x1,
    Static = 0x2,
    Code = 0x3,
    CodeData = 0x4,
    Normal = 0x5,
    Shared = 0x6,
    Alias = 0x7,
    AliasCode = 0x8,
    AliasCodeData = 0x9,
    Ipc = 0xA,
    Stack = 0xB,
    ThreadLocal = 0xC,
    Transferred = 0xD,
    SharedTransferred = 0xE,
    SharedCode = 0xF,
    Inaccessible = 0x10,
    NonSecureIpc = 0x11,
    NonDeviceIpc = 0x12,
    Kernel = 0x13,
    GeneratedCode = 0x14,
    CodeOut = 0x15,
}

// Memory permission bitmask used by the memory-management SVCs
// (e.g. set_memory_permission, map_shared_memory, create_transfer_memory).
define_bit_set! {
    MemoryPermission (u32) {
        None = 0,
        Read = bit!(0),
        Write = bit!(1),
        Execute = bit!(2),
        DontCare = bit!(28)
    }
}

// Memory attribute bitmask, reported by query_memory and modified by set_memory_attribute
// (which only supports toggling Uncached, bit 3).
define_bit_set! {
    MemoryAttribute (u32) {
        None = 0,
        Borrowed = bit!(0),
        IpcMapped = bit!(1),
        DeviceMapped = bit!(2),
        Uncached = bit!(3)
    }
}
/// Memory-region description returned by [`query_memory`].
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct MemoryInfo {
    /// Start address of the (page-aligned) region.
    pub base_address: usize,
    /// Size of the region in bytes.
    pub size: usize,
    /// Kernel memory state of the region.
    pub state: MemoryState,
    /// Attribute flags of the region.
    pub attribute: MemoryAttribute,
    /// Access permissions of the region.
    pub permission: MemoryPermission,
    /// IPC reference count.
    pub ipc_refcount: u32,
    /// Device reference count.
    pub device_refcount: u32,
    /// Padding to the ABI layout size.
    pub pad: u32,
}

/// Info IDs accepted by [`get_info`].
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(u32)]
pub enum InfoId {
    /// Bitmask of allowed Core IDs.
    CoreMask = 0,
    /// Bitmask of allowed Thread Priorities.
    PriorityMask = 1,
    /// Base of the Alias memory region.
    AliasRegionAddress = 2,
    /// Size of the Alias memory region.
    AliasRegionSize = 3,
    /// Base of the Heap memory region.
    HeapRegionAddress = 4,
    /// Size of the Heap memory region.
    HeapRegionSize = 5,
    /// Total amount of memory available for process.
    TotalMemorySize = 6,
    /// Amount of memory currently used by process.
    UsedMemorySize = 7,
    /// Whether current process is being debugged.
    DebuggerAttached = 8,
    /// Current process's resource limit handle.
    ResourceLimit = 9,
    /// Number of idle ticks on CPU.
    IdleTickCount = 10,
    /// [2.0.0+] Random entropy for current process.
    RandomEntropy = 11,
    /// [2.0.0+] Base of the process's address space.
    AslrRegionAddress = 12,
    /// [2.0.0+] Size of the process's address space.
    AslrRegionSize = 13,
    /// [2.0.0+] Base of the Stack memory region.
    StackRegionAddress = 14,
    /// [2.0.0+] Size of the Stack memory region.
    StackRegionSize = 15,
    /// [3.0.0+] Total memory allocated for process memory management.
    SystemResourceSizeTotal = 16,
    /// [3.0.0+] Amount of memory currently used by process memory management.
    SystemResourceSizeUsed = 17,
    /// [3.0.0+] Program ID for the process.
    ProgramId = 18,
    /// [4.0.0-4.1.0] Min/max initial process IDs.
    InitialProcessIdRange = 19,
    /// [5.0.0+] Address of the process's exception context (for break).
    UserExceptionContextAddress = 20,
    /// [6.0.0+] Total amount of memory available for process, excluding that for process memory management.
    TotalNonSystemMemorySize = 21,
    /// [6.0.0+] Amount of memory used by process, excluding that for process memory management.
    UsedNonSystemMemorySize = 22,
    /// [9.0.0+] Whether the specified process is an Application.
    IsApplication = 23,
    /// [11.0.0+] The number of free threads available to the process's resource limit.
    FreeThreadCount = 24,
    /// [13.0.0+] Number of ticks spent on thread.
    ThreadTickCount = 25,
    /// [14.0.0+] Does process have access to SVC (only usable with \ref svcSynchronizePreemptionState at present).
    IsSvcPermitted = 26,
    /// [16.0.0+] Low bits of the physical address for a KIoRegion.
    IoRegionHint = 27,
    /// [18.0.0+] Extra size added to the reserved region.
    AliasRegionExtraSize = 28,
    /// [19.0.0+] Low bits of the process address for a KTransferMemory.
    TransferMemoryHint = 34,
    /// [1.0.0-12.1.0] Number of ticks spent on thread.
    ThreadTickCountDeprecated = 0xF0000002,
}

/// Parameter IDs for system-wide info queries.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(u64)]
pub enum SystemInfoParam {
    /// Total amount of DRAM available to system.
    TotalPhysicalMemorySize = 0,
    /// Current amount of DRAM used by system.
    UsedPhysicalMemorySize = 1,
    /// Min/max initial process IDs.
    InitialProcessIdRange = 2,
}
/// Address-space configuration encoded in [`CreateProcessFlags`].
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(u8)]
pub enum AddressSpaceType {
    ThirtyTwoBit = 0,
    SixtyFourBitDeprecated = 1,
    ThirtyTwoBitWithoutAlias = 2,
    SixtyFourBit = 3,
    /// Full field mask; also used as the default and as the catch-all for unknown raw values.
    #[default]
    Mask = 0x7,
}

impl AddressSpaceType {
    // Conversion helpers used by the `bitfield` field accessors.
    const fn into_bits(self) -> u8 {
        self as _
    }
    const fn from_bits(val: u8) -> Self {
        match val {
            // SAFETY: `val` is in 0..=3, which are exactly the declared discriminants.
            0..=3 => unsafe { core::mem::transmute::<u8, Self>(val) },
            _ => Self::Mask,
        }
    }
}

/// Memory-pool selection encoded in [`CreateProcessFlags`].
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(u8)]
pub enum MemoryPoolType {
    Application = 0,
    Applet = 1,
    System = 2,
    SystemNonSecure = 3,
    /// Full field mask; also used as the default and as the catch-all for unknown raw values.
    #[default]
    Mask = 0xF,
}

impl MemoryPoolType {
    // Conversion helpers used by the `bitfield` field accessors.
    const fn into_bits(self) -> u8 {
        self as _
    }
    const fn from_bits(val: u8) -> Self {
        match val {
            // SAFETY: `val` is in 0..=3, which are exactly the declared discriminants.
            0..=3 => unsafe { core::mem::transmute::<u8, Self>(val) },
            _ => Self::Mask,
        }
    }
}

/// Bit-packed flags for process creation (see [`CreateProcessInfo::flags`]).
#[bitfield_struct::bitfield(u32, order = Lsb)]
pub struct CreateProcessFlags {
    pub is_64bit: bool,
    #[bits(3, default = AddressSpaceType::Mask)]
    pub address_space_flags: AddressSpaceType,
    pub enable_debug: bool,
    pub enable_aslr: bool,
    pub is_application: bool,
    #[bits(4, default = MemoryPoolType::Mask)]
    pub memory_pool_type: MemoryPoolType,
    pub optimise_memory_allocation: bool,
    pub disable_device_address_space_merge: bool,
    pub alias_region_extra_size: bool,
    #[bits(18)]
    _unused: u32,
}

impl CreateProcessFlags {
    /// Returns a value with every boolean flag set and both enum fields at their full masks.
    pub const fn all() -> Self {
        Self::new()
            .with_is_64bit(true)
            .with_address_space_flags(AddressSpaceType::Mask)
            .with_enable_debug(true)
            .with_enable_aslr(true)
            .with_is_application(true)
            .with_memory_pool_type(MemoryPoolType::Mask)
            .with_optimise_memory_allocation(true)
            .with_disable_device_address_space_merge(true)
            .with_alias_region_extra_size(true)
    }
}
/// Parameters describing a process to be created.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(C)]
pub struct CreateProcessInfo {
    /// Process name (fixed 12-byte buffer).
    pub name: ArrayString<12>,
    pub version: u32,
    pub program_id: u64,
    pub code_address: usize,
    pub code_num_pages: i32,
    /// Raw [`CreateProcessFlags`] value.
    pub flags: u32,
    pub resource_limit_handle: Handle,
    pub system_resource_page_count: i32,
}

/// Selectors for per-thread values used by the debug SVCs.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(C)]
pub enum DebugThreadParam {
    ActualPriority = 0,
    State = 1,
    IdealCore = 2,
    CurrentCore = 3,
    CoreMask = 4,
}

/// Physical/virtual address pair with a size.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct PhysicalMemoryInfo {
    physical_address: usize,
    virtual_address: usize,
    size: usize,
}

/// Payload for a process-attach debug event.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct AttachProcessDebugEventInfo {
    pub program_id: ncm::ProgramId,
    pub process_id: u64,
    pub name: util::ArrayString<12>,
    pub flags: u32,
    pub user_exception_context_address: u64,
}

/// Payload for a thread-attach debug event.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct AttachThreadDebugEventInfo {
    pub thread_id: u64,
    pub tls_ptr: usize,
    pub entrypoint: usize,
}

/// Payload for a process/thread-exit debug event.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct ExitDebugEventInfo {
    pub exit_type: u32,
}

/// Payload for an exception debug event.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct ExceptionDebugEventInfo {
    pub exception_type: u32,
    pub fault_register: u32,
}

/// Event-specific payload of a [`DebugEvent`].
///
/// NOTE: the active field is expected to correspond to [`DebugEvent::event_type`];
/// reading any other field is unsound.
#[derive(Copy, Clone)]
#[repr(C)]
pub union DebugEventInfo {
    pub attach_process: AttachProcessDebugEventInfo,
    pub attach_thread: AttachThreadDebugEventInfo,
    pub exit_process: ExitDebugEventInfo,
    pub exit_thread: ExitDebugEventInfo,
    pub exception: ExceptionDebugEventInfo,
}

/// Discriminant for [`DebugEventInfo`].
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(u32)]
pub enum DebugEventType {
    AttachProcess,
    AttachThread,
    ExitProcess,
    ExitThread,
    Exception,
}

/// A single debug event record.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct DebugEvent {
    pub event_type: DebugEventType,
    pub flags: u32,
    pub thread_id: u32,
    pub info: DebugEventInfo,
}

/// Exception types delivered to the process's exception entry (see the rrt0 entry path).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(u32)]
pub enum ExceptionType {
    Init = 0x000,
    InstructionAbort = 0x100,
    DataAbort = 0x101,
    UnalignedInstruction = 0x102,
    UnalignedData = 0x103,
    UndefinedInstruction = 0x104,
    ExceptionInstruction = 0x105,
    MemorySystemError = 0x106,
    FpuException = 0x200,
    InvalidSystemCall = 0x301,
    SystemCallBreak = 0x302,
    AtmosphereStdAbort = 0xFFE,
}

/// Page-info value returned by [`query_memory`] alongside [`MemoryInfo`].
pub type PageInfo = u32;
/// Read-only address as passed to SVCs.
pub type Address = *const u8;
/// Mutable address as passed to SVCs.
pub type MutAddress = *mut u8;
/// Size/length in bytes.
pub type Size = usize;
/// Entrypoint signature for threads created via [`create_thread`].
pub type ThreadEntrypointFn = unsafe extern "C" fn(*mut u8) -> !;
/// Raw kernel object handle.
pub type Handle = u32;
/// Guard that closes the wrapped [`Handle`] on drop (unless it is [`INVALID_HANDLE`]).
pub struct ScopedHandle(pub Handle);

impl ScopedHandle {
    /// Creates a scoped guard for the handle.
    /// The handle can still be accessed and copied, but will become invalid when this struct is dropped.
    pub fn guard(handle: Handle) -> Self {
        Self(handle)
    }

    /// Take the value out without running the destructor and closing the handle, consuming the guard.
    ///
    /// # Safety
    ///
    /// The caller becomes responsible for eventually closing the returned handle.
    pub unsafe fn take(guard: Self) -> Handle {
        mem::ManuallyDrop::new(guard).0
    }
}

impl Drop for ScopedHandle {
    fn drop(&mut self) {
        if self.0 != INVALID_HANDLE {
            // ignore the error as it will only happen if the handle has already become invalid.
            let _ = close_handle(self.0);
        }
    }
}
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
/// Context of a scheduled thread.
pub struct LastThreadContext {
    /// Frame Pointer for the thread.
    fp: u64,
    /// Stack Pointer for the thread.
    sp: u64,
    /// Link Register for the thread.
    lr: u64,
    /// Program Counter for the thread.
    pc: u64,
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(C)]
/// Limitable Resources.
pub enum LimitableResource {
    /// How much memory can a process map.
    Memory = 0,
    /// How many threads can a process spawn.
    Threads = 1,
    /// How many events can a process have.
    Events = 2,
    /// How many transfer memories can a process make.
    TransferMemories = 3,
    /// How many sessions can a process own.
    Sessions = 4,
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(C)]
/// Thread/Process Scheduler State.
pub enum SchedulerState {
    /// Can be scheduled.
    Runnable = 0,
    /// Will not be scheduled.
    Paused = 1,
}

/// Sentinel value for an invalid/absent handle.
pub const INVALID_HANDLE: Handle = 0;
/// Pseudo-handle referring to the current thread.
pub const CURRENT_THREAD_PSEUDO_HANDLE: Handle = 0xFFFF8000;
/// Pseudo-handle referring to the current process.
pub const CURRENT_PROCESS_PSEUDO_HANDLE: Handle = 0xFFFF8001;
/// Default processor ID (-2) used for processes/threads.
pub const DEFAULT_PROCESS_PROCESSOR_ID: i32 = -2;
/// Set the process heap to a given size. It can both extend and shrink the heap.
#[inline(always)]
pub fn set_heap_size(size: Size) -> Result<MutAddress> {
    let mut address: MutAddress = ptr::null_mut();
    // SAFETY: `address` is a live out-pointer for the duration of the call.
    let rc = unsafe { asm::set_heap_size(&mut address, size) };
    pack(rc, address)
}

/// Set the memory permissions of a (page-aligned) range of memory.
///
/// `MemoryPermission::Write()` (alone) and `MemoryPermission::Execute()` are not allowed.
/// This can be used to move back and forth between `MemoryPermission::None()`, `MemoryPermission::Read()`
/// and `MemoryPermission::Read() | MemoryPermission::Write()`.
#[inline(always)]
pub unsafe fn set_memory_permission(
    address: Address,
    size: Size,
    value: MemoryPermission,
) -> Result<()> {
    // SAFETY: validity of the address range is the caller's responsibility (see module docs).
    let rc = unsafe { asm::set_memory_permission(address, size, value) };
    pack(rc, ())
}

/// Set the memory attributes of a (page-aligned) range of memory.
///
/// Only setting or unsetting the `Uncached` flag (bit 3) is supported,
/// so the function signature has been changed from libnx to enforce this constraint.
///
/// # Safety
///
/// The provided address must be valid, and should be page aligned (0x1000).
#[inline(always)]
pub unsafe fn set_memory_attribute(address: Address, size: Size, set_uncached: bool) -> Result<()> {
    let attribute = if set_uncached {
        MemoryAttribute::Uncached()
    } else {
        MemoryAttribute::None()
    };
    // The mask value 8 selects exactly the Uncached bit (bit 3).
    // SAFETY: validity of the address range is the caller's responsibility.
    let rc = unsafe { asm::set_memory_attribute(address, size, 8, attribute) };
    pack(rc, ())
}

/// Maps a memory range into a different range. Mainly used for adding guard pages around stack.
///
/// Source range gets reprotected to [`MemoryAttribute::None()`] (it can no longer be accessed),
/// and [`MemoryAttribute::Borrowed()`] is set in the source page's [`MemoryAttribute`].
#[inline(always)]
pub unsafe fn map_memory(address: Address, source_address: MutAddress, size: Size) -> Result<()> {
    // SAFETY: validity of both ranges is the caller's responsibility.
    let rc = unsafe { asm::map_memory(address, source_address, size) };
    pack(rc, ())
}

/// Unmaps a region that was previously mapped with [`map_memory`]
#[inline(always)]
pub unsafe fn unmap_memory(address: Address, source_address: MutAddress, size: Size) -> Result<()> {
    // SAFETY: validity of both ranges is the caller's responsibility.
    let rc = unsafe { asm::unmap_memory(address, source_address, size) };
    pack(rc, ())
}

/// Query information about an address. Will always fetch the lowest page-aligned mapping that contains the provided address.
///
/// null pointers are OK here, as we are just querying memory properties.
#[inline(always)]
#[allow(clippy::not_unsafe_ptr_arg_deref)]
pub fn query_memory(address: Address) -> Result<(MemoryInfo, PageInfo)> {
    let mut memory_info: MemoryInfo = Default::default();
    let mut page_info: PageInfo = 0;
    // SAFETY: the out-pointers are live locals; the queried address is never dereferenced.
    let rc = unsafe { asm::query_memory(&mut memory_info, &mut page_info, address) };
    pack(rc, (memory_info, page_info))
}
/// What it says on the tin.
#[inline(always)]
pub fn exit_process() -> ! {
    // SAFETY: terminating the current process has no memory-safety preconditions.
    unsafe { asm::exit_process() }
}

/// Creates a thread.
///
/// The pointer to the thread arguments and stack memory _must_ live at least as long as the thread is alive.
#[inline(always)]
pub unsafe fn create_thread(
    entry: ThreadEntrypointFn,
    entry_arg: MutAddress,
    stack_top: MutAddress,
    priority: i32,
    processor_id: i32,
) -> Result<Handle> {
    let mut handle: Handle = 0;
    // SAFETY: `handle` is a live out-pointer; entry-arg/stack validity is the caller's responsibility.
    let rc = unsafe {
        asm::create_thread(
            &mut handle,
            entry,
            entry_arg,
            stack_top,
            priority,
            processor_id,
        )
    };
    pack(rc, handle)
}

/// Starts executing a prepared thread by handle (received from [`create_thread`]).
#[inline(always)]
pub fn start_thread(handle: Handle) -> Result<()> {
    let rc = unsafe { asm::start_thread(handle) };
    pack(rc, ())
}

/// What it says on the tin.
#[inline(always)]
pub fn exit_thread() -> ! {
    // SAFETY: terminating the current thread has no memory-safety preconditions.
    unsafe { asm::exit_thread() }
}

/// Sleep the current thread for at least `timeout` nanoseconds or yields if passed a value from [`YieldType`][`crate::thread::YieldType`]
#[inline(always)]
pub fn sleep_thread(timeout: i64) -> Result<()> {
    let rc = unsafe { asm::sleep_thread(timeout) };
    pack(rc, ())
}

/// Gets a thread's priority.
#[inline(always)]
pub fn get_thread_priority(handle: Handle) -> Result<i32> {
    let mut priority: i32 = 0;
    // SAFETY: `priority` is a live out-pointer for the duration of the call.
    let rc = unsafe { asm::get_thread_priority(&mut priority, handle) };
    pack(rc, priority)
}

/// Sets a thread's priority.
#[inline(always)]
pub fn set_thread_priority(handle: Handle, priority: i32) -> Result<()> {
    let rc = unsafe { asm::set_thread_priority(handle, priority) };
    pack(rc, ())
}

/// Gets a thread's core mask.
#[inline(always)]
pub fn get_thread_core_mask(handle: Handle) -> Result<(i32, u64)> {
    let mut mask = 0;
    let mut affinity = 0;
    // SAFETY: both out-pointers are live locals for the duration of the call.
    let rc = unsafe { asm::get_thread_core_mask(&mut mask, &mut affinity, handle) };
    pack(rc, (mask, affinity))
}

/// Sets a thread's core mask.
#[inline(always)]
pub fn set_thread_core_mask(handle: Handle, preferred_core: i32, affinity_mask: u32) -> Result<()> {
    let rc = unsafe { asm::set_thread_core_mask(handle, preferred_core, affinity_mask) };
    pack(rc, ())
}

/// Gets the processor number (core) that the current thread is executing on.
#[inline(always)]
pub fn get_current_processor_number() -> u32 {
    // SAFETY: this SVC only reads scheduler state; no pointers are involved.
    unsafe { asm::get_current_processor_number() }
}
/// Sets an event's signalled status.
#[inline(always)]
pub fn signal_event(handle: Handle) -> Result<()> {
    let rc = unsafe { asm::signal_event(handle) };
    pack(rc, ())
}

/// Clears an event's signalled status.
#[inline(always)]
pub fn clear_event(handle: Handle) -> Result<()> {
    let rc = unsafe { asm::clear_event(handle) };
    pack(rc, ())
}

/// Maps a block of shared memory.
#[inline(always)]
pub unsafe fn map_shared_memory(
    handle: Handle,
    address: MutAddress,
    size: Size,
    permission: MemoryPermission,
) -> Result<()> {
    // SAFETY: validity of the target range is the caller's responsibility.
    let rc = unsafe { asm::map_shared_memory(handle, address, size, permission) };
    pack(rc, ())
}

/// Unmaps a block of shared memory.
#[inline(always)]
pub unsafe fn unmap_shared_memory(handle: Handle, address: Address, size: Size) -> Result<()> {
    // SAFETY: validity of the target range is the caller's responsibility.
    let rc = unsafe { asm::unmap_shared_memory(handle, address, size) };
    pack(rc, ())
}

/// Creates a block of transfer memory.
///
/// The memory will be reprotected with `permissions` after creation (usually set to none).
/// The original memory permissions will be restored when the handle is closed.
#[inline(always)]
#[allow(clippy::not_unsafe_ptr_arg_deref)]
pub fn create_transfer_memory(
    address: MutAddress,
    size: Size,
    permissions: MemoryPermission,
) -> Result<Handle> {
    let mut handle: Handle = 0;
    // SAFETY: `handle` is a live out-pointer for the duration of the call.
    let rc = unsafe { asm::create_transfer_memory(&mut handle, address, size, permissions) };
    pack(rc, handle)
}

/// Closes a handle, decrementing the reference count of the corresponding kernel object.
#[inline(always)]
pub fn close_handle(handle: Handle) -> Result<()> {
    let rc = unsafe { asm::close_handle(handle) };
    pack(rc, ())
}

/// Resets a signal.
#[inline(always)]
pub fn reset_signal(handle: Handle) -> Result<()> {
    let rc = unsafe { asm::reset_signal(handle) };
    pack(rc, ())
}

/// Waits on one or more synchronization objects, optionally with a timeout.
///
/// The max number of handles is `0x40` (64). This is a Horizon kernel limitation.
#[inline(always)]
pub unsafe fn wait_synchronization(handles: &[Handle], timeout: i64) -> Result<i32> {
    let mut index: i32 = 0;
    // SAFETY: the slice yields a valid pointer/length pair and `index` is a live out-pointer.
    let rc = unsafe {
        asm::wait_synchronization(&mut index, handles.as_ptr(), handles.len() as u32, timeout)
    };
    pack(rc, index)
}
/// The same as [`wait_synchronization`] for a single handle
#[inline(always)]
pub fn wait_synchronization_one(handle: Handle, timeout: i64) -> Result<()> {
    // The signalled-object index is meaningless for a single handle, so it is discarded.
    let mut index: i32 = 0;
    // SAFETY: `index` and `handle` are live locals for the duration of the call.
    let rc = unsafe { asm::wait_synchronization(&mut index, &handle, 1u32, timeout) };
    pack(rc, ())
}

/// If the referenced thread is currently in a synchronization call ([`wait_synchronization`], [`reply_and_receive`]
/// or [`reply_and_receive_light`]), that call will be interrupted and return `0xec01` ([`ResultCancelled`][`rc::ResultCancelled`]).
/// If that thread is not currently executing such a synchronization call, the next call to a synchronization call will return `0xec01`.
///
/// This doesn't take force-pause (activity/debug pause) into account.
#[inline(always)]
pub fn cancel_synchronization(thread_handle: Handle) -> Result<()> {
    let rc = unsafe { asm::cancel_synchronization(thread_handle) };
    pack(rc, ())
}

/// Arbitrates a mutex lock operation in userspace.
#[inline(always)]
pub unsafe fn arbitrate_lock(thread_handle: Handle, tag_location: Address, tag: u32) -> Result<()> {
    // SAFETY: validity of `tag_location` is the caller's responsibility.
    let rc = unsafe { asm::arbitrate_lock(thread_handle, tag_location, tag) };
    pack(rc, ())
}

/// Arbitrates a mutex unlock operation in userspace.
#[inline(always)]
pub unsafe fn arbitrate_unlock(tag_location: Address) -> Result<()> {
    // SAFETY: validity of `tag_location` is the caller's responsibility.
    let rc = unsafe { asm::arbitrate_unlock(tag_location) };
    pack(rc, ())
}

/// Performs a condition variable wait operation in userspace.
#[inline(always)]
pub unsafe fn wait_process_wide_key_atomic(
    wait_location: Address,
    tag_location: Address,
    desired_tag: u32,
    timeout: i64,
) -> Result<()> {
    // SAFETY: validity of both locations is the caller's responsibility.
    let rc = unsafe {
        asm::wait_process_wide_key_atomic(wait_location, tag_location, desired_tag, timeout)
    };
    pack(rc, ())
}

/// Performs a condition variable wake-up operation in userspace.
#[inline(always)]
pub unsafe fn signal_process_wide_key(tag_location: Address, desired_tag: i32) -> Result<()> {
    // SAFETY: validity of `tag_location` is the caller's responsibility.
    let rc = unsafe { asm::signal_process_wide_key(tag_location, desired_tag) };
    pack(rc, ())
}

/// Gets the current system tick.
#[inline(always)]
pub fn get_system_tick() -> u64 {
    // SAFETY: this SVC only reads the tick counter; no pointers are involved.
    unsafe { asm::get_system_tick() }
}

/// Connects to a registered named port.
#[inline(always)]
pub unsafe fn connect_to_named_port(name: &core::ffi::CStr) -> Result<Handle> {
    let mut handle: Handle = 0;
    // SAFETY: `handle` is a live out-pointer and the `CStr` guarantees a NUL-terminated name.
    let rc = unsafe { asm::connect_to_named_port(&mut handle, name.as_ptr().cast()) };
    pack(rc, handle)
}
/// Sends a light IPC synchronization request to a session.
#[inline(always)]
pub fn send_sync_request_light(handle: Handle) -> Result<()> {
unsafe {
let rc = asm::send_sync_request_light(handle);
pack(rc, ())
}
}
/// Sends a synchronous IPC request to a session.
#[inline(always)]
pub fn send_sync_request(handle: Handle) -> Result<()> {
unsafe {
let rc = asm::send_sync_request(handle);
pack(rc, ())
}
}
/// Sends a synchronous IPC request to a session from an user allocated buffer.
///
/// The buffer size must be a multiple of the system page size (0x1000).
#[inline(always)]
pub unsafe fn send_sync_request_with_user_data(buffer: &mut [u8], handle: Handle) -> Result<()> {
unsafe {
let rc = asm::send_sync_request_with_user_data(buffer.as_mut_ptr(), buffer.len(), handle);
pack(rc, ())
}
}
/// Sends an asynchronous IPC request to a session from an user allocated buffer .
///
/// The buffer size must be a multiple of the system page size (0x1000).
#[inline(always)]
pub unsafe fn send_async_request_with_user_data(
buffer: &mut [u8],
session: Handle,
) -> Result<Handle> {
unsafe {
let mut out_handle = 0;
let rc = asm::send_async_request_with_user_data(
&mut out_handle,
buffer.as_mut_ptr(),
buffer.len(),
session,
);
pack(rc, out_handle)
}
}
/// Gets the PID associated with a process.
#[inline(always)]
pub fn get_process_id(process_handle: Handle) -> Result<u64> {
    unsafe {
        let mut process_id: u64 = 0;
        let rc = asm::get_process_id(&mut process_id, process_handle);
        pack(rc, process_id)
    }
}
/// Gets the TID associated with a thread.
#[inline(always)]
pub fn get_thread_id(handle: Handle) -> Result<u64> {
    unsafe {
        let mut thread_id: u64 = 0;
        let rc = asm::get_thread_id(&mut thread_id, handle);
        pack(rc, thread_id)
    }
}
/// Breaks execution
///
/// The `debug_data` buffer is passed to a debugging instance if one is attached.
#[inline(always)]
pub fn r#break(reason: BreakReason, debug_data: &[u8]) -> Result<()> {
    unsafe {
        let rc = asm::r#break(reason, debug_data.as_ptr(), debug_data.len());
        pack(rc, ())
    }
}
/// Outputs debug text, if used during debugging.
///
/// # Safety
///
/// NOTE(review): `msg` is a valid `&CStr`, so it is unclear from this wrapper which
/// additional invariant the caller must uphold — confirm why this is `unsafe fn`.
#[inline(always)]
pub unsafe fn output_debug_string(msg: &core::ffi::CStr) -> Result<()> {
    unsafe {
        // Length excludes the nul terminator (CStr::count_bytes).
        let rc = asm::output_debug_string(msg.as_ptr().cast(), msg.count_bytes());
        pack(rc, ())
    }
}
/// Returns from an exception.
///
/// This syscall does not return to the caller (diverging).
#[inline(always)]
pub fn return_from_exception(res: ResultCode) -> ! {
    unsafe { asm::return_from_exception(res) }
}
/// Retrieves information about the system, or a certain kernel object, depending on the value of `id`.
///
/// `handle` is for particular kernel objects, but `INVALID_HANDLE` is used to retrieve information about the system.
#[inline(always)]
pub fn get_info(id: InfoId, handle: Handle, sub_id: u64) -> Result<u64> {
    unsafe {
        let mut info: u64 = 0;
        let rc = asm::get_info(&mut info, id, handle, sub_id);
        pack(rc, info)
    }
}
/*
/// Flushes the entire data cache (by set/way).
///
/// This is a privileged syscall and may not be available.
///
/// This syscall has dangerous side effects and should not be used.
#[inline(always)]
#[deprecated]
pub unsafe fn flush_entire_data_cache() -> Result<()> {
unsafe {
let rc = asm::flush_entire_data_cache();
pack(rc, ())
}
}
*/
/// Flushes data cache for a virtual address range.
///
/// [`cache_flush`][`crate::arm::cache_flush`] should be used instead whenever possible.
///
/// # Safety
///
/// `address`/`len` must describe a valid, accessible virtual address range.
#[inline(always)]
#[deprecated]
pub unsafe fn flush_data_cache(address: Address, len: Size) -> Result<()> {
    unsafe {
        let rc = asm::flush_data_cache(address, len);
        pack(rc, ())
    }
}
/// Maps new heap memory at the desired address. [3.0.0+]
///
/// # Safety
///
/// The caller must ensure the target range does not overlap memory in use; the
/// mapping changes the process address space.
#[inline(always)]
pub unsafe fn map_physical_memory(address: Address, len: Size) -> Result<()> {
    unsafe {
        let rc = asm::map_physical_memory(address, len);
        pack(rc, ())
    }
}
/// Unmaps memory mapped by [`map_physical_memory`]. [3.0.0+]
///
/// # Safety
///
/// The range must have been mapped by [`map_physical_memory`] and must no longer be
/// referenced after this call.
#[inline(always)]
pub unsafe fn unmap_physical_memory(address: Address, len: Size) -> Result<()> {
    unsafe {
        let rc = asm::unmap_physical_memory(address, len);
        pack(rc, ())
    }
}
/// Gets information about a thread that will be scheduled in the future. [5.0.0+]
///
/// `ns` is the nanoseconds in the future when the thread information will be sampled
/// by the kernel.
///
/// On success, returns the sampled thread context and its thread ID.
///
/// This is a privileged syscall and may not be available.
#[inline(always)]
pub fn get_debug_future_thread_info(
    debug_proc_handle: Handle,
    ns: i64,
) -> Result<(LastThreadContext, u64)> {
    unsafe {
        let mut out_context = mem::zeroed();
        let mut out_thread_id = 0;
        let rc = asm::get_debug_future_thread_info(
            &mut out_context,
            &mut out_thread_id,
            debug_proc_handle,
            ns,
        );
        pack(rc, (out_context, out_thread_id))
    }
}
/// Gets information about the previously-scheduled thread.
///
/// On success, returns the thread context, its TLS address and flags
/// (flag semantics per kernel docs — see Switchbrew `svcGetLastThreadInfo`).
#[inline(always)]
pub fn get_last_thread_info() -> Result<(LastThreadContext, u64, u32)> {
    unsafe {
        let mut out_context = mem::zeroed();
        let mut out_tls_address = 0;
        let mut out_flags = 0;
        let rc = asm::get_last_thread_info(&mut out_context, &mut out_tls_address, &mut out_flags);
        pack(rc, (out_context, out_tls_address, out_flags))
    }
}
/// Gets the maximum value a LimitableResource can have, for a Resource Limit handle.
///
/// This is a privileged syscall and may not be available.
#[inline(always)]
pub fn get_resource_limit_limit_value(
    resource_limit_handle: Handle,
    limit_kind: LimitableResource,
) -> Result<i64> {
    unsafe {
        let mut out_val = 0;
        let rc =
            asm::get_resource_limit_limit_value(&mut out_val, resource_limit_handle, limit_kind);
        pack(rc, out_val)
    }
}
/// Gets the current value a LimitableResource has, for a Resource Limit handle.
///
/// This is a privileged syscall and may not be available.
#[inline(always)]
pub fn get_resource_limit_current_value(
    resource_limit_handle: Handle,
    limit_kind: LimitableResource,
) -> Result<i64> {
    unsafe {
        let mut out_val = 0;
        let rc =
            asm::get_resource_limit_current_value(&mut out_val, resource_limit_handle, limit_kind);
        pack(rc, out_val)
    }
}
/// Pauses/unpauses a thread.
///
/// The desired state is given by `thread_state` (see [`SchedulerState`]).
#[inline(always)]
pub fn set_thread_activity(thread_handle: Handle, thread_state: SchedulerState) -> Result<()> {
    unsafe {
        let rc = asm::set_thread_activity(thread_handle, thread_state);
        pack(rc, ())
    }
}
/// Dumps the registers of a thread paused by [`set_thread_activity`] (register groups: all)
///
/// NOTE(review): the context retrieved by the kernel is read into a local and then
/// discarded — the function returns `Ok(())` instead of the context. This looks like
/// an oversight (compare [`get_debug_future_thread_info`], which returns its
/// out-parameters), but returning the context would change the public signature, so
/// it is only flagged here.
#[inline(always)]
pub fn get_thread_context3(thread_handle: Handle) -> Result<()> {
    unsafe {
        // Filled by the kernel but never handed back to the caller (see NOTE above).
        let mut out_context = Default::default();
        let rc = asm::get_thread_context3(&mut out_context, thread_handle);
        pack(rc, ())
    }
}
/// Arbitrates an address depending on type and value. [4.0.0+]
///
/// # Safety
///
/// `address` must point to a valid, accessible 32-bit value for the arbitration.
#[inline(always)]
pub unsafe fn wait_for_address(
    address: Address,
    arbitration_type: ArbitrationType,
    value: u32,
    timeout: i64,
) -> Result<()> {
    unsafe {
        let rc = asm::wait_for_address(address, arbitration_type as u32, value, timeout);
        pack(rc, ())
    }
}
/// Signals (and updates) an address depending on type and value. [4.0.0+]
///
/// # Safety
///
/// `address` must point to a valid, accessible 32-bit value being arbitrated.
#[inline(always)]
pub unsafe fn signal_to_address(
    address: Address,
    signal: SignalType,
    value: u32,
    thread_signal_count: i32,
) -> Result<()> {
    unsafe {
        let rc = asm::signal_to_address(address, signal as u32, value, thread_signal_count);
        pack(rc, ())
    }
}
/// Sets thread preemption state (used during abort/panic). [8.0.0+]
///
/// # Safety
///
/// NOTE(review): wrapper takes no arguments, so the unsafe contract is not evident
/// here; also note it binds to `asm::synchronize_preemption_states` (plural) —
/// presumably the intended symbol, verify against the asm module.
#[inline(always)]
pub unsafe fn synchronize_preemption_state() -> Result<()> {
    unsafe {
        let rc = asm::synchronize_preemption_states();
        pack(rc, ())
    }
}
/// Creates an IPC session.
///
/// On success, returns the `(server, client)` handle pair.
///
/// This is a privileged syscall and may not be available.
#[inline(always)]
pub fn create_session(is_light: bool, unk_name: u64) -> Result<(Handle, Handle)> {
    unsafe {
        let mut server_handle: Handle = 0;
        let mut client_handle: Handle = 0;
        let rc = asm::create_session(&mut server_handle, &mut client_handle, is_light, unk_name);
        pack(rc, (server_handle, client_handle))
    }
}
/// Accepts an IPC session.
///
/// `handle` is a port/session server handle; on success returns the accepted
/// session handle.
#[inline(always)]
pub fn accept_session(handle: Handle) -> Result<Handle> {
    unsafe {
        let mut session_handle: Handle = 0;
        let rc = asm::accept_session(&mut session_handle, handle);
        pack(rc, session_handle)
    }
}
/// Performs light IPC input/output.
///
/// This is a privileged syscall and may not be available.
#[inline(always)]
pub fn reply_and_receive_light(handle: Handle) -> Result<()> {
    unsafe {
        let rc = asm::reply_and_receive_light(handle);
        pack(rc, ())
    }
}
/// Performs IPC input/output.
///
/// On success, returns the index (into `handles`) of the handle that was signaled.
///
/// This is a privileged syscall and may not be available.
///
/// # Safety
///
/// `handles` must point to at least `handle_count` valid [`Handle`] values.
#[inline(always)]
pub unsafe fn reply_and_receive(
    handles: *const Handle,
    handle_count: u32,
    reply_target: Handle,
    timeout: i64,
) -> Result<i32> {
    unsafe {
        let mut index: i32 = 0;
        let rc = asm::reply_and_receive(&mut index, handles, handle_count, reply_target, timeout);
        pack(rc, index)
    }
}
/// Performs IPC input/output on a user-allocated buffer.
///
/// This is a privileged syscall and may not be available.
#[inline(always)]
pub unsafe fn reply_and_receive_with_user_buffer(
user_buffer: &mut [u8],
handles: &[Handle],
reply_target: Handle,
timeout: i64,
) -> Result<i32> {
unsafe {
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | true |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/macros.rs | src/macros.rs | //! Library macros
//!
//! Note that all library macros are defined in this module (and exported) to easily use them all over the library
pub mod diag;
pub mod ipc;
pub mod alloc;
pub mod result;
pub mod rrt0;
pub mod util;
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/wait.rs | src/wait.rs | //! Sync/waiting utilities and wrappers
use crate::arm;
use crate::result::*;
use crate::svc;
/// Represents an event via a remote handle
pub struct RemoteEvent {
    /// The remote handle
    pub handle: svc::Handle,
}
impl RemoteEvent {
    /// Creates a [`RemoteEvent`] from a remote handle
    ///
    /// Takes ownership of the handle: it is closed when the [`RemoteEvent`] is dropped.
    ///
    /// # Arguments
    ///
    /// * `handle` - The remote handle
    #[inline]
    pub const fn new(handle: svc::Handle) -> Self {
        Self { handle }
    }
    /// Resets the [`RemoteEvent`]
    ///
    /// Clears the event's signaled state via [`svc::reset_signal`].
    #[inline]
    pub fn reset(&self) -> Result<()> {
        svc::reset_signal(self.handle)
    }
    /// Waits for the [`RemoteEvent`] with a given timeout, then resets it
    ///
    /// # Arguments
    ///
    /// * `timeout` - Wait timeout in nanoseconds, `-1` can be used to wait indefinitely
    #[inline]
    pub fn wait(&self, timeout: i64) -> Result<()> {
        wait_handles(&[self.handle], timeout)?;
        self.reset()
    }
}
impl Drop for RemoteEvent {
    /// Destroys the [`RemoteEvent`], closing its handle
    fn drop(&mut self) {
        // Close failures are ignored: there is no useful recovery in Drop.
        let _ = svc::close_handle(self.handle);
    }
}
/// Represents a system event with server and client handles
pub struct SystemEvent {
    /// The event's server handle
    pub server_handle: svc::Handle,
    /// The event's client handle
    pub client_handle: svc::Handle,
}
impl SystemEvent {
    /// Creates a new [`SystemEvent`] via the client/server handles obtained from [`svc::create_event`]
    ///
    /// Both handles are owned by the [`SystemEvent`] and closed on drop.
    pub fn new() -> Result<Self> {
        let (server_handle, client_handle) = svc::create_event()?;
        Ok(Self {
            server_handle,
            client_handle,
        })
    }
    /// Signals the [`SystemEvent`] (via the server handle)
    #[inline]
    pub fn signal(&self) -> Result<()> {
        svc::signal_event(self.server_handle)
    }
}
impl Drop for SystemEvent {
    /// Destroys the [`SystemEvent`], closing both server/client handles
    fn drop(&mut self) {
        // Close failures are ignored: there is no useful recovery in Drop.
        let _ = svc::close_handle(self.server_handle);
        let _ = svc::close_handle(self.client_handle);
    }
}
/// Represents how a waiter operates (essentially, whether it gets automatically cleared after being signaled)
pub enum WaiterType {
    /// A simple handle, that doesn't get cleared when the waiter wakes
    Handle,
    /// A wait handle that has the signal automatically cleared
    HandleWithClear,
}
/// Represents the max amount of objects the Nintendo Switch kernel can wait-sync on at the same time (like Windows)
pub const MAX_OBJECT_COUNT: u32 = 0x40;
/// Represents a waiting object for a handle
#[allow(dead_code)]
pub struct Waiter {
    // Handle to wait on.
    handle: svc::Handle,
    // Whether the signal is auto-cleared on wake.
    wait_type: WaiterType,
}
impl Waiter {
    /// Creates a new [`Waiter`] from a handle and a type
    ///
    /// # Arguments
    ///
    /// * `handle` - The waiter handle
    /// * `wait_type` - The waiter type
    #[inline]
    pub const fn from(handle: svc::Handle, wait_type: WaiterType) -> Self {
        Self { handle, wait_type }
    }
    /// Creates a new [`Waiter`] from a handle and [`WaiterType::Handle`] type
    ///
    /// # Arguments
    ///
    /// * `handle` - The waiter handle
    #[inline]
    pub const fn from_handle(handle: svc::Handle) -> Self {
        Self::from(handle, WaiterType::Handle)
    }
    /// Creates a new `Waiter` from a handle and [`WaiterType::HandleWithClear`] type
    ///
    /// # Arguments
    ///
    /// * `handle` - The waiter handle
    #[inline]
    pub const fn from_handle_with_clear(handle: svc::Handle) -> Self {
        Self::from(handle, WaiterType::HandleWithClear)
    }
}
// Strategy function used by `wait_impl` to perform one wait attempt over a slice
// of wait objects, returning the index of the signaled object.
type WaitFn<W> = fn(&[W], i64) -> Result<usize>;
// Wait strategy for raw kernel handles: defers to svc::wait_synchronization.
fn handles_wait_fn(handles: &[svc::Handle], timeout: i64) -> Result<usize> {
    unsafe { svc::wait_synchronization(handles, timeout).map(|idx| idx as usize) }
}
// Wait strategy for `Waiter` objects — not implemented yet (panics via todo!).
fn waiters_wait_fn(_waiters: &[Waiter], _timeout: i64) -> Result<usize> {
    todo!();
}
// Shared wait loop: retries the underlying wait until an object signals, the
// deadline passes, or a non-retryable error occurs.
//
// * With `timeout == -1` (wait forever), both TimedOut and Cancelled results are
//   swallowed and the wait is retried.
// * With a finite timeout, an absolute deadline (in system ticks) is computed once
//   so that retries after spurious Cancelled wake-ups only wait for the remaining
//   time rather than restarting the full timeout.
fn wait_impl<W>(wait_objects: &[W], timeout: i64, wait_fn: WaitFn<W>) -> Result<usize> {
    let has_timeout = timeout != -1;
    let mut deadline: u64 = 0;
    if has_timeout {
        deadline = arm::get_system_tick().saturating_add(arm::nanoseconds_to_ticks(timeout as u64));
    }
    loop {
        let this_timeout = match has_timeout {
            true => {
                // Remaining time until the deadline; saturates to 0 once passed.
                let remaining = deadline.saturating_sub(arm::get_system_tick());
                arm::ticks_to_nanoseconds(remaining) as i64
            }
            false => -1,
        };
        match (wait_fn)(wait_objects, this_timeout) {
            Ok(index) => return Ok(index),
            Err(rc) => {
                if svc::rc::ResultTimedOut::matches(rc) {
                    // Only a real error when the caller asked for a finite timeout.
                    if has_timeout {
                        return Err(rc);
                    }
                } else if !svc::rc::ResultCancelled::matches(rc) {
                    // Cancelled waits are retried; anything else propagates.
                    return Err(rc);
                }
            }
        }
    }
}
/// Waits for several [`Waiter`]s for a specified timeout, returning the index of the waiter which signals first
///
/// NOTE(review): the underlying `Waiter` wait strategy is currently `todo!()`, so this
/// will panic if called.
///
/// # Arguments
///
/// * `waiters` - [`Waiter`]s to wait for
/// * `timeout` - Wait timeout in nanoseconds, `-1` can be used to wait indefinitely
#[inline]
pub fn wait(waiters: &[Waiter], timeout: i64) -> Result<usize> {
    wait_impl(waiters, timeout, waiters_wait_fn)
}
/// Waits for several handles for a specified timeout, returning the index of the handle which signals first
///
/// # Arguments
///
/// * `handles` - Handles to wait for
/// * `timeout` - Wait timeout in nanoseconds, `-1` can be used to wait indefinitely
#[inline]
pub fn wait_handles(handles: &[svc::Handle], timeout: i64) -> Result<usize> {
    wait_impl(handles, timeout, handles_wait_fn)
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/rc.rs | src/rc.rs | //! Generic library result definitions
//!
//! All `rc` modules in this library contain result definitions (usually related to/for the parent module)
//!
//! All library results have module [`RESULT_MODULE`], and their descriptions are `<mod-specific submodule> + <res-value>`
//!
//! For example, [`ResultNotImplemented`] has module [`RESULT_MODULE`] and description [`RESULT_SUBMODULE`] + `1`
//!
//! List of existing submodules in the library:
//!
//! * `0`: library (misc)
//! * `100`: elf
//! * `200`: (unused)
//! * `300`: util
//! * `400`: diag
//! * `500`: gpu
//! * `600`: ipc
//! * `700`: fs
//! * `800`: input
//! * `900`: thread
//! * `1000`: mem
//! * `1100`: gpu/binder
//! * `1200`: gpu/parcel
//! * `1300`: ipc/server
pub const RESULT_MODULE: u32 = 430;
/// Result submodule for the base `rc` module.
pub const RESULT_SUBMODULE: u32 = 0;
result_define_subgroup!(RESULT_MODULE, RESULT_SUBMODULE => {
NotImplemented: 1,
NotSupported: 2,
NotInitialized: 3,
Panicked: 4
});
/*
- Submodule list for our own results:
0: library (misc)
100: dynamic
200: dynamic/elf
300: util
400: diag
500: gpu
600: ipc
700: fs
800: input
900: thread
1000: mem
1100: gpu/binder
1200: gpu/parcel
1300: ipc/server
*/
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/mem.rs | src/mem.rs | //! Memory (heap) support and utils
use crate::result::ResultBase;
use crate::svc;
pub mod alloc;
/// Blocks thread until the memory region specified has the permission passed
///
/// Polls [`svc::query_memory`] roughly every 100µs until the queried permission is
/// present or the timeout elapses (returning [`svc::rc::ResultTimedOut`]).
///
/// # Arguments
///
/// * `address`: The address to query for memory permissions
/// * `permission`: The memory permission to wait on
/// * `timeout`: Maximum time to wait, in nanoseconds; `None` waits (practically) forever
///
/// Note that if multiple permissions are specified (e.g. `MemoryPermission::Read | MemoryPermission::Write`), the function will return if *any* specified permission is present.
#[inline(always)]
pub fn wait_for_permission(
    address: svc::Address,
    permission: svc::MemoryPermission,
    timeout: Option<usize>,
) -> crate::result::Result<()> {
    let timeout = timeout.unwrap_or(usize::MAX);
    // Tracks elapsed time as (iterations * 100_000ns); this is approximate, since the
    // time spent inside query_memory itself is not accounted for.
    let mut time_taken: usize = 0;
    while !svc::query_memory(address)?
        .0
        .permission
        .intersects(permission)
    {
        result_return_if!(time_taken >= timeout, svc::rc::ResultTimedOut);
        time_taken = time_taken.saturating_add(100_000);
        // Sleep result is ignored: a failed sleep just makes the poll tighter.
        let _ = crate::thread::sleep(100_000);
    }
    Ok(())
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/hbl.rs | src/hbl.rs | //! HBL (homebrew loader) ABI support and utils
use atomic_enum::atomic_enum;
use core::sync::atomic::AtomicU32;
use core::sync::atomic::Ordering;
use core::sync::atomic::Ordering::Relaxed;
use crate::result::*;
use crate::svc::Handle;
use crate::svc::INVALID_HANDLE;
use crate::sync::Mutex;
use crate::sync::RwLock;
use crate::util::ArrayString;
use crate::version;
/// Represents the entry value keys for a hbl ABI context
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(u32)]
pub enum AbiConfigEntryKey {
    #[default]
    EndOfList = 0,
    MainThreadHandle = 1,
    NextLoadPath = 2,
    OverrideHeap = 3,
    OverrideService = 4,
    Argv = 5,
    SyscallAvailableHint = 6,
    AppletType = 7,
    AppletWorkaround = 8,
    Reserved9 = 9,
    ProcessHandle = 10,
    LastLoadResult = 11,
    RandomSeed = 14,
    UserIdStorage = 15,
    HosVersion = 16,
}
define_bit_set! {
    /// Represents optional flags for config entries
    AbiConfigEntryFlags (u32) {
        /// Mandatory entry
        Mandatory = bit!(0)
    }
}
define_bit_set! {
    /// Represents optional flag values for the specific case of [`AbiConfigEntryKey::AppletType`] config entries
    AbiConfigAppletFlags (u32) {
        ApplicationOverride = bit!(0)
    }
}
/// Represents an ABI config entry layout
///
/// The layout mirrors the homebrew-ABI config entry: key, flags, and two
/// key-specific 64-bit values.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct AbiConfigEntry {
    /// The entry type identifier
    pub key: AbiConfigEntryKey,
    /// The entry flags
    pub flags: AbiConfigEntryFlags,
    /// The entry-specific values
    pub value: [u64; 2],
}
/// Represents the hbl-ABI format of the system version
///
/// The raw value packs the version as `major << 16 | minor << 8 | micro`, with
/// bit 31 flagging that Atmosphere is the OS implementation.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct Version {
    value: u32,
}
impl Version {
    /// Represents the [`u64`] magic present in [`AbiConfigEntryKey::HosVersion`] entry values if Atmosphere is currently running
    pub const ATMOSPHERE_OS_IMPL_MAGIC: u64 = u64::from_be_bytes(*b"ATMOSPHR");
    /// Represents the bit set in the [`Version`] value if Atmosphere is the current OS implementation
    pub const IS_ATMOSPHERE_BIT: u32 = bit!(31);
    /// Creates an empty [`Version`], whose value will be `0.0.0`
    #[inline]
    pub const fn empty() -> Self {
        Self { value: 0 }
    }
    /// Creates a [`Version`] from a raw value and the magic representing the current OS implementation
    ///
    /// If the magic matches [`Self::ATMOSPHERE_OS_IMPL_MAGIC`], the Atmosphere bit is
    /// set in the stored value.
    ///
    /// # Arguments
    ///
    /// * `value`: The raw value
    /// * `os_impl_magic`: The magic value
    #[inline]
    pub const fn new(value: u32, os_impl_magic: u64) -> Self {
        let actual_value = match os_impl_magic == Self::ATMOSPHERE_OS_IMPL_MAGIC {
            true => value | Self::IS_ATMOSPHERE_BIT,
            false => value,
        };
        Self {
            value: actual_value,
        }
    }
    /// Gets the major component of the [`Version`]
    #[inline]
    pub const fn get_major(&self) -> u8 {
        ((self.value >> 16) & 0xFF) as u8
    }
    /// Gets the minor component of the [`Version`]
    #[inline]
    pub const fn get_minor(&self) -> u8 {
        ((self.value >> 8) & 0xFF) as u8
    }
    /// Gets the micro component of the [`Version`]
    #[inline]
    pub const fn get_micro(&self) -> u8 {
        (self.value & 0xFF) as u8
    }
    /// Gets whether Atmosphere is the current OS implementation
    #[inline]
    pub const fn is_atmosphere(&self) -> bool {
        (self.value & Self::IS_ATMOSPHERE_BIT) != 0
    }
    /// Gets a [`Version`][`version::Version`] type from this [`Version`]
    #[inline]
    pub const fn to_version(&self) -> version::Version {
        version::Version::new(self.get_major(), self.get_minor(), self.get_micro())
    }
}
// Raw ResultCode value of the previously-executed NRO; written once at startup.
static G_LAST_LOAD_RESULT: AtomicU32 = AtomicU32::new(0);
// Called by the startup code when parsing the hbl ABI config entries.
pub(crate) fn set_last_load_result(rc: ResultCode) {
    G_LAST_LOAD_RESULT.store(rc.get_value(), Ordering::Release);
}
/// Gets the last load [`ResultCode`]
///
/// This value represents the [`ResultCode`] of the last homebrew NRO executed before the current one
///
/// This value will only be set/useful if the current code is running through HBL
pub fn get_last_load_result() -> ResultCode {
    // Acquire pairs with the Release store in set_last_load_result.
    ResultCode::new(G_LAST_LOAD_RESULT.load(Ordering::Acquire))
}
// Own-process handle provided by HBL; written once at startup.
static G_PROCESS_HANDLE: AtomicU32 = AtomicU32::new(INVALID_HANDLE);
// Called by the startup code when parsing the hbl ABI config entries.
pub(crate) fn set_process_handle(handle: Handle) {
    G_PROCESS_HANDLE.store(handle, Ordering::Release);
}
/// Gets the current process handle
///
/// This value will only be set/useful if the current code is running through HBL
pub fn get_process_handle() -> Handle {
    // Acquire pairs with the Release store in `set_process_handle`, matching the
    // pattern used by the last-load-result accessors above (the previous Relaxed
    // load left the Release store unpaired).
    G_PROCESS_HANDLE.load(Ordering::Acquire)
}
#[atomic_enum]
/// Represents the applet types for HBL
#[derive(PartialEq, Eq, Default)]
#[repr(i32)]
pub enum AppletType {
    #[default]
    None = -2,
    Default = -1,
    Application = 0,
    SystemApplet = 1,
    LibraryApplet = 2,
    OverlayApplet = 3,
    SystemApplication = 4,
}
// AtomicAppletType is generated by #[atomic_enum]; plain value, Relaxed is sufficient.
static G_APPLET_TYPE: AtomicAppletType = AtomicAppletType::new(AppletType::Default);
// Called by the startup code when parsing the hbl ABI config entries.
pub(crate) fn set_applet_type(applet_type: AppletType) {
    G_APPLET_TYPE.store(applet_type, Relaxed);
}
/// Gets the current applet type (according to HBL)
///
/// This value will only be set/useful if the current code is running through HBL
pub fn get_applet_type() -> AppletType {
    G_APPLET_TYPE.load(Relaxed)
}
// Loader identification string provided by HBL (e.g. its name/version).
static G_LOADER_INFO: RwLock<&'static str> = RwLock::new("");
// Called by the startup code when parsing the hbl ABI config entries.
pub(crate) fn set_loader_info(loader_info: &'static str) {
    *G_LOADER_INFO.write() = loader_info;
}
/// Gets the loader information string, about HBL
///
/// This value will only be set/useful if the current code is running through HBL
pub fn get_loader_info() -> &'static str {
    *G_LOADER_INFO.read()
}
// Pointers into the HBL-provided buffers that hold the path/argv of the next NRO
// to launch; None when not running under HBL (or the entries were absent).
pub static G_NEXT_LOAD_PATH: Mutex<Option<&'static mut ArrayString<512>>> = Mutex::new(None);
pub static G_NEXT_LOAD_ARGV: Mutex<Option<&'static mut ArrayString<2048>>> = Mutex::new(None);
// Called by the startup code when parsing the hbl ABI config entries.
pub(crate) fn set_next_load_entry_ptr(
    next_load_path: Option<&'static mut ArrayString<512>>,
    next_load_argv: Option<&'static mut ArrayString<2048>>,
) {
    *G_NEXT_LOAD_PATH.lock() = next_load_path;
    *G_NEXT_LOAD_ARGV.lock() = next_load_argv;
}
/// Gets the next load path, AKA the path of the homebrew NRO which will be executed after this one exits
///
/// Returns a copy of the buffer contents, or `None` if the buffer was never provided.
///
/// This value will only be set/useful if the current code is running through HBL
pub fn get_next_load_path() -> Option<ArrayString<512>> {
    G_NEXT_LOAD_PATH
        .lock()
        .as_ref()
        .map(|s: &&mut ArrayString<512>| *(*s))
}
/// Gets the next load argv, AKA the argv of the homebrew NRO which will be executed after this one exits
///
/// Returns a copy of the buffer contents, or `None` if the buffer was never provided.
///
/// This value will only be set/useful if the current code is running through HBL
pub fn get_next_load_argv() -> Option<ArrayString<2048>> {
    G_NEXT_LOAD_ARGV
        .lock()
        .as_ref()
        .map(|s: &&mut ArrayString<2048>| *(*s))
}
/// Sets the next homebrew NRO (path and argv) to execute after this one exits
///
/// This will only make any effect if the current code is running through HBL
///
/// # Arguments
///
/// * `next_load_path`: NRO path
/// * `next_load_argv`: NRO argv
///
/// Returns a `(path_written, argv_written)` pair: each flag is `true` if the
/// corresponding HBL-provided buffer exists and was written, `false` if that buffer
/// was never supplied. Fails if either string does not fit its buffer.
pub fn set_next_load_entry(
    next_load_path: &'static str,
    next_load_argv: &'static str,
) -> Result<(bool, bool)> {
    Ok((
        {
            let mut path_handle = G_NEXT_LOAD_PATH.lock();
            if let Some(buffer) = path_handle.as_mut() {
                (*buffer).set_str(next_load_path)?;
                true
            } else {
                false
            }
        },
        {
            let mut argv_handle = G_NEXT_LOAD_ARGV.lock();
            if let Some(buffer) = argv_handle.as_mut() {
                (*buffer).set_str(next_load_argv)?;
                true
            } else {
                false
            }
        },
    ))
}
// Two 64-bit seed values supplied by HBL's RandomSeed config entry.
static G_RANDOM_SEED: RwLock<(u64, u64)> = RwLock::new((0, 0));
// Called by the startup code when parsing the hbl ABI config entries.
pub(crate) fn set_random_seed(seed: (u64, u64)) {
    *G_RANDOM_SEED.write() = seed;
}
/// Gets the random seed values sent by HBL
///
/// This values will only be set/useful if the current code is running through HBL
pub fn get_random_seed() -> (u64, u64) {
    *G_RANDOM_SEED.read()
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/elf.rs | src/elf.rs | //! ELF (aarch64) support and utils
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::Ordering::SeqCst;
use unwinding::custom_eh_frame_finder::{FrameInfo, FrameInfoKind};
pub mod mod0;
pub mod rc;
/// Represents ELF tags.
/// Cherry picked from [valid relocation types](https://github.com/cole14/rust-elf/blob/cdc67691a79a18995e74ce7b65682db4c59c260c/src/abi.rs#L817-1017).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(i64)]
#[allow(missing_docs)]
pub enum Tag {
    #[default]
    Null = 0,
    Needed = 1,
    PltRelSize = 2,
    Hash = 4,
    StrTab = 5,
    SymTab = 6,
    RelaOffset = 7,
    RelaSize = 8,
    RelaEntrySize = 9,
    SymEnt = 11,
    RelOffset = 17,
    RelSize = 18,
    RelEntrySize = 19,
    PltRel = 20,
    JmpRel = 23,
    InitArray = 25,
    FiniArray = 26,
    InitArraySize = 27,
    FiniArraySize = 28,
    RelaCount = 0x6FFFFFF9,
    RelCount = 0x6FFFFFFA,
}
/// Represents ELF relocation types.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(u32)]
#[allow(missing_docs)]
pub enum RelocationType {
    AArch64Abs64 = 257,
    AArch64GlobDat = 1025,
    AArch64JumpSlot = 1026,
    AArch64Relative = 1027,
}
/// Represents an ELF dynamic entry.
///
/// Matches the layout of `Elf64_Dyn` (tag + value-or-pointer union).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
#[allow(missing_docs)]
pub struct Dyn {
    pub tag: Tag,
    pub val_ptr: usize,
}
/// Represents an ELF info symbol.
///
/// Decoded view of the `r_info` field: low 32 bits are the relocation type, high 32
/// bits the symbol index.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(C)]
#[allow(missing_docs)]
pub struct InfoSymbol {
    pub relocation_type: RelocationType,
    pub symbol: u32,
}
/// Represents an ELF info value.
///
/// Union over the raw `r_info` value and its decoded [`InfoSymbol`] view; reading
/// either field requires `unsafe`.
#[derive(Copy, Clone)]
#[repr(C)]
#[allow(missing_docs)]
pub union Info {
    pub value: u64,
    pub symbol: InfoSymbol,
}
/// Represents an ELF Rel type.
///
/// Matches the layout of `Elf64_Rel` (no addend).
#[derive(Copy, Clone)]
#[repr(C)]
#[allow(missing_docs)]
pub struct Rel {
    pub offset: usize,
    pub info: Info,
}
/// Represents an ELF Rela type.
///
/// Matches the layout of `Elf64_Rela` (with explicit addend).
#[derive(Copy, Clone)]
#[repr(C)]
#[allow(missing_docs)]
pub struct Rela {
    pub offset: usize,
    pub info: Info,
    pub addend: i64,
}
/// Relocates a base address with its corresponding [`Dyn`] reference.
///
/// Walks the dynamic section until [`Tag::Null`], collecting the REL/RELA table
/// offsets, entry sizes and counts, then applies every `AArch64Relative` relocation
/// by writing `base_address` (plus the addend, for RELA entries) at each relocation
/// offset. All other relocation types are ignored.
///
/// # Arguments:
///
/// * `base_address`: The base address to relocate.
/// * `start_dyn`: Pointer to the start of the [`Dyn`] list.
///
/// # Safety
///
/// The caller is responsible for providing valid pointers for `base_address` and `start_dyn`
pub unsafe fn relocate_with_dyn(base_address: *mut u8, start_dyn: *const Dyn) {
    unsafe {
        // Collected from the dynamic section; each stays None if its tag is absent.
        let mut rel_offset_v: Option<usize> = None;
        let mut rel_entry_size_v: Option<usize> = None;
        let mut rel_count_v: Option<usize> = None;
        let mut rela_offset_v: Option<usize> = None;
        let mut rela_entry_size_v: Option<usize> = None;
        let mut rela_count_v: Option<usize> = None;
        let mut cur_dyn = start_dyn;
        loop {
            match (*cur_dyn).tag {
                Tag::Null => break,
                Tag::RelOffset => rel_offset_v = Some((*cur_dyn).val_ptr),
                Tag::RelEntrySize => rel_entry_size_v = Some((*cur_dyn).val_ptr),
                Tag::RelCount => rel_count_v = Some((*cur_dyn).val_ptr),
                Tag::RelaOffset => rela_offset_v = Some((*cur_dyn).val_ptr),
                Tag::RelaEntrySize => rela_entry_size_v = Some((*cur_dyn).val_ptr),
                Tag::RelaCount => rela_count_v = Some((*cur_dyn).val_ptr),
                _ => { /* ignore */ }
            };
            cur_dyn = cur_dyn.add(1);
        }
        // REL entries: no addend, so the relocated value is just the base address.
        if let (Some(rel_offset), Some(rel_count)) = (rel_offset_v, rel_count_v) {
            // Entry stride is taken from the dynamic section when present; the
            // struct size is only a fallback.
            let rel_entry_size = rel_entry_size_v.unwrap_or(core::mem::size_of::<Rel>());
            let rel_base = base_address.add(rel_offset);
            for i in 0..rel_count {
                let rel = rel_base.add(i * rel_entry_size) as *const Rel;
                if (*rel).info.symbol.relocation_type == RelocationType::AArch64Relative {
                    let relocation_offset = base_address.add((*rel).offset) as *mut *const u8;
                    *relocation_offset = base_address;
                }
            }
        }
        // RELA entries: relocated value is base address plus the signed addend.
        if let (Some(rela_offset), Some(rela_count)) = (rela_offset_v, rela_count_v) {
            let rela_entry_size = rela_entry_size_v.unwrap_or(core::mem::size_of::<Rela>());
            let rela_base = base_address.add(rela_offset);
            for i in 0..rela_count {
                let rela = rela_base.add(i * rela_entry_size) as *const Rela;
                if (*rela).info.symbol.relocation_type == RelocationType::AArch64Relative {
                    let relocation_offset = base_address.add((*rela).offset) as *mut *const u8;
                    *relocation_offset = base_address.offset((*rela).addend as isize);
                }
            }
        }
    }
}
/// A struct containing a pointer sized int, representing a pointer to the start of the eh_frame_hdr elf section.
/// This is obviously not a great option to use with Rust's upcoming strict/exposed providence APIs, but works fine here as
/// the Switch has a single address space and the memory will have a static lifetime that is longer than the currently running code.
#[derive(Debug)]
pub(crate) struct EhFrameHdrPtr(AtomicUsize);
impl EhFrameHdrPtr {
    // Starts as 0 (= "not set"); `find` treats 0 as "no unwind info available".
    pub(crate) const fn new() -> Self {
        Self(AtomicUsize::new(0))
    }
    /// Stores the pointer to the EhFrameHdr section
    pub(crate) fn set(&self, val: *const u8) {
        self.0.store(val.expose_provenance(), SeqCst);
    }
}
// SAFETY: the inner AtomicUsize provides the required synchronization for
// cross-thread access.
unsafe impl Sync for EhFrameHdrPtr {}
unsafe impl unwinding::custom_eh_frame_finder::EhFrameFinder for EhFrameHdrPtr {
    // Ignores `_pc`: there is a single module, so the same eh_frame_hdr applies to
    // every program counter.
    fn find(&self, _pc: usize) -> Option<unwinding::custom_eh_frame_finder::FrameInfo> {
        match self.0.load(SeqCst) {
            0 => None,
            ptr => Some(FrameInfo {
                text_base: Some(crate::rrt0::TEXT_BASE_ADDRESS.load(SeqCst)),
                kind: FrameInfoKind::EhFrameHdr(ptr),
            }),
        }
    }
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/mii.rs | src/mii.rs | //! Mii Support
use crate::result::*;
use crate::sync::{Mutex, MutexGuard};
pub use crate::service::mii::*;
// Global Mii service objects; None until `initialize` succeeds, None again after
// `finalize`.
static G_STATIC_SRV: Mutex<Option<StaticService>> = Mutex::new(None);
static G_DB_SRV: Mutex<Option<MiiDatabase>> = Mutex::new(None);
/// Initializes the Mii service objects
///
/// Opens the [`StaticService`] and derives a [`MiiDatabase`] from it
/// (with [`SpecialKeyCode::Normal`]), storing both globally.
pub fn initialize() -> Result<()> {
    let static_service = crate::service::new_service_object::<StaticService>()?;
    let db_service = static_service.get_database_service(SpecialKeyCode::Normal)?;
    *G_STATIC_SRV.lock() = Some(static_service);
    *G_DB_SRV.lock() = Some(db_service);
    Ok(())
}
/// Gets access to the global [`StaticService`] shared object instance
///
/// The guarded value is `None` unless [`initialize`] has been called.
pub fn get_static_service<'a>() -> MutexGuard<'a, Option<StaticService>> {
    G_STATIC_SRV.lock()
}
/// Gets access to the global [`MiiDatabase`] shared object instance
///
/// The guarded value is `None` unless [`initialize`] has been called.
pub fn get_mii_database<'a>() -> MutexGuard<'a, Option<MiiDatabase>> {
    G_DB_SRV.lock()
}
// Drops the global service objects (database first, then the static service).
pub(crate) fn finalize() {
    *G_DB_SRV.lock() = None;
    *G_STATIC_SRV.lock() = None;
}
/// Gets the Mii author ID for the current user.
///
/// Opens a fresh system-settings service object per call rather than using the
/// globals above, so it works without [`initialize`].
#[inline]
pub fn get_device_id() -> Result<CreateId> {
    use crate::service::set::{ISystemSettingsClient, SystemSettingsService};
    crate::service::new_service_object::<SystemSettingsService>()?.get_mii_author_id()
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/console.rs | src/console.rs | //! Console Services
/// Virtual TTY functionality
///
/// The types contained are used to create a tty-like environment, that emulate an
/// ANSI console (e.g. by wrapping the canvas in a [`embedded_term::TextOnGraphic`]).
#[cfg(feature = "vty")]
pub mod vty {
use alloc::boxed::Box;
use embedded_graphics_core::prelude::OriginDimensions;
use crate::gpu::canvas::{AlphaBlend, CanvasManager, RGBA8, sealed::CanvasColorFormat};
use crate::result::Result;
use embedded_graphics_core::Pixel;
pub use embedded_graphics_core::draw_target::DrawTarget;
pub use embedded_graphics_core::geometry::{Dimensions, Point, Size};
pub use embedded_graphics_core::pixelcolor::{Rgb888, RgbColor};
pub use embedded_graphics_core::primitives::rectangle::Rectangle;
/// Type alias for a drawable text-buffer backed console.
///
/// The console state is stored in a text buffer, and draws are pushed through a Canvas
/// implementation that keeps a persistant pixel buffer between draw calls.
pub type TextBufferConsole =
embedded_term::Console<embedded_term::TextOnGraphic<PersistentBufferedCanvas>>;
    /// Canvas/Framebuffer type that keeps a single buffer that is
    /// flushed to the display on change.
    pub struct PersistentBufferedCanvas {
        // Row-major pixel buffer covering the whole surface (width * height entries).
        buffer: Box<[Rgb888]>,
        // Underlying canvas the buffer is rendered through.
        canvas: CanvasManager<Rgb888>,
    }
    impl PersistentBufferedCanvas {
        /// Wraps an existing `CanvasManager` instance
        ///
        /// The persistent buffer is sized to the canvas surface and initialized to black.
        #[inline(always)]
        pub fn new(canvas: CanvasManager<Rgb888>) -> Self {
            Self {
                buffer: vec![
                    Rgb888::new(0, 0, 0);
                    (canvas.surface.width() * canvas.surface.height()) as usize
                ]
                .into_boxed_slice(),
                canvas,
            }
        }
    }
    // Adapts embedded-graphics' Rgb888 to the canvas color-format trait by storing it
    // in RGBA8 surfaces with a fully-opaque alpha channel.
    impl CanvasColorFormat for Rgb888 {
        type RawType = u32;
        // Reuses the RGBA8 surface/pixel formats; alpha is synthesized on conversion.
        const COLOR_FORMAT: crate::gpu::ColorFormat = <RGBA8 as CanvasColorFormat>::COLOR_FORMAT;
        const PIXEL_FORMAT: crate::gpu::PixelFormat = <RGBA8 as CanvasColorFormat>::PIXEL_FORMAT;
        #[inline(always)]
        fn blend_with(self, other: Self, blend: AlphaBlend) -> Self {
            // No alpha channel: "blending" just selects source or destination.
            if matches!(blend, AlphaBlend::Destination) {
                other
            } else {
                self
            }
        }
        #[inline(always)]
        fn from_raw(raw: Self::RawType) -> Self {
            // Decode through RGBA8 and drop the alpha component.
            let intermediate = RGBA8::from_bits(raw);
            Rgb888::new(intermediate.r(), intermediate.g(), intermediate.b())
        }
        #[inline(always)]
        fn new() -> Self {
            Rgb888::new(0, 0, 0)
        }
        #[inline(always)]
        fn new_scaled(r: u8, g: u8, b: u8, _a: u8) -> Self {
            // Alpha is ignored for this opaque format.
            Rgb888::new(r, g, b)
        }
        #[inline(always)]
        fn scale_alpha(self, _alpha: f32) -> Self {
            // No alpha channel to scale.
            self
        }
        #[inline(always)]
        fn to_raw(self) -> Self::RawType {
            // Encode as fully-opaque RGBA8.
            RGBA8::new_scaled(self.r(), self.g(), self.b(), 255).to_raw()
        }
    }
impl OriginDimensions for PersistentBufferedCanvas {
    /// Reports the pixel dimensions of the underlying canvas surface.
    #[inline(always)]
    fn size(&self) -> Size {
        let surface = &self.canvas.surface;
        Size {
            width: surface.width(),
            height: surface.height(),
        }
    }
}
impl DrawTarget for PersistentBufferedCanvas {
    type Color = Rgb888;
    type Error = crate::result::ResultCode;

    fn draw_iter<I>(&mut self, pixels: I) -> Result<()>
    where
        I: IntoIterator<Item = Pixel<Self::Color>>,
    {
        let width = self.canvas.surface.width() as i32;
        let height = self.canvas.surface.height() as i32;
        for Pixel(Point { x, y }, color) in pixels.into_iter() {
            // The `DrawTarget` contract requires pixels outside the drawable
            // area to be ignored; previously such pixels would index out of
            // range (panic) or alias a neighboring row. This matches the
            // bounds check already performed in `fill_contiguous`.
            if (0..width).contains(&x) && (0..height).contains(&y) {
                self.buffer[(x + y * width) as usize] = color;
            }
        }
        self.canvas.render_prepared_buffer(self.buffer.as_ref())?;
        self.canvas.wait_vsync_event(None)
    }

    fn fill_contiguous<I>(&mut self, area: &Rectangle, colors: I) -> Result<()>
    where
        I: IntoIterator<Item = Self::Color>,
    {
        let Rectangle {
            top_left: Point { x, y },
            size: Size { width, height },
        } = *area;
        let mut color_iter = colors.into_iter().peekable();
        if color_iter.peek().is_none() {
            // No colors were provided, so there is nothing to draw or flush.
            return Ok(());
        }
        for y in y..(y + height as i32) {
            for x in x..(x + width as i32) {
                // One color is consumed per cell of the rectangle, but only
                // cells inside the surface are stored.
                if let Some(color) = color_iter.next()
                    && (0..self.canvas.surface.height().cast_signed()).contains(&y)
                    && (0..self.canvas.surface.width().cast_signed()).contains(&x)
                {
                    self.buffer[(x + y * self.canvas.surface.width() as i32) as usize] = color;
                }
            }
        }
        self.canvas.render_prepared_buffer(self.buffer.as_ref())?;
        self.canvas.wait_vsync_event(None)
    }

    fn fill_solid(&mut self, area: &Rectangle, color: Self::Color) -> Result<()> {
        let Rectangle {
            top_left: Point { x, y },
            size: Size { width, height },
        } = *area;
        let surface_width = self.canvas.surface.width() as i32;
        let surface_height = self.canvas.surface.height() as i32;
        // Clamp the fill rectangle to the surface instead of indexing blindly;
        // an area hanging off the surface previously panicked or wrapped rows.
        for y in y.max(0)..(y + height as i32).min(surface_height) {
            for x in x.max(0)..(x + width as i32).min(surface_width) {
                self.buffer[(x + y * surface_width) as usize] = color;
            }
        }
        self.canvas.render_prepared_buffer(self.buffer.as_ref())?;
        self.canvas.wait_vsync_event(None)
    }
}
}
#[cfg(feature = "console")]
pub mod scrollback {
//! Console types that are really just text buffers that you can push data into.
//!
//! These types are useful if you want to log data to the screen as text, but can't do edits or backtracking
//! like the vty module.
use core::num::NonZeroU16;
use crate::{
gpu::{
self,
canvas::{Canvas, CanvasManager, RGBA4},
},
result::Result,
sync::RwLock,
};
use crate::sync::Mutex;
use crate::thread::{Builder, JoinHandle};
use alloc::{
collections::vec_deque::VecDeque,
string::{String, ToString},
sync::Arc,
};
/// A channel-like object for sending strings to the console for display.
///
/// When all clones of this object are dropped, the strong count of the inner `Arc` will drop to zero and the `Weak`
/// handle in the background thread will no longer be able to upgrade and read the data. This will cause the background thread to exit.
#[derive(Clone)]
pub struct BackgroundWriter {
    // Pending messages, drained by the background render thread each frame.
    inner: Arc<Mutex<VecDeque<String>>>,
}
impl BackgroundWriter {
    /// Create a new console to live in a background thread.
    ///
    /// Spawns a "console" thread owning a [`ScrollbackConsole`]; the thread
    /// drains queued messages, redraws, and waits for vsync in a loop, exiting
    /// once every [`BackgroundWriter`] clone has been dropped.
    pub fn new(
        gpu_ctx: Arc<RwLock<gpu::Context>>,
        history_limit: u16,
        line_max_chars: NonZeroU16,
        line_wrap: bool,
        text_color: Option<RGBA4>,
        scale: u8,
    ) -> Result<Self> {
        let mut console = ScrollbackConsole::new(
            gpu_ctx,
            history_limit,
            line_max_chars,
            line_wrap,
            text_color,
            scale,
        )?;
        // The Arc<Mutex<VecDeque>> acts as a poor-man's channel: writers push
        // through the strong handle, the thread reads via a Weak.
        let fake_channel = Arc::new(Mutex::new(VecDeque::new()));
        let _background_thread: JoinHandle<Result<()>> = {
            let receiver = Arc::downgrade(&fake_channel);
            Builder::new()
                .name("console")
                // NOTE(review): 0x1000 (4 KiB) is a very small stack — confirm
                // it is enough for `ScrollbackConsole::draw`.
                .stack_size(0x1000)
                .spawn(move || {
                    // Upgrade failing means all writer clones are gone: exit.
                    while let Some(reader) = receiver.upgrade() {
                        {
                            // Inner scope bounds the lock so it is released
                            // before the (slow) draw/vsync work below.
                            let mut reader = reader.lock();
                            while let Some(message) = reader.pop_front() {
                                console.write(message);
                            }
                        }
                        // Redraws every frame, even with no new messages.
                        console.draw()?;
                        console.wait_vsync_event(None)?;
                    }
                    Ok(())
                })?
        };
        Ok(Self {
            inner: fake_channel,
        })
    }
    /// Writes a string into the cross-thread buffer.
    #[inline(always)]
    pub fn write(&self, message: impl ToString) {
        self.inner.lock().push_back(message.to_string());
    }
}
/// This console creates a full-screen layer that will just scroll through provided strings
pub struct ScrollbackConsole {
    // Render target for the console layer.
    canvas: CanvasManager<RGBA4>,
    /// The foreground color of the text
    pub text_color: RGBA4,
    /// The maximum lines of text to keep, excluding the active line
    pub history_limit: u16,
    /// The maximum number of chars per line of text.
    pub line_max_chars: u16,
    /// Controls whether the console will automatically wrap to the next line
    pub line_wrap: bool,
    // Committed lines, oldest first; length capped at `history_limit`.
    scrollback_history: alloc::collections::VecDeque<String>,
    // Lines scrolled back from the newest entry (0 = live view).
    scrollback_history_offset: u16,
    // In-progress line, not yet terminated by a newline.
    current_line: String,
    /// Scale of the text when drawing
    pub scale: u8,
}
// SAFETY: NOTE(review) — these impls assert that `CanvasManager<RGBA4>` is
// safe to move to and share across threads; that is not provable from this
// module, so confirm `CanvasManager`'s internals before relying on them.
unsafe impl Send for ScrollbackConsole {}
unsafe impl Sync for ScrollbackConsole {}
impl ScrollbackConsole {
/// Create a new instance of the console.
#[inline(always)]
pub fn new(
    gpu_ctx: Arc<RwLock<gpu::Context>>,
    history_limit: u16,
    line_max_chars: NonZeroU16,
    line_wrap: bool,
    text_color: Option<RGBA4>,
    scale: u8,
) -> Result<Self> {
    let canvas = nx::gpu::canvas::CanvasManager::new_stray(
        gpu_ctx,
        Default::default(),
        3,
        gpu::BlockLinearHeights::OneGob,
    )?;
    // A line of 8px-wide glyphs can never hold more characters than fit in
    // the surface width, so clamp the requested maximum accordingly.
    let displayable_chars = canvas.surface.width() as u16 / 8;
    Ok(Self {
        // Defaults to fully opaque white text.
        text_color: text_color.unwrap_or(RGBA4::from_bits(u16::MAX)),
        history_limit,
        line_max_chars: line_max_chars.get().min(displayable_chars),
        line_wrap,
        scrollback_history: VecDeque::with_capacity(history_limit as _),
        scrollback_history_offset: 0,
        current_line: String::new(),
        canvas,
        scale,
    })
}
/// Attempts to scroll up through the scroll buffer.
///
/// Only takes effect if there are more lines of text than can be displayed on the screen.
#[inline(always)]
pub fn scroll_up(&mut self) {
    let max_line_count = self.max_line_count() as usize;
    let history_len = self.scrollback_history.len();
    // `history_len + 1 > max_line_count` is the same condition as the former
    // `history_len > max_line_count - 1`, but cannot underflow when the
    // surface is too small to display any line (`max_line_count == 0`).
    if history_len + 1 > max_line_count {
        // Never scroll back past the oldest stored line.
        self.scrollback_history_offset = self
            .scrollback_history_offset
            .saturating_add(1)
            .min(history_len as _);
    }
}

/// Attempts to scroll down through the scroll buffer
///
/// Only takes effect if there are more lines of text than can be displayed on the screen,
/// and the current scroll location is not at the most recent line.
#[inline(always)]
pub fn scroll_down(&mut self) {
    // Offset 0 is the live view; saturate there.
    self.scrollback_history_offset = self.scrollback_history_offset.saturating_sub(1);
}
// Appends `text` to the current line, applying truncation or wrapping, and
// commits the current line to history when `commit` is set (i.e. the input
// segment was newline-terminated).
//
// NOTE(review): `truncate`/`split_off` index by *bytes* and panic on a
// non-char boundary — this assumes ASCII input (rendering also uses
// `draw_ascii_bitmap_text`); confirm callers never pass multi-byte UTF-8.
fn push_line(&mut self, text: &str, commit: bool) {
    self.current_line.push_str(text);
    // Effective per-line capacity: the configured maximum, further limited by
    // how many `8 * scale`-pixel glyphs fit in the surface width (4px margin).
    let real_max_len = (self.line_max_chars as u32)
        .min((self.canvas.surface.width() - 4) / (8 * self.scale as u32))
        as usize;
    if !self.line_wrap && self.current_line.len() > real_max_len {
        // No wrapping: truncate and mark the cut with a trailing '>'.
        self.current_line.truncate(real_max_len - 1);
        self.current_line.push('>');
    } else {
        // Wrapping: commit full-width slices until the remainder fits.
        while self.current_line.len() > real_max_len {
            let mut temp = core::mem::take(&mut self.current_line);
            let new_line = temp.split_off(real_max_len);
            self.push_history_line(temp);
            self.current_line = new_line;
        }
    }
    if commit {
        let commit_str = core::mem::take(&mut self.current_line);
        self.push_history_line(commit_str);
    }
}
/// Writes a pre-formatted line directly to the history, bypassing the current line
///
/// In debug builds, panics if the line is longer than the maximum displayable
/// characters in a line, or if the string contains a newline character.
#[inline(always)]
pub fn push_history_line(&mut self, line: String) {
    // NOTE(review): unlike `push_line`, this limit ignores `scale` — confirm
    // whether the two should agree.
    let real_max_len =
        self.line_max_chars
            .min(self.canvas.surface.width() as u16 / 8) as usize;
    debug_assert!(
        line.find('\n').is_none(),
        "History lines MUST NOT contain a newline character"
    );
    debug_assert!(
        line.len() <= real_max_len,
        // Fixed previously garbled assertion message.
        "History lines must not be longer than the max char count"
    );
    // Evict the oldest line once the configured history size is reached.
    if self.scrollback_history.len() == self.history_limit as _ {
        self.scrollback_history.pop_front();
    }
    self.scrollback_history.push_back(line);
    // When scrolled back, bump the offset so the on-screen view stays anchored
    // to the same lines instead of shifting as new lines arrive.
    if self.scrollback_history_offset != 0 {
        let history_len = self.scrollback_history.len();
        self.scrollback_history_offset = self
            .scrollback_history_offset
            .saturating_add(1)
            .min(history_len as _);
    }
}
/// Writes a string to the console buffer.
///
/// Each `'\n'` commits the line it terminates; text after the final newline
/// (or the whole string, if it has none) stays in the uncommitted current line.
pub fn write(&mut self, text: impl AsRef<str>) {
    let mut segments = text.as_ref().split('\n').peekable();
    while let Some(segment) = segments.next() {
        // Every segment except the last was terminated by a newline.
        let terminated = segments.peek().is_some();
        self.push_line(segment, terminated);
    }
}
// Number of text rows that fit on the surface: each glyph row is
// `10 * scale` pixels tall, with a 4-pixel margin reserved.
// NOTE(review): divides by `10 * scale` — a `scale` of 0 would panic; confirm
// callers guarantee a non-zero scale.
fn max_line_count(&self) -> u32 {
    // saturating_sub avoids an underflow (debug panic / huge wrapped count)
    // on a degenerate surface smaller than the 4-pixel margin.
    self.canvas.surface.height().saturating_sub(4) / (10 * self.scale as u32)
}
/// Renders the console to the screen.
pub fn draw(&mut self) -> Result<()> {
    let max_line_count = self.max_line_count();
    self.canvas.render(Some(RGBA4::new()), |canvas| {
        // Leave a small top margin before the first glyph row.
        let mut line_y = 2 + 8 * self.scale as i32;
        // In the live view one row is reserved for the in-progress line; when
        // scrolled back we don't display it, so one extra history line fits.
        let max_history_lines = if self.scrollback_history_offset == 0 {
            // saturating_sub: a surface too small for any line previously
            // underflowed here (`max_line_count - 1` with a count of 0).
            max_line_count.saturating_sub(1)
        } else {
            max_line_count
        };
        // First history index to show: newest lines minus the scrollback.
        let history_print_offset = self
            .scrollback_history
            .len()
            .saturating_sub(max_history_lines as usize)
            .saturating_sub(self.scrollback_history_offset as usize);
        let mut history_lines_printed = 0usize;
        // Plain loop instead of the former side-effecting `.map().count()`.
        for line in self
            .scrollback_history
            .iter()
            .skip(history_print_offset)
            .take(max_history_lines as usize)
        {
            canvas.draw_ascii_bitmap_text(
                line,
                self.text_color,
                self.scale as u32,
                2,
                line_y,
                crate::gpu::canvas::AlphaBlend::None,
            );
            line_y += 10 * self.scale as i32;
            history_lines_printed += 1;
        }
        // Draw the uncommitted line in the remaining row, if one is free.
        if history_lines_printed < max_line_count as usize {
            canvas.draw_ascii_bitmap_text(
                &self.current_line,
                self.text_color,
                self.scale as u32,
                2,
                line_y,
                crate::gpu::canvas::AlphaBlend::None,
            );
        }
        Ok(())
    })
}
/// Wait for a vsync event to ensure that the previously submitted frame has been fully rendered to the display.
///
/// Forwards directly to the underlying [`CanvasManager`]'s vsync wait.
#[inline(always)]
pub fn wait_vsync_event(&self, timeout: Option<i64>) -> Result<()> {
    self.canvas.wait_vsync_event(timeout)
}
}
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/la.rs | src/la.rs | //! Library applet support and utils
use crate::arm;
use crate::ipc::sf;
use crate::result::*;
use crate::service::applet;
use crate::service::applet::ILibraryAppletAccessorClient;
use crate::service::applet::ILibraryAppletCreatorClient;
use crate::service::applet::{IStorageAccessorClient, IStorageClient, Storage};
use crate::service::sm::rc;
use crate::svc;
use crate::sync::{Mutex, MutexGuard};
use crate::wait;
use alloc::boxed::Box;
use applet::LibraryAppletCreator;
use core::mem as cmem;
/// Represents the common arguments layout sent as starting input by/to all library applets
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct CommonArguments {
    /// Represents the [`CommonArguments`] version
    ///
    /// Usually value `1` is used
    pub version: u32,
    /// [`CommonArguments`] size (essentially the [`size_of`][`cmem::size_of`] this struct)
    pub size: u32,
    /// Represents the API version of the specific library applet being launched
    pub la_api_version: u32,
    /// Represents the theme color for the library applet to use
    pub theme_color: u32,
    /// Represents whether the library applet should make a startup sound when launched
    pub play_startup_sound: bool,
    /// Padding bytes (keeps `system_tick` 8-byte aligned in the `repr(C)` layout)
    pub pad: [u8; 7],
    /// Represents the system tick of when the library applet gets launched
    pub system_tick: u64,
}
/// Represents a wrapper type for using library applets
pub struct LibraryAppletHolder {
    // IPC accessor used to push input, start the applet and pop its output.
    accessor: Box<dyn ILibraryAppletAccessorClient>,
    // Event handle signaled when the applet's state changes (e.g. it exits);
    // closed in `Drop`.
    state_changed_event_handle: svc::Handle,
}
impl LibraryAppletHolder {
    /// Creates a [`LibraryAppletHolder`] from an existing [`ILibraryAppletAccessorClient`] shared object
    ///
    /// This shouldn't be manually created unless the accessor object was obtained manually (see [`create_library_applet`])
    pub fn new(mut accessor: Box<dyn ILibraryAppletAccessorClient>) -> Result<Self> {
        let state_changed_event_h = accessor.get_applet_state_changed_event()?;
        Ok(Self {
            accessor,
            state_changed_event_handle: state_changed_event_h.handle,
        })
    }

    /// Gets a reference to the underlying [`ILibraryAppletAccessorClient`] shared object
    #[inline]
    pub fn get_accessor(&self) -> &dyn ILibraryAppletAccessorClient {
        &*self.accessor
    }

    /// Gets a mutable reference to the underlying [`ILibraryAppletAccessorClient`] shared object
    #[inline]
    pub fn get_accessor_mut(&mut self) -> &mut dyn ILibraryAppletAccessorClient {
        // The return type was previously `&dyn ...`, silently demoting the
        // `&mut` reborrow to a shared reference and making this getter useless
        // for mutation; `&mut dyn` matches the name and `&mut self` receiver.
        &mut *self.accessor
    }

    /// Pushes an input [`IStorageClient`] shared object to the library applet
    #[inline]
    pub fn push_in_data_storage(&mut self, storage: Storage) -> Result<()> {
        self.accessor.push_in_data(storage)
    }

    /// Pushes input data to the library applet
    ///
    /// This is a wrapper which creates an [`IStorageClient`] object with the given value and pushes it
    pub fn push_in_data<T: Copy>(&mut self, t: T) -> Result<()> {
        let t_st = create_write_storage(t)?;
        self.push_in_data_storage(t_st)
    }

    /// Starts the library applet
    #[inline]
    pub fn start(&mut self) -> Result<()> {
        self.accessor.start()
    }

    /// Waits until the library applet's state-changed event signals
    ///
    /// This effectively waits until the library applet exits or the timeout expires
    #[inline]
    pub fn join(&mut self, timeout: Option<i64>) -> Result<()> {
        // `None` maps to -1, i.e. wait without a timeout.
        wait::wait_handles(&[self.state_changed_event_handle], timeout.unwrap_or(-1))?;
        Ok(())
    }

    /// Pops an output [`IStorageClient`] shared object from the library applet
    #[inline]
    pub fn pop_out_data_storage(&mut self) -> Result<Storage> {
        self.accessor.pop_out_data()
    }

    /// Pops output data from the library applet
    ///
    /// This is a wrapper which pops an [`IStorageClient`] object and reads its data (reads [`size_of`][`cmem::size_of`] `O` bytes and returns that data)
    pub fn pop_out_data<O: Copy>(&mut self) -> Result<O> {
        let mut o_st = self.pop_out_data_storage()?;
        read_storage(&mut o_st)
    }
}
impl Drop for LibraryAppletHolder {
    /// Drops the [`LibraryAppletHolder`], closing the [`ILibraryAppletAccessorClient`] object instance and the acquired state-changed event handle
    fn drop(&mut self) {
        // Close failure is deliberately ignored: a destructor has no way to
        // report it. The boxed accessor is then dropped by normal field drop.
        let _ = svc::close_handle(self.state_changed_event_handle);
    }
}
// Global creator object used by the free functions below; `None` until
// `initialize` is called.
static G_CREATOR: Mutex<Option<LibraryAppletCreator>> = Mutex::new(None);

/// Initializes library applet support with the provided [`LibraryAppletCreator`]
///
/// Calling this again replaces (and drops) any previously stored creator.
///
/// # Arguments
///
/// * `creator`: The shared object to use globally
#[inline]
pub fn initialize(creator: LibraryAppletCreator) {
    *G_CREATOR.lock() = Some(creator);
}
/// Gets whether library applet support was initialized
#[inline]
pub fn is_initialized() -> bool {
    matches!(*G_CREATOR.lock(), Some(_))
}

/// Finalizes library applet support, dropping the inner [`ILibraryAppletCreatorClient`] shared object instance. Gets run in the rrt0 runtime after the main function runs.
#[inline]
pub(crate) fn finalize() {
    // Taking the value out drops the previous creator (if any) immediately.
    let _ = G_CREATOR.lock().take();
}

/// Gets access to the global [`ILibraryAppletCreatorClient`] shared object instance
#[inline]
pub fn get_creator<'a>() -> MutexGuard<'a, Option<LibraryAppletCreator>> {
    G_CREATOR.lock()
}
/// Wrapper for reading data from a [`IStorageClient`] shared object
///
/// This will try to read [`size_of`][`cmem::size_of`] `T` bytes from the storage and return them as the expected value
///
/// # Arguments
///
/// * `storage`: The storage to read from
pub fn read_storage<T: Copy>(storage: &mut Storage) -> Result<T> {
    // SAFETY: NOTE(review) — `zeroed::<T>()` is only sound if the all-zero bit
    // pattern is a valid `T`; `T: Copy` does not guarantee that (e.g. `&U` is
    // Copy). Confirm callers only use plain-old-data types here. The value is
    // also returned as-is even if `read` fills fewer than `size_of::<T>()` bytes.
    let mut t = unsafe { cmem::zeroed::<T>() };
    let storage_accessor = storage.open()?;
    storage_accessor.read(0, sf::Buffer::from_other_mut_var(&mut t))?;
    Ok(t)
}
/// Wrapper for writing data to a [`IStorageClient`] shared object
///
/// This will try to write [`size_of`][`cmem::size_of`] `T` bytes to the storage from the given value
///
/// # Arguments
///
/// * `storage`: The storage to write to
/// * `t`: The value to write
pub fn write_storage<T: Copy>(storage: &mut Storage, t: T) -> Result<()> {
    result_return_unless!(is_initialized(), super::rc::ResultNotInitialized);
    // The accessor only lives for this one write.
    storage.open()?.write(0, sf::Buffer::from_other_var(&t))?;
    Ok(())
}
/// Wrapper for creating a [`IStorageClient`] shared object from the given value
///
/// This will fail with [`ResultNotInitialized`][`super::rc::ResultNotInitialized`] if library applet support isn't initialized
///
/// This will create a [`IStorageClient`] object using the global [`ILibraryAppletCreatorClient`] object and write the given value to it
///
/// # Arguments
///
/// * `t`: The value to write
pub fn create_write_storage<T: Copy>(t: T) -> Result<Storage> {
    result_return_unless!(is_initialized(), super::rc::ResultNotInitialized);
    // Hold the creator lock only while allocating the storage.
    let mut storage = {
        let creator_guard = get_creator();
        let creator = creator_guard.as_ref().ok_or(rc::ResultNotInitialized::make())?;
        creator.create_storage(cmem::size_of::<T>())?
    };
    write_storage(&mut storage, t)?;
    Ok(storage)
}
/// Creates a [`LibraryAppletHolder`] from the given library applet params
///
/// This automatically sets the [`CommonArguments`] `system_tick` value to the current system tick and pushes it as input using [`push_in_data`][`LibraryAppletHolder::push_in_data`]
///
/// # Arguments
///
/// * `id`: The [`AppletId`][`applet::AppletId`] of the library applet to create
/// * `mode`: The [`LibraryAppletMode`][`applet::LibraryAppletMode`] to create the library applet with
/// * `common_args`: The library applet-specific [`CommonArguments`] to send as input
pub fn create_library_applet(
    id: applet::AppletId,
    mode: applet::LibraryAppletMode,
    mut common_args: CommonArguments,
) -> Result<LibraryAppletHolder> {
    result_return_unless!(is_initialized(), super::rc::ResultNotInitialized);
    // Hold the creator lock only while opening the accessor session.
    let accessor = {
        let creator_guard = get_creator();
        let creator = creator_guard.as_ref().ok_or(rc::ResultNotInitialized::make())?;
        creator.create_library_applet(id, mode)?
    };
    let mut holder = LibraryAppletHolder::new(Box::new(accessor))?;
    // Stamp the launch tick right before sending the common arguments.
    common_args.system_tick = arm::get_system_tick();
    holder.push_in_data(common_args)?;
    Ok(holder)
}
/// Wrapper to create, launch and wait for a library applet, expecting simple input and output data
///
/// The mode used (since all simple library applets expect it) is [`LibraryAppletMode::AllForeground`][`applet::LibraryAppletMode::AllForeground`]
///
/// Note that this won't be useful, for instance, with library applets taking interactive in/out data, like [`AppletId::LibraryAppletSwkbd`][`applet::AppletId::LibraryAppletSwkbd`]
///
/// # Arguments
///
/// * `id`: The [`AppletId`][`applet::AppletId`] of the library applet to create
/// * `common_args`: The library applet-specific [`CommonArguments`] to send as input
/// * `input`: The only input data to send after the [`CommonArguments`]
pub fn launch_wait_library_applet<I: Copy, O: Copy>(
    id: applet::AppletId,
    common_args: CommonArguments,
    input: I,
    timeout: Option<i64>,
) -> Result<O> {
    let mut applet =
        create_library_applet(id, applet::LibraryAppletMode::AllForeground, common_args)?;
    // Push the applet-specific input, run it to completion, then read back
    // the single output value it produced.
    applet.push_in_data(input)?;
    applet.start()?;
    applet.join(timeout)?;
    applet.pop_out_data()
}
// TODO: specific library applet implementations in submodules (err, psel, swkbd, etc.)
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/socket.rs | src/socket.rs | //! Implementation of the Rust libstd TCP/UDP APIs, and re-exports of the raw bsd sockets API.
use core::alloc::Layout;
use core::ops::Deref;
use core::ops::DerefMut;
use core::sync::atomic::AtomicU16;
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::Ordering;
use alloc::alloc::Allocator;
use alloc::alloc::Global;
use alloc::boxed::Box;
use alloc::vec::Vec;
use crate::ipc::client::IClientObject;
use crate::ipc::sf;
use crate::ipc::sf::CopyHandle;
use crate::mem::alloc::Buffer;
use crate::mem::alloc::PAGE_ALIGNMENT;
use crate::mem::wait_for_permission;
use crate::result::Result;
pub use crate::service::bsd::*;
use crate::service::new_service_object;
use crate::svc::Handle;
use crate::svc::MemoryPermission;
use crate::sync::sys::futex::Futex;
use crate::sync::{ReadGuard, RwLock, WriteGuard};
/// Number of bsd service sessions to open for concurrent use (1..=16).
///
/// The cap of 16 matches the `AtomicU16` slot bitmask used by the service pool.
/// NOTE(review): the name is misspelled ("Parallelism"), but renaming would
/// break the public API.
#[repr(usize)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Paralellism {
    One = 1,
    Two,
    Three,
    Four,
    Five,
    Six,
    Seven,
    Eight,
    Nine,
    Ten,
    Eleven,
    Twelve,
    Thirteen,
    Fourteen,
    Fifteen,
    Sixteen,
}
// Wraps the three flavours of bsd IPC session behind one enum so they can be
// stored homogeneously and dispatched uniformly through `Deref`/`DerefMut`.
pub(crate) enum BsdServiceDispatcher {
    /// Session opened as [`UserBsdService`].
    U(UserBsdService),
    /// Session opened as [`AppletBsdService`].
    A(AppletBsdService),
    /// Session opened as [`SystemBsdService`].
    S(SystemBsdService),
}
impl Deref for BsdServiceDispatcher {
    type Target = dyn IBsdClient;

    /// Unsizes whichever concrete session is held to the common client trait.
    fn deref(&self) -> &Self::Target {
        match self {
            Self::U(svc) => svc as &dyn IBsdClient,
            Self::A(svc) => svc as &dyn IBsdClient,
            Self::S(svc) => svc as &dyn IBsdClient,
        }
    }
}

impl DerefMut for BsdServiceDispatcher {
    /// Mutable counterpart of [`Deref::deref`] for the held session.
    fn deref_mut(&mut self) -> &mut Self::Target {
        match self {
            Self::U(svc) => svc as &mut dyn IBsdClient,
            Self::A(svc) => svc as &mut dyn IBsdClient,
            Self::S(svc) => svc as &mut dyn IBsdClient,
        }
    }
}
// RAII checkout of one session slot from the pool; releasing happens in `Drop`.
struct BsdServiceHandle<'p> {
    // The pool the slot was checked out from.
    parent: &'p BsdSocketService,
    // Index into `parent.services`; the matching bit in `checkout_slots` is
    // set for as long as this handle lives.
    slot: usize,
}
impl Deref for BsdServiceHandle<'_> {
    type Target = dyn IBsdClient;
    fn deref(&self) -> &Self::Target {
        // SAFETY: `slot` was produced by `get_service`, which only hands out
        // indices strictly less than `services.len()`, and `services` is never
        // resized while handles are live.
        unsafe { self.parent.services.get_unchecked(self.slot) }.deref()
    }
}
impl Drop for BsdServiceHandle<'_> {
    fn drop(&mut self) {
        // Clear only this handle's bit in the checkout mask.
        let inverse_mask: u16 = !(1 << self.slot);
        // return the slot to the queue for use
        self.parent
            .checkout_slots
            .fetch_and(inverse_mask, Ordering::Release);
        // if there are any waiters, notify one
        if self.parent.waiters.load(Ordering::Acquire) > 0 {
            self.parent.service_waiter.signal_one();
        }
    }
}
/// Holder type for the initialized bsd service
pub struct BsdSocketService {
    // Keeps the transfer-memory backing allocation alive for the lifetime of
    // the service; the kernel handle below refers to it.
    _tmem_buffer: Buffer<u8>,
    // Kernel transfer-memory handle, closed in `Drop`.
    tmem_handle: Handle,
    // Bitmask of session slots currently checked out (bit i = services[i] busy).
    checkout_slots: AtomicU16,
    // Number of threads currently parked in `get_service` waiting for a slot.
    waiters: AtomicUsize,
    // Futex used to park/wake threads waiting for a free slot.
    service_waiter: Futex,
    // Pool of interchangeable bsd sessions handed out by `get_service`.
    services: Vec<BsdServiceDispatcher>,
    // Dedicated session used only for `start_monitoring`.
    _monitor_service: Box<dyn IBsdClient + Send + 'static>,
    _bsd_client_pid: u64,
}
// SAFETY: NOTE(review) — these impls assert the IPC session objects and the
// raw transfer-memory pointer are safe to share/move across threads; that is
// not provable from this file, so confirm before relying on them.
unsafe impl Sync for BsdSocketService {}
unsafe impl Send for BsdSocketService {}
impl BsdSocketService {
    // Opens `parrallellism` sessions of the requested service kind plus a
    // dedicated monitor session, registers the client with a (possibly
    // caller-provided) transfer-memory buffer, and starts monitoring.
    fn new(
        config: BsdServiceConfig,
        kind: BsdSrvkind,
        transfer_mem_buffer: Option<Buffer<u8>>,
        parrallellism: Paralellism,
    ) -> Result<Self> {
        // Open one session, then clone it until the pool holds
        // `parrallellism` sessions usable concurrently.
        let mut services: Vec<BsdServiceDispatcher> = match kind {
            BsdSrvkind::Applet => {
                let mut services = vec![new_service_object::<AppletBsdService>()?];
                for _ in 1..(parrallellism as usize) {
                    let copy = services[0].clone()?;
                    services.push(copy);
                }
                services.into_iter().map(BsdServiceDispatcher::A).collect()
            }
            BsdSrvkind::System => {
                let mut services = vec![new_service_object::<SystemBsdService>()?];
                for _ in 1..(parrallellism as usize) {
                    let copy = services[0].clone()?;
                    services.push(copy);
                }
                services.into_iter().map(BsdServiceDispatcher::S).collect()
            }
            BsdSrvkind::User => {
                let mut services = vec![new_service_object::<UserBsdService>()?];
                for _ in 1..(parrallellism as usize) {
                    let copy = services[0].clone()?;
                    services.push(copy);
                }
                services.into_iter().map(BsdServiceDispatcher::U).collect()
            }
        };
        // Separate session reserved for `start_monitoring`.
        let mut monitor_service = match kind {
            BsdSrvkind::Applet => Box::new(new_service_object::<AppletBsdService>()?)
                as Box<dyn IBsdClient + Send + Sync + 'static>,
            BsdSrvkind::System => Box::new(new_service_object::<SystemBsdService>()?)
                as Box<dyn IBsdClient + Send + Sync + 'static>,
            BsdSrvkind::User => Box::new(new_service_object::<UserBsdService>()?)
                as Box<dyn IBsdClient + Send + Sync + 'static>,
        };
        // Use the caller-provided buffer if it is large enough for this
        // config; otherwise allocate a fresh page-aligned one.
        let tmem_min_size = config.min_transfer_mem_size();
        let tmem_buffer: Buffer<u8> = if let Some(prepared_buffer) = transfer_mem_buffer
            && prepared_buffer.layout.size() >= tmem_min_size
        {
            prepared_buffer
        } else {
            let layout =
                unsafe { Layout::from_size_align_unchecked(tmem_min_size, PAGE_ALIGNMENT) };
            Buffer {
                ptr: Global.allocate(layout).unwrap().as_ptr().cast(),
                layout,
                allocator: Global,
            }
        };
        // Hand the buffer to the kernel as transfer memory for the service.
        let tmem_handle = crate::svc::create_transfer_memory(
            tmem_buffer.ptr.cast(),
            tmem_buffer.layout.size(),
            MemoryPermission::None(),
        )?;
        let bsd_client_pid = services[0].register_client(
            config,
            sf::ProcessId::new(),
            tmem_buffer.layout.size(),
            CopyHandle::from(tmem_handle),
        )?;
        monitor_service.start_monitoring(sf::ProcessId::from(bsd_client_pid))?;
        Ok(Self {
            _tmem_buffer: tmem_buffer,
            tmem_handle,
            // All slots start free; the u16 mask caps the pool at 16 slots,
            // matching `Paralellism::Sixteen`.
            checkout_slots: AtomicU16::new(0),
            waiters: AtomicUsize::new(0),
            service_waiter: Futex::new(),
            services,
            _monitor_service: monitor_service,
            _bsd_client_pid: bsd_client_pid,
        })
    }
    // Checks out an idle session slot, parking on the futex until one frees
    // up when the whole pool is in use.
    fn get_service(&self) -> BsdServiceHandle<'_> {
        let slot_limit = self.services.len();
        loop {
            if let Ok(value) =
                self.checkout_slots
                    .fetch_update(Ordering::AcqRel, Ordering::Acquire, |v| {
                        // First zero bit = first free slot.
                        let slot = v.trailing_ones() as usize;
                        if slot < slot_limit {
                            // write a checkout bit into the checkout slot
                            Some(v | (1 << slot))
                        } else {
                            // all valid slots are filled, bail on the update so we can wait on the futex.
                            None
                        }
                    })
            {
                // since the atomic update succeeded, we can rerun the trailing_ones call on
                // the returned original value to re-calculate the slot we took.
                return BsdServiceHandle {
                    parent: self,
                    slot: value.trailing_ones() as usize,
                };
            } else {
                // wait for an active hold on the service to be released.
                // NOTE(review): between the failed update and the
                // `waiters.fetch_add` below, a slot can be released without a
                // wakeup (the releaser only signals when `waiters > 0`), so
                // this wait may block until the *next* release. Confirm
                // whether `Futex::wait(0, -1)` rechecks state to cover this.
                self.waiters.fetch_add(1, Ordering::Release);
                self.service_waiter.wait(0, -1);
                self.waiters.fetch_sub(1, Ordering::Release);
            }
        }
    }
}
impl Drop for BsdSocketService {
    fn drop(&mut self) {
        self._monitor_service.close_session();
        // The previous `self.services.iter_mut().for_each(drop)` only dropped
        // the `&mut` references — a no-op — so the pool sessions stayed open
        // until after the transfer-memory handle was closed and the permission
        // wait below began. `clear()` actually drops (closes) them first.
        self.services.clear();
        let _ = crate::svc::close_handle(self.tmem_handle);
        // Wait for the kernel to hand the transfer-memory region back to us.
        let _ = wait_for_permission(self._tmem_buffer.ptr as _, MemoryPermission::Write(), None);
    }
}
// Global bsd service state; `None` until `initialize` succeeds.
static BSD_SERVICE: RwLock<Option<BsdSocketService>> = RwLock::new(None);

/// Initializes the bsd/socket service.
///
/// Returns `Ok` without doing anything when the service is already initialized.
pub fn initialize(
    kind: BsdSrvkind,
    config: BsdServiceConfig,
    tmem_buffer: Option<Buffer<u8>>,
    paralellism: Paralellism,
) -> Result<()> {
    let mut guard = BSD_SERVICE.write();
    if guard.is_none() {
        *guard = Some(BsdSocketService::new(
            config,
            kind,
            tmem_buffer,
            paralellism,
        )?);
    }
    Ok(())
}
// Tears down the global bsd service state, dropping (closing) it if present.
pub(crate) fn finalize() {
    let _ = BSD_SERVICE.write().take();
}

/// Acquires shared read access to the global bsd/socket service state.
pub fn read_socket_service<'a>() -> ReadGuard<'a, Option<BsdSocketService>> {
    BSD_SERVICE.read()
}

/// Acquires exclusive write access to the global bsd/socket service state.
pub fn write_socket_service<'a>() -> WriteGuard<'a, Option<BsdSocketService>> {
    BSD_SERVICE.write()
}
/// Implementation of the Rust stdlib TCP/UDP API
pub mod net {
use core::time::Duration;
use core::{mem::offset_of, net::Ipv4Addr};
use alloc::vec::Vec;
use super::*;
use crate::ipc::sf::OutAutoSelectBuffer;
use crate::service::bsd::PollFlags;
use crate::service::bsd::{PollFd, SocketOptions};
use crate::socket::{BsdDuration, Linger, SOL_SOCKET};
use crate::{
ipc::sf::Buffer,
result::{Result, ResultBase, ResultCode},
service::bsd::{BsdResult, ReadFlags, SendFlags, SocketAddrRepr},
};
pub mod traits {
use super::*;
/// Trait for making a bsd-fd wrapper type usable in `super::poll`
pub trait Pollable {
    /// Returns the raw bsd file descriptor to register with `poll`.
    fn get_poll_fd(&self) -> i32;
}
// Blanket impl: every socket-like type is pollable through its raw fd.
impl<T: SocketCommon> Pollable for T {
    fn get_poll_fd(&self) -> i32 {
        self.as_raw_fd()
    }
}
/// Contains common functions for bsd-compatible socket-like types
///
/// # Safety
///
/// Implementors are responsible for synchonising any interior mutablility for types, if any exists.
pub unsafe trait SocketCommon {
/// gets the raw file descriptor for the type
fn as_raw_fd(&self) -> i32;
/// Opens a connection to a remote host.
fn connect<A: Into<Ipv4Addr>>(destination: A, port: u16) -> Result<Self>
where
Self: Sized;
/// Creates a new independently owned handle to the underlying socket.
///
/// The returned object is a references the same stream that this
/// object references. Both handles will read and write the same stream of
/// data, and options set on one stream will be propagated to the other
/// stream.
///
/// This function is also why objects implementing this trait _should not_ contain any methods requiring mutable references.
/// Consumers should expect that calls to these functions are synchronized by the implementation.
fn try_clone(&self) -> Result<Self>
where
Self: Sized;
/// Reads data from the remote side into the provided buffer.
///
/// Immediately returns an error if the socket is not connected.
fn recv(&self, data: &mut [u8]) -> Result<usize> {
let socket_server_handle = BSD_SERVICE.read();
let socket_server = socket_server_handle.as_ref().unwrap();
match socket_server.get_service().recv(
self.as_raw_fd(),
ReadFlags::None(),
Buffer::from_mut_array(data),
)? {
BsdResult::Ok(ret, ()) => Ok(ret as usize),
BsdResult::Err(errno) => ResultCode::new_err(nx::result::pack_value(
rc::RESULT_MODULE,
1000 + errno.cast_unsigned(),
)),
}
}
/// Receives single datagram on the socket from the remote address to which it is connected, without removing the message from input queue. On success, returns the number of bytes peeked.
///
/// The function must be called with valid byte array buf of sufficient size to hold the message bytes. If a message is too long to fit in the supplied buffer, excess bytes may be discarded.
///
/// Successive calls return the same data. This is accomplished by passing `MSG_PEEK`` as a flag to the underlying `recv` system call.
///
/// Do not use this function to implement busy waiting, instead use [`poll`][`nx::socket::net::poll`] to synchronize IO events on one or more sockets.
/// `UdpSocket::connect` will connect this socket to a remote address. This method will fail if the socket is not connected.
///
/// # Errors
///
/// This method will fail if the socket is not connected. The connect method will connect this socket to a remote address.
fn peek(&self, data: &mut [u8]) -> Result<usize> {
let socket_server_handle = BSD_SERVICE.read();
let socket_server = socket_server_handle.as_ref().unwrap();
match socket_server.get_service().recv(
self.as_raw_fd(),
ReadFlags::Peek(),
Buffer::from_mut_array(data),
)? {
BsdResult::Ok(ret, ()) => Ok(ret as usize),
BsdResult::Err(errno) => ResultCode::new_err(nx::result::pack_value(
rc::RESULT_MODULE,
1000 + errno.cast_unsigned(),
)),
}
}
/// Sends data on the socket to the remote address to which it is connected.
/// For TCP, all data is sent or an error is returned.
///
/// Returns the length of the data written from the buffer
///
/// `Self::connect`` will connect this socket to a remote address. This method will fail if the socket is not connected.
fn send(&self, data: &[u8]) -> Result<u32> {
let socket_server_handle = BSD_SERVICE.read();
let socket_server = socket_server_handle.as_ref().unwrap();
match socket_server.get_service().send(
self.as_raw_fd(),
SendFlags::None(),
Buffer::from_array(data),
)? {
BsdResult::Ok(len, ()) => Ok(len.cast_unsigned()),
BsdResult::Err(errno) => ResultCode::new_err(nx::result::pack_value(
rc::RESULT_MODULE,
1000 + errno.cast_unsigned(),
)),
}
}
/// Sends data on the socket to the remote address to which it is connected.
/// For TCP, all data is sent or an error is returned.
///
/// Returns the length of the data written from the buffer
///
/// `Self::connect`` will connect this socket to a remote address. This method will fail if the socket is not connected.
fn send_non_blocking(&self, data: &[u8]) -> Result<()> {
let socket_server_handle = BSD_SERVICE.read();
let socket_server = socket_server_handle.as_ref().unwrap();
match socket_server.get_service().send(
self.as_raw_fd(),
SendFlags::DontWait(),
Buffer::from_array(data),
)? {
BsdResult::Ok(_, ()) => Ok(()),
BsdResult::Err(errno) => ResultCode::new_err(nx::result::pack_value(
rc::RESULT_MODULE,
1000 + errno.cast_unsigned(),
)),
}
}
/// Receives data on the socket from the remote address to which it is connected.
/// On success, returns the number of bytes read or a None value if there is no data.
///
///The function must be called with valid byte array buf of sufficient size to hold the message bytes. If a message is too long to fit in the supplied buffer, excess bytes may be discarded.
///
/// `UdpSocket::connect`` will connect this socket to a remote address. This method will fail if the socket is not connected.
fn recv_non_blocking(&self, buffer: &mut [u8]) -> Result<Option<usize>> {
let socket_server_handle = BSD_SERVICE.read();
let socket_server = socket_server_handle.as_ref().unwrap();
match socket_server.get_service().recv(self.as_raw_fd(), ReadFlags::DontWait(), Buffer::from_mut_array(buffer))? {
BsdResult::Ok(ret, ()) => {
Ok(Some(ret as usize))
},
BsdResult::Err(11) /* EAGAIN */ => {
Ok(None)
}
BsdResult::Err(errno) => {
ResultCode::new_err(nx::result::pack_value(
rc::RESULT_MODULE,
1000 + errno.cast_unsigned(),
))
}
}
}
/// Returns the local address of this socket
fn local_addr(&self) -> Result<SocketAddrRepr> {
    let service_guard = BSD_SERVICE.read();
    let bsd = service_guard.as_ref().unwrap();
    // Zero-initialized out-parameter that `getsockname` fills in.
    let mut addr: SocketAddrRepr = Default::default();
    let response = bsd
        .get_service()
        .get_socket_name(self.as_raw_fd(), Buffer::from_mut_var(&mut addr))?;
    match response {
        BsdResult::Ok(_, written_sockaddr_size) => {
            // Sanity-check that the service wrote at least the fixed address prefix.
            debug_assert!(
                written_sockaddr_size as usize >= offset_of!(SocketAddrRepr, _zero),
                "Invalid write length for returned socket addr"
            );
            Ok(addr)
        }
        BsdResult::Err(errno) => ResultCode::new_err(nx::result::pack_value(
            rc::RESULT_MODULE,
            1000 + errno.cast_unsigned(),
        )),
    }
}
/// Returns the remote address of this socket (errors for unconnected UDP sockets).
fn peer_addr(&self) -> Result<SocketAddrRepr> {
    let service_guard = BSD_SERVICE.read();
    let bsd = service_guard.as_ref().unwrap();
    // Zero-initialized out-parameter that `getpeername` fills in.
    let mut addr: SocketAddrRepr = Default::default();
    let response = bsd
        .get_service()
        .get_peer_name(self.as_raw_fd(), Buffer::from_mut_var(&mut addr))?;
    match response {
        BsdResult::Ok(_, written_sockaddr_size) => {
            // Sanity-check that the service wrote at least the fixed address prefix.
            debug_assert!(
                written_sockaddr_size as usize >= offset_of!(SocketAddrRepr, _zero),
                "Invalid write length for returned socket addr"
            );
            Ok(addr)
        }
        BsdResult::Err(errno) => ResultCode::new_err(nx::result::pack_value(
            rc::RESULT_MODULE,
            1000 + errno.cast_unsigned(),
        )),
    }
}
/// Sets the value for the `IP_TTL` option on this socket.
///
/// This value sets the time-to-live field that is used in every packet sent
/// from this socket.
fn set_ttl(&self, ttl: u32) -> Result<()> {
    let guard = BSD_SERVICE.read();
    let bsd = guard.as_ref().unwrap();
    let set_result = bsd.get_service().set_sock_opt(
        self.as_raw_fd(),
        IpProto::IP as _,
        IpOptions::TimeToLive as _,
        Buffer::from_other_var(&ttl),
    )?;
    match set_result {
        // Any errno is packed into this module's result-code space.
        BsdResult::Err(errno) => ResultCode::new_err(nx::result::pack_value(
            rc::RESULT_MODULE,
            1000 + errno.cast_unsigned(),
        )),
        _ => Ok(()),
    }
}
/// Gets the value of the `IP_TTL` option for this socket
fn ttl(&self) -> Result<u32> {
    let guard = BSD_SERVICE.read();
    let bsd = guard.as_ref().unwrap();
    // Out-parameter the service writes the current TTL into.
    let mut value: u32 = 0;
    match bsd.get_service().get_sock_opt(
        self.as_raw_fd(),
        IpProto::IP as _,
        IpOptions::TimeToLive as _,
        Buffer::from_other_mut_var(&mut value),
    )? {
        BsdResult::Err(errno) => ResultCode::new_err(nx::result::pack_value(
            rc::RESULT_MODULE,
            1000 + errno.cast_unsigned(),
        )),
        _ => Ok(value),
    }
}
/// Moves this socket into or out of nonblocking mode.
///
/// This will result in `read`, `write`, `recv` and `send` system operations
/// becoming nonblocking, i.e., immediately returning from their calls.
/// If the IO operation is successful, `Ok` is returned and no further
/// action is required. If the IO operation could not be completed and needs
/// to be retried, an error with the value set to `EAGAIN` is
/// returned.
fn set_nonblocking(&self, nonblocking: bool) -> Result<()> {
    const O_NONBLOCK: i32 = 0x800;
    let guard = BSD_SERVICE.read();
    let bsd = guard.as_ref().unwrap();
    // Fetch the current descriptor flags first so unrelated flags are preserved.
    let current_flags = match bsd
        .get_service()
        .fcntl(self.as_raw_fd(), super::FcntlCmd::GetFl, 0)?
    {
        BsdResult::Ok(flags, ()) => flags,
        BsdResult::Err(errno) => {
            return ResultCode::new_err(nx::result::pack_value(
                rc::RESULT_MODULE,
                1000 + errno.cast_unsigned(),
            ));
        }
    };
    // Set or clear only the O_NONBLOCK bit.
    let updated_flags = if nonblocking {
        current_flags | O_NONBLOCK
    } else {
        current_flags & !O_NONBLOCK
    };
    match bsd
        .get_service()
        .fcntl(self.as_raw_fd(), super::FcntlCmd::SetFl, updated_flags)?
    {
        BsdResult::Err(errno) => ResultCode::new_err(nx::result::pack_value(
            rc::RESULT_MODULE,
            1000 + errno.cast_unsigned(),
        )),
        _ => Ok(()),
    }
}
/// Returns the read timeout of this socket.
/// If the timeout is [`None`], then [`SocketCommon::recv`] calls will block indefinitely.
fn recv_timeout(&self) -> Result<Option<Duration>> {
    let guard = BSD_SERVICE.read();
    let bsd = guard.as_ref().unwrap();
    let mut raw_timeout: BsdDuration = Default::default();
    match bsd.get_service().get_sock_opt(
        self.as_raw_fd(),
        SOL_SOCKET,
        SocketOptions::ReceiveTimeout as _,
        Buffer::from_other_mut_var(&mut raw_timeout),
    )? {
        BsdResult::Ok(_, written_len) => {
            // The service must have written a full BsdDuration value.
            debug_assert_eq!(written_len as usize, size_of::<BsdDuration>())
        }
        BsdResult::Err(errno) => {
            return ResultCode::new_err(nx::result::pack_value(
                rc::RESULT_MODULE,
                1000 + errno.cast_unsigned(),
            ));
        }
    }
    // A zeroed timeout means "no timeout configured" (block indefinitely).
    if raw_timeout != Default::default() {
        Ok(Some(raw_timeout.into()))
    } else {
        Ok(None)
    }
}
/// Sets the read timeout to the timeout specified.
///
/// If the value specified is [`None`], then [`SocketCommon::recv`] calls will block
/// indefinitely. An [`Err`] is returned if the zero [`Duration`] is
/// passed to this method.
fn set_read_timeout(&self, timeout: Option<Duration>) -> Result<()> {
    // A zero timeout is ambiguous (it encodes "no timeout"), so reject it outright.
    result_return_if!(timeout == Some(Duration::ZERO), rc::ResultInvalidTimeout);
    let guard = BSD_SERVICE.read();
    let bsd = guard.as_ref().unwrap();
    let raw_timeout: BsdDuration = timeout.into();
    match bsd.get_service().set_sock_opt(
        self.as_raw_fd(),
        SOL_SOCKET,
        SocketOptions::ReceiveTimeout as _,
        Buffer::from_other_var(&raw_timeout),
    )? {
        BsdResult::Err(errno) => ResultCode::new_err(nx::result::pack_value(
            rc::RESULT_MODULE,
            1000 + errno.cast_unsigned(),
        )),
        _ => Ok(()),
    }
}
/// Returns the write timeout of this socket.
///
/// If the timeout is [`None`], then [`SocketCommon::send`] calls will block indefinitely.
fn send_timeout(&self) -> Result<Option<Duration>> {
    let guard = BSD_SERVICE.read();
    let bsd = guard.as_ref().unwrap();
    let mut raw_timeout: BsdDuration = Default::default();
    match bsd.get_service().get_sock_opt(
        self.as_raw_fd(),
        SOL_SOCKET,
        SocketOptions::SendTimeout as _,
        Buffer::from_other_mut_var(&mut raw_timeout),
    )? {
        BsdResult::Ok(_, written_len) => {
            // The service must have written a full BsdDuration value.
            debug_assert_eq!(written_len as usize, size_of::<BsdDuration>())
        }
        BsdResult::Err(errno) => {
            return ResultCode::new_err(nx::result::pack_value(
                rc::RESULT_MODULE,
                1000 + errno.cast_unsigned(),
            ));
        }
    }
    // A zeroed timeout means "no timeout configured" (block indefinitely).
    if raw_timeout != Default::default() {
        Ok(Some(raw_timeout.into()))
    } else {
        Ok(None)
    }
}
/// Sets the write timeout to the timeout specified.
///
/// If the value specified is [`None`], then [`SocketCommon::send`] calls will block
/// indefinitely. An [`Err`] is returned if the zero [`Duration`] is
/// passed to this method.
fn set_write_timeout(&self, timeout: Option<Duration>) -> Result<()> {
    // A zero timeout is ambiguous (it encodes "no timeout"), so reject it outright.
    result_return_if!(timeout == Some(Duration::ZERO), rc::ResultInvalidTimeout);
    let guard = BSD_SERVICE.read();
    let bsd = guard.as_ref().unwrap();
    let raw_timeout: BsdDuration = timeout.into();
    match bsd.get_service().set_sock_opt(
        self.as_raw_fd(),
        SOL_SOCKET,
        SocketOptions::SendTimeout as _,
        Buffer::from_other_var(&raw_timeout),
    )? {
        BsdResult::Err(errno) => ResultCode::new_err(nx::result::pack_value(
            rc::RESULT_MODULE,
            1000 + errno.cast_unsigned(),
        )),
        _ => Ok(()),
    }
}
/// Gets the value of the `SO_ERROR` option on this socket.
///
/// This will retrieve the stored error in the underlying socket, clearing
/// the field in the process. This can be useful for checking errors between
/// calls.
fn take_error(&self) -> Result<Option<i32>> {
    let socket_server_handle = BSD_SERVICE.read();
    let socket_server = socket_server_handle.as_ref().unwrap();
    // Out-parameter the service writes the pending errno into.
    let mut ret_errno: i32 = 0;
    // NOTE(review): this uses OutAutoSelectBuffer where the sibling getters use
    // Buffer::from_other_mut_var — confirm the different buffer type is intentional.
    if let BsdResult::Err(errno) = socket_server.get_service().get_sock_opt(
        self.as_raw_fd(),
        SOL_SOCKET,
        SocketOptions::Error as i32,
        OutAutoSelectBuffer::from_other_mut_var(&mut ret_errno),
    )? {
        return ResultCode::new_err(nx::result::pack_value(
            rc::RESULT_MODULE,
            1000 + errno.cast_unsigned(),
        ));
    }
    // Zero means "no pending error".
    Ok(if ret_errno != 0 {
        Some(ret_errno)
    } else {
        None
    })
}
}
}
use traits::{Pollable, SocketCommon};
/// Takes a slice of pollable values and requested events returns an iterator over the matched index in the input list and the returned events.
#[inline(always)]
pub fn poll<P: traits::Pollable>(
    pollers: &[(P, PollFlags)],
    timeout: Option<i32>,
) -> Result<impl Iterator<Item = (usize, PollFlags)>> {
    // Build one PollFd record per pollable, carrying its requested event mask.
    let mut records = Vec::with_capacity(pollers.len());
    for (pollable, requested) in pollers {
        records.push(PollFd {
            fd: pollable.get_poll_fd(),
            events: *requested,
            revents: Default::default(),
        });
    }
    // A missing timeout maps to -1, i.e. wait indefinitely.
    poll_impl(records, timeout.unwrap_or(-1))
}
#[doc(hidden)]
// Shared worker for `poll`: forwards the PollFd list to the BSD service and
// yields (input index, returned events) for entries whose requested events fired.
fn poll_impl(
    mut fds: Vec<PollFd>,
    timeout: i32,
) -> Result<impl Iterator<Item = (usize, PollFlags)>> {
    let socket_server_handle = BSD_SERVICE.read();
    // Unlike the socket methods, this returns ResultNotInitialized instead of panicking.
    let socket_server = socket_server_handle
        .as_ref()
        .ok_or(rc::ResultNotInitialized::make())?;
    if let BsdResult::Err(errno) = socket_server
        .get_service()
        .poll(Buffer::from_mut_array(fds.as_mut_slice()), timeout)?
    {
        return ResultCode::new_err(nx::result::pack_value(
            rc::RESULT_MODULE,
            1000 + errno.cast_unsigned(),
        ));
    }
    Ok(fds.into_iter().enumerate().filter_map(|(index, pollfd)| {
        // NOTE(review): only entries whose *requested* events intersect the returned
        // ones are yielded; error-type revents that poll can report without being
        // requested (e.g. POLLERR/POLLHUP) would be dropped here — confirm intended.
        if pollfd.events.intersects(pollfd.revents) {
            Some((index, pollfd.revents))
        } else {
            None
        }
    }))
}
/// A listening TCP socket, wrapping the raw BSD socket file descriptor.
pub struct TcpListener(i32);
impl TcpListener {
pub fn bind(ip: Ipv4Addr, port: u16) -> Result<Self> {
let socket_server_handle = BSD_SERVICE.read();
let socket_server = socket_server_handle
.as_ref()
.ok_or(rc::ResultNotInitialized::make())?;
let ipaddr = SocketAddrRepr::from((ip, port));
let listenfd = match socket_server.get_service().socket(
super::SocketDomain::INet,
super::SocketType::Stream,
super::IpProto::IP,
)? {
BsdResult::Ok(ret, ()) => ret,
BsdResult::Err(errno) => {
return ResultCode::new_err(nx::result::pack_value(
rc::RESULT_MODULE,
1000 + errno.cast_unsigned(),
));
}
};
let yes = 1i32;
if let BsdResult::Err(errno) = socket_server.get_service().set_sock_opt(
listenfd,
SOL_SOCKET,
SocketOptions::ReuseAddr as i32,
Buffer::from_other_var(&yes),
)? {
return ResultCode::new_err(nx::result::pack_value(
rc::RESULT_MODULE,
1000 + errno.cast_unsigned(),
));
}
if let BsdResult::Err(errno) = socket_server
.get_service()
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | true |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/input.rs | src/input.rs | //! Input utils and wrappers
use rc::ResultInvalidControllerId;
use crate::applet;
use crate::ipc::sf;
use crate::result::*;
use crate::service;
use crate::service::applet::AppletResourceUserId;
use crate::service::hid;
use crate::service::hid::AnalogStickState;
use crate::service::hid::AppletResource;
use crate::service::hid::HidService;
use crate::service::hid::IAppletResourceClient;
use crate::service::hid::IHidClient;
use crate::service::hid::shmem;
use crate::service::hid::shmem::SharedMemoryFormat;
use crate::svc;
use crate::version;
use crate::vmem;
pub mod rc;
// Maps an npad id to its entry index inside the HID shared-memory npad array:
// numbered pads No1..=No8 occupy slots 0..=7, followed by Handheld (8) and Other (9).
#[inline(always)]
fn get_npad_id_shmem_entry_index(npad_id: hid::NpadIdType) -> usize {
    match npad_id {
        hid::NpadIdType::Handheld => 8,
        hid::NpadIdType::Other => 9,
        numbered => numbered as usize, // No1..=No8 -> 0..=7
    }
}
// Borrows one field of this player's npad shared-memory entry, dispatching over
// every supported shared-memory format revision (each arm performs the identical
// field access on its revision's layout).
macro_rules! get_npad_property {
    ($self:expr, $property:ident) => {
        match $self.shmem {
            $crate::service::hid::shmem::SharedMemoryFormat::V1(m) => {
                &m.npad.entries[$self.npad_id_idx].$property
            }
            $crate::service::hid::shmem::SharedMemoryFormat::V2(m) => {
                &m.npad.entries[$self.npad_id_idx].$property
            }
            $crate::service::hid::shmem::SharedMemoryFormat::V3(m) => {
                &m.npad.entries[$self.npad_id_idx].$property
            }
            $crate::service::hid::shmem::SharedMemoryFormat::V4(m) => {
                &m.npad.entries[$self.npad_id_idx].$property
            }
            $crate::service::hid::shmem::SharedMemoryFormat::V5(m) => {
                &m.npad.entries[$self.npad_id_idx].$property
            }
            $crate::service::hid::shmem::SharedMemoryFormat::V6(m) => {
                &m.npad.entries[$self.npad_id_idx].$property
            }
        }
    };
}
// Reads the most recent keyboard sample (the LIFO tail item) from shared memory,
// dispatching over every supported shared-memory format revision.
macro_rules! get_keyboard_tail_item {
    ($self:expr) => {
        match $self.shmem {
            $crate::service::hid::shmem::SharedMemoryFormat::V1(m) => {
                m.keyboard.lifo.get_tail_item()
            }
            $crate::service::hid::shmem::SharedMemoryFormat::V2(m) => {
                m.keyboard.lifo.get_tail_item()
            }
            $crate::service::hid::shmem::SharedMemoryFormat::V3(m) => {
                m.keyboard.lifo.get_tail_item()
            }
            $crate::service::hid::shmem::SharedMemoryFormat::V4(m) => {
                m.keyboard.lifo.get_tail_item()
            }
            $crate::service::hid::shmem::SharedMemoryFormat::V5(m) => {
                m.keyboard.lifo.get_tail_item()
            }
            $crate::service::hid::shmem::SharedMemoryFormat::V6(m) => {
                m.keyboard.lifo.get_tail_item()
            }
        }
    };
}
// Reads a state field from the LIFO matching the *first* style tag found in
// `$style_tag` (checked in the fixed order below); yields Default::default()
// when none of the known tags is set.
macro_rules! get_state_one_tag {
    ($self:expr, $style_tag:expr, $state_field:ident) => {
        if $style_tag.contains(hid::NpadStyleTag::FullKey()) {
            get_npad_property!($self, full_key_lifo)
                .get_tail_item()
                .$state_field
        } else if $style_tag.contains(hid::NpadStyleTag::Handheld()) {
            get_npad_property!($self, handheld_lifo)
                .get_tail_item()
                .$state_field
        } else if $style_tag.contains(hid::NpadStyleTag::JoyDual()) {
            get_npad_property!($self, joy_dual_lifo)
                .get_tail_item()
                .$state_field
        } else if $style_tag.contains(hid::NpadStyleTag::JoyLeft()) {
            get_npad_property!($self, joy_left_lifo)
                .get_tail_item()
                .$state_field
        } else if $style_tag.contains(hid::NpadStyleTag::JoyRight()) {
            get_npad_property!($self, joy_right_lifo)
                .get_tail_item()
                .$state_field
        } else if $style_tag.contains(hid::NpadStyleTag::System())
            || $style_tag.contains(hid::NpadStyleTag::SystemExt())
        {
            get_npad_property!($self, system_ext_lifo)
                .get_tail_item()
                .$state_field
        } else {
            Default::default()
        }
    };
}
// Reads a state field from the LIFO of *every* style tag set in `$style_tag`
// and OR-combines the results into one `$state_type` value (starting from its
// default). Unlike get_state_one_tag!, all matching tags contribute.
macro_rules! get_state_multi_tag {
    ($self:expr, $style_tag:expr, $state_type:ty, $state_field:ident) => {{
        let mut state: $state_type = Default::default();
        if $style_tag.contains(hid::NpadStyleTag::FullKey()) {
            state |= get_npad_property!($self, full_key_lifo)
                .get_tail_item()
                .$state_field;
        }
        if $style_tag.contains(hid::NpadStyleTag::Handheld()) {
            state |= get_npad_property!($self, handheld_lifo)
                .get_tail_item()
                .$state_field;
        }
        if $style_tag.contains(hid::NpadStyleTag::JoyDual()) {
            state |= get_npad_property!($self, joy_dual_lifo)
                .get_tail_item()
                .$state_field;
        }
        if $style_tag.contains(hid::NpadStyleTag::JoyLeft()) {
            state |= get_npad_property!($self, joy_left_lifo)
                .get_tail_item()
                .$state_field;
        }
        if $style_tag.contains(hid::NpadStyleTag::JoyRight()) {
            state |= get_npad_property!($self, joy_right_lifo)
                .get_tail_item()
                .$state_field;
        }
        if $style_tag.contains(hid::NpadStyleTag::System())
            || $style_tag.contains(hid::NpadStyleTag::SystemExt())
        {
            state |= get_npad_property!($self, system_ext_lifo)
                .get_tail_item()
                .$state_field;
        }
        state
    }};
}
/// Represents a console controller type
///
/// It's essentially a wrapper type over HID shared-memory to simplify input detection
pub struct Player<'player> {
    /// The controller id this player reads input for.
    npad_id: hid::NpadIdType,
    /// Pre-computed index of `npad_id` inside the shared-memory npad entry array.
    npad_id_idx: usize,
    /// The style-tag flags scanned by the multi-tag getters (e.g. [`Player::get_buttons`]).
    supported_style_tags: hid::NpadStyleTag,
    /// Borrowed view over the mapped HID shared memory.
    shmem: &'player SharedMemoryFormat,
    /// Button state captured by the last button-getter call; used for down/up edge detection.
    prev_buttons: hid::NpadButton,
}
impl<'player, 'context: 'player> Player<'player> {
    /// Creates a [`Player`] from shared-memory information
    ///
    /// If using a [`Context`], look for [`Context::get_player`] instead (for simplicity)
    ///
    /// # Arguments
    ///
    /// * `npad_id`: The [`NpadIdType`][`hid::NpadIdType`] of the desired controller
    /// * `supported_style_tags`: The [`NpadStyleTag`][`hid::NpadStyleTag`] flags which will be used by the [`Player`] to scan for input, etc.
    /// * `shmem`: A reference to the parsed HID shared-memory
    pub fn new(
        npad_id: hid::NpadIdType,
        supported_style_tags: hid::NpadStyleTag,
        shmem: &'context SharedMemoryFormat,
    ) -> Result<Self> {
        // Note: currently infallible; kept as Result for interface stability.
        Ok(Self {
            npad_id,
            npad_id_idx: get_npad_id_shmem_entry_index(npad_id),
            supported_style_tags,
            shmem,
            prev_buttons: Default::default(),
        })
    }
    /// Returns the button state captured by the last button-getter call.
    #[inline]
    pub fn get_previous_buttons(&self) -> hid::NpadButton {
        self.prev_buttons
    }
    /// Gets the [`NpadAttribute`][`hid::NpadAttribute`]s for a certain [`NpadStyleTag`][`hid::NpadStyleTag`]
    ///
    /// # Arguments
    ///
    /// * `style_tag`: Must be a [`NpadStyleTag`][`hid::NpadStyleTag`] with a single flag set (otherwise only one will take effect and the rest will be ignored)
    #[inline]
    pub fn get_style_tag_attributes(&self, style_tag: hid::NpadStyleTag) -> hid::NpadAttribute {
        get_state_one_tag!(self, style_tag, attributes)
    }
    /// Gets the stick status from a provided style tag (which may or may not be configured)
    ///
    /// Returns `(left stick, right stick)` states.
    #[inline]
    pub fn get_stick_status(
        &self,
        style_tag: hid::NpadStyleTag,
    ) -> (AnalogStickState, AnalogStickState) {
        (
            get_state_one_tag!(self, style_tag, analog_stick_l),
            get_state_one_tag!(self, style_tag, analog_stick_r),
        )
    }
    /// Currently always returns [`None`] — intended to return the first supported
    /// style tag's stick state that passes the `deadzone` test (unimplemented).
    #[inline]
    pub fn get_first_stick_status(
        &self,
        deadzone: f32,
    ) -> Option<(AnalogStickState, AnalogStickState)> {
        debug_assert!(
            (0.0..=1.0).contains(&deadzone),
            "deadzone is a factor in the range (0, 1)"
        );
        // TODO - make an iterator through the supported style tags to get the first one passing the deadzone test.
        None
    }
    /// Gets the [`NpadButton`][`hid::NpadButton`]s for a certain [`NpadStyleTag`][`hid::NpadStyleTag`]
    ///
    /// # Arguments
    ///
    /// * `style_tag`: Must be a [`NpadStyleTag`][`hid::NpadStyleTag`] with a single flag set (otherwise only one will take effect and the rest will be ignored)
    pub fn get_style_tag_buttons(&mut self, style_tag: hid::NpadStyleTag) -> hid::NpadButton {
        let cur_buttons = get_state_one_tag!(self, style_tag, buttons);
        // Remember this sample so the *_down/*_up helpers can compute edges.
        self.prev_buttons = cur_buttons;
        cur_buttons
    }
    /// Gets the down [`NpadButton`][`hid::NpadButton`]s for a certain [`NpadStyleTag`][`hid::NpadStyleTag`]
    ///
    /// This is similar to `get_style_tag_buttons` but this only gets the buttons once after they're down/pressed
    ///
    /// # Arguments
    ///
    /// * `style_tag`: Must be a [`NpadStyleTag`][`hid::NpadStyleTag`] with a single flag set (otherwise only one will take effect and the rest will be ignored)
    pub fn get_style_tag_buttons_down(&mut self, style_tag: hid::NpadStyleTag) -> hid::NpadButton {
        let prev_buttons = self.prev_buttons;
        let cur_buttons = self.get_style_tag_buttons(style_tag);
        // Newly pressed = set now, not set before.
        (!prev_buttons) & cur_buttons
    }
    /// Gets the up [`NpadButton`][`hid::NpadButton`]s for a certain [`NpadStyleTag`][`hid::NpadStyleTag`]
    ///
    /// This is similar to `get_style_tag_buttons` but this only gets the buttons once after they're up/released
    ///
    /// # Arguments
    ///
    /// * `style_tag`: Must be a [`NpadStyleTag`][`hid::NpadStyleTag`] with a single flag set (otherwise only one will take effect and the rest will be ignored)
    pub fn get_style_tag_buttons_up(&mut self, style_tag: hid::NpadStyleTag) -> hid::NpadButton {
        let prev_buttons = self.prev_buttons;
        let cur_buttons = self.get_style_tag_buttons(style_tag);
        // Newly released = set before, not set now.
        prev_buttons & (!cur_buttons)
    }
    /// Gets the [`NpadButton`][`hid::NpadButton`]s for all of the supported [`NpadStyleTag`][`hid::NpadStyleTag`]s, combining all of them
    ///
    /// This is like combining the result of `get_style_tag_buttons` with all the supported [`NpadStyleTag`][`hid::NpadStyleTag`] flags
    pub fn get_buttons(&mut self) -> hid::NpadButton {
        let cur_buttons =
            get_state_multi_tag!(self, self.supported_style_tags, hid::NpadButton, buttons);
        // Remember this sample so the *_down/*_up helpers can compute edges.
        self.prev_buttons = cur_buttons;
        cur_buttons
    }
    /// Gets the down [`NpadButton`][`hid::NpadButton`]s for all of the supported [`NpadStyleTag`][`hid::NpadStyleTag`]s, combining all of them
    ///
    /// This is similar to `get_buttons` but this only gets the buttons once after they're down/pressed
    pub fn get_buttons_down(&mut self) -> hid::NpadButton {
        let prev_buttons = self.prev_buttons;
        let cur_buttons = self.get_buttons();
        (!prev_buttons) & cur_buttons
    }
    /// Gets the up [`NpadButton`][`hid::NpadButton`]s for all of the supported [`NpadStyleTag`][`hid::NpadStyleTag`]s, combining all of them
    ///
    /// This is similar to `get_buttons` but this only gets the buttons once after they're up/released
    pub fn get_buttons_up(&mut self) -> hid::NpadButton {
        let prev_buttons = self.prev_buttons;
        let cur_buttons = self.get_buttons();
        prev_buttons & (!cur_buttons)
    }
    /// Samples the buttons once and returns `(previous, current, down, up)`
    ///
    /// This only updates the state once, but is otherwise equivalent to `(self.get_previous_buttons(), self.get_buttons(), self.get_buttons_down(), self.get_buttons_up())`
    #[inline]
    pub fn get_button_updates(
        &mut self,
    ) -> (
        hid::NpadButton,
        hid::NpadButton,
        hid::NpadButton,
        hid::NpadButton,
    ) {
        let prev_buttons = self.prev_buttons;
        let cur_buttons = self.get_buttons();
        (
            prev_buttons,
            cur_buttons,
            (!prev_buttons) & cur_buttons,
            prev_buttons & (!cur_buttons),
        )
    }
    /// Gets the [`NpadIdType`][`hid::NpadIdType`] being used with this [`Player`]
    #[inline]
    pub fn get_npad_id(&self) -> hid::NpadIdType {
        self.npad_id
    }
    /// Gets the supported [`NpadStyleTag`][`hid::NpadStyleTag`] flags being used with this [`Player`]
    #[inline]
    pub fn get_supported_style_tags(&self) -> hid::NpadStyleTag {
        self.supported_style_tags
    }
    /// Gets the style tag currently reported in shared memory for this controller.
    #[inline]
    pub fn get_reported_style_tag(&self) -> hid::NpadStyleTag {
        *get_npad_property!(self, style_tag)
    }
    /// Gets the device type reported in shared memory for this controller.
    #[inline]
    pub fn get_controller_type(&self) -> hid::DeviceType {
        *get_npad_property!(self, device_type)
    }
    /// Gets the latest keyboard state sample from shared memory.
    #[inline]
    pub fn get_keyboard_state(&self) -> shmem::KeyboardState {
        get_keyboard_tail_item!(self)
    }
}
/// Represents a simple type for dealing with input handling
#[allow(dead_code)]
pub struct Context {
    /// Session with the HID service.
    hid_service: HidService,
    /// Applet resource created from the HID session; source of the shared memory.
    applet_resource: AppletResource,
    /// Style-tag flags this context registered as supported.
    supported_style_tags: hid::NpadStyleTag,
    /// Handle of the mapped HID shared memory (unmapped and closed on drop).
    shmem_handle: svc::Handle,
    /// Parsed view over the mapped shared memory.
    shmem: SharedMemoryFormat,
}
impl Context {
    /// Creates a [`Context`] from supported input values
    ///
    /// The supported values are essentially used to enable supported controllers/controller configs via [`hid`] commands, and for opening [`Player`] types
    ///
    /// # Arguments
    ///
    /// * `supported_style_tags`: Supported [`NpadStyleTag`][`hid::NpadStyleTag`] flags
    /// * `player_count`: Number of numbered players to support, must be in `1..=8`
    pub fn new(supported_style_tags: hid::NpadStyleTag, mut player_count: usize) -> Result<Self> {
        result_return_unless!((1..=8).contains(&player_count), ResultInvalidControllerId);
        // Up to 8 numbered pads plus an optional Handheld slot.
        let mut players: [hid::NpadIdType; 9] = [hid::NpadIdType::No1; 9];
        for (player_id, player_slot) in players.iter_mut().enumerate().take(player_count) {
            // ids 0..=7 correspond to values No1..=No8
            // SAFETY-NOTE(review): relies on hid::NpadIdType having u32 discriminants
            // 0..=7 for No1..=No8 — confirm against the enum definition.
            *player_slot = unsafe { core::mem::transmute::<u32, hid::NpadIdType>(player_id as u32) }
        }
        if supported_style_tags.contains(hid::NpadStyleTag::Handheld())
            || supported_style_tags.contains(hid::NpadStyleTag::HandheldLark())
        {
            // allow for 8 controller players plus the handheld controllers still working
            players[player_count] = hid::NpadIdType::Handheld;
            player_count += 1;
        }
        let aruid = AppletResourceUserId::new(
            applet::GLOBAL_ARUID.load(core::sync::atomic::Ordering::Relaxed),
        );
        // Only the populated prefix of the id array is sent to the service.
        let players = sf::Buffer::from_array(&players[..player_count]);
        let mut hid_srv = service::new_service_object::<HidService>()?;
        let mut applet_res = hid_srv.create_applet_resource(aruid.clone())?;
        let shmem_handle = applet_res.get_shared_memory_handle()?;
        let shmem_address = vmem::allocate(shmem::SHMEM_SIZE)?;
        // SAFETY: `shmem_address` was just reserved with SHMEM_SIZE bytes of virtual
        // address space, and the handle comes from the applet resource above.
        unsafe {
            svc::map_shared_memory(
                shmem_handle.handle,
                shmem_address,
                shmem::SHMEM_SIZE,
                svc::MemoryPermission::Read(),
            )?
        };
        Self::activate_npad(&mut hid_srv, aruid.clone())?;
        hid_srv.set_supported_npad_style_set(supported_style_tags, aruid.clone())?;
        hid_srv.set_supported_npad_id_type(aruid.clone(), players)?;
        // NOTE(review): the returned style set is discarded — confirm this call is
        // only kept for its side effect.
        let _styles = hid_srv.get_supported_npad_style_set(aruid);
        Ok(Self {
            hid_service: hid_srv,
            applet_resource: applet_res,
            supported_style_tags,
            shmem_handle: shmem_handle.handle,
            shmem: unsafe { SharedMemoryFormat::from_shmem_ptr(shmem_address)? },
        })
    }
    /// Activates the npad interface, picking the activation call and revision
    /// appropriate for the running system version (plain activation below 5.0.0).
    fn activate_npad(hid_srv: &mut HidService, aruid: AppletResourceUserId) -> Result<()> {
        let current_version = version::get_version();
        if current_version < version::Version::new(5, 0, 0) {
            hid_srv.activate_npad(aruid)
        } else {
            // Revision chosen from the major system version.
            let revision = match current_version.major {
                0..6 => 1,
                6..8 => 2,
                8..18 => 3,
                18.. => 5,
            };
            hid_srv.activate_npad_with_revision(revision, aruid)
        }
    }
    /// Opens a [`Player`] type for the specified [`NpadIdType`][`hid::NpadIdType`]
    ///
    /// This simplifies creating a [`Player`] type, since this context contains the supported [`NpadStyleTag`][`hid::NpadStyleTag`] values and the mapped shared-memory address
    ///
    /// # Arguments
    ///
    /// `npad_id`: The [`NpadIdType`][`hid::NpadIdType`] to use
    #[inline]
    pub fn get_player(&'_ self, npad_id: hid::NpadIdType) -> Player<'_> {
        Player::new(npad_id, self.supported_style_tags, &self.shmem)
            .expect("The pointers provided by the hid service should never be invalid")
    }
    /// Gets the latest [`TouchScreenState`][`shmem::TouchScreenState`] sample for the
    /// console touch-screen from shared memory.
    pub fn get_touch_state(&self) -> shmem::TouchScreenState {
        match self.shmem {
            shmem::SharedMemoryFormat::V1(m) => &m.touch_screen,
            shmem::SharedMemoryFormat::V2(m) => &m.touch_screen,
            shmem::SharedMemoryFormat::V3(m) => &m.touch_screen,
            shmem::SharedMemoryFormat::V4(m) => &m.touch_screen,
            shmem::SharedMemoryFormat::V5(m) => &m.touch_screen,
            shmem::SharedMemoryFormat::V6(m) => &m.touch_screen,
        }
        .lifo
        .get_tail_item()
    }
    /// Gets the current [`TouchState`][`hid::TouchState`] values for the console touch-screen, returning the number of states present/set
    ///
    /// # Arguments
    ///
    /// * `touch_states`: Array of [`TouchState`][`hid::TouchState`] values to get filled, the array doesn't have to be bigger than `17` items
    pub fn get_touches(&self, touch_states: &mut [hid::TouchState]) -> usize {
        let screen_state = self.get_touch_state();
        // Copy at most as many touches as the caller's buffer (or the sample) holds.
        let min_count = touch_states.len().min(screen_state.count as usize);
        touch_states[..min_count].copy_from_slice(&screen_state.touches[..min_count]);
        min_count
    }
}
impl Drop for Context {
    /// Destroys the [`Context`], un-mapping the shared-memory and closing it, and also closing its [`IHidClient`] session
    fn drop(&mut self) {
        // Best-effort teardown: failures cannot be propagated out of drop.
        // NOTE(review): deactivate_npad is called with a zero ARUID rather than the
        // ARUID used during construction — confirm this is intended.
        let _ = self
            .hid_service
            .deactivate_npad(AppletResourceUserId::new(0));
        // Unmap the shared memory mapped in `new` before closing its handle.
        let _ = unsafe {
            svc::unmap_shared_memory(self.shmem_handle, self.shmem.as_ptr(), shmem::SHMEM_SIZE)
        };
        let _ = svc::close_handle(self.shmem_handle);
    }
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/ipc.rs | src/ipc.rs | //! Contains the machinery for Horizon OS's IPC interface
use crate::result::*;
use crate::svc;
use crate::thread;
use arrayvec::ArrayVec;
use core::mem;
use core::ptr;
pub mod rc;
/// Identifies the protocol used to serialize IPC commands for an object.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(u8)]
pub enum CommandProtocol {
    /// The CMIF protocol (the default).
    #[default]
    Cmif,
    /// The TIPC protocol (control commands are not supported under it).
    Tipc,
}
/// Describes how to reach an IPC object: its session handle, optional domain
/// object id, handle ownership, and the protocol used to talk to it.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct ObjectInfo {
    /// Session handle used to reach the object (0 = invalid).
    pub handle: svc::Handle,
    /// Object id within a domain; 0 when this is not a domain object.
    pub domain_object_id: cmif::DomainObjectId,
    /// Whether this info owns `handle`.
    pub owns_handle: bool,
    /// Protocol used when serializing commands for this object.
    pub protocol: CommandProtocol,
}
impl ObjectInfo {
    /// Creates an empty, invalid [`ObjectInfo`] (zero handle).
    pub const fn new() -> Self {
        Self {
            handle: 0,
            domain_object_id: 0,
            owns_handle: false,
            protocol: CommandProtocol::Cmif,
        }
    }
    /// Creates an [`ObjectInfo`] that owns the given session handle.
    pub const fn from_handle(handle: svc::Handle) -> Self {
        Self {
            handle,
            domain_object_id: 0,
            owns_handle: true,
            protocol: CommandProtocol::Cmif,
        }
    }
    /// Creates an [`ObjectInfo`] for an object inside a domain session; the
    /// parent session handle is not owned by the result.
    pub const fn from_domain_object_id(
        parent_handle: svc::Handle,
        domain_object_id: cmif::DomainObjectId,
    ) -> Self {
        Self {
            handle: parent_handle,
            domain_object_id,
            owns_handle: false,
            protocol: CommandProtocol::Cmif,
        }
    }
    /// Returns whether the session handle is non-zero.
    pub const fn is_valid(&self) -> bool {
        self.handle != 0
    }
    /// Returns whether this refers to an object inside a domain.
    pub const fn is_domain(&self) -> bool {
        self.domain_object_id != 0
    }
    /// Returns whether commands are serialized with the CMIF protocol.
    pub fn uses_cmif_protocol(&self) -> bool {
        self.protocol == CommandProtocol::Cmif
    }
    /// Returns whether commands are serialized with the TIPC protocol.
    pub fn uses_tipc_protocol(&self) -> bool {
        self.protocol == CommandProtocol::Tipc
    }
    /// Converts the current object into a domain, returning the new domain object id.
    ///
    /// Rejected with `ResultNotSupported` under TIPC.
    pub fn convert_current_object_to_domain(&mut self) -> Result<cmif::DomainObjectId> {
        if self.uses_tipc_protocol() {
            return super::rc::ResultNotSupported::make_err();
        }
        ipc_client_send_control_command!([*self; cmif::ControlRequestId::ConvertCurrentObjectToDomain] () => (domain_object_id: cmif::DomainObjectId))
    }
    /// Queries the server's pointer-buffer size for this session.
    ///
    /// Rejected with `ResultNotSupported` under TIPC.
    pub fn query_pointer_buffer_size(&self) -> Result<u16> {
        if self.uses_tipc_protocol() {
            return super::rc::ResultNotSupported::make_err();
        }
        ipc_client_send_control_command!([*self; cmif::ControlRequestId::QueryPointerBufferSize] () => (pointer_buffer_size: u16))
    }
    /// Clones the current object's session, returning the new session handle.
    ///
    /// Rejected with `ResultNotSupported` under TIPC.
    pub fn clone_current_object(&self) -> Result<sf::MoveHandle> {
        if self.uses_tipc_protocol() {
            return super::rc::ResultNotSupported::make_err();
        }
        ipc_client_send_control_command!([*self; cmif::ControlRequestId::CloneCurrentObject] () => (cloned_handle: sf::MoveHandle))
    }
}
/// Buffer transfer mode, encoded into bits 0-1 of a [`BufferDescriptor`].
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(u8)]
pub enum BufferFlags {
    Normal = 0,
    NonSecure = 1,
    Invalid = 2,
    NonDevice = 3,
}
/// An IPC send/receive/exchange buffer descriptor: a buffer address and size
/// split across three 32-bit words.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct BufferDescriptor {
    /// Low 32 bits of the buffer size.
    pub size_low: u32,
    /// Low 32 bits of the buffer address.
    pub address_low: u32,
    /// Packed word: flags (bits 0-1), address bits 36.. (bits 2-23),
    /// size bits 32.. (bits 24-27), address bits 32-35 (bits 28-31).
    pub bits: u32,
}
impl BufferDescriptor {
    /// Creates a zeroed descriptor.
    pub const fn empty() -> Self {
        Self {
            size_low: 0,
            address_low: 0,
            bits: 0,
        }
    }
    /// Packs a buffer address, size and flags into the descriptor layout.
    pub fn new(buffer: *const u8, buffer_size: usize, flags: BufferFlags) -> Self {
        // Only the numeric address is encoded; provenance is deliberately exposed.
        let buffer = buffer.expose_provenance();
        let address_low = buffer as u32;
        let address_mid = (buffer >> 32) as u32;
        let address_high = (buffer >> 36) as u32;
        let size_low = buffer_size as u32;
        let size_high = (buffer_size >> 32) as u32;
        let mut bits: u32 = 0;
        write_bits!(0, 1, bits, flags as u32);
        write_bits!(2, 23, bits, address_high);
        write_bits!(24, 27, bits, size_high);
        write_bits!(28, 31, bits, address_mid);
        Self {
            size_low,
            address_low,
            bits,
        }
    }
    /// Reassembles the buffer address from its low/mid/high pieces.
    pub const fn get_address(&self) -> *mut u8 {
        let address_high = read_bits!(2, 23, self.bits);
        let address_mid = read_bits!(28, 31, self.bits);
        (self.address_low as usize
            | ((address_mid as usize) << 32)
            | ((address_high as usize) << 36)) as *mut u8
    }
    /// Reassembles the buffer size from its low/high pieces.
    pub const fn get_size(&self) -> usize {
        let size_high = read_bits!(24, 27, self.bits);
        self.size_low as usize | ((size_high as usize) << 32)
    }
}
/// An IPC send-static (pointer) descriptor: a buffer address, 16-bit size and
/// receive-static index packed into two 32-bit words.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct SendStaticDescriptor {
    /// Packed word: index (bits 0-5), address bits 36.. (bits 6-11),
    /// address bits 32-35 (bits 12-15), size (bits 16-31).
    bits: u32,
    /// Low 32 bits of the buffer address.
    address_low: u32,
}
impl SendStaticDescriptor {
    /// Creates a zeroed descriptor.
    pub const fn empty() -> Self {
        Self {
            bits: 0,
            address_low: 0,
        }
    }
    /// Packs a buffer address, size (truncated to 16 bits) and static index.
    pub fn new(buffer: *const u8, buffer_size: usize, index: u32) -> Self {
        // Only the numeric address is encoded; provenance is deliberately exposed.
        let buffer = buffer.expose_provenance();
        let address_low = buffer as u32;
        let address_mid = (buffer >> 32) as u32;
        let address_high = (buffer >> 36) as u32;
        let mut bits: u32 = 0;
        write_bits!(0, 5, bits, index);
        write_bits!(6, 11, bits, address_high);
        write_bits!(12, 15, bits, address_mid);
        write_bits!(16, 31, bits, buffer_size as u32);
        Self { bits, address_low }
    }
    /// Reassembles the buffer address from its low/mid/high pieces.
    pub const fn get_address(&self) -> *mut u8 {
        let address_high = read_bits!(6, 11, self.bits);
        let address_mid = read_bits!(12, 15, self.bits);
        (self.address_low as usize
            | ((address_mid as usize) << 32)
            | ((address_high as usize) << 36)) as *mut u8
    }
    /// Returns the 16-bit buffer size.
    pub const fn get_size(&self) -> usize {
        read_bits!(16, 31, self.bits) as usize
    }
}
/// An IPC receive-static descriptor: a buffer address and 16-bit size packed
/// into two 32-bit words.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct ReceiveStaticDescriptor {
    /// Low 32 bits of the buffer address.
    address_low: u32,
    /// Packed word: address bits 32.. (bits 0-15), size (bits 16-31).
    bits: u32,
}
impl ReceiveStaticDescriptor {
    /// Creates a zeroed descriptor.
    pub const fn empty() -> Self {
        Self {
            address_low: 0,
            bits: 0,
        }
    }
    /// Packs a buffer address and size (truncated to 16 bits).
    pub fn new(buffer: *const u8, buffer_size: usize) -> Self {
        // Only the numeric address is encoded; provenance is deliberately exposed.
        let buffer = buffer.expose_provenance();
        let address_low = buffer as u32;
        let address_high = (buffer >> 32) as u32;
        let mut bits: u32 = 0;
        write_bits!(0, 15, bits, address_high);
        write_bits!(16, 31, bits, buffer_size as u32);
        Self { address_low, bits }
    }
    /// Reassembles the buffer address from its low/high pieces.
    pub const fn get_address(&self) -> *mut u8 {
        let address_high = read_bits!(0, 15, self.bits);
        (self.address_low as usize | ((address_high as usize) << 32)) as *mut u8
    }
    /// Returns the 16-bit buffer size.
    pub const fn get_size(&self) -> usize {
        read_bits!(16, 31, self.bits) as usize
    }
}
/// The primary header of an IPC command, packed into two 32-bit words.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct CommandHeader {
    /// Word 1: command type (bits 0-15), send-static count (16-19),
    /// send-buffer count (20-23), receive-buffer count (24-27),
    /// exchange-buffer count (28-31).
    bits_1: u32,
    /// Word 2: data word count (bits 0-9), encoded receive-static type (10-13),
    /// special-header flag (bit 31).
    bits_2: u32,
}
impl CommandHeader {
    /// Creates a zeroed header.
    pub const fn empty() -> Self {
        Self {
            bits_1: 0,
            bits_2: 0,
        }
    }
    /// Encodes a receive-static count into the on-wire "type" field:
    /// 0 stays 0 (none), the sentinel count 0xFF becomes 2, any other count N
    /// becomes N + 2.
    pub const fn encode_receive_static_type(receive_static_count: u32) -> u32 {
        let mut static_type: u32 = 0;
        if receive_static_count > 0 {
            static_type += 2;
            if receive_static_count != 0xFF {
                static_type += receive_static_count;
            }
        }
        static_type
    }
    /// Inverse of [`Self::encode_receive_static_type`]: 0 -> 0, 2 -> 0xFF,
    /// N > 2 -> N - 2 (a raw value of 1 also decodes to 0).
    pub const fn decode_receive_static_type(receive_static_type: u32) -> u32 {
        let mut count: u32 = 0;
        if receive_static_type > 0 {
            if receive_static_type == 2 {
                count = 0xFF;
            } else if receive_static_type > 2 {
                count = receive_static_type - 2;
            }
        }
        count
    }
    /// Builds a header by packing every field into the two words.
    #[allow(clippy::too_many_arguments)]
    pub const fn new(
        command_type: u32,
        send_static_count: u32,
        send_buffer_count: u32,
        receive_buffer_count: u32,
        exchange_buffer_count: u32,
        data_word_count: u32,
        receive_static_count: u32,
        has_special_header: bool,
    ) -> Self {
        let mut bits_1: u32 = 0;
        write_bits!(0, 15, bits_1, command_type);
        write_bits!(16, 19, bits_1, send_static_count);
        write_bits!(20, 23, bits_1, send_buffer_count);
        write_bits!(24, 27, bits_1, receive_buffer_count);
        write_bits!(28, 31, bits_1, exchange_buffer_count);
        let mut bits_2: u32 = 0;
        write_bits!(0, 9, bits_2, data_word_count);
        // Receive-static counts are stored in their encoded "type" form.
        write_bits!(
            10,
            13,
            bits_2,
            Self::encode_receive_static_type(receive_static_count)
        );
        write_bits!(31, 31, bits_2, has_special_header as u32);
        Self { bits_1, bits_2 }
    }
    /// Returns the command type field.
    pub const fn get_command_type(&self) -> u32 {
        read_bits!(0, 15, self.bits_1)
    }
    /// Returns the send-static descriptor count.
    pub const fn get_send_static_count(&self) -> u32 {
        read_bits!(16, 19, self.bits_1)
    }
    /// Returns the send-buffer descriptor count.
    pub const fn get_send_buffer_count(&self) -> u32 {
        read_bits!(20, 23, self.bits_1)
    }
    /// Returns the receive-buffer descriptor count.
    pub const fn get_receive_buffer_count(&self) -> u32 {
        read_bits!(24, 27, self.bits_1)
    }
    /// Returns the exchange-buffer descriptor count.
    pub const fn get_exchange_buffer_count(&self) -> u32 {
        read_bits!(28, 31, self.bits_1)
    }
    /// Returns the raw data word count.
    pub const fn get_data_word_count(&self) -> u32 {
        read_bits!(0, 9, self.bits_2)
    }
    /// Returns the decoded receive-static descriptor count.
    pub const fn get_receive_static_count(&self) -> u32 {
        Self::decode_receive_static_type(read_bits!(10, 13, self.bits_2))
    }
    /// Returns whether a special header follows this header.
    pub const fn get_has_special_header(&self) -> bool {
        read_bits!(31, 31, self.bits_2) != 0
    }
}
/// Optional IPC header word carrying the send-process-id flag and the
/// copy/move handle counts.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
#[repr(C)]
pub struct CommandSpecialHeader {
    // Bit 0: send-process-id flag; bits 1-4: copy handle count; bits 5-8: move handle count.
    bits: u32,
}
impl CommandSpecialHeader {
    /// Creates a zeroed special header.
    pub const fn empty() -> Self {
        Self { bits: 0 }
    }
    /// Packs the process-id flag and both handle counts into the header word.
    pub const fn new(
        send_process_id: bool,
        copy_handle_count: u32,
        move_handle_count: u32,
    ) -> Self {
        let pid_flag = send_process_id as u32;
        let mut packed: u32 = 0;
        write_bits!(0, 0, packed, pid_flag);
        write_bits!(1, 4, packed, copy_handle_count);
        write_bits!(5, 8, packed, move_handle_count);
        Self { bits: packed }
    }
    /// Value of the send-process-id flag (bit 0).
    pub const fn get_send_process_id(&self) -> bool {
        read_bits!(0, 0, self.bits) != 0
    }
    /// Number of copy handles (bits 1-4).
    pub const fn get_copy_handle_count(&self) -> u32 {
        read_bits!(1, 4, self.bits)
    }
    /// Number of move handles (bits 5-8).
    pub const fn get_move_handle_count(&self) -> u32 {
        read_bits!(5, 8, self.bits)
    }
}
/// Alignment (in bytes) applied to the raw data section of the command buffer
/// (see `get_aligned_data_offset`).
pub const DATA_PADDING: u32 = 16;
// Capacity of every per-command descriptor/handle/object list below.
const MAX_COUNT: usize = 8;
/// Cursor over a raw byte buffer that reads/writes successive values,
/// aligning the offset up to each value's type alignment before access.
#[derive(Clone)]
pub struct DataWalker {
    // Base pointer of the buffer being walked.
    ptr: *mut u8,
    // Current byte offset from `ptr`; aligned up before each access.
    cur_offset: isize,
}
impl DataWalker {
    /// Creates a walker with a null base pointer; call [`Self::reset_with`]
    /// with a real buffer before using it.
    pub fn empty() -> Self {
        Self {
            ptr: ptr::null_mut(),
            cur_offset: 0,
        }
    }
    /// Creates a walker starting at offset zero of `ptr`.
    pub fn new(ptr: *mut u8) -> Self {
        Self { ptr, cur_offset: 0 }
    }
    /// Rounds the cursor up to `T`'s alignment, returns that aligned offset,
    /// and leaves the cursor just past a value of type `T`.
    fn align_and_bump<T>(&mut self) -> isize {
        let type_align = core::mem::align_of::<T>() as isize;
        self.cur_offset += type_align - 1;
        self.cur_offset -= self.cur_offset % type_align;
        let aligned_offset = self.cur_offset;
        self.cur_offset += core::mem::size_of::<T>() as isize;
        aligned_offset
    }
    /// Skips over a value of type `T` without touching memory.
    pub fn advance<T>(&mut self) {
        let _ = self.align_and_bump::<T>();
    }
    /// Reads the next value of type `T` and moves the cursor past it.
    pub fn advance_get<T>(&mut self) -> T {
        let offset = self.align_and_bump::<T>();
        // Even though the offsets are aligned relative to the start of the walk,
        // the base pointer itself may not be aligned for `T`, so an unaligned
        // read is required (libnx does the equivalent with a memcpy).
        unsafe { (self.ptr.offset(offset) as *const T).read_unaligned() }
    }
    /// Writes `t` at the next aligned position and moves the cursor past it.
    pub fn advance_set<T>(&mut self, t: T) {
        let offset = self.align_and_bump::<T>();
        // As in `advance_get`, the base pointer may lack sufficient alignment
        // for `T`, so use an unaligned write.
        unsafe { (self.ptr.offset(offset) as *mut T).write_unaligned(t) };
    }
    /// Rewinds the cursor to offset zero.
    pub fn reset(&mut self) {
        self.cur_offset = 0;
    }
    /// Rebases the walker onto a new buffer and rewinds it.
    pub fn reset_with(&mut self, ptr: *mut u8) {
        self.reset();
        self.ptr = ptr;
    }
    /// Current byte offset from the base pointer.
    pub fn get_offset(&self) -> isize {
        self.cur_offset
    }
}
#[inline(always)]
/// Returns a pointer to the current thread's IPC message buffer, taken from
/// its thread-local region.
pub fn get_msg_buffer() -> *mut u8 {
    unsafe { (*thread::get_thread_local_region()).msg_buffer.as_mut_ptr() }
}
#[inline(always)]
/// Fills an IPC array from a provided raw buffer.
///
/// Clears `array`, then reads `count` items of type `T` from `buffer` into it
/// and returns the pointer just past the consumed data.
///
/// # Arguments
///
/// * `buffer`: In data buffer
/// * `count`: In data size in T-count, not bytes
/// * `array`: The ipc array to write the data into
///
/// # Safety
///
/// The caller is responsible for providing a pointer valid to read `count * size_of::<T>()` bytes
pub unsafe fn read_array_from_buffer<T: Copy, const LEN: usize>(
    buffer: *mut u8,
    count: u32,
    array: &mut ArrayVec<T, LEN>,
) -> *mut u8 {
    debug_assert!(
        is_aligned!(buffer as usize, align_of::<T>()),
        "Data buffer is not properly aligned"
    );
    // Mirror of `write_array_to_buffer`'s capacity check: anything beyond the
    // array's capacity would be silently dropped by `try_extend_from_slice`,
    // so flag it in debug builds.
    debug_assert!(count as usize <= LEN, "Reading too many items into array");
    array.clear();
    unsafe {
        let tmp_buffer = buffer.cast();
        let _ =
            array.try_extend_from_slice(core::slice::from_raw_parts(tmp_buffer, count as usize));
        tmp_buffer.add(count as usize).cast()
    }
}
/// Writes an IPC array's contents into a provided raw buffer.
///
/// Copies `count` items of type `T` from `array` into `buffer` and returns
/// the pointer just past the written data.
///
/// # Arguments
///
/// * `buffer`: Out data buffer
/// * `count`: Out data size in T-count, not bytes
/// * `array`: The ipc array to read the data from
///
/// # Safety
///
/// The caller is responsible for providing a pointer valid to write `count * size_of::<T>()` bytes
#[inline(always)]
pub unsafe fn write_array_to_buffer<T: Copy, const LEN: usize>(
    buffer: *mut u8,
    count: u32,
    array: &ArrayVec<T, LEN>,
) -> *mut u8 {
    debug_assert!(
        is_aligned!(buffer as usize, align_of::<T>()),
        "Data buffer is not properly aligned"
    );
    // NOTE(review): `count` is only checked against the capacity `LEN`;
    // reading past `array.len()` would copy uninitialized elements — consider
    // asserting `count as usize <= array.len()` instead.
    debug_assert!(count as usize <= LEN, "Writing too many items to array");
    unsafe {
        let tmp_buffer = buffer as *mut T;
        core::ptr::copy(array.as_ptr(), tmp_buffer, count as usize);
        tmp_buffer.add(count as usize) as *mut u8
    }
}
#[inline(always)]
#[allow(clippy::not_unsafe_ptr_arg_deref)]
/// Advances `base_offset` by the distance to `data_words_offset`, rounded up
/// to the next `DATA_PADDING`-byte boundary.
pub fn get_aligned_data_offset(data_words_offset: *mut u8, base_offset: *mut u8) -> *mut u8 {
    let mask = DATA_PADDING as usize - 1;
    let delta = data_words_offset.addr() - base_offset.addr();
    let aligned_delta = (delta + mask) & !mask;
    unsafe { base_offset.add(aligned_delta) }
}
/// Per-direction payload of an IPC command: the raw data section plus the
/// handles, domain objects and out-pointer sizes exchanged with it.
pub struct CommandContent {
    // Whether the command carries the sender's process id.
    pub send_process_id: bool,
    pub process_id: u64,
    // Size in bytes of the raw data section.
    pub data_size: u32,
    // Pointers into the message buffer: aligned data start, raw data words,
    // and the objects section.
    pub data_offset: *mut u8,
    pub data_words_offset: *mut u8,
    pub objects_offset: *mut u8,
    // Handles to be copied / moved with the command.
    copy_handles: ArrayVec<svc::Handle, MAX_COUNT>,
    move_handles: ArrayVec<svc::Handle, MAX_COUNT>,
    // Domain object ids exchanged with the command.
    objects: ArrayVec<cmif::DomainObjectId, MAX_COUNT>,
    // Sizes announced for out pointer (static) buffers.
    out_pointer_sizes: ArrayVec<u16, MAX_COUNT>,
}
impl CommandContent {
    /// Creates a content block with zeroed data fields and empty handle/object lists.
    pub fn empty() -> Self {
        Self {
            send_process_id: false,
            process_id: 0,
            data_size: 0,
            data_offset: ptr::null_mut(),
            data_words_offset: ptr::null_mut(),
            objects_offset: ptr::null_mut(),
            copy_handles: ArrayVec::new(),
            move_handles: ArrayVec::new(),
            objects: ArrayVec::new(),
            out_pointer_sizes: ArrayVec::new(),
        }
    }
    /// Appends a handle to be copied, failing when the list is at capacity.
    fn add_copy_handle(&mut self, handle: svc::Handle) -> Result<()> {
        if self.copy_handles.try_push(handle).is_err() {
            return rc::ResultCopyHandlesFull::make_err();
        }
        Ok(())
    }
    /// Appends a handle to be moved, failing when the list is at capacity.
    fn add_move_handle(&mut self, handle: svc::Handle) -> Result<()> {
        if self.move_handles.try_push(handle).is_err() {
            return rc::ResultMoveHandlesFull::make_err();
        }
        Ok(())
    }
    /// Appends a typed handle, routed to the copy or move list by `MOVE`.
    pub fn add_handle<const MOVE: bool>(&mut self, handle: sf::Handle<MOVE>) -> Result<()> {
        if MOVE {
            self.add_move_handle(handle.handle)
        } else {
            self.add_copy_handle(handle.handle)
        }
    }
    /// Appends a domain object id, failing when the list is at capacity.
    pub fn add_domain_object(&mut self, domain_object_id: cmif::DomainObjectId) -> Result<()> {
        if self.objects.try_push(domain_object_id).is_err() {
            return rc::ResultDomainObjectsFull::make_err();
        }
        Ok(())
    }
    /// Appends the domain object id of `object_info`; fails for non-domain objects.
    pub fn add_object(&mut self, object_info: ObjectInfo) -> Result<()> {
        if !object_info.is_domain() {
            return rc::ResultInvalidDomainObject::make_err();
        }
        self.add_domain_object(object_info.domain_object_id)
    }
    /// Records the size announced for an out pointer buffer.
    fn add_out_pointer_size(&mut self, pointer_size: u16) -> Result<()> {
        if self.out_pointer_sizes.try_push(pointer_size).is_err() {
            return rc::ResultPointerSizesFull::make_err();
        }
        Ok(())
    }
    /// Removes and returns the oldest copy handle.
    pub fn pop_copy_handle(&mut self) -> Result<svc::Handle> {
        if let Some(handle) = self.copy_handles.pop_at(0) {
            Ok(handle)
        } else {
            cmif::rc::ResultInvalidOutObjectCount::make_err()
        }
    }
    /// Removes and returns the oldest move handle.
    pub fn pop_move_handle(&mut self) -> Result<svc::Handle> {
        if let Some(handle) = self.move_handles.pop_at(0) {
            Ok(handle)
        } else {
            cmif::rc::ResultInvalidOutObjectCount::make_err()
        }
    }
    /// Removes and returns the oldest handle of the kind selected by `MOVE`.
    pub fn pop_handle<const MOVE: bool>(&mut self) -> Result<sf::Handle<MOVE>> {
        let raw_handle = if MOVE {
            self.pop_move_handle()?
        } else {
            self.pop_copy_handle()?
        };
        Ok(sf::Handle::from(raw_handle))
    }
    /// Appends a copy handle, failing when the list is at capacity.
    fn push_copy_handle(&mut self, handle: svc::Handle) -> Result<()> {
        if self.copy_handles.try_push(handle).is_err() {
            return rc::ResultCopyHandlesFull::make_err();
        }
        Ok(())
    }
    /// Appends a move handle, failing when the list is at capacity.
    fn push_move_handle(&mut self, handle: svc::Handle) -> Result<()> {
        if self.move_handles.try_push(handle).is_err() {
            return rc::ResultMoveHandlesFull::make_err();
        }
        Ok(())
    }
    /// Appends a typed handle, routed to the copy or move list by `MOVE`.
    pub fn push_handle<const MOVE: bool>(&mut self, handle: sf::Handle<MOVE>) -> Result<()> {
        if MOVE {
            self.push_move_handle(handle.handle)
        } else {
            self.push_copy_handle(handle.handle)
        }
    }
    /// Removes and returns the oldest domain object id.
    pub fn pop_domain_object(&mut self) -> Result<cmif::DomainObjectId> {
        if let Some(object_id) = self.objects.pop_at(0) {
            Ok(object_id)
        } else {
            cmif::rc::ResultInvalidOutObjectCount::make_err()
        }
    }
    /// Appends a domain object id, failing when the list is at capacity.
    pub fn push_domain_object(&mut self, domain_object_id: cmif::DomainObjectId) -> Result<()> {
        if self.objects.try_push(domain_object_id).is_err() {
            return rc::ResultDomainObjectsFull::make_err();
        }
        Ok(())
    }
}
/// Full state of an in-flight IPC command: the target object, both parameter
/// directions, and every buffer/static descriptor attached to the command.
pub struct CommandContext {
    // Object (session/domain) this command is addressed to.
    pub object_info: ObjectInfo,
    // Parameters flowing in each direction.
    pub in_params: CommandContent,
    pub out_params: CommandContent,
    // Descriptor lists for every buffer kind of the command.
    send_statics: ArrayVec<SendStaticDescriptor, MAX_COUNT>,
    receive_statics: ArrayVec<ReceiveStaticDescriptor, MAX_COUNT>,
    send_buffers: ArrayVec<BufferDescriptor, MAX_COUNT>,
    receive_buffers: ArrayVec<BufferDescriptor, MAX_COUNT>,
    exchange_buffers: ArrayVec<BufferDescriptor, MAX_COUNT>,
    // Server side: base address of the pointer buffer. Client side: repurposed
    // to hold the pointer buffer SIZE (see `add_buffer`).
    pointer_buffer: *mut u8,
    // Bytes of the pointer buffer already claimed in each direction.
    in_pointer_buffer_offset: usize,
    out_pointer_buffer_offset: usize,
    // Walker over the out-pointer-size words; lazily positioned by
    // `ensure_pointer_size_walker`.
    pointer_size_walker: DataWalker,
    pointer_size_walker_initialized: bool,
}
impl CommandContext {
    /// Creates a context with no target object and no attached descriptors.
    pub fn empty() -> Self {
        Self {
            object_info: ObjectInfo::new(),
            in_params: CommandContent::empty(),
            out_params: CommandContent::empty(),
            send_statics: ArrayVec::new(),
            receive_statics: ArrayVec::new(),
            send_buffers: ArrayVec::new(),
            receive_buffers: ArrayVec::new(),
            exchange_buffers: ArrayVec::new(),
            pointer_buffer: core::ptr::null_mut(),
            in_pointer_buffer_offset: 0,
            out_pointer_buffer_offset: 0,
            pointer_size_walker: DataWalker::empty(),
            pointer_size_walker_initialized: false,
        }
    }
    /// Creates a client-side context addressed at `object_info`.
    pub fn new_client(object_info: ObjectInfo) -> Self {
        let mut ctx = Self::empty();
        ctx.object_info = object_info;
        ctx
    }
    /// Lazily positions `pointer_size_walker` just past the raw data section
    /// (CMIF protocol only), where the out-pointer-size `u16` words live.
    fn ensure_pointer_size_walker(&mut self, raw_data_walker: &mut DataWalker) {
        if !self.pointer_size_walker_initialized {
            if self.object_info.uses_cmif_protocol() {
                let mut data_size = raw_data_walker.get_offset()
                    + DATA_PADDING as isize
                    + mem::size_of::<cmif::DataHeader>() as isize;
                if self.object_info.is_domain() {
                    // Domain requests carry an extra header plus one id per in-object.
                    data_size += (mem::size_of::<cmif::DomainInDataHeader>()
                        + mem::size_of::<cmif::DomainObjectId>() * self.in_params.objects.len())
                        as isize;
                }
                // Round up to a multiple of 2, matching the u16 size words read later.
                data_size = (data_size + 1) & !1;
                let out_pointer_sizes_offset =
                    unsafe { self.in_params.data_words_offset.offset(data_size) };
                self.pointer_size_walker = DataWalker::new(out_pointer_sizes_offset);
            }
            self.pointer_size_walker_initialized = true;
        }
    }
    /// Creates a server-side context with the server's pointer buffer address.
    pub fn new_server(object_info: ObjectInfo, pointer_buffer: *mut u8) -> Self {
        let mut ctx = Self::empty();
        ctx.object_info = object_info;
        ctx.pointer_buffer = pointer_buffer;
        ctx
    }
    /// Appends a send-static descriptor, failing when the list is full.
    fn add_send_static(&mut self, send_static: SendStaticDescriptor) -> Result<()> {
        match self.send_statics.try_push(send_static) {
            Ok(()) => Ok(()),
            Err(_) => rc::ResultSendStaticsFull::make_err(),
        }
    }
    /// Appends a receive-static descriptor, failing when the list is full.
    fn add_receive_static(&mut self, receive_static: ReceiveStaticDescriptor) -> Result<()> {
        match self.receive_statics.try_push(receive_static) {
            Ok(()) => Ok(()),
            Err(_) => rc::ResultReceiveStaticsFull::make_err(),
        }
    }
    /// Appends a send-buffer descriptor, failing when the list is full.
    fn add_send_buffer(&mut self, send_buffer: BufferDescriptor) -> Result<()> {
        match self.send_buffers.try_push(send_buffer) {
            Ok(()) => Ok(()),
            Err(_) => rc::ResultSendBuffersFull::make_err(),
        }
    }
    /// Appends a receive-buffer descriptor, failing when the list is full.
    fn add_receive_buffer(&mut self, receive_buffer: BufferDescriptor) -> Result<()> {
        match self.receive_buffers.try_push(receive_buffer) {
            Ok(()) => Ok(()),
            Err(_) => rc::ResultReceiveBuffersFull::make_err(),
        }
    }
    /// Appends an exchange-buffer descriptor, failing when the list is full.
    fn add_exchange_buffer(&mut self, exchange_buffer: BufferDescriptor) -> Result<()> {
        match self.exchange_buffers.try_push(exchange_buffer) {
            Ok(()) => Ok(()),
            Err(_) => rc::ResultExchangeBuffersFull::make_err(),
        }
    }
    /// Registers `buffer` in the descriptor lists matching its buffer-attribute
    /// const generics (client side).
    ///
    /// For `AUTO_SELECT` buffers, a buffer descriptor and a static descriptor
    /// are always pushed together, with the unused one nulled, depending on
    /// whether the data still fits into the remaining pointer buffer space.
    pub fn add_buffer<
        const IN: bool,
        const OUT: bool,
        const MAP_ALIAS: bool,
        const POINTER: bool,
        const FIXED_SIZE: bool,
        const AUTO_SELECT: bool,
        const ALLOW_NON_SECURE: bool,
        const ALLOW_NON_DEVICE: bool,
        T,
    >(
        &mut self,
        buffer: &sf::Buffer<
            '_,
            IN,
            OUT,
            MAP_ALIAS,
            POINTER,
            FIXED_SIZE,
            AUTO_SELECT,
            ALLOW_NON_SECURE,
            ALLOW_NON_DEVICE,
            T,
        >,
    ) -> Result<()> {
        let buf_addr = buffer.get_address();
        let buf_size = buffer.get_size();
        if AUTO_SELECT {
            if self.pointer_buffer.is_null() {
                // Client side: `pointer_buffer` stores the pointer buffer SIZE
                // (cast to a pointer), not an address.
                self.pointer_buffer = self.object_info.query_pointer_buffer_size()? as *mut u8;
            }
            let pointer_buf_size = self.pointer_buffer as usize;
            let mut buffer_in_static = false;
            if pointer_buf_size > 0 {
                let left_size = pointer_buf_size - self.in_pointer_buffer_offset;
                buffer_in_static = buf_size <= left_size;
            }
            if buffer_in_static {
                self.in_pointer_buffer_offset += buf_size;
            }
            if IN {
                if buffer_in_static {
                    self.add_send_buffer(BufferDescriptor::new(
                        ptr::null(),
                        0,
                        BufferFlags::Normal,
                    ))?;
                    self.add_send_static(SendStaticDescriptor::new(
                        buf_addr,
                        buf_size,
                        self.send_statics.len() as u32,
                    ))?;
                } else {
                    self.add_send_buffer(BufferDescriptor::new(
                        buf_addr,
                        buf_size,
                        BufferFlags::Normal,
                    ))?;
                    self.add_send_static(SendStaticDescriptor::new(
                        ptr::null(),
                        0,
                        self.send_statics.len() as u32,
                    ))?;
                }
            }
            if OUT {
                if buffer_in_static {
                    self.add_receive_buffer(BufferDescriptor::new(
                        ptr::null(),
                        0,
                        BufferFlags::Normal,
                    ))?;
                    self.add_receive_static(ReceiveStaticDescriptor::new(buf_addr, buf_size))?;
                    self.in_params.add_out_pointer_size(buf_size as u16)?;
                } else {
                    self.add_receive_buffer(BufferDescriptor::new(
                        buf_addr,
                        buf_size,
                        BufferFlags::Normal,
                    ))?;
                    self.add_receive_static(ReceiveStaticDescriptor::new(ptr::null(), 0))?;
                    self.in_params.add_out_pointer_size(0)?;
                }
            }
        } else if POINTER {
            if IN {
                self.add_send_static(SendStaticDescriptor::new(
                    buf_addr,
                    buf_size,
                    self.send_statics.len() as u32,
                ))?;
            }
            if OUT {
                self.add_receive_static(ReceiveStaticDescriptor::new(buf_addr, buf_size))?;
                if !FIXED_SIZE {
                    self.in_params.add_out_pointer_size(buf_size as u16)?;
                }
            }
        } else if MAP_ALIAS {
            let mut flags = BufferFlags::Normal;
            if ALLOW_NON_SECURE {
                flags = BufferFlags::NonSecure;
            } else if ALLOW_NON_DEVICE {
                flags = BufferFlags::NonDevice;
            }
            let buf_desc = BufferDescriptor::new(buf_addr, buf_size, flags);
            match (IN, OUT) {
                (true, true) => self.add_exchange_buffer(buf_desc),
                (true, false) => self.add_send_buffer(buf_desc),
                (false, true) => self.add_receive_buffer(buf_desc),
                (false, false) => Ok(()),
            }?;
        } else {
            return rc::ResultInvalidBufferAttributes::make_err();
        }
        Ok(())
    }
    /// Removes and returns the oldest send-static descriptor.
    fn pop_send_static(&mut self) -> Result<SendStaticDescriptor> {
        match self.send_statics.pop_at(0) {
            Some(send_static) => Ok(send_static),
            None => rc::ResultInvalidSendStaticCount::make_err(),
        }
    }
    /// Removes and returns the oldest receive-static descriptor.
    fn pop_receive_static(&mut self) -> Result<ReceiveStaticDescriptor> {
        match self.receive_statics.pop_at(0) {
            Some(receive_static) => Ok(receive_static),
            None => rc::ResultInvalidReceiveStaticCount::make_err(),
        }
    }
    /// Removes and returns the oldest send-buffer descriptor.
    fn pop_send_buffer(&mut self) -> Result<BufferDescriptor> {
        match self.send_buffers.pop_at(0) {
            Some(send_buffer) => Ok(send_buffer),
            None => rc::ResultInvalidSendBufferCount::make_err(),
        }
    }
    /// Removes and returns the oldest receive-buffer descriptor.
    fn pop_receive_buffer(&mut self) -> Result<BufferDescriptor> {
        match self.receive_buffers.pop_at(0) {
            Some(receive_buffer) => Ok(receive_buffer),
            None => rc::ResultInvalidReceiveBufferCount::make_err(),
        }
    }
    /// Removes and returns the oldest exchange-buffer descriptor.
    fn pop_exchange_buffer(&mut self) -> Result<BufferDescriptor> {
        match self.exchange_buffers.pop_at(0) {
            Some(exchange_buffer) => Ok(exchange_buffer),
            None => rc::ResultInvalidExchangeBufferCount::make_err(),
        }
    }
    /// Retrieves the `(address, size)` of a buffer argument, consuming the
    /// descriptors that were pushed for it (the counterpart of `add_buffer`).
    fn pop_buffer<
        const IN: bool,
        const OUT: bool,
        const MAP_ALIAS: bool,
        const POINTER: bool,
        const FIXED_SIZE: bool,
        const AUTO_SELECT: bool,
        const ALLOW_NON_SECURE: bool,
        const ALLOW_NON_DEVICE: bool,
        T,
    >(
        &mut self,
        raw_data_walker: &mut DataWalker,
    ) -> Result<(*mut u8, usize)> {
        if AUTO_SELECT {
            // Auto-select sends both a static and a buffer descriptor; the
            // non-null, non-empty one is the one actually carrying the data.
            if IN {
                if let Ok(static_desc) = self.pop_send_static()
                    && let Ok(send_desc) = self.pop_send_buffer()
                {
                    if !static_desc.get_address().is_null() && (static_desc.get_size() > 0) {
                        return Ok((static_desc.get_address(), static_desc.get_size()));
                    }
                    if !send_desc.get_address().is_null() && (send_desc.get_size() > 0) {
                        return Ok((send_desc.get_address(), send_desc.get_size()));
                    }
                }
            } else if OUT
                && let Ok(static_desc) = self.pop_receive_static()
                && let Ok(recv_desc) = self.pop_receive_buffer()
            {
                if !static_desc.get_address().is_null() && (static_desc.get_size() > 0) {
                    return Ok((static_desc.get_address(), static_desc.get_size()));
                }
                if !recv_desc.get_address().is_null() && (recv_desc.get_size() > 0) {
                    return Ok((recv_desc.get_address(), recv_desc.get_size()));
                }
            }
        } else if POINTER {
            if IN && let Ok(static_desc) = self.pop_send_static() {
                return Ok((static_desc.get_address(), static_desc.get_size()));
            } else if OUT {
                let buf_size = match FIXED_SIZE {
                    true => core::mem::size_of::<T>(),
                    false => {
                        // Variable-sized out pointers: read the announced size word.
                        self.ensure_pointer_size_walker(raw_data_walker);
                        self.pointer_size_walker.advance_get::<u16>() as usize
                    }
                };
                // Carve the out data out of the pointer buffer.
                let buf = unsafe { self.pointer_buffer.add(self.out_pointer_buffer_offset) };
                self.out_pointer_buffer_offset += buf_size;
                return Ok((buf, buf_size));
            }
        } else if MAP_ALIAS {
            match (IN, OUT) {
                (true, true) => {
                    if let Ok(exch_desc) = self.pop_exchange_buffer() {
                        return Ok((exch_desc.get_address(), exch_desc.get_size()));
                    }
                }
                (true, false) => {
                    if let Ok(send_desc) = self.pop_send_buffer() {
                        return Ok((send_desc.get_address(), send_desc.get_size()));
                    }
                }
                (false, true) => {
                    if let Ok(recv_desc) = self.pop_receive_buffer() {
                        return Ok((recv_desc.get_address(), recv_desc.get_size()));
                    }
                }
                (false, false) => {}
            }
        }
        rc::ResultInvalidBufferAttributes::make_err()
    }
    /// Pops the next out object: a domain object id when talking to a domain,
    /// otherwise a moved session handle.
    pub fn pop_object(&mut self) -> Result<ObjectInfo> {
        if self.object_info.is_domain() {
            let domain_object_id = self.out_params.pop_domain_object()?;
            Ok(ObjectInfo::from_domain_object_id(
                self.object_info.handle,
                domain_object_id,
            ))
        } else {
            let handle: sf::MoveHandle = self.out_params.pop_handle()?;
            Ok(ObjectInfo::from_handle(handle.handle))
        }
    }
}
pub mod client;
pub mod server;
pub mod cmif;
pub mod tipc;
pub mod sf;
//! Threading support and wrappers
use ::alloc::boxed::Box;
use ::alloc::string::String;
use ::alloc::string::ToString;
use ::alloc::sync::Arc;
use imp::LibNxThreadVars;
use crate::diag::abort;
use crate::diag::abort::AbortLevel;
use crate::result::*;
use crate::svc;
use crate::util;
use core::cell::UnsafeCell;
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::mem::ManuallyDrop;
use core::pin::Pin;
pub mod rc;
//pub mod _local;
pub mod scoped;
/// Numeric identifier of a thread.
type ThreadId = u64;
/// Thread factory, which can be used in order to configure the properties of
/// a new thread.
///
/// Methods can be chained on it in order to configure it.
///
/// The two configurations available are:
///
/// - [`name`]: specifies an [associated name for the thread][naming-threads]
/// - [`stack_size`]: specifies the [desired stack size for the thread][stack-size]
///
/// The [`spawn`] method will take ownership of the builder and return a
/// [`crate::result::Result`] containing the thread handle created with the given
/// configuration.
///
/// The [`thread::spawn`] free function uses a `Builder` with default
/// configuration and [`unwrap`]s its return value.
///
/// You may want to use [`spawn`] instead of [`thread::spawn`], when you want
/// to recover from a failure to launch a thread, indeed the free function will
/// panic where the `Builder` method will return a [`crate::result::Result`].
///
/// # Examples
///
/// ```
/// use nx::thread;
///
/// let builder = thread::Builder::new();
///
/// let handler = builder.spawn(|| {
/// // thread code
/// }).unwrap();
///
/// handler.join().unwrap();
/// ```
///
/// [`stack_size`]: Builder::stack_size
/// [`name`]: Builder::name
/// [`spawn`]: Builder::spawn
/// [`thread::spawn`]: spawn
/// [`Result`]: crate::result::Result
/// [`unwrap`]: crate::result::Result::unwrap
/// [naming-threads]: ./index.html#naming-threads
/// [stack-size]: ./index.html#stack-size
#[derive(Default)]
#[must_use = "must eventually spawn the thread"]
pub struct Builder {
    // A name for the thread-to-be, for identification in panic messages
    name: Option<String>,
    // The size of the stack for the spawned thread in bytes (0x8000 when unset)
    stack_size: Option<usize>,
    // The priority of the thread to spawn (ThreadPriority::default() when unset)
    priority: Option<ThreadPriority>,
    // The requested core of the thread to spawn (ThreadStartCore::default() when unset)
    core: Option<ThreadStartCore>,
}
impl Builder {
    /// Generates the base configuration for spawning a thread, from which
    /// configuration methods can be chained.
    ///
    /// # Examples
    ///
    /// ```
    /// use nx::thread;
    ///
    /// let builder = thread::Builder::new()
    ///     .name("foo".into())
    ///     .stack_size(32 * 1024);
    ///
    /// let handler = builder.spawn(|| {
    ///     // thread code
    /// }).unwrap();
    ///
    /// handler.join().unwrap();
    /// ```
    pub fn new() -> Builder {
        Default::default()
    }
    /// Names the thread-to-be. Currently the name is used for identification
    /// only in panic messages.
    ///
    /// The name must not contain null bytes (`\0`).
    ///
    /// For more information about named threads, see
    /// [this module-level documentation][naming-threads].
    ///
    /// # Examples
    ///
    /// ```
    /// use nx::thread;
    ///
    /// let builder = thread::Builder::new()
    ///     .name("foo".into());
    ///
    /// let handler = builder.spawn(|| {
    ///     assert_eq!(thread::current().name(), Some("foo"))
    /// }).unwrap();
    ///
    /// handler.join().unwrap();
    /// ```
    ///
    /// [naming-threads]: ./index.html#naming-threads
    pub fn name<S: ToString>(mut self, name: S) -> Builder {
        self.name = Some(name.to_string());
        self
    }
    /// Sets the priority for the new thread
    ///
    /// # Examples
    ///
    /// ```
    /// use nx::thread;
    ///
    /// let builder = thread::Builder::new().priority(ThreadPriority::Default);
    /// ```
    pub fn priority(mut self, priority: ThreadPriority) -> Builder {
        self.priority = Some(priority);
        self
    }
    /// Sets the size of the stack (in bytes) for the new thread, to be allocated during stack creation
    ///
    /// The actual stack size may be greater to align up to the page size.
    ///
    /// For more information about the stack size for threads, see
    /// [this module-level documentation][stack-size].
    ///
    /// # Examples
    ///
    /// ```
    /// use nx::thread;
    ///
    /// let builder = thread::Builder::new().stack_size(32 * 1024);
    /// ```
    pub fn stack_size(mut self, size: usize) -> Builder {
        self.stack_size = Some(size);
        self
    }
    /// Sets the CPU core for the new thread to start on
    ///
    /// # Examples
    ///
    /// ```
    /// use nx::thread;
    ///
    /// let builder = thread::Builder::new().core(ThreadStartCore::Default);
    /// ```
    pub fn core(mut self, core: ThreadStartCore) -> Builder {
        self.core = Some(core);
        self
    }
    /// Spawns a new thread by taking ownership of the `Builder`, and returns an
    /// [`crate::result::Result`] to its [`JoinHandle`].
    ///
    /// The spawned thread may outlive the caller (unless the caller thread
    /// is the main thread; the whole process is terminated when the main
    /// thread finishes). The join handle can be used to block on
    /// termination of the spawned thread, including recovering its panics.
    ///
    /// For a more complete documentation see [`thread::spawn`][`spawn`].
    ///
    /// # Errors
    ///
    /// Unlike the [`spawn`] free function, this method yields an
    /// [`crate::result::Result`] to capture any failure to create the thread at
    /// the OS level.
    ///
    /// [`crate::result::Result`]: crate::result::Result
    ///
    /// # Panics
    ///
    /// Panics if a thread name was set and it contained null bytes.
    ///
    /// # Examples
    ///
    /// ```
    /// use nx::thread;
    ///
    /// let builder = thread::Builder::new();
    ///
    /// let handler = builder.spawn(|| {
    ///     // thread code
    /// }).unwrap();
    ///
    /// handler.join().unwrap();
    /// ```
    pub fn spawn<F, T>(self, f: F) -> crate::result::Result<JoinHandle<T, false>>
    where
        F: FnOnce() -> T,
        F: Send + 'static,
        T: Send + 'static,
    {
        unsafe { self.spawn_unchecked(f) }
    }
    /// Like [`Builder::spawn`], but the returned [`JoinHandle`] has its
    /// `WAIT_ON_DROP` const parameter set to `true` (see [`JoinHandle`] for
    /// the resulting drop behavior).
    pub fn spawn_blocking<F, T>(self, f: F) -> crate::result::Result<JoinHandle<T, true>>
    where
        F: FnOnce() -> T,
        F: Send + 'static,
        T: Send + 'static,
    {
        unsafe { self.spawn_unchecked(f) }
    }
    /// Spawns a new thread without any lifetime restrictions by taking ownership
    /// of the `Builder`, and returns an [`crate::result::Result`] to its [`JoinHandle`].
    ///
    /// The spawned thread may outlive the caller (unless the caller thread
    /// is the main thread; the whole process is terminated when the main
    /// thread finishes). The join handle can be used to block on
    /// termination of the spawned thread, including recovering its panics.
    ///
    /// This method is identical to [`thread::Builder::spawn`][`Builder::spawn`],
    /// except for the relaxed lifetime bounds, which render it unsafe.
    /// For a more complete documentation see [`thread::spawn`][`spawn`].
    ///
    /// # Errors
    ///
    /// Unlike the [`spawn`] free function, this method yields an
    /// [`crate::result::Result`] to capture any failure to create the thread at
    /// the OS level.
    ///
    /// # Panics
    ///
    /// Panics if a thread name was set and it contained null bytes.
    ///
    /// # Safety
    ///
    /// The caller has to ensure that the spawned thread does not outlive any
    /// references in the supplied thread closure and its return type.
    /// This can be guaranteed in two ways:
    ///
    /// - ensure that [`join`][`JoinHandle::join`] is called before any referenced
    ///   data is dropped
    /// - use only types with `'static` lifetime bounds, i.e., those with no or only
    ///   `'static` references (both [`thread::Builder::spawn`][`Builder::spawn`]
    ///   and [`thread::spawn`][`spawn`] enforce this property statically)
    ///
    /// # Examples
    ///
    /// ```
    /// use nx::thread;
    ///
    /// let builder = thread::Builder::new();
    ///
    /// let x = 1;
    /// let thread_x = &x;
    ///
    /// let handler = unsafe {
    ///     builder.spawn_unchecked(move || {
    ///         println!("x = {}", *thread_x);
    ///     }).unwrap()
    /// };
    ///
    /// // caller has to ensure `join()` is called, otherwise
    /// // it is possible to access freed memory if `x` gets
    /// // dropped before the thread closure is executed!
    /// handler.join().unwrap();
    /// ```
    ///
    /// [`Result`]: crate::result::Result
    pub unsafe fn spawn_unchecked<F, T, const WAIT_ON_DROP: bool>(
        self,
        f: F,
    ) -> crate::result::Result<JoinHandle<T, WAIT_ON_DROP>>
    where
        F: FnOnce() -> T + 'static,
        F: Send,
        T: Send + 'static,
    {
        Ok(JoinHandle(unsafe { self.spawn_unchecked_(f, None) }?))
    }
    // Core spawn implementation shared by the public spawn methods and the
    // scoped-thread support: packages the closure and its result slot,
    // creates the OS-level thread and starts it.
    #[allow(unsafe_op_in_unsafe_fn)]
    unsafe fn spawn_unchecked_<F, T>(
        self,
        f: F,
        scope_data: Option<Arc<scoped::ScopeData>>,
    ) -> crate::result::Result<JoinInner<'static, T>>
    where
        F: FnOnce() -> T + 'static,
        F: Send,
        T: Send + 'static,
    {
        let Builder {
            name,
            stack_size,
            priority,
            core,
        } = self;
        // Default stack size is 0x8000 (32 KiB).
        let stack_size = stack_size.unwrap_or(0x8000);
        let name = name
            .map(|s| ThreadName::from_string(&s))
            .unwrap_or(ThreadName::new());
        let priority = priority.unwrap_or_default();
        let core = core.unwrap_or_default();
        let my_thread = Thread::new_inner(name);
        // NOTE(review): this clone looks unused beyond holding an extra
        // reference to the thread data — confirm before removing.
        let _their_thread = my_thread.clone();
        let my_packet: Arc<Packet<'static, T>> = Arc::new(Packet {
            scope: scope_data,
            result: UnsafeCell::new(None),
            _marker: PhantomData,
        });
        let their_packet = my_packet.clone();
        // Pass `f` in `MaybeUninit` because actually that closure might *run longer than the lifetime of `F`*.
        // See <https://github.com/rust-lang/rust/issues/101983> for more details.
        // To prevent leaks we use a wrapper that drops its contents.
        #[repr(transparent)]
        struct MaybeDangling<T>(mem::MaybeUninit<T>);
        impl<T> MaybeDangling<T> {
            fn new(x: T) -> Self {
                MaybeDangling(mem::MaybeUninit::new(x))
            }
            fn into_inner(self) -> T {
                // Make sure we don't drop.
                let this = ManuallyDrop::new(self);
                // SAFETY: we are always initialized.
                unsafe { this.0.assume_init_read() }
            }
        }
        impl<T> Drop for MaybeDangling<T> {
            fn drop(&mut self) {
                // SAFETY: we are always initialized.
                unsafe { self.0.assume_init_drop() };
            }
        }
        let f = MaybeDangling::new(f);
        let main = move || {
            let f = f.into_inner();
            // Run the user closure, catching any panic so it can be reported
            // through the join handle instead of taking down the process.
            let try_result = unwinding::panic::catch_unwind(core::panic::AssertUnwindSafe(f));
            // SAFETY: `their_packet` has been built just above and moved by the
            // closure (it is an Arc<...>) and `my_packet` will be stored in the
            // same `JoinInner` as this closure meaning the mutation will be
            // safe (not modify it and affect a value far away).
            unsafe { *their_packet.result.get() = Some(try_result) };
            // Here `their_packet` gets dropped, and if this is the last `Arc` for that packet that
            // will call `decrement_num_running_threads` and therefore signal that this thread is
            // done.
            drop(their_packet);
            // Here, the lifetime `'static` can end. `main` keeps running for a bit
            // after that before returning itself.
        };
        if let Some(scope_data) = &my_packet.scope {
            scope_data.increment_num_running_threads();
        }
        let main = Box::new(main);
        // SAFETY: dynamic size and alignment of the Box remain the same. See below for why the
        // lifetime change is justified.
        let main =
            unsafe { Box::from_raw(Box::into_raw(main) as *mut (dyn FnOnce() + Send + 'static)) };
        let mut thread_storage = Arc::new(imp::Thread::empty());
        let thread_handle = thread_storage.init_in_place(stack_size, main, priority, core)?;
        let mut res = JoinInner {
            // SAFETY:
            //
            // `imp::Thread::new` takes a closure with a `'static` lifetime, since it's passed
            // through FFI or otherwise used with low-level threading primitives that have no
            // notion of or way to enforce lifetimes.
            //
            // As mentioned in the `Safety` section of this function's documentation, the caller of
            // this function needs to guarantee that the passed-in lifetime is sufficiently long
            // for the lifetime of the thread.
            //
            // Similarly, the `sys` implementation must guarantee that no references to the closure
            // exist after the thread has terminated, which is signaled by `Thread::join`
            // returning.
            native: Pin::new(thread_storage),
            thread: my_thread,
            packet: my_packet,
        };
        // we are still the only running reader/writer since the thread hasn't been started
        // so we can just reach inside the pin as long as we don't cause a move
        unsafe {
            res.thread.inner.set_handle(thread_handle);
        }
        res.native.start()?;
        Ok(res)
    }
}
/// Spawns a new thread, returning a [`JoinHandle`] for it.
///
/// The join handle provides a [`join`] method that can be used to join the spawned
/// thread. If the spawned thread panics, [`join`] will return an [`Err`] containing
/// the argument given to [`panic!`].
///
/// If the join handle is dropped, the spawned thread will implicitly be *detached*.
/// In this case, the spawned thread may no longer be joined.
/// (It is the responsibility of the program to either eventually join threads it
/// creates or detach them; otherwise, a resource leak will result.)
///
/// This call will create a thread using the default parameters of [`Builder`];
/// if you want to specify the stack size or the name of the thread, use the
/// [`Builder`] API instead.
///
/// As you can see in the signature of `spawn` there are two constraints on
/// both the closure given to `spawn` and its return value, let's explain them:
///
/// - The `'static` constraint means that the closure and its return value
/// must have a lifetime of the whole program execution. The reason for this
/// is that threads can outlive the lifetime they have been created in.
///
/// Indeed if the thread, and by extension its return value, can outlive their
/// caller, we need to make sure that they will be valid afterwards, and since
/// we *can't* know when it will return we need to have them valid as long as
/// possible, that is until the end of the program, hence the `'static`
/// lifetime.
/// - The [`Send`] constraint is because the closure will need to be passed
/// *by value* from the thread where it is spawned to the new thread. Its
/// return value will need to be passed from the new thread to the thread
/// where it is `join`ed.
/// As a reminder, the [`Send`] marker trait expresses that it is safe to be
/// passed from thread to thread. [`Sync`] expresses that it is safe to have a
/// reference be passed from thread to thread.
///
/// # Panics
///
/// Panics if the OS fails to create a thread; use [`Builder::spawn`]
/// to recover from such errors.
///
/// # Examples
///
/// Creating a thread.
///
/// ```
/// use nx::thread;
///
/// let handler = thread::spawn(|| {
/// // thread code
/// });
///
/// handler.join().unwrap();
/// ```
///
/// As mentioned in the module documentation, threads are usually made to
/// communicate using `channels`, here is how it usually looks.
///
/// This example also shows how to use `move`, in order to give ownership
/// of values to a thread.
///
/// ```
/// use nx::thread;
/// use uchan::channel;
///
/// let (tx, rx) = channel();
///
/// let sender = thread::spawn(move || {
/// tx.send("Hello, thread".to_owned())
/// .expect("Unable to send on channel");
/// });
///
/// let receiver = thread::spawn(move || {
/// let value = rx.recv().expect("Unable to receive from channel");
/// println!("{value}");
/// });
///
/// sender.join().expect("The sender thread has panicked");
/// receiver.join().expect("The receiver thread has panicked");
/// ```
///
/// A thread can also return a value through its [`JoinHandle`], you can use
/// this to make asynchronous computations (futures might be more appropriate
/// though).
///
/// ```
/// use nx::thread;
///
/// let computation = thread::spawn(|| {
/// // Some expensive computation.
/// 42
/// });
///
/// let result = computation.join().unwrap();
/// println!("{result}");
/// ```
///
/// # Notes
///
/// This function has the same minimal guarantee regarding "foreign" unwinding operations (e.g.
/// an exception thrown from C++ code, or a `panic!` in Rust code compiled or linked with a
/// different runtime) as [`catch_unwind`]; namely, if the thread created with `thread::spawn`
/// unwinds all the way to the root with such an exception, one of two behaviors are possible,
/// and it is unspecified which will occur:
///
/// * The process aborts.
/// * The process does not abort, and [`join`] will return a `Result::Err`
/// containing an opaque type.
///
/// [`catch_unwind`]: unwinding::panic::catch_unwind
/// [`join`]: JoinHandle::join
/// [`Err`]: crate::result::Result::Err
pub fn spawn<F, T>(f: F) -> JoinHandle<T, false>
where
F: FnOnce() -> T,
F: Send + 'static,
T: Send + 'static,
{
Builder::new().spawn(f).expect("failed to spawn thread")
}
/// Internal wrapper function that is run as the entrypoint to newly spawned threads.
///
/// Receives the raw pointer handed to the kernel at spawn time, reclaims
/// ownership of the boxed [`ThreadArgs`], publishes the thread object via
/// thread-local storage, runs the payload, and then exits the thread. Never
/// returns.
#[doc(hidden)]
#[allow(unsafe_op_in_unsafe_fn)]
unsafe extern "C" fn thread_wrapper(raw_ptr: *mut u8) -> ! {
    // SAFETY: This is fine as it is created with a call to Box::<ThreadArgs>::new()
    // on the spawning side, so reconstructing the Box here reclaims ownership
    // exactly once.
    let entry_env: Box<ThreadArgs> = Box::from_raw(raw_ptr.cast());
    // SAFETY: Publish this thread's object through TLS. It may get mutated by
    // the running thread from here on, so the parent thread *MUST* never
    // read/modify the thread object while it is running.
    set_current_thread(&*entry_env.thread.as_ref() as *const _ as _);
    // Runs only once; we don't need to handle panics here as they're handled
    // with catch_unwind in the runner. The runner is a wrapper of the thread
    // payload that captures the thread environment (e.g. the packet used for
    // returning data from the thread).
    (entry_env.runner)();
    // Mark ourselves terminated through thread-local storage, just like the
    // runner would have to access the thread state.
    current().as_mut().unwrap().state = ThreadState::Terminated;
    // Hand the thread back to the kernel; this call does not return.
    svc::exit_thread();
}
/// Heap-allocated start arguments passed to `thread_wrapper` through the raw
/// entrypoint pointer.
struct ThreadArgs {
    // Type-erased wrapper around the user payload; per `thread_wrapper`, it
    // captures the thread environment (e.g. the result packet) and handles
    // panics internally via catch_unwind.
    runner: Box<dyn FnOnce() + Send + 'static>,
    // The spawned thread's implementation object, published via TLS by
    // `thread_wrapper`; pinned so the raw pointer stored in TLS stays valid.
    thread: Pin<Arc<imp::Thread>>,
}
#[derive(Clone)]
#[allow(rustdoc::private_intra_doc_links)]
/// A handle to a thread.
///
/// Threads are represented via the `Thread` type, which you can get in one of
/// two ways:
///
/// * By spawning a new thread, e.g., using the [`thread::spawn`][`spawn`]
///   function, and calling [`thread`][`JoinHandle::thread`] on the
///   [`JoinHandle`].
/// * By requesting the current thread, using the (private, for now) [`thread::current`][`current`] function.
///
/// There is usually no need to create a `Thread` struct yourself, one
/// should instead use a function like `spawn` to create new threads, see the
/// docs of [`Builder`] and [`spawn`] for more details.
pub struct Thread {
    // Shared, pinned state; pinned so the `Inner` address stays stable while
    // the handle (and its clones) are alive.
    inner: Pin<Arc<Inner>>,
}
impl Thread {
    /// Builds a `Thread` handle for a thread owned elsewhere (e.g. a handle
    /// received from another process/service), from its name and raw kernel
    /// handle.
    pub fn new_remote<S: AsRef<str>>(name: S, handle: svc::Handle) -> Thread {
        Self {
            inner: Pin::new(Arc::new(Inner {
                name: name.as_ref().into(),
                thread_handle: UnsafeCell::new(handle),
            })),
        }
    }
    /// Constructs a `Thread` with the given name and no kernel handle yet
    /// (the handle is installed later via `Inner::set_handle`).
    ///
    /// # Safety
    /// If `name` is `ThreadName::Other(_)`, the contained string must be valid UTF-8.
    unsafe fn new_inner(name: ThreadName) -> Thread {
        // We have to use `unsafe` here to construct the `Inner` in-place.
        //
        // SAFETY: We pin the Arc immediately after creation, so its address never
        // changes.
        let inner = unsafe {
            let mut arc = Arc::<Inner>::new_uninit();
            // The Arc was just created, so we hold the only reference and
            // `get_mut` cannot fail.
            let ptr = Arc::get_mut(&mut arc).unwrap_unchecked().as_mut_ptr();
            (&raw mut (*ptr).name).write(name);
            // No kernel handle yet; sentinel until `set_handle` is called.
            *(*ptr).thread_handle.get_mut() = svc::INVALID_HANDLE;
            Pin::new_unchecked(arc.assume_init())
        };
        Thread { inner }
    }
    /// Gets the thread's unique identifier.
    ///
    /// # Panics
    ///
    /// Panics if the kernel rejects the stored handle.
    /// NOTE(review): the stored handle is `INVALID_HANDLE` until the thread is
    /// started — confirm callers only query started/remote threads.
    ///
    /// # Examples
    ///
    /// ```
    /// use crate::thread;
    ///
    /// let other_thread = thread::spawn(|| {
    ///     thread::current().id()
    /// });
    ///
    /// let other_thread_id = other_thread.join().unwrap();
    /// assert!(thread::current().id() != other_thread_id);
    /// ```
    #[must_use]
    pub fn id(&self) -> ThreadId {
        svc::get_thread_id(unsafe { *self.inner.thread_handle.get() }).unwrap()
    }
    /// Gets the thread's name.
    ///
    /// For more information about named threads, see
    /// [this module-level documentation][naming-threads].
    ///
    /// # Examples
    ///
    /// Threads by default have no name specified:
    ///
    /// ```
    /// use crate::thread;
    ///
    /// let builder = thread::Builder::new();
    ///
    /// let handler = builder.spawn(|| {
    ///     assert!(thread::current().name().is_none());
    /// }).unwrap();
    ///
    /// handler.join().unwrap();
    /// ```
    ///
    /// Thread with a specified name:
    ///
    /// ```
    /// use crate::thread;
    ///
    /// let builder = thread::Builder::new()
    ///     .name("foo".into());
    ///
    /// let handler = builder.spawn(|| {
    ///     assert_eq!(thread::current().name(), Some("foo"))
    /// }).unwrap();
    ///
    /// handler.join().unwrap();
    /// ```
    ///
    /// [naming-threads]: ./index.html#naming-threads
    #[must_use]
    pub fn name(&self) -> ThreadName {
        // Returned by value out of a shared borrow, so `ThreadName` is `Copy`.
        self.inner.name
    }
    /// Atomically makes the handle’s token available if it is not already.
    ///
    /// Has no effect if the thread is currently unparked; parked threads are
    /// marked runnable for scheduling.
    pub fn unpark(&self) {
        // Best-effort: the result of the scheduler-state change is ignored.
        let _ = unsafe {
            svc::set_thread_activity(
                core::ptr::read(self.inner.thread_handle.get()),
                svc::SchedulerState::Runnable,
            )
        };
    }
}
impl fmt::Debug for Thread {
    /// Formats the thread as `Thread { id: …, name: …, .. }`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("Thread");
        dbg.field("id", &self.id());
        dbg.field("name", &self.name());
        dbg.finish_non_exhaustive()
    }
}
/// Shared state behind a [`Thread`] handle.
struct Inner {
    // Name assigned at creation time.
    name: ThreadName,
    // Raw kernel handle; interior-mutable so it can be installed after the
    // `Inner` has been created and pinned (see `set_handle`).
    thread_handle: UnsafeCell<svc::Handle>,
}
// SAFETY: NOTE(review): the `UnsafeCell` itself provides no synchronization;
// this relies on `set_handle` only being called while the caller has exclusive
// access (per its safety contract) and on subsequent reads being plain copies
// of the handle — confirm all access sites uphold this.
unsafe impl Sync for Inner {}
impl Inner {
    /// Installs the kernel handle after construction.
    ///
    /// # Safety
    /// Must not race with any other read or write of the handle: the caller
    /// must have exclusive access (e.g. the thread has not been started or
    /// shared yet).
    pub(self) unsafe fn set_handle(&self, handle: svc::Handle) {
        unsafe { *self.thread_handle.get() = handle };
    }
}
////////////////////////////////////////////////////////////////////////////////
// JoinHandle
////////////////////////////////////////////////////////////////////////////////
/// A specialized [`Result`] type for threads.
///
/// (Note: this alias shadows `core::result::Result` when imported from this
/// module.)
///
/// Indicates the manner in which a thread exited.
///
/// The value contained in the `Result::Err` variant
/// is the value the thread panicked with;
/// that is, the argument the `panic!` macro was called with.
/// Unlike with normal errors, this value doesn't implement
/// the [`Error`](core::error::Error) trait.
///
/// Thus, a sensible way to handle a thread panic is to either:
///
/// 1. re-raise the panic with [`unwinding::panic::begin_panic`]
/// 2. or in case the thread is intended to be a subsystem boundary
///    that is supposed to isolate system-level failures,
///    match on the `Err` variant and handle the panic in an appropriate way
///
/// A thread that completes without panicking is considered to exit successfully.
///
/// # Examples
///
/// Matching on the result of a joined thread:
///
/// ```no_run
/// use crate::{fs, thread, panic};
///
/// fn copy_in_thread() -> thread::Result<()> {
///     thread::spawn(|| {
///         fs::copy("sdmc://foo.txt", "sdmc://bar.txt").unwrap();
///     }).join()
/// }
///
/// fn main() {
///     fs::initialize_fspsrv_session();
///     fs::mount_sd_card("sdmc");
///
///     match copy_in_thread() {
///         Ok(_) => println!("copy succeeded"),
///         Err(e) => unwinding::panic::begin_panic(e),
///     }
/// }
/// ```
///
/// [`Result`]: core::result::Result
/// [`begin_panic`]: unwinding::panic::begin_panic
pub type Result<T> = core::result::Result<T, Box<dyn core::any::Any + Send + 'static>>;
/// This packet is used to communicate the return value between the spawned
/// thread and the rest of the program. It is shared through an `Arc` and
/// there's no need for a mutex here because synchronization happens with `join()`
/// (the caller will never read this packet until the thread has exited).
///
/// An Arc to the packet is stored into a `JoinInner` which in turn is placed
/// in `JoinHandle`.
struct Packet<'scope, T> {
    // Link back to the owning scope (if any), used for book-keeping in `Drop`.
    scope: Option<Arc<scoped::ScopeData>>,
    // Written by the spawned thread, taken once by `join()`.
    result: UnsafeCell<Option<Result<T>>>,
    // Ties the packet to the scope's lifetime without storing a reference.
    _marker: PhantomData<Option<&'scope scoped::ScopeData>>,
}
// Due to the usage of `UnsafeCell` we need to manually implement Sync.
// The type `T` should already always be Send (otherwise the thread could not
// have been created) and the Packet is Sync because all access to the
// `UnsafeCell` is synchronized (by the `join()` boundary), and `ScopeData` is Sync.
unsafe impl<T: Send> Sync for Packet<'_, T> {}
impl<T> Drop for Packet<'_, T> {
    // Dropping the packet is where an unjoined thread's result (and a scoped
    // thread's unhandled panic) get accounted for.
    fn drop(&mut self) {
        // If this packet was for a thread that ran in a scope, the thread
        // panicked, and nobody consumed the panic payload, we make sure
        // the scope function will panic.
        let unhandled_panic = matches!(self.result.get_mut(), Some(Err(_)));
        // Drop the result without causing unwinding.
        // This is only relevant for threads that aren't join()ed, as
        // join() will take the `result` and set it to None, such that
        // there is nothing left to drop here.
        // If this panics, we should handle that, because we're outside the
        // outermost `catch_unwind` of our thread.
        // We just abort in that case, since there's nothing else we can do.
        // (And even if we tried to handle it somehow, we'd also need to handle
        // the case where the panic payload we get out of it also panics on
        // drop, and so on. See issue #86027.)
        if unwinding::panic::catch_unwind(core::panic::AssertUnwindSafe(|| {
            *self.result.get_mut() = None;
        }))
        .is_err()
        {
            abort::abort(AbortLevel::Panic(), crate::rc::ResultPanicked::make());
        }
        // Book-keeping so the scope knows when it's done.
        if let Some(scope) = &self.scope {
            // Now that there will be no more user code running on this thread
            // that can use 'static, mark the thread as 'finished'.
            // It's important we only do this after the `result` has been dropped,
            // since dropping it might still use things it borrowed from 'static.
            scope.decrement_num_running_threads(unhandled_panic);
        }
    }
}
/// Inner representation for JoinHandle
struct JoinInner<'scope, T> {
    // Per-thread implementation object; pinned so its address stays stable
    // (a raw pointer into it is published via TLS by `thread_wrapper`).
    native: Pin<Arc<imp::Thread>>,
    // Public-facing handle for the spawned thread.
    thread: Thread,
    // Shared slot the spawned thread writes its result into.
    packet: Arc<Packet<'scope, T>>,
}
impl<T> Clone for JoinInner<'static, T> {
fn clone(&self) -> Self {
Self {
native: self.native.clone(),
thread: self.thread.clone(),
packet: self.packet.clone(),
}
}
}
impl<T> JoinInner<'_, T> {
    /// Blocks until the underlying OS thread terminates, then takes the
    /// result out of the shared packet.
    ///
    /// NOTE(review): `Arc::get_mut(..).unwrap()` panics if any other clone of
    /// this `JoinInner` (possible in the `'static` case via the `Clone` impl
    /// above) is still alive at join time — confirm callers guarantee
    /// uniqueness here.
    fn join(&mut self) -> Result<T> {
        // The join error (if any) is deliberately ignored; the packet below
        // is the source of truth for the thread's outcome.
        let _ = self.native.join();
        Arc::get_mut(&mut self.packet)
            .unwrap()
            .result
            .get_mut()
            .take()
            .unwrap()
    }
    /// Waits for the thread to exit without consuming the result, with an
    /// optional timeout (units are those of `imp::Thread::join_timeout` —
    /// presumably nanoseconds; confirm against the `imp` module).
    fn wait_exit(&self, timeout: Option<i64>) -> crate::result::Result<()> {
        self.native.join_timeout(timeout)
    }
}
/// An owned permission to join on a thread (block on its termination).
///
/// A `JoinHandle` *detaches* the associated thread when it is dropped, which
/// means that there is no longer any handle to the thread and no way to `join`
/// on it.
///
/// Due to platform restrictions, it is not possible to [`Clone`] this
/// handle: the ability to join a thread is a uniquely-owned permission.
///
/// This `struct` is created by the [`thread::spawn`] function and the
/// [`thread::Builder::spawn`] method.
///
/// # Examples
///
/// Creation from [`thread::spawn`]:
///
/// ```
/// use crate::thread;
///
/// let join_handle: thread::JoinHandle<_> = thread::spawn(|| {
///     // some work here
/// });
/// ```
///
/// Creation from [`thread::Builder::spawn`]:
///
/// ```
/// use crate::thread;
///
/// let builder = thread::Builder::new();
///
/// let join_handle: thread::JoinHandle<_> = builder.spawn(|| {
///     // some work here
/// }).unwrap();
/// ```
///
/// A thread being detached and outliving the thread that spawned it:
///
/// ```no_run
/// use crate::thread;
/// use crate::time::Duration;
///
/// let original_thread = thread::spawn(|| {
///     let _detached_thread = thread::spawn(|| {
///         // Here we sleep to make sure that the first thread returns before.
///         thread::sleep(Duration::from_millis(10));
///         // This will be called, even though the JoinHandle is dropped.
///         println!("♫ Still alive ♫");
///     });
/// });
///
/// original_thread.join().expect("The thread being joined has panicked");
/// println!("Original thread is joined.");
///
/// // We make sure that the new thread has time to run, before the main
/// // thread returns.
///
/// thread::sleep(Duration::from_millis(1000));
/// ```
///
/// [`thread::Builder::spawn`]: Builder::spawn
/// [`thread::spawn`]: spawn
// NOTE(review): `BLOCK_ON_DROP` is undocumented here; its name suggests that
// `true` makes `Drop` wait for the thread instead of detaching — confirm
// against the (not visible) `Drop` implementation and document it publicly.
pub struct JoinHandle<T, const BLOCK_ON_DROP: bool = false>(JoinInner<'static, T>);
// SAFETY: access to the shared `Packet` is synchronized by the `join()`
// boundary (see `Packet`'s Sync impl). NOTE(review): these impls are
// unconditional in `T` — confirm `JoinHandle` can only ever be constructed
// with `T: Send` payloads (as `spawn`'s bounds require).
unsafe impl<T, const BLOCK_ON_DROP: bool> Send for JoinHandle<T, BLOCK_ON_DROP> {}
unsafe impl<T, const BLOCK_ON_DROP: bool> Sync for JoinHandle<T, BLOCK_ON_DROP> {}
impl<T, const BLOCK_ON_DROP: bool> JoinHandle<T, BLOCK_ON_DROP> {
    /// Extracts a handle to the underlying thread.
    ///
    /// Returns a borrow; clone the [`Thread`] if it must outlive this handle.
    ///
    /// # Examples
    ///
    /// ```
    /// use crate::thread;
    ///
    /// let builder = thread::Builder::new();
    ///
    /// let join_handle: thread::JoinHandle<_> = builder.spawn(|| {
    ///     // some work here
    /// }).unwrap();
    ///
    /// let thread = join_handle.thread();
    /// println!("thread id: {:?}", thread.id());
    /// ```
    #[must_use]
    pub fn thread(&self) -> &Thread {
        &self.0.thread
    }
/// Waits for the associated thread to finish.
///
/// This function will return immediately if the associated thread has already finished.
///
/// In terms of [atomic memory orderings], the completion of the associated
/// thread synchronizes with this function returning. In other words, all
/// operations performed by that thread [happen
/// before](https://doc.rust-lang.org/nomicon/atomics.html#data-accesses) all
/// operations that happen after `join` returns.
///
/// If the associated thread panics, [`Err`] is returned with the parameter given
/// to [`panic!`].
///
/// [`Err`]: crate::result::Result::Err
/// [atomic memory orderings]: core::sync::atomic
///
/// # Panics
///
/// This function may panic on some platforms if a thread attempts to join
/// itself or otherwise may create a deadlock with joining threads.
///
/// # Examples
///
/// ```
/// use crate::thread;
///
/// let builder = thread::Builder::new();
///
/// let join_handle: thread::JoinHandle<_> = builder.spawn(|| {
/// // some work here
/// }).unwrap();
/// join_handle.join().expect("Couldn't join on the associated thread");
/// ```
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | true |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/util/rc.rs | src/util/rc.rs | //! Util-specific result definitions
use crate::rc;
/// Result Submodule ID for the parent module
pub const RESULT_SUBMODULE: u32 = 300;
result_define_subgroup!(rc::RESULT_MODULE, RESULT_SUBMODULE => {
InvalidPointer: 1,
InvalidSize: 2,
InvalidUtf8Conversion: 3,
InvalidUtf16Conversion: 4
});
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/applet.rs | src/service/applet.rs | use crate::hbl::AppletType;
use crate::ipc::sf::sm;
use crate::result::*;
use crate::{hbl, service};
pub use crate::ipc::sf::applet::*;
ipc_client_define_client_default!(AllSystemAppletProxiesService);
impl IAllSystemAppletProxiesClient for AllSystemAppletProxiesService {}
impl service::IService for AllSystemAppletProxiesService {
fn get_name() -> sm::ServiceName {
        // Pick the proxy service by applet type: applications use "appletOE",
        // every other applet type goes through "appletAE".
let applet_type = hbl::get_applet_type();
sm::ServiceName::new(
if applet_type == AppletType::Application || applet_type == AppletType::Default {
"appletOE"
} else {
"appletAE"
},
)
}
fn as_domain() -> bool {
true
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/sm.rs | src/service/sm.rs | use core::ffi::CStr;
use crate::ipc;
use crate::ipc::sf;
use crate::result::*;
use crate::service;
use crate::version;
pub use crate::ipc::sf::sm::*;
impl service::INamedPort for UserInterface {
fn get_name() -> &'static CStr {
c"sm:"
}
fn post_initialize(&mut self) -> Result<()> {
if version::get_version() >= version::Version::new(12, 0, 0) {
self.session.object_info.protocol = ipc::CommandProtocol::Tipc;
}
self.register_client(sf::ProcessId::new())
}
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/spl.rs | src/service/spl.rs | use crate::ipc::sf::sm;
use crate::result::*;
use crate::service;
pub use crate::ipc::sf::spl::*;
ipc_client_define_client_default!(RandomService);
impl IRandomClient for RandomService {}
impl service::IService for RandomService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("csrng")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/dispdrv.rs | src/service/dispdrv.rs | use crate::ipc::sf::sm;
use crate::result::*;
use crate::service;
pub use crate::ipc::sf::dispdrv::*;
/// This one gets the special privilege of not being suffixed with `Service`, as it is the only
/// implementor of `IService` that can be received from a call to a different service.
impl service::IService for HOSBinderDriver {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("dispdrv")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/psm.rs | src/service/psm.rs | use crate::ipc::sf::sm;
use crate::result::*;
use crate::service;
pub use crate::ipc::sf::psm::*;
ipc_client_define_client_default!(PsmService);
impl IPsmClient for PsmService {}
impl service::IService for PsmService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("psm")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/hid.rs | src/service/hid.rs | use crate::ipc::sf::sm;
use crate::result::*;
use crate::service;
pub use crate::ipc::sf::hid::*;
ipc_client_define_client_default!(HidService);
ipc_client_define_client_default!(HidSysService);
impl IHidClient for HidService {}
impl IHidSysClient for HidSysService {}
impl service::IService for HidService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("hid")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
impl service::IService for HidSysService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("hid:sys")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/nfp.rs | src/service/nfp.rs | use crate::ipc::sf::sm;
use crate::result::*;
use crate::service;
pub use crate::ipc::sf::nfp::*;
ipc_client_define_client_default!(UserManagerService);
impl IUserManagerClient for UserManagerService {}
ipc_client_define_client_default!(DebugManagerService);
impl IDebugManagerClient for DebugManagerService {}
ipc_client_define_client_default!(SystemManagerService);
impl ISystemManagerClient for SystemManagerService {}
impl service::IService for UserManagerService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("nfp:user")
}
fn as_domain() -> bool {
true
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
impl service::IService for SystemManagerService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("nfp:sys")
}
fn as_domain() -> bool {
true
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
impl service::IService for DebugManagerService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("nfp:dbg")
}
fn as_domain() -> bool {
true
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/lr.rs | src/service/lr.rs | use super::*;
use crate::service;
pub use crate::ipc::sf::lr::*;
ipc_client_define_client_default!(LocationResolverManagerService);
impl ILocationResolverManagerClient for LocationResolverManagerService {}
impl service::IService for LocationResolverManagerService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("lr")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/ncm.rs | src/service/ncm.rs | use super::*;
use crate::service;
pub use crate::ipc::sf::ncm::*;
ipc_client_define_client_default!(ContentManagerService);
impl IContentManagerClient for ContentManagerService {}
impl service::IService for ContentManagerService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("ncm")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/bsd.rs | src/service/bsd.rs | use crate::ipc::sf::sm;
use crate::result::Result;
use crate::service;
pub use crate::ipc::sf::bsd::*;
ipc_client_define_client_default!(SystemBsdService);
impl IBsdClient for SystemBsdService {}
impl service::IService for SystemBsdService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("bsd:s")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
ipc_client_define_client_default!(AppletBsdService);
impl IBsdClient for AppletBsdService {}
impl service::IService for AppletBsdService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("bsd:a")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
ipc_client_define_client_default!(UserBsdService);
impl IBsdClient for UserBsdService {}
impl service::IService for UserBsdService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("bsd:u")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
pub enum BsdSrvkind {
/// "bsd:u"
User,
/// "bsd:a"
Applet,
/// "bsd:s"
System,
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/psc.rs | src/service/psc.rs | use crate::ipc::sf::sm;
use crate::result::*;
use crate::service;
pub use crate::ipc::sf::psc::*;
ipc_client_define_client_default!(PmService);
impl IPmClient for PmService {}
impl service::IService for PmService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("psc:m")
}
fn as_domain() -> bool {
true
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/pm.rs | src/service/pm.rs | use crate::ipc::sf::sm;
use crate::result::*;
use crate::service;
pub use crate::ipc::sf::pm::*;
ipc_client_define_client_default!(InformationInterfaceService);
impl IInformationInterfaceClient for InformationInterfaceService {}
impl service::IService for InformationInterfaceService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("pm:info")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
ipc_client_define_client_default!(DebugMonitorInterfaceService);
impl IDebugMonitorInterfaceClient for DebugMonitorInterfaceService {}
impl service::IService for DebugMonitorInterfaceService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("pm:dmnt")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/ldr.rs | src/service/ldr.rs | use crate::ipc::sf::sm;
use crate::result::*;
use crate::service;
pub use crate::ipc::sf::ldr::*;
ipc_client_define_client_default!(ShellInterfaceService);
impl IShellInterfaceClient for ShellInterfaceService {}
impl service::IService for ShellInterfaceService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("ldr:shel")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/mii.rs | src/service/mii.rs | use crate::ipc::sf::sm;
use crate::result::*;
use crate::service;
pub use crate::ipc::sf::mii::*;
ipc_client_define_client_default!(StaticService);
impl IStaticClient for StaticService {}
impl service::IService for StaticService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("mii:e")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/usb.rs | src/service/usb.rs | pub use crate::ipc::sf::usb::*;
/// "usb:hs" service interface definitions.
pub mod hs;
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/audio.rs | src/service/audio.rs | use crate::ipc::sf::sm;
use crate::result::*;
use crate::service;
pub use crate::ipc::sf::audio::*;
ipc_client_define_client_default!(AudioOutManagerService);
impl IAudioOutManagerClient for AudioOutManagerService {}
impl service::IService for AudioOutManagerService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("audout:u")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
ipc_client_define_client_default!(AudioInManagerService);
impl IAudioInManagerClient for AudioInManagerService {}
impl service::IService for AudioInManagerService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("audin:u")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/lm.rs | src/service/lm.rs | use crate::ipc::sf::sm;
use crate::result::*;
use crate::service;
pub use crate::ipc::sf::lm::*;
ipc_client_define_client_default!(LoggingService);
impl ILoggingClient for LoggingService {}
impl service::IService for LoggingService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("lm")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/nv.rs | src/service/nv.rs | use crate::ipc::sf::sm;
use crate::result::*;
use crate::service;
pub use crate::ipc::sf::nv::*;
ipc_client_define_client_default!(ApplicationNvDrvService);
impl INvDrvClient for ApplicationNvDrvService {}
impl service::IService for ApplicationNvDrvService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("nvdrv")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
ipc_client_define_client_default!(AppletNvDrvService);
impl INvDrvClient for AppletNvDrvService {}
impl service::IService for AppletNvDrvService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("nvdrv:a")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
ipc_client_define_client_default!(SystemNvDrvService);
impl INvDrvClient for SystemNvDrvService {}
impl service::IService for SystemNvDrvService {
fn get_name() -> sm::ServiceName {
sm::ServiceName::new("nvdrv:s")
}
fn as_domain() -> bool {
false
}
fn post_initialize(&mut self) -> Result<()> {
Ok(())
}
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/fatal.rs | src/service/fatal.rs | use crate::ipc::sf::sm;
use crate::result::*;
use crate::service;
pub use crate::ipc::sf::fatal::*;
ipc_client_define_client_default!(FatalService);
impl IFatalClient for FatalService {}
impl service::IService for FatalService {
    /// SM name of the user fatal service.
    fn get_name() -> sm::ServiceName { sm::ServiceName::new("fatal:u") }

    /// This session is not opened as an IPC domain.
    fn as_domain() -> bool { false }

    /// No setup needed once the session exists.
    fn post_initialize(&mut self) -> Result<()> { Ok(()) }
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/set.rs | src/service/set.rs | use crate::ipc::sf::sm;
use crate::result::*;
use crate::service;
pub use crate::ipc::sf::set::*;
// Default-generated IPC client for the system settings service
// ("set:sys" — see the IService impl below); uses ISystemSettingsClient's provided methods.
ipc_client_define_client_default!(SystemSettingsService);
impl ISystemSettingsClient for SystemSettingsService {}
impl service::IService for SystemSettingsService {
    /// Plain (non-domain) session.
    fn as_domain() -> bool {
        false
    }

    /// SM name of the system settings service.
    fn get_name() -> sm::ServiceName {
        sm::ServiceName::new("set:sys")
    }

    /// No work needed after the session opens.
    fn post_initialize(&mut self) -> Result<()> {
        Ok(())
    }
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/fsp.rs | src/service/fsp.rs | pub use crate::ipc::sf::fsp as fsp_sf;
/// "fsp-srv" service definitions.
pub mod srv;
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/vi.rs | src/service/vi.rs | use crate::ipc::sf::sm;
use crate::result::*;
use crate::service;
pub use crate::ipc::sf::vi::*;
// Default-generated IPC clients for the vi display-root services
// (application / manager / system variants — see the IService impls below).
ipc_client_define_client_default!(ApplicationDisplayRootService);
ipc_client_define_client_default!(ManagerDisplayRootService);
ipc_client_define_client_default!(SystemDisplayRootService);
// Each client takes its root-client trait's provided method implementations.
impl IApplicationDisplayRootClient for ApplicationDisplayRootService {}
impl IManagerDisplayRootClient for ManagerDisplayRootService {}
impl ISystemDisplayRootClient for SystemDisplayRootService {}
impl service::IService for ApplicationDisplayRootService {
    /// SM name of the application display-root service.
    fn get_name() -> sm::ServiceName { sm::ServiceName::new("vi:u") }

    /// This session IS converted to an IPC domain.
    fn as_domain() -> bool { true }

    /// Nothing further to initialize.
    fn post_initialize(&mut self) -> Result<()> { Ok(()) }
}
impl service::IService for SystemDisplayRootService {
    /// Opened as an IPC domain.
    fn as_domain() -> bool {
        true
    }

    /// No post-open setup.
    fn post_initialize(&mut self) -> Result<()> {
        Ok(())
    }

    /// SM name of the system display-root service.
    fn get_name() -> sm::ServiceName {
        sm::ServiceName::new("vi:s")
    }
}
impl service::IService for ManagerDisplayRootService {
    /// SM name of the manager display-root service.
    fn get_name() -> sm::ServiceName {
        sm::ServiceName::new("vi:m")
    }

    /// Converted to an IPC domain.
    fn as_domain() -> bool {
        true
    }

    /// No extra initialization required.
    fn post_initialize(&mut self) -> Result<()> {
        Ok(())
    }
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/usb/hs.rs | src/service/usb/hs.rs | use crate::ipc::sf::sm;
use crate::result::*;
use crate::service;
pub use crate::ipc::sf::usb::hs::*;
// Default-generated IPC client for the USB high-speed client-root service
// ("usb:hs" — see the IService impl below); uses IClientRootSessionClient's provided methods.
ipc_client_define_client_default!(ClientRootSessionService);
impl IClientRootSessionClient for ClientRootSessionService {}
impl service::IService for ClientRootSessionService {
    /// SM name of the USB-HS client-root service.
    fn get_name() -> sm::ServiceName { sm::ServiceName::new("usb:hs") }

    /// Session is converted to an IPC domain.
    fn as_domain() -> bool { true }

    /// Nothing to do after the session opens.
    fn post_initialize(&mut self) -> Result<()> { Ok(()) }
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/service/fsp/srv.rs | src/service/fsp/srv.rs | use crate::ipc::sf;
use crate::ipc::sf::sm;
use crate::result::*;
use crate::service;
pub use crate::ipc::sf::fsp::srv::*;
// Default-generated IPC client for the filesystem proxy service
// ("fsp-srv" — see the IService impl below); uses IFileSystemProxyClient's provided methods.
ipc_client_define_client_default!(FileSystemProxyService);
impl IFileSystemProxyClient for FileSystemProxyService {}
impl service::IService for FileSystemProxyService {
    /// SM name of the filesystem proxy service.
    fn get_name() -> sm::ServiceName {
        sm::ServiceName::new("fsp-srv")
    }

    /// Opened as an IPC domain.
    fn as_domain() -> bool {
        true
    }

    /// Registers the current process with the service right after the
    /// session opens, forwarding `SetCurrentProcess`'s result.
    fn post_initialize(&mut self) -> Result<()> {
        self.set_current_process(sf::ProcessId::new())
    }
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
aarch64-switch-rs/nx | https://github.com/aarch64-switch-rs/nx/blob/b365c1baa4c4472fe604f4ab9646440d23c3bd9c/src/macros/sync.rs | src/macros/sync.rs | #[macro_export]
macro_rules! acquire {
($x:expr) => {
::core::sync::atomic::fence(Acquire)
};
}
| rust | MIT | b365c1baa4c4472fe604f4ab9646440d23c3bd9c | 2026-01-04T20:16:15.900894Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.