repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/max_quantity.rs | mobile/native/src/max_quantity.rs | use crate::calculations;
use crate::calculations::calculate_pnl;
use crate::channel_trade_constraints::channel_trade_constraints;
use crate::dlc;
use crate::trade::position;
use bitcoin::Amount;
use bitcoin::SignedAmount;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use std::cmp::max;
use xxi_node::commons;
use xxi_node::commons::Direction;
use xxi_node::commons::Price;
/// Calculates the max quantity
///
/// The max quantity a user can trade is the lower value of either:
/// a) The max coordinator margin which is restricted to a certain max amount.
/// b) The max trader margin which is off-chain balance if a channel already exists.
///
/// Returns the maximum number of contracts the trader can open at `price` with
/// `trader_leverage` in `trader_trade_direction`, or an error if the trade
/// constraints, fee estimates, or positions cannot be loaded.
pub fn max_quantity(
    price: Decimal,
    trader_leverage: f32,
    trader_trade_direction: Direction,
) -> anyhow::Result<Decimal> {
    let channel_trade_constraints = channel_trade_constraints()?;

    // On-chain fees only apply when the trade would open a new channel, i.e. when the
    // funds are not already in a channel balance.
    let on_chain_fee_estimate = match channel_trade_constraints.is_channel_balance {
        true => None,
        false => {
            let channel_fee_reserve = dlc::estimated_fee_reserve()?;
            let funding_tx_fee = dlc::estimated_funding_tx_fee()?;
            // double the funding tx fee to ensure we have enough buffer
            let funding_tx_with_buffer = funding_tx_fee * 2;

            Some(channel_fee_reserve + funding_tx_with_buffer)
        }
    };

    let max_coordinator_balance =
        Amount::from_sat(channel_trade_constraints.max_counterparty_balance_sats);

    // If the trader has a channel, his max balance is the channel balance and we continue,
    // otherwise we can return here as the max amount to trade depends on what the coordinator can
    // provide
    let max_trader_balance = if channel_trade_constraints.is_channel_balance {
        Amount::from_sat(channel_trade_constraints.max_local_balance_sats)
    } else {
        // No channel yet: the coordinator's balance and leverage are the only constraint.
        return Ok(Decimal::from_f32(calculations::calculate_quantity(
            price.to_f32().expect("to fit"),
            max_coordinator_balance.to_sat(),
            channel_trade_constraints.coordinator_leverage,
        ))
        .expect("to fit"));
    };

    let order_matching_fee_rate = channel_trade_constraints.order_matching_fee_rate;
    let order_matching_fee_rate = Decimal::try_from(order_matching_fee_rate).expect("to fit");

    let positions = position::handler::get_positions()?;
    // NOTE(review): only the first position is considered — presumably at most one position
    // exists at a time; confirm against the position handler.
    let position = positions.first();

    let accumulated_order_matching_fees = position
        .map(|p| p.order_matching_fees)
        .unwrap_or(Amount::ZERO);

    let (max_coordinator_margin, max_trader_margin) = match position {
        // Trading against an existing position: both parties' locked margins are freed up,
        // and the trader's unrealized pnl shifts the available margins.
        Some(position) if trader_trade_direction != position.direction => {
            let total_collateral = channel_trade_constraints.total_collateral.unwrap_or(0);
            let total_balance = channel_trade_constraints.max_counterparty_balance_sats
                + channel_trade_constraints.max_local_balance_sats;
            let total_balance = Amount::from_sat(total_balance);

            let trader_margin = Amount::from_sat(position.collateral);
            // The coordinator's margin is whatever part of the total collateral is neither
            // free balance nor the trader's margin; clamped to zero on underflow.
            let coordinator_margin = Amount::from_sat(total_collateral)
                .checked_sub(total_balance + trader_margin)
                .unwrap_or(Amount::ZERO);

            // Pnl is evaluated at the given price for both ask and bid.
            let trader_pnl = calculate_pnl(
                position.average_entry_price,
                Price {
                    ask: price,
                    bid: price,
                },
                position.quantity,
                position.leverage,
                position.direction,
            )?;

            let max_coordinator_margin =
                max_coordinator_balance.to_signed()? + coordinator_margin.to_signed()?;
            let max_trader_margin = max_trader_balance.to_signed()? + trader_margin.to_signed()?;

            // The trader's pnl is the coordinator's loss and vice versa, hence it is
            // subtracted on one side and added on the other.
            let trader_pnl = SignedAmount::from_sat(trader_pnl);
            let max_coordinator_margin = max_coordinator_margin
                .checked_sub(trader_pnl)
                .unwrap_or(SignedAmount::ZERO);
            let max_trader_margin = max_trader_margin
                .checked_add(trader_pnl)
                .unwrap_or(SignedAmount::ZERO);

            debug_assert!(
                max_coordinator_margin.is_positive(),
                "max coordinator margin must be positive after subtracting the trader pnl"
            );
            debug_assert!(
                max_trader_margin.is_positive(),
                "max trader margin must be positive after adding the trader pnl"
            );

            // In release builds a negative margin is clamped to zero instead of panicking.
            (
                Amount::from_sat(max(0, max_coordinator_margin.to_sat()) as u64),
                Amount::from_sat(max(0, max_trader_margin.to_sat()) as u64),
            )
        }
        _ => (max_coordinator_balance, max_trader_balance),
    };

    // A counter-directional trade can additionally reuse the already-open quantity.
    let open_quantity = match position {
        Some(position) if position.direction != trader_trade_direction => {
            Decimal::try_from(position.quantity).expect("to fit")
        }
        _ => Decimal::ZERO,
    };

    let max_quantity = calculate_max_quantity(
        price,
        max_coordinator_margin,
        max_trader_margin,
        on_chain_fee_estimate,
        channel_trade_constraints.coordinator_leverage,
        trader_leverage,
        order_matching_fee_rate,
        accumulated_order_matching_fees,
        open_quantity,
    );

    Ok(max_quantity)
}
/// Calculates the max quantity. If an on-chain fee estimate is
/// provided the max margins are reduced by that amount to ensure the fees are considered.
///
/// 1. Calculate the max coordinator quantity and max trader quantity.
/// 2. The smaller quantity is used to derive the order matching fee.
/// 3. Reduce the max margin by the order matching fee.
/// 4. Recalculate and return the max quantity from the reduced margin.
///
/// Note, this function will not exactly find the max quantity possible, but a very close
/// approximation.
#[allow(clippy::too_many_arguments)]
fn calculate_max_quantity(
    price: Decimal,
    max_coordinator_margin: Amount,
    max_trader_margin: Amount,
    on_chain_fee_estimate: Option<Amount>,
    coordinator_leverage: f32,
    trader_leverage: f32,
    order_matching_fee_rate: Decimal,
    accumulated_order_matching_fees: Amount,
    open_quantity: Decimal,
) -> Decimal {
    // Subtract required on-chain fees with buffer if the trade is opening a channel. The
    // coordinator margin additionally covers any accumulated order matching fees.
    // `checked_sub` clamps to zero instead of panicking on underflow.
    let max_coordinator_margin = max_coordinator_margin
        .checked_sub(on_chain_fee_estimate.unwrap_or(Amount::ZERO))
        .unwrap_or(Amount::ZERO)
        .checked_sub(accumulated_order_matching_fees)
        .unwrap_or(Amount::ZERO);
    let max_trader_margin = max_trader_margin
        .checked_sub(on_chain_fee_estimate.unwrap_or(Amount::ZERO))
        .unwrap_or(Amount::ZERO);

    let price_f32 = price.to_f32().expect("to fit");

    let max_trader_quantity =
        calculations::calculate_quantity(price_f32, max_trader_margin.to_sat(), trader_leverage);
    let max_coordinator_quantity = calculations::calculate_quantity(
        price_f32,
        max_coordinator_margin.to_sat(),
        coordinator_leverage,
    );

    // Take the smaller quantity of the two sides: the trade can only be as big as the side
    // with less margin can support.
    let (quantity, max_margin, leverage) = match max_trader_quantity > max_coordinator_quantity {
        true => (
            max_coordinator_quantity,
            max_coordinator_margin,
            coordinator_leverage,
        ),
        false => (max_trader_quantity, max_trader_margin, trader_leverage),
    };

    // calculate the fee from this quantity + any open quantity to ensure there is enough space for
    // the fees.
    let open_quantity = open_quantity.to_f32().expect("to fit");
    let order_matching_fee =
        commons::order_matching_fee(quantity + open_quantity, price, order_matching_fee_rate);

    // Subtract the fee from the max margin and recalculate the quantity. That
    // might not be perfect but the closest we can get with a relatively simple logic.
    //
    // Bug fix: use `checked_sub` here as well. With very small margins the order matching
    // fee can exceed `max_margin`, and plain `-` on `Amount` panics on underflow.
    let max_margin_without_order_matching_fees = max_margin
        .checked_sub(order_matching_fee)
        .unwrap_or(Amount::ZERO);
    let max_quantity = calculations::calculate_quantity(
        price_f32,
        max_margin_without_order_matching_fees.to_sat(),
        leverage,
    );

    Decimal::try_from((max_quantity + open_quantity).floor()).expect("to fit")
}
//
// Tests pin `calculate_max_quantity` against hand-computed expectations; the step-by-step
// arithmetic for each expected value is spelled out in the inline comments.
#[cfg(test)]
mod tests {
    use super::*;
    use rust_decimal_macros::dec;

    #[test]
    fn test_calculate_max_quantity_with_open_quantity() {
        let price = Decimal::new(22001, 0);
        let max_coordinator_margin = Amount::from_sat(765_763);
        let max_trader_margin = Amount::from_sat(747_499);
        let trader_leverage = 2.0;
        let coordinator_leverage = 2.0;
        let order_matching_fee_rate = dec!(0.003);
        let open_quantity = dec!(323);
        let accumulated_order_matching_fee = Amount::from_sat(4459);

        let max_quantity = calculate_max_quantity(
            price,
            max_coordinator_margin,
            max_trader_margin,
            None,
            coordinator_leverage,
            trader_leverage,
            order_matching_fee_rate,
            accumulated_order_matching_fee,
            open_quantity,
        );

        // max trader quantity: 0.00,747,499 * 22,001 * 2.0 = 328,91450998
        // order matching fee: (328,91450998 + 323) * (1/22,001) * 0.003 = 0.00,008,889 BTC
        // max trader margin without order matching fee: 747,499 - 8,889 = 738,610
        // max quantity without order matching fee: 0.00,738,610 * 22,001 * 2.0 = 325,0031722
        // 325 + 323 = 648
        assert_eq!(dec!(648), max_quantity);

        // Ensure that the coordinator has enough funds for the trade
        let coordinator_margin = calculations::calculate_margin(
            price.to_f32().unwrap(),
            (max_quantity - open_quantity).to_f32().unwrap(),
            coordinator_leverage,
        );
        assert!(coordinator_margin < max_coordinator_margin.to_sat());
    }

    #[test]
    fn test_calculate_max_quantity_with_accumulated_order_matching_fee() {
        let price = Decimal::new(14999, 0);
        // Coordinator margin is almost fully consumed by the accumulated fee below, so the
        // resulting quantity floors to zero.
        let max_coordinator_margin = Amount::from_sat(7464);
        let max_trader_margin = Amount::from_sat(1_048_951);
        let trader_leverage = 2.0;
        let coordinator_leverage = 2.0;
        let order_matching_fee_rate = dec!(0.003);

        let max_quantity = calculate_max_quantity(
            price,
            max_coordinator_margin,
            max_trader_margin,
            None,
            coordinator_leverage,
            trader_leverage,
            order_matching_fee_rate,
            Amount::from_sat(4500),
            dec!(0),
        );

        assert_eq!(Decimal::ZERO, max_quantity);
    }

    #[test]
    fn test_calculate_max_quantity() {
        let price = Decimal::new(30209, 0);
        let max_coordinator_margin = Amount::from_sat(3_000_000);
        let max_trader_margin = Amount::from_sat(280_000);
        let on_chain_fee_estimate = Amount::from_sat(13_500);
        let trader_leverage = 2.0;
        let coordinator_leverage = 2.0;
        let order_matching_fee_rate = dec!(0.003);

        let max_quantity = calculate_max_quantity(
            price,
            max_coordinator_margin,
            max_trader_margin,
            Some(on_chain_fee_estimate),
            coordinator_leverage,
            trader_leverage,
            order_matching_fee_rate,
            Amount::ZERO,
            dec!(0),
        );

        let trader_margin = calculations::calculate_margin(
            price.to_f32().unwrap(),
            max_quantity.to_f32().unwrap(),
            trader_leverage,
        );
        let order_matching_fee = commons::order_matching_fee(
            max_quantity.to_f32().unwrap(),
            price,
            order_matching_fee_rate,
        );

        // Note this is not exactly the max margin the trader, but its the closest we can get.
        assert_eq!(
            Amount::from_sat(trader_margin) + on_chain_fee_estimate + order_matching_fee,
            // max trader margin: 280,000 - 13,500 = 266,500
            // max trader quantity: 0.00,266,500 * 30,209 * 2.0 = 161,01397
            // order matching fee: 161,01397 * (1/30,209) * 0.003 = 0.00,001,599 BTC
            // max trader margin without order matching fee: 266,500 - 1,599 = 264,901
            // max quantity without order matching fee: 0.00,264,901 * 30,209 * 2.0 = 160,04788618
            // trader margin: 160 / (30,209 * 2.0) = 0.00,264,821 BTC
            // order matching fee: 160 * (1/30,209) * 0,003 = 0.00,001,589 BTC
            // 264,822 + 13,500 + 1589
            Amount::from_sat(279_911)
        );

        // Ensure that the trader still has enough for the order matching fee
        assert!(Amount::from_sat(trader_margin) + order_matching_fee < max_trader_margin,
            "Trader does not have enough margin left for order matching fee. Has {}, order matching fee {}, needed for order {} ",
            max_trader_margin, order_matching_fee , trader_margin);

        // Ensure that the coordinator has enough funds for the trade
        let coordinator_margin = calculations::calculate_margin(
            price.to_f32().unwrap(),
            max_quantity.to_f32().unwrap(),
            coordinator_leverage,
        );
        assert!(coordinator_margin < max_coordinator_margin.to_sat());
    }

    #[test]
    fn test_calculate_max_quantity_with_smaller_coordinator_margin() {
        let price = Decimal::new(30209, 0);
        // The coordinator is the limiting side here (one sat less than the trader).
        let max_coordinator_margin = Amount::from_sat(280_000);
        let max_trader_margin = Amount::from_sat(280_001);
        let trader_leverage = 2.0;
        let coordinator_leverage = 2.0;
        let order_matching_fee_rate = dec!(0.003);

        let max_quantity = calculate_max_quantity(
            price,
            max_coordinator_margin,
            max_trader_margin,
            None,
            coordinator_leverage,
            trader_leverage,
            order_matching_fee_rate,
            Amount::ZERO,
            dec!(0),
        );

        let trader_margin = calculations::calculate_margin(
            price.to_f32().unwrap(),
            max_quantity.to_f32().unwrap(),
            trader_leverage,
        );
        let order_matching_fee = commons::order_matching_fee(
            max_quantity.to_f32().unwrap(),
            price,
            order_matching_fee_rate,
        );

        // Note this is not exactly the max margin of the coordinator, but its the closest we can
        // get.
        assert_eq!(trader_margin, 278_063);

        // Ensure that the trader still has enough for the order matching fee
        assert!(Amount::from_sat(trader_margin) + order_matching_fee < max_trader_margin,
            "Trader does not have enough margin left for order matching fee. Has {}, order matching fee {}, needed for order {} ",
            max_trader_margin, order_matching_fee , trader_margin);

        // Ensure that the coordinator has enough funds for the trade
        let coordinator_margin = calculations::calculate_margin(
            price.to_f32().unwrap(),
            max_quantity.to_f32().unwrap(),
            coordinator_leverage,
        );
        assert!(
            Amount::from_sat(coordinator_margin) < max_coordinator_margin,
            "Coordinator does not have enough margin for the trade. Has {}, needed for order {} ",
            max_coordinator_margin,
            coordinator_margin
        );
    }

    #[test]
    fn test_calculate_max_quantity_with_higher_trader_leverage() {
        let price = Decimal::new(30209, 0);
        let max_coordinator_margin = Amount::from_sat(450_000);
        let max_trader_margin = Amount::from_sat(280_000);
        // Higher trader leverage: the coordinator side (at 2.0) becomes the limiting factor.
        let trader_leverage = 5.0;
        let coordinator_leverage = 2.0;
        let order_matching_fee_rate = dec!(0.003);

        let max_quantity = calculate_max_quantity(
            price,
            max_coordinator_margin,
            max_trader_margin,
            None,
            coordinator_leverage,
            trader_leverage,
            order_matching_fee_rate,
            Amount::ZERO,
            dec!(0),
        );

        let trader_margin = calculations::calculate_margin(
            price.to_f32().unwrap(),
            max_quantity.to_f32().unwrap(),
            trader_leverage,
        );
        let order_matching_fee = commons::order_matching_fee(
            max_quantity.to_f32().unwrap(),
            price,
            order_matching_fee_rate,
        );

        // Note we can not max out the users balance, because the counterparty does not have enough
        // funds to match that trade on a leverage 2.0
        assert_eq!(Amount::from_sat(trader_margin), Amount::from_sat(178_755));

        // Ensure that the trader still has enough for the order matching fee
        assert!(Amount::from_sat(trader_margin) + order_matching_fee < max_trader_margin,
            "Trader does not have enough margin left for order matching fee. Has {}, order matching fee {}, needed for order {} ",
            max_trader_margin, order_matching_fee , trader_margin);

        // Ensure that the coordinator has enough funds for the trade
        let coordinator_margin = calculations::calculate_margin(
            price.to_f32().unwrap(),
            max_quantity.to_f32().unwrap(),
            coordinator_leverage,
        );

        // Note this is not the max coordinator balance, but the closest we can get.
        assert_eq!(coordinator_margin, 446_887);
    }

    #[test]
    fn test_calculate_max_quantity_zero_balance() {
        let price = Decimal::from(30353);
        let max_coordinator_margin = Amount::from_sat(3_000_000);
        // Trader has nothing: no quantity can be opened at all.
        let max_trader_margin = Amount::from_sat(0);
        let trader_leverage = 2.0;
        let coordinator_leverage = 2.0;
        let order_matching_fee_rate = dec!(0.003);
        let on_chain_fee_estimate = Amount::from_sat(1515);

        let max_quantity = calculate_max_quantity(
            price,
            max_coordinator_margin,
            max_trader_margin,
            Some(on_chain_fee_estimate),
            coordinator_leverage,
            trader_leverage,
            order_matching_fee_rate,
            Amount::ZERO,
            dec!(0),
        );

        assert_eq!(max_quantity, Decimal::ZERO)
    }

    #[test]
    fn test_calculate_max_quantity_with_max_channel_size() {
        let price = Decimal::new(28409, 0);
        let max_coordinator_margin = Amount::from_sat(3_000_000);
        // Trader brings a full BTC but is capped by the coordinator's 3M sats.
        let max_trader_margin = Amount::from_btc(1.0).unwrap();
        let trader_leverage = 2.0;
        let coordinator_leverage = 2.0;
        let order_matching_fee_rate = dec!(0.003);
        let on_chain_fee_estimate = Amount::from_sat(1515);

        let max_quantity = calculate_max_quantity(
            price,
            max_coordinator_margin,
            max_trader_margin,
            Some(on_chain_fee_estimate),
            coordinator_leverage,
            trader_leverage,
            order_matching_fee_rate,
            Amount::ZERO,
            dec!(0),
        );

        let trader_margin = calculations::calculate_margin(
            price.to_f32().unwrap(),
            max_quantity.to_f32().unwrap(),
            trader_leverage,
        );
        let order_matching_fee = commons::order_matching_fee(
            max_quantity.to_f32().unwrap(),
            price,
            order_matching_fee_rate,
        );

        // Note we can not max out the users balance, because the counterparty does not have enough
        // funds to match that trade on a leverage 2.0
        assert_eq!(trader_margin, 2_979_690);

        // Ensure that the trader still has enough for the order matching fee
        assert!(Amount::from_sat(trader_margin) + order_matching_fee < max_trader_margin,
            "Trader does not have enough margin left for order matching fee. Has {}, order matching fee {}, needed for order {} ",
            max_trader_margin, order_matching_fee , trader_margin);

        // Ensure that the coordinator has enough funds for the trade
        let coordinator_margin = calculations::calculate_margin(
            price.to_f32().unwrap(),
            max_quantity.to_f32().unwrap(),
            coordinator_leverage,
        );

        // Note this is not the max coordinator balance, but the closest we can get.
        assert!(
            Amount::from_sat(coordinator_margin) < max_coordinator_margin,
            "Coordinator does not have enough margin for the trade. Has {}, needed for order {} ",
            max_coordinator_margin,
            coordinator_margin
        );
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/position.rs | mobile/native/src/position.rs | use crate::db;
use crate::dlc::ChannelState;
use crate::dlc::DlcChannel;
use crate::dlc::SignedChannelState;
use crate::event;
use crate::event::subscriber::Subscriber;
use crate::event::EventInternal;
use crate::event::EventType;
use xxi_node::commons::ContractSymbol;
/// Event subscriber that reacts to a force-closed DLC channel by deleting all locally stored
/// positions and publishing a position-close notification for the UI.
#[derive(Clone, Copy)]
pub struct ForceCloseDlcChannelSubscriber;

impl Subscriber for ForceCloseDlcChannelSubscriber {
    /// Handles an incoming event on a blocking task of the shared tokio runtime.
    ///
    /// Only reacts to `DlcChannelEvent`s whose channel is `Signed` in the force-close
    /// (`Closing`) state; all other events are ignored.
    fn notify(&self, event: &EventInternal) {
        let runtime = match crate::state::get_or_create_tokio_runtime() {
            Ok(runtime) => runtime,
            Err(e) => {
                // Without a runtime we cannot process the event; log and give up.
                tracing::error!("Failed to get tokio runtime. Error: {e:#}");
                return;
            }
        };

        // `spawn_blocking` because the db call below is synchronous.
        runtime.spawn_blocking({
            let event = event.clone();
            move || {
                if matches!(
                    event,
                    EventInternal::DlcChannelEvent(DlcChannel {
                        channel_state: ChannelState::Signed {
                            state: SignedChannelState::Closing,
                            ..
                        },
                        ..
                    })
                ) {
                    tracing::warn!("Removing position after dlc channel got force closed.");
                    // Best effort: the deletion failure is logged but the close notification
                    // is published regardless.
                    if let Err(e) = db::delete_positions() {
                        tracing::error!("Failed to delete position after the dlc channel has been force closed. Error: {e:#}")
                    }
                    event::publish(&EventInternal::PositionCloseNotification(
                        ContractSymbol::BtcUsd,
                    ));
                }}
        });
    }

    /// Subscribes to DLC channel events only.
    fn events(&self) -> Vec<EventType> {
        vec![EventType::DlcChannelEvent]
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/channel_trade_constraints.rs | mobile/native/src/channel_trade_constraints.rs | use crate::dlc;
use anyhow::Context;
use anyhow::Result;
/// Constraints that bound what the user can currently trade, derived from the LSP config and
/// the wallet/DLC-channel state (see [`channel_trade_constraints`]).
pub struct TradeConstraints {
    /// Max balance the local party can use
    ///
    /// This depends on whether the user has a channel or not. If he has a channel, then his
    /// channel balance is the max amount, otherwise his on-chain balance dictates the max amount
    pub max_local_balance_sats: u64,
    /// Max amount the counterparty is willing to put.
    ///
    /// This depends whether the user has a channel or not, i.e. if he has a channel then the max
    /// amount is what the counterparty has in the channel, otherwise, it's a fixed amount what
    /// the counterparty is willing to provide.
    pub max_counterparty_balance_sats: u64,
    /// The leverage the coordinator will take
    pub coordinator_leverage: f32,
    /// Smallest allowed amount of contracts
    pub min_quantity: u64,
    /// If true it means that the user has a channel and hence the max amount is limited by what he
    /// has in the channel. In the future we can consider splice in and allow the user to use more
    /// than just his channel balance.
    pub is_channel_balance: bool,
    /// Smallest allowed margin
    pub min_margin: u64,
    /// The maintenance margin in percent defines the margin requirement left in the dlc channel.
    /// If the margin drops below that value the position gets liquidated.
    pub maintenance_margin_rate: f32,
    /// The fee rate for order matching.
    pub order_matching_fee_rate: f32,
    /// Total collateral in the dlc channel, none if [`Self::is_channel_balance`] is false.
    pub total_collateral: Option<u64>,
}
/// Derives the current [`TradeConstraints`] from the LSP config and the wallet/channel state.
///
/// Fails if the LSP config is not available or no active liquidity option exists.
pub fn channel_trade_constraints() -> Result<TradeConstraints> {
    let config =
        crate::state::try_get_tentenone_config().context("We can't trade without LSP config")?;

    let signed_channel = dlc::get_signed_dlc_channel()?;
    // With an existing channel any margin is acceptable; without one a fixed minimum applies.
    let min_margin = match &signed_channel {
        Some(_) => 1,
        // TODO(holzeis): https://github.com/get10101/10101/issues/1905
        None => 250_000,
    };

    let min_quantity = config.min_quantity;
    let maintenance_margin_rate = config.maintenance_margin_rate;
    let order_matching_fee_rate = config.order_matching_fee_rate;

    // TODO(bonomat): this logic should be removed once we have our liquidity options again and the
    // on-boarding logic. For now we take the highest liquidity option
    let option = config
        .liquidity_options
        .iter()
        .filter(|option| option.active)
        .max_by_key(|option| &option.trade_up_to_sats)
        .context("we need at least one liquidity option")?;
    let coordinator_leverage = option.coordinator_leverage;

    // FIXME: This doesn't work if the channel is in `Closing` and related states.
    let trade_constraints = match signed_channel {
        // No channel: the trader is limited by the on-chain balance (including pending funds),
        // the coordinator by the chosen liquidity option.
        None => {
            let balance = dlc::get_onchain_balance();
            let counterparty_balance_sats = option.trade_up_to_sats;
            TradeConstraints {
                max_local_balance_sats: balance.confirmed
                    + balance.trusted_pending
                    + balance.untrusted_pending,
                max_counterparty_balance_sats: counterparty_balance_sats,
                coordinator_leverage,
                min_quantity,
                is_channel_balance: false,
                min_margin,
                maintenance_margin_rate,
                order_matching_fee_rate,
                total_collateral: None,
            }
        }
        // Existing channel: both sides are limited by their usable channel balances.
        Some(channel) => {
            let local_balance = dlc::get_usable_dlc_channel_balance()?.to_sat();
            let counterparty_balance = dlc::get_usable_dlc_channel_balance_counterparty()?.to_sat();

            TradeConstraints {
                max_local_balance_sats: local_balance,
                max_counterparty_balance_sats: counterparty_balance,
                coordinator_leverage,
                min_quantity,
                is_channel_balance: true,
                min_margin,
                maintenance_margin_rate,
                order_matching_fee_rate,
                total_collateral: Some(
                    channel.own_params.collateral + channel.counter_params.collateral,
                ),
            }
        }
    };
    Ok(trade_constraints)
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/dlc_channel.rs | mobile/native/src/dlc_channel.rs | use crate::dlc;
use flutter_rust_bridge::frb;
/// Flutter-facing (flutter_rust_bridge) representation of a DLC channel, converted from
/// [`crate::dlc::DlcChannel`].
#[frb]
#[derive(Clone)]
pub struct DlcChannel {
    // Reference id of the channel as a string.
    pub reference_id: String,
    // The DLC channel id (maps from `dlc::DlcChannel::channel_id`).
    pub dlc_channel_id: String,
    // Current state of the channel.
    pub channel_state: ChannelState,
}
/// Flutter-facing mirror of [`crate::dlc::ChannelState`]; see the `From` impl below for the
/// one-to-one mapping.
#[frb]
#[derive(Debug, Clone)]
pub enum ChannelState {
    Offered {
        contract_id: String,
    },
    Accepted {
        contract_id: String,
    },
    Signed {
        // `None` when the channel currently has no associated contract.
        contract_id: Option<String>,
        funding_txid: String,
        funding_tx_vout: usize,
        // Present once a closing transaction is known.
        closing_txid: Option<String>,
        state: SignedChannelState,
    },
    Closing {
        contract_id: String,
        buffer_txid: String,
    },
    SettledClosing {
        settle_txid: String,
    },
    Closed {
        closing_txid: String,
    },
    CounterClosed {
        closing_txid: String,
    },
    ClosedPunished,
    CollaborativelyClosed {
        closing_txid: String,
    },
    FailedAccept,
    FailedSign,
    Cancelled {
        contract_id: String,
    },
}
/// Flutter-facing mirror of [`crate::dlc::SignedChannelState`]. Unlike the internal enum the
/// variants here carry no data; the `From` impl below discards the per-state payloads.
#[frb]
#[derive(Debug, Clone)]
pub enum SignedChannelState {
    Established,
    SettledOffered,
    SettledReceived,
    SettledAccepted,
    SettledConfirmed,
    Settled,
    SettledClosing,
    RenewOffered,
    RenewAccepted,
    RenewConfirmed,
    RenewFinalized,
    Closing,
    CollaborativeCloseOffered,
}
impl From<dlc::DlcChannel> for DlcChannel {
fn from(value: dlc::DlcChannel) -> Self {
DlcChannel {
reference_id: value.reference_id,
dlc_channel_id: value.channel_id,
channel_state: value.channel_state.into(),
}
}
}
impl From<dlc::ChannelState> for ChannelState {
fn from(value: dlc::ChannelState) -> Self {
match value {
dlc::ChannelState::Offered { contract_id } => ChannelState::Offered { contract_id },
dlc::ChannelState::Accepted { contract_id } => ChannelState::Accepted { contract_id },
dlc::ChannelState::Signed {
contract_id,
funding_txid,
funding_tx_vout,
closing_txid,
state,
} => ChannelState::Signed {
contract_id,
funding_txid,
funding_tx_vout,
closing_txid,
state: SignedChannelState::from(state),
},
dlc::ChannelState::Closing {
contract_id,
buffer_txid,
} => ChannelState::Closing {
contract_id,
buffer_txid,
},
dlc::ChannelState::SettledClosing { settle_txid } => {
ChannelState::SettledClosing { settle_txid }
}
dlc::ChannelState::Closed { closing_txid } => ChannelState::Closed { closing_txid },
dlc::ChannelState::CounterClosed { closing_txid } => {
ChannelState::CounterClosed { closing_txid }
}
dlc::ChannelState::ClosedPunished => ChannelState::ClosedPunished,
dlc::ChannelState::CollaborativelyClosed { closing_txid } => {
ChannelState::CollaborativelyClosed { closing_txid }
}
dlc::ChannelState::FailedAccept => ChannelState::FailedAccept,
dlc::ChannelState::FailedSign => ChannelState::FailedSign,
dlc::ChannelState::Cancelled { contract_id } => ChannelState::Cancelled { contract_id },
}
}
}
impl From<dlc::SignedChannelState> for SignedChannelState {
fn from(value: dlc::SignedChannelState) -> Self {
match value {
dlc::SignedChannelState::Established { .. } => SignedChannelState::Established,
dlc::SignedChannelState::SettledOffered { .. } => SignedChannelState::SettledOffered,
dlc::SignedChannelState::SettledReceived { .. } => SignedChannelState::SettledReceived,
dlc::SignedChannelState::SettledAccepted { .. } => SignedChannelState::SettledAccepted,
dlc::SignedChannelState::SettledConfirmed { .. } => {
SignedChannelState::SettledConfirmed
}
dlc::SignedChannelState::Settled { .. } => SignedChannelState::Settled,
dlc::SignedChannelState::RenewOffered { .. } => SignedChannelState::RenewOffered,
dlc::SignedChannelState::RenewAccepted { .. } => SignedChannelState::RenewAccepted,
dlc::SignedChannelState::RenewConfirmed { .. } => SignedChannelState::RenewConfirmed,
dlc::SignedChannelState::RenewFinalized { .. } => SignedChannelState::RenewFinalized,
dlc::SignedChannelState::Closing { .. } => SignedChannelState::Closing,
dlc::SignedChannelState::CollaborativeCloseOffered { .. } => {
SignedChannelState::CollaborativeCloseOffered
}
dlc::SignedChannelState::SettledClosing { .. } => SignedChannelState::SettledClosing,
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/unfunded_channel_opening_order.rs | mobile/native/src/unfunded_channel_opening_order.rs | use crate::event;
use crate::event::EventInternal;
use crate::event::FundingChannelTask;
use crate::hodl_invoice;
use crate::state::get_node;
use crate::trade::order;
use crate::trade::order::api::NewOrder;
use crate::watcher;
use anyhow::Error;
use bitcoin::Amount;
use xxi_node::commons::ChannelOpeningParams;
/// Funding targets returned to the caller so the user can fund the channel either on-chain or
/// via Lightning.
pub struct ExternalFunding {
    // On-chain address the user can send funds to.
    pub bitcoin_address: String,
    /// The payment request of the hodl invoice. Could be none if the lnd node is down.
    pub payment_request: Option<String>,
}
/// handles orders which would open a channel where the user does not have funds in his wallets
/// prior to the call
///
/// there are two things that are happening here:
/// 1. we watch an on-chain address of funding arrives
/// 2. we ask the coordinator for a hodl invoice and watch for it getting paid
///
/// if either 1) or 2) of those two task report that the funds are here, we continue and post the
/// order. if task 2) was done (hodl invoice), we also share the pre-image with the coordinator
pub async fn submit_unfunded_channel_opening_order(
    order: NewOrder,
    coordinator_reserve: u64,
    trader_reserve: u64,
    estimated_margin: u64,
    order_matching_fee: u64,
) -> anyhow::Result<ExternalFunding, Error> {
    let node = get_node();
    let bitcoin_address = node.inner.get_new_address()?;

    // The user must fund margin + reserve + all fees (matching fee, channel fee reserve and
    // funding tx fee).
    let fees = Amount::from_sat(order_matching_fee)
        + crate::dlc::estimated_fee_reserve()?
        + crate::dlc::estimated_funding_tx_fee()?;
    let funding_amount = Amount::from_sat(estimated_margin + trader_reserve) + fees;

    // Best effort: if the coordinator's lnd node is down we still offer on-chain funding.
    let hodl_invoice = hodl_invoice::get_hodl_invoice_from_coordinator(funding_amount)
        .await
        .inspect_err(|e| tracing::error!("Failed to get hodl invoice. Error: {e:#}"))
        .ok();
    let payment_request = hodl_invoice.clone().map(|invoice| invoice.payment_request);

    // abort previous watcher before starting new task.
    abort_watcher().await?;

    let runtime = crate::state::get_or_create_tokio_runtime()?;
    let watch_handle = runtime.spawn({
        let bitcoin_address = bitcoin_address.clone();
        let pre_image = hodl_invoice.clone().map(|invoice| invoice.pre_image);
        let r_hash = hodl_invoice.map(|invoice| invoice.r_hash).unwrap_or_default();
        async move {
            event::publish(&EventInternal::FundingChannelNotification(
                FundingChannelTask::Pending,
            ));

            // we must only create the order on either event. If the bitcoin address is funded we cancel the watch for the lightning invoice and vice versa.
            let maybe_pre_image = tokio::select! {
                _ = watcher::watch_funding_address(bitcoin_address.clone(), funding_amount) => {
                    // received bitcoin payment.
                    tracing::info!(%bitcoin_address, %funding_amount, "Found funding amount on bitcoin address.");
                    None
                }
                _ = watcher::watch_lightning_payment(r_hash) => {
                    // received lightning payment.
                    tracing::info!(%funding_amount, "Found lighting payment.");
                    // The pre-image is passed on so the coordinator can settle the invoice.
                    pre_image
                }
            };

            event::publish(&EventInternal::FundingChannelNotification(
                FundingChannelTask::Funded,
            ));

            tracing::debug!(
                coordinator_reserve,
                %funding_amount,
                "Creating new order with values {order:?}"
            );

            match order::handler::submit_order(
                order.into(),
                Some(ChannelOpeningParams {
                    coordinator_reserve: Amount::from_sat(coordinator_reserve),
                    trader_reserve: Amount::from_sat(trader_reserve),
                    pre_image: maybe_pre_image,
                })
            )
            .await
            .map_err(anyhow::Error::new)
            .map(|id| id.to_string())
            {
                Ok(order_id) => {
                    tracing::info!(order_id, "Order created");
                    event::publish(&EventInternal::FundingChannelNotification(
                        FundingChannelTask::OrderCreated(order_id),
                    ));
                }
                Err(error) => {
                    tracing::error!("Failed at submitting order {error:?}");
                    event::publish(&EventInternal::FundingChannelNotification(
                        FundingChannelTask::Failed("Failed at posting the order".to_string()),
                    ));
                }
            };
        }
    });

    // Remember the handle so a subsequent call (or shutdown) can abort this watcher.
    *node.watcher_handle.lock() = Some(watch_handle);

    Ok(ExternalFunding {
        bitcoin_address: bitcoin_address.to_string(),
        payment_request,
    })
}
/// Aborts any existing watch for bitcoin address or hodl invoice funding.
pub async fn abort_watcher() -> anyhow::Result<()> {
    let node = get_node();
    // Take the handle in its own statement so the lock guard is dropped before awaiting.
    let previous_watcher = node.watcher_handle.lock().take();

    match previous_watcher {
        Some(handle) => {
            handle.abort();
            // Wait for the task to wind down; its result is irrelevant after the abort.
            let _ = handle.await;
        }
        None => {}
    }

    Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/dlc/node.rs | mobile/native/src/dlc/node.rs | use crate::db;
use crate::event;
use crate::event::BackgroundTask;
use crate::event::EventInternal;
use crate::event::TaskStatus;
use crate::storage::TenTenOneNodeStorage;
use crate::trade::funding_fee_event::handler::handle_unpaid_funding_fee_events;
use crate::trade::funding_fee_event::handler::mark_funding_fee_events_as_paid;
use crate::trade::order;
use crate::trade::order::FailureReason;
use crate::trade::order::InvalidSubchannelOffer;
use crate::trade::position;
use crate::trade::position::handler::get_positions;
use crate::trade::position::handler::handle_rollover_offer;
use crate::trade::position::handler::update_position_after_dlc_channel_creation_or_update;
use crate::trade::position::handler::update_position_after_dlc_closure;
use crate::trade::position::handler::update_position_after_rollover;
use crate::trade::FundingFeeEvent;
use anyhow::anyhow;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use dlc_manager::ReferenceId;
use dlc_messages::channel::CollaborativeCloseOffer;
use dlc_messages::channel::OfferChannel;
use dlc_messages::channel::Reject;
use dlc_messages::channel::RenewOffer;
use dlc_messages::channel::SettleOffer;
use itertools::Itertools;
use lightning::chain::transaction::OutPoint;
use lightning::sign::DelayedPaymentOutputDescriptor;
use lightning::sign::SpendableOutputDescriptor;
use lightning::sign::StaticPaymentOutputDescriptor;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use std::collections::HashSet;
use std::sync::Arc;
use std::time::Duration;
use time::OffsetDateTime;
use tokio::task::JoinHandle;
use tracing::instrument;
use uuid::Uuid;
use xxi_node::bitcoin_conversion::to_secp_pk_30;
use xxi_node::commons;
use xxi_node::commons::OrderReason;
use xxi_node::dlc_message::DlcMessage;
use xxi_node::dlc_message::SerializedDlcMessage;
use xxi_node::message_handler::TenTenOneAcceptChannel;
use xxi_node::message_handler::TenTenOneCollaborativeCloseOffer;
use xxi_node::message_handler::TenTenOneMessage;
use xxi_node::message_handler::TenTenOneMessageType;
use xxi_node::message_handler::TenTenOneOfferChannel;
use xxi_node::message_handler::TenTenOneReject;
use xxi_node::message_handler::TenTenOneRenewAccept;
use xxi_node::message_handler::TenTenOneRenewOffer;
use xxi_node::message_handler::TenTenOneRenewRevoke;
use xxi_node::message_handler::TenTenOneRolloverAccept;
use xxi_node::message_handler::TenTenOneRolloverOffer;
use xxi_node::message_handler::TenTenOneRolloverRevoke;
use xxi_node::message_handler::TenTenOneSettleConfirm;
use xxi_node::message_handler::TenTenOneSettleOffer;
use xxi_node::message_handler::TenTenOneSignChannel;
use xxi_node::node;
use xxi_node::node::event::NodeEvent;
use xxi_node::node::rust_dlc_manager::DlcChannelId;
use xxi_node::node::tentenone_message_name;
use xxi_node::node::NodeInfo;
use xxi_node::node::RunningNode;
use xxi_node::transaction::Transaction;
use xxi_node::TransactionDetails;
/// App-side wrapper around the XXI node, bundling the running node with app-specific state.
#[derive(Clone)]
pub struct Node {
    /// The underlying XXI node which manages the on-chain wallet and the DLC channels.
    pub inner: Arc<
        node::Node<
            bdk_file_store::Store<bdk::wallet::ChangeSet>,
            TenTenOneNodeStorage,
            NodeStorage,
        >,
    >,
    // Never read; held so the running node's background machinery stays alive for as long as
    // this wrapper exists (presumably dropped tasks stop — confirm in `RunningNode`).
    _running: Arc<RunningNode>,
    // TODO: we should make this persistent as invoices might get paid later - but for now this is
    // good enough
    pub pending_usdp_invoices: Arc<parking_lot::Mutex<HashSet<bitcoin_old::hashes::sha256::Hash>>>,
    /// Handle of the task watching for external funding (bitcoin address or hodl invoice).
    pub watcher_handle: Arc<parking_lot::Mutex<Option<JoinHandle<()>>>>,
}
impl Node {
pub fn new(
node: Arc<
node::Node<
bdk_file_store::Store<bdk::wallet::ChangeSet>,
TenTenOneNodeStorage,
NodeStorage,
>,
>,
running: RunningNode,
) -> Self {
Self {
inner: node,
_running: Arc::new(running),
pending_usdp_invoices: Arc::new(Default::default()),
watcher_handle: Arc::new(Default::default()),
}
}
}
/// Snapshot of the wallet balances, denominated in satoshis.
pub struct Balances {
    /// Confirmed plus trusted-pending on-chain balance.
    pub on_chain: u64,
    /// Usable DLC channel balance; `None` if it could not be determined.
    pub off_chain: Option<u64>,
}
impl From<Balances> for crate::event::api::Balances {
fn from(value: Balances) -> Self {
Self {
on_chain: value.on_chain,
off_chain: value.off_chain,
}
}
}
/// The wallet's transaction history.
pub struct WalletHistory {
    /// On-chain transactions known to the wallet.
    pub on_chain: Vec<TransactionDetails>,
}
impl Node {
/// Returns the current blockchain height as seen by the underlying node.
pub fn get_blockchain_height(&self) -> Result<u64> {
    self.inner.get_blockchain_height()
}
pub fn get_wallet_balances(&self) -> Balances {
let on_chain = self.inner.get_on_chain_balance();
let on_chain = on_chain.confirmed + on_chain.trusted_pending;
let off_chain = match self.inner.get_dlc_channels_usable_balance() {
Ok(off_chain) => Some(off_chain.to_sat()),
Err(e) => {
tracing::error!("Failed to get dlc channels usable balance. {e:#}");
None
}
};
Balances {
on_chain,
off_chain,
}
}
/// Returns the wallet's on-chain transaction history.
pub fn get_wallet_history(&self) -> WalletHistory {
    let on_chain = self.inner.get_on_chain_history();
    WalletHistory { on_chain }
}
/// Drains and processes all DLC messages queued by the message handler.
///
/// A failure to process one message is logged and surfaced to the UI as a failed background
/// task; it does not abort processing of the remaining messages.
pub fn process_incoming_dlc_messages(&self) {
    if !self
        .inner
        .dlc_message_handler
        .has_pending_messages_to_process()
    {
        return;
    }

    let messages = self
        .inner
        .dlc_message_handler
        .get_and_clear_received_messages();

    for (node_id, msg) in messages {
        // Captured before `msg` is moved into `process_dlc_message` so they can be used in
        // the error path.
        let msg_name = tentenone_message_name(&msg);
        let msg_type = msg.get_tentenone_message_type();
        if let Err(e) = self.process_dlc_message(to_secp_pk_30(node_id), msg) {
            tracing::error!(
                from = %node_id,
                kind = %msg_name,
                "Failed to process incoming DLC message: {e:#}"
            );

            // Map the message type to the background task that should be marked as failed on
            // the UI.
            let task_status = TaskStatus::Failed(format!("{e:#}"));
            let task = match msg_type {
                TenTenOneMessageType::Trade => BackgroundTask::AsyncTrade(task_status),
                TenTenOneMessageType::Expire => BackgroundTask::Expire(task_status),
                TenTenOneMessageType::Liquidate => BackgroundTask::Liquidate(task_status),
                TenTenOneMessageType::Rollover => BackgroundTask::Rollover(task_status),
                TenTenOneMessageType::Other => {
                    tracing::warn!("Ignoring error received from coordinator unrelated to a trade or rollover.");
                    continue;
                }
            };

            event::publish(&EventInternal::BackgroundNotification(task));
        }
    }
}
/// [`process_dlc_message`] processes incoming dlc messages and updates the 10101
/// position accordingly.
/// - Any other message will be ignored.
/// - Any dlc message that has already been processed will be skipped.
///
/// If an offer is received [`TenTenOneMessage::Offer`], [`TenTenOneMessage::SettleOffer`],
/// [`TenTenOneMessage::CollaborativeCloseOffer`], [`TenTenOneMessage::RenewOffer`] will get
/// automatically accepted. Unless the maturity date of the offer is already outdated.
///
/// FIXME(holzeis): This function manipulates different data objects in different data sources
/// and should use a transaction to make all changes atomic. Not doing so risks ending up in
/// an inconsistent state. One way of fixing that could be to
/// (1) use a single data source for the 10101 data and the rust-dlc data.
/// (2) wrap the function into a db transaction which can be atomically rolled back on error or
/// committed on success.
fn process_dlc_message(&self, node_id: PublicKey, msg: TenTenOneMessage) -> Result<()> {
    tracing::info!(
        from = %node_id,
        kind = %tentenone_message_name(&msg),
        "Processing message"
    );
    tracing::debug!(
        from = %node_id,
        "Received message"
    );

    // Deduplicate: if this exact message (by hash) was already persisted, skip it entirely.
    let inbound_msg = {
        let mut conn = db::connection()?;
        let serialized_inbound_message = SerializedDlcMessage::try_from(&msg)?;
        let inbound_msg = DlcMessage::new(node_id, serialized_inbound_message, true)?;
        match db::dlc_messages::DlcMessage::get(&mut conn, &inbound_msg.message_hash)? {
            Some(_) => {
                tracing::debug!(%node_id, kind=%tentenone_message_name(&msg), "Received message that has already been processed, skipping.");
                return Ok(());
            }
            None => inbound_msg,
        }
    };

    let resp = match self
        .inner
        .process_tentenone_message(msg.clone(), node_id)
        .with_context(|| {
            format!(
                "Failed to handle {} message from {node_id}",
                tentenone_message_name(&msg)
            )
        }) {
        Ok(resp) => resp,
        Err(e) => {
            // If rust-dlc failed to process an incoming offer, the offered channel state was
            // never stored, so rejecting must bypass rust-dlc (see [`force_reject_offer`]).
            match &msg {
                TenTenOneMessage::Offer(TenTenOneOfferChannel {
                    offer_channel:
                        OfferChannel {
                            temporary_channel_id: channel_id,
                            reference_id,
                            ..
                        },
                    ..
                })
                | TenTenOneMessage::SettleOffer(TenTenOneSettleOffer {
                    settle_offer:
                        SettleOffer {
                            channel_id,
                            reference_id,
                            ..
                        },
                    ..
                })
                | TenTenOneMessage::RenewOffer(TenTenOneRenewOffer {
                    renew_offer:
                        RenewOffer {
                            channel_id,
                            reference_id,
                            ..
                        },
                    ..
                })
                | TenTenOneMessage::RolloverOffer(TenTenOneRolloverOffer {
                    renew_offer:
                        RenewOffer {
                            channel_id,
                            reference_id,
                            ..
                        },
                    ..
                }) => {
                    if let Err(e) = self.force_reject_offer(node_id, *channel_id, *reference_id)
                    {
                        tracing::error!(
                            channel_id = hex::encode(channel_id),
                            "Failed to reject offer. Error: {e:#}"
                        );
                    }
                }
                _ => {}
            }

            return Err(e);
        }
    };

    if let Some(msg) = resp.clone() {
        // store dlc message immediately so we do not lose the response if something
        // goes wrong afterwards.
        self.inner
            .event_handler
            .publish(NodeEvent::StoreDlcMessage { peer: node_id, msg });
    }

    // Only now record the inbound message as processed.
    {
        let mut conn = db::connection()?;
        db::dlc_messages::DlcMessage::insert(&mut conn, inbound_msg)?;
    }

    // Update the 10101 order/position state according to the message kind.
    match msg {
        TenTenOneMessage::Offer(offer) => {
            tracing::info!(
                channel_id = hex::encode(offer.offer_channel.temporary_channel_id),
                "Automatically accepting dlc channel offer"
            );
            self.process_dlc_channel_offer(&offer)?;
        }
        TenTenOneMessage::SettleOffer(offer) => {
            tracing::info!(
                channel_id = hex::encode(offer.settle_offer.channel_id),
                "Automatically accepting settle offer"
            );
            self.process_settle_offer(&offer)?;
        }
        TenTenOneMessage::RenewOffer(offer) => {
            tracing::info!(
                channel_id = hex::encode(offer.renew_offer.channel_id),
                "Automatically accepting renew offer"
            );
            self.process_renew_offer(&offer)?;
        }
        TenTenOneMessage::RolloverOffer(offer) => {
            tracing::info!(
                channel_id = hex::encode(offer.renew_offer.channel_id),
                "Automatically accepting rollover offer"
            );
            self.process_rollover_offer(&offer)?;
        }
        TenTenOneMessage::RenewRevoke(TenTenOneRenewRevoke {
            renew_revoke,
            order_id,
        }) => {
            let channel_id_hex = hex::encode(renew_revoke.channel_id);

            tracing::info!(
                order_id = %order_id,
                channel_id = %channel_id_hex,
                "Finished renew protocol"
            );

            let expiry_timestamp = self
                .inner
                .get_expiry_for_confirmed_dlc_channel(&renew_revoke.channel_id)?;

            let filled_order = order::handler::order_filled(Some(order_id))
                .context("Cannot mark order as filled for confirmed DLC")?;

            update_position_after_dlc_channel_creation_or_update(
                filled_order,
                expiry_timestamp,
            )
            .context("Failed to update position after DLC update")?;

            event::publish(&EventInternal::BackgroundNotification(
                BackgroundTask::AsyncTrade(TaskStatus::Success),
            ));
        }
        TenTenOneMessage::RolloverRevoke(TenTenOneRolloverRevoke { renew_revoke }) => {
            let channel_id_hex = hex::encode(renew_revoke.channel_id);

            tracing::info!(
                channel_id = %channel_id_hex,
                "Finished rollover protocol"
            );

            let position = update_position_after_rollover()
                .context("Failed to update position after rollover protocol finished")?;

            // The funding fees accrued up to this rollover are settled in the rolled-over
            // channel, so mark them as paid.
            mark_funding_fee_events_as_paid(position.contract_symbol, position.created)
                .context("Failed to mark funding fee events as paid")?;

            event::publish(&EventInternal::BackgroundNotification(
                BackgroundTask::Rollover(TaskStatus::Success),
            ));
        }
        TenTenOneMessage::Sign(TenTenOneSignChannel {
            order_id,
            sign_channel,
        }) => {
            let expiry_timestamp = self
                .inner
                .get_expiry_for_confirmed_dlc_channel(&sign_channel.channel_id)?;

            let filled_order = order::handler::order_filled(Some(order_id))
                .context("Cannot mark order as filled for confirmed DLC")?;

            update_position_after_dlc_channel_creation_or_update(
                filled_order,
                expiry_timestamp,
            )
            .context("Failed to update position after DLC creation")?;

            event::publish(&EventInternal::BackgroundNotification(
                BackgroundTask::AsyncTrade(TaskStatus::Success),
            ));
        }
        TenTenOneMessage::SettleConfirm(TenTenOneSettleConfirm { order_id, .. }) => {
            tracing::debug!("Position based on DLC channel is being closed");

            let filled_order = order::handler::order_filled(Some(order_id))?;

            update_position_after_dlc_closure(filled_order.clone())
                .context("Failed to update position after DLC closure")?;

            // The UI task to complete depends on why the order was created.
            let task = match filled_order.reason.into() {
                OrderReason::Manual => BackgroundTask::AsyncTrade(TaskStatus::Success),
                OrderReason::Expired => BackgroundTask::Expire(TaskStatus::Success),
                OrderReason::CoordinatorLiquidated | OrderReason::TraderLiquidated => {
                    BackgroundTask::Liquidate(TaskStatus::Success)
                }
            };

            event::publish(&EventInternal::BackgroundNotification(task));
        }
        TenTenOneMessage::CollaborativeCloseOffer(TenTenOneCollaborativeCloseOffer {
            collaborative_close_offer: CollaborativeCloseOffer { channel_id, .. },
        }) => {
            event::publish(&EventInternal::BackgroundNotification(
                BackgroundTask::CloseChannel(TaskStatus::Pending),
            ));

            let channel_id_hex_string = hex::encode(channel_id);

            tracing::info!(
                channel_id = channel_id_hex_string,
                node_id = node_id.to_string(),
                "Received an offer to collaboratively close a channel"
            );

            // TODO(bonomat): we should verify that the proposed amount is acceptable
            self.inner
                .accept_dlc_channel_collaborative_close(&channel_id)
                .inspect_err(|e| {
                    event::publish(&EventInternal::BackgroundNotification(
                        BackgroundTask::CloseChannel(TaskStatus::Failed(format!("{e:#}"))),
                    ))
                })?;

            event::publish(&EventInternal::BackgroundNotification(
                BackgroundTask::CloseChannel(TaskStatus::Success),
            ));
        }
        _ => (),
    }

    if let Some(msg) = resp {
        // Everything has been processed successfully, we can safely send the last dlc message,
        // that has been stored before.
        tracing::info!(
            to = %node_id,
            kind = %tentenone_message_name(&msg),
            "Sending message"
        );

        self.inner
            .event_handler
            .publish(NodeEvent::SendLastDlcMessage { peer: node_id });
    }

    Ok(())
}
/// Rejects an offer that failed to get processed during the [`dlc_manager.on_dlc_message`].
///
/// This function will simply update the 10101 meta data and send the reject message without
/// going through rust-dlc.
///
/// Note we can't use the rust-dlc api to reject the offer as the processing failed
/// and the `Channel::Offered`, `Channel::RenewOffered`, `Channel::SettledOffered` have not
/// been stored to the dlc store. The corresponding reject offer would fail in rust-dlc,
/// because the expected channel can't be found by the provided `channel_id`.
#[instrument(fields(channel_id = hex::encode(channel_id), %counterparty),skip_all, err(Debug))]
pub fn force_reject_offer(
    &self,
    counterparty: PublicKey,
    channel_id: DlcChannelId,
    reference_id: Option<ReferenceId>,
) -> Result<()> {
    let now = std::time::SystemTime::now();
    let now = now
        .duration_since(std::time::UNIX_EPOCH)
        .expect("Unexpected time error")
        .as_secs();

    let reject = Reject {
        channel_id,
        timestamp: now,
        reference_id,
    };

    // NOTE(review): no order id is available on this path — presumably the handler resolves
    // the currently processing order from `None`; confirm in `order::handler::order_failed`.
    order::handler::order_failed(
        None,
        FailureReason::InvalidDlcOffer(InvalidSubchannelOffer::Unacceptable),
        anyhow!("Failed to accept offer"),
    )
    .context("Could not set order to failed")?;

    self.send_dlc_message(
        counterparty,
        TenTenOneMessage::Reject(TenTenOneReject { reject }),
    )
}
/// Rejects a DLC channel offer via rust-dlc and marks the corresponding order as failed.
#[instrument(fields(channel_id = hex::encode(channel_id)),skip_all, err(Debug))]
pub fn reject_dlc_channel_offer(
    &self,
    order_id: Option<Uuid>,
    channel_id: &DlcChannelId,
) -> Result<()> {
    tracing::warn!("Rejecting dlc channel offer!");

    let (reject, counterparty) = self
        .inner
        .dlc_manager
        .reject_channel(channel_id)
        .with_context(|| {
            format!(
                "Failed to reject DLC channel offer for channel {}",
                hex::encode(channel_id)
            )
        })?;

    order::handler::order_failed(
        order_id,
        FailureReason::InvalidDlcOffer(InvalidSubchannelOffer::Unacceptable),
        anyhow!("Failed to accept dlc channel offer"),
    )
    .context("Could not set order to failed")?;

    self.send_dlc_message(
        to_secp_pk_30(counterparty),
        TenTenOneMessage::Reject(TenTenOneReject { reject }),
    )
}
/// Accepts an incoming DLC channel offer, rejecting it instead if accepting fails.
#[instrument(fields(channel_id = hex::encode(offer.offer_channel.temporary_channel_id)),skip_all, err(Debug))]
pub fn process_dlc_channel_offer(&self, offer: &TenTenOneOfferChannel) -> Result<()> {
    // TODO(holzeis): We should check if the offered amounts are expected.

    self.set_order_to_filling(offer.filled_with.clone())?;

    let order_id = offer.filled_with.order_id;
    let channel_id = offer.offer_channel.temporary_channel_id;
    match self
        .inner
        .dlc_manager
        .accept_channel(
            &channel_id,
            // Fall back to splitting the transaction fees evenly if the offer does not carry
            // an explicit fee config.
            offer
                .offer_channel
                .fee_config
                .map(dlc::FeeConfig::from)
                .unwrap_or(dlc::FeeConfig::EvenSplit),
        )
        .map_err(anyhow::Error::new)
    {
        Ok((accept_channel, _, _, node_id)) => {
            self.send_dlc_message(
                to_secp_pk_30(node_id),
                TenTenOneMessage::Accept(TenTenOneAcceptChannel {
                    accept_channel,
                    order_id: offer.filled_with.order_id,
                }),
            )?;
        }
        Err(e) => {
            tracing::error!("Failed to accept DLC channel offer: {e:#}");
            self.reject_dlc_channel_offer(Some(order_id), &channel_id)?;
        }
    }

    Ok(())
}
/// Rejects a collaborative settlement offer via rust-dlc and marks the corresponding order as
/// failed.
#[instrument(fields(channel_id = hex::encode(channel_id)),skip_all, err(Debug))]
pub fn reject_settle_offer(
    &self,
    order_id: Option<Uuid>,
    channel_id: &DlcChannelId,
) -> Result<()> {
    tracing::warn!("Rejecting pending dlc channel collaborative settlement offer!");

    let (reject, counterparty) = self.inner.dlc_manager.reject_settle_offer(channel_id)?;
    order::handler::order_failed(
        order_id,
        FailureReason::InvalidDlcOffer(InvalidSubchannelOffer::Unacceptable),
        anyhow!("Failed to accept settle offer"),
    )?;

    self.send_dlc_message(
        to_secp_pk_30(counterparty),
        TenTenOneMessage::Reject(TenTenOneReject { reject }),
    )
}
/// Accepts an incoming settle offer after transitioning the matched order to `Filling`.
///
/// Async matches (expiry/liquidation) additionally surface a pending background task on the
/// UI. If accepting the settlement fails, the offer is rejected and the order marked as
/// failed.
#[instrument(fields(channel_id = hex::encode(offer.settle_offer.channel_id)),skip_all, err(Debug))]
pub fn process_settle_offer(&self, offer: &TenTenOneSettleOffer) -> Result<()> {
    // TODO(holzeis): We should check if the offered amounts are expected.

    // Clone only the reason instead of cloning the entire order just to move one field out.
    let order_reason = offer.order.order_reason.clone();
    let order_id = offer.order.id;
    match order_reason {
        OrderReason::Expired
        | OrderReason::CoordinatorLiquidated
        | OrderReason::TraderLiquidated => {
            tracing::info!(
                %order_id,
                "Received an async match from orderbook. Reason: {order_reason:?}"
            );

            let task = if order_reason == OrderReason::Expired {
                BackgroundTask::Expire(TaskStatus::Pending)
            } else {
                BackgroundTask::Liquidate(TaskStatus::Pending)
            };
            event::publish(&EventInternal::BackgroundNotification(task));

            order::handler::async_order_filling(&offer.order, &offer.filled_with)
                .with_context(||
                    format!("Failed to process async match update from orderbook. order_id {order_id}"))?;
        }
        // Received a regular settle offer after a manual order.
        //
        // TODO(holzeis): Eventually this should as well start the trade dialog. At the moment
        // we automatically show the trade dialog since we expect a synchronous response from
        // the orderbook.
        OrderReason::Manual => self.set_order_to_filling(offer.filled_with.clone())?,
    }

    // `order_id` is still in scope from above; the original code re-bound it here redundantly.
    let channel_id = offer.settle_offer.channel_id;

    if let Err(e) = self.inner.accept_dlc_channel_collaborative_settlement(
        offer.filled_with.order_id,
        order_reason,
        &channel_id,
    ) {
        tracing::error!("Failed to accept dlc channel collaborative settlement offer. {e:#}");
        self.reject_settle_offer(Some(order_id), &channel_id)?;
    }

    Ok(())
}
/// Marks the order referenced by `filled_with` as filling, recording the average execution
/// price and the order-matching fee.
fn set_order_to_filling(&self, filled_with: commons::FilledWith) -> Result<()> {
    let order_id = filled_with.order_id;
    tracing::info!(%order_id, "Received match from orderbook");

    let execution_price = filled_with
        .average_execution_price()
        .to_f32()
        .expect("to fit into f32");
    let matching_fee = filled_with.order_matching_fee();

    order::handler::order_filling(order_id, execution_price, matching_fee).with_context(|| {
        format!("Failed to process match update from orderbook. order_id = {order_id}")
    })
}
/// Rejects a renew offer via rust-dlc and marks the corresponding order as failed.
#[instrument(fields(channel_id = hex::encode(channel_id)),skip_all, err(Debug))]
pub fn reject_renew_offer(
    &self,
    order_id: Option<Uuid>,
    channel_id: &DlcChannelId,
) -> Result<()> {
    tracing::warn!("Rejecting dlc channel renew offer!");

    let (reject, counterparty) = self.inner.dlc_manager.reject_renew_offer(channel_id)?;
    order::handler::order_failed(
        order_id,
        FailureReason::InvalidDlcOffer(InvalidSubchannelOffer::Unacceptable),
        anyhow!("Failed to accept renew offer"),
    )?;

    self.send_dlc_message(
        to_secp_pk_30(counterparty),
        TenTenOneMessage::Reject(TenTenOneReject { reject }),
    )
}
/// Accepts an incoming renew offer, rejecting it instead if accepting fails.
#[instrument(fields(channel_id = hex::encode(offer.renew_offer.channel_id)),skip_all, err(Debug))]
pub fn process_renew_offer(&self, offer: &TenTenOneRenewOffer) -> Result<()> {
    // TODO(holzeis): We should check if the offered amounts are expected.

    let order_id = offer.filled_with.order_id;
    let channel_id = offer.renew_offer.channel_id;
    match self.inner.dlc_manager.accept_renew_offer(&channel_id) {
        Ok((renew_accept, node_id)) => {
            // Only mark the order as filling once the renew offer was actually accepted.
            self.set_order_to_filling(offer.filled_with.clone())?;
            position::handler::handle_renew_offer()?;

            self.send_dlc_message(
                to_secp_pk_30(node_id),
                TenTenOneMessage::RenewAccept(TenTenOneRenewAccept {
                    renew_accept,
                    order_id: offer.filled_with.order_id,
                }),
            )?;
        }
        Err(e) => {
            tracing::error!("Failed to accept dlc channel renew offer. {e:#}");
            self.reject_renew_offer(Some(order_id), &channel_id)?;
        }
    };

    Ok(())
}
/// Rejects a rollover offer via rust-dlc.
///
/// Unlike the other reject helpers there is no associated order to mark as failed. A rollover
/// reuses the renew protocol under the hood, hence `reject_renew_offer`.
#[instrument(fields(channel_id = hex::encode(channel_id)),skip_all, err(Debug))]
pub fn reject_rollover_offer(&self, channel_id: &DlcChannelId) -> Result<()> {
    tracing::warn!("Rejecting rollover offer!");

    let (reject, counterparty) = self.inner.dlc_manager.reject_renew_offer(channel_id)?;

    self.send_dlc_message(
        to_secp_pk_30(counterparty),
        TenTenOneMessage::Reject(TenTenOneReject { reject }),
    )
}
/// Accepts an incoming rollover offer, recording any unpaid funding fee events and updating
/// the position expiry. Rejects the offer and fails the UI task if accepting fails.
#[instrument(fields(channel_id = hex::encode(offer.renew_offer.channel_id)),skip_all, err(Debug))]
pub fn process_rollover_offer(&self, offer: &TenTenOneRolloverOffer) -> Result<()> {
    event::publish(&EventInternal::BackgroundNotification(
        BackgroundTask::Rollover(TaskStatus::Pending),
    ));

    // The new expiry is the closest maturity date of the offered contract.
    let expiry_timestamp = OffsetDateTime::from_unix_timestamp(
        offer.renew_offer.contract_info.get_closest_maturity_date() as i64,
    )?;

    let channel_id = offer.renew_offer.channel_id;
    match self.inner.dlc_manager.accept_renew_offer(&channel_id) {
        Ok((renew_accept, node_id)) => {
            let positions = get_positions()?;
            let position = positions.first().context("No position to roll over")?;

            // Convert the coordinator-supplied funding fee events into unpaid local events
            // for the single position being rolled over.
            let new_unpaid_funding_fee_events = handle_unpaid_funding_fee_events(
                &offer
                    .funding_fee_events
                    .iter()
                    .map(|e| {
                        FundingFeeEvent::unpaid(
                            position.contract_symbol,
                            Decimal::try_from(position.quantity).expect("to fit"),
                            position.direction,
                            e.price,
                            e.funding_fee,
                            e.due_date,
                        )
                    })
                    .collect_vec(),
            )?;

            handle_rollover_offer(expiry_timestamp, &new_unpaid_funding_fee_events)?;

            self.send_dlc_message(
                to_secp_pk_30(node_id),
                TenTenOneMessage::RolloverAccept(TenTenOneRolloverAccept { renew_accept }),
            )?;
        }
        Err(e) => {
            tracing::error!("Failed to accept DLC channel rollover offer: {e}");
            event::publish(&EventInternal::BackgroundNotification(
                BackgroundTask::Rollover(TaskStatus::Failed(format!("{e}"))),
            ));
            self.reject_rollover_offer(&channel_id)?;
        }
    };

    Ok(())
}
/// Queues `msg` to be sent to `node_id` via the node event handler.
///
/// Note that this only publishes a [`NodeEvent::SendDlcMessage`]; the actual delivery happens
/// asynchronously in the outbound DLC message handler.
pub fn send_dlc_message(&self, node_id: PublicKey, msg: TenTenOneMessage) -> Result<()> {
    tracing::info!(
        to = %node_id,
        kind = %tentenone_message_name(&msg),
        "Sending message"
    );

    // `msg` is not used after this point, so move it into the event instead of cloning it.
    self.inner.event_handler.publish(NodeEvent::SendDlcMessage {
        peer: node_id,
        msg,
    });

    Ok(())
}
/// Keeps the connection to `peer` alive, reconnecting (after a one-second pause) whenever
/// connecting fails or an established connection is lost. This function never returns.
pub async fn keep_connected(&self, peer: NodeInfo) {
    let reconnect_interval = Duration::from_secs(1);
    loop {
        let connection_closed_future = match self.inner.connect(peer).await {
            Ok(fut) => fut,
            Err(e) => {
                tracing::warn!(
                    %peer,
                    ?reconnect_interval,
                    "Connection failed: {e:#}; reconnecting"
                );
                tokio::time::sleep(reconnect_interval).await;
                continue;
            }
        };
        // Resolves once the established connection has been closed.
        connection_closed_future.await;
        tracing::debug!(
            %peer,
            ?reconnect_interval,
            "Connection lost; reconnecting"
        );

        tokio::time::sleep(reconnect_interval).await;
    }
}
}
/// [`node::Storage`] implementation backed by the app's local database.
#[derive(Clone)]
pub struct NodeStorage;
impl node::Storage for NodeStorage {
// Spendable outputs

/// Persists a spendable output descriptor so the funds can be claimed later.
fn insert_spendable_output(&self, descriptor: SpendableOutputDescriptor) -> Result<()> {
    use SpendableOutputDescriptor::*;
    let outpoint = match &descriptor {
        // Static outputs don't need to be persisted because they pay directly to an address
        // owned by the on-chain wallet
        StaticOutput { .. } => return Ok(()),
        DelayedPaymentOutput(DelayedPaymentOutputDescriptor { outpoint, .. }) => outpoint,
        StaticPaymentOutput(StaticPaymentOutputDescriptor { outpoint, .. }) => outpoint,
    };

    db::insert_spendable_output(*outpoint, descriptor)
}
fn get_spendable_output(
&self,
outpoint: &OutPoint,
) -> Result<Option<SpendableOutputDescriptor>> {
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | true |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/dlc/mod.rs | mobile/native/src/dlc/mod.rs | use crate::api::FeeConfig;
use crate::api::PaymentFlow;
use crate::api::Status;
use crate::api::WalletHistoryItem;
use crate::api::WalletHistoryItemType;
use crate::backup::DBBackupSubscriber;
use crate::commons::reqwest_client;
use crate::config;
use crate::db;
use crate::dlc::dlc_handler::DlcHandler;
use crate::dlc::node::Node;
use crate::dlc::node::NodeStorage;
use crate::dlc::node::WalletHistory;
use crate::event;
use crate::event::EventInternal;
use crate::health::Tx;
use crate::orderbook;
use crate::position::ForceCloseDlcChannelSubscriber;
use crate::state;
use crate::storage::TenTenOneNodeStorage;
use crate::trade::order;
use crate::trade::order::FailureReason;
use crate::trade::order::Order;
use crate::trade::order::OrderReason;
use crate::trade::order::OrderState;
use crate::trade::order::OrderType;
use crate::trade::position;
use crate::watcher::InvoiceWatcher;
use anyhow::anyhow;
use anyhow::Context;
use anyhow::Result;
use bdk::wallet::Balance;
use bdk::FeeRate;
use bitcoin::address::NetworkUnchecked;
use bitcoin::key::XOnlyPublicKey;
use bitcoin::secp256k1::rand::thread_rng;
use bitcoin::secp256k1::rand::RngCore;
use bitcoin::secp256k1::PublicKey;
use bitcoin::secp256k1::SecretKey;
use bitcoin::secp256k1::SECP256K1;
use bitcoin::Address;
use bitcoin::Amount;
use bitcoin::Txid;
use dlc::PartyParams;
use dlc_manager::channel::Channel;
use itertools::chain;
use itertools::Itertools;
use lightning::chain::chaininterface::ConfirmationTarget;
use lightning::sign::KeysManager;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use rust_decimal_macros::dec;
use std::net::IpAddr;
use std::net::Ipv4Addr;
use std::net::SocketAddr;
use std::net::TcpListener;
use std::path::Path;
use std::str::FromStr;
use std::sync::mpsc;
use std::sync::Arc;
use std::time::Duration;
use std::time::SystemTime;
use time::OffsetDateTime;
use tokio::runtime;
use tokio::runtime::Runtime;
use tokio::sync::broadcast;
use tokio::task::spawn_blocking;
use uuid::Uuid;
use xxi_node::bitcoin_conversion::to_ecdsa_signature_30;
use xxi_node::bitcoin_conversion::to_script_29;
use xxi_node::bitcoin_conversion::to_secp_sk_30;
use xxi_node::bitcoin_conversion::to_tx_30;
use xxi_node::bitcoin_conversion::to_txid_29;
use xxi_node::bitcoin_conversion::to_txid_30;
use xxi_node::commons::CollaborativeRevertTraderResponse;
use xxi_node::commons::OrderbookRequest;
use xxi_node::node::dlc_channel::estimated_dlc_channel_fee_reserve;
use xxi_node::node::dlc_channel::estimated_funding_transaction_fee;
use xxi_node::node::event::NodeEventHandler;
use xxi_node::node::rust_dlc_manager::channel::signed_channel::SignedChannel;
use xxi_node::node::rust_dlc_manager::channel::ClosedChannel;
use xxi_node::node::rust_dlc_manager::DlcChannelId;
use xxi_node::node::rust_dlc_manager::Signer;
use xxi_node::node::rust_dlc_manager::Storage as DlcStorage;
use xxi_node::node::XXINodeSettings;
use xxi_node::seed::Bip39Seed;
use xxi_node::storage::DlcChannelEvent;
use xxi_node::ConfirmationStatus;
pub mod dlc_handler;
mod subscriber;
pub mod node;
const PROCESS_INCOMING_DLC_MESSAGES_INTERVAL: Duration = Duration::from_millis(200);
const UPDATE_WALLET_HISTORY_INTERVAL: Duration = Duration::from_secs(5);
const CHECK_OPEN_ORDERS_INTERVAL: Duration = Duration::from_secs(60);
const NODE_SYNC_INTERVAL: Duration = Duration::from_secs(300);
/// The name of the BDK wallet database file.
const WALLET_DB_FILE_NAME: &str = "bdk-wallet";
/// The prefix to the [`bdk_file_store`] database file where BDK persists
/// [`bdk::wallet::ChangeSet`]s.
///
/// We hard-code the prefix so that we can always be sure that we are loading the correct file on
/// start-up.
const WALLET_DB_PREFIX: &str = "10101-app";
/// Trigger an on-chain sync followed by an update to the wallet balance and history.
///
/// We do not wait for the triggered task to finish, because the effect will be reflected
/// asynchronously on the UI.
pub async fn refresh_wallet_info() -> Result<()> {
    let node = state::get_node();
    let runtime = state::get_or_create_tokio_runtime()?;

    sync_node(runtime.handle()).await;

    // Spawn into the blocking thread pool of the dedicated backend runtime to avoid blocking the UI
    // thread.
    runtime.spawn_blocking(move || {
        if let Err(e) = keep_wallet_balance_and_history_up_to_date(&node) {
            tracing::error!("Failed to keep wallet history up to date: {e:#}");
        }
    });

    Ok(())
}
/// Sync the on-chain wallet and run the DLC manager's periodic check.
///
/// Errors are logged rather than propagated. The periodic check runs on the blocking thread
/// pool since it is synchronous.
pub async fn sync_node(runtime: &runtime::Handle) {
    let node = state::get_node();

    if let Err(e) = node.inner.sync_on_chain_wallet().await {
        tracing::error!("On-chain sync failed: {e:#}");
    }

    runtime
        .spawn_blocking(move || {
            if let Err(e) = node.inner.dlc_manager.periodic_check() {
                tracing::error!("Failed to run DLC manager periodic check: {e:#}");
            };
        })
        .await
        .expect("task to complete");
}
/// Run a full on-chain wallet sync, scanning addresses up to the given `stop_gap`.
pub async fn full_sync(stop_gap: usize) -> Result<()> {
    let runtime = state::get_or_create_tokio_runtime()?;

    let node = state::get_node();
    let task = runtime.spawn(async move {
        node.inner.full_sync(stop_gap).await?;
        anyhow::Ok(())
    });

    task.await.expect("task to complete")?;

    Ok(())
}
/// Returns the wallet's seed phrase as a list of words.
pub fn get_seed_phrase() -> Vec<String> {
    state::get_seed().get_seed_phrase()
}
/// The maintenance margin rate from the 10101 config.
///
/// Falls back to a default of 10% while the config has not been received yet.
pub fn get_maintenance_margin_rate() -> Decimal {
    if let Some(config) = state::try_get_tentenone_config() {
        Decimal::try_from(config.maintenance_margin_rate).expect("to fit into decimal")
    } else {
        tracing::warn!("The ten ten one config is not ready yet. Returning default value!");
        dec!(0.1)
    }
}
/// The fee rate applied when matching an order, optionally reduced by the user's referral
/// fee bonus.
///
/// Falls back to a default of 0.3% while the 10101 config has not been received yet.
pub fn get_order_matching_fee_rate(deduct_rebate: bool) -> Decimal {
    let Some(config) = state::try_get_tentenone_config() else {
        return dec!(0.003);
    };

    let fee_percent =
        Decimal::try_from(config.order_matching_fee_rate).expect("to fit into decimal");

    if deduct_rebate {
        let fee_discount = config.referral_status.referral_fee_bonus;
        fee_percent - (fee_percent * fee_discount)
    } else {
        fee_percent
    }
}
/// Gets the seed from the storage or from disk. However it will panic if the seed can not be found.
/// No new seed will be created.
fn get_seed() -> Bip39Seed {
    match state::try_get_seed() {
        Some(seed) => seed,
        None => {
            let seed_dir = config::get_seed_dir();

            let network = config::get_network();
            let seed_path = Path::new(&seed_dir).join(network.to_string()).join("seed");
            // The seed file must already exist (created during wallet setup/restore).
            assert!(seed_path.exists());

            let seed = Bip39Seed::initialize(&seed_path).expect("to read seed file");
            // Cache the seed so subsequent calls do not hit the disk again.
            state::set_seed(seed.clone());
            seed
        }
    }
}
/// Returns the node's secret key.
///
/// If the node is running, the key comes from the node itself; otherwise it is derived from
/// the seed's lightning entropy via a [`KeysManager`].
pub fn get_node_key() -> SecretKey {
    match state::try_get_node() {
        Some(node) => node.inner.node_key(),
        // TODO: This seems pretty suspicious.
        None => {
            let seed = get_seed();
            let time_since_unix_epoch = SystemTime::now()
                .duration_since(SystemTime::UNIX_EPOCH)
                .expect("unix epos to not be earlier than now");
            let keys_manager = KeysManager::new(
                &seed.lightning_seed(),
                time_since_unix_epoch.as_secs(),
                time_since_unix_epoch.subsec_nanos(),
            );
            to_secp_sk_30(keys_manager.get_node_secret_key())
        }
    }
}
/// Returns the node's public key, derived from [`get_node_key`].
pub fn get_node_pubkey() -> PublicKey {
    get_node_key().public_key(SECP256K1)
}
/// Applies the given settings to the running node.
pub async fn update_node_settings(settings: XXINodeSettings) {
    let node = state::get_node();
    node.inner.update_settings(settings).await;
}
/// Returns the public key of the oracle the node is configured with.
pub fn get_oracle_pubkey() -> XOnlyPublicKey {
    state::get_node().inner.oracle_pubkey
}
/// Gets the 10101 node storage, initializes the storage if not found yet.
pub fn get_storage() -> TenTenOneNodeStorage {
    match state::try_get_storage() {
        Some(storage) => storage,
        None => {
            // storage is only initialized before the node is started if a new wallet is created
            // or restored.
            let storage = TenTenOneNodeStorage::new(
                config::get_data_dir(),
                config::get_network(),
                get_node_key(),
            );
            tracing::info!("Initialized 10101 storage!");
            // Cache the storage so subsequent calls reuse the same instance.
            state::set_storage(storage.clone());
            storage
        }
    }
}
/// Start the node
///
/// Assumes that the seed has already been initialized
pub fn run(
    runtime: &Runtime,
    tx: Tx,
    fcm_token: String,
    tx_websocket: broadcast::Sender<OrderbookRequest>,
) -> Result<()> {
    runtime.block_on(async move {
        event::publish(&EventInternal::Init("Starting full ldk node".to_string()));

        let mut ephemeral_randomness = [0; 32];
        thread_rng().fill_bytes(&mut ephemeral_randomness);

        // Bind to an OS-assigned free port; only the resulting address is kept, the
        // temporary listener is dropped at the end of the block.
        let address = {
            let listener = TcpListener::bind("0.0.0.0:0")?;
            listener.local_addr().expect("To get a free local address")
        };

        let node_storage = Arc::new(NodeStorage);

        let storage = get_storage();

        // Register event subscribers: DB backups, force-close handling and invoice updates.
        event::subscribe(DBBackupSubscriber::new(storage.clone().client));
        event::subscribe(ForceCloseDlcChannelSubscriber);
        let (ln_sender, _) = broadcast::channel::<String>(5);
        event::subscribe(InvoiceWatcher {
            sender: ln_sender.clone(),
        });
        state::set_ln_payment_watcher(ln_sender);

        let node_event_handler = Arc::new(NodeEventHandler::new());

        let wallet_storage = {
            let wallet_dir = Path::new(&config::get_data_dir()).join(WALLET_DB_FILE_NAME);
            bdk_file_store::Store::open_or_create_new(WALLET_DB_PREFIX.as_bytes(), wallet_dir)?
        };

        let (dlc_event_sender, dlc_event_receiver) = mpsc::channel::<DlcChannelEvent>();
        let node = xxi_node::node::Node::new(
            "10101",
            config::get_network(),
            Path::new(&storage.data_dir),
            storage.clone(),
            node_storage,
            wallet_storage,
            address,
            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), address.port()),
            config::get_electrs_endpoint(),
            state::get_seed(),
            ephemeral_randomness,
            xxi_node_settings(),
            vec![config::get_oracle_info().into()],
            config::get_oracle_info().public_key,
            node_event_handler.clone(),
            dlc_event_sender,
        )?;
        let node = Arc::new(node);

        let _running = node.start(dlc_event_receiver)?;
        let node = Arc::new(Node::new(node, _running));
        state::set_node(node.clone());

        orderbook::subscribe(
            node.inner.node_key(),
            runtime,
            tx.orderbook,
            fcm_token,
            tx_websocket,
        )?;

        // Initial wallet sync; errors are logged but do not abort startup.
        if let Err(e) = spawn_blocking({
            let node = node.clone();
            move || keep_wallet_balance_and_history_up_to_date(&node)
        })
        .await
        .expect("To spawn blocking task")
        {
            tracing::error!("Failed to update balance and history: {e:#}");
        }

        let dlc_handler = DlcHandler::new(node.clone());
        runtime.spawn(async move {
            dlc_handler::handle_outbound_dlc_messages(dlc_handler, node_event_handler.subscribe())
                .await
        });

        node.spawn_listen_dlc_channels_event_task();

        // Background task: periodically refresh wallet balance and history.
        runtime.spawn({
            let node = node.clone();
            async move {
                loop {
                    tokio::time::sleep(UPDATE_WALLET_HISTORY_INTERVAL).await;
                    let node = node.clone();
                    if let Err(e) =
                        spawn_blocking(move || keep_wallet_balance_and_history_up_to_date(&node))
                            .await
                            .expect("To spawn blocking task")
                    {
                        tracing::error!("Failed to update balance and history: {e:#}");
                    }
                }
            }
        });

        // Background task: periodic node sync.
        runtime.spawn({
            let runtime = runtime.handle().clone();
            async move {
                loop {
                    sync_node(&runtime).await;
                    tokio::time::sleep(NODE_SYNC_INTERVAL).await;
                }
            }
        });

        // Background task: keep the connection to the coordinator alive.
        let coordinator_info = config::get_coordinator_info();
        runtime.spawn({
            let node = node.clone();
            async move { node.keep_connected(coordinator_info).await }
        });

        // Background task: continuously process inbound DLC messages off the async runtime.
        runtime.spawn({
            let node = node.clone();
            async move {
                loop {
                    let node = node.clone();
                    spawn_blocking(move || node.process_incoming_dlc_messages())
                        .await
                        .expect("To spawn blocking thread");
                    tokio::time::sleep(PROCESS_INCOMING_DLC_MESSAGES_INTERVAL).await;
                }
            }
        });

        // Background task: watch open orders for state changes.
        runtime.spawn(async move {
            loop {
                if let Err(e) = spawn_blocking(order::handler::check_open_orders)
                    .await
                    .expect("To spawn blocking task")
                {
                    tracing::error!("Error while checking open orders: {e:#}");
                }
                tokio::time::sleep(CHECK_OPEN_ORDERS_INTERVAL).await;
            }
        });

        event::publish(&EventInternal::Init("10101 is ready.".to_string()));

        // One-off: populate the new wallet DB from the old wallet directory if present.
        tokio::spawn(full_sync_on_wallet_db_migration());

        Ok(())
    })
}
/// Performs a one-off full on-chain sync if the pre-migration wallet directory still
/// exists, then deletes that directory on success so the migration runs only once.
pub async fn full_sync_on_wallet_db_migration() {
    let node = state::get_node();
    let old_wallet_dir = Path::new(&config::get_data_dir())
        .join(config::get_network().to_string())
        .join("on_chain");
    if old_wallet_dir.exists() {
        event::publish(&EventInternal::BackgroundNotification(
            event::BackgroundTask::FullSync(event::TaskStatus::Pending),
        ));
        // Address stop gap passed to the full sync.
        let stop_gap = 20;
        tracing::info!(
            %stop_gap,
            "Old wallet directory detected. Attempting to populate new wallet with full sync"
        );
        match node.inner.full_sync(stop_gap).await {
            Ok(_) => {
                tracing::info!("Full sync successful");
                // Spawn into the blocking thread pool of the dedicated backend runtime to avoid
                // blocking the UI thread.
                if let Ok(runtime) = state::get_or_create_tokio_runtime() {
                    runtime
                        .spawn_blocking(move || {
                            if let Err(e) = keep_wallet_balance_and_history_up_to_date(&node) {
                                tracing::error!("Failed to keep wallet history up to date: {e:#}");
                            }
                        })
                        .await
                        .expect("task to complete");
                }
                event::publish(&EventInternal::BackgroundNotification(
                    event::BackgroundTask::FullSync(event::TaskStatus::Success),
                ));
                // Removing the old directory marks the migration as done; if this fails the
                // full sync will simply run again on the next startup.
                if let Err(e) = std::fs::remove_dir_all(old_wallet_dir) {
                    tracing::info!("Failed to delete old wallet directory: {e:#}");
                }
            }
            Err(e) => {
                tracing::error!("Full sync failed: {e:#}");
                event::publish(&EventInternal::BackgroundNotification(
                    event::BackgroundTask::FullSync(event::TaskStatus::Failed(format!("{e:#}"))),
                ));
            }
        };
    }
}
/// Creates a fresh BIP-39 seed, persists it to `target_seed_file` and registers it as
/// the app seed.
pub fn init_new_mnemonic(target_seed_file: &Path) -> Result<()> {
    state::set_seed(Bip39Seed::initialize(target_seed_file)?);

    Ok(())
}
/// Restores the wallet seed from the given mnemonic words, persists it to
/// `target_seed_file`, initializes the 10101 storage and restores persisted data via
/// the storage client.
pub async fn restore_from_mnemonic(seed_words: &str, target_seed_file: &Path) -> Result<()> {
    let seed = Bip39Seed::restore_from_mnemonic(seed_words, target_seed_file)?;
    state::set_seed(seed);

    // Storage needs the node key, which in turn is derived from the freshly restored seed.
    let storage = TenTenOneNodeStorage::new(
        config::get_data_dir(),
        config::get_network(),
        get_node_key(),
    );
    tracing::info!("Initialized 10101 storage!");
    state::set_storage(storage.clone());

    storage.client.restore(storage.dlc_storage).await
}
/// Recomputes the wallet balances and the full wallet history (regular on-chain
/// transactions, DLC channel funding transactions and trades) and publishes the result
/// as a `WalletInfoUpdateNotification` event.
fn keep_wallet_balance_and_history_up_to_date(node: &Node) -> Result<()> {
    let wallet_balances = node.get_wallet_balances();

    let WalletHistory { on_chain } = node.get_wallet_history();

    // If we find fund transactions among the on-chain transactions we are aware of, we treat them
    // as a special case so that they can be displayed with extra information.
    let dlc_channels = node.inner.list_signed_dlc_channels()?;
    let dlc_channel_funding_tx_details = on_chain.iter().filter_map(|details| {
        match dlc_channels
            .iter()
            .find(|item| item.fund_tx.txid() == to_txid_29(details.transaction.txid()))
        {
            None => None,
            Some(channel) => {
                // A funding transaction is expected to be a net outflow; anything else is
                // skipped with a warning.
                let amount_sats = match details.sent.checked_sub(details.received) {
                    Some(amount_sats) => amount_sats,
                    None => {
                        tracing::warn!("Omitting DLC channel funding transaction that pays to us!");
                        return None;
                    }
                };

                let (status, timestamp) =
                    confirmation_status_to_status_and_timestamp(&details.confirmation_status);

                Some(WalletHistoryItem {
                    flow: PaymentFlow::Outbound,
                    amount_sats: amount_sats.to_sat(),
                    timestamp,
                    status,
                    wallet_type: WalletHistoryItemType::DlcChannelFunding {
                        funding_txid: details.transaction.txid().to_string(),
                        // this is not 100% correct as fees are not exactly divided by 2. The share
                        // of the funding transaction fee that the user has paid depends on their
                        // inputs and change outputs.
                        funding_tx_fee_sats: details
                            .fee
                            .as_ref()
                            .map(|fee| (*fee / 2).to_sat())
                            .ok(),
                        confirmations: details.confirmation_status.n_confirmations() as u64,
                        our_channel_input_amount_sats: channel.own_params.collateral,
                    },
                })
            }
        }
    });

    // Exclude channel funding transactions from the regular on-chain history; they are
    // covered by the dedicated items built above.
    let on_chain = on_chain.iter().filter(|details| {
        !dlc_channels
            .iter()
            .any(|channel| channel.fund_tx.txid() == to_txid_29(details.transaction.txid()))
    });

    let on_chain = on_chain.filter_map(|details| {
        let net_sats = match details.net_amount() {
            Ok(net_amount) => net_amount.to_sat(),
            Err(e) => {
                tracing::error!(
                    ?details,
                    "Failed to calculate net amount for transaction: {e:#}"
                );
                return None;
            }
        };

        // The sign of the net amount decides whether the transaction is shown as inbound
        // or outbound.
        let (flow, amount_sats) = if net_sats >= 0 {
            (PaymentFlow::Inbound, net_sats as u64)
        } else {
            (PaymentFlow::Outbound, net_sats.unsigned_abs())
        };

        let (status, timestamp) =
            confirmation_status_to_status_and_timestamp(&details.confirmation_status);

        let wallet_type = WalletHistoryItemType::OnChain {
            txid: details.transaction.txid().to_string(),
            fee_sats: details.fee.as_ref().map(|fee| Amount::to_sat(*fee)).ok(),
            confirmations: details.confirmation_status.n_confirmations() as u64,
        };

        Some(WalletHistoryItem {
            flow,
            amount_sats,
            timestamp,
            status,
            wallet_type,
        })
    });

    let trades = db::get_all_trades()?;
    // We reverse the `Trade`s so that they are already pre-sorted _from oldest to newest_ in terms
    // of insertion. This is important because we sometimes insert `Trade`s back-to-back, so the
    // timestamps can coincide.
    let trades = trades.iter().rev().map(|trade| {
        // A positive trade cost means sats left the trader's balance.
        let flow = if trade.trade_cost.is_positive() {
            PaymentFlow::Outbound
        } else {
            PaymentFlow::Inbound
        };

        let amount_sats = trade.trade_cost.abs().to_sat() as u64;

        let timestamp = trade.timestamp;

        // TODO: Add context about direction + contracts!
        WalletHistoryItem {
            flow,
            amount_sats,
            timestamp: timestamp.unix_timestamp() as u64,
            status: Status::Confirmed,
            wallet_type: WalletHistoryItemType::Trade {
                order_id: trade.order_id.to_string(),
                fee_sat: trade.fee.to_sat(),
                pnl: trade.pnl.map(|pnl| pnl.to_sat()),
                contracts: trade
                    .contracts
                    .ceil()
                    .to_u64()
                    .expect("Decimal to fit into u64"),
                direction: trade.direction.to_string(),
            },
        }
    });

    // Merge all history sources and sort newest-first for display.
    let history = chain![on_chain, trades, dlc_channel_funding_tx_details]
        .sorted_by(|a, b| b.timestamp.cmp(&a.timestamp))
        .collect();

    let wallet_info = event::api::WalletInfo {
        balances: wallet_balances.into(),
        history,
    };

    event::publish(&EventInternal::WalletInfoUpdateNotification(wallet_info));

    Ok(())
}
/// Returns an unused on-chain wallet address as a string.
pub fn get_unused_address() -> Result<String> {
    state::get_node()
        .inner
        .get_unused_address()
        .map(|address| address.to_string())
}
/// Returns a freshly derived on-chain wallet address as a string.
pub fn get_new_address() -> Result<String> {
    state::get_node()
        .inner
        .get_new_address()
        .map(|address| address.to_string())
}
/// Closes the app's signed DLC channel, cooperatively or force-closing it depending on
/// `is_force_close`.
///
/// # Errors
/// Fails if there is no signed channel to close.
pub async fn close_channel(is_force_close: bool) -> Result<()> {
    let node = state::get_node();

    let channels = node.inner.list_signed_dlc_channels()?;
    // Only the first signed channel is considered.
    let channel_details = channels.first().context("No channel to close")?;

    node.inner
        .close_dlc_channel(channel_details.channel_id, is_force_close)
        .await?;

    Ok(())
}
/// Lists all signed DLC channels; yields an empty list while the node is not
/// initialized yet.
pub fn get_signed_dlc_channels() -> Result<Vec<SignedChannel>> {
    match state::try_get_node() {
        Some(node) => node.inner.list_signed_dlc_channels(),
        None => Ok(vec![]),
    }
}
/// Returns the on-chain wallet balance, or the default balance while the node is not
/// initialized yet.
pub fn get_onchain_balance() -> Balance {
    state::try_get_node()
        .map(|node| node.inner.get_on_chain_balance())
        .unwrap_or_else(Balance::default)
}
/// Returns our usable off-chain balance across DLC channels; `Amount::ZERO` while the
/// node is not initialized yet.
pub fn get_usable_dlc_channel_balance() -> Result<Amount> {
    match state::try_get_node() {
        Some(node) => node.inner.get_dlc_channels_usable_balance(),
        None => Ok(Amount::ZERO),
    }
}
/// Returns the counterparty's usable off-chain balance across DLC channels.
///
/// NOTE(review): unlike the sibling getters in this file this uses `state::get_node()`
/// rather than `state::try_get_node()`, so it requires the node to already be
/// initialized — confirm this is intentional.
pub fn get_usable_dlc_channel_balance_counterparty() -> Result<Amount> {
    let node = state::get_node();
    node.inner.get_dlc_channels_usable_balance_counterparty()
}
/// Accepts a collaborative revert of the DLC channel with the given id: builds the
/// collaborative close transaction with the proposed payouts, signs our input and sends
/// the signature to the coordinator for confirmation.
///
/// The HTTP confirmation and the subsequent local state update run asynchronously on the
/// backend runtime; this function returns before they complete.
pub fn collaborative_revert_channel(
    channel_id: DlcChannelId,
    coordinator_address: Address<NetworkUnchecked>,
    coordinator_amount: Amount,
    trader_amount: Amount,
    execution_price: Decimal,
) -> Result<()> {
    let node = state::get_node();
    let node = node.inner.clone();

    let coordinator_address = coordinator_address.require_network(node.network)?;
    let channel_id_hex = hex::encode(channel_id);

    let dlc_channels = node.list_signed_dlc_channels()?;
    let signed_channel = dlc_channels
        .into_iter()
        .find(|c| c.channel_id == channel_id)
        .with_context(|| format!("Could not find signed channel {channel_id_hex}"))?;

    let fund_output_value = signed_channel.fund_tx.output[signed_channel.fund_output_index].value;

    tracing::debug!(
        channel_id = channel_id_hex,
        trader_amount_sats = %trader_amount.to_sat(),
        coordinator_amount_sats = %coordinator_amount.to_sat(),
        "Accepting collaborative revert request");

    // The coordinator's payout script is overridden with the address they provided.
    let close_tx = dlc::channel::create_collaborative_close_transaction(
        &PartyParams {
            payout_script_pubkey: to_script_29(coordinator_address.script_pubkey()),
            ..signed_channel.counter_params.clone()
        },
        coordinator_amount.to_sat(),
        &signed_channel.own_params,
        trader_amount.to_sat(),
        bitcoin_old::OutPoint {
            txid: signed_channel.fund_tx.txid(),
            vout: signed_channel.fund_output_index as u32,
        },
        0, // argument is not being used
    );

    let own_fund_sk = node
        .dlc_wallet
        .get_secret_key_for_pubkey(&signed_channel.own_params.fund_pubkey)?;

    // Sign the funding input of the close transaction with our funding key.
    let close_signature = dlc::util::get_raw_sig_for_tx_input(
        &bitcoin_old::secp256k1::Secp256k1::new(),
        &close_tx,
        0,
        &signed_channel.fund_script_pubkey,
        fund_output_value,
        &own_fund_sk,
    )?;

    tracing::debug!(
        tx_id = close_tx.txid().to_string(),
        "Signed collab revert transaction"
    );

    let data = CollaborativeRevertTraderResponse {
        channel_id: channel_id_hex,
        transaction: to_tx_30(close_tx.clone()),
        signature: to_ecdsa_signature_30(close_signature),
    };

    let client = reqwest_client();
    let runtime = state::get_or_create_tokio_runtime()?;
    runtime.spawn({
        async move {
            match client
                .post(format!(
                    "http://{}/api/channels/confirm-collab-revert",
                    config::get_http_endpoint(),
                ))
                .json(&data)
                .send()
                .await
            {
                Ok(response) => match response.text().await {
                    Ok(response) => {
                        tracing::info!(
                            response,
                            "Received response from confirming reverting a channel"
                        );
                        // Only update local state once the coordinator has acknowledged
                        // the revert.
                        if let Err(e) = update_state_after_collab_revert(
                            &signed_channel,
                            execution_price,
                            to_txid_30(close_tx.txid()),
                        ) {
                            tracing::error!(
                                "Failed to update state after collaborative revert confirmation: {e:#}"
                            );
                        }
                    }
                    Err(e) => {
                        tracing::error!(
                            "Failed to decode collaborative revert confirmation response text: {e:#}"
                        );
                    }
                },
                Err(e) => {
                    tracing::error!("Failed to confirm collaborative revert: {e:#}");
                }
            }
        }
    });

    Ok(())
}
/// Updates local order, position and channel state after a collaborative revert has
/// been confirmed: fails or fills the affected order, closes the position and marks the
/// channel as collaboratively closed in the DLC store.
fn update_state_after_collab_revert(
    signed_channel: &SignedChannel,
    execution_price: Decimal,
    closing_txid: Txid,
) -> Result<()> {
    let node = state::get_node();
    let positions = db::get_positions()?;

    let position = match positions.first() {
        Some(position) => {
            tracing::info!("Channel is reverted before the position got closed successfully.");
            position
        }
        None => {
            tracing::info!("Channel is reverted before the position got opened successfully.");
            // Without a position only a potentially in-flight order needs to be failed.
            if let Some(order) = db::get_order_in_filling()? {
                order::handler::order_failed(
                    Some(order.id),
                    FailureReason::CollabRevert,
                    anyhow!("Order failed due collab revert of the channel"),
                )?;
            }
            return Ok(());
        }
    };

    let filled_order = match order::handler::order_filled(None) {
        Ok(order) => order,
        Err(_) => {
            // No order was in filling: synthesize a closing market order so the position
            // can be closed at the revert execution price.
            let order = Order {
                id: Uuid::new_v4(),
                leverage: position.leverage,
                quantity: position.quantity,
                contract_symbol: position.contract_symbol,
                direction: position.direction.opposite(),
                order_type: OrderType::Market,
                state: OrderState::Filled {
                    execution_price: execution_price.to_f32().expect("to fit into f32"),
                    // this fee here doesn't matter because it's not being used anywhere
                    matching_fee: Amount::ZERO,
                },
                creation_timestamp: OffsetDateTime::now_utc(),
                order_expiry_timestamp: OffsetDateTime::now_utc(),
                reason: OrderReason::Expired,
                stable: position.stable,
                failure_reason: None,
            };
            db::insert_order(order.clone())?;
            event::publish(&EventInternal::OrderUpdateNotification(order.clone()));
            order
        }
    };

    position::handler::update_position_after_dlc_closure(filled_order)?;

    let node = node.inner.clone();
    node.dlc_manager
        .get_store()
        .upsert_channel(
            Channel::CollaborativelyClosed(ClosedChannel {
                counter_party: signed_channel.counter_party,
                temporary_channel_id: signed_channel.temporary_channel_id,
                channel_id: signed_channel.channel_id,
                reference_id: None,
                closing_txid: to_txid_29(closing_txid),
            }),
            // The contract doesn't matter anymore
            None,
        )
        .map_err(|e| anyhow!("{e:#}"))
}
/// Returns the first signed DLC channel, if any; `None` while the node is not
/// initialized yet.
pub fn get_signed_dlc_channel() -> Result<Option<SignedChannel>> {
    let node = match state::try_get_node() {
        Some(node) => node,
        None => return Ok(None),
    };

    let channels = node.inner.list_signed_dlc_channels()?;
    Ok(channels.into_iter().next())
}
/// Lists all DLC channels in any state; yields an empty list while the node is not
/// initialized yet.
pub fn list_dlc_channels() -> Result<Vec<Channel>> {
    match state::try_get_node() {
        Some(node) => node.inner.list_dlc_channels(),
        None => Ok(vec![]),
    }
}
/// Removes the DLC channel with the given id from the local DLC store.
pub fn delete_dlc_channel(dlc_channel_id: &DlcChannelId) -> Result<()> {
    let node = state::get_node();

    node.inner
        .dlc_manager
        .get_store()
        .delete_channel(dlc_channel_id)?;

    Ok(())
}
/// Checks whether the signed DLC channel with the coordinator is confirmed.
///
/// Returns `Ok(false)` while the node is not initialized yet.
pub async fn check_if_signed_channel_is_confirmed() -> Result<bool> {
    let node = match state::try_get_node() {
        Some(node) => node,
        None => return Ok(false),
    };

    let counterparty = config::get_coordinator_info().pubkey;
    node.inner
        .check_if_signed_channel_is_confirmed(counterparty)
        .await
}
/// Returns the estimated fee rate for the given confirmation target, falling back to
/// the minimum relay fee while the node is not initialized yet.
pub fn get_fee_rate_for_target(target: ConfirmationTarget) -> FeeRate {
    match state::try_get_node() {
        Some(node) => node.inner.fee_rate_estimator.get(target),
        None => FeeRate::default_min_relay_fee(),
    }
}
/// Estimates this party's share of the on-chain fee reserve for a DLC channel.
///
/// Returns `Amount::ZERO` while the node is not initialized yet.
pub fn estimated_fee_reserve() -> Result<Amount> {
    let node = match state::try_get_node() {
        Some(node) => node,
        None => return Ok(Amount::ZERO),
    };

    // Here we assume that the coordinator will use the same confirmation target AND that their fee
    // rate source agrees with ours.
    let fee_rate = node
        .inner
        .fee_rate_estimator
        .get(ConfirmationTarget::Normal);

    let reserve = estimated_dlc_channel_fee_reserve(fee_rate.as_sat_per_vb() as f64);

    // The reserve is split evenly between the two parties.
    let reserve = reserve / 2;

    Ok(reserve)
}
/// Sends `amount` to the given on-chain `address` using the provided fee configuration
/// and returns the resulting transaction id.
///
/// NOTE(review): the unit of `amount` is whatever `send_to_address` expects (presumably
/// sats, consistent with the rest of this file) — confirm against the node API.
pub async fn send_payment(amount: u64, address: String, fee: FeeConfig) -> Result<Txid> {
    let address = Address::from_str(&address)?;
    let txid = state::get_node()
        .inner
        .send_to_address(address, amount, fee.into())
        .await?;

    Ok(txid)
}
pub fn estimated_funding_tx_fee() -> Result<Amount> {
let node = match state::try_get_node() {
Some(node) => node,
None => return Ok(Amount::ZERO),
};
// Here we assume that the coordinator will use the same confirmation target AND that
// their fee rate source agrees with ours.
let fee_rate = node
.inner
.fee_rate_estimator
.get(ConfirmationTarget::Normal);
let fee = estimated_funding_transaction_fee(fee_rate.as_sat_per_vb() as f64);
// The estimated fee is split evenly between the two parties. In reality, each party will have
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | true |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/dlc/subscriber.rs | mobile/native/src/dlc/subscriber.rs | use crate::dlc::node::Node;
use crate::dlc::DlcChannel;
use crate::event;
use crate::event::EventInternal;
use tokio::sync::broadcast::error::RecvError;
use xxi_node::node::event::NodeEvent;
impl Node {
    /// Spawns a background task that listens for DLC channel events from the inner node
    /// and republishes each affected channel as an `EventInternal::DlcChannelEvent`.
    pub fn spawn_listen_dlc_channels_event_task(&self) {
        let mut receiver = self.inner.event_handler.subscribe();
        tokio::spawn({
            let node = self.clone();
            async move {
                loop {
                    match receiver.recv().await {
                        Ok(NodeEvent::DlcChannelEvent { dlc_channel_event }) => {
                            // Only events carrying a reference id can be mapped back to a
                            // concrete channel.
                            if let Some(reference_id) = dlc_channel_event.get_reference_id() {
                                match node.inner.get_dlc_channel_by_reference_id(reference_id) {
                                    Ok(channel) => event::publish(&EventInternal::DlcChannelEvent(
                                        DlcChannel::from(&channel),
                                    )),
                                    Err(e) => tracing::error!(
                                        ?reference_id,
                                        "Failed to get dlc channel by reference id. Error: {e:#}"
                                    ),
                                }
                            }
                        }
                        Ok(NodeEvent::Connected { .. })
                        | Ok(NodeEvent::SendDlcMessage { .. })
                        | Ok(NodeEvent::StoreDlcMessage { .. })
                        | Ok(NodeEvent::SendLastDlcMessage { .. }) => {} // ignored
                        Err(RecvError::Lagged(skipped)) => {
                            // Broadcast receiver fell behind; dropped events are only logged.
                            tracing::warn!("Skipped {skipped} messages");
                        }
                        Err(RecvError::Closed) => {
                            tracing::error!("Lost connection to sender!");
                            break;
                        }
                    }
                }
            }
        });
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/dlc/dlc_handler.rs | mobile/native/src/dlc/dlc_handler.rs | use crate::db;
use crate::dlc::node::Node;
use crate::event;
use crate::event::BackgroundTask;
use crate::event::EventInternal;
use crate::event::TaskStatus;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use std::sync::Arc;
use tokio::sync::broadcast;
use tokio::sync::broadcast::error::RecvError;
use xxi_node::dlc_message::DlcMessage;
use xxi_node::dlc_message::SerializedDlcMessage;
use xxi_node::message_handler::TenTenOneMessage;
use xxi_node::node::dlc_channel::send_dlc_message;
use xxi_node::node::event::NodeEvent;
use xxi_node::node::rust_dlc_manager::channel::signed_channel::SignedChannel;
use xxi_node::node::rust_dlc_manager::channel::signed_channel::SignedChannelState;
use xxi_node::node::rust_dlc_manager::channel::Channel;
/// The DlcHandler is responsible for sending dlc messages and marking received ones as
/// processed. Its main purpose is to ensure the following.
///
/// 1. Mark all received inbound messages as processed.
/// 2. Save the last outbound dlc message, so it can be re-sent on the next reconnect.
/// 3. Check if a received message has already been processed and if so inform to skip the message.
#[derive(Clone)]
pub struct DlcHandler {
    // Handle to the app's node; gives access to the dlc message handler and peer manager.
    node: Arc<Node>,
}
impl DlcHandler {
pub fn new(node: Arc<Node>) -> Self {
DlcHandler { node }
}
}
/// Handles sending outbound dlc messages as well as keeping track of what
/// dlc messages have already been processed and what was the last outbound dlc message
/// so it can be re-sent on reconnect.
pub async fn handle_outbound_dlc_messages(
    dlc_handler: DlcHandler,
    mut receiver: broadcast::Receiver<NodeEvent>,
) {
    loop {
        match receiver.recv().await {
            Ok(NodeEvent::Connected { peer }) => {
                if let Err(e) = dlc_handler.on_connect(peer) {
                    tracing::error!(peer=%peer, "Failed to process on connect event. {e:#}");
                }
            }
            Ok(NodeEvent::SendDlcMessage { peer, msg }) => {
                if let Err(e) = dlc_handler.send_dlc_message(peer, msg) {
                    tracing::error!(peer=%peer, "Failed to send dlc message. {e:#}")
                }
            }
            Ok(NodeEvent::StoreDlcMessage { peer, msg }) => {
                if let Err(e) = dlc_handler.store_dlc_message(peer, msg) {
                    tracing::error!(peer=%peer, "Failed to store dlc message. {e:#}");
                }
            }
            Ok(NodeEvent::SendLastDlcMessage { peer }) => {
                if let Err(e) = dlc_handler.send_last_dlc_message(peer) {
                    tracing::error!(peer=%peer, "Failed to send last dlc message. {e:#}")
                }
            }
            Ok(NodeEvent::DlcChannelEvent { .. }) => {} // ignored
            Err(RecvError::Lagged(skipped)) => {
                // Broadcast receiver fell behind; dropped events are only logged.
                tracing::warn!("Skipped {skipped} messages");
            }
            Err(RecvError::Closed) => {
                tracing::error!("Lost connection to sender!");
                break;
            }
        }
    }
}
impl DlcHandler {
    /// Persists the message for potential re-delivery and then sends it to `peer`.
    pub fn send_dlc_message(&self, peer: PublicKey, msg: TenTenOneMessage) -> Result<()> {
        self.store_dlc_message(peer, msg.clone())?;

        send_dlc_message(
            &self.node.inner.dlc_message_handler,
            &self.node.inner.peer_manager,
            peer,
            msg,
        );

        Ok(())
    }

    /// Records the outbound message and upserts it as the last outbound dlc message for
    /// `peer`, so it can be re-sent on the next reconnect.
    pub fn store_dlc_message(&self, peer: PublicKey, msg: TenTenOneMessage) -> Result<()> {
        let mut conn = db::connection()?;

        let serialized_outbound_message = SerializedDlcMessage::try_from(&msg)?;
        let outbound_msg = DlcMessage::new(peer, serialized_outbound_message.clone(), false)?;

        db::dlc_messages::DlcMessage::insert(&mut conn, outbound_msg)?;

        db::last_outbound_dlc_messages::LastOutboundDlcMessage::upsert(
            &mut conn,
            &peer,
            serialized_outbound_message,
        )
    }

    /// Re-sends the last outbound dlc message stored for `peer`, if any.
    pub fn send_last_dlc_message(&self, peer: PublicKey) -> Result<()> {
        let mut conn = db::connection()?;
        let last_serialized_message =
            db::last_outbound_dlc_messages::LastOutboundDlcMessage::get(&mut conn, &peer)?;

        if let Some(last_serialized_message) = last_serialized_message {
            let message = TenTenOneMessage::try_from(&last_serialized_message)?;
            send_dlc_message(
                &self.node.inner.dlc_message_handler,
                &self.node.inner.peer_manager,
                peer,
                message,
            );
        } else {
            tracing::debug!(%peer, "No last dlc message found. Nothing todo.");
        }

        Ok(())
    }

    /// Rejects all pending dlc channel offers. This is important as there might be several
    /// pending dlc channel offers due to a bug before we had fixed the reject handling properly,
    /// leaving the positions as proposed on the coordinator side.
    ///
    /// By rejecting them we ensure that all hanging dlc channel offers and positions are dealt
    /// with.
    pub fn reject_pending_dlc_channel_offers(&self) -> Result<()> {
        let dlc_channels = self.node.inner.list_dlc_channels()?;
        let offered_channels = dlc_channels
            .iter()
            .filter(|c| matches!(c, Channel::Offered(_)))
            .collect::<Vec<&Channel>>();

        if offered_channels.is_empty() {
            return Ok(());
        }

        event::publish(&EventInternal::BackgroundNotification(
            BackgroundTask::RecoverDlc(TaskStatus::Pending),
        ));

        for offered_channel in offered_channels.iter() {
            tracing::info!(
                channel_id = hex::encode(offered_channel.get_id()),
                "Rejecting pending dlc channel offer."
            );
            // Pending dlc channel offer not yet confirmed on-chain
            self.node
                .reject_dlc_channel_offer(None, &offered_channel.get_temporary_id())
                .context("Failed to reject pending dlc channel offer")?;
        }

        event::publish(&EventInternal::BackgroundNotification(
            BackgroundTask::RecoverDlc(TaskStatus::Success),
        ));

        Ok(())
    }

    /// Handles a reconnect to `peer`: rejects or accepts protocol offers that were left
    /// pending while disconnected and re-sends the last outbound dlc message.
    pub fn on_connect(&self, peer: PublicKey) -> Result<()> {
        self.reject_pending_dlc_channel_offers()?;

        if let Some(channel) = self.node.inner.list_signed_dlc_channels()?.first() {
            match channel {
                SignedChannel {
                    channel_id,
                    state: SignedChannelState::SettledReceived { .. },
                    ..
                } => {
                    tracing::info!("Rejecting pending dlc channel settle offer.");
                    // Pending dlc channel settle offer with a dlc channel already confirmed
                    // on-chain
                    event::publish(&EventInternal::BackgroundNotification(
                        BackgroundTask::RecoverDlc(TaskStatus::Pending),
                    ));
                    self.node
                        .reject_settle_offer(None, channel_id)
                        .context("Failed to reject pending settle offer")?;
                    event::publish(&EventInternal::BackgroundNotification(
                        BackgroundTask::RecoverDlc(TaskStatus::Success),
                    ));

                    return Ok(());
                }
                SignedChannel {
                    channel_id,
                    state: SignedChannelState::RenewOffered { .. },
                    ..
                } => {
                    tracing::info!("Rejecting pending dlc channel renew offer.");
                    // Pending dlc channel renew (resize) offer with a dlc channel already confirmed
                    // on-chain
                    event::publish(&EventInternal::BackgroundNotification(
                        BackgroundTask::RecoverDlc(TaskStatus::Pending),
                    ));

                    // FIXME(holzeis): We need to be able to differentiate between a
                    // SignedChannelState::RenewOffered and a RolloverOffer. This differentiation
                    // currently only lives in 10101 and in the dlc messages, but not on the channel
                    // state. Hence at the moment we also reject pending `RolloverOffers` the same
                    // way we reject `RenewOffers`
                    self.node
                        .reject_renew_offer(None, channel_id)
                        .context("Failed to reject pending renew offer")?;

                    event::publish(&EventInternal::BackgroundNotification(
                        BackgroundTask::RecoverDlc(TaskStatus::Success),
                    ));

                    return Ok(());
                }
                SignedChannel {
                    channel_id,
                    state:
                        SignedChannelState::CollaborativeCloseOffered {
                            is_offer: false, ..
                        },
                    ..
                } => {
                    tracing::info!("Accepting pending dlc channel close offer.");
                    // Pending dlc channel close offer with the intend to close the dlc channel
                    // on-chain
                    // TODO(bonomat): we should verify that the proposed amount is acceptable
                    self.node
                        .inner
                        .accept_dlc_channel_collaborative_close(channel_id)?;

                    return Ok(());
                }
                signed_channel => {
                    // If the signed channel state is anything else but `Established`, `Settled` or
                    // `Closing` at reconnect. It means the protocol got interrupted.
                    if !matches!(
                        signed_channel.state,
                        SignedChannelState::Established { .. }
                            | SignedChannelState::Settled { .. }
                            | SignedChannelState::SettledClosing { .. }
                            | SignedChannelState::Closing { .. }
                            | SignedChannelState::CollaborativeCloseOffered { .. }
                    ) {
                        event::publish(&EventInternal::BackgroundNotification(
                            BackgroundTask::RecoverDlc(TaskStatus::Pending),
                        ));
                    }
                }
            };
        }

        self.send_last_dlc_message(peer)?;

        Ok(())
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/commons/api.rs | mobile/native/src/commons/api.rs | use flutter_rust_bridge::frb;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use xxi_node::commons;
/// Bid/ask price pair exposed to the Flutter side as plain `f64`s.
#[frb]
#[derive(Clone, Debug, Default)]
pub struct Price {
    // Best bid price.
    pub bid: f64,
    // Best ask price.
    pub ask: f64,
}
impl From<commons::Price> for Price {
    /// Converts the decimal-based domain price into the `f64`-based API price.
    fn from(value: commons::Price) -> Self {
        let bid = value.bid.to_f64().expect("price bid to fit into f64");
        let ask = value.ask.to_f64().expect("price ask to fit into f64");

        Price { bid, ask }
    }
}
impl From<Price> for commons::Price {
    /// Converts the `f64`-based API price back into the decimal-based domain price.
    fn from(value: Price) -> Self {
        let bid = Decimal::try_from(value.bid).expect("price bid to fit into Decimal");
        let ask = Decimal::try_from(value.ask).expect("price ask to fit into Decimal");

        commons::Price { bid, ask }
    }
}
/// Basic information about a channel.
pub struct ChannelInfo {
    /// The total capacity of the channel as defined by the funding output
    pub channel_capacity: u64,
    // Channel reserve amount (presumably sats, like the capacity — confirm against usage).
    pub reserve: Option<u64>,
    // Identifier of the liquidity option used when opening the channel, if any.
    pub liquidity_option_id: Option<i32>,
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/commons/mod.rs | mobile/native/src/commons/mod.rs | pub mod api;
/// Provide a reqwest client with a specified 30 seconds timeout.
//
// FIXME: Ideally, we should reuse the same reqwest client for all requests.
pub fn reqwest_client() -> reqwest::Client {
    reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(30))
        .build()
        .expect("Failed to build reqwest client")
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/calculations/mod.rs | mobile/native/src/calculations/mod.rs | use anyhow::Result;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use xxi_node::cfd;
use xxi_node::commons::Direction;
use xxi_node::commons::Price;
/// Calculate the margin (collateral) in sats.
///
/// The underlying `cfd::calculate_margin` works with a decimal price; the result is
/// converted to sats via `to_sat`.
pub fn calculate_margin(opening_price: f32, quantity: f32, leverage: f32) -> u64 {
    let opening_price = Decimal::try_from(opening_price).expect("price to fit into decimal");
    cfd::calculate_margin(opening_price, quantity, leverage).to_sat()
}
/// Calculate the quantity from price, collateral and leverage
///
/// Margin in sats, calculation in BTC
pub fn calculate_quantity(opening_price: f32, margin: u64, leverage: f32) -> f32 {
    cfd::calculate_quantity(opening_price, margin, leverage)
}
/// PnL is calculated using the margin without fees to show the effective profit or loss.
///
/// The closing price is taken for the direction opposite to the position, i.e. the side
/// of the book the position would be closed against.
pub fn calculate_pnl(
    opening_price: f32,
    closing_price: Price,
    quantity: f32,
    leverage: f32,
    direction: Direction,
) -> Result<i64> {
    // FIXME: We can no longer assume that the coordinator always has the same leverage! It needs to
    // be passed in as an argument. Unfortunately the coordinator leverage is not passed around at
    // the moment. Perhaps we should add it to the `TradeParams`.
    //
    // The counterparty leverage is hard-coded to 2.0 here; see the FIXME above.
    let (long_leverage, short_leverage) = match direction {
        Direction::Long => (leverage, 2.0),
        Direction::Short => (2.0, leverage),
    };

    let long_margin = calculate_margin(opening_price, quantity, long_leverage);
    let short_margin = calculate_margin(opening_price, quantity, short_leverage);

    let opening_price = Decimal::try_from(opening_price).expect("price to fit into decimal");
    let closing_price = closing_price.get_price_for_direction(direction.opposite());

    cfd::calculate_pnl(
        opening_price,
        closing_price,
        quantity,
        direction,
        long_margin,
        short_margin,
    )
}
/// Calculate the liquidation price for a position opened at `price` with the given
/// `leverage`, `direction` and `maintenance_margin_rate`.
pub fn calculate_liquidation_price(
    price: f32,
    leverage: f32,
    direction: Direction,
    maintenance_margin_rate: Decimal,
) -> f32 {
    let initial_price = Decimal::try_from(price).expect("Price to fit");

    tracing::trace!("Initial price: {}", price);

    let leverage = Decimal::try_from(leverage).expect("leverage to fix into decimal");

    // Dispatch to the long/short liquidation formula depending on position direction.
    let liquidation_price = match direction {
        Direction::Long => {
            cfd::calculate_long_liquidation_price(leverage, initial_price, maintenance_margin_rate)
        }
        Direction::Short => {
            cfd::calculate_short_liquidation_price(leverage, initial_price, maintenance_margin_rate)
        }
    };

    let liquidation_price = liquidation_price.to_f32().expect("price to fit into f32");
    tracing::trace!("Liquidation_price: {liquidation_price}");

    liquidation_price
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/db/last_outbound_dlc_messages.rs | mobile/native/src/db/last_outbound_dlc_messages.rs | use crate::db::dlc_messages::MessageType;
use crate::schema;
use crate::schema::dlc_messages;
use crate::schema::last_outbound_dlc_messages;
use anyhow::ensure;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use diesel::AsChangeset;
use diesel::ExpressionMethods;
use diesel::Insertable;
use diesel::JoinOnDsl;
use diesel::OptionalExtension;
use diesel::QueryDsl;
use diesel::QueryResult;
use diesel::Queryable;
use diesel::RunQueryDsl;
use diesel::SqliteConnection;
use time::OffsetDateTime;
use xxi_node::dlc_message::SerializedDlcMessage;
/// Diesel row model for the `last_outbound_dlc_messages` table, which keeps at most one
/// row per peer: the most recent outbound dlc message (see the upsert on `peer_id`).
#[derive(Insertable, Queryable, Debug, Clone, PartialEq, AsChangeset)]
#[diesel(table_name = last_outbound_dlc_messages)]
pub(crate) struct LastOutboundDlcMessage {
    // String-encoded public key of the peer.
    pub peer_id: String,
    // Message hash; joins against `dlc_messages.message_hash` to recover the message type.
    pub message_hash: String,
    // Serialized message payload.
    pub message: String,
    // Unix timestamp (seconds) of the last upsert.
    pub timestamp: i64,
}
impl LastOutboundDlcMessage {
    /// Returns the last outbound dlc message stored for `peer_id`, if any.
    ///
    /// Joins with `dlc_messages` on the message hash to recover the message type that
    /// belongs to the stored payload.
    pub(crate) fn get(
        conn: &mut SqliteConnection,
        peer_id: &PublicKey,
    ) -> QueryResult<Option<SerializedDlcMessage>> {
        let last_outbound_dlc_message = last_outbound_dlc_messages::table
            .inner_join(
                dlc_messages::table
                    .on(dlc_messages::message_hash.eq(last_outbound_dlc_messages::message_hash)),
            )
            .filter(last_outbound_dlc_messages::peer_id.eq(peer_id.to_string()))
            .select((
                dlc_messages::message_type,
                last_outbound_dlc_messages::message,
            ))
            .first::<(MessageType, String)>(conn)
            .optional()?;

        let serialized_dlc_message =
            last_outbound_dlc_message.map(|(message_type, message)| SerializedDlcMessage {
                message,
                message_type: xxi_node::dlc_message::DlcMessageType::from(message_type),
            });

        Ok(serialized_dlc_message)
    }

    /// Inserts or updates the last outbound dlc message for `peer_id`.
    ///
    /// On a conflict on `peer_id` the existing row is overwritten, so only the most
    /// recent message per peer is kept.
    pub(crate) fn upsert(
        conn: &mut SqliteConnection,
        peer_id: &PublicKey,
        sdm: SerializedDlcMessage,
    ) -> Result<()> {
        let values = (
            last_outbound_dlc_messages::peer_id.eq(peer_id.to_string()),
            last_outbound_dlc_messages::message_hash.eq(sdm.generate_hash().to_string()),
            last_outbound_dlc_messages::message.eq(sdm.message),
            last_outbound_dlc_messages::timestamp.eq(OffsetDateTime::now_utc().unix_timestamp()),
        );

        let affected_rows = diesel::insert_into(last_outbound_dlc_messages::table)
            .values(&values.clone())
            .on_conflict(schema::last_outbound_dlc_messages::peer_id)
            .do_update()
            .set(values)
            .execute(conn)?;

        ensure!(
            affected_rows > 0,
            "Could not upsert last outbound dlc messages"
        );

        Ok(())
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/db/dlc_messages.rs | mobile/native/src/db/dlc_messages.rs | use crate::schema;
use anyhow::ensure;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use diesel::prelude::*;
use diesel::sql_types::Text;
use diesel::AsChangeset;
use diesel::AsExpression;
use diesel::FromSqlRow;
use diesel::Insertable;
use diesel::OptionalExtension;
use diesel::QueryResult;
use diesel::Queryable;
use diesel::QueryableByName;
use diesel::RunQueryDsl;
use diesel::SqliteConnection;
use schema::dlc_messages;
use std::str::FromStr;
use time::OffsetDateTime;
#[derive(Insertable, QueryableByName, Queryable, Debug, Clone, PartialEq, AsChangeset)]
#[diesel(table_name = dlc_messages)]
pub(crate) struct DlcMessage {
pub message_hash: String,
pub inbound: bool,
pub peer_id: String,
pub message_type: MessageType,
pub timestamp: i64,
}
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = Text)]
pub enum MessageType {
Offer,
Accept,
Sign,
SettleOffer,
SettleAccept,
SettleConfirm,
SettleFinalize,
RenewOffer,
RenewAccept,
RenewConfirm,
RenewFinalize,
RenewRevoke,
RolloverOffer,
RolloverAccept,
RolloverConfirm,
RolloverFinalize,
RolloverRevoke,
CollaborativeCloseOffer,
Reject,
}
impl DlcMessage {
pub(crate) fn get(
conn: &mut SqliteConnection,
message_hash: &str,
) -> QueryResult<Option<xxi_node::dlc_message::DlcMessage>> {
let result = schema::dlc_messages::table
.filter(schema::dlc_messages::message_hash.eq(message_hash.to_string()))
.first::<DlcMessage>(conn)
.optional()?;
Ok(result.map(|q| q.into()))
}
pub(crate) fn insert(
conn: &mut SqliteConnection,
dlc_message: xxi_node::dlc_message::DlcMessage,
) -> Result<()> {
let affected_rows = diesel::insert_into(schema::dlc_messages::table)
.values(DlcMessage::from(dlc_message))
.execute(conn)?;
ensure!(affected_rows > 0, "Could not insert dlc message");
Ok(())
}
}
impl From<xxi_node::dlc_message::DlcMessage> for DlcMessage {
fn from(value: xxi_node::dlc_message::DlcMessage) -> Self {
Self {
message_hash: value.clone().message_hash,
peer_id: value.peer_id.to_string(),
message_type: MessageType::from(value.message_type),
timestamp: value.timestamp.unix_timestamp(),
inbound: value.inbound,
}
}
}
impl From<xxi_node::dlc_message::DlcMessageType> for MessageType {
fn from(value: xxi_node::dlc_message::DlcMessageType) -> Self {
match value {
xxi_node::dlc_message::DlcMessageType::Offer => Self::Offer,
xxi_node::dlc_message::DlcMessageType::Accept => Self::Accept,
xxi_node::dlc_message::DlcMessageType::Sign => Self::Sign,
xxi_node::dlc_message::DlcMessageType::SettleOffer => Self::SettleOffer,
xxi_node::dlc_message::DlcMessageType::SettleAccept => Self::SettleAccept,
xxi_node::dlc_message::DlcMessageType::SettleConfirm => Self::SettleConfirm,
xxi_node::dlc_message::DlcMessageType::SettleFinalize => Self::SettleFinalize,
xxi_node::dlc_message::DlcMessageType::RenewOffer => Self::RenewOffer,
xxi_node::dlc_message::DlcMessageType::RenewAccept => Self::RenewAccept,
xxi_node::dlc_message::DlcMessageType::RenewConfirm => Self::RenewConfirm,
xxi_node::dlc_message::DlcMessageType::RenewFinalize => Self::RenewFinalize,
xxi_node::dlc_message::DlcMessageType::RenewRevoke => Self::RenewRevoke,
xxi_node::dlc_message::DlcMessageType::RolloverOffer => Self::RolloverOffer,
xxi_node::dlc_message::DlcMessageType::RolloverAccept => Self::RolloverAccept,
xxi_node::dlc_message::DlcMessageType::RolloverConfirm => Self::RolloverConfirm,
xxi_node::dlc_message::DlcMessageType::RolloverFinalize => Self::RolloverFinalize,
xxi_node::dlc_message::DlcMessageType::RolloverRevoke => Self::RolloverRevoke,
xxi_node::dlc_message::DlcMessageType::CollaborativeCloseOffer => {
Self::CollaborativeCloseOffer
}
xxi_node::dlc_message::DlcMessageType::Reject => Self::Reject,
}
}
}
impl From<DlcMessage> for xxi_node::dlc_message::DlcMessage {
fn from(value: DlcMessage) -> Self {
let dlc_message_type =
xxi_node::dlc_message::DlcMessageType::from(value.clone().message_type);
Self {
message_hash: value.message_hash,
inbound: value.inbound,
message_type: dlc_message_type,
peer_id: PublicKey::from_str(&value.peer_id).expect("valid public key"),
timestamp: OffsetDateTime::from_unix_timestamp(value.timestamp)
.expect("valid timestamp"),
}
}
}
impl From<MessageType> for xxi_node::dlc_message::DlcMessageType {
fn from(value: MessageType) -> Self {
match value {
MessageType::Offer => xxi_node::dlc_message::DlcMessageType::Offer,
MessageType::Accept => xxi_node::dlc_message::DlcMessageType::Accept,
MessageType::Sign => xxi_node::dlc_message::DlcMessageType::Sign,
MessageType::SettleOffer => xxi_node::dlc_message::DlcMessageType::SettleOffer,
MessageType::SettleAccept => xxi_node::dlc_message::DlcMessageType::SettleAccept,
MessageType::SettleConfirm => xxi_node::dlc_message::DlcMessageType::SettleConfirm,
MessageType::SettleFinalize => xxi_node::dlc_message::DlcMessageType::SettleFinalize,
MessageType::RenewOffer => xxi_node::dlc_message::DlcMessageType::RenewOffer,
MessageType::RenewAccept => xxi_node::dlc_message::DlcMessageType::RenewAccept,
MessageType::RenewConfirm => xxi_node::dlc_message::DlcMessageType::RenewConfirm,
MessageType::RenewFinalize => xxi_node::dlc_message::DlcMessageType::RenewFinalize,
MessageType::RenewRevoke => xxi_node::dlc_message::DlcMessageType::RenewRevoke,
MessageType::RolloverOffer => xxi_node::dlc_message::DlcMessageType::RolloverOffer,
MessageType::RolloverAccept => xxi_node::dlc_message::DlcMessageType::RolloverAccept,
MessageType::RolloverConfirm => xxi_node::dlc_message::DlcMessageType::RolloverConfirm,
MessageType::RolloverFinalize => {
xxi_node::dlc_message::DlcMessageType::RolloverFinalize
}
MessageType::RolloverRevoke => xxi_node::dlc_message::DlcMessageType::RolloverRevoke,
MessageType::CollaborativeCloseOffer => {
xxi_node::dlc_message::DlcMessageType::CollaborativeCloseOffer
}
MessageType::Reject => xxi_node::dlc_message::DlcMessageType::Reject,
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/db/polls.rs | mobile/native/src/db/polls.rs | use crate::schema;
use crate::schema::answered_polls;
use anyhow::ensure;
use anyhow::Result;
use diesel::Insertable;
use diesel::QueryResult;
use diesel::Queryable;
use diesel::QueryableByName;
use diesel::RunQueryDsl;
use diesel::SqliteConnection;
use time::OffsetDateTime;
#[derive(Insertable, Debug, Clone, PartialEq)]
#[diesel(table_name = answered_polls)]
pub struct NewAnsweredOrIgnored {
pub poll_id: i32,
pub timestamp: i64,
}
#[derive(QueryableByName, Queryable, Debug, Clone, PartialEq)]
#[diesel(table_name = answered_polls)]
pub struct AnsweredOrIgnored {
pub id: i32,
pub poll_id: i32,
pub timestamp: i64,
}
pub(crate) fn get(conn: &mut SqliteConnection) -> QueryResult<Vec<AnsweredOrIgnored>> {
let result = schema::answered_polls::table.load(conn)?;
Ok(result)
}
pub(crate) fn insert(conn: &mut SqliteConnection, poll_id: i32) -> Result<()> {
let affected_rows = diesel::insert_into(schema::answered_polls::table)
.values(NewAnsweredOrIgnored {
poll_id,
timestamp: OffsetDateTime::now_utc().unix_timestamp(),
})
.execute(conn)?;
ensure!(affected_rows > 0, "Could not insert answered poll");
Ok(())
}
pub(crate) fn delete_all(conn: &mut SqliteConnection) -> Result<()> {
diesel::delete(schema::answered_polls::table).execute(conn)?;
Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/db/mod.rs | mobile/native/src/db/mod.rs | use crate::config;
use crate::db::models::FailureReason;
use crate::db::models::FundingFeeEvent;
use crate::db::models::NewTrade;
use crate::db::models::Order;
use crate::db::models::OrderState;
use crate::db::models::Position;
use crate::db::models::SpendableOutputInsertable;
use crate::db::models::SpendableOutputQueryable;
use crate::db::models::Trade;
use crate::db::models::Transaction;
use crate::db::models::UnpaidFundingFeeEvent;
use crate::trade;
use anyhow::anyhow;
use anyhow::Context;
use anyhow::Result;
use bitcoin::Amount;
use bitcoin::Network;
use diesel::connection::SimpleConnection;
use diesel::r2d2;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::Pool;
use diesel::r2d2::PooledConnection;
use diesel::OptionalExtension;
use diesel::SqliteConnection;
use diesel_migrations::embed_migrations;
use diesel_migrations::EmbeddedMigrations;
use diesel_migrations::MigrationHarness;
use parking_lot::Mutex;
use rusqlite::backup::Backup;
use rusqlite::Connection;
use rusqlite::OpenFlags;
use state::Storage;
use std::path::Path;
use std::sync::Arc;
use time::Duration;
use time::OffsetDateTime;
use uuid::Uuid;
use xxi_node::commons;
mod custom_types;
pub mod dlc_messages;
pub mod last_outbound_dlc_messages;
pub mod models;
pub mod polls;
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!();
/// Sets the number of max connections to the DB.
///
/// We are only allowing 1 connection at a time because given the simplicity of the app currently
/// there is no need for concurrent access to the database.
const MAX_DB_POOL_SIZE: u32 = 1;
static DB: Storage<Arc<Pool<ConnectionManager<SqliteConnection>>>> = Storage::new();
static BACKUP_CONNECTION: Storage<Arc<Mutex<Connection>>> = Storage::new();
#[derive(Debug)]
pub struct ConnectionOptions {
pub enable_wal: bool,
pub enable_foreign_keys: bool,
pub busy_timeout: Option<Duration>,
}
impl r2d2::CustomizeConnection<SqliteConnection, r2d2::Error> for ConnectionOptions {
fn on_acquire(&self, conn: &mut SqliteConnection) -> Result<(), r2d2::Error> {
(|| {
if let Some(d) = self.busy_timeout {
conn.batch_execute(&format!(
"PRAGMA busy_timeout = {};",
d.whole_milliseconds()
))?;
}
if self.enable_wal {
conn.batch_execute("PRAGMA journal_mode = WAL; PRAGMA synchronous = NORMAL; PRAGMA wal_autocheckpoint = 1000; PRAGMA wal_checkpoint(TRUNCATE);")?;
}
if self.enable_foreign_keys {
conn.batch_execute("PRAGMA foreign_keys = ON;")?;
}
Ok(())
})()
.map_err(diesel::r2d2::Error::QueryError)
}
}
pub fn init_db(db_dir: &str, network: Network) -> Result<()> {
if DB.try_get().is_some() {
return Ok(());
}
let database_url = format!("sqlite://{db_dir}/trades-{network}.sqlite");
let manager = ConnectionManager::<SqliteConnection>::new(database_url);
let pool = r2d2::Pool::builder()
.max_size(MAX_DB_POOL_SIZE)
.connection_customizer(Box::new(ConnectionOptions {
enable_wal: true,
enable_foreign_keys: true,
busy_timeout: Some(Duration::seconds(30)),
}))
.build(manager)?;
let mut connection = pool.get()?;
connection
.run_pending_migrations(MIGRATIONS)
.map_err(|e| anyhow!("could not run db migration: {e:#}"))?;
tracing::debug!("Database migration run - db initialized");
DB.set(Arc::new(pool));
tracing::debug!("Opening read-only backup connection");
let backup_conn = Connection::open_with_flags(
format!("{db_dir}/trades-{network}.sqlite"),
// [`OpenFlags::SQLITE_OPEN_READ_ONLY`]: The database is opened in read-only mode. If the database does not already exist, an error is returned
// [`OpenFlags::SQLITE_OPEN_NO_MUTEX`]: The new database connection will use the "multi-thread" threading mode. This means that separate threads are allowed to use SQLite at the same time, as long as each thread is using a different database connection.
// https://www.sqlite.org/c3ref/open.html
OpenFlags::SQLITE_OPEN_READ_ONLY | OpenFlags::SQLITE_OPEN_NO_MUTEX,
)?;
BACKUP_CONNECTION.set(Arc::new(Mutex::new(backup_conn)));
Ok(())
}
/// Creates a backup of the database
///
/// Returns the path to the file of the database backup
pub fn back_up() -> Result<String> {
let connection = BACKUP_CONNECTION.get().lock();
let backup_dir = config::get_backup_dir();
let dst_path = Path::new(&backup_dir).join("trades.sqlite");
let mut dst = Connection::open(dst_path.clone())?;
let backup = Backup::new(&connection, &mut dst)?;
backup.run_to_completion(100, std::time::Duration::from_millis(250), None)?;
Ok(dst_path.to_string_lossy().to_string())
}
pub fn connection() -> Result<PooledConnection<ConnectionManager<SqliteConnection>>> {
let pool = DB.try_get().context("DB uninitialised").cloned()?;
pool.get()
.map_err(|e| anyhow!("cannot acquire database connection: {e:#}"))
}
pub fn insert_order(order: trade::order::Order) -> Result<trade::order::Order> {
let mut db = connection()?;
let order = Order::insert(order.into(), &mut db)?;
Ok(order.try_into()?)
}
impl From<trade::order::OrderState> for OrderState {
fn from(value: trade::order::OrderState) -> Self {
match value {
trade::order::OrderState::Initial => OrderState::Initial,
trade::order::OrderState::Rejected => OrderState::Rejected,
trade::order::OrderState::Open => OrderState::Open,
trade::order::OrderState::Filling { .. } => OrderState::Filling,
trade::order::OrderState::Failed { .. } => OrderState::Failed,
trade::order::OrderState::Filled { .. } => OrderState::Filled,
}
}
}
pub fn set_order_state_to_failed(
order_id: Uuid,
failure_reason: FailureReason,
execution_price: Option<f32>,
) -> Result<trade::order::Order> {
let mut db = connection()?;
let order = Order::set_order_state_to_failed(
order_id.to_string(),
execution_price,
None,
failure_reason,
&mut db,
)
.context("Failed to set order state to failed")?;
Ok(order.try_into()?)
}
pub fn set_order_state_to_open(order_id: Uuid) -> Result<trade::order::Order> {
let mut db = connection()?;
let order = Order::set_order_state_to_open(order_id.to_string(), &mut db)
.context("Failed to set order state to open")?;
Ok(order.try_into()?)
}
pub fn get_order(order_id: Uuid) -> Result<Option<trade::order::Order>> {
let mut db = connection()?;
let order = Order::get(order_id.to_string(), &mut db)?;
let order = match order {
Some(order) => Some(trade::order::Order::try_from(order)?),
None => None,
};
Ok(order)
}
pub fn get_orders_for_ui() -> Result<Vec<trade::order::Order>> {
let mut db = connection()?;
let orders = Order::get_without_rejected_and_initial(&mut db)?;
Ok(orders
.into_iter()
.map(TryInto::try_into)
.collect::<Result<_, _>>()?)
}
pub fn get_filled_orders() -> Result<Vec<trade::order::Order>> {
let mut db = connection()?;
let orders = Order::get_by_state(OrderState::Filled, &mut db)?;
let orders = orders
.into_iter()
.map(|order| {
order
.try_into()
.context("Failed to convert to trade::order::Order")
})
.collect::<Result<Vec<_>>>()?;
Ok(orders)
}
/// Returns all open orders
pub fn maybe_get_open_orders() -> Result<Vec<trade::order::Order>> {
let mut db = connection()?;
let orders = Order::get_by_state(OrderState::Open, &mut db)?;
let orders = orders
.into_iter()
.map(|order| {
order
.try_into()
.context("Failed to convert to trade::order::Order")
})
.collect::<Result<Vec<_>>>()?;
Ok(orders)
}
pub fn get_last_failed_order() -> Result<Option<trade::order::Order>> {
let mut db = connection()?;
let mut orders = Order::get_by_state(OrderState::Failed, &mut db)?;
orders.sort_by(|a, b| b.creation_timestamp.cmp(&a.creation_timestamp));
let order = match orders.first() {
Some(order) => Some(order.clone().try_into()?),
None => None,
};
Ok(order)
}
pub fn set_order_state_to_filled(
order_id: Uuid,
execution_price: f32,
matching_fee: Amount,
) -> Result<trade::order::Order> {
let mut connection = connection()?;
let order =
Order::set_order_state_to_filled(order_id, execution_price, matching_fee, &mut connection)?;
Ok(order.try_into()?)
}
pub fn set_order_state_to_filling(
order_id: Uuid,
execution_price: f32,
matching_fee: Amount,
) -> Result<trade::order::Order> {
let mut connection = connection()?;
let order = Order::set_order_state_to_filling(
order_id,
execution_price,
matching_fee,
&mut connection,
)?;
Ok(order.try_into()?)
}
/// Return an [`Order`] that is currently in [`OrderState::Filling`].
pub fn get_order_in_filling() -> Result<Option<trade::order::Order>> {
let mut db = connection()?;
let mut orders = Order::get_by_state(OrderState::Filling, &mut db)?;
orders.sort_by(|a, b| b.creation_timestamp.cmp(&a.creation_timestamp));
let order = match orders.as_slice() {
[] => return Ok(None),
[order] => order,
// We strive to only have one order at a time in `OrderState::Filling`. But, if we do not
// manage, we take the most oldest one.
[oldest_order, rest @ ..] => {
tracing::warn!(
id = %oldest_order.id,
"Found more than one order in filling. Using oldest one",
);
// Clean up other orders in `OrderState::Filling`.
for order in rest {
tracing::debug!(
id = %order.id,
"Setting unexpected Filling order to Failed"
);
if let Err(e) = Order::set_order_state_to_failed(
order.id.clone(),
order.execution_price,
None,
FailureReason::TimedOut,
&mut db,
) {
tracing::error!("Failed to set old Filling order to Failed: {e:#}");
};
}
oldest_order
}
};
Ok(Some(order.clone().try_into()?))
}
pub fn delete_order(order_id: Uuid) -> Result<()> {
let mut db = connection()?;
Order::delete(order_id.to_string(), &mut db)?;
Ok(())
}
pub fn insert_position(position: trade::position::Position) -> Result<trade::position::Position> {
let mut db = connection()?;
let position = Position::insert(position.into(), &mut db)?;
Ok(position.into())
}
/// We only allow one [`Position`] per [`ContractSymbol`] in the database.
pub fn get_position(contract_symbol: commons::ContractSymbol) -> Result<trade::position::Position> {
let mut conn = connection()?;
let position = Position::get_position(&mut conn, contract_symbol.into())?;
Ok(position.into())
}
pub fn get_positions() -> Result<Vec<trade::position::Position>> {
let mut db = connection()?;
let positions = Position::get_all(&mut db)?;
let positions = positions
.into_iter()
.map(|position| position.into())
.collect();
Ok(positions)
}
pub fn delete_positions() -> Result<()> {
let mut db = connection()?;
Position::delete_all(&mut db)?;
Ok(())
}
pub fn update_position_state(
contract_symbol: commons::ContractSymbol,
position_state: trade::position::PositionState,
) -> Result<trade::position::Position> {
let mut db = connection()?;
let position = Position::update_state(contract_symbol.into(), position_state.into(), &mut db)
.context("Failed to update position state")?;
Ok(position.into())
}
pub fn update_position(updated_position: trade::position::Position) -> Result<()> {
let mut db = connection()?;
Position::update_position(&mut db, updated_position.into())
.context("Failed to update position")?;
Ok(())
}
pub fn start_position_rollover(updated_position: trade::position::Position) -> Result<()> {
let mut db = connection()?;
Position::start_rollover(&mut db, updated_position.into())
.context("Failed to start position rollover")?;
Ok(())
}
pub fn finish_position_rollover(updated_position: trade::position::Position) -> Result<()> {
let mut db = connection()?;
Position::finish_rollover(&mut db, updated_position.into())
.context("Failed to finish position rollover")?;
Ok(())
}
pub fn insert_spendable_output(
outpoint: lightning::chain::transaction::OutPoint,
descriptor: lightning::sign::SpendableOutputDescriptor,
) -> Result<()> {
tracing::debug!(?descriptor, "Inserting spendable output");
let mut db = connection()?;
SpendableOutputInsertable::insert((outpoint, descriptor).into(), &mut db)?;
Ok(())
}
pub fn get_spendable_output(
outpoint: lightning::chain::transaction::OutPoint,
) -> Result<Option<lightning::sign::SpendableOutputDescriptor>> {
tracing::debug!(?outpoint, "Getting spendable output");
let mut db = connection()?;
let output = SpendableOutputQueryable::get(outpoint, &mut db).optional()?;
output.map(|output| output.try_into()).transpose()
}
pub fn delete_spendable_output(outpoint: lightning::chain::transaction::OutPoint) -> Result<()> {
tracing::debug!(?outpoint, "Removing spendable output");
let mut db = connection()?;
SpendableOutputQueryable::delete(outpoint, &mut db)?;
Ok(())
}
pub fn get_spendable_outputs() -> Result<Vec<lightning::sign::SpendableOutputDescriptor>> {
let mut db = connection()?;
let outputs = SpendableOutputQueryable::get_all(&mut db)?;
let outputs = outputs
.into_iter()
.map(|output| output.try_into())
.collect::<Result<Vec<_>>>()?;
tracing::debug!(?outputs, "Got all spendable outputs");
Ok(outputs)
}
// Transaction
pub fn upsert_transaction(transaction: xxi_node::transaction::Transaction) -> Result<()> {
tracing::debug!(?transaction, "Upserting transaction");
let mut db = connection()?;
Transaction::upsert(transaction.into(), &mut db)
}
pub fn get_transaction(txid: &str) -> Result<Option<xxi_node::transaction::Transaction>> {
tracing::debug!(%txid, "Getting transaction");
let mut db = connection()?;
let transaction = Transaction::get(txid, &mut db)
.map_err(|e| anyhow!("{e:#}"))?
.map(|t| t.into());
Ok(transaction)
}
pub fn get_all_transactions_without_fees() -> Result<Vec<xxi_node::transaction::Transaction>> {
let mut db = connection()?;
let transactions = Transaction::get_all_without_fees(&mut db)?
.into_iter()
.map(|t| t.into())
.collect::<Vec<_>>();
tracing::debug!(?transactions, "Got all transactions");
Ok(transactions)
}
pub fn get_all_trades() -> Result<Vec<crate::trade::Trade>> {
let mut db = connection()?;
let trades = Trade::get_all(&mut db)?;
let trades = trades
.into_iter()
.map(|trade| trade.into())
.collect::<Vec<_>>();
Ok(trades)
}
pub fn insert_trades(trades: &[crate::trade::Trade]) -> Result<()> {
let mut db = connection()?;
let trades = trades.iter().copied().map(|trade| trade.into()).collect();
NewTrade::insert(&mut db, trades)?;
Ok(())
}
/// Returns a list of polls which have been answered or should be ignored
pub fn load_ignored_or_answered_polls() -> Result<Vec<polls::AnsweredOrIgnored>> {
let mut db = connection()?;
let answered_polls = polls::get(&mut db)?;
for i in &answered_polls {
tracing::debug!(id = i.poll_id, "Ignored poll")
}
Ok(answered_polls)
}
/// A poll inserted into this table was either answered or should be ignored in the future.
pub fn set_poll_to_ignored_or_answered(poll_id: i32) -> Result<()> {
let mut db = connection()?;
polls::insert(&mut db, poll_id)?;
Ok(())
}
pub fn delete_answered_poll_cache() -> Result<()> {
let mut db = connection()?;
polls::delete_all(&mut db)?;
Ok(())
}
pub fn get_all_funding_fee_events() -> Result<Vec<crate::trade::FundingFeeEvent>> {
let mut db = connection()?;
let funding_fee_events = FundingFeeEvent::get_all(&mut db)?;
Ok(funding_fee_events)
}
/// Attempt to insert a list of unpaid funding fee events. Unpaid funding fee events that are
/// already in the database are ignored.
///
/// Unpaid funding fee events that are confirmed to be new are returned.
pub fn insert_unpaid_funding_fee_events(
funding_fee_events: &[crate::trade::FundingFeeEvent],
) -> Result<Vec<crate::trade::FundingFeeEvent>> {
let mut db = connection()?;
let inserted_events = funding_fee_events
.iter()
.filter_map(|e| match UnpaidFundingFeeEvent::insert(&mut db, *e) {
Ok(event) => event,
Err(e) => {
tracing::error!(?e, "Failed to insert unpaid funding fee event");
None
}
})
.collect();
Ok(inserted_events)
}
pub fn mark_funding_fee_events_as_paid(
contract_symbol: commons::ContractSymbol,
since: OffsetDateTime,
) -> Result<()> {
let mut db = connection()?;
UnpaidFundingFeeEvent::mark_as_paid(&mut db, contract_symbol, since)
.context("Failed to mark funding fee events as paid")?;
Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/db/custom_types.rs | mobile/native/src/db/custom_types.rs | use crate::db::dlc_messages::MessageType;
use crate::db::models::ChannelState;
use crate::db::models::ContractSymbol;
use crate::db::models::Direction;
use crate::db::models::FailureReason;
use crate::db::models::Flow;
use crate::db::models::OrderReason;
use crate::db::models::OrderState;
use crate::db::models::OrderType;
use crate::db::models::PositionState;
use diesel::backend;
use diesel::deserialize;
use diesel::deserialize::FromSql;
use diesel::serialize;
use diesel::serialize::IsNull;
use diesel::serialize::Output;
use diesel::serialize::ToSql;
use diesel::sql_types::Text;
use diesel::sqlite::Sqlite;
impl ToSql<Text, Sqlite> for OrderType {
fn to_sql(&self, out: &mut Output<Sqlite>) -> serialize::Result {
let text = match *self {
OrderType::Market => "market".to_string(),
OrderType::Limit => "limit".to_string(),
};
out.set_value(text);
Ok(IsNull::No)
}
}
impl FromSql<Text, Sqlite> for OrderType {
fn from_sql(bytes: backend::RawValue<Sqlite>) -> deserialize::Result<Self> {
let string = <String as FromSql<Text, Sqlite>>::from_sql(bytes)?;
return match string.as_str() {
"market" => Ok(OrderType::Market),
"limit" => Ok(OrderType::Limit),
_ => Err("Unrecognized enum variant".into()),
};
}
}
impl ToSql<Text, Sqlite> for OrderReason {
fn to_sql(&self, out: &mut Output<Sqlite>) -> serialize::Result {
let text = match *self {
OrderReason::Manual => "Manual".to_string(),
OrderReason::Expired => "Expired".to_string(),
OrderReason::CoordinatorLiquidated => "CoordinatorLiquidated".to_string(),
OrderReason::TraderLiquidated => "TraderLiquidated".to_string(),
};
out.set_value(text);
Ok(IsNull::No)
}
}
impl FromSql<Text, Sqlite> for OrderReason {
fn from_sql(bytes: backend::RawValue<Sqlite>) -> deserialize::Result<Self> {
let string = <String as FromSql<Text, Sqlite>>::from_sql(bytes)?;
return match string.as_str() {
"Manual" => Ok(OrderReason::Manual),
"Expired" => Ok(OrderReason::Expired),
"CoordinatorLiquidated" => Ok(OrderReason::CoordinatorLiquidated),
"TraderLiquidated" => Ok(OrderReason::TraderLiquidated),
_ => Err("Unrecognized enum variant".into()),
};
}
}
impl ToSql<Text, Sqlite> for OrderState {
fn to_sql(&self, out: &mut Output<Sqlite>) -> serialize::Result {
let text = match *self {
OrderState::Initial => "initial".to_string(),
OrderState::Rejected => "rejected".to_string(),
OrderState::Open => "open".to_string(),
OrderState::Failed => "failed".to_string(),
OrderState::Filled => "filled".to_string(),
OrderState::Filling => "filling".to_string(),
};
out.set_value(text);
Ok(IsNull::No)
}
}
impl FromSql<Text, Sqlite> for OrderState {
fn from_sql(bytes: backend::RawValue<Sqlite>) -> deserialize::Result<Self> {
let string = <String as FromSql<Text, Sqlite>>::from_sql(bytes)?;
return match string.as_str() {
"initial" => Ok(OrderState::Initial),
"rejected" => Ok(OrderState::Rejected),
"open" => Ok(OrderState::Open),
"failed" => Ok(OrderState::Failed),
"filled" => Ok(OrderState::Filled),
"filling" => Ok(OrderState::Filling),
_ => Err("Unrecognized enum variant".into()),
};
}
}
impl ToSql<Text, Sqlite> for ContractSymbol {
fn to_sql(&self, out: &mut Output<Sqlite>) -> serialize::Result {
let text = match *self {
ContractSymbol::BtcUsd => "BtcUsd",
};
out.set_value(text);
Ok(IsNull::No)
}
}
impl FromSql<Text, Sqlite> for ContractSymbol {
fn from_sql(bytes: backend::RawValue<Sqlite>) -> deserialize::Result<Self> {
let string = <String as FromSql<Text, Sqlite>>::from_sql(bytes)?;
return match string.as_str() {
"BtcUsd" => Ok(ContractSymbol::BtcUsd),
_ => Err("Unrecognized enum variant".into()),
};
}
}
impl ToSql<Text, Sqlite> for Direction {
fn to_sql(&self, out: &mut Output<Sqlite>) -> serialize::Result {
let text = match *self {
Direction::Long => "Long",
Direction::Short => "Short",
};
out.set_value(text);
Ok(IsNull::No)
}
}
impl FromSql<Text, Sqlite> for Direction {
fn from_sql(bytes: backend::RawValue<Sqlite>) -> deserialize::Result<Self> {
let string = <String as FromSql<Text, Sqlite>>::from_sql(bytes)?;
return match string.as_str() {
"Long" => Ok(Direction::Long),
"Short" => Ok(Direction::Short),
_ => Err("Unrecognized enum variant".into()),
};
}
}
impl ToSql<Text, Sqlite> for FailureReason {
fn to_sql(&self, out: &mut Output<Sqlite>) -> serialize::Result {
let text = serde_json::to_string(self)?;
out.set_value(text);
Ok(IsNull::No)
}
}
impl FromSql<Text, Sqlite> for FailureReason {
fn from_sql(bytes: backend::RawValue<Sqlite>) -> deserialize::Result<Self> {
let string = <String as FromSql<Text, Sqlite>>::from_sql(bytes)?;
match serde_json::from_str(string.as_str()) {
Ok(reason) => Ok(reason),
Err(_) => Ok(FailureReason::Unknown),
}
}
}
impl ToSql<Text, Sqlite> for PositionState {
fn to_sql(&self, out: &mut Output<Sqlite>) -> serialize::Result {
let text = match *self {
PositionState::Open => "Open",
PositionState::Closing => "Closing",
PositionState::Rollover => "Rollover",
PositionState::Resizing => "Resizing",
};
out.set_value(text);
Ok(IsNull::No)
}
}
impl FromSql<Text, Sqlite> for PositionState {
fn from_sql(bytes: backend::RawValue<Sqlite>) -> deserialize::Result<Self> {
let string = <String as FromSql<Text, Sqlite>>::from_sql(bytes)?;
return match string.as_str() {
"Open" => Ok(PositionState::Open),
"Closing" => Ok(PositionState::Closing),
"Rollover" => Ok(PositionState::Rollover),
"Resizing" => Ok(PositionState::Resizing),
_ => Err("Unrecognized enum variant".into()),
};
}
}
impl ToSql<Text, Sqlite> for Flow {
fn to_sql(&self, out: &mut Output<Sqlite>) -> serialize::Result {
let text = match *self {
Flow::Inbound => "Inbound",
Flow::Outbound => "Outbound",
};
out.set_value(text);
Ok(IsNull::No)
}
}
impl FromSql<Text, Sqlite> for Flow {
fn from_sql(bytes: backend::RawValue<Sqlite>) -> deserialize::Result<Self> {
let string = <String as FromSql<Text, Sqlite>>::from_sql(bytes)?;
return match string.as_str() {
"Inbound" => Ok(Flow::Inbound),
"Outbound" => Ok(Flow::Outbound),
_ => Err("Unrecognized enum variant".into()),
};
}
}
impl ToSql<Text, Sqlite> for ChannelState {
fn to_sql(&self, out: &mut Output<Sqlite>) -> serialize::Result {
let text = match *self {
ChannelState::Open => "Open",
ChannelState::OpenUnpaid => "OpenUnpaid",
ChannelState::Announced => "Announced",
ChannelState::Pending => "Pending",
ChannelState::Closed => "Closed",
ChannelState::ForceClosedRemote => "ForceClosedRemote",
ChannelState::ForceClosedLocal => "ForceClosedLocal",
};
out.set_value(text);
Ok(IsNull::No)
}
}
impl FromSql<Text, Sqlite> for ChannelState {
fn from_sql(bytes: backend::RawValue<Sqlite>) -> deserialize::Result<Self> {
let string = <String as FromSql<Text, Sqlite>>::from_sql(bytes)?;
return match string.as_str() {
"Open" => Ok(ChannelState::Open),
"OpenUnpaid" => Ok(ChannelState::OpenUnpaid),
"Announced" => Ok(ChannelState::Announced),
"Pending" => Ok(ChannelState::Pending),
"Closed" => Ok(ChannelState::Closed),
"ForceClosedRemote" => Ok(ChannelState::ForceClosedRemote),
"ForceClosedLocal" => Ok(ChannelState::ForceClosedLocal),
_ => Err("Unrecognized enum variant".into()),
};
}
}
/// Serializes [`MessageType`] as its variant name into a `TEXT` column.
impl ToSql<Text, Sqlite> for MessageType {
    fn to_sql(&self, out: &mut Output<Sqlite>) -> serialize::Result {
        // The stored string must stay in sync with the `FromSql` impl below.
        let text = match *self {
            MessageType::Offer => "Offer",
            MessageType::Accept => "Accept",
            MessageType::Sign => "Sign",
            MessageType::SettleOffer => "SettleOffer",
            MessageType::SettleAccept => "SettleAccept",
            MessageType::SettleConfirm => "SettleConfirm",
            MessageType::SettleFinalize => "SettleFinalize",
            MessageType::RenewOffer => "RenewOffer",
            MessageType::RenewAccept => "RenewAccept",
            MessageType::RenewConfirm => "RenewConfirm",
            MessageType::RenewFinalize => "RenewFinalize",
            MessageType::RenewRevoke => "RenewRevoke",
            MessageType::RolloverOffer => "RolloverOffer",
            MessageType::RolloverAccept => "RolloverAccept",
            MessageType::RolloverConfirm => "RolloverConfirm",
            MessageType::RolloverFinalize => "RolloverFinalize",
            MessageType::RolloverRevoke => "RolloverRevoke",
            MessageType::CollaborativeCloseOffer => "CollaborativeCloseOffer",
            MessageType::Reject => "Reject",
        };
        out.set_value(text);
        Ok(IsNull::No)
    }
}
/// Deserializes [`MessageType`] from the `TEXT` representation written by `ToSql`.
impl FromSql<Text, Sqlite> for MessageType {
    fn from_sql(bytes: backend::RawValue<Sqlite>) -> deserialize::Result<Self> {
        let string = <String as FromSql<Text, Sqlite>>::from_sql(bytes)?;
        // Tail expression instead of `return ...;` (clippy::needless_return).
        match string.as_str() {
            "Offer" => Ok(MessageType::Offer),
            "Accept" => Ok(MessageType::Accept),
            "Sign" => Ok(MessageType::Sign),
            "SettleOffer" => Ok(MessageType::SettleOffer),
            "SettleAccept" => Ok(MessageType::SettleAccept),
            "SettleConfirm" => Ok(MessageType::SettleConfirm),
            "SettleFinalize" => Ok(MessageType::SettleFinalize),
            "RenewOffer" => Ok(MessageType::RenewOffer),
            "RenewAccept" => Ok(MessageType::RenewAccept),
            "RenewConfirm" => Ok(MessageType::RenewConfirm),
            "RenewFinalize" => Ok(MessageType::RenewFinalize),
            "RenewRevoke" => Ok(MessageType::RenewRevoke),
            "RolloverOffer" => Ok(MessageType::RolloverOffer),
            "RolloverAccept" => Ok(MessageType::RolloverAccept),
            "RolloverConfirm" => Ok(MessageType::RolloverConfirm),
            "RolloverFinalize" => Ok(MessageType::RolloverFinalize),
            "RolloverRevoke" => Ok(MessageType::RolloverRevoke),
            "CollaborativeCloseOffer" => Ok(MessageType::CollaborativeCloseOffer),
            "Reject" => Ok(MessageType::Reject),
            _ => Err("Unrecognized enum variant".into()),
        }
    }
}
// Round-trip tests for the custom `ToSql`/`FromSql` enum mappings: each enum
// value is written to an in-memory SQLite table as TEXT and read back, which
// exercises both directions of the conversion.
#[cfg(test)]
mod tests {
    use crate::db::custom_types::tests::customstruct::id;
    use crate::db::models::ContractSymbol;
    use crate::db::models::Direction;
    use crate::db::models::OrderState;
    use crate::db::models::OrderType;
    use diesel::connection::SimpleConnection;
    use diesel::insert_into;
    use diesel::prelude::*;
    use diesel::Connection;
    use diesel::RunQueryDsl;
    use diesel::SqliteConnection;
    // Fixture row holding one column per custom enum type under test.
    #[derive(Insertable, Queryable, Identifiable, Debug, PartialEq, Clone)]
    #[diesel(table_name = customstruct)]
    struct SampleStruct {
        id: String,
        order_type: OrderType,
        order_state: OrderState,
        contract_symbol: ContractSymbol,
        direction: Direction,
    }
    // Ad-hoc diesel table definition matching the `create table` below.
    diesel::table! {
        customstruct (id) {
            id -> Text,
            order_type -> Text,
            order_state -> Text,
            contract_symbol -> Text,
            direction -> Text,
        }
    }
    #[test]
    fn roundtrip_for_custom_types() {
        // In-memory DB: no filesystem state, isolated per test run.
        let mut connection = SqliteConnection::establish(":memory:").unwrap();
        connection
            .batch_execute(
                r#"
        create table customstruct (
            id TEXT PRIMARY KEY NOT NULL,
            order_type TEXT NOT NULL,
            order_state TEXT NOT NULL,
            contract_symbol TEXT NOT NULL,
            direction TEXT NOT NULL
            )"#,
            )
            .unwrap();
        let sample_struct = SampleStruct {
            id: "1".to_string(),
            order_type: OrderType::Limit,
            order_state: OrderState::Filled,
            contract_symbol: ContractSymbol::BtcUsd,
            direction: Direction::Short,
        };
        // Write the row through the `ToSql` impls ...
        let i = insert_into(crate::db::custom_types::tests::customstruct::dsl::customstruct)
            .values(sample_struct.clone())
            .execute(&mut connection)
            .unwrap();
        assert_eq!(i, 1);
        // ... and read it back through the `FromSql` impls.
        let vec = crate::db::custom_types::tests::customstruct::dsl::customstruct
            .filter(id.eq("1".to_string()))
            .load::<SampleStruct>(&mut connection)
            .unwrap();
        assert_eq!(vec.len(), 1);
        let loaded_struct = vec.first().unwrap();
        assert_eq!(loaded_struct, &sample_struct);
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/db/models.rs | mobile/native/src/db/models.rs | use crate::schema;
use crate::schema::channels;
use crate::schema::orders;
use crate::schema::positions;
use crate::schema::spendable_outputs;
use crate::schema::trades;
use crate::schema::transactions;
use crate::trade::order::InvalidSubchannelOffer;
use anyhow::anyhow;
use anyhow::bail;
use anyhow::ensure;
use anyhow::Result;
use bitcoin::Amount;
use bitcoin::SignedAmount;
use bitcoin::Txid;
use diesel::prelude::*;
use diesel::sql_types::Text;
use diesel::AsExpression;
use diesel::FromSqlRow;
use diesel::Queryable;
use lightning::util::ser::Readable;
use lightning::util::ser::Writeable;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use serde::Deserialize;
use serde::Serialize;
use std::str::FromStr;
use time::OffsetDateTime;
use uuid::Uuid;
use xxi_node::commons;
mod funding_fee_event;
pub(crate) use funding_fee_event::FundingFeeEvent;
pub(crate) use funding_fee_event::UnpaidFundingFeeEvent;
/// Errors that can occur when converting a persisted [`Order`] row back into
/// the domain `crate::trade::order::Order` type.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    #[error("Invalid id when converting string to uuid: {0}")]
    InvalidId(#[from] uuid::Error),
    #[error("Limit order has to have a price")]
    MissingPriceForLimitOrder,
    #[error("A filling or filled order has to have an execution price")]
    MissingExecutionPrice,
    #[error("A filled order has to have a matching fee")]
    MissingMatchingFee,
}
/// DB row for the `orders` table. Nullable columns are modelled as `Option`;
/// which of them are populated depends on the order's `state` (see
/// `derive_order_state`).
#[derive(Queryable, QueryableByName, Insertable, Debug, Clone, PartialEq)]
#[diesel(table_name = orders)]
pub(crate) struct Order {
    // UUID stored as its string representation.
    pub id: String,
    pub leverage: f32,
    pub quantity: f32,
    pub contract_symbol: ContractSymbol,
    pub direction: Direction,
    pub order_type: OrderType,
    pub state: OrderState,
    // Unix timestamps (seconds).
    pub creation_timestamp: i64,
    // Only set for limit orders.
    pub limit_price: Option<f32>,
    // Set once the order is filling/filled (or failed with a known price).
    pub execution_price: Option<f32>,
    pub failure_reason: Option<FailureReason>,
    pub order_expiry_timestamp: i64,
    pub reason: OrderReason,
    pub stable: bool,
    // Matching fee in sats; set when the order is filling/filled.
    pub matching_fee_sats: Option<i64>,
}
impl Order {
    /// Inserts the given order into the DB. Returns the order if successful.
    pub fn insert(order: Order, conn: &mut SqliteConnection) -> Result<Order> {
        let affected_rows = diesel::insert_into(orders::table)
            .values(&order)
            .execute(conn)?;
        if affected_rows > 0 {
            Ok(order)
        } else {
            bail!("Could not insert order")
        }
    }
    /// Sets all filling orders to failed. Only to be used for emergency recoveries!
    pub fn set_all_filling_orders_to_failed(conn: &mut SqliteConnection) -> Result<()> {
        let affected_rows = diesel::update(orders::table)
            .filter(schema::orders::state.eq(OrderState::Filling))
            .set((
                orders::state.eq(OrderState::Failed),
                orders::failure_reason.eq(FailureReason::Unknown),
            ))
            .execute(conn)?;
        tracing::info!("Updated {affected_rows} orders from Filling to Failed");
        Ok(())
    }
    /// Marks the order as failed, optionally recording the price/fee known at
    /// failure time, plus the failure reason.
    pub fn set_order_state_to_failed(
        order_id: String,
        execution_price: Option<f32>,
        matching_fee: Option<Amount>,
        failure_reason: FailureReason,
        conn: &mut SqliteConnection,
    ) -> Result<Order> {
        Self::update_state(
            order_id,
            OrderState::Failed,
            execution_price,
            matching_fee,
            Some(failure_reason),
            conn,
        )
    }
    /// Marks the order as open; clears no other columns.
    pub fn set_order_state_to_open(order_id: String, conn: &mut SqliteConnection) -> Result<Order> {
        Self::update_state(order_id, OrderState::Open, None, None, None, conn)
    }
    /// Marks the order as filling, recording the execution price and matching fee.
    pub fn set_order_state_to_filling(
        order_id: Uuid,
        execution_price: f32,
        matching_fee: Amount,
        conn: &mut SqliteConnection,
    ) -> Result<Order> {
        Self::update_state(
            order_id.to_string(),
            OrderState::Filling,
            Some(execution_price),
            Some(matching_fee),
            None,
            conn,
        )
    }
    /// Marks the order as filled, recording the execution price and matching fee.
    pub fn set_order_state_to_filled(
        order_id: Uuid,
        execution_price: f32,
        matching_fee: Amount,
        conn: &mut SqliteConnection,
    ) -> Result<Order> {
        Self::update_state(
            order_id.to_string(),
            OrderState::Filled,
            Some(execution_price),
            Some(matching_fee),
            None,
            conn,
        )
    }
    /// Updates the status (and optional price/fee/failure columns) of the given
    /// order in the DB. All updates run inside one exclusive transaction so the
    /// row is either fully updated or untouched; the updated row is re-read and
    /// returned from within the same transaction.
    fn update_state(
        order_id: String,
        order_state: OrderState,
        execution_price: Option<f32>,
        matching_fee: Option<Amount>,
        failure_reason: Option<FailureReason>,
        conn: &mut SqliteConnection,
    ) -> Result<Order> {
        conn.exclusive_transaction::<Order, _, _>(|conn| {
            let affected_rows = diesel::update(orders::table)
                .filter(orders::id.eq(order_id.clone()))
                .set(orders::state.eq(order_state))
                .execute(conn)?;
            if affected_rows == 0 {
                // Zero rows means the order id does not exist; abort the transaction.
                bail!("Could not update order state")
            }
            tracing::info!(new_state = ?order_state, %order_id, "Updated order state");
            if let Some(execution_price) = execution_price {
                let affected_rows = diesel::update(orders::table)
                    .filter(orders::id.eq(order_id.clone()))
                    .set(orders::execution_price.eq(execution_price))
                    .execute(conn)?;
                if affected_rows == 0 {
                    bail!("Could not update order execution price")
                }
            }
            if let Some(matching_fee) = matching_fee {
                let affected_rows = diesel::update(orders::table)
                    .filter(orders::id.eq(order_id.clone()))
                    .set(orders::matching_fee_sats.eq(matching_fee.to_sat() as i64))
                    .execute(conn)?;
                if affected_rows == 0 {
                    bail!("Could not update order matching fee")
                }
            }
            if let Some(failure_reason) = failure_reason {
                let affected_rows = diesel::update(orders::table)
                    .filter(orders::id.eq(order_id.clone()))
                    .set(orders::failure_reason.eq(failure_reason))
                    .execute(conn)?;
                if affected_rows == 0 {
                    bail!("Could not update order failure reason")
                }
            }
            let order = orders::table
                .filter(orders::id.eq(order_id.clone()))
                .first(conn)?;
            Ok(order)
        })
    }
    /// Fetches the order by id; `Ok(None)` if no such row exists.
    pub fn get(order_id: String, conn: &mut SqliteConnection) -> QueryResult<Option<Order>> {
        orders::table
            .filter(schema::orders::id.eq(order_id))
            .first(conn)
            .optional()
    }
    /// Fetch all orders that are not in initial and rejected state.
    pub fn get_without_rejected_and_initial(
        conn: &mut SqliteConnection,
    ) -> QueryResult<Vec<Order>> {
        orders::table
            .filter(
                schema::orders::state
                    .ne(OrderState::Initial)
                    .and(schema::orders::state.ne(OrderState::Rejected)),
            )
            .load(conn)
    }
    /// Fetches all orders currently in the given state.
    pub fn get_by_state(
        order_state: OrderState,
        conn: &mut SqliteConnection,
    ) -> QueryResult<Vec<Order>> {
        orders::table
            .filter(schema::orders::state.eq(order_state))
            .load(conn)
    }
    /// Deletes given order from DB; in case of success, returns > 0, else 0 or Err.
    pub fn delete(order_id: String, conn: &mut SqliteConnection) -> QueryResult<usize> {
        diesel::delete(orders::table)
            .filter(orders::id.eq(order_id))
            .execute(conn)
    }
}
/// Flattens a domain order into the DB row shape: the nested `order_type` and
/// `state` enums are split into their column components, timestamps become
/// unix seconds, and the matching fee becomes sats.
impl From<crate::trade::order::Order> for Order {
    fn from(value: crate::trade::order::Order) -> Self {
        let (order_type, limit_price) = value.order_type.into();
        let execution_price = value.execution_price();
        let matching_fee = value.matching_fee();
        // The middle tuple element (execution price from the state) is ignored
        // in favour of the `execution_price()` accessor above.
        let (status, _, failure_reason) = value.state.into();
        Order {
            id: value.id.to_string(),
            leverage: value.leverage,
            quantity: value.quantity,
            contract_symbol: value.contract_symbol.into(),
            direction: value.direction.into(),
            order_type,
            state: status,
            creation_timestamp: value.creation_timestamp.unix_timestamp(),
            limit_price,
            execution_price,
            failure_reason,
            order_expiry_timestamp: value.order_expiry_timestamp.unix_timestamp(),
            reason: value.reason.into(),
            stable: value.stable,
            matching_fee_sats: matching_fee.map(|fee| fee.to_sat() as i64),
        }
    }
}
/// Maps a domain order reason onto its DB representation (1:1).
impl From<crate::trade::order::OrderReason> for OrderReason {
    fn from(value: crate::trade::order::OrderReason) -> Self {
        use crate::trade::order::OrderReason as Domain;
        match value {
            Domain::Manual => Self::Manual,
            Domain::Expired => Self::Expired,
            Domain::CoordinatorLiquidated => Self::CoordinatorLiquidated,
            Domain::TraderLiquidated => Self::TraderLiquidated,
        }
    }
}
/// Maps a DB order reason back onto the domain representation (1:1).
impl From<OrderReason> for crate::trade::order::OrderReason {
    fn from(value: OrderReason) -> Self {
        use crate::trade::order::OrderReason as Domain;
        match value {
            OrderReason::Manual => Domain::Manual,
            OrderReason::Expired => Domain::Expired,
            OrderReason::CoordinatorLiquidated => Domain::CoordinatorLiquidated,
            OrderReason::TraderLiquidated => Domain::TraderLiquidated,
        }
    }
}
/// Rebuilds a domain order from a DB row. Fails if the stored columns are
/// inconsistent with the stored state (see [`derive_order_state`]) or the id
/// is not a valid UUID.
impl TryFrom<Order> for crate::trade::order::Order {
    type Error = Error;
    fn try_from(value: Order) -> std::result::Result<Self, Self::Error> {
        let order = crate::trade::order::Order {
            id: Uuid::parse_str(value.id.as_str()).map_err(Error::InvalidId)?,
            leverage: value.leverage,
            quantity: value.quantity,
            contract_symbol: value.contract_symbol.into(),
            direction: value.direction.into(),
            order_type: (value.order_type, value.limit_price).try_into()?,
            // The stored nullable columns are validated against the state here.
            state: derive_order_state(
                value.state,
                value.execution_price,
                value.matching_fee_sats,
                value.failure_reason.clone(),
            )?,
            // Timestamps were written via `unix_timestamp()`, so reading them
            // back cannot fail.
            creation_timestamp: OffsetDateTime::from_unix_timestamp(value.creation_timestamp)
                .expect("unix timestamp to fit in itself"),
            order_expiry_timestamp: OffsetDateTime::from_unix_timestamp(
                value.order_expiry_timestamp,
            )
            .expect("unix timestamp to fit in itself"),
            reason: value.reason.into(),
            stable: value.stable,
            failure_reason: value.failure_reason.map(|reason| reason.into()),
        };
        Ok(order)
    }
}
fn derive_order_state(
order_state: OrderState,
execution_price: Option<f32>,
matching_fee: Option<i64>,
failure_reason: Option<FailureReason>,
) -> Result<crate::trade::order::OrderState, Error> {
let state = match order_state {
OrderState::Initial => crate::trade::order::OrderState::Initial,
OrderState::Rejected => crate::trade::order::OrderState::Rejected,
OrderState::Open => crate::trade::order::OrderState::Open,
OrderState::Filling => match execution_price {
None => return Err(Error::MissingExecutionPrice),
Some(execution_price) => crate::trade::order::OrderState::Filling {
execution_price,
matching_fee: Amount::from_sat(matching_fee.unwrap_or_default() as u64),
},
},
OrderState::Failed => crate::trade::order::OrderState::Failed {
execution_price,
matching_fee: matching_fee.map(|fee| Amount::from_sat(fee as u64)),
reason: failure_reason.unwrap_or_default().into(),
},
OrderState::Filled => {
let execution_price = if let Some(execution_price) = execution_price {
execution_price
} else {
return Err(Error::MissingExecutionPrice);
};
let matching_fee = if let Some(matching_fee) = matching_fee {
matching_fee
} else {
return Err(Error::MissingMatchingFee);
};
crate::trade::order::OrderState::Filled {
execution_price,
matching_fee: Amount::from_sat(matching_fee as u64),
}
}
};
Ok(state)
}
/// DB row for the `positions` table. One row per contract symbol (the table
/// is keyed by `contract_symbol` in all queries below).
#[derive(Queryable, AsChangeset, QueryableByName, Insertable, Debug, Clone, PartialEq)]
#[diesel(table_name = positions)]
pub(crate) struct Position {
    pub contract_symbol: ContractSymbol,
    pub leverage: f32,
    pub quantity: f32,
    pub direction: Direction,
    pub average_entry_price: f32,
    pub liquidation_price: f32,
    pub state: PositionState,
    // Collateral in sats.
    pub collateral: i64,
    // Unix timestamps (seconds).
    pub creation_timestamp: i64,
    pub expiry_timestamp: i64,
    pub updated_timestamp: i64,
    pub stable: bool,
    // Accumulated order matching fees in sats.
    pub order_matching_fees: i64,
}
/// Lifecycle states a position can be in, stored as TEXT.
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = Text)]
pub enum PositionState {
    Open,
    Closing,
    Rollover,
    Resizing,
}
impl Position {
    /// Inserts the given position into the DB. Returns the position if successful.
    pub fn insert(position: Position, conn: &mut SqliteConnection) -> Result<Position> {
        let affected_rows = diesel::insert_into(positions::table)
            .values(&position)
            .execute(conn)?;
        if affected_rows > 0 {
            Ok(position)
        } else {
            bail!("Could not insert position")
        }
    }
    /// Loads every stored position.
    pub fn get_all(conn: &mut SqliteConnection) -> QueryResult<Vec<Position>> {
        positions::table.load(conn)
    }
    /// Loads the position for the given contract symbol; errors if none exists.
    pub fn get_position(
        conn: &mut SqliteConnection,
        contract_symbol: ContractSymbol,
    ) -> QueryResult<Position> {
        positions::table
            .filter(positions::contract_symbol.eq(contract_symbol))
            .first(conn)
    }
    /// Update the status of the [`Position`] identified by the given [`ContractSymbol`].
    pub fn update_state(
        contract_symbol: ContractSymbol,
        state: PositionState,
        conn: &mut SqliteConnection,
    ) -> Result<Position> {
        let affected_rows = diesel::update(positions::table)
            .filter(schema::positions::contract_symbol.eq(contract_symbol))
            .set(schema::positions::state.eq(state))
            .execute(conn)?;
        if affected_rows == 0 {
            bail!("Could not update position")
        }
        let position = positions::table
            .filter(positions::contract_symbol.eq(contract_symbol))
            .first(conn)?;
        Ok(position)
    }
    /// Update the position after the rollover protocol has started.
    pub fn start_rollover(conn: &mut SqliteConnection, updated_position: Position) -> Result<()> {
        let affected_rows = diesel::update(positions::table)
            .filter(schema::positions::contract_symbol.eq(updated_position.contract_symbol))
            .set(updated_position)
            .execute(conn)?;
        if affected_rows == 0 {
            bail!("Could not start rollover in DB");
        }
        Ok(())
    }
    /// Update the position after the rollover protocol has ended.
    /// Note the filter: only a row currently in `Rollover` state is updated.
    pub fn finish_rollover(conn: &mut SqliteConnection, updated_position: Position) -> Result<()> {
        let affected_rows = diesel::update(positions::table)
            .filter(positions::state.eq(PositionState::Rollover))
            .set(updated_position)
            .execute(conn)?;
        if affected_rows == 0 {
            bail!("Could not finish rollover in DB");
        }
        Ok(())
    }
    /// Overwrites the stored position with the given values.
    /// NOTE(review): there is no filter here, so this updates *every* row in
    /// the table — presumably relying on there being at most one position
    /// (single-position MVP, cf. the TODO below); confirm before reuse.
    pub fn update_position(conn: &mut SqliteConnection, updated_position: Position) -> Result<()> {
        let affected_rows = diesel::update(positions::table)
            .set(updated_position)
            .execute(conn)?;
        if affected_rows == 0 {
            bail!("Could not update position")
        }
        Ok(())
    }
    // TODO: This is obviously only for the MVP :)
    /// Deletes all positions in the database.
    pub fn delete_all(conn: &mut SqliteConnection) -> QueryResult<usize> {
        diesel::delete(positions::table).execute(conn)
    }
}
/// Converts the DB row into the domain position; unix-second columns become
/// `OffsetDateTime`, sats columns become `Amount`/`u64`.
impl From<Position> for crate::trade::position::Position {
    fn from(value: Position) -> Self {
        Self {
            leverage: value.leverage,
            quantity: value.quantity,
            contract_symbol: value.contract_symbol.into(),
            direction: value.direction.into(),
            average_entry_price: value.average_entry_price,
            liquidation_price: value.liquidation_price,
            position_state: value.state.into(),
            collateral: value.collateral as u64,
            expiry: OffsetDateTime::from_unix_timestamp(value.expiry_timestamp)
                .expect("to fit into unix timestamp"),
            updated: OffsetDateTime::from_unix_timestamp(value.updated_timestamp)
                .expect("to fit into unix timestamp"),
            created: OffsetDateTime::from_unix_timestamp(value.creation_timestamp)
                .expect("to fit into unix timestamp"),
            stable: value.stable,
            order_matching_fees: Amount::from_sat(value.order_matching_fees as u64),
        }
    }
}
/// Converts the domain position into a DB row.
impl From<crate::trade::position::Position> for Position {
    fn from(value: crate::trade::position::Position) -> Self {
        Self {
            contract_symbol: value.contract_symbol.into(),
            leverage: value.leverage,
            quantity: value.quantity,
            direction: value.direction.into(),
            average_entry_price: value.average_entry_price,
            liquidation_price: value.liquidation_price,
            state: value.position_state.into(),
            collateral: value.collateral as i64,
            // NOTE(review): both timestamps are set to "now", discarding the
            // domain `created` value — looks intentional for fresh inserts,
            // but would clobber `creation_timestamp` on round-trips; confirm.
            creation_timestamp: OffsetDateTime::now_utc().unix_timestamp(),
            updated_timestamp: OffsetDateTime::now_utc().unix_timestamp(),
            expiry_timestamp: value.expiry.unix_timestamp(),
            stable: value.stable,
            order_matching_fees: value.order_matching_fees.to_sat() as i64,
        }
    }
}
/// Maps a domain position state onto its DB representation (1:1).
impl From<crate::trade::position::PositionState> for PositionState {
    fn from(value: crate::trade::position::PositionState) -> Self {
        use crate::trade::position::PositionState as Domain;
        match value {
            Domain::Open => Self::Open,
            Domain::Closing => Self::Closing,
            Domain::Rollover => Self::Rollover,
            Domain::Resizing => Self::Resizing,
        }
    }
}
/// Maps a DB position state back onto the domain representation (1:1).
impl From<PositionState> for crate::trade::position::PositionState {
    fn from(value: PositionState) -> Self {
        use crate::trade::position::PositionState as Domain;
        match value {
            PositionState::Open => Domain::Open,
            PositionState::Closing => Domain::Closing,
            PositionState::Rollover => Domain::Rollover,
            PositionState::Resizing => Domain::Resizing,
        }
    }
}
/// Contract symbols supported by the DB layer, stored as TEXT.
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = Text)]
pub enum ContractSymbol {
    BtcUsd,
}
/// DB representation of a commons contract symbol (1:1).
impl From<commons::ContractSymbol> for ContractSymbol {
    fn from(value: commons::ContractSymbol) -> Self {
        match value {
            commons::ContractSymbol::BtcUsd => Self::BtcUsd,
        }
    }
}
/// Commons representation of a DB contract symbol (1:1).
impl From<ContractSymbol> for commons::ContractSymbol {
    fn from(value: ContractSymbol) -> Self {
        match value {
            ContractSymbol::BtcUsd => Self::BtcUsd,
        }
    }
}
/// Trade direction, stored as TEXT.
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = Text)]
pub enum Direction {
    Long,
    Short,
}
/// DB representation of a commons direction (1:1).
impl From<commons::Direction> for Direction {
    fn from(value: commons::Direction) -> Self {
        match value {
            commons::Direction::Long => Self::Long,
            commons::Direction::Short => Self::Short,
        }
    }
}
/// Commons representation of a DB direction (1:1).
impl From<Direction> for commons::Direction {
    fn from(value: Direction) -> Self {
        match value {
            Direction::Long => Self::Long,
            Direction::Short => Self::Short,
        }
    }
}
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = Text)]
pub enum OrderType {
Market,
Limit,
}
impl From<crate::trade::order::OrderType> for (OrderType, Option<f32>) {
fn from(value: crate::trade::order::OrderType) -> Self {
match value {
crate::trade::order::OrderType::Market => (OrderType::Market, None),
crate::trade::order::OrderType::Limit { price } => (OrderType::Limit, Some(price)),
}
}
}
impl TryFrom<(OrderType, Option<f32>)> for crate::trade::order::OrderType {
type Error = Error;
fn try_from(value: (OrderType, Option<f32>)) -> std::result::Result<Self, Self::Error> {
let order_type = match value.0 {
OrderType::Market => crate::trade::order::OrderType::Market,
OrderType::Limit => match value.1 {
None => return Err(Error::MissingPriceForLimitOrder),
Some(price) => crate::trade::order::OrderType::Limit { price },
},
};
Ok(order_type)
}
}
/// Why an order was created, stored as TEXT.
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = Text)]
pub enum OrderReason {
    Manual,
    Expired,
    CoordinatorLiquidated,
    TraderLiquidated,
}
/// Order lifecycle state, stored as TEXT. Price/fee/failure details that some
/// states carry live in separate nullable columns.
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = Text)]
pub enum OrderState {
    Initial,
    Rejected,
    Open,
    Filling,
    Failed,
    Filled,
}
/// Splits the domain order state into the (state, optional execution price,
/// optional failure reason) column triple. The matching fee carried by some
/// states is intentionally dropped here (handled via `matching_fee()`).
impl From<crate::trade::order::OrderState> for (OrderState, Option<f32>, Option<FailureReason>) {
    fn from(value: crate::trade::order::OrderState) -> Self {
        match value {
            crate::trade::order::OrderState::Initial => (OrderState::Initial, None, None),
            crate::trade::order::OrderState::Rejected => (OrderState::Rejected, None, None),
            crate::trade::order::OrderState::Open => (OrderState::Open, None, None),
            crate::trade::order::OrderState::Failed {
                execution_price,
                reason,
                ..
            } => (OrderState::Failed, execution_price, Some(reason.into())),
            crate::trade::order::OrderState::Filled {
                execution_price, ..
            } => (OrderState::Filled, Some(execution_price), None),
            crate::trade::order::OrderState::Filling {
                execution_price, ..
            } => (OrderState::Filling, Some(execution_price), None),
        }
    }
}
/// Why an order failed, stored as TEXT (serde-serialized for the variants
/// that carry a payload). `Unknown` is the default used when no reason was
/// recorded.
#[derive(Debug, Clone, PartialEq, FromSqlRow, AsExpression, Serialize, Deserialize, Default)]
#[diesel(sql_type = Text)]
pub enum FailureReason {
    FailedToSetToFilling,
    TradeRequest,
    TradeResponse(String),
    CollabRevert,
    OrderNotAcceptable,
    TimedOut,
    // The three `SubchannelOffer*` variants flatten the domain's
    // `InvalidDlcOffer(InvalidSubchannelOffer)` nesting into flat DB variants.
    SubchannelOfferOutdated,
    SubchannelOfferDateUndetermined,
    SubchannelOfferUnacceptable,
    OrderRejected(String),
    #[default]
    Unknown,
}
/// Maps the flat DB failure reason back onto the domain representation; the
/// three `SubchannelOffer*` variants re-nest into `InvalidDlcOffer(..)`.
impl From<FailureReason> for crate::trade::order::FailureReason {
    fn from(value: FailureReason) -> Self {
        match value {
            FailureReason::TradeRequest => crate::trade::order::FailureReason::TradeRequest,
            FailureReason::TradeResponse(details) => {
                crate::trade::order::FailureReason::TradeResponse(details)
            }
            FailureReason::CollabRevert => crate::trade::order::FailureReason::CollabRevert,
            FailureReason::FailedToSetToFilling => {
                crate::trade::order::FailureReason::FailedToSetToFilling
            }
            FailureReason::OrderNotAcceptable => {
                crate::trade::order::FailureReason::OrderNotAcceptable
            }
            FailureReason::TimedOut => crate::trade::order::FailureReason::TimedOut,
            FailureReason::SubchannelOfferOutdated => {
                crate::trade::order::FailureReason::InvalidDlcOffer(
                    InvalidSubchannelOffer::Outdated,
                )
            }
            FailureReason::SubchannelOfferDateUndetermined => {
                crate::trade::order::FailureReason::InvalidDlcOffer(
                    InvalidSubchannelOffer::UndeterminedMaturityDate,
                )
            }
            FailureReason::SubchannelOfferUnacceptable => {
                crate::trade::order::FailureReason::InvalidDlcOffer(
                    InvalidSubchannelOffer::Unacceptable,
                )
            }
            FailureReason::OrderRejected(reason) => {
                crate::trade::order::FailureReason::OrderRejected(reason)
            }
            FailureReason::Unknown => crate::trade::order::FailureReason::Unknown,
        }
    }
}
/// Maps the domain failure reason onto the flat DB representation; the nested
/// `InvalidDlcOffer(..)` variants flatten into the three `SubchannelOffer*`
/// DB variants.
impl From<crate::trade::order::FailureReason> for FailureReason {
    fn from(value: crate::trade::order::FailureReason) -> Self {
        match value {
            crate::trade::order::FailureReason::TradeRequest => FailureReason::TradeRequest,
            crate::trade::order::FailureReason::TradeResponse(details) => {
                FailureReason::TradeResponse(details)
            }
            crate::trade::order::FailureReason::CollabRevert => FailureReason::CollabRevert,
            crate::trade::order::FailureReason::FailedToSetToFilling => {
                FailureReason::FailedToSetToFilling
            }
            crate::trade::order::FailureReason::OrderNotAcceptable => {
                FailureReason::OrderNotAcceptable
            }
            crate::trade::order::FailureReason::TimedOut => FailureReason::TimedOut,
            crate::trade::order::FailureReason::InvalidDlcOffer(reason) => match reason {
                InvalidSubchannelOffer::Outdated => FailureReason::SubchannelOfferOutdated,
                InvalidSubchannelOffer::UndeterminedMaturityDate => {
                    FailureReason::SubchannelOfferDateUndetermined
                }
                InvalidSubchannelOffer::Unacceptable => FailureReason::SubchannelOfferUnacceptable,
            },
            crate::trade::order::FailureReason::OrderRejected(reason) => {
                FailureReason::OrderRejected(reason)
            }
            crate::trade::order::FailureReason::Unknown => FailureReason::Unknown,
        }
    }
}
/// Payment direction, stored as TEXT.
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = Text)]
pub enum Flow {
    Inbound,
    Outbound,
}
/// DB representation of a node payment flow (1:1).
impl From<xxi_node::PaymentFlow> for Flow {
    fn from(value: xxi_node::PaymentFlow) -> Self {
        match value {
            xxi_node::PaymentFlow::Inbound => Flow::Inbound,
            xxi_node::PaymentFlow::Outbound => Flow::Outbound,
        }
    }
}
/// Node representation of a DB payment flow (1:1).
impl From<Flow> for xxi_node::PaymentFlow {
    fn from(value: Flow) -> Self {
        match value {
            Flow::Inbound => xxi_node::PaymentFlow::Inbound,
            Flow::Outbound => xxi_node::PaymentFlow::Outbound,
        }
    }
}
/// Insert-only row for the `spendable_outputs` table: the outpoint rendered
/// as `"<txid>:<index>"` plus the hex-encoded output descriptor.
#[derive(Insertable, Debug, Clone, PartialEq)]
#[diesel(table_name = spendable_outputs)]
pub(crate) struct SpendableOutputInsertable {
    #[diesel(sql_type = Text)]
    pub outpoint: String,
    #[diesel(sql_type = Text)]
    pub descriptor: String,
}
impl SpendableOutputInsertable {
    /// Inserts the given spendable output row; errors if nothing was inserted.
    pub fn insert(output: SpendableOutputInsertable, conn: &mut SqliteConnection) -> Result<()> {
        let affected_rows = diesel::insert_into(spendable_outputs::table)
            .values(&output)
            .execute(conn)?;
        ensure!(affected_rows > 0, "Could not insert spendable");
        Ok(())
    }
}
/// Read-side row for `spendable_outputs`, including the auto-increment id.
#[derive(Queryable, Debug, Clone, PartialEq)]
#[diesel(table_name = spendable_outputs)]
pub(crate) struct SpendableOutputQueryable {
    pub id: i32,
    pub outpoint: String,
    pub descriptor: String,
}
impl SpendableOutputQueryable {
    /// Looks up the row keyed by the given outpoint; errors if none exists.
    pub fn get(
        outpoint: lightning::chain::transaction::OutPoint,
        conn: &mut SqliteConnection,
    ) -> QueryResult<Self> {
        let outpoint = outpoint_to_string(outpoint);
        spendable_outputs::table
            .filter(schema::spendable_outputs::outpoint.eq(outpoint))
            .first(conn)
    }
    /// Deletes the row keyed by the given outpoint; errors if nothing was deleted.
    pub fn delete(
        outpoint: lightning::chain::transaction::OutPoint,
        conn: &mut SqliteConnection,
    ) -> Result<()> {
        let outpoint = outpoint_to_string(outpoint);
        let affected_rows = diesel::delete(
            spendable_outputs::table.filter(schema::spendable_outputs::outpoint.eq(outpoint)),
        )
        .execute(conn)?;
        ensure!(affected_rows > 0, "Could not delete spendable output");
        Ok(())
    }
    /// Loads every stored spendable output row.
    pub fn get_all(conn: &mut SqliteConnection) -> QueryResult<Vec<SpendableOutputQueryable>> {
        spendable_outputs::table.load(conn)
    }
}
fn outpoint_to_string(outpoint: lightning::chain::transaction::OutPoint) -> String {
format!("{}:{}", outpoint.txid, outpoint.index)
}
/// Builds the insertable DB row from an (outpoint, descriptor) pair: outpoint
/// rendered as `"<txid>:<index>"`, descriptor LDK-serialized and hex-encoded.
impl
    From<(
        lightning::chain::transaction::OutPoint,
        lightning::sign::SpendableOutputDescriptor,
    )> for SpendableOutputInsertable
{
    fn from(
        (outpoint, descriptor): (
            lightning::chain::transaction::OutPoint,
            lightning::sign::SpendableOutputDescriptor,
        ),
    ) -> Self {
        let outpoint = outpoint_to_string(outpoint);
        let descriptor = hex::encode(descriptor.encode());
        Self {
            outpoint,
            descriptor,
        }
    }
}
/// Decodes the stored hex descriptor back into an LDK
/// `SpendableOutputDescriptor`; fails on invalid hex or malformed encoding.
impl TryFrom<SpendableOutputQueryable> for lightning::sign::SpendableOutputDescriptor {
    type Error = anyhow::Error;
    fn try_from(value: SpendableOutputQueryable) -> Result<Self, Self::Error> {
        let bytes = hex::decode(value.descriptor)?;
        let descriptor = Self::read(&mut lightning::io::Cursor::new(bytes))
            .map_err(|e| anyhow!("Failed to decode spendable output descriptor: {e}"))?;
        Ok(descriptor)
    }
}
/// Lifecycle states of a channel, stored as TEXT.
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = Text)]
pub enum ChannelState {
    Announced,
    Pending,
    OpenUnpaid,
    Open,
    Closed,
    ForceClosedRemote,
    ForceClosedLocal,
}
// TODO: Get rid of `Channel` and matching table in DB.
/// DB row for the `channels` table.
#[derive(Insertable, QueryableByName, Queryable, Debug, Clone, PartialEq, AsChangeset)]
#[diesel(table_name = channels)]
pub struct Channel {
    pub user_channel_id: String,
    // Only known once the channel is established.
    pub channel_id: Option<String>,
    pub inbound: i64,
    pub outbound: i64,
    pub funding_txid: Option<String>,
    pub channel_state: ChannelState,
    pub counterparty_pubkey: String,
    // Unix timestamps (seconds).
    pub created_at: i64,
    pub updated_at: i64,
    pub liquidity_option_id: Option<i32>,
    pub fee_sats: Option<i64>,
    pub open_channel_payment_hash: Option<String>,
}
impl Channel {
pub fn get(user_channel_id: &str, conn: &mut SqliteConnection) -> QueryResult<Option<Channel>> {
channels::table
.filter(schema::channels::user_channel_id.eq(user_channel_id))
.first(conn)
.optional()
}
pub fn get_announced_channel(
conn: &mut SqliteConnection,
counterparty_pubkey: &str,
) -> QueryResult<Option<Channel>> {
channels::table
.filter(schema::channels::counterparty_pubkey.eq(counterparty_pubkey))
.filter(schema::channels::channel_state.eq(ChannelState::Announced))
.first(conn)
.optional()
}
pub fn get_by_channel_id(
conn: &mut SqliteConnection,
channel_id: &str,
) -> QueryResult<Option<Channel>> {
channels::table
.filter(schema::channels::channel_id.eq(channel_id))
.first(conn)
.optional()
}
pub fn get_channel_by_payment_hash(
conn: &mut SqliteConnection,
payment_hash: &str,
) -> QueryResult<Option<Channel>> {
channels::table
.filter(schema::channels::open_channel_payment_hash.eq(payment_hash))
.first(conn)
.optional()
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | true |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/db/models/funding_fee_event.rs | mobile/native/src/db/models/funding_fee_event.rs | use crate::db::models::ContractSymbol;
use crate::db::models::Direction;
use crate::schema::funding_fee_events;
use bitcoin::SignedAmount;
use diesel::prelude::*;
use diesel::Queryable;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use time::OffsetDateTime;
use xxi_node::commons;
/// Insert-only row for `funding_fee_events`: a funding fee that has not yet
/// been paid (`paid_date` is absent — it stays NULL until `mark_as_paid`).
#[derive(Insertable, Debug, Clone, PartialEq)]
#[diesel(table_name = funding_fee_events)]
pub(crate) struct UnpaidFundingFeeEvent {
    contract_symbol: ContractSymbol,
    contracts: f32,
    direction: Direction,
    price: f32,
    // Fee in sats; sign encodes direction of payment.
    fee: i64,
    // Unix timestamp (seconds).
    due_date: i64,
}
/// Read-side row for `funding_fee_events`, including the auto-increment id
/// and the nullable `paid_date`.
#[derive(Queryable, Debug, Clone, PartialEq)]
#[diesel(table_name = funding_fee_events)]
pub(crate) struct FundingFeeEvent {
    id: i32,
    contract_symbol: ContractSymbol,
    contracts: f32,
    direction: Direction,
    price: f32,
    fee: i64,
    due_date: i64,
    paid_date: Option<i64>,
}
impl UnpaidFundingFeeEvent {
    /// Inserts the funding fee event unless one with the same
    /// (contract_symbol, due_date) already exists. Returns `Some(event)` if a
    /// row was inserted, `None` if the conflict made it a no-op.
    pub fn insert(
        conn: &mut SqliteConnection,
        funding_fee_event: crate::trade::FundingFeeEvent,
    ) -> QueryResult<Option<crate::trade::FundingFeeEvent>> {
        let affected_rows = diesel::insert_into(funding_fee_events::table)
            .values(UnpaidFundingFeeEvent::from(funding_fee_event))
            .on_conflict((
                funding_fee_events::contract_symbol,
                funding_fee_events::due_date,
            ))
            .do_nothing()
            .execute(conn)?;
        if affected_rows >= 1 {
            Ok(Some(funding_fee_event))
        } else {
            Ok(None)
        }
    }
    /// Stamps `paid_date = now` on every still-unpaid event for the given
    /// contract symbol whose due date is at or after `since`.
    pub fn mark_as_paid(
        conn: &mut SqliteConnection,
        contract_symbol: commons::ContractSymbol,
        since: OffsetDateTime,
    ) -> QueryResult<()> {
        diesel::update(funding_fee_events::table)
            .filter(
                funding_fee_events::contract_symbol
                    .eq(ContractSymbol::from(contract_symbol))
                    .and(funding_fee_events::due_date.ge(since.unix_timestamp()))
                    .and(funding_fee_events::paid_date.is_null()),
            )
            .set(funding_fee_events::paid_date.eq(OffsetDateTime::now_utc().unix_timestamp()))
            .execute(conn)?;
        Ok(())
    }
}
impl FundingFeeEvent {
    /// Load every funding fee event row and convert each into the
    /// domain-level [`crate::trade::FundingFeeEvent`].
    pub fn get_all(conn: &mut SqliteConnection) -> QueryResult<Vec<crate::trade::FundingFeeEvent>> {
        let rows: Vec<FundingFeeEvent> = funding_fee_events::table.load(conn)?;

        Ok(rows.into_iter().map(Into::into).collect())
    }
}
impl From<crate::trade::FundingFeeEvent> for UnpaidFundingFeeEvent {
    /// Convert a domain-level funding fee event into an insertable row,
    /// narrowing `Decimal` values to `f32` for storage.
    fn from(
        crate::trade::FundingFeeEvent {
            contract_symbol,
            contracts,
            direction,
            price,
            fee,
            due_date,
            // An unpaid funding fee event should not have a `paid_date`.
            paid_date: _,
        }: crate::trade::FundingFeeEvent,
    ) -> Self {
        Self {
            contract_symbol: contract_symbol.into(),
            contracts: contracts.to_f32().expect("to fit"),
            direction: direction.into(),
            price: price.to_f32().expect("to fit"),
            fee: fee.to_sat(),
            due_date: due_date.unix_timestamp(),
        }
    }
}
impl From<FundingFeeEvent> for crate::trade::FundingFeeEvent {
    /// Convert a database row back into the domain-level representation.
    fn from(
        FundingFeeEvent {
            // The database ID is not part of the domain model.
            id: _,
            contract_symbol,
            contracts,
            direction,
            price,
            fee,
            due_date,
            paid_date,
        }: FundingFeeEvent,
    ) -> Self {
        Self {
            contract_symbol: contract_symbol.into(),
            contracts: Decimal::try_from(contracts).expect("to fit"),
            direction: direction.into(),
            price: Decimal::try_from(price).expect("to fit"),
            fee: SignedAmount::from_sat(fee),
            due_date: OffsetDateTime::from_unix_timestamp(due_date).expect("valid"),
            paid_date: paid_date
                .map(OffsetDateTime::from_unix_timestamp)
                .transpose()
                .expect("valid"),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::db::MIGRATIONS;
    use diesel::Connection;
    use diesel::SqliteConnection;
    use diesel_migrations::MigrationHarness;
    use itertools::Itertools;
    use rust_decimal_macros::dec;
    use time::ext::NumericalDuration;
    use time::OffsetDateTime;
    /// End-to-end round trip: insert, conflict no-op, and selective
    /// `mark_as_paid` against an in-memory database.
    #[test]
    fn test_funding_fee_event() {
        // In-memory SQLite with all migrations applied.
        let mut conn = SqliteConnection::establish(":memory:").unwrap();
        conn.run_pending_migrations(MIGRATIONS).unwrap();
        let contract_symbol = xxi_node::commons::ContractSymbol::BtcUsd;
        let due_date = OffsetDateTime::from_unix_timestamp(1_546_300_800).unwrap();
        let funding_fee_event = crate::trade::FundingFeeEvent::unpaid(
            contract_symbol,
            Decimal::ONE_HUNDRED,
            xxi_node::commons::Direction::Long,
            dec!(70_000),
            SignedAmount::from_sat(100),
            due_date,
        );
        UnpaidFundingFeeEvent::insert(&mut conn, funding_fee_event).unwrap();
        // Does nothing, since `contract_symbol` and `due_date` are the same.
        UnpaidFundingFeeEvent::insert(
            &mut conn,
            crate::trade::FundingFeeEvent {
                contracts: Decimal::ONE_THOUSAND,
                direction: xxi_node::commons::Direction::Short,
                price: dec!(35_000),
                fee: SignedAmount::from_sat(-1_000),
                ..funding_fee_event
            },
        )
        .unwrap();
        // Second event with an earlier due date is a distinct row.
        let funding_fee_event_2 = crate::trade::FundingFeeEvent::unpaid(
            contract_symbol,
            Decimal::ONE_HUNDRED,
            xxi_node::commons::Direction::Long,
            dec!(70_000),
            SignedAmount::from_sat(100),
            due_date - 60.minutes(),
        );
        UnpaidFundingFeeEvent::insert(&mut conn, funding_fee_event_2).unwrap();
        let funding_fee_events = FundingFeeEvent::get_all(&mut conn).unwrap();
        assert_eq!(funding_fee_events.len(), 2);
        assert!(funding_fee_events.contains(&funding_fee_event));
        assert!(funding_fee_events.contains(&funding_fee_event_2));
        // We only mark as paid the funding fee event which has a due date after the third argument
        // to `mark_as_paid`.
        UnpaidFundingFeeEvent::mark_as_paid(&mut conn, contract_symbol, due_date - 30.minutes())
            .unwrap();
        let funding_fee_events = FundingFeeEvent::get_all(&mut conn).unwrap();
        // Exactly one event paid, exactly one still unpaid.
        assert!(funding_fee_events
            .iter()
            .filter(|event| event.paid_date.is_some())
            .exactly_one()
            .is_ok());
        assert!(funding_fee_events
            .iter()
            .filter(|event| event.paid_date.is_none())
            .exactly_one()
            .is_ok());
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/bridge_generated/mod.rs | mobile/native/src/bridge_generated/mod.rs | #[allow(clippy::module_inception)]
mod bridge_generated;
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/event/api.rs | mobile/native/src/event/api.rs | use crate::api::DlcChannel;
use crate::api::TenTenOneConfig;
use crate::api::WalletHistoryItem;
use crate::dlc_channel;
use crate::event;
use crate::event::subscriber::Subscriber;
use crate::event::EventInternal;
use crate::event::EventType;
use crate::health::ServiceUpdate;
use crate::trade::order::api::Order;
use crate::trade::position::api::Position;
use crate::trade::trades::api::Trade;
use core::convert::From;
use flutter_rust_bridge::frb;
use flutter_rust_bridge::StreamSink;
use rust_decimal::prelude::ToPrimitive;
use xxi_node::commons::ContractSymbol;
/// Events exposed to the Flutter UI via the FFI bridge.
#[frb]
#[derive(Clone)]
pub enum Event {
    Init(String),
    Log(String),
    OrderUpdateNotification(Order),
    WalletInfoUpdateNotification(WalletInfo),
    PositionUpdateNotification(Position),
    PositionClosedNotification(PositionClosed),
    AskPriceUpdateNotification(f32),
    BidPriceUpdateNotification(f32),
    ServiceHealthUpdate(ServiceUpdate),
    BackgroundNotification(BackgroundTask),
    Authenticated(TenTenOneConfig),
    DlcChannelEvent(DlcChannel),
    FundingChannelNotification(FundingChannelTask),
    LnPaymentReceived { r_hash: String },
    NewTrade(Trade),
    NextFundingRate(FundingRate),
}
/// Long-running background tasks surfaced to the UI together with their
/// current [`TaskStatus`].
#[frb]
#[derive(Clone)]
pub enum BackgroundTask {
    /// The order book submitted a trade which was matched asynchronously
    AsyncTrade(TaskStatus),
    /// The coordinator expired the users trade
    Expire(TaskStatus),
    /// The order book liquidated the users trade
    Liquidate(TaskStatus),
    /// The order book submitted its intention to rollover the about to expire position.
    Rollover(TaskStatus),
    /// The app was started with a dlc channel in an intermediate state. This task is in pending
    /// until the dlc protocol reaches a final state.
    RecoverDlc(TaskStatus),
    /// The coordinator wants to collaboratively close a ln channel with a stuck position.
    CollabRevert(TaskStatus),
    /// The app is performing a full sync of the on-chain wallet.
    FullSync(TaskStatus),
    /// The app is closing its dlc channel
    CloseChannel(TaskStatus),
}
impl From<EventInternal> for Event {
    /// Convert an internal event into its UI-facing counterpart.
    ///
    /// # Panics
    ///
    /// Panics on `EventInternal::SpendableOutputs` (internal-only) and on
    /// prices/rates that do not fit into an `f32`.
    fn from(value: EventInternal) -> Self {
        match value {
            EventInternal::Init(value) => Event::Init(value),
            EventInternal::Log(value) => Event::Log(value),
            EventInternal::OrderUpdateNotification(value) => {
                Event::OrderUpdateNotification(value.into())
            }
            EventInternal::WalletInfoUpdateNotification(value) => {
                Event::WalletInfoUpdateNotification(value)
            }
            EventInternal::PositionUpdateNotification(position) => {
                Event::PositionUpdateNotification(position.into())
            }
            EventInternal::PositionCloseNotification(contract_symbol) => {
                Event::PositionClosedNotification(PositionClosed { contract_symbol })
            }
            EventInternal::ServiceHealthUpdate(update) => Event::ServiceHealthUpdate(update),
            EventInternal::BackgroundNotification(task) => {
                Event::BackgroundNotification(task.into())
            }
            EventInternal::SpendableOutputs => {
                unreachable!("This internal event is not exposed to the UI")
            }
            EventInternal::Authenticated(config) => Event::Authenticated(config.into()),
            EventInternal::DlcChannelEvent(channel) => {
                Event::DlcChannelEvent(dlc_channel::DlcChannel::from(channel))
            }
            EventInternal::AskPriceUpdateNotification(price) => {
                Event::AskPriceUpdateNotification(price.to_f32().expect("to fit"))
            }
            EventInternal::BidPriceUpdateNotification(price) => {
                Event::BidPriceUpdateNotification(price.to_f32().expect("to fit"))
            }
            EventInternal::FundingChannelNotification(status) => {
                Event::FundingChannelNotification(status.into())
            }
            EventInternal::LnPaymentReceived { r_hash } => Event::LnPaymentReceived { r_hash },
            EventInternal::NewTrade(trade) => Event::NewTrade(trade.into()),
            EventInternal::NextFundingRate(funding_rate) => Event::NextFundingRate(FundingRate {
                rate: funding_rate.rate().to_f32().expect("to fit"),
                end_date: funding_rate.end_date().unix_timestamp(),
            }),
            // Funding fee events are presented to the UI as trades.
            EventInternal::FundingFeeEvent(event) => Event::NewTrade(event.into()),
        }
    }
}
/// Wrapper struct needed by frb
///
/// The mirrored `ContractSymbol` does not get picked up correctly when using it directly as
/// type in an enum variant, so we wrap it in a struct.
#[frb]
#[derive(Clone, Copy)]
pub struct PositionClosed {
    pub contract_symbol: ContractSymbol,
}
/// Forwards internal events to the Flutter side through a stream sink.
#[derive(Clone)]
pub struct FlutterSubscriber {
    stream: StreamSink<Event>,
}
/// Subscribes to events relevant for Flutter and forwards them to the stream sink.
impl Subscriber for FlutterSubscriber {
    fn notify(&self, event: &EventInternal) {
        self.stream.add(event.clone().into());
    }
    /// The event types this subscriber is registered for.
    ///
    /// NOTE(review): `EventInternal::Log` converts to `Event::Log` above, but
    /// `EventType::Log` is not listed here, so log events never reach the UI
    /// stream — confirm this is intentional.
    fn events(&self) -> Vec<EventType> {
        vec![
            EventType::Init,
            EventType::WalletInfoUpdateNotification,
            EventType::OrderUpdateNotification,
            EventType::PositionUpdateNotification,
            EventType::PositionClosedNotification,
            EventType::AskPriceUpdateNotification,
            EventType::BidPriceUpdateNotification,
            EventType::ServiceHealthUpdate,
            EventType::ChannelStatusUpdate,
            EventType::BackgroundNotification,
            EventType::FundingChannelNotification,
            EventType::Authenticated,
            EventType::DlcChannelEvent,
            EventType::NewTrade,
            EventType::NextFundingRate,
        ]
    }
}
impl FlutterSubscriber {
    pub fn new(stream: StreamSink<Event>) -> Self {
        FlutterSubscriber { stream }
    }
}
impl From<event::BackgroundTask> for BackgroundTask {
    /// Map the internal background task representation onto the FFI one.
    fn from(value: event::BackgroundTask) -> Self {
        match value {
            event::BackgroundTask::AsyncTrade(status) => BackgroundTask::AsyncTrade(status.into()),
            event::BackgroundTask::Liquidate(status) => BackgroundTask::Liquidate(status.into()),
            event::BackgroundTask::Expire(status) => BackgroundTask::Expire(status.into()),
            event::BackgroundTask::Rollover(status) => BackgroundTask::Rollover(status.into()),
            event::BackgroundTask::RecoverDlc(status) => BackgroundTask::RecoverDlc(status.into()),
            event::BackgroundTask::CollabRevert(status) => {
                BackgroundTask::CollabRevert(status.into())
            }
            event::BackgroundTask::FullSync(status) => BackgroundTask::FullSync(status.into()),
            event::BackgroundTask::CloseChannel(status) => {
                BackgroundTask::CloseChannel(status.into())
            }
        }
    }
}
/// Status of a background task as exposed to the UI.
#[frb]
#[derive(Clone)]
pub enum TaskStatus {
    Pending,
    Failed(String),
    Success,
}
impl From<event::TaskStatus> for TaskStatus {
    /// One-to-one mapping from the internal task status.
    fn from(value: event::TaskStatus) -> Self {
        match value {
            event::TaskStatus::Pending => TaskStatus::Pending,
            event::TaskStatus::Failed(error) => TaskStatus::Failed(error),
            event::TaskStatus::Success => TaskStatus::Success,
        }
    }
}
/// Wallet balances plus transaction history shown in the UI.
#[frb]
#[derive(Clone, Debug, Default)]
pub struct WalletInfo {
    pub balances: Balances,
    pub history: Vec<WalletHistoryItem>,
}
#[frb]
#[derive(Clone, Debug, Default)]
pub struct Balances {
    // NOTE(review): assumed satoshi denomination — confirm with the producer
    // of this struct.
    pub on_chain: u64,
    pub off_chain: Option<u64>,
}
/// Funding rate together with the end of the funding period as a unix
/// timestamp (seconds).
#[frb]
#[derive(Clone)]
pub struct FundingRate {
    pub rate: f32,
    pub end_date: i64,
}
/// Progress of funding a new channel, surfaced to the UI.
#[frb]
#[derive(Clone)]
pub enum FundingChannelTask {
    Pending,
    Funded,
    Failed(String),
    OrderCreated(String),
}
impl From<event::FundingChannelTask> for FundingChannelTask {
    /// One-to-one mapping from the internal representation.
    fn from(value: event::FundingChannelTask) -> Self {
        match value {
            event::FundingChannelTask::Pending => FundingChannelTask::Pending,
            event::FundingChannelTask::Funded => FundingChannelTask::Funded,
            event::FundingChannelTask::Failed(reason) => FundingChannelTask::Failed(reason),
            event::FundingChannelTask::OrderCreated(order_id) => {
                FundingChannelTask::OrderCreated(order_id)
            }
        }
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/event/mod.rs | mobile/native/src/event/mod.rs | use crate::dlc::DlcChannel;
use crate::event::api::WalletInfo;
use crate::event::event_hub::get;
use crate::event::subscriber::Subscriber;
use crate::health::ServiceUpdate;
use crate::trade::order::Order;
use crate::trade::position::Position;
use crate::trade::FundingFeeEvent;
use crate::trade::Trade;
use rust_decimal::Decimal;
use std::fmt;
use std::hash::Hash;
use xxi_node::commons::ContractSymbol;
use xxi_node::commons::FundingRate;
use xxi_node::commons::TenTenOneConfig;
mod event_hub;
pub mod api;
pub mod subscriber;
/// Register a subscriber with the global event hub.
pub fn subscribe(subscriber: impl Subscriber + 'static + Send + Sync + Clone) {
    get().subscribe(subscriber);
}
/// Publish an event to all subscribers registered for its [`EventType`].
pub fn publish(event: &EventInternal) {
    get().publish(event);
}
/// Events passed around between the native components; most variants are
/// mirrored to the UI via `api::Event` (see the `From` impl there).
#[derive(Clone, Debug)]
pub enum EventInternal {
    Init(String),
    Log(String),
    OrderUpdateNotification(Order),
    WalletInfoUpdateNotification(WalletInfo),
    PositionUpdateNotification(Position),
    PositionCloseNotification(ContractSymbol),
    AskPriceUpdateNotification(Decimal),
    BidPriceUpdateNotification(Decimal),
    ServiceHealthUpdate(ServiceUpdate),
    Authenticated(TenTenOneConfig),
    BackgroundNotification(BackgroundTask),
    // Internal-only; never forwarded to the UI.
    SpendableOutputs,
    DlcChannelEvent(DlcChannel),
    FundingChannelNotification(FundingChannelTask),
    LnPaymentReceived { r_hash: String },
    NewTrade(Trade),
    FundingFeeEvent(FundingFeeEvent),
    NextFundingRate(FundingRate),
}
/// Progress of funding a new channel.
#[derive(Clone, Debug)]
pub enum FundingChannelTask {
    Pending,
    Funded,
    Failed(String),
    OrderCreated(String),
}
/// Background tasks whose status changes are published as events.
#[derive(Clone, Debug)]
pub enum BackgroundTask {
    Liquidate(TaskStatus),
    Expire(TaskStatus),
    AsyncTrade(TaskStatus),
    Rollover(TaskStatus),
    CollabRevert(TaskStatus),
    RecoverDlc(TaskStatus),
    FullSync(TaskStatus),
    CloseChannel(TaskStatus),
}
/// Status of a background task.
#[derive(Clone, Debug)]
pub enum TaskStatus {
    Pending,
    Failed(String),
    Success,
}
impl fmt::Display for EventInternal {
    /// Writes the variant name, mainly useful for logging.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self {
            EventInternal::Init(_) => "Init",
            EventInternal::Log(_) => "Log",
            EventInternal::OrderUpdateNotification(_) => "OrderUpdateNotification",
            EventInternal::WalletInfoUpdateNotification(_) => "WalletInfoUpdateNotification",
            EventInternal::PositionUpdateNotification(_) => "PositionUpdateNotification",
            EventInternal::PositionCloseNotification(_) => "PositionCloseNotification",
            EventInternal::ServiceHealthUpdate(_) => "ServiceHealthUpdate",
            EventInternal::BackgroundNotification(_) => "BackgroundNotification",
            EventInternal::SpendableOutputs => "SpendableOutputs",
            EventInternal::Authenticated(_) => "Authenticated",
            EventInternal::DlcChannelEvent(_) => "DlcChannelEvent",
            EventInternal::AskPriceUpdateNotification(_) => "AskPriceUpdateNotification",
            EventInternal::BidPriceUpdateNotification(_) => "BidPriceUpdateNotification",
            EventInternal::FundingChannelNotification(_) => "FundingChannelNotification",
            EventInternal::LnPaymentReceived { .. } => "LnPaymentReceived",
            EventInternal::NewTrade(_) => "NewTrade",
            EventInternal::FundingFeeEvent(_) => "FundingFeeEvent",
            EventInternal::NextFundingRate(_) => "NextFundingRate",
        };

        f.write_str(name)
    }
}
impl From<EventInternal> for EventType {
    /// Map an event to the type key used for subscription lookup.
    fn from(value: EventInternal) -> Self {
        match value {
            EventInternal::Init(_) => EventType::Init,
            EventInternal::Log(_) => EventType::Log,
            EventInternal::OrderUpdateNotification(_) => EventType::OrderUpdateNotification,
            EventInternal::WalletInfoUpdateNotification(_) => {
                EventType::WalletInfoUpdateNotification
            }
            EventInternal::PositionUpdateNotification(_) => EventType::PositionUpdateNotification,
            EventInternal::PositionCloseNotification(_) => EventType::PositionClosedNotification,
            EventInternal::ServiceHealthUpdate(_) => EventType::ServiceHealthUpdate,
            EventInternal::BackgroundNotification(_) => EventType::BackgroundNotification,
            EventInternal::SpendableOutputs => EventType::SpendableOutputs,
            EventInternal::Authenticated(_) => EventType::Authenticated,
            EventInternal::DlcChannelEvent(_) => EventType::DlcChannelEvent,
            EventInternal::AskPriceUpdateNotification(_) => EventType::AskPriceUpdateNotification,
            EventInternal::BidPriceUpdateNotification(_) => EventType::BidPriceUpdateNotification,
            EventInternal::FundingChannelNotification(_) => EventType::FundingChannelNotification,
            EventInternal::LnPaymentReceived { .. } => EventType::LnPaymentReceived,
            EventInternal::NewTrade(_) => EventType::NewTrade,
            // Funding fee events are delivered to `NewTrade` subscribers; the
            // UI layer renders them as trades (see `api::Event`).
            EventInternal::FundingFeeEvent(_) => EventType::NewTrade,
            EventInternal::NextFundingRate(_) => EventType::NextFundingRate,
        }
    }
}
/// Keys used to register subscribers in the event hub.
#[derive(Copy, Clone, Eq, Hash, PartialEq)]
pub enum EventType {
    Init,
    Log,
    OrderUpdateNotification,
    WalletInfoUpdateNotification,
    // NOTE(review): several variants below (e.g. `OrderFilledWith`,
    // `ChannelReady`, `ChannelStatusUpdate`) are never produced by the
    // `From<EventInternal>` impl above — confirm they are still needed.
    OrderFilledWith,
    PositionUpdateNotification,
    PositionClosedNotification,
    ChannelReady,
    LnPaymentReceived,
    ServiceHealthUpdate,
    ChannelStatusUpdate,
    BackgroundNotification,
    SpendableOutputs,
    Authenticated,
    DlcChannelEvent,
    AskPriceUpdateNotification,
    BidPriceUpdateNotification,
    FundingChannelNotification,
    NewTrade,
    NextFundingRate,
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/event/subscriber.rs | mobile/native/src/event/subscriber.rs | use crate::event::EventInternal;
use crate::event::EventType;
pub trait Subscriber {
    /// Notifies the subscriber about an event it registered for.
    fn notify(&self, event: &EventInternal);
    /// Returns a list of event types the subscriber wants to subscribe to.
    ///
    /// Only queried once, at subscription time (see `EventHub::subscribe`).
    fn events(&self) -> Vec<EventType>;
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/event/event_hub.rs | mobile/native/src/event/event_hub.rs | use crate::event::subscriber::Subscriber;
use crate::event::EventInternal;
use crate::event::EventType;
use parking_lot::Mutex;
use parking_lot::MutexGuard;
use state::Storage;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::Arc;
use std::vec;
// Process-wide singleton holding the event hub.
static EVENT_HUB: Storage<Arc<Mutex<EventHub>>> = Storage::new();
/// Lock and return the global event hub, initializing it on first access.
pub(crate) fn get() -> MutexGuard<'static, EventHub> {
    EVENT_HUB
        .get_or_set(|| {
            Arc::new(Mutex::new(EventHub {
                subscribers: HashMap::new(),
            }))
        })
        .lock()
}
/// Dispatches published events to the subscribers registered for the
/// corresponding [`EventType`].
pub struct EventHub {
    subscribers: HashMap<EventType, Vec<Box<dyn Subscriber + 'static + Send + Sync>>>,
}
impl EventHub {
    /// Subscribes the subscriber to the event types it reports via
    /// [`Subscriber::events`]. Note that the event list is only read once,
    /// here during subscription, and is not consulted again when publishing.
    pub fn subscribe(&mut self, subscriber: impl Subscriber + 'static + Send + Sync + Clone) {
        // One boxed clone of the subscriber per event type it listens to.
        for event_type in subscriber.events() {
            match self.subscribers.entry(event_type) {
                Entry::Vacant(e) => {
                    e.insert(vec![Box::new(subscriber.clone())]);
                }
                Entry::Occupied(mut e) => {
                    e.get_mut().push(Box::new(subscriber.clone()));
                }
            }
        }
    }
    /// Publishes the given event to all subscribers. Note, that this will be executed in a loop.
    pub fn publish(&self, event: &EventInternal) {
        // `EventType::from` takes the event by value, hence the clone.
        if let Some(subscribers) = self.subscribers.get(&EventType::from(event.clone())) {
            for subscriber in subscribers {
                // todo: we should tokio spawn here.
                subscriber.notify(event);
            }
        }
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/config/api.rs | mobile/native/src/config/api.rs | use crate::config::ConfigInternal;
use bitcoin::key::XOnlyPublicKey;
use bitcoin::Network;
use flutter_rust_bridge::frb;
use std::str::FromStr;
/// Configuration handed over from the Flutter side at startup.
#[frb]
#[derive(Debug, Clone)]
pub struct Config {
    pub coordinator_pubkey: String,
    pub electrs_endpoint: String,
    pub host: String,
    pub p2p_port: u16,
    pub http_port: u16,
    pub network: String,
    pub oracle_endpoint: String,
    pub oracle_pubkey: String,
    pub health_check_interval_secs: u64,
    // NOTE(review): not mapped into `ConfigInternal` below — confirm it is
    // consumed elsewhere.
    pub meme_endpoint: String,
}
/// App data and seed directory paths provided by the host platform.
pub struct Directories {
    pub app_dir: String,
    pub seed_dir: String,
}
impl From<(Config, Directories)> for ConfigInternal {
    /// Build the internal config from the Flutter-provided config and
    /// directory paths.
    ///
    /// # Panics
    ///
    /// Panics if the coordinator pubkey, the host/port endpoints or the
    /// oracle pubkey fail to parse.
    fn from(value: (Config, Directories)) -> Self {
        let config = value.0;
        let dirs = value.1;
        tracing::debug!(?config, "Parsing config from flutter");
        Self {
            coordinator_pubkey: config.coordinator_pubkey.parse().expect("PK to be valid"),
            electrs_endpoint: config.electrs_endpoint,
            http_endpoint: format!("{}:{}", config.host, config.http_port)
                .parse()
                .expect("host and http_port to be valid"),
            p2p_endpoint: format!("{}:{}", config.host, config.p2p_port)
                .parse()
                .expect("host and p2p_port to be valid"),
            network: parse_network(&config.network),
            oracle_endpoint: config.oracle_endpoint,
            oracle_pubkey: XOnlyPublicKey::from_str(config.oracle_pubkey.as_str())
                .expect("Valid oracle public key"),
            health_check_interval: std::time::Duration::from_secs(
                config.health_check_interval_secs,
            ),
            data_dir: dirs.app_dir,
            seed_dir: dirs.seed_dir,
        }
    }
}
/// Map a network name string to a [`Network`].
///
/// Unknown names (including the empty string) silently fall back to
/// `Network::Regtest`; callers relying on strict validation must check the
/// input themselves.
pub fn parse_network(network: &str) -> Network {
    match network {
        "signet" => Network::Signet,
        "testnet" => Network::Testnet,
        // Both spellings are accepted for the Bitcoin main chain.
        "mainnet" | "bitcoin" => Network::Bitcoin,
        _ => Network::Regtest,
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/config/mod.rs | mobile/native/src/config/mod.rs | use bitcoin::key::XOnlyPublicKey;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Network;
use std::net::SocketAddr;
use std::path::Path;
use std::time::Duration;
use xxi_node::node::NodeInfo;
use xxi_node::node::OracleInfo;
pub mod api;
/// Fully parsed runtime configuration, derived from the Flutter-provided
/// `api::Config` plus the platform directories.
#[derive(Clone)]
pub struct ConfigInternal {
    coordinator_pubkey: PublicKey,
    electrs_endpoint: String,
    // Coordinator HTTP (and websocket) endpoint.
    http_endpoint: SocketAddr,
    #[allow(dead_code)] // Irrelevant when using websockets
    p2p_endpoint: SocketAddr,
    network: Network,
    oracle_endpoint: String,
    oracle_pubkey: XOnlyPublicKey,
    health_check_interval: Duration,
    data_dir: String,
    seed_dir: String,
}
/// URL of the coordinator's HTTP health endpoint.
pub fn coordinator_health_endpoint() -> String {
    let config = crate::state::get_config();
    format!("http://{}/health", config.http_endpoint)
}
/// How often the coordinator health check should run.
pub fn health_check_interval() -> Duration {
    crate::state::get_config().health_check_interval
}
/// Connection details for the coordinator node.
///
/// Depending on the enabled transport feature, either the websocket (HTTP)
/// endpoint or the native TCP (p2p) endpoint is used.
pub fn get_coordinator_info() -> NodeInfo {
    let config = crate::state::get_config();
    #[cfg(feature = "ws")]
    #[allow(unused_variables)] // In case both features are enabled
    let (address, is_ws) = (config.http_endpoint, true);
    #[cfg(feature = "native_tcp")]
    let (address, is_ws) = (config.p2p_endpoint, false);
    NodeInfo {
        pubkey: config.coordinator_pubkey,
        address,
        is_ws,
    }
}
/// Electrs server endpoint used for on-chain data.
pub fn get_electrs_endpoint() -> String {
    crate::state::get_config().electrs_endpoint
}
/// Oracle connection details from the global config.
pub fn get_oracle_info() -> OracleInfo {
    let config = crate::state::get_config();
    OracleInfo {
        // `get_config` hands back an owned config (sibling accessors move
        // fields out of it directly), so the endpoint can be moved here; the
        // previous `.clone()` was a redundant allocation.
        endpoint: config.oracle_endpoint,
        public_key: config.oracle_pubkey,
    }
}
/// Coordinator HTTP endpoint.
pub fn get_http_endpoint() -> SocketAddr {
    crate::state::get_config().http_endpoint
}
/// Configured Bitcoin network.
pub fn get_network() -> Network {
    crate::state::get_config().network
}
/// Application data directory.
pub fn get_data_dir() -> String {
    crate::state::get_config().data_dir
}
/// Directory holding the wallet seed.
pub fn get_seed_dir() -> String {
    crate::state::get_config().seed_dir
}
/// Backup directory: `<data_dir>/<network>/backup`.
pub fn get_backup_dir() -> String {
    Path::new(&get_data_dir())
        .join(get_network().to_string())
        .join("backup")
        .to_string_lossy()
        .to_string()
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/trade/mod.rs | mobile/native/src/trade/mod.rs | pub mod funding_fee_event;
pub mod order;
pub mod position;
pub mod trades;
pub mod users;
pub use funding_fee_event::FundingFeeEvent;
pub use trades::Trade;
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/trade/position/api.rs | mobile/native/src/trade/position/api.rs | use crate::trade::position;
use flutter_rust_bridge::frb;
use xxi_node::commons::ContractSymbol;
use xxi_node::commons::Direction;
/// FFI mirror of `position::PositionState`.
#[frb]
#[derive(Debug, Clone, PartialEq, Copy)]
pub enum PositionState {
    /// The position is open
    ///
    /// Open in the sense, that there is an active position that is being rolled-over.
    /// Note that a "closed" position does not exist, but is just removed.
    /// During the process of getting closed (after creating the counter-order that will wipe out
    /// the position), the position is in state "Closing".
    ///
    /// Transitions:
    /// ->Open
    /// Rollover->Open
    Open,
    /// The position is in the process of being closed
    ///
    /// The user has created an order that will wipe out the position.
    /// Once this order has been filled the "closed" the position is not shown in the user
    /// interface, so we don't have a "closed" state because no position data will be provided to
    /// the user interface.
    /// Transitions:
    /// Open->Closing
    Closing,
    /// The position is in rollover
    ///
    /// This is a technical intermediate state indicating that a rollover is currently in progress.
    ///
    /// Transitions:
    /// Open->Rollover
    Rollover,
    /// The position is being resized.
    ///
    /// Transitions:
    /// Open->Resizing.
    Resizing,
}
/// FFI mirror of `position::Position`.
#[frb]
#[derive(Debug, Clone)]
pub struct Position {
    pub leverage: f32,
    pub quantity: f32,
    pub contract_symbol: ContractSymbol,
    pub direction: Direction,
    pub average_entry_price: f32,
    pub liquidation_price: f32,
    pub position_state: PositionState,
    // Margin in satoshis (see `position::Position::collateral`).
    pub collateral: u64,
    // Unix timestamp (seconds).
    pub expiry: i64,
    pub stable: bool,
}
impl From<position::PositionState> for PositionState {
    /// One-to-one mapping from the domain position state.
    fn from(value: position::PositionState) -> Self {
        match value {
            position::PositionState::Open => PositionState::Open,
            position::PositionState::Closing => PositionState::Closing,
            position::PositionState::Rollover => PositionState::Rollover,
            position::PositionState::Resizing => PositionState::Resizing,
        }
    }
}
impl From<position::Position> for Position {
    /// Convert the domain position into its FFI representation, flattening
    /// the expiry into a unix timestamp.
    fn from(value: position::Position) -> Self {
        Position {
            leverage: value.leverage,
            quantity: value.quantity,
            contract_symbol: value.contract_symbol,
            direction: value.direction,
            average_entry_price: value.average_entry_price,
            liquidation_price: value.liquidation_price,
            position_state: value.position_state.into(),
            collateral: value.collateral,
            expiry: value.expiry.unix_timestamp(),
            stable: value.stable,
        }
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/trade/position/mod.rs | mobile/native/src/trade/position/mod.rs | use crate::calculations::calculate_liquidation_price;
use crate::calculations::calculate_pnl;
use crate::get_maintenance_margin_rate;
use crate::trade::order::Order;
use crate::trade::order::OrderState;
use crate::trade::order::OrderType;
use crate::trade::FundingFeeEvent;
use crate::trade::Trade;
use anyhow::bail;
use anyhow::ensure;
use anyhow::Result;
use bitcoin::Amount;
use bitcoin::SignedAmount;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use rust_decimal::RoundingStrategy;
use serde::Serialize;
use time::OffsetDateTime;
use xxi_node::cfd::calculate_leverage;
use xxi_node::commons;
use xxi_node::commons::ContractSymbol;
use xxi_node::commons::Direction;
use xxi_node::node::ProtocolId;
pub mod api;
pub mod handler;
/// State of a [`Position`]; mirrored to the UI by `api::PositionState`.
#[derive(Debug, Clone, PartialEq, Copy, Serialize)]
pub enum PositionState {
    /// The position is open
    ///
    /// Open in the sense, that there is an active position that is being rolled-over.
    /// Note that a "closed" position does not exist, but is just removed.
    /// During the process of getting closed (after creating the counter-order that will wipe out
    /// the position), the position is in state "Closing".
    ///
    /// Transitions:
    /// ->Open
    /// Rollover->Open
    Open,
    /// The position is in the process of being closed
    ///
    /// The user has created an order that will wipe out the position.
    /// Once this order has been filled the "closed" the position is not shown in the user
    /// interface, so we don't have a "closed" state because no position data will be provided to
    /// the user interface.
    /// Transitions:
    /// Open->Closing
    Closing,
    /// The position is in rollover
    ///
    /// This is a technical intermediate state indicating that a rollover is currently in progress.
    ///
    /// Transitions:
    /// Open->Rollover
    Rollover,
    /// The position is being resized.
    ///
    /// Transitions:
    /// Open->Resizing.
    Resizing,
}
/// An open (or transitioning) position in a perpetual contract.
#[derive(Debug, Clone, Copy, Serialize)]
pub struct Position {
    pub leverage: f32,
    pub quantity: f32,
    pub contract_symbol: ContractSymbol,
    pub direction: Direction,
    pub average_entry_price: f32,
    pub liquidation_price: f32,
    pub position_state: PositionState,
    // Margin in satoshis backing this position.
    pub collateral: u64,
    #[serde(with = "time::serde::rfc3339")]
    pub expiry: OffsetDateTime,
    #[serde(with = "time::serde::rfc3339")]
    pub updated: OffsetDateTime,
    #[serde(with = "time::serde::rfc3339")]
    pub created: OffsetDateTime,
    pub stable: bool,
    // Matching fees paid for this position (presumably accumulated across
    // orders — confirm in `apply_order`).
    #[serde(with = "bitcoin::amount::serde::as_sat")]
    pub order_matching_fees: Amount,
}
impl Position {
    /// Construct a new open position from an initial [`OrderState::Filled`] order.
    ///
    /// Returns the position together with the [`Trade`] recording the opening
    /// execution.
    ///
    /// # Panics
    ///
    /// Panics if the order has no execution price or matching fee, i.e. if it
    /// is not filled.
    pub fn new_open(order: Order, expiry: OffsetDateTime) -> (Self, Trade) {
        let now_timestamp = OffsetDateTime::now_utc();
        let average_entry_price = order.execution_price().expect("order to be filled");
        let matching_fee = order.matching_fee().expect("order to be filled");
        let maintenance_margin_rate = get_maintenance_margin_rate();
        let liquidation_price = calculate_liquidation_price(
            average_entry_price,
            order.leverage,
            order.direction,
            maintenance_margin_rate,
        );
        let contracts = decimal_from_f32(order.quantity);
        // Margin backing the position: contracts / (leverage * entry price),
        // rounded half-away-from-zero to 8 decimal places (1 satoshi).
        let collateral = {
            let leverage = decimal_from_f32(order.leverage);
            let average_entry_price = decimal_from_f32(average_entry_price);
            let collateral_btc = contracts / (leverage * average_entry_price);
            let collateral_btc = collateral_btc
                .round_dp_with_strategy(8, RoundingStrategy::MidpointAwayFromZero)
                .to_f64()
                .expect("to fit");
            Amount::from_btc(collateral_btc).expect("to fit")
        };
        let position = Self {
            leverage: order.leverage,
            quantity: order.quantity,
            contract_symbol: order.contract_symbol,
            direction: order.direction,
            average_entry_price,
            liquidation_price,
            position_state: PositionState::Open,
            collateral: collateral.to_sat(),
            expiry,
            updated: now_timestamp,
            created: now_timestamp,
            stable: order.stable,
            order_matching_fees: matching_fee,
        };
        let average_entry_price = decimal_from_f32(average_entry_price);
        // `trade_cost` is a file-local helper; on open there is no prior
        // margin to offset, so the second argument is zero.
        let margin_diff = collateral.to_signed().expect("to fit");
        let trade_cost = trade_cost(margin_diff, SignedAmount::ZERO, matching_fee);
        let trade = Trade {
            order_id: order.id,
            contract_symbol: order.contract_symbol,
            contracts,
            direction: order.direction,
            trade_cost,
            fee: matching_fee,
            pnl: None,
            price: average_entry_price,
            timestamp: now_timestamp,
        };
        (position, trade)
    }
    /// Apply a filled (or filling) market [`Order`] to this position.
    ///
    /// Returns the resulting position (`None` if the order closed it
    /// completely) together with the [`Trade`]s generated along the way.
    ///
    /// # Errors
    ///
    /// Fails if the order is not filling/filled, if the contract symbols
    /// differ, or if the order is not a market order.
    pub fn apply_order(
        self,
        order: Order,
        expiry: OffsetDateTime,
    ) -> Result<(Option<Self>, Vec<Trade>)> {
        match order {
            Order {
                state: OrderState::Filled { .. } | OrderState::Filling { .. },
                ..
            } => {}
            _ => bail!("Cannot apply order that is not filling or filled"),
        };
        ensure!(
            self.contract_symbol == order.contract_symbol,
            "Cannot apply order to position if contract symbol does not match"
        );
        ensure!(
            order.order_type == OrderType::Market,
            "Cannot apply limit order to position"
        );
        // Fall back to a zero fee if the order does not carry one.
        let matching_fee = order.matching_fee().unwrap_or_default();
        let mut trades = Vec::new();
        let position = self.apply_order_recursive(order, expiry, matching_fee, &mut trades)?;
        Ok((position, trades))
    }
/// Apply an [`Order`] to the [`Position`] recursively until the order is fully applied. Each
/// application of the order can (1) reduce the position, (2) reduce the position down to zero
/// contracts or (3) increase the position. By combining (2) and (3) through recursion we are
/// able to apply orders which change the direction of the position.
///
/// NOTE: The order's leverage is ignored when applying an order to an existing position. It
/// does not seem to make much sense to allow a trader to both change the number and/or
/// direction of contracts (the point of creating a new order) _and_ to change the leverage.
/// Also, it's not so straightforward to calculate combined leverages, particularly when
/// reducing a position.
///
/// TODO: This highlights the fact that orders really shouldn't have a leverage associated with
/// them!
fn apply_order_recursive(
self,
order: Order,
expiry: OffsetDateTime,
matching_fee: Amount,
trades: &mut Vec<Trade>,
) -> Result<Option<Self>> {
// The order has been fully applied.
if order.quantity == 0.0 {
// The position has vanished.
if self.quantity == 0.0 {
return Ok(None);
}
// What is left of the position after fully applying the order.
else {
return Ok(Some(self));
}
}
let now_timestamp = OffsetDateTime::now_utc();
let order_id = order.id;
let starting_contracts = decimal_from_f32(self.quantity);
let starting_leverage = decimal_from_f32(self.leverage);
let starting_average_execution_price = decimal_from_f32(self.average_entry_price);
let order_contracts = decimal_from_f32(order.quantity);
let order_execution_price = decimal_from_f32(
order
.execution_price()
.expect("order to have an execution price"),
);
// If the directions differ (and the position has contracts!) we must reduce the position
// (first).
let (position, order, trade, leftover_matching_fee): (Position, Order, Trade, Amount) =
if self.quantity != 0.0 && order.direction != self.direction {
let contract_diff = self.quantity - order.quantity;
// Reduce position and order to 0.
if contract_diff == 0.0 {
// The margin difference corresponds to the entire margin for the position being
// closed, as a negative number.
let margin_diff = {
let margin_before_btc = starting_contracts
/ (starting_leverage * starting_average_execution_price);
let margin_before_btc = margin_before_btc
.round_dp_with_strategy(8, RoundingStrategy::MidpointAwayFromZero)
.to_f64()
.expect("margin to fit into f64");
// `margin_before_btc` is a positive number so we have to make it negative
// so that reducing the position results in a
// negative `trade_cost` i.e. money into
// the off-chain wallet.
SignedAmount::from_btc(-margin_before_btc)
.expect("margin diff to fit into SignedAmount")
};
let pnl = {
let pnl = calculate_pnl(
self.average_entry_price,
commons::Price {
bid: order_execution_price,
ask: order_execution_price,
},
order.quantity,
self.leverage,
self.direction,
)?;
SignedAmount::from_sat(pnl)
};
let trade_cost = trade_cost(margin_diff, pnl, matching_fee);
let trade = Trade {
order_id,
contract_symbol: order.contract_symbol,
contracts: order_contracts,
direction: order.direction,
trade_cost,
fee: matching_fee,
pnl: Some(pnl),
price: order_execution_price,
timestamp: now_timestamp,
};
let position = Position {
quantity: 0.0,
order_matching_fees: self.order_matching_fees + matching_fee,
..self
};
let order = Order {
quantity: 0.0,
..order
};
(position, order, trade, Amount::ZERO)
}
// Reduce position and consume entire order.
else if contract_diff.is_sign_positive() {
let order_contracts_relative =
compute_relative_contracts(order.quantity, order.direction);
let (new_collateral, closed_collateral) = {
let contract_diff = Decimal::try_from(contract_diff).expect("to fit");
let new_collateral_btc =
contract_diff / (starting_leverage * starting_average_execution_price);
let new_collateral_btc = new_collateral_btc
.round_dp_with_strategy(8, RoundingStrategy::MidpointAwayFromZero)
.to_f64()
.expect("to fit");
let new_collateral = Amount::from_btc(new_collateral_btc).expect("to fit");
let closed_collateral_btc = order_contracts
/ (starting_leverage * starting_average_execution_price);
let closed_collateral_btc = closed_collateral_btc
.abs()
.round_dp_with_strategy(8, RoundingStrategy::MidpointAwayFromZero)
.to_f64()
.expect("collateral to fit into f64");
let closed_collateral = Amount::from_btc(closed_collateral_btc)
.expect("collateral to fit into Amount");
(new_collateral, closed_collateral)
};
let position = Position {
leverage: f32_from_decimal(starting_leverage),
quantity: contract_diff,
contract_symbol: self.contract_symbol,
direction: self.direction,
average_entry_price: self.average_entry_price,
liquidation_price: self.liquidation_price,
position_state: PositionState::Open,
collateral: new_collateral.to_sat(),
expiry,
updated: now_timestamp,
created: self.created,
stable: self.stable,
order_matching_fees: self.order_matching_fees + matching_fee,
};
let pnl = {
let pnl = calculate_pnl(
self.average_entry_price,
commons::Price {
bid: order_execution_price,
ask: order_execution_price,
},
order.quantity,
self.leverage,
self.direction,
)?;
SignedAmount::from_sat(pnl)
};
let trade_cost = {
// Negative cost for closing a position i.e. gain since the collateral is
// returned.
let closed_collateral = closed_collateral.to_signed().expect("to fit") * -1;
trade_cost(closed_collateral, pnl, matching_fee)
};
let trade = Trade {
order_id,
contract_symbol: self.contract_symbol,
contracts: order_contracts_relative.abs(),
direction: order.direction,
trade_cost,
fee: matching_fee,
pnl: Some(pnl),
price: order_execution_price,
timestamp: now_timestamp,
};
let order = Order {
quantity: 0.0,
..order
};
(position, order, trade, Amount::ZERO)
}
// Reduce position to 0, with leftover order.
else {
let leftover_order_contracts = contract_diff.abs();
let (matching_fee_this_trade, matching_fee_next_trade) = {
let leftover_order_contracts = decimal_from_f32(leftover_order_contracts);
let full_matching_fee = Decimal::from(matching_fee.to_sat());
let matching_fee_next_trade =
(leftover_order_contracts / order_contracts) * full_matching_fee;
let matching_fee_next_trade =
Amount::from_sat(matching_fee_next_trade.to_u64().expect("to fit"));
let matching_fee_this_trade = matching_fee - matching_fee_next_trade;
(matching_fee_this_trade, matching_fee_next_trade)
};
let closed_collateral = {
let closed_collateral_btc = starting_contracts
/ (starting_leverage * starting_average_execution_price);
let closed_collateral_btc = closed_collateral_btc
.abs()
.round_dp_with_strategy(8, RoundingStrategy::MidpointAwayFromZero)
.to_f64()
.expect("collateral to fit into f64");
Amount::from_btc(closed_collateral_btc)
.expect("collateral to fit into Amount")
};
let pnl = {
let pnl = calculate_pnl(
self.average_entry_price,
commons::Price {
bid: order_execution_price,
ask: order_execution_price,
},
self.quantity,
self.leverage,
self.direction,
)?;
SignedAmount::from_sat(pnl)
};
let trade_cost = {
// Negative cost for closing a position i.e. gain since the collateral is
// returned.
let closed_collateral = closed_collateral.to_signed().expect("to fit") * -1;
trade_cost(closed_collateral, pnl, matching_fee_this_trade)
};
let trade = Trade {
order_id,
contract_symbol: order.contract_symbol,
contracts: starting_contracts,
direction: order.direction,
trade_cost,
fee: matching_fee_this_trade,
pnl: Some(pnl),
price: order_execution_price,
timestamp: now_timestamp,
};
let position = Position {
quantity: 0.0,
order_matching_fees: self.order_matching_fees + matching_fee_this_trade,
..self
};
// Reduce the order without vanishing it.
let order = Order {
quantity: leftover_order_contracts,
..order
};
(position, order, trade, matching_fee_next_trade)
}
}
// If the directions agree or the position has no contracts we must increase the
// position.
else {
let starting_contracts_relative =
compute_relative_contracts(self.quantity, self.direction);
let order_contracts_relative =
compute_relative_contracts(order.quantity, order.direction);
let total_contracts_relative =
starting_contracts_relative + order_contracts_relative;
let updated_average_execution_price = total_contracts_relative
/ (starting_contracts_relative / starting_average_execution_price
+ order_contracts_relative / order_execution_price);
let maintenance_margin_rate = get_maintenance_margin_rate();
let updated_liquidation_price = calculate_liquidation_price(
f32_from_decimal(updated_average_execution_price),
f32_from_decimal(starting_leverage),
order.direction,
maintenance_margin_rate,
);
let updated_collateral = {
let updated_collateral_btc = total_contracts_relative
/ (starting_leverage * updated_average_execution_price);
let updated_collateral_btc = updated_collateral_btc
.abs()
.round_dp_with_strategy(8, RoundingStrategy::MidpointAwayFromZero)
.to_f64()
.expect("collateral to fit into f64");
Amount::from_btc(updated_collateral_btc)
.expect("collateral to fit into Amount")
.to_sat()
};
let stable = self.stable && order.stable && self.direction == Direction::Short;
let position = Position {
leverage: f32_from_decimal(starting_leverage),
quantity: f32_from_decimal(total_contracts_relative.abs()),
contract_symbol: self.contract_symbol,
direction: order.direction,
average_entry_price: f32_from_decimal(updated_average_execution_price),
liquidation_price: updated_liquidation_price,
position_state: PositionState::Open,
collateral: updated_collateral,
expiry,
updated: now_timestamp,
created: self.created,
stable,
order_matching_fees: self.order_matching_fees + matching_fee,
};
let margin_diff = {
let margin_before_btc = starting_contracts_relative.abs()
/ (starting_leverage * starting_average_execution_price);
let margin_after_btc = total_contracts_relative.abs()
/ (starting_leverage * updated_average_execution_price);
let margin_diff_btc = (margin_after_btc - margin_before_btc)
.round_dp_with_strategy(8, RoundingStrategy::MidpointAwayFromZero)
.to_f64()
.expect("margin to fit into f64");
SignedAmount::from_btc(margin_diff_btc)
.expect("margin to fit into SignedAmount")
};
let trade_cost = trade_cost(margin_diff, SignedAmount::ZERO, matching_fee);
let trade = Trade {
order_id,
contract_symbol: order.contract_symbol,
contracts: order_contracts,
direction: order.direction,
trade_cost,
fee: matching_fee,
pnl: None,
price: order_execution_price,
timestamp: now_timestamp,
};
let order = Order {
quantity: 0.0,
..order
};
(position, order, trade, Amount::ZERO)
};
trades.push(trade);
position.apply_order_recursive(order, expiry, leftover_matching_fee, trades)
}
/// Apply [`FundingFeeEvent`]s to a [`Position`].
fn apply_funding_fee_events(self, funding_fee_events: &[FundingFeeEvent]) -> Result<Self> {
let funding_fee: SignedAmount = funding_fee_events.iter().map(|event| event.fee).sum();
let (collateral, leverage, liquidation_price) = if funding_fee.is_positive() {
// Trader pays.
let collateral = self
.collateral
.checked_sub(funding_fee.to_sat() as u64)
.unwrap_or_default();
let collateral = Amount::from_sat(collateral);
let leverage = {
let quantity = Decimal::try_from(self.quantity).expect("to fit");
let average_entry_price =
Decimal::try_from(self.average_entry_price).expect("to fit");
let leverage = calculate_leverage(quantity, collateral, average_entry_price);
leverage.to_f32().expect("to fit")
};
let maintenance_margin_rate = get_maintenance_margin_rate();
let liquidation_price = calculate_liquidation_price(
self.average_entry_price,
leverage,
self.direction,
maintenance_margin_rate,
);
(collateral.to_sat(), leverage, liquidation_price)
} else {
// Coordinator pays.
(self.collateral, self.leverage, self.liquidation_price)
};
Ok(Self {
collateral,
leverage,
liquidation_price,
updated: OffsetDateTime::now_utc(),
..self
})
}
/// Start rollover protocol.
fn start_rollover(self, expiry: OffsetDateTime) -> Self {
Self {
expiry,
position_state: PositionState::Rollover,
updated: OffsetDateTime::now_utc(),
..self
}
}
/// Finish rollover protocol.
fn finish_rollover(self) -> Self {
Self {
position_state: PositionState::Open,
updated: OffsetDateTime::now_utc(),
..self
}
}
}
/// The _cost_ of a trade is computed as the change in margin (positive if the margin _increases_),
/// plus the PNL (positive if the PNL is a loss), plus the fee (always positive because fees are
/// always a cost).
fn trade_cost(margin_diff: SignedAmount, pnl: SignedAmount, fee: Amount) -> SignedAmount {
let fee = fee.to_signed().expect("fee to fit into SignedAmount");
// We have to flip the sign of the PNL because it inherently uses _negative numbers for losses_,
// but here we want _costs to be positive_.
margin_diff - pnl + fee
}
#[track_caller]
fn decimal_from_f32(float: f32) -> Decimal {
Decimal::from_f32(float).expect("f32 to fit into Decimal")
}
#[track_caller]
fn f32_from_decimal(decimal: Decimal) -> f32 {
decimal.to_f32().expect("Decimal to fit into f32")
}
/// Compute the number of contracts for the [`Order`] relative to its [`Direction`].
fn compute_relative_contracts(contracts: f32, direction: Direction) -> Decimal {
let contracts = decimal_from_f32(contracts)
// We round to 2 decimal places to avoid slight differences between opening and
// closing orders.
.round_dp_with_strategy(2, RoundingStrategy::MidpointAwayFromZero);
match direction {
Direction::Long => contracts,
Direction::Short => -contracts,
}
}
/// The rollover parameters can be stored after receiving a [`TenTenOneRolloverOffer`], so that they
/// can be used to modify the [`Position`] after the rollover has been finalized.
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct RolloverParams {
pub protocol_id: ProtocolId,
/// The contract symbol identifies the position, since we can only have one position per
/// contract symbol.
pub contract_symbol: ContractSymbol,
/// The sign determines who pays who. A positive signs denotes that the trader pays the
/// coordinator. A negative sign denotes that the coordinator pays the trader.
pub funding_fee: SignedAmount,
/// Rolling over sets a new expiry time.
pub expiry: OffsetDateTime,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::trade::order::OrderReason;
use rust_decimal_macros::dec;
use uuid::Uuid;
// TODO: Use `insta`.
#[test]
fn open_position() {
let now = OffsetDateTime::now_utc();
let order = Order {
id: Uuid::new_v4(),
leverage: 1.0,
quantity: 25.0,
contract_symbol: ContractSymbol::BtcUsd,
direction: Direction::Short,
order_type: OrderType::Market,
state: OrderState::Filled {
execution_price: 32_000.0,
matching_fee: Amount::from_sat(1000),
},
creation_timestamp: now,
order_expiry_timestamp: now,
reason: OrderReason::Manual,
stable: true,
failure_reason: None,
};
let (position, opening_trade) = Position::new_open(order.clone(), now);
assert_eq!(position.leverage, 1.0);
assert_eq!(position.quantity, 25.0);
assert_eq!(position.contract_symbol, position.contract_symbol);
assert_eq!(position.direction, order.direction);
assert_eq!(position.average_entry_price, 32_000.0);
assert_eq!(position.liquidation_price, 1_048_575.0);
assert_eq!(position.position_state, PositionState::Open);
assert_eq!(position.collateral, 78_125);
assert!(position.stable);
assert_eq!(position.order_matching_fees, Amount::from_sat(1000));
assert_eq!(opening_trade.order_id, order.id);
assert_eq!(opening_trade.contract_symbol, order.contract_symbol);
assert_eq!(opening_trade.contracts, dec!(25));
assert_eq!(opening_trade.direction, order.direction);
assert_eq!(opening_trade.trade_cost, SignedAmount::from_sat(79_125));
assert_eq!(opening_trade.fee, Amount::from_sat(1000));
assert_eq!(opening_trade.pnl, None);
assert_eq!(
opening_trade.price,
decimal_from_f32(order.execution_price().unwrap())
);
}
#[test]
fn close_position() {
let now = OffsetDateTime::now_utc();
let position = Position {
leverage: 2.0,
quantity: 10.0,
contract_symbol: ContractSymbol::BtcUsd,
direction: Direction::Long,
average_entry_price: 36_469.5,
liquidation_price: 24_313.0,
position_state: PositionState::Open,
collateral: 13_710,
expiry: now,
updated: now,
created: now,
stable: false,
order_matching_fees: Amount::from_sat(1000),
};
let order = Order {
id: Uuid::new_v4(),
leverage: 2.0,
quantity: 10.0,
contract_symbol: ContractSymbol::BtcUsd,
direction: Direction::Short,
order_type: OrderType::Market,
state: OrderState::Filled {
execution_price: 36_401.5,
matching_fee: Amount::from_sat(1000),
},
creation_timestamp: now,
order_expiry_timestamp: now,
reason: OrderReason::Manual,
stable: false,
failure_reason: None,
};
let (updated_position, trades) = position.apply_order(order.clone(), now).unwrap();
assert!(updated_position.is_none());
let closing_trade = match trades.as_slice() {
[closing_trade] => closing_trade,
trades => panic!("Unexpected number of trades: {}", trades.len()),
};
assert_eq!(closing_trade.order_id, order.id);
assert_eq!(closing_trade.contract_symbol, order.contract_symbol);
assert_eq!(closing_trade.contracts, Decimal::TEN);
assert_eq!(closing_trade.direction, order.direction);
assert_eq!(closing_trade.trade_cost, SignedAmount::from_sat(-12_659));
assert_eq!(closing_trade.fee, Amount::from_sat(1000));
assert_eq!(closing_trade.pnl, Some(SignedAmount::from_sat(-51)));
assert_eq!(
closing_trade.price,
decimal_from_f32(order.execution_price().unwrap())
);
}
#[test]
fn extend_position() {
let now = OffsetDateTime::now_utc();
let position = Position {
leverage: 2.0,
quantity: 10.0,
contract_symbol: ContractSymbol::BtcUsd,
direction: Direction::Long,
average_entry_price: 36_469.5,
liquidation_price: 24_313.0,
position_state: PositionState::Resizing,
collateral: 13_710,
expiry: now,
updated: now,
created: now,
stable: false,
order_matching_fees: Amount::from_sat(1000),
};
let order = Order {
id: Uuid::new_v4(),
leverage: 2.0,
quantity: 5.0,
contract_symbol: ContractSymbol::BtcUsd,
direction: Direction::Long,
order_type: OrderType::Market,
state: OrderState::Filled {
execution_price: 36_401.5,
matching_fee: Amount::from_sat(1000),
},
creation_timestamp: now,
order_expiry_timestamp: now,
reason: OrderReason::Manual,
stable: false,
failure_reason: None,
};
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | true |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/trade/position/handler.rs | mobile/native/src/trade/position/handler.rs | use crate::db;
use crate::event;
use crate::event::EventInternal;
use crate::trade::order::Order;
use crate::trade::position::Position;
use crate::trade::position::PositionState;
use crate::trade::trades::handler::new_trades;
use crate::trade::FundingFeeEvent;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use rust_decimal::Decimal;
use time::OffsetDateTime;
use xxi_node::commons::ContractSymbol;
use xxi_node::commons::Direction;
/// Fetch the positions from the database
pub fn get_positions() -> Result<Vec<Position>> {
db::get_positions()
}
/// Update the position once an order was submitted
///
/// If the new order submitted is an order that closes the current position, then the position will
/// be updated to `Closing` state.
pub fn update_position_after_order_submitted(submitted_order: &Order) -> Result<()> {
if let Some(position) = get_position_matching_order(submitted_order)? {
let position = db::update_position_state(position.contract_symbol, PositionState::Closing)?;
event::publish(&EventInternal::PositionUpdateNotification(position));
}
Ok(())
}
/// If the submitted order would close the current [`Position`], return the [`position`].
pub fn get_position_matching_order(order: &Order) -> Result<Option<Position>> {
match db::get_positions()?.first() {
Some(position)
if position.direction != order.direction && position.quantity == order.quantity =>
{
Ok(Some(*position))
}
_ => Ok(None),
}
}
/// Set the position to the given [`PositionState`].
pub fn set_position_state(state: PositionState) -> Result<()> {
if let Some(position) = db::get_positions()?.first() {
let position = db::update_position_state(position.contract_symbol, state)?;
event::publish(&EventInternal::PositionUpdateNotification(position));
}
Ok(())
}
pub fn handle_renew_offer() -> Result<()> {
if let Some(position) = db::get_positions()?.first() {
tracing::debug!("Received renew offer to resize position");
let position =
db::update_position_state(position.contract_symbol, PositionState::Resizing)?;
event::publish(&EventInternal::PositionUpdateNotification(position));
} else {
// If we have no position, we must be opening a new one.
tracing::info!("Received renew offer to open new position");
}
Ok(())
}
pub fn handle_rollover_offer(
expiry_timestamp: OffsetDateTime,
funding_fee_events: &[FundingFeeEvent],
) -> Result<()> {
tracing::debug!("Setting position state to rollover");
let positions = &db::get_positions()?;
let position = positions.first().context("No position to roll over")?;
// TODO: Update the `expiry_timestamp` only after the rollover protocol is finished. We only do
// it so that we don't have to store the `expiry_timestamp` in the database.
let position = position
.start_rollover(expiry_timestamp)
.apply_funding_fee_events(funding_fee_events)?;
db::start_position_rollover(position)?;
event::publish(&EventInternal::PositionUpdateNotification(position));
Ok(())
}
/// Update position after completing rollover protocol.
pub fn update_position_after_rollover() -> Result<Position> {
tracing::debug!("Setting position state from rollover back to open");
let positions = &db::get_positions()?;
let position = positions
.first()
.context("No position to finish rollover")?;
let position = position.finish_rollover();
db::finish_position_rollover(position)?;
event::publish(&EventInternal::PositionUpdateNotification(position));
Ok(position)
}
/// The app will sometimes receive [`FundingFeeEvent`]s from the coordinator which are not directly
/// linked to a channel update. These need to be applied to the [`Position`] to keep it in sync with
/// the coordinator.
pub fn handle_funding_fee_events(funding_fee_events: &[FundingFeeEvent]) -> Result<()> {
if funding_fee_events.is_empty() {
return Ok(());
}
tracing::debug!(
?funding_fee_events,
"Applying funding fee events to position"
);
let positions = &db::get_positions()?;
let position = positions
.first()
.context("No position to apply funding fee events")?;
let position = position.apply_funding_fee_events(funding_fee_events)?;
db::update_position(position)?;
event::publish(&EventInternal::PositionUpdateNotification(position));
Ok(())
}
/// Create or insert a position after filling an order.
pub fn update_position_after_dlc_channel_creation_or_update(
filled_order: Order,
expiry: OffsetDateTime,
) -> Result<()> {
let (position, trades) = match db::get_positions()?.first() {
None => {
// TODO: This log message seems to assume that we can only reach this branch if the
// channel was just created. Is that true?
tracing::debug!(
order = ?filled_order,
"Creating position after DLC channel creation"
);
let (position, trade) = Position::new_open(filled_order, expiry);
tracing::info!(?trade, ?position, "Position created");
db::insert_position(position)?;
(position, vec![trade])
}
Some(
position @ Position {
position_state: PositionState::Resizing,
..
},
) => {
tracing::info!("Calculating new position after DLC channel has been resized");
let (position, trades) = position.apply_order(filled_order, expiry)?;
let position = position.context("Resized position has vanished")?;
db::update_position(position)?;
(position, trades)
}
Some(position) => {
bail!(
"Cannot resize position in state {:?}",
position.position_state
);
}
};
new_trades(trades)?;
event::publish(&EventInternal::PositionUpdateNotification(position));
Ok(())
}
/// Delete a position after closing a DLC channel.
pub fn update_position_after_dlc_closure(filled_order: Order) -> Result<()> {
tracing::debug!(?filled_order, "Removing position after DLC channel closure");
let positions = &db::get_positions()?;
let position = match positions.as_slice() {
[position] => position,
[position, ..] => {
tracing::warn!("Found more than one position. Taking the first one");
position
}
[] => {
tracing::warn!("No position to remove");
return Ok(());
}
};
tracing::debug!(
?position,
?filled_order,
"Calculating closing trades for position"
);
// After closing the DLC channel we do not need to update the position's expiry anymore.
let expiry = position.expiry;
let (new_position, trades) = position.apply_order(filled_order, expiry)?;
tracing::debug!(?trades, "Calculated closing trades");
if let Some(new_position) = new_position {
tracing::warn!(
?new_position,
"Expected computed position to vanish after applying closing order"
);
}
new_trades(trades)?;
db::delete_positions()?;
event::publish(&EventInternal::PositionCloseNotification(
ContractSymbol::BtcUsd,
));
Ok(())
}
pub fn price_update(price: Decimal, direction: Direction) {
match direction {
Direction::Long => {
tracing::debug!(?price, "Updating long price");
}
Direction::Short => {
tracing::debug!(?price, "Updating short price");
event::publish(&EventInternal::AskPriceUpdateNotification(price));
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/trade/funding_fee_event/mod.rs | mobile/native/src/trade/funding_fee_event/mod.rs | use bitcoin::SignedAmount;
use rust_decimal::Decimal;
use time::OffsetDateTime;
use xxi_node::commons::ContractSymbol;
use xxi_node::commons::Direction;
pub mod handler;
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct FundingFeeEvent {
pub contract_symbol: ContractSymbol,
pub contracts: Decimal,
pub direction: Direction,
pub price: Decimal,
/// A positive amount indicates that the trader pays the coordinator; a negative amount
/// indicates that the coordinator pays the trader.
pub fee: SignedAmount,
pub due_date: OffsetDateTime,
pub paid_date: Option<OffsetDateTime>,
}
impl FundingFeeEvent {
pub fn unpaid(
contract_symbol: ContractSymbol,
contracts: Decimal,
direction: Direction,
price: Decimal,
fee: SignedAmount,
due_date: OffsetDateTime,
) -> Self {
Self {
contract_symbol,
contracts,
direction,
price,
fee,
due_date,
paid_date: None,
}
}
}
impl From<xxi_node::FundingFeeEvent> for FundingFeeEvent {
fn from(value: xxi_node::FundingFeeEvent) -> Self {
Self {
contract_symbol: value.contract_symbol,
contracts: value.contracts,
direction: value.direction,
price: value.price,
fee: value.fee,
due_date: value.due_date,
paid_date: None,
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/trade/funding_fee_event/handler.rs | mobile/native/src/trade/funding_fee_event/handler.rs | use crate::db;
use crate::event;
use crate::event::EventInternal;
use crate::trade::FundingFeeEvent;
use anyhow::Result;
use time::OffsetDateTime;
use xxi_node::commons::ContractSymbol;
pub fn get_funding_fee_events() -> Result<Vec<FundingFeeEvent>> {
db::get_all_funding_fee_events()
}
/// Attempt to insert a list of unpaid funding fee events. Unpaid funding fee events that are
/// already in the database are ignored.
///
/// Unpaid funding fee events that are confirmed to be new are propagated via an [`EventInternal`]
/// and returned.
pub fn handle_unpaid_funding_fee_events(
funding_fee_events: &[FundingFeeEvent],
) -> Result<Vec<FundingFeeEvent>> {
let new_events = db::insert_unpaid_funding_fee_events(funding_fee_events)?;
for event in new_events.iter() {
event::publish(&EventInternal::FundingFeeEvent(*event));
}
Ok(new_events)
}
pub fn mark_funding_fee_events_as_paid(
contract_symbol: ContractSymbol,
since: OffsetDateTime,
) -> Result<()> {
db::mark_funding_fee_events_as_paid(contract_symbol, since)
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/trade/order/api.rs | mobile/native/src/trade/order/api.rs | use crate::trade::order;
use flutter_rust_bridge::frb;
use time::OffsetDateTime;
use uuid::Uuid;
pub use xxi_node::commons::ContractSymbol;
pub use xxi_node::commons::Direction;
#[frb(mirror(ContractSymbol))]
#[derive(Debug, Clone, Copy)]
pub enum _ContractSymbol {
BtcUsd,
}
#[frb(mirror(Direction))]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum _Direction {
Long,
Short,
}
#[frb]
#[derive(Debug, Clone, Copy)]
pub enum OrderType {
Market,
Limit { price: f32 },
}
/// State of an order
///
/// Please refer to [`crate::trade::order::OrderState`]
#[frb]
#[derive(Debug, Clone, Copy)]
pub enum OrderState {
Open,
Filling,
Filled,
Failed,
Rejected,
}
#[frb]
#[derive(Debug, Clone, Copy)]
pub enum OrderReason {
Manual,
Expired,
Liquidated,
}
#[frb]
#[derive(Debug, Clone)]
pub enum FailureReason {
FailedToSetToFilling,
TradeRequest,
TradeResponse(String),
NodeAccess,
NoUsableChannel,
CollabRevert,
OrderNotAcceptable,
TimedOut,
InvalidDlcOffer,
OrderRejected(String),
Unknown,
}
#[frb]
#[derive(Debug, Clone)]
pub struct NewOrder {
#[frb(non_final)]
pub leverage: f32,
#[frb(non_final)]
pub quantity: f32,
#[frb(non_final)]
pub contract_symbol: ContractSymbol,
#[frb(non_final)]
pub direction: Direction,
// Box needed for complex enum, otherwise generated Rust code complains about Default impl
// missing
#[frb(non_final)]
pub order_type: Box<OrderType>,
#[frb(non_final)]
pub stable: bool,
}
#[frb]
#[derive(Debug, Clone)]
pub struct Order {
pub id: String,
pub leverage: f32,
pub quantity: f32,
pub contract_symbol: ContractSymbol,
pub direction: Direction,
pub order_type: Box<OrderType>,
pub state: OrderState,
pub execution_price: Option<f32>,
pub creation_timestamp: i64,
pub order_expiry_timestamp: i64,
pub reason: OrderReason,
pub failure_reason: Option<FailureReason>,
}
impl From<order::OrderType> for OrderType {
fn from(value: order::OrderType) -> Self {
match value {
order::OrderType::Market => OrderType::Market,
order::OrderType::Limit { price } => OrderType::Limit { price },
}
}
}
impl From<order::Order> for Order {
fn from(value: order::Order) -> Self {
let execution_price = match value.state {
order::OrderState::Filled {
execution_price, ..
} => Some(execution_price),
_ => None,
};
Order {
id: value.id.to_string(),
leverage: value.leverage,
quantity: value.quantity,
contract_symbol: value.contract_symbol,
direction: value.direction,
order_type: Box::new(value.order_type.into()),
state: value.state.into(),
execution_price,
creation_timestamp: value.creation_timestamp.unix_timestamp(),
order_expiry_timestamp: value.order_expiry_timestamp.unix_timestamp(),
reason: value.reason.into(),
failure_reason: value.failure_reason.map(|reason| reason.into()),
}
}
}
impl From<order::OrderReason> for OrderReason {
fn from(value: order::OrderReason) -> Self {
match value {
order::OrderReason::Manual => OrderReason::Manual,
order::OrderReason::Expired => OrderReason::Expired,
order::OrderReason::CoordinatorLiquidated => OrderReason::Liquidated,
order::OrderReason::TraderLiquidated => OrderReason::Liquidated,
}
}
}
impl From<order::FailureReason> for FailureReason {
fn from(value: order::FailureReason) -> Self {
match value {
order::FailureReason::FailedToSetToFilling => FailureReason::FailedToSetToFilling,
order::FailureReason::TradeRequest => FailureReason::TradeRequest,
order::FailureReason::TradeResponse(details) => FailureReason::TradeResponse(details),
order::FailureReason::OrderNotAcceptable => FailureReason::OrderNotAcceptable,
order::FailureReason::TimedOut => FailureReason::TimedOut,
order::FailureReason::InvalidDlcOffer(_) => FailureReason::InvalidDlcOffer,
order::FailureReason::OrderRejected(reason) => FailureReason::OrderRejected(reason),
order::FailureReason::CollabRevert => FailureReason::CollabRevert,
order::FailureReason::Unknown => FailureReason::Unknown,
}
}
}
impl From<OrderType> for order::OrderType {
fn from(value: OrderType) -> Self {
match value {
OrderType::Market => order::OrderType::Market,
OrderType::Limit { price } => order::OrderType::Limit { price },
}
}
}
impl From<order::OrderState> for OrderState {
fn from(value: order::OrderState) -> Self {
match value {
order::OrderState::Open => OrderState::Open,
order::OrderState::Filled { .. } => OrderState::Filled,
order::OrderState::Failed { .. } => OrderState::Failed,
order::OrderState::Rejected => OrderState::Rejected,
order::OrderState::Filling { .. } => OrderState::Filling,
order::OrderState::Initial => unimplemented!(
"don't expose orders that were not submitted into the orderbook to the frontend!"
),
}
}
}
impl From<NewOrder> for order::Order {
fn from(value: NewOrder) -> Self {
order::Order {
id: Uuid::new_v4(),
leverage: value.leverage,
quantity: value.quantity,
contract_symbol: value.contract_symbol,
direction: value.direction,
order_type: (*value.order_type).into(),
state: order::OrderState::Initial,
creation_timestamp: OffsetDateTime::now_utc(),
// We do not support setting order expiry from the frontend for now
order_expiry_timestamp: OffsetDateTime::now_utc() + time::Duration::minutes(1),
reason: order::OrderReason::Manual,
stable: value.stable,
failure_reason: None,
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/trade/order/orderbook_client.rs | mobile/native/src/trade/order/orderbook_client.rs | use crate::commons::reqwest_client;
use crate::dlc::get_node_key;
use anyhow::bail;
use anyhow::Result;
use reqwest::Url;
use xxi_node::commons::ChannelOpeningParams;
use xxi_node::commons::NewMarketOrder;
use xxi_node::commons::NewOrder;
use xxi_node::commons::NewOrderRequest;
pub struct OrderbookClient {
url: Url,
}
impl OrderbookClient {
pub fn new(url: Url) -> Self {
Self { url }
}
pub(crate) async fn post_new_market_order(
&self,
order: NewMarketOrder,
channel_opening_params: Option<ChannelOpeningParams>,
) -> Result<()> {
let secret_key = get_node_key();
let message = order.message();
let signature = secret_key.sign_ecdsa(message);
let new_order_request = NewOrderRequest {
value: NewOrder::Market(order),
signature,
channel_opening_params,
};
let url = self.url.join("/api/orderbook/orders")?;
let client = reqwest_client();
let response = client.post(url).json(&new_order_request).send().await?;
if response.status().as_u16() == 200 {
Ok(())
} else {
let error = response.text().await?;
bail!("Could not create new order: {error}")
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/trade/order/mod.rs | mobile/native/src/trade/order/mod.rs | use crate::calculations::calculate_margin;
use crate::dlc;
use bitcoin::Amount;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::Decimal;
use serde::Serialize;
use time::OffsetDateTime;
use uuid::Uuid;
use xxi_node::commons;
use xxi_node::commons::ContractSymbol;
use xxi_node::commons::Direction;
pub mod api;
pub mod handler;
mod orderbook_client;
// When naming this the same as `api_model::order::OrderType` the generated code somehow uses
// `trade::OrderType` and contains errors, hence different name is used.
// This is likely a bug in frb.
#[derive(Debug, Clone, Copy, PartialEq, Serialize)]
pub enum OrderType {
Market,
Limit { price: f32 },
}
/// Internal type so we still have Copy on order
#[derive(Debug, Clone, Serialize, PartialEq)]
pub enum FailureReason {
/// An error occurred when setting the Order to filling in our DB
FailedToSetToFilling,
/// The order failed because we failed sending the trade request
TradeRequest,
/// A failure happened during the initial phase of the protocol. I.e. after sending the trade
/// request
TradeResponse(String),
/// The order failed due to collaboratively reverting the position
CollabRevert,
/// MVP scope: Can only close the order, not reduce or extend
OrderNotAcceptable,
/// The order timed out, i.e. we did not receive a match in time
TimedOut,
InvalidDlcOffer(InvalidSubchannelOffer),
/// The order has been rejected by the orderbook
OrderRejected(String),
Unknown,
}
#[derive(Debug, Clone, Copy, Serialize, PartialEq)]
pub enum InvalidSubchannelOffer {
/// Received offer was outdated
Outdated,
UndeterminedMaturityDate,
Unacceptable,
}
#[derive(Debug, Clone, PartialEq)]
pub enum OrderState {
/// Not submitted to orderbook yet
///
/// In order to be able to track how many failed orders we have we store the order in the
/// database and update it once the orderbook returns success.
/// Transitions:
/// - Initial->Open
/// - Initial->Rejected
Initial,
/// Rejected by the orderbook upon submission
///
/// If the orderbook returns failure upon submission.
/// Note that we will not be able to query this order from the orderbook again, because it was
/// rejected upon submission. This is a final state.
Rejected,
/// Successfully submit to orderbook
///
/// If the orderbook returns success upon submission.
/// Transitions:
/// - Open->Failed (if we fail to set up the trade)
/// - Open->Filled (if we successfully set up the trade)
Open,
/// The orderbook has matched the order and it is being filled
///
/// Once the order is being filled we know the execution price and store it.
/// Since it's a non-custodial setup filling an order involves setting up a DLC.
/// This state is set once we receive the TradeParams from the orderbook.
/// This state covers the complete trade execution until we have a DLC or we run into a failure
/// scenario. We don't allow re-trying the trade execution; if the app is started and we
/// detect an order that is in the `Filling` state, we will have to evaluate if there is a DLC
/// currently being set up. If yes the order remains in `Filling` state, if there is no DLC
/// currently being set up we move the order into `Failed` state.
///
/// Transitions:
/// Filling->Filled (if we eventually end up with a DLC)
/// Filling->Failed (if we experience an error when executing the trade or the DLC manager
/// reported back failure/rejection)
Filling {
execution_price: f32,
matching_fee: Amount,
},
/// The order failed to be filled
///
/// In order to reach this state the orderbook must have provided trade params to start trade
/// execution, and the trade execution failed; i.e. it did not result in setting up a DLC.
Failed {
execution_price: Option<f32>,
matching_fee: Option<Amount>,
reason: FailureReason,
},
/// Successfully set up trade
///
/// In order to reach this state the orderbook must have provided trade params to start trade
/// execution, and the trade execution succeeded. This state assumes that a DLC exists, and
/// the order is reflected in a position. Note that only complete filling is supported,
/// partial filling not depicted yet.
/// This is a final state
Filled {
/// The execution price that the order was filled with
execution_price: f32,
matching_fee: Amount,
},
}
impl OrderState {
pub fn matching_fee(&self) -> Option<Amount> {
match self {
OrderState::Initial
| OrderState::Rejected
| OrderState::Failed { .. }
| OrderState::Open => None,
OrderState::Filling { matching_fee, .. } | OrderState::Filled { matching_fee, .. } => {
Some(*matching_fee)
}
}
}
pub fn execution_price(&self) -> Option<f32> {
match self {
OrderState::Initial
| OrderState::Rejected
| OrderState::Failed { .. }
| OrderState::Open => None,
OrderState::Filling {
execution_price, ..
}
| OrderState::Filled {
execution_price, ..
} => Some(*execution_price),
}
}
pub fn failure_reason(&self) -> Option<FailureReason> {
match self {
OrderState::Initial | OrderState::Rejected | OrderState::Open => None,
OrderState::Filling { .. } | OrderState::Filled { .. } => None,
OrderState::Failed { reason, .. } => Some(reason.clone()),
}
}
}
#[derive(Debug, Clone, Copy)]
pub enum OrderReason {
Manual,
Expired,
CoordinatorLiquidated,
TraderLiquidated,
}
impl From<OrderReason> for commons::OrderReason {
fn from(value: OrderReason) -> Self {
match value {
OrderReason::Manual => commons::OrderReason::Manual,
OrderReason::Expired => commons::OrderReason::Expired,
OrderReason::CoordinatorLiquidated => commons::OrderReason::CoordinatorLiquidated,
OrderReason::TraderLiquidated => commons::OrderReason::TraderLiquidated,
}
}
}
impl From<commons::OrderReason> for OrderReason {
fn from(value: commons::OrderReason) -> Self {
match value {
commons::OrderReason::Manual => OrderReason::Manual,
commons::OrderReason::Expired => OrderReason::Expired,
commons::OrderReason::CoordinatorLiquidated => OrderReason::CoordinatorLiquidated,
commons::OrderReason::TraderLiquidated => OrderReason::TraderLiquidated,
}
}
}
#[derive(Debug, Clone)]
pub struct Order {
pub id: Uuid,
pub leverage: f32,
pub quantity: f32,
pub contract_symbol: ContractSymbol,
pub direction: Direction,
pub order_type: OrderType,
pub state: OrderState,
pub creation_timestamp: OffsetDateTime,
pub order_expiry_timestamp: OffsetDateTime,
pub reason: OrderReason,
pub stable: bool,
// FIXME: Why is this failure_reason duplicated? It's also in the `order_state`?
pub failure_reason: Option<FailureReason>,
}
impl Order {
/// This returns the executed price once known
pub fn execution_price(&self) -> Option<f32> {
match self.state {
OrderState::Filling {
execution_price, ..
}
| OrderState::Filled {
execution_price, ..
}
| OrderState::Failed {
execution_price: Some(execution_price),
..
} => Some(execution_price),
_ => None,
}
}
/// This returns the matching fee once known
pub fn matching_fee(&self) -> Option<Amount> {
match self.state {
OrderState::Filling { matching_fee, .. }
| OrderState::Filled { matching_fee, .. }
| OrderState::Failed {
matching_fee: Some(matching_fee),
..
} => Some(matching_fee),
_ => None,
}
}
/// This returns the trader's margin once known (based on the execution price).
pub fn trader_margin(&self) -> Option<u64> {
let opening_price = self.execution_price()?;
Some(calculate_margin(
opening_price,
self.quantity,
self.leverage,
))
}
}
impl From<Order> for commons::NewMarketOrder {
fn from(order: Order) -> Self {
let quantity = Decimal::try_from(order.quantity).expect("to parse into decimal");
let trader_id = dlc::get_node_pubkey();
commons::NewMarketOrder {
id: order.id,
contract_symbol: order.contract_symbol,
quantity,
trader_id,
direction: order.direction,
leverage: Decimal::from_f32(order.leverage).expect("to fit into f32"),
expiry: order.order_expiry_timestamp,
stable: order.stable,
}
}
}
impl From<OrderType> for commons::OrderType {
fn from(order_type: OrderType) -> Self {
match order_type {
OrderType::Market => commons::OrderType::Market,
OrderType::Limit { .. } => commons::OrderType::Limit,
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/trade/order/handler.rs | mobile/native/src/trade/order/handler.rs | use crate::config;
use crate::db;
use crate::db::get_order_in_filling;
use crate::db::maybe_get_open_orders;
use crate::dlc;
use crate::dlc::check_if_signed_channel_is_confirmed;
use crate::event;
use crate::event::BackgroundTask;
use crate::event::EventInternal;
use crate::event::TaskStatus;
use crate::report_error_to_coordinator;
use crate::trade::order::orderbook_client::OrderbookClient;
use crate::trade::order::FailureReason;
use crate::trade::order::Order;
use crate::trade::order::OrderReason;
use crate::trade::order::OrderState;
use crate::trade::order::OrderType;
use crate::trade::position;
use crate::trade::position::handler::update_position_after_order_submitted;
use crate::trade::position::PositionState;
use anyhow::anyhow;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use bitcoin::Amount;
use dlc_manager::channel::signed_channel::SignedChannel;
use dlc_manager::channel::signed_channel::SignedChannelState;
use reqwest::Url;
use rust_decimal::prelude::ToPrimitive;
use time::Duration;
use time::OffsetDateTime;
use uuid::Uuid;
use xxi_node::commons;
use xxi_node::commons::ChannelOpeningParams;
use xxi_node::commons::Direction;
use xxi_node::node::signed_channel_state_name;
const ORDER_OUTDATED_AFTER: Duration = Duration::minutes(5);
#[derive(thiserror::Error, Debug)]
pub enum SubmitOrderError {
/// Generic problem related to the storage layer (sqlite, sled).
#[error("Storage failed: {0}")]
Storage(anyhow::Error),
#[error("DLC channel not yet confirmed")]
UnconfirmedChannel,
#[error("DLC Channel in invalid state: expected {expected_channel_state}, got {actual_channel_state}")]
InvalidChannelState {
expected_channel_state: String,
actual_channel_state: String,
},
#[error("Missing DLC channel: {0}")]
MissingChannel(String),
#[error(
"Another order is already being filled: {contracts} contracts {direction} at {leverage}x leverage"
)]
OtherOrderInFilling {
contracts: f32,
direction: Direction,
leverage: f32,
},
#[error("Failed to post order to orderbook: {0}")]
Orderbook(anyhow::Error),
}
pub async fn submit_order(
order: Order,
channel_opening_params: Option<ChannelOpeningParams>,
) -> Result<Uuid, SubmitOrderError> {
event::publish(&EventInternal::BackgroundNotification(
BackgroundTask::AsyncTrade(TaskStatus::Pending),
));
submit_order_internal(order, channel_opening_params)
.await
.inspect_err(report_error_to_coordinator)
.inspect_err(|e| {
event::publish(&EventInternal::BackgroundNotification(
BackgroundTask::AsyncTrade(TaskStatus::Failed(format!("{e:#}"))),
))
})
}
pub async fn submit_order_internal(
order: Order,
channel_opening_params: Option<ChannelOpeningParams>,
) -> Result<Uuid, SubmitOrderError> {
check_channel_state().await?;
// Having an order in `Filling` should mean that the subchannel is in the midst of an update.
// Since we currently only support one subchannel per app, it does not make sense to start
// another update (by submitting a new order to the orderbook) until the current one is
// finished.
if let Some(filling_order) = get_order_in_filling().map_err(SubmitOrderError::Storage)? {
return Err(SubmitOrderError::OtherOrderInFilling {
contracts: filling_order.quantity,
direction: filling_order.direction,
leverage: filling_order.leverage,
});
}
db::insert_order(order.clone()).map_err(SubmitOrderError::Storage)?;
let url = format!("http://{}", config::get_http_endpoint());
let url = Url::parse(&url).expect("correct URL");
let orderbook_client = OrderbookClient::new(url);
set_order_to_open_and_update_ui(order.id).map_err(SubmitOrderError::Storage)?;
if let Err(err) = orderbook_client
.post_new_market_order(order.clone().into(), channel_opening_params)
.await
{
let order_id = order.id.clone().to_string();
tracing::error!(order_id, "Failed to post new order: {err:#}");
set_order_to_failed_and_update_ui(
order.id,
FailureReason::OrderRejected(err.to_string()),
order.execution_price(),
)
.map_err(SubmitOrderError::Storage)?;
position::handler::set_position_state(PositionState::Open)
.context("Could not reset position to open")
.map_err(SubmitOrderError::Storage)?;
return Err(SubmitOrderError::Orderbook(err));
}
update_position_after_order_submitted(&order).map_err(SubmitOrderError::Storage)?;
Ok(order.id)
}
/// Checks if the channel is in a valid state to post the order.
///
/// Will fail in the following scenarios
/// 1. Open position, but no channel in state [`SignedChannelState::Established`]
/// 2. Open position and not enough confirmations on the funding txid.
/// 3. No position and a channel which is not in state [`SignedChannelState::Settled`]
async fn check_channel_state() -> Result<(), SubmitOrderError> {
let channel = dlc::get_signed_dlc_channel().map_err(SubmitOrderError::Storage)?;
if position::handler::get_positions()
.map_err(SubmitOrderError::Storage)?
.first()
.is_some()
{
match channel {
Some(SignedChannel {
state: SignedChannelState::Established { .. },
..
}) => {} // all good we can continue with the order
Some(channel) => {
return Err(SubmitOrderError::InvalidChannelState {
expected_channel_state: "Established".to_string(),
actual_channel_state: signed_channel_state_name(&channel),
})
}
None => {
return Err(SubmitOrderError::MissingChannel(
"Expected established dlc channel.".to_string(),
))
}
}
// If we have an open position, we should not allow any further trading until the current
// DLC channel is confirmed on-chain. Otherwise we can run into pesky DLC protocol
// failures.
if !check_if_signed_channel_is_confirmed()
.await
.map_err(SubmitOrderError::Storage)?
{
return Err(SubmitOrderError::UnconfirmedChannel);
}
} else {
match channel {
None
| Some(SignedChannel {
state: SignedChannelState::Settled { .. },
..
}) => {} // all good we can continue with the order
Some(channel) => {
return Err(SubmitOrderError::InvalidChannelState {
expected_channel_state: "Settled".to_string(),
actual_channel_state: signed_channel_state_name(&channel),
});
}
}
}
Ok(())
}
pub(crate) fn async_order_filling(
order: &commons::Order,
filled_with: &commons::FilledWith,
) -> Result<()> {
let order_type = match order.order_type {
commons::OrderType::Market => OrderType::Market,
commons::OrderType::Limit => OrderType::Limit {
price: order.price.to_f32().expect("to fit into f32"),
},
};
let execution_price = filled_with
.average_execution_price()
.to_f32()
.expect("to fit into f32");
let matching_fee = filled_with.order_matching_fee();
let order = match db::get_order(order.id)? {
None => {
let order = Order {
id: order.id,
leverage: order.leverage,
quantity: order.quantity.to_f32().expect("to fit into f32"),
contract_symbol: order.contract_symbol,
direction: order.direction,
order_type,
state: OrderState::Filling {
execution_price,
matching_fee,
},
creation_timestamp: order.timestamp,
order_expiry_timestamp: order.expiry,
reason: order.order_reason.clone().into(),
stable: order.stable,
failure_reason: None,
};
db::insert_order(order.clone())?
}
Some(mut order) => {
// the order has already been inserted to the database. Most likely because the async
// match has already been received. We still want to retry this order as the previous
// attempt seems to have failed.
let order_state = OrderState::Filling {
execution_price,
matching_fee,
};
db::set_order_state_to_filling(order.id, execution_price, matching_fee)?;
order.state = order_state;
order
}
};
event::publish(&EventInternal::OrderUpdateNotification(order.clone()));
Ok(())
}
/// Update order to state [`OrderState::Filling`].
pub(crate) fn order_filling(
order_id: Uuid,
execution_price: f32,
matching_fee: Amount,
) -> Result<()> {
if let Err(e) = set_order_to_filling_and_update_ui(order_id, execution_price, matching_fee) {
let e_string = format!("{e:#}");
match order_failed(Some(order_id), FailureReason::FailedToSetToFilling, e) {
Ok(()) => {
tracing::debug!(
%order_id,
"Set order to failed, after failing to set it to filling"
);
}
Err(e) => {
tracing::error!(
%order_id,
"Failed to set order to failed, after failing to set it to filling: {e:#}"
);
}
};
bail!("Failed to set order {order_id} to filling: {e_string}");
}
Ok(())
}
/// Sets filling order to filled. Returns an error if no order in `Filling`
pub(crate) fn order_filled(order_id: Option<Uuid>) -> Result<Order> {
let order = match order_id {
None => get_order_in_filling(),
Some(order_id) => db::get_order(order_id),
}?
.with_context(|| format!("Could not find order. order_id = {order_id:?}"))?;
let execution_price = order.execution_price();
let matching_fee = order.matching_fee();
if let (Some(execution_price), Some(matching_fee)) = (execution_price, matching_fee) {
let filled_order =
set_order_to_filled_and_update_ui(order.id, execution_price, matching_fee)?;
tracing::debug!(order = ?filled_order, "Order filled");
return Ok(filled_order);
}
tracing::warn!(
"Couldn't set order to filling due to missing execution price and / or matching fee"
);
Ok(order.clone())
}
/// Update the [`Order`]'s state to [`OrderState::Failed`].
pub(crate) fn order_failed(
order_id: Option<Uuid>,
reason: FailureReason,
error: anyhow::Error,
) -> Result<()> {
tracing::error!(?order_id, ?reason, "Failed to execute trade: {error:#}");
let order = match order_id {
None => get_order_in_filling(),
Some(order_id) => db::get_order(order_id),
}
.inspect_err(|e| {
// it doesn't matter that we send here an async trade failed even though we do not exactly
// know what kind of background task failed, because the error screen looks always the same.
event::publish(&EventInternal::BackgroundNotification(
BackgroundTask::AsyncTrade(TaskStatus::Failed(format!("{e:#}"))),
))
})?;
let task_status = TaskStatus::Failed(format!("{error:#}"));
let task = match order {
Some(order) => match order.reason {
OrderReason::Manual => BackgroundTask::AsyncTrade(task_status),
OrderReason::Expired => BackgroundTask::Expire(task_status),
OrderReason::CoordinatorLiquidated | OrderReason::TraderLiquidated => {
BackgroundTask::Expire(task_status)
}
},
None => {
// if we can't find a filling order it must have been a rollover. Note this is not very
// nice, but we are missing the required context information at the moment.
BackgroundTask::Rollover(task_status)
}
};
event::publish(&EventInternal::BackgroundNotification(task));
if let Some(order_id) = order_id {
set_order_to_failed_and_update_ui(order_id, reason, None)?;
}
// TODO: fixme. this so ugly, even a Sphynx cat is beautiful against this.
// In this function we set the order to failed but here we try to set the position to open.
// This is basically a roll back of a former action. It only works because we do not have a
// concept of a closed position on the client side. However, this function is being called
// in various places where (most of the time) we only want to set the order to failed. If we
// were to introduce a `PostionState::Closed` the below code would be wrong and would
// accidentally set a closed position to open again. This should be cleaned up.
if let Err(e) = position::handler::set_position_state(PositionState::Open) {
bail!("Could not reset position to open because of {e:#}");
}
Ok(())
}
pub async fn get_orders_for_ui() -> Result<Vec<Order>> {
db::get_orders_for_ui()
}
pub fn check_open_orders() -> Result<()> {
let open_orders = match maybe_get_open_orders() {
Ok(orders_being_filled) => orders_being_filled,
Err(e) => {
bail!("Error when loading open orders from database: {e:#}");
}
};
let now = OffsetDateTime::now_utc();
for open_order in open_orders {
tracing::debug!(?open_order, "Checking order if it is still up to date");
if open_order.creation_timestamp + ORDER_OUTDATED_AFTER < now {
order_failed(
Some(open_order.id),
FailureReason::TimedOut,
anyhow!("Order was not matched within {ORDER_OUTDATED_AFTER:?}"),
)?;
}
}
Ok(())
}
fn set_order_to_failed_and_update_ui(
order_id: Uuid,
failure_reason: FailureReason,
execution_price: Option<f32>,
) -> Result<Order> {
let order = db::set_order_state_to_failed(order_id, failure_reason.into(), execution_price)
.with_context(|| format!("Failed to update order {order_id} to state failed"))?;
ui_update(order.clone());
Ok(order)
}
fn set_order_to_open_and_update_ui(order_id: Uuid) -> Result<Order> {
let order = db::set_order_state_to_open(order_id)
.with_context(|| format!("Failed to update order {order_id} to state failed"))?;
ui_update(order.clone());
Ok(order)
}
fn set_order_to_filled_and_update_ui(
order_id: Uuid,
execution_price: f32,
matching_fee: Amount,
) -> Result<Order> {
let order = db::set_order_state_to_filled(order_id, execution_price, matching_fee)
.with_context(|| format!("Failed to update order {order_id} to state filled"))?;
ui_update(order.clone());
Ok(order)
}
fn set_order_to_filling_and_update_ui(
order_id: Uuid,
execution_price: f32,
matching_fee: Amount,
) -> Result<Order> {
let order = db::set_order_state_to_filling(order_id, execution_price, matching_fee)
.with_context(|| format!("Failed to update order {order_id} to state filling"))?;
ui_update(order.clone());
Ok(order)
}
fn ui_update(order: Order) {
event::publish(&EventInternal::OrderUpdateNotification(order));
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/trade/users/mod.rs | mobile/native/src/trade/users/mod.rs | use crate::commons::reqwest_client;
use crate::config;
use crate::dlc;
use anyhow::anyhow;
use anyhow::Context;
use anyhow::Result;
use xxi_node::commons::RegisterParams;
use xxi_node::commons::UpdateUsernameParams;
use xxi_node::commons::User;
/// Enroll the user in the beta program
pub async fn register_beta(
contact: String,
version: String,
referral_code: Option<String>,
) -> Result<()> {
let name = crate::names::get_new_name();
let register = RegisterParams {
pubkey: dlc::get_node_pubkey(),
contact: Some(contact),
nickname: Some(name),
version: Some(version.clone()),
os: Some(std::env::consts::OS.to_string()),
referral_code,
};
tracing::debug!(
pubkey = register.pubkey.to_string(),
contact = register.contact,
referral_code = register.referral_code,
version,
"Registering user"
);
let client = reqwest_client();
let response = client
.post(format!("http://{}/api/users", config::get_http_endpoint()))
.json(®ister)
.send()
.await
.context("Failed to register beta program with coordinator")?;
let status_code = response.status();
if !status_code.is_success() {
let response_text = match response.text().await {
Ok(text) => text,
Err(err) => {
format!("could not decode response {err:#}")
}
};
return Err(anyhow!(
"Could not register with coordinator: HTTP${status_code}: {response_text}"
));
}
tracing::info!("Registered into beta program successfully");
Ok(())
}
/// Retrieve latest user details
pub async fn get_user_details() -> Result<User> {
let key = dlc::get_node_pubkey();
let client = reqwest_client();
let response = client
.get(format!(
"http://{}/api/users/{}",
config::get_http_endpoint(),
key
))
.send()
.await
.context("Failed to retrieve user details")?;
let user = response.json::<User>().await?;
tracing::info!("Received user details {user:?}");
Ok(user)
}
/// Update a user's name on the coordinator
pub async fn update_username(name: String) -> Result<()> {
let update_nickname = UpdateUsernameParams {
pubkey: dlc::get_node_pubkey(),
nickname: Some(name),
};
tracing::debug!(
pubkey = update_nickname.pubkey.to_string(),
nickname = update_nickname.nickname,
"Updating user nickname"
);
let client = reqwest_client();
let response = client
.put(format!(
"http://{}/api/users/nickname",
config::get_http_endpoint()
))
.json(&update_nickname)
.send()
.await
.context("Failed to register beta program with coordinator")?;
let status_code = response.status();
if !status_code.is_success() {
let response_text = match response.text().await {
Ok(text) => text,
Err(err) => {
format!("could not decode response {err:#}")
}
};
return Err(anyhow!(
"Could not register with coordinator: HTTP${status_code}: {response_text}"
));
}
tracing::info!("Updated user nickname successfully");
Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/trade/trades/api.rs | mobile/native/src/trade/trades/api.rs | use bitcoin::SignedAmount;
use flutter_rust_bridge::frb;
use rust_decimal::prelude::ToPrimitive;
use xxi_node::commons::ContractSymbol;
use xxi_node::commons::Direction;
// TODO: Include fee rate.
#[frb]
#[derive(Debug, Clone)]
pub struct Trade {
pub trade_type: TradeType,
pub contract_symbol: ContractSymbol,
pub contracts: f32,
pub price: f32,
/// Either a funding fee or an order-matching fee.
pub fee: i64,
/// Direction of the associated order.
pub direction: Direction,
/// Some trades may have a PNL associated with them.
pub pnl: Option<i64>,
pub timestamp: i64,
pub is_done: bool,
}
#[frb]
#[derive(Debug, Clone)]
pub enum TradeType {
Funding,
Trade,
}
impl From<crate::trade::Trade> for Trade {
fn from(value: crate::trade::Trade) -> Self {
Self {
trade_type: TradeType::Trade,
contract_symbol: value.contract_symbol,
contracts: value.contracts.to_f32().expect("to fit"),
price: value.price.to_f32().expect("to fit"),
fee: value.fee.to_sat() as i64,
direction: value.direction,
pnl: value.pnl.map(SignedAmount::to_sat),
timestamp: value.timestamp.unix_timestamp(),
is_done: true,
}
}
}
impl From<crate::trade::FundingFeeEvent> for Trade {
fn from(value: crate::trade::FundingFeeEvent) -> Self {
Self {
trade_type: TradeType::Funding,
contract_symbol: value.contract_symbol,
contracts: value.contracts.to_f32().expect("to fit"),
price: value.price.to_f32().expect("to fit"),
fee: value.fee.to_sat(),
direction: value.direction,
pnl: None,
timestamp: value.due_date.unix_timestamp(),
is_done: value.paid_date.is_some(),
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/trade/trades/mod.rs | mobile/native/src/trade/trades/mod.rs | use bitcoin::Amount;
use bitcoin::SignedAmount;
use rust_decimal::Decimal;
use time::OffsetDateTime;
use uuid::Uuid;
use xxi_node::commons::ContractSymbol;
use xxi_node::commons::Direction;
pub mod api;
pub mod handler;
/// A trade is an event that moves funds between the DLC channel collateral reserve and a DLC
/// channel.
///
/// Every trade is associated with a single market order, but an order can be associated with
/// multiple trades.
///
/// If an order changes the direction of the underlying position, it must be split into _two_
/// trades: one to close the original position and another one to open the new position in the
/// opposite direction. We do so to keep the model as simple as possible.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct Trade {
/// The executed order which resulted in this trade.
pub order_id: Uuid,
pub contract_symbol: ContractSymbol,
pub contracts: Decimal,
/// Direction of the associated order.
pub direction: Direction,
/// How many coins were moved between the DLC channel collateral reserve and the DLC.
///
/// A positive value indicates that the money moved out of the reserve; a negative value
/// indicates that the money moved into the reserve.
pub trade_cost: SignedAmount,
pub fee: Amount,
/// If a position was reduced or closed because of this trade, how profitable it was.
///
/// Set to [`None`] if the position was extended.
pub pnl: Option<SignedAmount>,
/// The price at which the associated order was executed.
pub price: Decimal,
pub timestamp: OffsetDateTime,
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/mobile/native/src/trade/trades/handler.rs | mobile/native/src/trade/trades/handler.rs | use crate::db;
use crate::event;
use crate::event::EventInternal;
use crate::trade::Trade;
use anyhow::Result;
pub fn new_trades(trades: Vec<Trade>) -> Result<()> {
db::insert_trades(&trades)?;
for trade in trades {
event::publish(&EventInternal::NewTrade(trade));
}
Ok(())
}
pub fn get_trades() -> Result<Vec<Trade>> {
db::get_all_trades()
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/fund/src/lib.rs | crates/fund/src/lib.rs | #![allow(clippy::unwrap_used)]
pub mod bitcoind;
pub mod coordinator;
pub mod http;
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/fund/src/http.rs | crates/fund/src/http.rs | use reqwest::Client;
pub fn init_reqwest() -> Client {
Client::builder()
.timeout(std::time::Duration::from_secs(30))
.build()
.unwrap()
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/fund/src/bitcoind.rs | crates/fund/src/bitcoind.rs | use anyhow::bail;
use anyhow::Result;
use bitcoin::Address;
use bitcoin::Amount;
use reqwest::Client;
use reqwest::Response;
use serde::Deserialize;
use std::time::Duration;
/// A wrapper over the bitcoind HTTP API
///
/// It does not aim to be complete, functionality will be added as needed
pub struct Bitcoind {
client: Client,
host: String,
}
impl Bitcoind {
pub fn new(client: Client, host: String) -> Self {
Self { client, host }
}
pub fn new_local(client: Client) -> Self {
let host = "http://localhost:8080/bitcoin".to_string();
Self::new(client, host)
}
/// Instructs `bitcoind` to generate to address.
pub async fn mine(&self, n: u16) -> Result<()> {
#[derive(Deserialize, Debug)]
struct BitcoindResponse {
result: String,
}
let response: BitcoindResponse = self
.client
.post(&self.host)
.body(r#"{"jsonrpc": "1.0", "method": "getnewaddress", "params": []}"#.to_string())
.send()
.await?
.json()
.await?;
self.client
.post(&self.host)
.body(format!(
r#"{{"jsonrpc": "1.0", "method": "generatetoaddress", "params": [{}, "{}"]}}"#,
n, response.result
))
.send()
.await?;
// For the mined blocks to be picked up by the subsequent wallet syncs
tokio::time::sleep(Duration::from_secs(5)).await;
Ok(())
}
/// An alias for send_to_address
pub async fn fund(&self, address: &Address, amount: Amount) -> Result<Response> {
self.send_to_address(address, amount).await
}
pub async fn send_to_address(&self, address: &Address, amount: Amount) -> Result<Response> {
let response = self
.client
.post(&self.host)
.body(format!(
r#"{{"jsonrpc": "1.0", "method": "sendtoaddress", "params": ["{}", "{}", "", "", false, false, null, null, false, 1.0]}}"#,
address,
amount.to_btc(),
))
.send()
.await?;
Ok(response)
}
pub async fn post(&self, endpoint: &str, body: Option<String>) -> Result<Response> {
let mut builder = self.client.post(endpoint.to_string());
if let Some(body) = body {
builder = builder.body(body);
}
let response = builder.send().await?;
if !response.status().is_success() {
bail!(response.text().await?)
}
Ok(response)
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/fund/src/coordinator.rs | crates/fund/src/coordinator.rs | use anyhow::Context;
use anyhow::Result;
use bitcoin::address::NetworkUnchecked;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Address;
use reqwest::Client;
use serde::Deserialize;
use serde::Serialize;
use std::net::SocketAddr;
#[derive(Debug, Clone, Copy, Deserialize, Serialize)]
pub struct NodeInfo {
pub pubkey: PublicKey,
pub address: SocketAddr,
}
#[derive(Debug, Deserialize, Serialize)]
pub struct InvoiceParams {
pub amount: Option<u64>,
pub description: Option<String>,
pub expiry: Option<u32>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Balance {
pub dlc_channel: u64,
pub onchain: u64,
}
/// A wrapper over the coordinator HTTP API.
///
/// It does not aim to be complete, functionality will be added as needed.
pub struct Coordinator {
client: Client,
host: String,
}
#[derive(Deserialize)]
pub struct DlcChannels {
#[serde(flatten)]
pub channel_details: Vec<DlcChannel>,
}
#[derive(Deserialize, Debug)]
pub struct DlcChannel {
pub channel_id: String,
pub dlc_channel_id: Option<String>,
pub counter_party: String,
}
#[derive(Deserialize, Debug)]
pub struct Channel {
pub channel_id: String,
pub counterparty: String,
pub funding_txo: Option<String>,
pub original_funding_txo: Option<String>,
pub outbound_capacity_msat: u64,
}
impl Coordinator {
pub fn new(client: Client, host: &str) -> Self {
Self {
client,
host: host.to_string(),
}
}
pub fn new_local(client: Client) -> Self {
Self::new(client, "http://localhost:8000")
}
/// Check whether the coordinator is running.
pub async fn is_running(&self) -> bool {
self.get("/health").await.is_ok()
}
pub async fn is_node_connected(&self, node_id: &str) -> Result<bool> {
let result = self
.get(&format!("/api/admin/is_connected/{node_id}"))
.await?
.status()
.is_success();
Ok(result)
}
pub async fn sync_wallet(&self) -> Result<()> {
self.post("/api/admin/sync").await?;
Ok(())
}
pub async fn get_new_address(&self) -> Result<Address<NetworkUnchecked>> {
Ok(self.get("/api/newaddress").await?.text().await?.parse()?)
}
pub async fn get_balance(&self) -> Result<Balance> {
Ok(self.get("/api/admin/wallet/balance").await?.json().await?)
}
pub async fn get_node_info(&self) -> Result<NodeInfo> {
self.get("/api/node")
.await?
.json()
.await
.context("could not parse json")
}
pub async fn broadcast_node_announcement(&self) -> Result<reqwest::Response> {
let status = self
.post("/api/admin/broadcast_announcement")
.await?
.error_for_status()?;
Ok(status)
}
pub async fn get_dlc_channels(&self) -> Result<Vec<DlcChannel>> {
Ok(self.get("/api/admin/dlc_channels").await?.json().await?)
}
pub async fn get_channels(&self) -> Result<Vec<Channel>> {
Ok(self.get("/api/admin/channels").await?.json().await?)
}
async fn get(&self, path: &str) -> Result<reqwest::Response> {
self.client
.get(format!("{0}{path}", self.host))
.send()
.await
.context("Could not send GET request to coordinator")?
.error_for_status()
.context("Coordinator did not return 200 OK")
}
async fn post(&self, path: &str) -> Result<reqwest::Response> {
self.client
.post(format!("{0}{path}", self.host))
.send()
.await
.context("Could not send POST request to coordinator")?
.error_for_status()
.context("Coordinator did not return 200 OK")
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/fund/examples/fund.rs | crates/fund/examples/fund.rs | use anyhow::Context;
use anyhow::Result;
use bitcoin::Amount;
use clap::Parser;
use fund::bitcoind;
use fund::coordinator::Coordinator;
use fund::http::init_reqwest;
use tracing::metadata::LevelFilter;
use tracing_subscriber::filter::Directive;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::EnvFilter;
const RUST_LOG_ENV: &str = "RUST_LOG";
#[derive(Parser)]
pub struct Opts {
/// Faucet address
#[clap(long, default_value = "http://localhost:8080")]
pub faucet: String,
/// Coordinator address
#[clap(long, default_value = "http://localhost:8000")]
pub coordinator: String,
/// Maker address
#[clap(long, default_value = "http://localhost:18000")]
pub maker: String,
}
#[tokio::main]
async fn main() -> Result<()> {
init_tracing(LevelFilter::DEBUG).expect("tracing to initialise");
let opts = Opts::parse();
fund_everything(&opts.faucet, &opts.coordinator).await
}
async fn fund_everything(faucet: &str, coordinator: &str) -> Result<()> {
let client = init_reqwest();
let coordinator = Coordinator::new(client.clone(), coordinator);
let coord_addr = coordinator.get_new_address().await?;
let coord_addr = coord_addr.assume_checked();
let bitcoind = bitcoind::Bitcoind::new(client, faucet.to_string() + "/bitcoin");
for _ in 0..5 {
bitcoind
.fund(&coord_addr, Amount::ONE_BTC)
.await
.context("Could not fund the faucet's on-chain wallet")?;
}
bitcoind.mine(10).await?;
coordinator.sync_wallet().await?;
let coordinator_balance = coordinator.get_balance().await?;
tracing::info!(
onchain = %Amount::from_sat(coordinator_balance.onchain),
offchain = %Amount::from_sat(coordinator_balance.dlc_channel),
"Coordinator balance",
);
let coordinator_node_info = coordinator.get_node_info().await?;
tracing::info!(?coordinator_node_info);
Ok(())
}
// Configure and initialise tracing subsystem
fn init_tracing(level: LevelFilter) -> Result<()> {
if level == LevelFilter::OFF {
return Ok(());
}
let mut filter = EnvFilter::new("")
.add_directive(Directive::from(level))
.add_directive("hyper=warn".parse()?)
.add_directive("rustls=warn".parse()?)
.add_directive("reqwest=warn".parse()?)
.add_directive("lightning_transaction_sync=warn".parse()?);
// Parse additional log directives from env variable
let filter = match std::env::var_os(RUST_LOG_ENV).map(|s| s.into_string()) {
Some(Ok(env)) => {
for directive in env.split(',') {
#[allow(clippy::print_stdout)]
match directive.parse() {
Ok(d) => filter = filter.add_directive(d),
Err(e) => println!("WARN ignoring log directive: `{directive}`: {e}"),
};
}
filter
}
_ => filter,
};
let fmt_layer = tracing_subscriber::fmt::layer()
.with_writer(std::io::stderr)
.with_ansi(true);
tracing_subscriber::registry()
.with(filter)
.with(fmt_layer)
.try_init()
.context("Failed to init tracing")?;
tracing::info!("Initialized logger");
Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/fee_rate_estimator.rs | crates/xxi-node/src/fee_rate_estimator.rs | use anyhow::Result;
use bdk::FeeRate;
use bitcoin::Network;
use bitcoin::Weight;
use lightning::chain::chaininterface::ConfirmationTarget;
use lightning::chain::chaininterface::FeeEstimator;
use lightning::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW;
use parking_lot::RwLock;
use std::collections::HashMap;
/// Default values used when constructing the [`FeeRateEstimator`] if the fee rate sever cannot give
/// us up-to-date values.
///
/// In sats/kwu.
const FEE_RATE_DEFAULTS: [(ConfirmationTarget, u32); 4] = [
(ConfirmationTarget::MempoolMinimum, 1000),
(ConfirmationTarget::Background, 2000),
(ConfirmationTarget::Normal, 3000),
(ConfirmationTarget::HighPriority, 4000),
];
pub struct FeeRateEstimator {
client: mempool::MempoolFeeRateEstimator,
fee_rate_cache: RwLock<HashMap<ConfirmationTarget, FeeRate>>,
}
pub trait EstimateFeeRate {
fn estimate(&self, target: ConfirmationTarget) -> FeeRate;
}
impl EstimateFeeRate for FeeRateEstimator {
fn estimate(&self, target: ConfirmationTarget) -> FeeRate {
self.get(target)
}
}
fn to_mempool_network(value: Network) -> mempool::Network {
match value {
Network::Bitcoin => mempool::Network::Mainnet,
Network::Testnet => mempool::Network::Testnet,
Network::Signet => mempool::Network::Signet,
Network::Regtest => mempool::Network::Local,
_ => unreachable!(),
}
}
impl FeeRateEstimator {
/// Constructor for the [`FeeRateEstimator`].
pub fn new(network: Network) -> Self {
let client = mempool::MempoolFeeRateEstimator::new(to_mempool_network(network));
tracing::warn!(defaults = ?FEE_RATE_DEFAULTS, "Initializing fee rate cache with default values.");
let initial_fee_rates = HashMap::from_iter(
FEE_RATE_DEFAULTS
.into_iter()
.map(|(target, fee_rate)| (target, FeeRate::from_sat_per_kwu(fee_rate as f32))),
);
let fee_rate_cache = RwLock::new(initial_fee_rates);
Self {
client,
fee_rate_cache,
}
}
pub fn get(&self, target: ConfirmationTarget) -> FeeRate {
self.fee_rate_cache
.read()
.get(&target)
.copied()
.expect("to have entries for all confirmation targets")
}
pub(crate) async fn update(&self) -> Result<()> {
let estimates = self.client.fetch_fee().await?;
let mut locked_fee_rate_cache = self.fee_rate_cache.write();
locked_fee_rate_cache.insert(
ConfirmationTarget::MempoolMinimum,
FeeRate::from_sat_per_vb(estimates.minimum_fee as f32),
);
locked_fee_rate_cache.insert(
ConfirmationTarget::Background,
FeeRate::from_sat_per_vb(estimates.economy_fee as f32),
);
locked_fee_rate_cache.insert(
ConfirmationTarget::Normal,
FeeRate::from_sat_per_vb(estimates.hour_fee as f32),
);
locked_fee_rate_cache.insert(
ConfirmationTarget::HighPriority,
FeeRate::from_sat_per_vb(estimates.fastest_fee as f32),
);
Ok(())
}
}
impl FeeEstimator for FeeRateEstimator {
fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32 {
(self
.estimate(confirmation_target)
.fee_wu(Weight::from_wu(1000)) as u32)
.max(FEERATE_FLOOR_SATS_PER_KW)
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/config.rs | crates/xxi-node/src/config.rs | use lightning::chain::chaininterface::ConfirmationTarget;
use lightning::ln::channelmanager::MIN_CLTV_EXPIRY_DELTA;
use lightning::util::config::ChannelConfig;
use lightning::util::config::ChannelHandshakeConfig;
use lightning::util::config::ChannelHandshakeLimits;
use lightning::util::config::UserConfig;
/// The speed at which we want a transaction to confirm used for feerate estimation.
///
/// We set it to high priority because the channel funding transaction should be included fast.
pub const CONFIRMATION_TARGET: ConfirmationTarget = ConfirmationTarget::HighPriority;
pub fn app_config() -> UserConfig {
UserConfig {
channel_handshake_config: ChannelHandshakeConfig {
// The app will only accept private channels. As we are forcing the apps announced
// channel preferences, the coordinator needs to override this config to match the apps
// preferences.
announced_channel: false,
minimum_depth: 1,
// There is no risk in the leaf channel to receive 100% of the channel capacity.
max_inbound_htlc_value_in_flight_percent_of_channel: 100,
// We want the coordinator to recover force-close funds as soon as possible. We choose
// 144 because we can't go any lower according to LDK.
our_to_self_delay: 144,
..Default::default()
},
channel_handshake_limits: ChannelHandshakeLimits {
max_minimum_depth: 1,
trust_own_funding_0conf: true,
// Enforces that incoming channels will be private.
force_announced_channel_preference: true,
// We want app users to only have to wait ~24 hours in case of a force-close. We choose
// 144 because we can't go any lower according to LDK.
their_to_self_delay: 144,
max_funding_satoshis: 100_000_000,
..Default::default()
},
channel_config: ChannelConfig {
cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA,
// Allows the coordinator to charge us a channel-opening fee after intercepting the
// app's funding HTLC.
accept_underpaying_htlcs: true,
..Default::default()
},
// we want to accept 0-conf channels from the coordinator
manually_accept_inbound_channels: true,
..Default::default()
}
}
pub fn coordinator_config() -> UserConfig {
UserConfig {
channel_handshake_config: ChannelHandshakeConfig {
// The coordinator will by default only accept public channels (see also
// `force_announced_channel_preference`). In order to open a private channel with the
// mobile app this config gets overwritten during the creation of the just-in-time
// channel. Note, public channels need 6 confirmations to get announced (and usable for
// multi-hop payments). This is a requirement of BOLT 7.
announced_channel: true,
// The minimum amount of confirmations before the inbound channel is deemed usable,
// between the counterparties.
minimum_depth: 1,
// We set this 100% as the coordinator is online 24/7 and can take the risk.
max_inbound_htlc_value_in_flight_percent_of_channel: 100,
// Our channel peers are allowed to get back their funds ~24 hours after a
// force-closure.
our_to_self_delay: 144,
..Default::default()
},
channel_handshake_limits: ChannelHandshakeLimits {
// The minimum amount of confirmations before the outbound channel is deemed usable,
// between the counterparties.
max_minimum_depth: 3,
trust_own_funding_0conf: true,
// Enforces incoming channels to the coordinator to be public! We
// only want to open private channels to our 10101 app.
force_announced_channel_preference: true,
// LND's max to_self_delay is 2016, so we want to be compatible.
their_to_self_delay: 2016,
max_funding_satoshis: 500_000_000,
..Default::default()
},
channel_config: ChannelConfig {
cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA,
// Proportional fee charged for forwarding a payment (outbound through a channel of
// ours).
forwarding_fee_proportional_millionths: 50,
// A base fee of 0 is chosen to simplify path-finding.
forwarding_fee_base_msat: 0,
..Default::default()
},
// This is needed to intercept payments to open just-in-time channels. This will produce the
// HTLCIntercepted event.
accept_intercept_htlcs: true,
// This config is needed to forward payments to the 10101 app, which only have private
// channels with the coordinator.
accept_forwards_to_priv_channels: true,
// The coordinator automatically accepts any inbound channels if they adhere to its channel
// preferences.
manually_accept_inbound_channels: false,
..Default::default()
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/shadow.rs | crates/xxi-node/src/shadow.rs | use crate::node::Storage;
use crate::on_chain_wallet::BdkStorage;
use crate::on_chain_wallet::OnChainWallet;
use anyhow::Result;
use bdk::chain::tx_graph::CalculateFeeError;
use std::sync::Arc;
pub struct Shadow<D: BdkStorage, N: Storage> {
storage: Arc<N>,
wallet: Arc<OnChainWallet<D>>,
}
impl<D: BdkStorage, N: Storage> Shadow<D, N> {
pub fn new(storage: Arc<N>, wallet: Arc<OnChainWallet<D>>) -> Self {
Shadow { storage, wallet }
}
pub fn sync_transactions(&self) -> Result<()> {
let transactions = self.storage.all_transactions_without_fees()?;
tracing::debug!("Syncing {} shadow transactions", transactions.len());
let wallet = self.wallet.clone();
for transaction in transactions.iter() {
let txid = transaction.txid();
match wallet.get_transaction(&txid) {
Some(tx) => match wallet.calculate_fee(&tx) {
Ok(fee) => {
self.storage
.upsert_transaction(transaction.clone().with_fee(fee))?;
}
Err(e @ CalculateFeeError::NegativeFee(_)) => {
tracing::error!(%txid, "Failed to get fee: {e}");
}
Err(e @ CalculateFeeError::MissingTxOut(_)) => {
tracing::warn!(%txid, "Failed to get fee: {e}");
// TODO: We should consider calling `insert_txout` to add all the `TxOut`s
// that we don't own so that BDK can actually calculate the fee. Of course,
// the fee will be shared with other wallets if we don't own all the
// transaction inputs, and BDK won't be able to decide on the split.
}
},
None => {
tracing::warn!(%txid, "Failed to get transaction details");
}
};
}
Ok(())
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/lib.rs | crates/xxi-node/src/lib.rs | use crate::dlc::TracingLogger;
use crate::message_handler::TenTenOneMessageHandler;
use crate::networking::DynamicSocketDescriptor;
use dlc_custom_signer::CustomKeysManager;
use lightning::ln::peer_handler::ErroringMessageHandler;
use lightning::ln::peer_handler::IgnoringMessageHandler;
use std::fmt;
use std::sync::Arc;
mod blockchain;
mod dlc_custom_signer;
mod dlc_wallet;
mod fee_rate_estimator;
mod on_chain_wallet;
mod shadow;
pub mod bitcoin_conversion;
pub mod bitmex_client;
pub mod cfd;
pub mod commons;
pub mod config;
pub mod dlc;
pub mod dlc_message;
pub mod message_handler;
pub mod networking;
pub mod node;
pub mod seed;
pub mod storage;
pub mod transaction;
pub use commons::FundingFeeEvent;
pub use config::CONFIRMATION_TARGET;
pub use dlc::ContractDetails;
pub use dlc::DlcChannelDetails;
pub use lightning;
pub use on_chain_wallet::ConfirmationStatus;
pub use on_chain_wallet::FeeConfig;
pub use on_chain_wallet::TransactionDetails;
#[cfg(test)]
mod tests;
pub(crate) type PeerManager<D> = lightning::ln::peer_handler::PeerManager<
DynamicSocketDescriptor,
Arc<ErroringMessageHandler>,
Arc<IgnoringMessageHandler>,
Arc<TenTenOneMessageHandler>,
Arc<TracingLogger>,
Arc<TenTenOneMessageHandler>,
Arc<CustomKeysManager<D>>,
>;
#[derive(Debug, Clone, Copy)]
pub enum PaymentFlow {
Inbound,
Outbound,
}
impl fmt::Display for PaymentFlow {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PaymentFlow::Inbound => "Inbound".fmt(f),
PaymentFlow::Outbound => "Outbound".fmt(f),
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/bitcoin_conversion.rs | crates/xxi-node/src/bitcoin_conversion.rs | use bitcoin::consensus;
use bitcoin::hashes::Hash;
use bitcoin::psbt::PartiallySignedTransaction;
use bitcoin::secp256k1::ecdsa::Signature;
use bitcoin::Address;
use bitcoin::Block;
use bitcoin::BlockHash;
use bitcoin::Network;
use bitcoin::OutPoint;
use bitcoin::ScriptBuf;
use bitcoin::Sequence;
use bitcoin::Transaction;
use bitcoin::TxIn;
use bitcoin::TxOut;
use bitcoin::Txid;
use bitcoin::Witness;
use std::str::FromStr;
pub fn to_tx_30(tx: bitcoin_old::Transaction) -> Transaction {
let bytes = bitcoin_old::consensus::serialize(&tx);
consensus::deserialize(&bytes).expect("valid conversion")
}
pub fn to_tx_29(tx: Transaction) -> bitcoin_old::Transaction {
let bytes = consensus::serialize(&tx);
bitcoin_old::consensus::deserialize(&bytes).expect("valid conversion")
}
pub fn to_txin_30(txin: bitcoin_old::TxIn) -> TxIn {
let bitcoin_old::TxIn {
previous_output: bitcoin_old::OutPoint { txid, vout },
script_sig,
sequence,
witness,
} = txin;
let txid = to_txid_30(txid);
let previous_output = OutPoint { txid, vout };
let script_sig = to_script_30(script_sig);
let sequence = Sequence(sequence.0);
let witness = Witness::from_slice(&witness.to_vec());
TxIn {
previous_output,
script_sig,
sequence,
witness,
}
}
pub fn to_outpoint_30(outpoint: bitcoin_old::OutPoint) -> OutPoint {
let txid = to_txid_30(outpoint.txid);
OutPoint {
txid,
vout: outpoint.vout,
}
}
pub fn to_outpoint_29(outpoint: OutPoint) -> bitcoin_old::OutPoint {
let txid = to_txid_29(outpoint.txid);
bitcoin_old::OutPoint {
txid,
vout: outpoint.vout,
}
}
pub fn to_txout_30(txout: bitcoin_old::TxOut) -> TxOut {
let value = txout.value;
let script_pubkey = to_script_30(txout.script_pubkey);
TxOut {
value,
script_pubkey,
}
}
pub fn to_txout_29(txout: TxOut) -> bitcoin_old::TxOut {
let value = txout.value;
let script_pubkey = to_script_29(txout.script_pubkey);
bitcoin_old::TxOut {
value,
script_pubkey,
}
}
pub fn to_script_30(script: bitcoin_old::Script) -> ScriptBuf {
ScriptBuf::from_bytes(script.to_bytes())
}
pub fn to_script_29(script: ScriptBuf) -> bitcoin_old::Script {
bitcoin_old::Script::from(script.to_bytes())
}
pub fn to_txid_30(txid: bitcoin_old::Txid) -> Txid {
Txid::from_slice(bitcoin_old::hashes::Hash::as_inner(&txid).as_slice())
.expect("valid conversion")
}
pub fn to_txid_29(txid: Txid) -> bitcoin_old::Txid {
bitcoin_old::hashes::Hash::from_slice(bitcoin::hashes::Hash::as_byte_array(&txid).as_slice())
.expect("valid conversion")
}
pub fn to_address_29(address: Address) -> bitcoin_old::Address {
let s = address.to_string();
bitcoin_old::Address::from_str(&s).expect("valid address")
}
pub fn to_block_29(block: Block) -> bitcoin_old::Block {
let bytes = consensus::serialize(&block);
bitcoin_old::consensus::deserialize(&bytes).expect("valid conversion")
}
pub fn to_block_hash_30(block_hash: bitcoin_old::BlockHash) -> BlockHash {
Hash::from_slice(bitcoin_old::hashes::Hash::as_inner(&block_hash)).expect("valid conversion")
}
pub fn to_block_hash_29(block_hash: BlockHash) -> bitcoin_old::BlockHash {
bitcoin_old::hashes::Hash::from_slice(Hash::as_byte_array(&block_hash))
.expect("valid conversion")
}
pub fn to_network_29(network: Network) -> bitcoin_old::Network {
match network {
Network::Bitcoin => bitcoin_old::Network::Bitcoin,
Network::Testnet => bitcoin_old::Network::Testnet,
Network::Signet => bitcoin_old::Network::Signet,
Network::Regtest => bitcoin_old::Network::Regtest,
_ => unreachable!(),
}
}
pub fn to_psbt_30(
psbt: bitcoin_old::psbt::PartiallySignedTransaction,
) -> PartiallySignedTransaction {
let bytes = bitcoin_old::consensus::serialize(&psbt);
PartiallySignedTransaction::deserialize(&bytes).expect("valid conversion")
}
pub fn to_psbt_29(
psbt: PartiallySignedTransaction,
) -> bitcoin_old::psbt::PartiallySignedTransaction {
let bytes = psbt.serialize();
bitcoin_old::consensus::deserialize(&bytes).expect("valid conversion")
}
pub fn to_secp_pk_30(pk: bitcoin_old::secp256k1::PublicKey) -> bitcoin::secp256k1::PublicKey {
let pk = pk.serialize();
bitcoin::secp256k1::PublicKey::from_slice(&pk).expect("valid conversion")
}
pub fn to_secp_pk_29(pk: bitcoin::secp256k1::PublicKey) -> bitcoin_old::secp256k1::PublicKey {
let pk = pk.serialize();
bitcoin_old::secp256k1::PublicKey::from_slice(&pk).expect("valid conversion")
}
pub fn to_xonly_pk_30(pk: bitcoin_old::XOnlyPublicKey) -> bitcoin::secp256k1::XOnlyPublicKey {
let pk = pk.serialize();
bitcoin::secp256k1::XOnlyPublicKey::from_slice(&pk).expect("valid conversion")
}
pub fn to_xonly_pk_29(pk: bitcoin::secp256k1::XOnlyPublicKey) -> bitcoin_old::XOnlyPublicKey {
let pk = pk.serialize();
bitcoin_old::XOnlyPublicKey::from_slice(&pk).expect("valid conversion")
}
pub fn to_secp_sk_30(sk: bitcoin_old::secp256k1::SecretKey) -> bitcoin::secp256k1::SecretKey {
let sk = sk.secret_bytes();
bitcoin::secp256k1::SecretKey::from_slice(&sk).expect("valid conversion")
}
pub fn to_secp_sk_29(sk: bitcoin::secp256k1::SecretKey) -> bitcoin_old::secp256k1::SecretKey {
let sk = sk.secret_bytes();
bitcoin_old::secp256k1::SecretKey::from_slice(&sk).expect("valid conversion")
}
pub fn to_ecdsa_signature_30(signature: bitcoin_old::secp256k1::ecdsa::Signature) -> Signature {
let sig = signature.serialize_compact();
Signature::from_compact(&sig).expect("valid conversion")
}
pub fn to_ecdsa_signature_29(signature: Signature) -> bitcoin_old::secp256k1::ecdsa::Signature {
let sig = signature.serialize_compact();
bitcoin_old::secp256k1::ecdsa::Signature::from_compact(&sig).expect("valid conversion")
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/cfd.rs | crates/xxi-node/src/cfd.rs | use crate::commons::Direction;
use anyhow::Context;
use anyhow::Result;
use bitcoin::Amount;
use bitcoin::Denomination;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use rust_decimal_macros::dec;
use std::ops::Neg;
pub const BTCUSD_MAX_PRICE: u64 = 1_048_575;
/// Calculate the collateral in sats.
pub fn calculate_margin(open_price: Decimal, quantity: f32, leverage: f32) -> Amount {
let quantity = Decimal::try_from(quantity).expect("quantity to fit into decimal");
let leverage = Decimal::try_from(leverage).expect("leverage to fix into decimal");
if open_price == Decimal::ZERO || leverage == Decimal::ZERO {
// just to avoid div by 0 errors
return Amount::ZERO;
}
let margin = quantity / (open_price * leverage);
// TODO: Shift the decimal without going into float
let margin =
margin.round_dp_with_strategy(8, rust_decimal::RoundingStrategy::MidpointAwayFromZero);
let margin = margin.to_f64().expect("collateral to fit into f64");
bitcoin::Amount::from_btc(margin).expect("collateral to fit in amount")
}
/// Calculate leverage.
pub fn calculate_leverage(quantity: Decimal, margin: Amount, open_price: Decimal) -> Decimal {
let margin_btc = Decimal::try_from(margin.to_btc()).expect("to fit");
quantity
.checked_div(margin_btc * open_price)
// We use a leverage of 10_000 to represent a kind of maximum leverage that we can work
// with.
.unwrap_or(Decimal::TEN * Decimal::ONE_THOUSAND)
}
/// Calculate the quantity from price, collateral and leverage Margin in sats, calculation in BTC
pub fn calculate_quantity(opening_price: f32, margin: u64, leverage: f32) -> f32 {
let margin_amount = bitcoin::Amount::from_sat(margin);
let margin = Decimal::try_from(margin_amount.to_float_in(Denomination::Bitcoin))
.expect("collateral to fit into decimal");
let open_price = Decimal::try_from(opening_price).expect("price to fit into decimal");
let leverage = Decimal::try_from(leverage).expect("leverage to fit into decimal");
let quantity = margin * open_price * leverage;
quantity.to_f32().expect("quantity to fit into f32")
}
pub fn calculate_long_bankruptcy_price(leverage: Decimal, price: Decimal) -> Decimal {
calculate_long_liquidation_price(leverage, price, Decimal::ZERO)
}
pub fn calculate_long_liquidation_price(
leverage: Decimal,
price: Decimal,
maintenance_margin_rate: Decimal,
) -> Decimal {
price * leverage / (leverage + Decimal::ONE - (maintenance_margin_rate * leverage))
}
pub fn calculate_short_bankruptcy_price(leverage: Decimal, price: Decimal) -> Decimal {
calculate_short_liquidation_price(leverage, price, Decimal::ZERO)
}
/// Calculate liquidation price for the party going short.
pub fn calculate_short_liquidation_price(
leverage: Decimal,
price: Decimal,
maintenance_margin_rate: Decimal,
) -> Decimal {
// If the leverage is equal to 1, the liquidation price will go towards infinity
if leverage == Decimal::ONE {
return Decimal::from(BTCUSD_MAX_PRICE);
}
price * leverage / (leverage - Decimal::ONE + (maintenance_margin_rate * leverage))
}
/// Compute the payout for the given CFD parameters at a particular `closing_price`.
///
/// The `opening_price` of the position is the weighted opening price per quantity.
/// The `opening_price` is aggregated from all the execution prices of the orders that filled the
/// position; weighted by quantity. The closing price is the best bid/ask according to the orderbook
/// at a certain time.
///
/// Both leverages are supplied so that the total margin can be calculated and the PnL is capped by
/// the total margin available.
pub fn calculate_pnl(
opening_price: Decimal,
closing_price: Decimal,
quantity: f32,
direction: Direction,
initial_margin_long: u64,
initial_margin_short: u64,
) -> Result<i64> {
let uncapped_pnl_long = {
let quantity = Decimal::try_from(quantity).expect("quantity to fit into decimal");
let uncapped_pnl = match opening_price != Decimal::ZERO && closing_price != Decimal::ZERO {
true => (quantity / opening_price) - (quantity / closing_price),
false => dec!(0.0),
};
let uncapped_pnl = uncapped_pnl * dec!(100_000_000);
// we need to round to zero or else we might lose some sats somewhere
uncapped_pnl.round_dp_with_strategy(0, rust_decimal::RoundingStrategy::MidpointTowardZero)
};
let short_margin = Decimal::from_u64(initial_margin_short).context("be able to parse u64")?;
let long_margin = Decimal::from_u64(initial_margin_long).context("to be abble to parse u64")?;
let pnl = match direction {
Direction::Long => {
let max_win = uncapped_pnl_long.min(short_margin);
if max_win.is_sign_negative() {
max_win.max(long_margin.neg())
} else {
max_win
}
}
Direction::Short => {
let max_win = uncapped_pnl_long.neg().min(long_margin);
if max_win.is_sign_negative() {
max_win.max(short_margin.neg())
} else {
max_win
}
}
};
pnl.to_i64().context("to be able to convert into i64")
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn given_position_when_price_same_then_zero_pnl() {
        let opening_price = Decimal::from(20000);
        let closing_price = Decimal::from(20000);
        let quantity = 1.0;
        let long_leverage = 2.0;
        let short_leverage = 1.0;

        let long_margin = calculate_margin(opening_price, quantity, long_leverage);
        let short_margin = calculate_margin(opening_price, quantity, short_leverage);

        let pnl_long = calculate_pnl(
            opening_price,
            closing_price,
            quantity,
            Direction::Long,
            long_margin.to_sat(),
            short_margin.to_sat(),
        )
        .unwrap();
        let pnl_short = calculate_pnl(
            opening_price,
            closing_price,
            quantity,
            Direction::Short,
            long_margin.to_sat(),
            short_margin.to_sat(),
        )
        .unwrap();

        // No price movement means no PnL for either side.
        assert_eq!(pnl_long, 0);
        assert_eq!(pnl_short, 0);
    }

    #[test]
    fn given_long_position_when_price_doubles_then_we_get_double() {
        let opening_price = Decimal::from(20000);
        let closing_price = Decimal::from(40000);
        let quantity = 100.0;
        let long_leverage = 2.0;
        let short_leverage = 1.0;

        let long_margin = calculate_margin(opening_price, quantity, long_leverage);
        let short_margin = calculate_margin(opening_price, quantity, short_leverage);

        let pnl_long = calculate_pnl(
            opening_price,
            closing_price,
            quantity,
            Direction::Long,
            long_margin.to_sat(),
            short_margin.to_sat(),
        )
        .unwrap();

        assert_eq!(pnl_long, 250000);
    }

    #[test]
    fn given_long_position_when_price_halfs_then_we_loose_all() {
        let opening_price = Decimal::from(20000);
        let closing_price = Decimal::from(10000);
        let quantity = 100.0;
        let long_leverage = 2.0;
        let short_leverage = 1.0;

        let long_margin = calculate_margin(opening_price, quantity, long_leverage);
        let short_margin = calculate_margin(opening_price, quantity, short_leverage);

        let pnl_long = calculate_pnl(
            opening_price,
            closing_price,
            quantity,
            Direction::Long,
            long_margin.to_sat(),
            short_margin.to_sat(),
        )
        .unwrap();

        // This is a liquidation, our margin is consumed by the loss
        assert_eq!(pnl_long, -250000);
    }

    #[test]
    fn given_short_position_when_price_doubles_then_we_loose_all() {
        let opening_price = Decimal::from(20000);
        let closing_price = Decimal::from(40000);
        let quantity = 100.0;
        let long_leverage = 1.0;
        let short_leverage = 2.0;

        let long_margin = calculate_margin(opening_price, quantity, long_leverage);
        let short_margin = calculate_margin(opening_price, quantity, short_leverage);

        let pnl_short = calculate_pnl(
            opening_price,
            closing_price,
            quantity,
            Direction::Short,
            long_margin.to_sat(),
            short_margin.to_sat(),
        )
        .unwrap();

        // This is a liquidation, the short margin is consumed by the loss.
        assert_eq!(pnl_short, -250000);
    }

    #[test]
    fn given_short_position_when_price_halfs_then_we_get_double() {
        let opening_price = Decimal::from(20000);
        let closing_price = Decimal::from(10000);
        let quantity = 100.0;
        let long_leverage = 1.0;
        let short_leverage = 2.0;

        let long_margin = calculate_margin(opening_price, quantity, long_leverage);
        let short_margin = calculate_margin(opening_price, quantity, short_leverage);

        let pnl_short = calculate_pnl(
            opening_price,
            closing_price,
            quantity,
            Direction::Short,
            long_margin.to_sat(),
            short_margin.to_sat(),
        )
        .unwrap();

        // The short's win is capped at the counterparty's (long) margin.
        assert_eq!(pnl_short, 500000);
    }

    #[test]
    fn given_long_position_when_price_10_pc_up_then_18pc_profit() {
        let opening_price = Decimal::from(20000);
        let closing_price = Decimal::from(22000);
        let quantity = 20000.0;
        let long_leverage = 2.0;
        let short_leverage = 1.0;

        let long_margin = calculate_margin(opening_price, quantity, long_leverage);
        let short_margin = calculate_margin(opening_price, quantity, short_leverage);

        let pnl_long = calculate_pnl(
            opening_price,
            closing_price,
            quantity,
            Direction::Long,
            long_margin.to_sat(),
            short_margin.to_sat(),
        )
        .unwrap();

        // Value taken from our CFD hedging model sheet
        assert_eq!(pnl_long, 9_090_909);
    }

    #[test]
    fn given_short_position_when_price_10_pc_up_then_18pc_loss() {
        let opening_price = Decimal::from(20000);
        let closing_price = Decimal::from(22000);
        let quantity = 20000.0;
        let long_leverage = 2.0;
        let short_leverage = 1.0;

        let long_margin = calculate_margin(opening_price, quantity, long_leverage);
        let short_margin = calculate_margin(opening_price, quantity, short_leverage);

        let pnl_short = calculate_pnl(
            opening_price,
            closing_price,
            quantity,
            Direction::Short,
            long_margin.to_sat(),
            short_margin.to_sat(),
        )
        .unwrap();

        // Value taken from our CFD hedging model sheet
        assert_eq!(pnl_short, -9_090_909);
    }

    #[test]
    fn given_long_position_when_price_10_pc_down_then_22pc_loss() {
        let opening_price = Decimal::from(20000);
        let closing_price = Decimal::from(18000);
        let quantity = 20000.0;
        let long_leverage = 2.0;
        let short_leverage = 1.0;

        let long_margin = calculate_margin(opening_price, quantity, long_leverage);
        let short_margin = calculate_margin(opening_price, quantity, short_leverage);

        let pnl_long = calculate_pnl(
            opening_price,
            closing_price,
            quantity,
            Direction::Long,
            long_margin.to_sat(),
            short_margin.to_sat(),
        )
        .unwrap();

        // Value taken from our CFD hedging model sheet
        assert_eq!(pnl_long, -11_111_111);
    }

    #[test]
    fn given_short_position_when_price_10_pc_down_then_22pc_profit() {
        let opening_price = Decimal::from(20000);
        let closing_price = Decimal::from(18000);
        let quantity = 20000.0;
        let long_leverage = 2.0;
        let short_leverage = 1.0;

        let long_margin = calculate_margin(opening_price, quantity, long_leverage);
        let short_margin = calculate_margin(opening_price, quantity, short_leverage);

        let pnl_short = calculate_pnl(
            opening_price,
            closing_price,
            quantity,
            Direction::Short,
            long_margin.to_sat(),
            short_margin.to_sat(),
        )
        .unwrap();

        // Value taken from our CFD hedging model sheet
        assert_eq!(pnl_short, 11_111_111);
    }

    #[test]
    fn given_short_position_when_price_0() {
        let opening_price = Decimal::from(20000);
        let closing_price = Decimal::from(0);
        let quantity = 20000.0;
        let long_leverage = 2.0;
        let short_leverage = 1.0;

        let long_margin = calculate_margin(opening_price, quantity, long_leverage);
        let short_margin = calculate_margin(opening_price, quantity, short_leverage);

        let pnl_short = calculate_pnl(
            opening_price,
            closing_price,
            quantity,
            Direction::Short,
            long_margin.to_sat(),
            short_margin.to_sat(),
        )
        .unwrap();

        // Value taken from our CFD hedging model sheet
        assert_eq!(pnl_short, 0);
    }

    #[test]
    fn given_uneven_price_should_round_down() {
        let opening_price = Decimal::from(1000);
        let closing_price = Decimal::from(1234);
        let quantity = 10.0;
        let long_leverage = 2.0;
        let short_leverage = 1.0;

        let long_margin = calculate_margin(opening_price, quantity, long_leverage);
        let short_margin = calculate_margin(opening_price, quantity, short_leverage);

        let pnl_long = calculate_pnl(
            opening_price,
            closing_price,
            quantity,
            Direction::Long,
            long_margin.to_sat(),
            short_margin.to_sat(),
        )
        .unwrap();

        // --> pnl should be ==> quantity / ((1/opening_price)-(1/closing_price))
        // should be 189,627.23 Sats , or 189,628 Sats away from zero
        assert_eq!(pnl_long, 189_627);
    }

    #[test]
    fn pnl_example_calculation() {
        let opening_price = Decimal::from(30_000);
        let closing_price = Decimal::from(20_002);
        let quantity = 60_000.0;
        let long_leverage = 2.0;
        let short_leverage = 2.0;

        let long_margin = calculate_margin(opening_price, quantity, long_leverage);
        let short_margin = calculate_margin(opening_price, quantity, short_leverage);

        let pnl_short = calculate_pnl(
            opening_price,
            closing_price,
            quantity,
            Direction::Short,
            long_margin.to_sat(),
            short_margin.to_sat(),
        )
        .unwrap();

        // --> pnl should be ==> quantity / ((1/opening_price)-(1/closing_price))
        // should be 0.99970003 BTC or 99970003 Sats
        assert_eq!(pnl_short, 99_970_003);
    }

    #[test]
    fn assert_to_not_lose_more_than_margin_when_short() {
        let opening_price = Decimal::from(30_000);
        let closing_price = Decimal::from(100_000);
        let quantity = 60_000.0;
        let long_leverage = 2.0;
        let short_leverage = 3.0;

        let long_margin = calculate_margin(opening_price, quantity, long_leverage);
        let short_margin = calculate_margin(opening_price, quantity, short_leverage);

        let pnl_short = calculate_pnl(
            opening_price,
            closing_price,
            quantity,
            Direction::Short,
            long_margin.to_sat(),
            short_margin.to_sat(),
        )
        .unwrap();

        // The loss is capped at exactly the short's own margin.
        assert_eq!(pnl_short, (short_margin.to_sat() as i64).neg());
    }

    #[test]
    fn assert_to_not_lose_more_than_margin_when_long() {
        let opening_price = Decimal::from(30_000);
        let closing_price = Decimal::from(1);
        let quantity = 60_000.0;
        let long_leverage = 5.0;
        let short_leverage = 1.0;

        let long_margin = calculate_margin(opening_price, quantity, long_leverage);
        let short_margin = calculate_margin(opening_price, quantity, short_leverage);

        let pnl_long = calculate_pnl(
            opening_price,
            closing_price,
            quantity,
            Direction::Long,
            long_margin.to_sat(),
            short_margin.to_sat(),
        )
        .unwrap();

        // The loss is capped at exactly the long's own margin.
        assert_eq!(pnl_long, (long_margin.to_sat() as i64).neg());
    }

    #[test]
    pub fn test_calculate_long_liquidation_price() {
        let leverage = dec!(2);
        let price = dec!(30_000);
        let maintenance_margin_rate = dec!(0);

        let liquidation_price =
            calculate_long_liquidation_price(leverage, price, maintenance_margin_rate);
        let bankruptcy_price = calculate_long_bankruptcy_price(leverage, price);

        // Without a maintenance margin the liquidation price equals the bankruptcy price.
        assert_eq!(dec!(20_000), liquidation_price);
        assert_eq!(liquidation_price, bankruptcy_price);
    }

    #[test]
    pub fn test_calculate_short_liquidation_price() {
        let leverage = dec!(2);
        let price = dec!(30_000);
        let maintenance_margin_rate = dec!(0);

        let liquidation_price =
            calculate_short_liquidation_price(leverage, price, maintenance_margin_rate);
        let bankruptcy_price = calculate_short_bankruptcy_price(leverage, price);

        // Without a maintenance margin the liquidation price equals the bankruptcy price.
        assert_eq!(dec!(60_000), liquidation_price);
        assert_eq!(liquidation_price, bankruptcy_price);
    }

    #[test]
    pub fn test_calculate_long_liquidation_price_with_maintenance_margin_rate() {
        let leverage = dec!(2);
        let price = dec!(30_000);
        let maintenance_margin_rate = dec!(0.1);

        let liquidation_price =
            calculate_long_liquidation_price(leverage, price, maintenance_margin_rate);
        let bankruptcy_price = calculate_long_bankruptcy_price(leverage, price);

        // A maintenance margin moves the liquidation price above the bankruptcy price.
        assert_eq!(dec!(21428.571428571428571428571429), liquidation_price);
        assert_ne!(liquidation_price, bankruptcy_price);
    }

    #[test]
    pub fn test_calculate_short_liquidation_price_with_maintenance_margin_rate() {
        let leverage = dec!(2);
        let price = dec!(30_000);
        let maintenance_margin_rate = dec!(0.1);

        let liquidation_price =
            calculate_short_liquidation_price(leverage, price, maintenance_margin_rate);
        let bankruptcy_price = calculate_short_bankruptcy_price(leverage, price);

        // A maintenance margin moves the liquidation price below the bankruptcy price.
        assert_eq!(dec!(50000), liquidation_price);
        assert_ne!(liquidation_price, bankruptcy_price);
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/seed.rs | crates/xxi-node/src/seed.rs | use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use bip39::Language;
use bip39::Mnemonic;
use bitcoin::bip32::ExtendedPrivKey;
use bitcoin::Network;
use hkdf::Hkdf;
use sha2::Sha256;
use std::fs::create_dir_all;
use std::path::Path;
/// BIP-39 mnemonic-backed wallet seed; all key material is derived from it.
#[derive(Clone, PartialEq, Eq)]
pub struct Bip39Seed {
    // English mnemonic; only its raw entropy is persisted to disk (see `write_to`).
    mnemonic: Mnemonic,
}
impl Bip39Seed {
    /// Generate a fresh seed from a new 12-word English mnemonic using the
    /// thread-local RNG.
    pub fn new() -> Result<Self> {
        let mut rng = rand::thread_rng();
        let word_count = 12;
        let mnemonic = Mnemonic::generate_in_with(&mut rng, Language::English, word_count)?;
        Ok(Self { mnemonic })
    }
    /// Restore a [`Bip39Seed`] from a mnemonic. Writes the seed to the given path.
    ///
    /// Fails if `target_seed_file` already exists (see `write_to`).
    pub fn restore_from_mnemonic(seed_words: &str, target_seed_file: &Path) -> Result<Self> {
        let mnemonic = Mnemonic::parse(seed_words)?;
        let seed = Self { mnemonic };
        // Ensure parent directory exists
        if let Some(parent) = target_seed_file.parent() {
            create_dir_all(parent)?;
        }
        seed.write_to(target_seed_file)
            .context("cannot write to file")?;
        Ok(seed)
    }
    /// Initialise a [`Bip39Seed`] from a path.
    /// Generates new seed if there was no seed found in the given path
    pub fn initialize(seed_file: &Path) -> Result<Self> {
        // Ensure parent directory exists
        if let Some(parent) = seed_file.parent() {
            create_dir_all(parent)?;
        }
        let seed = if !seed_file.exists() {
            tracing::info!("No seed found. Generating new seed");
            let seed = Self::new()?;
            seed.write_to(seed_file)?;
            seed
        } else {
            Bip39Seed::read_from(seed_file)?
        };
        Ok(seed)
    }
    /// The 64-byte BIP-39 seed derived from the mnemonic.
    fn seed(&self) -> [u8; 64] {
        // passing an empty string here is the expected argument if the seed should not be
        // additionally password protected (according to https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki#from-mnemonic-to-seed)
        self.mnemonic.to_seed_normalized("")
    }
    /// Derive the 32-byte Lightning seed via HKDF-SHA256 with the
    /// `LIGHTNING_WALLET_SEED` info label. Changing the label breaks existing wallets.
    pub fn lightning_seed(&self) -> LightningSeed {
        let mut seed = [0u8; 32];
        Hkdf::<Sha256>::new(None, &self.seed())
            .expand(b"LIGHTNING_WALLET_SEED", &mut seed)
            .expect("array is of correct length");
        seed
    }
    /// Derive the 64-byte on-chain wallet seed via HKDF-SHA256 with the
    /// `BITCOIN_WALLET_SEED` info label. Changing the label breaks existing wallets.
    pub fn wallet_seed(&self) -> WalletSeed {
        let mut ext_priv_key_seed = [0u8; 64];
        Hkdf::<Sha256>::new(None, &self.seed())
            .expand(b"BITCOIN_WALLET_SEED", &mut ext_priv_key_seed)
            .expect("array is of correct length");
        WalletSeed {
            seed: ext_priv_key_seed,
        }
    }
    /// The mnemonic as a list of words, e.g. for displaying a backup phrase.
    pub fn get_seed_phrase(&self) -> Vec<String> {
        self.mnemonic.word_iter().map(|word| word.into()).collect()
    }
    // Read the entropy used to generate Mnemonic from disk
    fn read_from(path: &Path) -> Result<Self> {
        let bytes = std::fs::read(path)?;
        let seed: Bip39Seed = TryInto::try_into(bytes)?;
        Ok(seed)
    }
    // Store the entropy used to generate Mnemonic on disk.
    // Refuses to clobber an existing file: overwriting would irrecoverably destroy
    // the previous wallet's seed.
    fn write_to(&self, path: &Path) -> Result<()> {
        if path.exists() {
            let path = path.display();
            bail!("Refusing to overwrite file at {path}")
        }
        std::fs::write(path, self.mnemonic.to_entropy())?;
        Ok(())
    }
}
/// 64-byte seed for the on-chain wallet, produced by [`Bip39Seed::wallet_seed`].
pub struct WalletSeed {
    seed: [u8; 64],
}
impl WalletSeed {
    /// Derive the BIP-32 master extended private key for `network` from this seed.
    pub fn derive_extended_priv_key(&self, network: Network) -> Result<ExtendedPrivKey> {
        Ok(ExtendedPrivKey::new_master(network, &self.seed)?)
    }
}
impl TryFrom<Vec<u8>> for Bip39Seed {
    type Error = anyhow::Error;

    /// Reconstruct the seed from raw mnemonic entropy bytes (the on-disk format).
    fn try_from(entropy: Vec<u8>) -> Result<Self, Self::Error> {
        Mnemonic::from_entropy(&entropy)
            .map(|mnemonic| Bip39Seed { mnemonic })
            .map_err(Into::into)
    }
}
impl From<Mnemonic> for Bip39Seed {
fn from(mnemonic: Mnemonic) -> Self {
Bip39Seed { mnemonic }
}
}
/// 32-byte seed for the Lightning wallet, produced by [`Bip39Seed::lightning_seed`].
pub type LightningSeed = [u8; 32];
#[cfg(test)]
mod tests {
    use crate::seed::Bip39Seed;
    use bip39::Mnemonic;
    use std::env::temp_dir;
    #[test]
    fn create_bip39_seed() {
        let seed = Bip39Seed::new().expect("seed to be generated");
        let phrase = seed.get_seed_phrase();
        // `Bip39Seed::new` always generates a 12-word mnemonic.
        assert_eq!(12, phrase.len());
    }
    #[test]
    fn reinitialised_seed_is_the_same() {
        let mut path = temp_dir();
        path.push("seed");
        // The second call must read back the file written by the first call.
        let seed_1 = Bip39Seed::initialize(&path).unwrap();
        let seed_2 = Bip39Seed::initialize(&path).unwrap();
        assert_eq!(
            seed_1.mnemonic, seed_2.mnemonic,
            "Reinitialised wallet should contain the same mnemonic"
        );
        assert_eq!(
            seed_1.seed(),
            seed_2.seed(),
            "Seed derived from mnemonic should be the same"
        );
    }
    #[test]
    fn deterministic_seed() {
        let mnemonic = Mnemonic::parse(
            "rule segment glance broccoli glove seminar plunge element artist stock clown thank",
        )
        .unwrap();
        let seed = Bip39Seed::from(mnemonic);
        let wallet_seed = seed.seed();
        let ln_seed = seed.lightning_seed();
        // Golden values: guard against accidental changes to the derivation scheme,
        // which would silently break existing wallets.
        assert_eq!(hex::encode(wallet_seed), "32ea66d60c979ec4392e6364ce3debc38823d33864dfdb31b8aef227ee60813b850be5af70a758d93e50faf9f8b9eecea0c7e928fad9a2edb6a2af1f8c1a2bfd");
        assert_eq!(
            hex::encode(ln_seed),
            "1cf21ab62bf5a5ee40896158cbbc18b9ad75805e1824a252d8060c6c075b228f"
        );
    }
    #[test]
    fn restore_same_seed_from_exported_mnemonic() {
        // Round-trip: export the phrase, restore from it, compare seeds.
        let seed = Bip39Seed::new().unwrap();
        let seed_words = seed.get_seed_phrase().join(" ");
        let restore_path = &temp_dir().join("seed_restored");
        let seed_restored = Bip39Seed::restore_from_mnemonic(&seed_words, restore_path).unwrap();
        assert!(
            seed == seed_restored,
            "Restored seed should be the same as the original seed"
        );
        std::fs::remove_file(restore_path).unwrap(); // clear the temp file
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/message_handler.rs | crates/xxi-node/src/message_handler.rs | use crate::bitcoin_conversion::to_secp_pk_30;
use crate::commons::FilledWith;
use crate::commons::Order;
use crate::commons::OrderReason;
use crate::node::event::NodeEvent;
use crate::node::event::NodeEventHandler;
use anyhow::Result;
use bitcoin::SignedAmount;
use dlc_manager::ReferenceId;
use dlc_messages::channel::AcceptChannel;
use dlc_messages::channel::CollaborativeCloseOffer;
use dlc_messages::channel::OfferChannel;
use dlc_messages::channel::Reject;
use dlc_messages::channel::RenewAccept;
use dlc_messages::channel::RenewConfirm;
use dlc_messages::channel::RenewFinalize;
use dlc_messages::channel::RenewOffer;
use dlc_messages::channel::RenewRevoke;
use dlc_messages::channel::SettleAccept;
use dlc_messages::channel::SettleConfirm;
use dlc_messages::channel::SettleFinalize;
use dlc_messages::channel::SettleOffer;
use dlc_messages::channel::SignChannel;
use dlc_messages::field_read;
use dlc_messages::field_write;
use dlc_messages::impl_dlc_writeable;
use dlc_messages::segmentation;
use dlc_messages::segmentation::get_segments;
use dlc_messages::segmentation::segment_reader::SegmentReader;
use dlc_messages::segmentation::SegmentChunk;
use dlc_messages::segmentation::SegmentStart;
use dlc_messages::ser_impls::read_i64;
use dlc_messages::ser_impls::read_string;
use dlc_messages::ser_impls::write_i64;
use dlc_messages::ser_impls::write_string;
use dlc_messages::ChannelMessage;
use dlc_messages::Message;
use lightning::events::OnionMessageProvider;
use lightning::ln::features::InitFeatures;
use lightning::ln::features::NodeFeatures;
use lightning::ln::msgs;
use lightning::ln::msgs::DecodeError;
use lightning::ln::msgs::LightningError;
use lightning::ln::msgs::OnionMessage;
use lightning::ln::msgs::OnionMessageHandler;
use lightning::ln::peer_handler::CustomMessageHandler;
use lightning::ln::wire::CustomMessageReader;
use lightning::ln::wire::Type;
use lightning::util::ser::Readable;
use lightning::util::ser::Writeable;
use lightning::util::ser::Writer;
use lightning::util::ser::MAX_BUF_SIZE;
use rust_decimal::Decimal;
use secp256k1_zkp::PublicKey;
use serde::Deserialize;
use serde::Serialize;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::fmt::Display;
use std::io::Cursor;
use std::str::FromStr;
use std::sync::Arc;
use std::sync::Mutex;
use time::OffsetDateTime;
use uuid::Uuid;
/// TenTenOneMessageHandler is used to send and receive messages through the custom
/// message handling mechanism of the LDK. It also handles message segmentation
/// by splitting large messages when sending and re-constructing them when
/// receiving.
pub struct TenTenOneMessageHandler {
    // Publishes node events (e.g. peer connected) to subscribers.
    handler: Arc<NodeEventHandler>,
    // Outbound queue; drained by the LDK via `get_and_clear_pending_msg`.
    msg_events: Mutex<VecDeque<(PublicKey, WireMessage)>>,
    // Fully re-assembled inbound messages awaiting processing.
    msg_received: Mutex<Vec<(PublicKey, TenTenOneMessage)>>,
    // Per-peer state for re-assembling segmented messages.
    segment_readers: Mutex<HashMap<PublicKey, SegmentReader>>,
}
impl TenTenOneMessageHandler {
pub fn new(handler: Arc<NodeEventHandler>) -> Self {
Self {
handler,
msg_events: Mutex::new(Default::default()),
msg_received: Mutex::new(vec![]),
segment_readers: Mutex::new(Default::default()),
}
}
}
/// Copied from the IgnoringMessageHandler
impl OnionMessageProvider for TenTenOneMessageHandler {
    // This handler never originates onion messages.
    fn next_onion_message_for_peer(&self, _peer_node_id: PublicKey) -> Option<OnionMessage> {
        None
    }
}
/// Copied primarily from the IgnoringMessageHandler. Using the peer_connected hook to get notified
/// once a peer successfully connected. (This also includes that the Init Message has been processed
/// and the connection is ready to use).
impl OnionMessageHandler for TenTenOneMessageHandler {
    fn handle_onion_message(&self, _their_node_id: &PublicKey, _msg: &OnionMessage) {}
    /// Publishes [`NodeEvent::Connected`] so other components learn about the new peer.
    fn peer_connected(
        &self,
        their_node_id: &PublicKey,
        _init: &msgs::Init,
        inbound: bool,
    ) -> Result<(), ()> {
        tracing::info!(%their_node_id, inbound, "Peer connected!");
        self.handler.publish(NodeEvent::Connected {
            peer: to_secp_pk_30(*their_node_id),
        });
        Ok(())
    }
    fn peer_disconnected(&self, _their_node_id: &PublicKey) {}
    fn provided_node_features(&self) -> NodeFeatures {
        NodeFeatures::empty()
    }
    fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
        InitFeatures::empty()
    }
}
/// The unit actually sent over the wire: either a complete message, or one piece of
/// a message that exceeded [`MAX_BUF_SIZE`] and was split by `send_message`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum WireMessage {
    Message(TenTenOneMessage),
    SegmentStart(SegmentStart),
    SegmentChunk(SegmentChunk),
}
/// All 10101 application-level messages, wrapping the underlying rust-dlc channel
/// messages together with order metadata (order ids, fill details, funding fees).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(clippy::large_enum_variant)]
pub enum TenTenOneMessage {
    Reject(TenTenOneReject),
    Offer(TenTenOneOfferChannel),
    Accept(TenTenOneAcceptChannel),
    Sign(TenTenOneSignChannel),
    SettleOffer(TenTenOneSettleOffer),
    SettleAccept(TenTenOneSettleAccept),
    SettleConfirm(TenTenOneSettleConfirm),
    SettleFinalize(TenTenOneSettleFinalize),
    RenewOffer(TenTenOneRenewOffer),
    RenewAccept(TenTenOneRenewAccept),
    RenewConfirm(TenTenOneRenewConfirm),
    RenewFinalize(TenTenOneRenewFinalize),
    RenewRevoke(TenTenOneRenewRevoke),
    RolloverOffer(TenTenOneRolloverOffer),
    RolloverAccept(TenTenOneRolloverAccept),
    RolloverConfirm(TenTenOneRolloverConfirm),
    RolloverFinalize(TenTenOneRolloverFinalize),
    RolloverRevoke(TenTenOneRolloverRevoke),
    CollaborativeCloseOffer(TenTenOneCollaborativeCloseOffer),
}
/// Coarse classification of a [`TenTenOneMessage`], as produced by
/// [`TenTenOneMessage::get_tentenone_message_type`].
pub enum TenTenOneMessageType {
    /// Open channel, open, close or resize a position.
    Trade,
    /// Expired position.
    Expire,
    /// Liquidated position.
    Liquidate,
    /// Rollover position.
    Rollover,
    /// Reject or close channel.
    Other,
}
impl TenTenOneMessage {
    /// Classify this message by the protocol flow it belongs to.
    ///
    /// Settle messages are classified by their [`OrderReason`]: a manual close is a
    /// regular trade, while expiry and liquidation get their own categories.
    pub fn get_tentenone_message_type(&self) -> TenTenOneMessageType {
        match self {
            TenTenOneMessage::Offer(_)
            | TenTenOneMessage::Accept(_)
            | TenTenOneMessage::Sign(_)
            | TenTenOneMessage::RenewOffer(_)
            | TenTenOneMessage::RenewAccept(_)
            | TenTenOneMessage::RenewConfirm(_)
            | TenTenOneMessage::RenewFinalize(_)
            | TenTenOneMessage::RenewRevoke(_) => TenTenOneMessageType::Trade,
            TenTenOneMessage::SettleOffer(TenTenOneSettleOffer {
                order: Order { order_reason, .. },
                ..
            })
            | TenTenOneMessage::SettleAccept(TenTenOneSettleAccept { order_reason, .. })
            | TenTenOneMessage::SettleConfirm(TenTenOneSettleConfirm { order_reason, .. })
            | TenTenOneMessage::SettleFinalize(TenTenOneSettleFinalize { order_reason, .. }) => {
                match order_reason {
                    OrderReason::Manual => TenTenOneMessageType::Trade,
                    OrderReason::Expired => TenTenOneMessageType::Expire,
                    OrderReason::CoordinatorLiquidated | OrderReason::TraderLiquidated => {
                        TenTenOneMessageType::Liquidate
                    }
                }
            }
            TenTenOneMessage::RolloverOffer(_)
            | TenTenOneMessage::RolloverAccept(_)
            | TenTenOneMessage::RolloverConfirm(_)
            | TenTenOneMessage::RolloverFinalize(_)
            | TenTenOneMessage::RolloverRevoke(_) => TenTenOneMessageType::Rollover,
            // Deliberately exhaustive (no `_` arm): adding a `TenTenOneMessage`
            // variant must force a conscious classification here instead of
            // silently falling into `Other`.
            TenTenOneMessage::Reject(_) | TenTenOneMessage::CollaborativeCloseOffer(_) => {
                TenTenOneMessageType::Other
            }
        }
    }
}
/// Rejection of a previously received offer; wraps rust-dlc [`Reject`].
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct TenTenOneReject {
    pub reject: Reject,
}
/// Channel open offer, carrying the match (`filled_with`) it executes.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TenTenOneOfferChannel {
    pub filled_with: FilledWith,
    pub offer_channel: OfferChannel,
}
/// Acceptance of a channel open offer, tagged with the order it belongs to.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct TenTenOneAcceptChannel {
    pub order_id: Uuid,
    pub accept_channel: AcceptChannel,
}
/// Final signatures for a channel open, tagged with the order it belongs to.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct TenTenOneSignChannel {
    pub order_id: Uuid,
    pub sign_channel: SignChannel,
}
/// Offer to settle (close) a position, carrying the triggering order and its match.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TenTenOneSettleOffer {
    pub order: Order,
    pub filled_with: FilledWith,
    pub settle_offer: SettleOffer,
}
/// Acceptance of a settle offer, tagged with order id and reason.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TenTenOneSettleAccept {
    pub order_reason: OrderReason,
    pub order_id: Uuid,
    pub settle_accept: SettleAccept,
}
/// Confirmation step of the settle flow, tagged with order id and reason.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TenTenOneSettleConfirm {
    pub order_reason: OrderReason,
    pub order_id: Uuid,
    pub settle_confirm: SettleConfirm,
}
/// Final step of the settle flow, tagged with order id and reason.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TenTenOneSettleFinalize {
    pub order_reason: OrderReason,
    pub order_id: Uuid,
    pub settle_finalize: SettleFinalize,
}
/// Offer to renew the channel contract, carrying the match it executes.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TenTenOneRenewOffer {
    pub filled_with: FilledWith,
    pub renew_offer: RenewOffer,
}
/// Acceptance of a renew offer, tagged with the order it belongs to.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct TenTenOneRenewAccept {
    pub order_id: Uuid,
    pub renew_accept: RenewAccept,
}
/// Confirmation step of the renew flow, tagged with the order it belongs to.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct TenTenOneRenewConfirm {
    pub order_id: Uuid,
    pub renew_confirm: RenewConfirm,
}
/// Final step of the renew flow, tagged with the order it belongs to.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct TenTenOneRenewFinalize {
    pub order_id: Uuid,
    pub renew_finalize: RenewFinalize,
}
/// Revocation step of the renew flow, tagged with the order it belongs to.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct TenTenOneRenewRevoke {
    pub order_id: Uuid,
    pub renew_revoke: RenewRevoke,
}
/// Offer to roll over a position, reusing the rust-dlc renew protocol.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TenTenOneRolloverOffer {
    pub renew_offer: RenewOffer,
    // TODO: The funding fee should be extracted from the `RenewOffer`, but this is more
    // convenient.
    pub funding_fee_events: Vec<FundingFeeEvent>,
}
/// A funding fee applied as part of a rollover (see [`TenTenOneRolloverOffer`]).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FundingFeeEvent {
    pub due_date: OffsetDateTime,
    pub funding_rate: Decimal,
    pub price: Decimal,
    // Signed amount, serialized as satoshis; NOTE(review): which sign means which
    // party pays is not visible here — confirm against the producer of this event.
    #[serde(with = "bitcoin::amount::serde::as_sat")]
    pub funding_fee: SignedAmount,
}
/// Acceptance of a rollover offer; wraps the rust-dlc renew acceptance.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct TenTenOneRolloverAccept {
    pub renew_accept: RenewAccept,
}
/// Confirmation step of the rollover flow.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct TenTenOneRolloverConfirm {
    pub renew_confirm: RenewConfirm,
}
/// Final step of the rollover flow.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct TenTenOneRolloverFinalize {
    pub renew_finalize: RenewFinalize,
}
/// Revocation step of the rollover flow.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct TenTenOneRolloverRevoke {
    pub renew_revoke: RenewRevoke,
}
/// Offer to collaboratively close the channel; wraps rust-dlc
/// [`CollaborativeCloseOffer`].
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct TenTenOneCollaborativeCloseOffer {
    pub collaborative_close_offer: CollaborativeCloseOffer,
}
impl TenTenOneMessageHandler {
    /// Returns whether there are any new received messages to process.
    pub fn has_pending_messages_to_process(&self) -> bool {
        !self.msg_received.lock().expect("to get lock").is_empty()
    }
    /// Returns the messages received by the message handler and empties the
    /// receiving buffer.
    pub fn get_and_clear_received_messages(&self) -> Vec<(PublicKey, TenTenOneMessage)> {
        // `mem::take` swaps the buffer with a fresh empty Vec in one step.
        std::mem::take(&mut *self.msg_received.lock().expect("to get lock"))
    }
    /// Send a message to the peer with the given node id. Note that the message is
    /// not sent right away, but only when the LDK
    /// [`lightning::ln::peer_handler::PeerManager::process_events`] is next called.
    ///
    /// Messages larger than [`MAX_BUF_SIZE`] are split into a segment-start marker
    /// followed by chunks; the receiving side re-assembles them in
    /// `handle_custom_message`.
    pub fn send_message(&self, node_id: PublicKey, msg: TenTenOneMessage) {
        if msg.serialized_length() > MAX_BUF_SIZE {
            let (seg_start, seg_chunks) = get_segments(msg.encode(), msg.type_id());
            // Hold the lock across all pushes so the segments stay contiguous.
            let mut msg_events = self.msg_events.lock().expect("to get lock");
            msg_events.push_back((node_id, WireMessage::SegmentStart(seg_start)));
            for chunk in seg_chunks {
                msg_events.push_back((node_id, WireMessage::SegmentChunk(chunk)));
            }
        } else {
            self.msg_events
                .lock()
                .expect("to get lock")
                .push_back((node_id, WireMessage::Message(msg)));
        }
    }
    /// Returns whether the message handler has any message to be sent.
    pub fn has_pending_messages(&self) -> bool {
        !self.msg_events.lock().expect("to get lock").is_empty()
    }
}
impl CustomMessageReader for TenTenOneMessageHandler {
    type CustomMessage = WireMessage;
    /// Decode a custom message from its LDK wire type id and payload.
    ///
    /// Segmentation markers are decoded here directly; every other type id is
    /// delegated to `read_tentenone_message` (defined elsewhere in this crate).
    fn read<R: ::std::io::Read>(
        &self,
        msg_type: u16,
        mut buffer: &mut R,
    ) -> Result<Option<WireMessage>, DecodeError> {
        let decoded = match msg_type {
            segmentation::SEGMENT_START_TYPE => {
                WireMessage::SegmentStart(Readable::read(&mut buffer)?)
            }
            segmentation::SEGMENT_CHUNK_TYPE => {
                WireMessage::SegmentChunk(Readable::read(&mut buffer)?)
            }
            _ => return read_tentenone_message(msg_type, buffer),
        };
        Ok(Some(decoded))
    }
}
/// Implementation of the `CustomMessageHandler` trait is required to handle
/// custom messages in the LDK.
impl CustomMessageHandler for TenTenOneMessageHandler {
    /// Feed one inbound wire message into the per-peer re-assembly state machine.
    ///
    /// Complete messages are buffered in `msg_received` until
    /// `get_and_clear_received_messages` is called.
    fn handle_custom_message(
        &self,
        msg: WireMessage,
        org: &PublicKey,
    ) -> Result<(), LightningError> {
        let mut segment_readers = self.segment_readers.lock().expect("to get lock");
        let segment_reader = segment_readers.entry(*org).or_default();
        // Mid-segmentation for this peer: only a SegmentChunk is acceptable here.
        if segment_reader.expecting_chunk() {
            match msg {
                WireMessage::SegmentChunk(s) => {
                    // `process_segment_chunk` yields `Some` once the final chunk
                    // completes the message.
                    if let Some(msg) = segment_reader
                        .process_segment_chunk(s)
                        .map_err(|e| to_ln_error(e, "Error processing segment chunk"))?
                    {
                        let mut buf = Cursor::new(msg);
                        // The re-assembled bytes start with the u16 message type id.
                        let message_type = <u16 as Readable>::read(&mut buf).map_err(|e| {
                            to_ln_error(e, "Could not reconstruct message from segments")
                        })?;
                        if let WireMessage::Message(m) = self
                            .read(message_type, &mut buf)
                            .map_err(|e| {
                                to_ln_error(e, "Could not reconstruct message from segments")
                            })?
                            .expect("to have a message")
                        {
                            self.msg_received
                                .lock()
                                .expect("to get lock")
                                .push((*org, m));
                        } else {
                            // Segments must re-assemble into a full message, never
                            // into another segmentation marker.
                            return Err(to_ln_error(
                                "Unexpected message type",
                                &message_type.to_string(),
                            ));
                        }
                    }
                    return Ok(());
                }
                _ => {
                    // We were expecting a segment chunk but received something
                    // else, we reset the state.
                    segment_reader.reset();
                }
            }
        }
        match msg {
            WireMessage::Message(m) => self
                .msg_received
                .lock()
                .expect("to get lock")
                .push((*org, m)),
            WireMessage::SegmentStart(s) => segment_reader
                .process_segment_start(s)
                .map_err(|e| to_ln_error(e, "Error processing segment start"))?,
            // A chunk without a preceding SegmentStart is a protocol violation.
            WireMessage::SegmentChunk(_) => {
                return Err(LightningError {
                    err: "Received a SegmentChunk while not expecting one.".to_string(),
                    action: lightning::ln::msgs::ErrorAction::DisconnectPeer { msg: None },
                });
            }
        };
        Ok(())
    }
    /// Drains the outbound queue; called by the LDK `PeerManager` when flushing.
    fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> {
        self.msg_events
            .lock()
            .expect("to get lock")
            .drain(..)
            .collect()
    }
    fn provided_node_features(&self) -> NodeFeatures {
        NodeFeatures::empty()
    }
    fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
        InitFeatures::empty()
    }
}
#[inline]
/// Wrap any displayable error as a [`LightningError`] that disconnects the peer,
/// prefixing it with a human-readable context message.
fn to_ln_error<T: Display>(e: T, msg: &str) -> LightningError {
    LightningError {
        // "context: error" — the original "{} :{}" put the space on the wrong
        // side of the colon.
        err: format!("{}: {}", msg, e),
        action: lightning::ln::msgs::ErrorAction::DisconnectPeer { msg: None },
    }
}
/// Human-readable name of a [`TenTenOneMessage`] variant, e.g. for logging.
pub fn tentenone_message_name(msg: &TenTenOneMessage) -> String {
    match msg {
        TenTenOneMessage::Reject(_) => "Reject",
        TenTenOneMessage::Offer(_) => "Offer",
        TenTenOneMessage::Accept(_) => "Accept",
        TenTenOneMessage::Sign(_) => "Sign",
        TenTenOneMessage::SettleOffer(_) => "SettleOffer",
        TenTenOneMessage::SettleAccept(_) => "SettleAccept",
        TenTenOneMessage::SettleConfirm(_) => "SettleConfirm",
        TenTenOneMessage::SettleFinalize(_) => "SettleFinalize",
        TenTenOneMessage::RenewOffer(_) => "RenewOffer",
        TenTenOneMessage::RenewAccept(_) => "RenewAccept",
        TenTenOneMessage::RenewConfirm(_) => "RenewConfirm",
        TenTenOneMessage::RenewFinalize(_) => "RenewFinalize",
        TenTenOneMessage::RenewRevoke(_) => "RenewRevoke",
        TenTenOneMessage::RolloverOffer(_) => "RolloverOffer",
        TenTenOneMessage::RolloverAccept(_) => "RolloverAccept",
        TenTenOneMessage::RolloverConfirm(_) => "RolloverConfirm",
        TenTenOneMessage::RolloverFinalize(_) => "RolloverFinalize",
        TenTenOneMessage::RolloverRevoke(_) => "RolloverRevoke",
        TenTenOneMessage::CollaborativeCloseOffer(_) => "CollaborativeCloseOffer",
    }
    .to_string()
}
impl TenTenOneMessage {
    /// Builds a 10101 message from the rust-dlc response message. Note, a response can never return
    /// an offer so if an offer is passed the function will panic. This is most likely not a future
    /// proof solution as we'd might want to enrich the response with 10101 metadata as well. If
    /// that happens we will have to rework this part.
    ///
    /// # Panics
    ///
    /// Panics (via `unreachable!`) on any `(message, order_id)` combination not
    /// covered below, e.g. an offer message, or on a missing `order_reason` for
    /// settle responses.
    pub fn build_from_response(
        message: Message,
        order_id: Option<Uuid>,
        order_reason: Option<OrderReason>,
    ) -> Result<Self> {
        // Matching on `(message, order_id)` disambiguates the Renew* messages:
        // rust-dlc uses the same renew messages for both trades and rollovers,
        // so an attached order id means "renew for a trade" while its absence
        // means "rollover".
        let msg = match (message, order_id) {
            (Message::Channel(ChannelMessage::Accept(accept_channel)), Some(order_id)) => {
                TenTenOneMessage::Accept(TenTenOneAcceptChannel {
                    accept_channel,
                    order_id,
                })
            }
            (Message::Channel(ChannelMessage::Sign(sign_channel)), Some(order_id)) => {
                TenTenOneMessage::Sign(TenTenOneSignChannel {
                    sign_channel,
                    order_id,
                })
            }
            (Message::Channel(ChannelMessage::SettleAccept(settle_accept)), Some(order_id)) => {
                TenTenOneMessage::SettleAccept(TenTenOneSettleAccept {
                    settle_accept,
                    order_id,
                    // Settle responses are expected to always come with a reason.
                    order_reason: order_reason.expect("to be some"),
                })
            }
            (Message::Channel(ChannelMessage::SettleConfirm(settle_confirm)), Some(order_id)) => {
                TenTenOneMessage::SettleConfirm(TenTenOneSettleConfirm {
                    settle_confirm,
                    order_id,
                    order_reason: order_reason.expect("to be some"),
                })
            }
            (Message::Channel(ChannelMessage::SettleFinalize(settle_finalize)), Some(order_id)) => {
                TenTenOneMessage::SettleFinalize(TenTenOneSettleFinalize {
                    settle_finalize,
                    order_id,
                    order_reason: order_reason.expect("to be some"),
                })
            }
            // No order id: the renew messages belong to a rollover.
            (Message::Channel(ChannelMessage::RenewAccept(renew_accept)), None) => {
                TenTenOneMessage::RolloverAccept(TenTenOneRolloverAccept { renew_accept })
            }
            (Message::Channel(ChannelMessage::RenewConfirm(renew_confirm)), None) => {
                TenTenOneMessage::RolloverConfirm(TenTenOneRolloverConfirm { renew_confirm })
            }
            (Message::Channel(ChannelMessage::RenewFinalize(renew_finalize)), None) => {
                TenTenOneMessage::RolloverFinalize(TenTenOneRolloverFinalize { renew_finalize })
            }
            (Message::Channel(ChannelMessage::RenewRevoke(renew_revoke)), None) => {
                TenTenOneMessage::RolloverRevoke(TenTenOneRolloverRevoke { renew_revoke })
            }
            // With an order id: the renew messages belong to a position-changing trade.
            (Message::Channel(ChannelMessage::RenewAccept(renew_accept)), Some(order_id)) => {
                TenTenOneMessage::RenewAccept(TenTenOneRenewAccept {
                    renew_accept,
                    order_id,
                })
            }
            (Message::Channel(ChannelMessage::RenewConfirm(renew_confirm)), Some(order_id)) => {
                TenTenOneMessage::RenewConfirm(TenTenOneRenewConfirm {
                    renew_confirm,
                    order_id,
                })
            }
            (Message::Channel(ChannelMessage::RenewFinalize(renew_finalize)), Some(order_id)) => {
                TenTenOneMessage::RenewFinalize(TenTenOneRenewFinalize {
                    renew_finalize,
                    order_id,
                })
            }
            (Message::Channel(ChannelMessage::RenewRevoke(renew_revoke)), Some(order_id)) => {
                TenTenOneMessage::RenewRevoke(TenTenOneRenewRevoke {
                    renew_revoke,
                    order_id,
                })
            }
            (
                Message::Channel(ChannelMessage::CollaborativeCloseOffer(
                    collaborative_close_offer,
                )),
                None,
            ) => TenTenOneMessage::CollaborativeCloseOffer(TenTenOneCollaborativeCloseOffer {
                collaborative_close_offer,
            }),
            (Message::Channel(ChannelMessage::Reject(reject)), None) => {
                TenTenOneMessage::Reject(TenTenOneReject { reject })
            }
            (_, _) => {
                // All other combinations (e.g. an offer message) never appear
                // as a response.
                unreachable!()
            }
        };
        Ok(msg)
    }

    /// Returns the order id attached to this message, if any.
    ///
    /// Rollover, collaborative close and reject messages carry no order id.
    pub fn get_order_id(&self) -> Option<Uuid> {
        match self {
            TenTenOneMessage::Offer(TenTenOneOfferChannel {
                filled_with: FilledWith { order_id, .. },
                ..
            })
            | TenTenOneMessage::Accept(TenTenOneAcceptChannel { order_id, .. })
            | TenTenOneMessage::Sign(TenTenOneSignChannel { order_id, .. })
            | TenTenOneMessage::SettleOffer(TenTenOneSettleOffer {
                order: Order { id: order_id, .. },
                ..
            })
            | TenTenOneMessage::SettleAccept(TenTenOneSettleAccept { order_id, .. })
            | TenTenOneMessage::SettleConfirm(TenTenOneSettleConfirm { order_id, .. })
            | TenTenOneMessage::SettleFinalize(TenTenOneSettleFinalize { order_id, .. })
            | TenTenOneMessage::RenewOffer(TenTenOneRenewOffer {
                filled_with: FilledWith { order_id, .. },
                ..
            })
            | TenTenOneMessage::RenewAccept(TenTenOneRenewAccept { order_id, .. })
            | TenTenOneMessage::RenewConfirm(TenTenOneRenewConfirm { order_id, .. })
            | TenTenOneMessage::RenewFinalize(TenTenOneRenewFinalize { order_id, .. })
            | TenTenOneMessage::RenewRevoke(TenTenOneRenewRevoke { order_id, .. }) => {
                Some(*order_id)
            }
            TenTenOneMessage::RolloverOffer(TenTenOneRolloverOffer { .. })
            | TenTenOneMessage::RolloverAccept(TenTenOneRolloverAccept { .. })
            | TenTenOneMessage::RolloverConfirm(TenTenOneRolloverConfirm { .. })
            | TenTenOneMessage::RolloverFinalize(TenTenOneRolloverFinalize { .. })
            | TenTenOneMessage::RolloverRevoke(TenTenOneRolloverRevoke { .. })
            | TenTenOneMessage::CollaborativeCloseOffer(TenTenOneCollaborativeCloseOffer {
                ..
            })
            | TenTenOneMessage::Reject(TenTenOneReject { .. }) => None,
        }
    }

    /// Returns the reason behind the order for settle messages; `None` for
    /// every other variant.
    pub fn get_order_reason(&self) -> Option<OrderReason> {
        match self {
            TenTenOneMessage::SettleOffer(TenTenOneSettleOffer {
                order: Order { order_reason, .. },
                ..
            })
            | TenTenOneMessage::SettleAccept(TenTenOneSettleAccept { order_reason, .. })
            | TenTenOneMessage::SettleConfirm(TenTenOneSettleConfirm { order_reason, .. })
            | TenTenOneMessage::SettleFinalize(TenTenOneSettleFinalize { order_reason, .. }) => {
                Some(order_reason.clone())
            }
            TenTenOneMessage::Offer(_)
            | TenTenOneMessage::Accept(_)
            | TenTenOneMessage::Sign(_)
            | TenTenOneMessage::RenewOffer(_)
            | TenTenOneMessage::RenewAccept(_)
            | TenTenOneMessage::RenewConfirm(_)
            | TenTenOneMessage::RenewFinalize(_)
            | TenTenOneMessage::RenewRevoke(_)
            | TenTenOneMessage::RolloverOffer(_)
            | TenTenOneMessage::RolloverAccept(_)
            | TenTenOneMessage::RolloverConfirm(_)
            | TenTenOneMessage::RolloverFinalize(_)
            | TenTenOneMessage::RolloverRevoke(_)
            | TenTenOneMessage::CollaborativeCloseOffer(_)
            | TenTenOneMessage::Reject(_) => None,
        }
    }

    /// Extracts the rust-dlc `reference_id` from the wrapped channel message.
    ///
    /// Every variant wraps a rust-dlc message that carries an optional
    /// reference id, so this is a pure field projection.
    pub fn get_reference_id(&self) -> Option<ReferenceId> {
        match self {
            TenTenOneMessage::Offer(TenTenOneOfferChannel {
                offer_channel: OfferChannel { reference_id, .. },
                ..
            })
            | TenTenOneMessage::Accept(TenTenOneAcceptChannel {
                accept_channel: AcceptChannel { reference_id, .. },
                ..
            })
            | TenTenOneMessage::Sign(TenTenOneSignChannel {
                sign_channel: SignChannel { reference_id, .. },
                ..
            })
            | TenTenOneMessage::SettleOffer(TenTenOneSettleOffer {
                settle_offer: SettleOffer { reference_id, .. },
                ..
            })
            | TenTenOneMessage::SettleAccept(TenTenOneSettleAccept {
                settle_accept: SettleAccept { reference_id, .. },
                ..
            })
            | TenTenOneMessage::SettleConfirm(TenTenOneSettleConfirm {
                settle_confirm: SettleConfirm { reference_id, .. },
                ..
            })
            | TenTenOneMessage::SettleFinalize(TenTenOneSettleFinalize {
                settle_finalize: SettleFinalize { reference_id, .. },
                ..
            })
            | TenTenOneMessage::RenewOffer(TenTenOneRenewOffer {
                renew_offer: RenewOffer { reference_id, .. },
                ..
            })
            | TenTenOneMessage::RolloverOffer(TenTenOneRolloverOffer {
                renew_offer: RenewOffer { reference_id, .. },
                ..
            })
            | TenTenOneMessage::RolloverAccept(TenTenOneRolloverAccept {
                renew_accept: RenewAccept { reference_id, .. },
            })
            | TenTenOneMessage::RolloverConfirm(TenTenOneRolloverConfirm {
                renew_confirm: RenewConfirm { reference_id, .. },
            })
            | TenTenOneMessage::RolloverFinalize(TenTenOneRolloverFinalize {
                renew_finalize: RenewFinalize { reference_id, .. },
            })
            | TenTenOneMessage::RolloverRevoke(TenTenOneRolloverRevoke {
                renew_revoke: RenewRevoke { reference_id, .. },
            })
            | TenTenOneMessage::RenewAccept(TenTenOneRenewAccept {
                renew_accept: RenewAccept { reference_id, .. },
                ..
            })
            | TenTenOneMessage::RenewConfirm(TenTenOneRenewConfirm {
                renew_confirm: RenewConfirm { reference_id, .. },
                ..
            })
            | TenTenOneMessage::RenewFinalize(TenTenOneRenewFinalize {
                renew_finalize: RenewFinalize { reference_id, .. },
                ..
            })
            | TenTenOneMessage::RenewRevoke(TenTenOneRenewRevoke {
                renew_revoke: RenewRevoke { reference_id, .. },
                ..
            })
            | TenTenOneMessage::CollaborativeCloseOffer(TenTenOneCollaborativeCloseOffer {
                collaborative_close_offer: CollaborativeCloseOffer { reference_id, .. },
            })
            | TenTenOneMessage::Reject(TenTenOneReject {
                reject: Reject { reference_id, .. },
            }) => *reference_id,
        }
    }
}
impl From<TenTenOneMessage> for Message {
    /// Converts via [`ChannelMessage`] and wraps the result in
    /// [`Message::Channel`] — every 10101 message is a channel message.
    fn from(value: TenTenOneMessage) -> Self {
        Message::Channel(ChannelMessage::from(value))
    }
}
impl From<TenTenOneMessage> for ChannelMessage {
    /// Strips the 10101 metadata (order id, order reason, …) and returns the
    /// inner rust-dlc channel message.
    ///
    /// Note that the Rollover* variants map back onto the rust-dlc Renew*
    /// messages: rust-dlc has no dedicated rollover messages.
    fn from(value: TenTenOneMessage) -> Self {
        match value {
            TenTenOneMessage::Offer(TenTenOneOfferChannel { offer_channel, .. }) => {
                ChannelMessage::Offer(offer_channel)
            }
            TenTenOneMessage::Accept(TenTenOneAcceptChannel { accept_channel, .. }) => {
                ChannelMessage::Accept(accept_channel)
            }
            TenTenOneMessage::Sign(TenTenOneSignChannel { sign_channel, .. }) => {
                ChannelMessage::Sign(sign_channel)
            }
            TenTenOneMessage::SettleOffer(TenTenOneSettleOffer { settle_offer, .. }) => {
                ChannelMessage::SettleOffer(settle_offer)
            }
            TenTenOneMessage::SettleAccept(TenTenOneSettleAccept { settle_accept, .. }) => {
                ChannelMessage::SettleAccept(settle_accept)
            }
            TenTenOneMessage::SettleConfirm(TenTenOneSettleConfirm { settle_confirm, .. }) => {
                ChannelMessage::SettleConfirm(settle_confirm)
            }
            TenTenOneMessage::SettleFinalize(TenTenOneSettleFinalize {
                settle_finalize, ..
            }) => ChannelMessage::SettleFinalize(settle_finalize),
            TenTenOneMessage::RenewOffer(TenTenOneRenewOffer { renew_offer, .. }) => {
                ChannelMessage::RenewOffer(renew_offer)
            }
            TenTenOneMessage::RenewAccept(TenTenOneRenewAccept { renew_accept, .. }) => {
                ChannelMessage::RenewAccept(renew_accept)
            }
            TenTenOneMessage::RenewConfirm(TenTenOneRenewConfirm { renew_confirm, .. }) => {
                ChannelMessage::RenewConfirm(renew_confirm)
            }
            TenTenOneMessage::RenewFinalize(TenTenOneRenewFinalize { renew_finalize, .. }) => {
                ChannelMessage::RenewFinalize(renew_finalize)
            }
            TenTenOneMessage::RenewRevoke(TenTenOneRenewRevoke { renew_revoke, .. }) => {
                ChannelMessage::RenewRevoke(renew_revoke)
            }
            TenTenOneMessage::RolloverOffer(TenTenOneRolloverOffer { renew_offer, .. }) => {
                ChannelMessage::RenewOffer(renew_offer)
            }
            TenTenOneMessage::RolloverAccept(TenTenOneRolloverAccept { renew_accept }) => {
                ChannelMessage::RenewAccept(renew_accept)
            }
            TenTenOneMessage::RolloverConfirm(TenTenOneRolloverConfirm { renew_confirm }) => {
                ChannelMessage::RenewConfirm(renew_confirm)
            }
            TenTenOneMessage::RolloverFinalize(TenTenOneRolloverFinalize { renew_finalize }) => {
                ChannelMessage::RenewFinalize(renew_finalize)
            }
            TenTenOneMessage::RolloverRevoke(TenTenOneRolloverRevoke { renew_revoke }) => {
                ChannelMessage::RenewRevoke(renew_revoke)
            }
            TenTenOneMessage::CollaborativeCloseOffer(TenTenOneCollaborativeCloseOffer {
                collaborative_close_offer,
            }) => ChannelMessage::CollaborativeCloseOffer(collaborative_close_offer),
            TenTenOneMessage::Reject(TenTenOneReject { reject }) => ChannelMessage::Reject(reject),
        }
    }
}
/// Writes an uuid to the given writer, serialized as its string form.
pub fn write_uuid<W: Writer>(
    uuid: &Uuid,
    writer: &mut W,
) -> std::result::Result<(), ::std::io::Error> {
    let serialized = uuid.to_string();
    write_string(&serialized, writer)
}
/// Reads an uuid from the given reader.
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | true |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/networking.rs | crates/xxi-node/src/networking.rs | use crate::node::NodeInfo;
use futures::future;
use lightning::ln::peer_handler::APeerManager;
use lightning::ln::peer_handler::SocketDescriptor;
use std::future::Future;
use std::ops::Deref;
use tracing::debug;
// Transport backends; each is compiled in only when its Cargo feature is
// enabled.
#[cfg(feature = "ln_net_axum_ws")]
pub mod axum;
#[cfg(feature = "ln_net_tcp")]
pub mod tcp;
#[cfg(feature = "ln_net_ws")]
mod tungstenite;
/// Connects to `peer` outbound, over WebSocket when `peer.is_ws` is set and
/// over TCP otherwise.
///
/// Returns `Some(future)` driving the connection, or `None` when the
/// underlying transport did not yield a connection.
///
/// # Panics
///
/// Panics at runtime if the transport selected by `peer.is_ws` was not
/// compiled in (feature `ln_net_ws` resp. `ln_net_tcp`).
#[allow(clippy::diverging_sub_expression, unused_variables, unreachable_code)] // From the panic!() below
pub async fn connect_outbound<PM: Deref + 'static + Send + Sync + Clone>(
    peer_manager: PM,
    peer: NodeInfo,
) -> Option<impl Future<Output = ()>>
where
    PM::Target: APeerManager<Descriptor = DynamicSocketDescriptor>,
{
    if peer.is_ws {
        debug!("Connecting over WS");
        // With the feature disabled this arm is a typed `panic!`; the explicit
        // `Either` annotation keeps both cfg branches returning the same type.
        #[cfg(not(feature = "ln_net_ws"))]
        let ws: Option<future::Either<future::Ready<()>, _>> =
            panic!("Cannot connect outbound over WS when ln_net_ws is not enabled");
        #[cfg(feature = "ln_net_ws")]
        let ws = tungstenite::connect_outbound(peer_manager, peer)
            .await
            .map(future::Either::Left);
        ws
    } else {
        debug!("Connecting over TCP");
        // Same typed-panic trick for the TCP side.
        #[cfg(not(feature = "ln_net_tcp"))]
        let tcp: Option<future::Either<_, future::Ready<()>>> =
            panic!("Cannot connect outbound over TCP when ln_net_tcp is not enabled");
        #[cfg(feature = "ln_net_tcp")]
        let tcp = tcp::connect_outbound(peer_manager, peer.pubkey, peer.address)
            .await
            .map(future::Either::Right);
        tcp
    }
}
/// A dynamic socket descriptor that could either be over WASM (JS) websockets, TCP sockets
/// (lightning_net_tokio), or Axum websockets.
#[derive(Hash, Clone, Eq, PartialEq)]
pub enum DynamicSocketDescriptor {
    /// Plain TCP socket.
    #[cfg(feature = "ln_net_tcp")]
    Tcp(tcp::SocketDescriptor),
    /// Websocket via the `axum` backend.
    #[cfg(feature = "ln_net_axum_ws")]
    Axum(axum::SocketDescriptor),
    /// Websocket via the `tungstenite` backend.
    #[cfg(feature = "ln_net_ws")]
    Tungstenite(tungstenite::SocketDescriptor),
}
impl SocketDescriptor for DynamicSocketDescriptor {
    /// Delegates the write to whichever transport backs this descriptor.
    fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
        match self {
            #[cfg(feature = "ln_net_tcp")]
            DynamicSocketDescriptor::Tcp(sock) => sock.send_data(data, resume_read),
            #[cfg(feature = "ln_net_axum_ws")]
            DynamicSocketDescriptor::Axum(sock) => sock.send_data(data, resume_read),
            #[cfg(feature = "ln_net_ws")]
            DynamicSocketDescriptor::Tungstenite(sock) => sock.send_data(data, resume_read),
        }
    }

    /// Delegates the disconnect to whichever transport backs this descriptor.
    fn disconnect_socket(&mut self) {
        match self {
            #[cfg(feature = "ln_net_tcp")]
            DynamicSocketDescriptor::Tcp(sock) => sock.disconnect_socket(),
            #[cfg(feature = "ln_net_axum_ws")]
            DynamicSocketDescriptor::Axum(sock) => sock.disconnect_socket(),
            #[cfg(feature = "ln_net_ws")]
            DynamicSocketDescriptor::Tungstenite(sock) => sock.disconnect_socket(),
        }
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/dlc_wallet.rs | crates/xxi-node/src/dlc_wallet.rs | use crate::bitcoin_conversion::to_address_29;
use crate::bitcoin_conversion::to_block_29;
use crate::bitcoin_conversion::to_network_29;
use crate::bitcoin_conversion::to_outpoint_29;
use crate::bitcoin_conversion::to_outpoint_30;
use crate::bitcoin_conversion::to_psbt_29;
use crate::bitcoin_conversion::to_psbt_30;
use crate::bitcoin_conversion::to_script_29;
use crate::bitcoin_conversion::to_tx_29;
use crate::bitcoin_conversion::to_tx_30;
use crate::bitcoin_conversion::to_txid_29;
use crate::bitcoin_conversion::to_txid_30;
use crate::bitcoin_conversion::to_txout_29;
use crate::blockchain::Blockchain;
use crate::node::Storage;
use crate::on_chain_wallet::BdkStorage;
use crate::on_chain_wallet::OnChainWallet;
use crate::storage::DlcStorageProvider;
use crate::storage::TenTenOneStorage;
use crate::storage::WalletStorage;
use anyhow::Result;
use bdk::LocalOutput;
use bdk::SignOptions;
use bdk_coin_select::metrics::LowestFee;
use bdk_coin_select::Candidate;
use bdk_coin_select::ChangePolicy;
use bdk_coin_select::CoinSelector;
use bdk_coin_select::DrainWeights;
use bdk_coin_select::Target;
use bitcoin::secp256k1::KeyPair;
use bitcoin::Network;
use bitcoin::TxIn;
use std::sync::Arc;
/// Maximum number of branch-and-bound rounds attempted during coin selection
/// (see `get_utxos_for_amount`).
const COIN_SELECTION_MAX_ROUNDS: usize = 100_000;
/// Adapter implementing the `dlc_manager` wallet/blockchain/signer traits on
/// top of the 10101 on-chain wallet, blockchain client and DLC storage.
#[derive(Clone)]
pub struct DlcWallet<D, S, N> {
    // BDK-backed wallet used for addresses, UTXOs and PSBT signing.
    on_chain_wallet: Arc<OnChainWallet<D>>,
    // Chain backend used for broadcasting and block/tx queries.
    blockchain: Arc<Blockchain<N>>,
    // Persists DLC key pairs for later secret-key lookup.
    dlc_storage: Arc<DlcStorageProvider<S>>,
}
impl<D, S, N> DlcWallet<D, S, N> {
pub fn new(
on_chain_wallet: Arc<OnChainWallet<D>>,
dlc_storage: Arc<DlcStorageProvider<S>>,
blockchain: Arc<Blockchain<N>>,
) -> Self {
Self {
on_chain_wallet,
blockchain,
dlc_storage,
}
}
}
// Chain queries for rust-dlc. rust-dlc still uses the older bitcoin types
// ("29"), so arguments/results are converted to/from the newer ones ("30").
impl<D, S, N> dlc_manager::Blockchain for DlcWallet<D, S, N>
where
    D: BdkStorage,
    N: Storage,
{
    /// Broadcasts `tx`, blocking until the backend accepts it.
    fn send_transaction(
        &self,
        tx: &bitcoin_old::Transaction,
    ) -> Result<(), dlc_manager::error::Error> {
        let tx = to_tx_30(tx.clone());
        self.blockchain
            .broadcast_transaction_blocking(&tx)
            .map_err(|e| dlc_manager::error::Error::WalletError(format!("{e:#}").into()))?;
        Ok(())
    }

    /// Returns the wallet's network in the old bitcoin type; fails for
    /// networks the old type cannot represent.
    fn get_network(
        &self,
    ) -> Result<bitcoin_old::network::constants::Network, dlc_manager::error::Error> {
        let network = self.on_chain_wallet.network;
        let network = match network {
            Network::Bitcoin => bitcoin_old::network::constants::Network::Bitcoin,
            Network::Testnet => bitcoin_old::network::constants::Network::Testnet,
            Network::Signet => bitcoin_old::network::constants::Network::Signet,
            Network::Regtest => bitcoin_old::network::constants::Network::Regtest,
            _ => {
                return Err(dlc_manager::error::Error::BlockchainError(format!(
                    "Network not supported: {network}",
                )));
            }
        };
        Ok(network)
    }

    /// Current chain tip height, as tracked by the on-chain wallet.
    fn get_blockchain_height(&self) -> Result<u64, dlc_manager::error::Error> {
        Ok(self.on_chain_wallet.get_tip() as u64)
    }

    /// Fetches the full block at `height` (hash lookup, then block fetch).
    fn get_block_at_height(
        &self,
        height: u64,
    ) -> Result<bitcoin_old::Block, dlc_manager::error::Error> {
        let block_hash = self.blockchain.get_block_hash(height).map_err(|e| {
            dlc_manager::error::Error::BlockchainError(format!(
                "Could not find block hash at height {height}: {e:#}"
            ))
        })?;
        let block = self
            .blockchain
            .get_block_by_hash(&block_hash)
            .map_err(|e| {
                dlc_manager::error::Error::BlockchainError(format!(
                    "Could not find block by hash {}: {e:#}",
                    block_hash
                ))
            })?;
        Ok(to_block_29(block))
    }

    /// Fetches a transaction by id; errors if the backend does not know it.
    fn get_transaction(
        &self,
        txid: &bitcoin_old::Txid,
    ) -> Result<bitcoin_old::Transaction, dlc_manager::error::Error> {
        let txid = to_txid_30(*txid);
        let tx = self
            .blockchain
            .get_transaction(&txid)
            .map_err(|e| {
                dlc_manager::error::Error::BlockchainError(format!(
                    "Could not get transaction {txid}: {e:#}"
                ))
            })?
            .ok_or_else(|| {
                dlc_manager::error::Error::BlockchainError(format!("Transaction {txid} not found"))
            })?;
        let tx = to_tx_29(tx);
        Ok(tx)
    }

    /// Number of confirmations of `txid` according to the chain backend.
    fn get_transaction_confirmations(
        &self,
        txid: &bitcoin_old::Txid,
    ) -> Result<u32, dlc_manager::error::Error> {
        let txid = to_txid_30(*txid);
        let confirmations = self
            .blockchain
            .get_transaction_confirmations(&txid)
            .map_err(|e| {
                dlc_manager::error::Error::BlockchainError(format!(
                    "Could not get confirmations for transaction {txid}: {e:#}",
                ))
            })?;
        Ok(confirmations)
    }

    /// Confirmations of the transaction spending `txo`, plus the spender's
    /// txid, if the output has been spent.
    fn get_txo_confirmations(
        &self,
        txo: &bitcoin_old::OutPoint,
    ) -> Result<Option<(u32, bitcoin_old::Txid)>, dlc_manager::error::Error> {
        let txo = to_outpoint_30(*txo);
        let confirmations = self.blockchain.get_txo_confirmations(&txo).map_err(|e| {
            dlc_manager::error::Error::BlockchainError(format!(
                "Could not get confirmations for txo {txo}: {e:#}",
            ))
        })?;
        Ok(confirmations.map(|(confirmations, txid)| (confirmations, to_txid_29(txid))))
    }
}
impl<D: BdkStorage, S: TenTenOneStorage, N> dlc_manager::Wallet for DlcWallet<D, S, N> {
    /// Hands out a fresh on-chain address (converted to the old bitcoin type
    /// rust-dlc expects).
    fn get_new_address(&self) -> Result<bitcoin_old::Address, dlc_manager::error::Error> {
        let address = self
            .on_chain_wallet
            .get_new_address()
            .map_err(|e| dlc_manager::error::Error::WalletError((format!("{e:#}")).into()))?;
        let address = to_address_29(address);
        Ok(address)
    }

    // TODO: Use the extended private key, a special derivation path and an incremental index to
    // generate the secret key.
    /// Generates a random key pair, persists it in DLC storage (so
    /// `get_secret_key_for_pubkey` can find it later) and returns the secret
    /// key.
    fn get_new_secret_key(&self) -> Result<secp256k1_zkp::SecretKey, dlc_manager::error::Error> {
        let kp = KeyPair::new(&self.on_chain_wallet.secp, &mut rand::thread_rng());
        let sk = kp.secret_key();
        self.dlc_storage
            .upsert_key_pair(&kp.public_key(), &sk)
            .map_err(|e| {
                dlc_manager::error::Error::StorageError(format!("Failed to upsert key pair: {e:#}"))
            })?;
        // Convert between the two secp256k1 crate versions via raw bytes.
        let sk =
            secp256k1_zkp::SecretKey::from_slice(&sk.secret_bytes()).expect("valid conversion");
        Ok(sk)
    }

    /// Get UTXOs to create a DLC or a DLC channel.
    ///
    /// Runs branch-and-bound coin selection (lowest-fee metric) over the
    /// wallet's spendable UTXOs to cover `amount` sats at `fee_rate`.
    /// When `lock_utxos` is set, the selected outpoints are reserved so later
    /// calls cannot pick them again.
    fn get_utxos_for_amount(
        &self,
        amount: u64,
        fee_rate: Option<u64>,
        base_weight_wu: u64,
        lock_utxos: bool,
    ) -> Result<Vec<dlc_manager::Utxo>, dlc_manager::error::Error> {
        // Nothing to fund, nothing to select.
        if amount == 0 {
            return Ok(Vec::new());
        }
        let network = self.on_chain_wallet.network();
        let fee_rate = fee_rate.expect("always set by rust-dlc");
        // Get temporarily reserved UTXOs from in-memory storage. The lock is
        // held for the whole selection so concurrent calls cannot race.
        let mut reserved_outpoints = self.on_chain_wallet.locked_utxos.lock();
        let utxos = self.on_chain_wallet.list_unspent();
        // Filter out reserved and spent UTXOs to prevent double-spending attempts.
        let utxos: Vec<&LocalOutput> = utxos
            .iter()
            .filter(|utxo| !reserved_outpoints.contains(&utxo.outpoint))
            .filter(|utxo| !utxo.is_spent)
            .collect();
        // Describe each candidate input (value + input weight) for the coin
        // selector.
        let candidates = utxos
            .iter()
            .map(|utxo| {
                let tx_in = TxIn {
                    previous_output: utxo.outpoint,
                    ..Default::default()
                };
                let segwit_weight = tx_in.segwit_weight();
                // The 10101 wallet always generates SegWit addresses.
                //
                // TODO: Rework this once we use Taproot.
                let is_witness_program = true;
                Candidate::new(utxo.txout.value, segwit_weight as u32, is_witness_program)
            })
            .collect::<Vec<_>>();
        let target = Target {
            feerate: bdk_coin_select::FeeRate::from_sat_per_vb(fee_rate as f32),
            min_fee: 0,
            value: amount,
        };
        // Total spendable value; only used for the error message below.
        let available_candidates = candidates.iter().map(|can| can.value).sum::<u64>();
        let mut coin_selector = CoinSelector::new(&candidates, base_weight_wu as u32);
        let dust_limit = 0;
        let long_term_feerate = bdk_coin_select::FeeRate::default_min_relay_fee();
        let change_policy = ChangePolicy::min_value_and_waste(
            DrainWeights::default(),
            dust_limit,
            target.feerate,
            long_term_feerate,
        );
        let metric = LowestFee {
            target,
            long_term_feerate,
            change_policy,
        };
        coin_selector
            .run_bnb(metric, COIN_SELECTION_MAX_ROUNDS)
            .map_err(|e| {
                dlc_manager::error::Error::WalletError(
                    (format!("Wallet does not hold enough UTXOs to cover amount {amount} sats with fee rate {fee_rate} sats/vbyte because we only have {available_candidates} sats. {e:#}")).into(),
                )
            })?;
        debug_assert!(coin_selector.is_target_met(target));
        let indices = coin_selector.selected_indices();
        let mut selected_utxos: Vec<dlc_manager::Utxo> = Vec::with_capacity(indices.len());
        for index in indices {
            let utxo = &utxos[*index];
            let address = bitcoin_old::Address::from_script(
                &to_script_29(utxo.txout.script_pubkey.clone()),
                to_network_29(network),
            )
            .expect("to be a valid address");
            let outpoint = utxo.outpoint;
            let utxo = dlc_manager::Utxo {
                tx_out: to_txout_29(utxo.txout.clone()),
                outpoint: to_outpoint_29(outpoint),
                address,
                redeem_script: bitcoin_old::Script::new(),
                reserved: false,
            };
            if lock_utxos {
                // Add selected UTXOs to reserve to prevent future double-spend attempts.
                reserved_outpoints.push(outpoint);
            }
            selected_utxos.push(utxo);
        }
        Ok(selected_utxos)
    }

    /// No-op: this wallet does not track watched addresses per DLC.
    fn import_address(
        &self,
        address: &bitcoin_old::Address,
    ) -> Result<(), dlc_manager::error::Error> {
        tracing::debug!(%address, "Choosing to ignore watching DLC-related address");
        Ok(())
    }

    /// Releases previously reserved outpoints so they become spendable again.
    fn unreserve_utxos(
        &self,
        outpoints: &[bitcoin_old::OutPoint],
    ) -> Result<(), dlc_manager::error::Error> {
        self.on_chain_wallet.unreserve_utxos(outpoints);
        Ok(())
    }
}
impl<D, S: TenTenOneStorage, N> dlc_manager::Signer for DlcWallet<D, S, N> {
    /// Signs the PSBT with the on-chain wallet and writes the result back into
    /// `psbt`.
    ///
    /// NOTE(review): `_index` is ignored — the whole PSBT is signed, not just
    /// one input.
    fn sign_psbt_input(
        &self,
        psbt: &mut bitcoin_old::psbt::PartiallySignedTransaction,
        _index: usize,
    ) -> Result<(), dlc_manager::error::Error> {
        // Convert old-version PSBT -> new, sign, then convert back.
        let mut psbt_30 = to_psbt_30(psbt.clone());
        self.on_chain_wallet
            .sign_psbt(
                &mut psbt_30,
                SignOptions {
                    // The witness UTXO in the PSBT is trusted as-is.
                    trust_witness_utxo: true,
                    ..Default::default()
                },
            )
            .map_err(|e| dlc_manager::error::Error::WalletError((format!("{e:#}")).into()))?;
        *psbt = to_psbt_29(psbt_30.clone());
        Ok(())
    }

    /// Looks up the secret key previously stored for `pk` (see
    /// `get_new_secret_key`).
    fn get_secret_key_for_pubkey(
        &self,
        pk: &secp256k1_zkp::PublicKey,
    ) -> Result<secp256k1_zkp::SecretKey, dlc_manager::error::Error> {
        // Convert between secp256k1 crate versions via serialized bytes.
        let public_key =
            bitcoin::secp256k1::PublicKey::from_slice(&pk.serialize()).expect("valid conversion");
        let sk = self
            .dlc_storage
            .get_priv_key_for_pubkey(&public_key)
            .map_err(|e| {
                dlc_manager::error::Error::StorageError(format!("Failed to load SK: {e:#}"))
            })?
            .ok_or_else(|| dlc_manager::error::Error::StorageError("Unknown PK".to_string()))?;
        let sk =
            secp256k1_zkp::SecretKey::from_slice(&sk.secret_bytes()).expect("valid conversion");
        Ok(sk)
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/on_chain_wallet.rs | crates/xxi-node/src/on_chain_wallet.rs | use crate::bitcoin_conversion::to_outpoint_29;
use crate::fee_rate_estimator::FeeRateEstimator;
use crate::seed::WalletSeed;
use anyhow::anyhow;
use anyhow::bail;
use anyhow::Result;
use bdk::chain::indexed_tx_graph::Indexer;
use bdk::chain::local_chain::LocalChain;
use bdk::chain::tx_graph::CalculateFeeError;
use bdk::chain::tx_graph::CanonicalTx;
use bdk::chain::Append;
use bdk::chain::ChainPosition;
use bdk::chain::PersistBackend;
use bdk::psbt::PsbtUtils;
use bdk::wallet::IsDust;
use bdk::FeeRate;
use bdk::KeychainKind;
use bdk::LocalOutput;
use bdk::SignOptions;
use bitcoin::psbt::PartiallySignedTransaction;
use bitcoin::secp256k1::All;
use bitcoin::secp256k1::Secp256k1;
use bitcoin::Address;
use bitcoin::Amount;
use bitcoin::Network;
use bitcoin::OutPoint;
use bitcoin::ScriptBuf;
use bitcoin::SignedAmount;
use bitcoin::Transaction;
use bitcoin::TxOut;
use bitcoin::Txid;
use lightning::chain::chaininterface::ConfirmationTarget;
use parking_lot::Mutex;
use parking_lot::RwLock;
use std::collections::BTreeMap;
use std::num::NonZeroU32;
use std::sync::Arc;
use time::OffsetDateTime;
/// Average SegWit transaction size in vbytes. Taken from mempool.space.
/// Used by `estimate_fee` as a fallback when the PSBT's exact fee is unknown.
const AVG_SEGWIT_TX_WEIGHT_VB: usize = 140;
/// BDK-backed on-chain wallet shared across the node.
#[derive(Clone)]
pub struct OnChainWallet<D> {
    // The underlying BDK wallet, guarded for concurrent read/write access.
    bdk: Arc<RwLock<bdk::Wallet<D>>>,
    /// These `OutPoint`s are unlocked when the `OnChainWallet` is rebuilt or when
    /// `unreserve_utxos` is called.
    pub(crate) locked_utxos: Arc<Mutex<Vec<OutPoint>>>,
    // Source of fee rates per confirmation target.
    pub(crate) fee_rate_estimator: Arc<FeeRateEstimator>,
    // Network this wallet was created for.
    pub(crate) network: Network,
    // Shared secp context, e.g. for key generation.
    pub(crate) secp: Secp256k1<All>,
}
impl<D> OnChainWallet<D> {
    /// Current wallet balance as reported by BDK.
    pub fn get_balance(&self) -> bdk::wallet::Balance {
        self.bdk.read().get_balance()
    }

    /// List all the transactions related to this wallet.
    pub fn get_on_chain_history(&self) -> Vec<TransactionDetails> {
        let bdk = self.bdk.read();
        // Only keep transactions that actually touch this wallet's scripts.
        let txs = bdk.transactions().filter(|tx| {
            let tx = tx.tx_node.tx;
            bdk.spk_index().is_tx_relevant(tx)
        });
        txs.map(|tx| {
            let (sent, received) = bdk.sent_and_received(&tx.tx_node);
            let confirmation_status = self.get_confirmation_status(&tx.tx_node.txid());
            // Fee computation can fail (see `TransactionDetails::fee`); the
            // Result is forwarded instead of being swallowed.
            let fee = bdk.calculate_fee(&tx.tx_node).map(Amount::from_sat);
            TransactionDetails {
                transaction: tx.tx_node.tx.clone(),
                sent: Amount::from_sat(sent),
                received: Amount::from_sat(received),
                fee,
                confirmation_status,
            }
        })
        .collect()
    }

    /// Network the underlying BDK wallet is on.
    pub fn network(&self) -> Network {
        self.bdk.read().network()
    }

    /// All unspent outputs known to BDK (does not exclude reserved ones).
    pub(crate) fn list_unspent(&self) -> Vec<LocalOutput> {
        self.bdk.read().list_unspent().collect()
    }

    /// Removes the given outpoints from the in-memory reservation list.
    pub(crate) fn unreserve_utxos(&self, outpoints: &[bitcoin_old::OutPoint]) {
        self.locked_utxos
            .lock()
            .retain(|utxo| !outpoints.contains(&to_outpoint_29(*utxo)));
    }

    /// Looks up a wallet transaction by id.
    pub(crate) fn get_transaction(&self, txid: &Txid) -> Option<Transaction> {
        let bdk = self.bdk.read();
        bdk.get_tx(*txid).map(|tx| tx.tx_node.tx).cloned()
    }

    /// Classifies `txid` as unknown, in the mempool, or confirmed (with
    /// confirmation count and timestamp).
    pub(crate) fn get_confirmation_status(&self, txid: &Txid) -> ConfirmationStatus {
        let bdk = self.bdk.read();
        let (confirmation_height, confirmation_time) = match bdk.get_tx(*txid) {
            Some(CanonicalTx {
                chain_position: ChainPosition::Confirmed(anchor),
                ..
            }) => (anchor.confirmation_height, anchor.confirmation_time),
            Some(CanonicalTx {
                chain_position: ChainPosition::Unconfirmed(last_seen),
                ..
            }) => {
                let last_seen =
                    OffsetDateTime::from_unix_timestamp(last_seen as i64).expect("valid timestamp");
                return ConfirmationStatus::Mempool { last_seen };
            }
            None => return ConfirmationStatus::Unknown,
        };
        let tip = self.get_tip();
        let n_confirmations = match tip.checked_sub(confirmation_height) {
            // Being included in a block counts as a confirmation!
            Some(diff) => NonZeroU32::new(diff + 1).expect("non-zero"),
            None => {
                // The transaction shouldn't be ahead of the tip!
                debug_assert!(false);
                return ConfirmationStatus::Unknown;
            }
        };
        let timestamp =
            OffsetDateTime::from_unix_timestamp(confirmation_time as i64).expect("valid timestamp");
        ConfirmationStatus::Confirmed {
            n_confirmations,
            timestamp,
        }
    }

    /// Height of the local chain tip.
    pub(crate) fn get_tip(&self) -> u32 {
        self.bdk.read().local_chain().tip().block_id().height
    }

    /// Similar to `list_unspent`, but more types of UTXO are included here.
    pub(crate) fn get_utxos(&self) -> Vec<(OutPoint, TxOut)> {
        let bdk = self.bdk.read();
        bdk.list_unspent()
            .map(|local_output| (local_output.outpoint, local_output.txout))
            .collect()
    }

    /// Whether the given script belongs to this wallet.
    pub(crate) fn is_mine(&self, script_pubkey: &ScriptBuf) -> bool {
        self.bdk.read().is_mine(script_pubkey)
    }

    /// Fee paid by `transaction`, as far as BDK can determine it.
    pub(crate) fn calculate_fee(
        &self,
        transaction: &Transaction,
    ) -> Result<u64, CalculateFeeError> {
        self.bdk.read().calculate_fee(transaction)
    }

    /// Signs the PSBT in place with the wallet's keys using `sign_options`.
    pub(crate) fn sign_psbt(
        &self,
        psbt: &mut PartiallySignedTransaction,
        sign_options: SignOptions,
    ) -> Result<()> {
        self.bdk.read().sign(psbt, sign_options)?;
        Ok(())
    }

    /// Unbounded script-pubkey iterators for both keychains.
    pub(crate) fn all_script_pubkeys(
        &self,
    ) -> BTreeMap<KeychainKind, impl Iterator<Item = (u32, ScriptBuf)> + Clone> {
        self.bdk.read().all_unbounded_spk_iters()
    }

    /// Snapshot of the wallet's local chain.
    pub(crate) fn local_chain(&self) -> LocalChain {
        self.bdk.read().local_chain().clone()
    }

    /// Collects everything a chain sync needs to watch: the local chain,
    /// unused revealed scripts, unconfirmed txids and known UTXOs.
    pub(crate) fn pre_sync_state(&self) -> (LocalChain, Vec<ScriptBuf>, Vec<Txid>, Vec<OutPoint>) {
        let bdk = self.bdk.read();
        let local_chain = bdk.local_chain().clone();
        // We must watch every new address we generate (until it is used).
        let unused_revealed_script_pubkeys = bdk
            .spk_index()
            .unused_spks()
            .map(|(_, _, s)| ScriptBuf::from(s))
            .collect();
        let unconfirmed_txids = bdk
            .tx_graph()
            .list_chain_txs(&local_chain, local_chain.tip().block_id())
            .filter(|tx| !tx.chain_position.is_confirmed())
            .map(|tx| tx.tx_node.txid)
            .collect();
        let indexed_outpoints = bdk.spk_index().outpoints().iter().cloned();
        let utxos = bdk
            .tx_graph()
            .filter_chain_unspents(
                &local_chain,
                local_chain.tip().block_id(),
                indexed_outpoints,
            )
            .map(|(_, utxo)| utxo.outpoint)
            .collect();
        (
            local_chain,
            unused_revealed_script_pubkeys,
            unconfirmed_txids,
            utxos,
        )
    }

    /// Resolves a [`FeeConfig`] to a concrete fee rate, consulting the
    /// estimator for priority-based configs.
    pub(crate) fn fee_rate_from_config(&self, fee_config: FeeConfig) -> FeeRate {
        match fee_config {
            FeeConfig::Priority(target) => self.fee_rate_estimator.get(target),
            FeeConfig::FeeRate(fee_rate) => fee_rate,
        }
    }
}
impl<D> OnChainWallet<D>
where
    D: BdkStorage,
{
    /// Creates (or loads) a BIP84 wallet for `network`, derived from `seed`
    /// and persisted in `db`.
    pub fn new(
        network: Network,
        seed: WalletSeed,
        db: D,
        fee_rate_estimator: Arc<FeeRateEstimator>,
    ) -> Result<Self> {
        let secp = Secp256k1::new();
        tracing::info!(?network, "Creating on-chain wallet");
        let ext_priv_key = seed.derive_extended_priv_key(network)?;
        // BIP84 (native SegWit) descriptors for the receive and change
        // keychains.
        let bdk = bdk::Wallet::new_or_load(
            bdk::template::Bip84(ext_priv_key, KeychainKind::External),
            Some(bdk::template::Bip84(ext_priv_key, KeychainKind::Internal)),
            db,
            network,
        )
        .map_err(|e| anyhow!("{e:?}"))?;
        let bdk = RwLock::new(bdk);
        let bdk = Arc::new(bdk);
        Ok(Self {
            bdk,
            locked_utxos: Default::default(),
            fee_rate_estimator,
            network,
            secp,
        })
    }

    /// Reveals a brand-new receive address.
    pub fn get_new_address(&self) -> Result<Address> {
        let address = self
            .bdk
            .write()
            .try_get_address(bdk::wallet::AddressIndex::New)
            .map_err(|e| anyhow!("{e:?}"))?;
        Ok(address.address)
    }

    /// Returns the last revealed-but-unused address (avoids burning through
    /// the keychain).
    pub fn get_unused_address(&self) -> Result<Address> {
        let address = self
            .bdk
            .write()
            .try_get_address(bdk::wallet::AddressIndex::LastUnused)
            .map_err(|e| anyhow!("{e:?}"))?;
        Ok(address.address)
    }

    /// Send funds to the given address.
    ///
    /// If `amount_sat_or_drain` is `0` the wallet will be drained, i.e., all available funds
    /// will be spent.
    pub(crate) fn build_on_chain_payment_tx(
        &self,
        recipient: &Address,
        amount_sat_or_drain: u64,
        fee_config: FeeConfig,
    ) -> Result<Transaction> {
        if amount_sat_or_drain > 0 && amount_sat_or_drain.is_dust(&recipient.script_pubkey()) {
            bail!("Send amount below dust: {amount_sat_or_drain} sat");
        }
        let tx = self
            .build_and_sign_psbt(recipient, amount_sat_or_drain, fee_config)?
            .extract_tx();
        // Reserve the inputs so a subsequent build cannot double-spend them.
        let input_utxos = tx
            .input
            .iter()
            .map(|input| input.previous_output)
            .collect::<Vec<_>>();
        self.locked_utxos.lock().extend(input_utxos);
        let txid = tx.txid();
        let txo = tx
            .output
            .iter()
            .find(|txo| txo.script_pubkey == recipient.script_pubkey())
            .expect("transaction to have recipient TXO");
        let amount = Amount::from_sat(txo.value);
        tracing::info!(%txid, %amount, %recipient, "Built on-chain payment transaction");
        Ok(tx)
    }

    /// Build a PSBT to send some sats to an [`Address`].
    ///
    /// An `amount_sat_or_drain` of `0` drains the wallet to the recipient.
    pub fn build_psbt(
        &self,
        recipient: &Address,
        amount_sat_or_drain: u64,
        fee_config: FeeConfig,
    ) -> Result<PartiallySignedTransaction> {
        let script_pubkey = recipient.script_pubkey();
        let wallet = &mut self.bdk.write();
        let mut builder = wallet.build_tx();
        // Reserved UTXOs must not be spent by this transaction.
        let locked_utxos = self.locked_utxos.lock();
        for outpoint in locked_utxos.iter() {
            builder.add_unspendable(*outpoint);
        }
        if amount_sat_or_drain > 0 {
            builder.add_recipient(script_pubkey, amount_sat_or_drain);
        } else {
            builder.drain_wallet().drain_to(script_pubkey);
        }
        let fee_rate = self.fee_rate_from_config(fee_config);
        builder.fee_rate(fee_rate);
        let psbt = builder.finish().map_err(|e| anyhow!("{e:?}"))?;
        Ok(psbt)
    }

    /// Builds the PSBT (see [`Self::build_psbt`]) and fully signs it, failing
    /// if the wallet alone cannot finalize it.
    pub fn build_and_sign_psbt(
        &self,
        recipient: &Address,
        amount_sat_or_drain: u64,
        fee_config: FeeConfig,
    ) -> Result<PartiallySignedTransaction> {
        let mut psbt = self.build_psbt(recipient, amount_sat_or_drain, fee_config)?;
        let finalized = self
            .bdk
            .write()
            .sign(&mut psbt, SignOptions::default())
            .map_err(|e| anyhow!("{e:?}"))?;
        if !finalized {
            bail!("PSBT not finalized");
        }
        Ok(psbt)
    }

    /// Estimate the fee for sending funds to a given [`Address`].
    pub fn estimate_fee(&self, recipient: &Address, fee_config: FeeConfig) -> Result<Amount> {
        // We're just estimating a fee, the send amount is irrelevant. But it needs to be over the
        // dust limit (546 sats according to BDK).
        let psbt = self.build_psbt(recipient, 1_000, fee_config)?;
        let fee_sat = match psbt.fee_amount() {
            Some(fee) => fee,
            None => {
                // Fall back to fee-rate × average SegWit transaction size.
                let rate = self.fee_rate_from_config(fee_config);
                rate.fee_vb(AVG_SEGWIT_TX_WEIGHT_VB)
            }
        };
        Ok(Amount::from_sat(fee_sat))
    }

    /// Applies a chain-sync update to the wallet and persists it.
    pub(crate) fn commit_wallet_update(&self, update: bdk::wallet::Update) -> Result<()> {
        let mut bdk = self.bdk.write();
        bdk.apply_update(update)?;
        bdk.commit().map_err(|e| anyhow!("{e:?}"))?;
        Ok(())
    }
}
#[derive(Debug)]
pub struct TransactionDetails {
pub transaction: Transaction,
pub sent: Amount,
pub received: Amount,
// The fee is the only part of this struct that we might fail to compute. We forward the error
// so that consumers can decide how to proceed.
pub fee: Result<Amount, CalculateFeeError>,
pub confirmation_status: ConfirmationStatus,
}
impl TransactionDetails {
pub fn net_amount(&self) -> Result<SignedAmount> {
let received = self.received.to_signed()?;
let sent = self.sent.to_signed()?;
Ok(received - sent)
}
}
#[derive(Debug)]
pub enum ConfirmationStatus {
Unknown,
Mempool {
last_seen: OffsetDateTime,
},
Confirmed {
n_confirmations: NonZeroU32,
timestamp: OffsetDateTime,
},
}
impl ConfirmationStatus {
pub fn n_confirmations(&self) -> u32 {
match self {
ConfirmationStatus::Confirmed {
n_confirmations, ..
} => (*n_confirmations).into(),
ConfirmationStatus::Unknown | ConfirmationStatus::Mempool { .. } => 0,
}
}
}
/// Fee configuration for an on-chain transaction.
#[derive(Clone, Copy)]
pub enum FeeConfig {
/// The fee rate is derived from the configured priority.
Priority(ConfirmationTarget),
/// The fee rate is explicitly configured.
FeeRate(FeeRate),
}
pub trait BdkStorage: PersistBackend<bdk::wallet::ChangeSet> + Send + Sync + 'static {}
#[derive(Default)]
pub struct InMemoryStorage(Option<bdk::wallet::ChangeSet>);
impl InMemoryStorage {
#[cfg(test)]
pub fn new() -> Self {
Self::default()
}
}
impl<T> BdkStorage for T where T: PersistBackend<bdk::wallet::ChangeSet> + Send + Sync + 'static {}
impl PersistBackend<bdk::wallet::ChangeSet> for InMemoryStorage {
type WriteError = anyhow::Error;
type LoadError = anyhow::Error;
fn write_changes(
&mut self,
changeset: &bdk::wallet::ChangeSet,
) -> Result<(), Self::WriteError> {
if changeset.is_empty() {
return Ok(());
}
let original = self.0.get_or_insert(changeset.clone());
original.append(changeset.clone());
Ok(())
}
fn load_from_persistence(&mut self) -> Result<Option<bdk::wallet::ChangeSet>, Self::LoadError> {
Ok(self.0.clone())
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/dlc_message.rs | crates/xxi-node/src/dlc_message.rs | use crate::message_handler::TenTenOneMessage;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use sha2::digest::FixedOutput;
use sha2::Digest;
use sha2::Sha256;
use time::OffsetDateTime;
use ureq::serde_json;
#[derive(Clone)]
pub struct DlcMessage {
pub message_hash: String,
pub inbound: bool,
pub peer_id: PublicKey,
pub message_type: DlcMessageType,
pub timestamp: OffsetDateTime,
}
impl DlcMessage {
pub fn new(
peer_id: PublicKey,
serialized_message: SerializedDlcMessage,
inbound: bool,
) -> Result<DlcMessage> {
let message_hash = serialized_message.generate_hash();
Ok(Self {
message_hash,
inbound,
peer_id,
message_type: serialized_message.message_type,
timestamp: OffsetDateTime::now_utc(),
})
}
}
#[derive(Hash, Clone, Debug)]
pub struct SerializedDlcMessage {
pub message: String,
pub message_type: DlcMessageType,
}
impl SerializedDlcMessage {
pub fn generate_hash(&self) -> String {
let mut hasher = Sha256::new();
hasher.update(self.message.as_bytes());
hex::encode(hasher.finalize_fixed())
}
}
#[derive(Hash, Clone, Debug)]
pub enum DlcMessageType {
Offer,
Accept,
Sign,
SettleOffer,
SettleAccept,
SettleConfirm,
SettleFinalize,
RenewOffer,
RenewAccept,
RenewConfirm,
RenewFinalize,
RenewRevoke,
RolloverOffer,
RolloverAccept,
RolloverConfirm,
RolloverFinalize,
RolloverRevoke,
CollaborativeCloseOffer,
Reject,
}
impl TryFrom<&SerializedDlcMessage> for TenTenOneMessage {
type Error = anyhow::Error;
fn try_from(serialized_msg: &SerializedDlcMessage) -> Result<Self, Self::Error> {
let message = match serialized_msg.clone().message_type {
DlcMessageType::Reject => {
TenTenOneMessage::Reject(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::Offer => {
TenTenOneMessage::Offer(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::Accept => {
TenTenOneMessage::Accept(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::Sign => {
TenTenOneMessage::Sign(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::SettleOffer => {
TenTenOneMessage::SettleOffer(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::SettleAccept => {
TenTenOneMessage::SettleAccept(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::SettleConfirm => {
TenTenOneMessage::SettleConfirm(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::SettleFinalize => {
TenTenOneMessage::SettleFinalize(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::RenewOffer => {
TenTenOneMessage::RenewOffer(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::RenewAccept => {
TenTenOneMessage::RenewAccept(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::RenewConfirm => {
TenTenOneMessage::RenewConfirm(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::RenewFinalize => {
TenTenOneMessage::RenewFinalize(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::RenewRevoke => {
TenTenOneMessage::RenewRevoke(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::RolloverOffer => {
TenTenOneMessage::RolloverOffer(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::RolloverAccept => {
TenTenOneMessage::RolloverAccept(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::RolloverConfirm => {
TenTenOneMessage::RolloverConfirm(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::RolloverFinalize => {
TenTenOneMessage::RolloverFinalize(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::RolloverRevoke => {
TenTenOneMessage::RolloverRevoke(serde_json::from_str(&serialized_msg.message)?)
}
DlcMessageType::CollaborativeCloseOffer => TenTenOneMessage::CollaborativeCloseOffer(
serde_json::from_str(&serialized_msg.message)?,
),
};
Ok(message)
}
}
impl TryFrom<&TenTenOneMessage> for SerializedDlcMessage {
type Error = anyhow::Error;
fn try_from(msg: &TenTenOneMessage) -> Result<Self, Self::Error> {
let (message, message_type) = match &msg {
TenTenOneMessage::Offer(offer) => {
(serde_json::to_string(&offer)?, DlcMessageType::Offer)
}
TenTenOneMessage::Accept(accept) => {
(serde_json::to_string(&accept)?, DlcMessageType::Accept)
}
TenTenOneMessage::Sign(sign) => (serde_json::to_string(&sign)?, DlcMessageType::Sign),
TenTenOneMessage::SettleOffer(settle_offer) => (
serde_json::to_string(&settle_offer)?,
DlcMessageType::SettleOffer,
),
TenTenOneMessage::SettleAccept(settle_accept) => (
serde_json::to_string(&settle_accept)?,
DlcMessageType::SettleAccept,
),
TenTenOneMessage::SettleConfirm(settle_confirm) => (
serde_json::to_string(&settle_confirm)?,
DlcMessageType::SettleConfirm,
),
TenTenOneMessage::SettleFinalize(settle_finalize) => (
serde_json::to_string(&settle_finalize)?,
DlcMessageType::SettleFinalize,
),
TenTenOneMessage::RenewOffer(renew_offer) => (
serde_json::to_string(&renew_offer)?,
DlcMessageType::RenewOffer,
),
TenTenOneMessage::RenewAccept(renew_accept) => (
serde_json::to_string(&renew_accept)?,
DlcMessageType::RenewAccept,
),
TenTenOneMessage::RenewConfirm(renew_confirm) => (
serde_json::to_string(&renew_confirm)?,
DlcMessageType::RenewConfirm,
),
TenTenOneMessage::RenewFinalize(renew_finalize) => (
serde_json::to_string(&renew_finalize)?,
DlcMessageType::RenewFinalize,
),
TenTenOneMessage::RenewRevoke(renew_revoke) => (
serde_json::to_string(&renew_revoke)?,
DlcMessageType::RenewRevoke,
),
TenTenOneMessage::CollaborativeCloseOffer(collaborative_close_offer) => (
serde_json::to_string(&collaborative_close_offer)?,
DlcMessageType::CollaborativeCloseOffer,
),
TenTenOneMessage::Reject(reject) => {
(serde_json::to_string(&reject)?, DlcMessageType::Reject)
}
TenTenOneMessage::RolloverOffer(rollover_offer) => (
serde_json::to_string(&rollover_offer)?,
DlcMessageType::RolloverOffer,
),
TenTenOneMessage::RolloverAccept(rollover_accept) => (
serde_json::to_string(&rollover_accept)?,
DlcMessageType::RolloverAccept,
),
TenTenOneMessage::RolloverConfirm(rollover_confirm) => (
serde_json::to_string(&rollover_confirm)?,
DlcMessageType::RolloverConfirm,
),
TenTenOneMessage::RolloverFinalize(rollover_finalize) => (
serde_json::to_string(&rollover_finalize)?,
DlcMessageType::RolloverFinalize,
),
TenTenOneMessage::RolloverRevoke(rollover_revoke) => (
serde_json::to_string(&rollover_revoke)?,
DlcMessageType::RolloverRevoke,
),
};
Ok(Self {
message,
message_type,
})
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/blockchain.rs | crates/xxi-node/src/blockchain.rs | use crate::bitcoin_conversion::to_tx_30;
use crate::node::Storage;
use anyhow::Context;
use anyhow::Result;
use bdk_esplora::esplora_client;
use bdk_esplora::esplora_client::OutputStatus;
use bdk_esplora::esplora_client::TxStatus;
use bitcoin::consensus::encode::serialize_hex;
use bitcoin::Block;
use bitcoin::BlockHash;
use bitcoin::OutPoint;
use bitcoin::Transaction;
use bitcoin::Txid;
use std::sync::Arc;
use tokio::task::spawn_blocking;
use tracing::instrument;
const SOCKET_TIMEOUT: u64 = 30;
#[derive(Clone)]
pub struct Blockchain<N> {
/// Async client used during on-chain syncing and, sometimes, to broadcast transactions.
pub(crate) esplora_client_async: esplora_client::AsyncClient,
/// Blocking client used when the task to be performed is in a blocking context (usually
/// blocking trait methods).
esplora_client_blocking: esplora_client::BlockingClient,
node_storage: Arc<N>,
}
impl<N> Blockchain<N>
where
N: Storage,
{
pub fn new(electrs_url: String, node_storage: Arc<N>) -> Result<Self> {
let esplora_client_async = esplora_client::Builder::new(&electrs_url)
.timeout(SOCKET_TIMEOUT)
.build_async()?;
let esplora_client_blocking = esplora_client::Builder::new(&electrs_url)
.timeout(SOCKET_TIMEOUT)
.build_blocking()?;
Ok(Self {
esplora_client_async,
esplora_client_blocking,
node_storage,
})
}
#[instrument(skip_all, fields(txid = %tx.txid()))]
pub fn broadcast_transaction_blocking(&self, tx: &Transaction) -> Result<Txid> {
let txid = tx.txid();
tracing::info!(raw_tx = %serialize_hex(&tx), "Broadcasting transaction");
if let Err(e) = self.node_storage.upsert_transaction(tx.into()) {
tracing::error!("Failed to store transaction. Error: {e:#}");
}
self.esplora_client_blocking
.broadcast(tx)
.with_context(|| format!("Failed to broadcast transaction {txid}"))?;
Ok(txid)
}
pub fn get_blockchain_tip(&self) -> Result<u64> {
let height = self.esplora_client_blocking.get_height()?;
Ok(height as u64)
}
pub fn get_block_hash(&self, height: u64) -> Result<BlockHash> {
let block_hash = self.esplora_client_blocking.get_block_hash(height as u32)?;
Ok(block_hash)
}
pub fn get_block_by_hash(&self, block_hash: &BlockHash) -> Result<Block> {
let block = self
.esplora_client_blocking
.get_block_by_hash(block_hash)?
.context("Could not find block")?;
Ok(block)
}
pub fn get_transaction(&self, txid: &Txid) -> Result<Option<Transaction>> {
let tx = self.esplora_client_blocking.get_tx(txid)?;
Ok(tx)
}
pub fn get_transaction_confirmations(&self, txid: &Txid) -> Result<u32> {
let status = self.esplora_client_blocking.get_tx_status(txid)?;
let tx_height = match status.block_height {
Some(height) => height,
None => return Ok(0),
};
self.tx_height_to_confirmations(tx_height)
}
pub fn get_txo_confirmations(&self, txo: &OutPoint) -> Result<Option<(u32, Txid)>> {
let status = self
.esplora_client_blocking
.get_output_status(&txo.txid, txo.vout as u64)?;
let (tx_height, txid) = match status {
Some(OutputStatus {
txid: Some(txid),
status:
Some(TxStatus {
block_height: Some(height),
..
}),
..
}) => (height, txid),
_ => return Ok(None),
};
let confirmations = self.tx_height_to_confirmations(tx_height)?;
Ok(Some((confirmations, txid)))
}
fn tx_height_to_confirmations(&self, tx_height: u32) -> Result<u32> {
let tip = self.esplora_client_blocking.get_height()?;
let confirmations = match tip.checked_sub(tx_height) {
Some(diff) => diff + 1,
// Something is wrong if the tip is behind the transaction confirmation height. We
// simply mark the transaction as not confirmed.
None => return Ok(0),
};
Ok(confirmations)
}
}
impl<N> Blockchain<N>
where
N: Storage + Send + Sync + 'static,
{
#[instrument(skip_all, fields(txid = %tx.txid()))]
pub async fn broadcast_transaction(&self, tx: &Transaction) -> Result<Txid> {
let txid = tx.txid();
tracing::info!(raw_tx = %serialize_hex(&tx), "Broadcasting transaction");
if let Err(e) = spawn_blocking({
let storage = self.node_storage.clone();
let tx = tx.into();
move || {
storage.upsert_transaction(tx)?;
anyhow::Ok(())
}
})
.await
.expect("task to complete")
{
tracing::error!("Failed to store transaction. Error: {e:#}");
}
self.esplora_client_async
.broadcast(tx)
.await
.with_context(|| format!("Failed to broadcast transaction {txid}"))?;
Ok(txid)
}
}
impl<N> lightning::chain::chaininterface::BroadcasterInterface for Blockchain<N>
where
N: Storage,
{
fn broadcast_transactions(&self, txs: &[&bitcoin_old::Transaction]) {
for tx in txs {
let tx = to_tx_30((*tx).clone());
if let Err(e) = self.broadcast_transaction_blocking(&tx) {
tracing::error!(tx = %tx.txid(), "{e:#}");
}
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/bitmex_client.rs | crates/xxi-node/src/bitmex_client.rs | use crate::commons::Direction;
use crate::commons::Price;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use bitcoin::Network;
use rust_decimal::Decimal;
use serde::Deserialize;
use serde::Serialize;
use std::ops::Sub;
use std::time::Duration;
use time::format_description;
use time::OffsetDateTime;
pub struct BitmexClient {}
impl BitmexClient {
/// gets a quote for a given timestamp. An error is returned if the provided timestamp is
/// greater than the current timestamp
pub async fn get_quote(network: &Network, timestamp: &OffsetDateTime) -> Result<Quote> {
if OffsetDateTime::now_utc().lt(timestamp) {
bail!("timestamp must not be in the future!")
}
let url = match network {
Network::Bitcoin => "www.bitmex.com".to_string(),
_ => "testnet.bitmex.com".to_string(),
};
let format = format_description::parse("[year]-[month]-[day]T[hour]:[minute]:[second]")?;
// subtracting a second from the start time to ensure we will get a quote from bitmex.
let start_time = timestamp.sub(Duration::from_secs(60)).format(&format)?;
let end_time = timestamp.format(&format)?;
let quote: Vec<Quote> = reqwest::get(format!("https://{url}/api/v1/quote?symbol=XBTUSD&count=1&reverse=false&startTime={start_time}&endTime={end_time}"))
.await?
.json()
.await?;
let quote = quote.first().context("Did not get any quote from bitmex")?;
Ok(quote.clone())
}
}
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Quote {
pub bid_size: u64,
pub ask_size: u64,
#[serde(with = "rust_decimal::serde::float")]
pub bid_price: Decimal,
#[serde(with = "rust_decimal::serde::float")]
pub ask_price: Decimal,
pub symbol: String,
#[serde(with = "time::serde::rfc3339")]
pub timestamp: OffsetDateTime,
}
impl Quote {
pub fn get_price_for_direction(self, direction: Direction) -> Decimal {
let price = Price::from(self);
price.get_price_for_direction(direction)
}
}
impl From<Quote> for Price {
fn from(value: Quote) -> Self {
Price {
bid: value.bid_price,
ask: value.ask_price,
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/dlc_custom_signer.rs | crates/xxi-node/src/dlc_custom_signer.rs | //! This file has temporarily been copied from `https://github.com/p2pderivatives/rust-dlc/pull/97`.
//! We should reimplement some of these traits for production.
use crate::bitcoin_conversion::to_script_29;
use crate::on_chain_wallet::BdkStorage;
use crate::on_chain_wallet::OnChainWallet;
use anyhow::anyhow;
use anyhow::Result;
use bitcoin::address::Payload;
use dlc_manager::subchannel::LnDlcChannelSigner;
use dlc_manager::subchannel::LnDlcSignerProvider;
use lightning::ln::chan_utils::ChannelPublicKeys;
use lightning::ln::msgs::DecodeError;
use lightning::ln::script::ShutdownScript;
use lightning::offers::invoice::UnsignedBolt12Invoice;
use lightning::offers::invoice_request::UnsignedInvoiceRequest;
use lightning::sign::ChannelSigner;
use lightning::sign::EcdsaChannelSigner;
use lightning::sign::EntropySource;
use lightning::sign::InMemorySigner;
use lightning::sign::KeyMaterial;
use lightning::sign::KeysManager;
use lightning::sign::NodeSigner;
use lightning::sign::Recipient;
use lightning::sign::SignerProvider;
use lightning::sign::SpendableOutputDescriptor;
use lightning::sign::WriteableEcdsaChannelSigner;
use lightning::util::ser::Writeable;
use parking_lot::Mutex;
use parking_lot::MutexGuard;
use secp256k1_zkp::ecdsa::RecoverableSignature;
use std::sync::Arc;
pub struct CustomSigner {
in_memory_signer: Arc<Mutex<InMemorySigner>>,
// TODO(tibo): this might not be safe.
channel_public_keys: ChannelPublicKeys,
}
impl CustomSigner {
pub fn new(in_memory_signer: InMemorySigner) -> Self {
Self {
channel_public_keys: in_memory_signer.pubkeys().clone(),
in_memory_signer: Arc::new(Mutex::new(in_memory_signer)),
}
}
fn in_memory_signer_lock(&self) -> MutexGuard<InMemorySigner> {
self.in_memory_signer.lock()
}
}
impl Clone for CustomSigner {
fn clone(&self) -> Self {
Self {
in_memory_signer: self.in_memory_signer.clone(),
channel_public_keys: self.channel_public_keys.clone(),
}
}
}
impl EcdsaChannelSigner for CustomSigner {
fn sign_counterparty_commitment(
&self,
commitment_tx: &lightning::ln::chan_utils::CommitmentTransaction,
preimages: Vec<lightning::ln::PaymentPreimage>,
secp_ctx: &bitcoin_old::secp256k1::Secp256k1<bitcoin_old::secp256k1::All>,
) -> Result<
(
secp256k1_zkp::ecdsa::Signature,
Vec<secp256k1_zkp::ecdsa::Signature>,
),
(),
> {
self.in_memory_signer_lock().sign_counterparty_commitment(
commitment_tx,
preimages,
secp_ctx,
)
}
fn validate_counterparty_revocation(
&self,
idx: u64,
secret: &bitcoin_old::secp256k1::SecretKey,
) -> Result<(), ()> {
self.in_memory_signer_lock()
.validate_counterparty_revocation(idx, secret)
}
fn sign_holder_commitment_and_htlcs(
&self,
commitment_tx: &lightning::ln::chan_utils::HolderCommitmentTransaction,
secp_ctx: &bitcoin_old::secp256k1::Secp256k1<bitcoin_old::secp256k1::All>,
) -> Result<
(
secp256k1_zkp::ecdsa::Signature,
Vec<secp256k1_zkp::ecdsa::Signature>,
),
(),
> {
self.in_memory_signer_lock()
.sign_holder_commitment_and_htlcs(commitment_tx, secp_ctx)
}
fn sign_justice_revoked_output(
&self,
justice_tx: &bitcoin_old::Transaction,
input: usize,
amount: u64,
per_commitment_key: &bitcoin_old::secp256k1::SecretKey,
secp_ctx: &bitcoin_old::secp256k1::Secp256k1<bitcoin_old::secp256k1::All>,
) -> Result<secp256k1_zkp::ecdsa::Signature, ()> {
self.in_memory_signer_lock().sign_justice_revoked_output(
justice_tx,
input,
amount,
per_commitment_key,
secp_ctx,
)
}
fn sign_justice_revoked_htlc(
&self,
justice_tx: &bitcoin_old::Transaction,
input: usize,
amount: u64,
per_commitment_key: &bitcoin_old::secp256k1::SecretKey,
htlc: &lightning::ln::chan_utils::HTLCOutputInCommitment,
secp_ctx: &bitcoin_old::secp256k1::Secp256k1<bitcoin_old::secp256k1::All>,
) -> Result<secp256k1_zkp::ecdsa::Signature, ()> {
self.in_memory_signer_lock().sign_justice_revoked_htlc(
justice_tx,
input,
amount,
per_commitment_key,
htlc,
secp_ctx,
)
}
fn sign_holder_htlc_transaction(
&self,
htlc_tx: &bitcoin_old::Transaction,
input: usize,
htlc_descriptor: &lightning::events::bump_transaction::HTLCDescriptor,
secp_ctx: &bitcoin_old::secp256k1::Secp256k1<bitcoin_old::secp256k1::All>,
) -> Result<secp256k1_zkp::ecdsa::Signature, ()> {
self.in_memory_signer_lock().sign_holder_htlc_transaction(
htlc_tx,
input,
htlc_descriptor,
secp_ctx,
)
}
fn sign_counterparty_htlc_transaction(
&self,
htlc_tx: &bitcoin_old::Transaction,
input: usize,
amount: u64,
per_commitment_point: &secp256k1_zkp::PublicKey,
htlc: &lightning::ln::chan_utils::HTLCOutputInCommitment,
secp_ctx: &bitcoin_old::secp256k1::Secp256k1<bitcoin_old::secp256k1::All>,
) -> Result<secp256k1_zkp::ecdsa::Signature, ()> {
self.in_memory_signer_lock()
.sign_counterparty_htlc_transaction(
htlc_tx,
input,
amount,
per_commitment_point,
htlc,
secp_ctx,
)
}
fn sign_closing_transaction(
&self,
closing_tx: &lightning::ln::chan_utils::ClosingTransaction,
secp_ctx: &bitcoin_old::secp256k1::Secp256k1<bitcoin_old::secp256k1::All>,
) -> Result<secp256k1_zkp::ecdsa::Signature, ()> {
self.in_memory_signer_lock()
.sign_closing_transaction(closing_tx, secp_ctx)
}
fn sign_holder_anchor_input(
&self,
anchor_tx: &bitcoin_old::Transaction,
input: usize,
secp_ctx: &bitcoin_old::secp256k1::Secp256k1<bitcoin_old::secp256k1::All>,
) -> Result<secp256k1_zkp::ecdsa::Signature, ()> {
self.in_memory_signer_lock()
.sign_holder_anchor_input(anchor_tx, input, secp_ctx)
}
fn sign_channel_announcement_with_funding_key(
&self,
msg: &lightning::ln::msgs::UnsignedChannelAnnouncement,
secp_ctx: &bitcoin_old::secp256k1::Secp256k1<bitcoin_old::secp256k1::All>,
) -> Result<secp256k1_zkp::ecdsa::Signature, ()> {
self.in_memory_signer_lock()
.sign_channel_announcement_with_funding_key(msg, secp_ctx)
}
}
impl ChannelSigner for CustomSigner {
fn get_per_commitment_point(
&self,
idx: u64,
secp_ctx: &bitcoin_old::secp256k1::Secp256k1<bitcoin_old::secp256k1::All>,
) -> secp256k1_zkp::PublicKey {
self.in_memory_signer_lock()
.get_per_commitment_point(idx, secp_ctx)
}
fn release_commitment_secret(&self, idx: u64) -> [u8; 32] {
self.in_memory_signer_lock().release_commitment_secret(idx)
}
fn validate_holder_commitment(
&self,
holder_tx: &lightning::ln::chan_utils::HolderCommitmentTransaction,
preimages: Vec<lightning::ln::PaymentPreimage>,
) -> Result<(), ()> {
self.in_memory_signer_lock()
.validate_holder_commitment(holder_tx, preimages)
}
fn pubkeys(&self) -> &ChannelPublicKeys {
&self.channel_public_keys
}
fn channel_keys_id(&self) -> [u8; 32] {
self.in_memory_signer_lock().channel_keys_id()
}
fn provide_channel_parameters(
&mut self,
channel_parameters: &lightning::ln::chan_utils::ChannelTransactionParameters,
) {
self.in_memory_signer_lock()
.provide_channel_parameters(channel_parameters);
}
fn set_channel_value_satoshis(&mut self, value: u64) {
self.in_memory_signer_lock()
.set_channel_value_satoshis(value)
}
}
impl LnDlcChannelSigner for CustomSigner {
fn get_holder_split_tx_signature(
&self,
secp: &bitcoin_old::secp256k1::Secp256k1<secp256k1_zkp::All>,
split_tx: &bitcoin_old::Transaction,
original_funding_redeemscript: &bitcoin_old::Script,
original_channel_value_satoshis: u64,
) -> std::result::Result<secp256k1_zkp::ecdsa::Signature, dlc_manager::error::Error> {
dlc::util::get_raw_sig_for_tx_input(
secp,
split_tx,
0,
original_funding_redeemscript,
original_channel_value_satoshis,
&self.in_memory_signer_lock().funding_key,
)
.map_err(|e| e.into())
}
fn get_holder_split_tx_adaptor_signature(
&self,
secp: &bitcoin_old::secp256k1::Secp256k1<secp256k1_zkp::All>,
split_tx: &bitcoin_old::Transaction,
original_channel_value_satoshis: u64,
original_funding_redeemscript: &bitcoin_old::Script,
other_publish_key: &secp256k1_zkp::PublicKey,
) -> std::result::Result<secp256k1_zkp::EcdsaAdaptorSignature, dlc_manager::error::Error> {
dlc::channel::get_tx_adaptor_signature(
secp,
split_tx,
original_channel_value_satoshis,
original_funding_redeemscript,
&self.in_memory_signer_lock().funding_key,
other_publish_key,
)
.map_err(|e| e.into())
}
}
impl Writeable for CustomSigner {
fn write<W: lightning::util::ser::Writer>(&self, writer: &mut W) -> Result<(), std::io::Error> {
self.in_memory_signer_lock().write(writer)
}
}
pub struct CustomKeysManager<D> {
keys_manager: KeysManager,
wallet: Arc<OnChainWallet<D>>,
}
impl<D> CustomKeysManager<D> {
pub fn new(keys_manager: KeysManager, wallet: Arc<OnChainWallet<D>>) -> Self {
Self {
keys_manager,
wallet,
}
}
pub fn get_node_secret_key(&self) -> bitcoin_old::secp256k1::SecretKey {
self.keys_manager.get_node_secret_key()
}
}
impl<D> CustomKeysManager<D> {
#[allow(clippy::result_unit_err)]
pub fn spend_spendable_outputs<C: bitcoin_old::secp256k1::Signing>(
&self,
descriptors: &[&SpendableOutputDescriptor],
outputs: Vec<bitcoin_old::TxOut>,
change_destination_script: bitcoin_old::Script,
feerate_sat_per_1000_weight: u32,
secp_ctx: &bitcoin_old::secp256k1::Secp256k1<C>,
) -> Result<bitcoin_old::Transaction> {
self.keys_manager
.spend_spendable_outputs(
descriptors,
outputs,
change_destination_script,
feerate_sat_per_1000_weight,
None,
secp_ctx,
)
.map_err(|_| anyhow!("Could not spend spendable outputs"))
}
}
impl<D: BdkStorage> LnDlcSignerProvider<CustomSigner> for CustomKeysManager<D> {
fn derive_ln_dlc_channel_signer(
&self,
channel_value_satoshis: u64,
channel_keys_id: [u8; 32],
) -> CustomSigner {
self.derive_channel_signer(channel_value_satoshis, channel_keys_id)
}
}
impl<D: BdkStorage> SignerProvider for CustomKeysManager<D> {
type Signer = CustomSigner;
fn get_destination_script(&self) -> Result<bitcoin_old::Script, ()> {
let address = match self.wallet.get_new_address() {
Ok(address) => address,
Err(e) => {
tracing::error!("Failed to get new address: {e:?}");
return Err(());
}
};
let script_pubkey = address.script_pubkey();
let script_pubkey = to_script_29(script_pubkey);
Ok(script_pubkey)
}
fn get_shutdown_scriptpubkey(&self) -> std::result::Result<ShutdownScript, ()> {
let address = match self.wallet.get_new_address() {
Ok(address) => address,
Err(e) => {
tracing::error!("Failed to get new address: {e:?}");
return Err(());
}
};
match address.payload {
Payload::WitnessProgram(program) => {
let version = program.version().to_num();
let version =
bitcoin_old::util::address::WitnessVersion::try_from(version).expect("valid");
let program = program.program().as_bytes();
ShutdownScript::new_witness_program(version, program)
.map_err(|_ignored| tracing::error!("Invalid shutdown script"))
}
_ => {
tracing::error!("Tried to use a non-witness address. This must not ever happen.");
Err(())
}
}
}
fn read_chan_signer(&self, reader: &[u8]) -> Result<Self::Signer, DecodeError> {
let in_memory = self.keys_manager.read_chan_signer(reader)?;
Ok(CustomSigner::new(in_memory))
}
fn generate_channel_keys_id(
&self,
inbound: bool,
channel_value_satoshis: u64,
user_channel_id: u128,
) -> [u8; 32] {
self.keys_manager
.generate_channel_keys_id(inbound, channel_value_satoshis, user_channel_id)
}
fn derive_channel_signer(
&self,
channel_value_satoshis: u64,
channel_keys_id: [u8; 32],
) -> Self::Signer {
let inner = self
.keys_manager
.derive_channel_signer(channel_value_satoshis, channel_keys_id);
let pubkeys = inner.pubkeys();
CustomSigner {
channel_public_keys: pubkeys.clone(),
in_memory_signer: Arc::new(Mutex::new(inner)),
}
}
}
impl<D> NodeSigner for CustomKeysManager<D> {
fn get_inbound_payment_key_material(&self) -> KeyMaterial {
self.keys_manager.get_inbound_payment_key_material()
}
fn get_node_id(&self, recipient: Recipient) -> Result<secp256k1_zkp::PublicKey, ()> {
self.keys_manager.get_node_id(recipient)
}
fn ecdh(
&self,
recipient: Recipient,
other_key: &secp256k1_zkp::PublicKey,
tweak: Option<&secp256k1_zkp::Scalar>,
) -> Result<secp256k1_zkp::ecdh::SharedSecret, ()> {
self.keys_manager.ecdh(recipient, other_key, tweak)
}
fn sign_invoice(
&self,
hrp_bytes: &[u8],
invoice_data: &[bitcoin_old::bech32::u5],
recipient: Recipient,
) -> Result<RecoverableSignature, ()> {
self.keys_manager
.sign_invoice(hrp_bytes, invoice_data, recipient)
}
fn sign_bolt12_invoice_request(
&self,
invoice_request: &UnsignedInvoiceRequest,
) -> std::result::Result<bitcoin_old::secp256k1::schnorr::Signature, ()> {
self.keys_manager
.sign_bolt12_invoice_request(invoice_request)
}
fn sign_bolt12_invoice(
&self,
invoice: &UnsignedBolt12Invoice,
) -> std::result::Result<bitcoin_old::secp256k1::schnorr::Signature, ()> {
self.keys_manager.sign_bolt12_invoice(invoice)
}
fn sign_gossip_message(
&self,
msg: lightning::ln::msgs::UnsignedGossipMessage,
) -> Result<secp256k1_zkp::ecdsa::Signature, ()> {
self.keys_manager.sign_gossip_message(msg)
}
}
impl<D> EntropySource for CustomKeysManager<D> {
fn get_secure_random_bytes(&self) -> [u8; 32] {
self.keys_manager.get_secure_random_bytes()
}
}
impl WriteableEcdsaChannelSigner for CustomSigner {}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/transaction.rs | crates/xxi-node/src/transaction.rs | use bitcoin::consensus::encode::serialize_hex;
use bitcoin::Txid;
use std::fmt;
use std::fmt::Display;
use std::fmt::Formatter;
use time::OffsetDateTime;
#[derive(Debug, Clone, PartialEq)]
pub struct Transaction {
txid: Txid,
fee: u64,
created_at: OffsetDateTime,
updated_at: OffsetDateTime,
raw: String,
}
impl Transaction {
pub fn new(
txid: Txid,
fee: u64,
created_at: OffsetDateTime,
updated_at: OffsetDateTime,
raw: String,
) -> Self {
Self {
txid,
fee,
created_at,
updated_at,
raw,
}
}
pub fn txid(&self) -> Txid {
self.txid
}
pub fn fee(&self) -> u64 {
self.fee
}
pub fn with_fee(self, fee: u64) -> Self {
Self {
fee,
updated_at: OffsetDateTime::now_utc(),
..self
}
}
pub fn created_at(&self) -> OffsetDateTime {
self.created_at
}
pub fn updated_at(&self) -> OffsetDateTime {
self.updated_at
}
pub fn raw(&self) -> String {
self.raw.clone()
}
}
impl From<&bitcoin::Transaction> for Transaction {
fn from(value: &bitcoin::Transaction) -> Self {
let now = OffsetDateTime::now_utc();
Self::new(value.txid(), 0, now, now, serialize_hex(value))
}
}
impl Display for Transaction {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
format!(
"txid: {}, fees: {}, created_at: {}, updated_at: {}",
self.txid, self.fee, self.created_at, self.updated_at
)
.fmt(f)
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/dlc/logger.rs | crates/xxi-node/src/dlc/logger.rs | use lightning::util::logger::Level;
use lightning::util::logger::Logger;
use lightning::util::logger::Record as LnRecord;
#[derive(Clone)]
pub struct TracingLogger {
pub alias: String,
}
impl Logger for TracingLogger {
fn log(&self, record: &LnRecord) {
let level = match record.level {
Level::Gossip | Level::Trace => log::Level::Trace,
Level::Debug => log::Level::Debug,
Level::Info => log::Level::Info,
Level::Warn => log::Level::Warn,
Level::Error => log::Level::Error,
};
#[cfg(test)]
let target = {
// We must add the alias to the _end_ of the target because otherwise our `EnvFilter`
// configuration will not work
format!("{}[{}]", record.module_path, self.alias)
};
#[cfg(not(test))]
let target = record.module_path.to_string();
tracing_log::format_trace(
&log::Record::builder()
.level(level)
.args(record.args)
.target(&target)
.module_path(Some(&target))
.file_static(Some(record.file))
.line(Some(record.line))
.build(),
)
.expect("to be able to format a log record as a trace")
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/dlc/mod.rs | crates/xxi-node/src/dlc/mod.rs | mod contract_details;
mod dlc_channel_details;
mod logger;
pub use contract_details::ContractDetails;
pub use dlc_channel_details::DlcChannelDetails;
pub(crate) use logger::TracingLogger;
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/dlc/contract_details.rs | crates/xxi-node/src/dlc/contract_details.rs | use dlc_manager::contract::Contract;
use dlc_manager::ContractId;
use serde::Serialize;
use serde::Serializer;
#[derive(Serialize, Debug)]
pub struct ContractDetails {
#[serde(serialize_with = "contract_id_as_hex")]
pub contract_id: ContractId,
#[serde(serialize_with = "contract_id_as_hex")]
pub temporary_contract_id: ContractId,
pub contract_state: ContractState,
pub offered_funding_sats: Option<u64>,
pub accepted_funding_sats: Option<u64>,
pub fee_rate_per_vb: Option<u64>,
pub event_id: Option<String>,
}
#[derive(Serialize, Debug)]
pub enum ContractState {
Offered,
Accepted,
Signed,
Confirmed,
PreClosed,
Closed,
Refunded,
FailedAccept,
FailedSign,
Rejected,
}
impl From<Contract> for ContractDetails {
fn from(contract: Contract) -> Self {
let (
contract_state,
offered_funding_sats,
accepted_funding_sats,
fee_rate_per_vb,
event_id,
) = match &contract {
Contract::Offered(offered_contract) => (
ContractState::Offered,
Some(offered_contract.offer_params.collateral),
None,
Some(offered_contract.fee_rate_per_vb),
offered_contract.contract_info.first().map(|ci| {
ci.oracle_announcements
.first()
.map(|oa| oa.oracle_event.event_id.clone())
}),
),
Contract::Accepted(accepted_contract) => {
let offered_contract = accepted_contract.clone().offered_contract;
(
ContractState::Accepted,
Some(offered_contract.offer_params.collateral),
Some(accepted_contract.accept_params.collateral),
Some(offered_contract.fee_rate_per_vb),
offered_contract.contract_info.first().map(|ci| {
ci.oracle_announcements
.first()
.map(|oa| oa.oracle_event.event_id.clone())
}),
)
}
Contract::Signed(signed_contract) => {
let accepted_contract = signed_contract.clone().accepted_contract;
let offered_contract = accepted_contract.clone().offered_contract;
(
ContractState::Signed,
Some(offered_contract.offer_params.collateral),
Some(accepted_contract.accept_params.collateral),
Some(offered_contract.fee_rate_per_vb),
offered_contract.contract_info.first().map(|ci| {
ci.oracle_announcements
.first()
.map(|oa| oa.oracle_event.event_id.clone())
}),
)
}
Contract::Confirmed(confirmed_contract) => {
let accepted_contract = confirmed_contract.clone().accepted_contract;
let offered_contract = accepted_contract.clone().offered_contract;
(
ContractState::Confirmed,
Some(offered_contract.offer_params.collateral),
Some(accepted_contract.accept_params.collateral),
Some(offered_contract.fee_rate_per_vb),
offered_contract.contract_info.first().map(|ci| {
ci.oracle_announcements
.first()
.map(|oa| oa.oracle_event.event_id.clone())
}),
)
}
Contract::PreClosed(pre_closed_contract) => {
let accepted_contract = pre_closed_contract
.signed_contract
.clone()
.accepted_contract;
let offered_contract = accepted_contract.clone().offered_contract;
(
ContractState::PreClosed,
Some(offered_contract.offer_params.collateral),
Some(accepted_contract.accept_params.collateral),
Some(offered_contract.fee_rate_per_vb),
offered_contract.contract_info.first().map(|ci| {
ci.oracle_announcements
.first()
.map(|oa| oa.oracle_event.event_id.clone())
}),
)
}
Contract::Closed(_closed_contract) => (ContractState::Closed, None, None, None, None),
Contract::Refunded(refunded_contract) => {
let accepted_contract = refunded_contract.clone().accepted_contract;
let offered_contract = accepted_contract.clone().offered_contract;
(
ContractState::Refunded,
Some(offered_contract.offer_params.collateral),
Some(accepted_contract.accept_params.collateral),
Some(offered_contract.fee_rate_per_vb),
offered_contract.contract_info.first().map(|ci| {
ci.oracle_announcements
.first()
.map(|oa| oa.oracle_event.event_id.clone())
}),
)
}
Contract::FailedAccept(failed_accept_contract) => {
let offered_contract = failed_accept_contract.clone().offered_contract;
(
ContractState::FailedAccept,
Some(offered_contract.offer_params.collateral),
None,
Some(offered_contract.fee_rate_per_vb),
offered_contract.contract_info.first().map(|ci| {
ci.oracle_announcements
.first()
.map(|oa| oa.oracle_event.event_id.clone())
}),
)
}
Contract::FailedSign(failed_sign_contract) => {
let accepted_contract = failed_sign_contract.clone().accepted_contract;
let offered_contract = accepted_contract.clone().offered_contract;
(
ContractState::FailedSign,
Some(offered_contract.offer_params.collateral),
Some(accepted_contract.accept_params.collateral),
Some(offered_contract.fee_rate_per_vb),
offered_contract.contract_info.first().map(|ci| {
ci.oracle_announcements
.first()
.map(|oa| oa.oracle_event.event_id.clone())
}),
)
}
Contract::Rejected(rejected_contract) => (
ContractState::Rejected,
Some(rejected_contract.offer_params.collateral),
None,
Some(rejected_contract.fee_rate_per_vb),
rejected_contract.contract_info.first().map(|ci| {
ci.oracle_announcements
.first()
.map(|oa| oa.oracle_event.event_id.clone())
}),
),
};
ContractDetails {
contract_id: contract.get_id(),
temporary_contract_id: contract.get_temporary_id(),
contract_state,
offered_funding_sats,
accepted_funding_sats,
fee_rate_per_vb,
event_id: event_id.flatten(),
}
}
}
fn contract_id_as_hex<S>(contract_id: &ContractId, s: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
s.serialize_str(&hex::encode(contract_id))
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/dlc/dlc_channel_details.rs | crates/xxi-node/src/dlc/dlc_channel_details.rs | use crate::bitcoin_conversion::to_secp_pk_30;
use bitcoin::secp256k1::PublicKey;
use dlc_manager::channel::signed_channel::SignedChannel;
use dlc_manager::channel::Channel;
use dlc_manager::DlcChannelId;
use serde::Serialize;
use serde::Serializer;
#[derive(Serialize, Debug)]
pub struct DlcChannelDetails {
#[serde(serialize_with = "optional_channel_id_as_hex")]
pub dlc_channel_id: Option<DlcChannelId>,
#[serde(serialize_with = "pk_as_hex")]
pub counter_party: PublicKey,
pub channel_state: ChannelState,
pub signed_channel_state: Option<SignedChannelState>,
pub update_idx: Option<u64>,
pub fee_rate_per_vb: Option<u64>,
pub funding_txid: Option<String>,
pub funding_tx_vout: Option<usize>,
pub closing_txid: Option<String>,
}
#[derive(Serialize, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub enum SignedChannelState {
Established,
SettledOffered,
SettledReceived,
SettledAccepted,
SettledConfirmed,
Settled,
RenewOffered,
RenewAccepted,
RenewConfirmed,
RenewFinalized,
Closing,
CollaborativeCloseOffered,
SettledClosing,
}
#[derive(Serialize, Debug, Eq, Ord, PartialOrd, PartialEq)]
pub enum ChannelState {
Offered,
Accepted,
Signed,
Closing,
SettledClosing,
Closed,
CounterClosed,
ClosedPunished,
CollaborativelyClosed,
FailedAccept,
FailedSign,
Cancelled,
}
impl From<Channel> for DlcChannelDetails {
fn from(channel: Channel) -> Self {
let (update_idx, state, fee_rate_per_vb, funding_txid, funding_tx_vout, closing_txid) =
match channel.clone() {
Channel::Signed(SignedChannel {
update_idx,
fee_rate_per_vb,
fund_tx,
fund_output_index,
state: dlc_manager::channel::signed_channel::SignedChannelState::CollaborativeCloseOffered {
close_tx,
..
},
..
}) => (
Some(update_idx),
Some(SignedChannelState::CollaborativeCloseOffered),
Some(fee_rate_per_vb),
Some(fund_tx.txid().to_string()),
Some(fund_output_index),
Some(close_tx.txid().to_string())),
Channel::Signed(SignedChannel {
update_idx,
fee_rate_per_vb,
fund_tx,
fund_output_index,
state: dlc_manager::channel::signed_channel::SignedChannelState::SettledClosing {
settle_transaction,
..
},
..
}) => (
Some(update_idx),
Some(SignedChannelState::SettledClosing),
Some(fee_rate_per_vb),
Some(fund_tx.txid().to_string()),
Some(fund_output_index),
Some(settle_transaction.txid().to_string())),
Channel::Signed(signed_channel) => (
Some(signed_channel.update_idx),
Some(SignedChannelState::from(signed_channel.state)),
Some(signed_channel.fee_rate_per_vb),
Some(signed_channel.fund_tx.txid().to_string()),
Some(signed_channel.fund_output_index),
None,
),
Channel::CollaborativelyClosed(closed_channel)
| Channel::Closed(closed_channel)
| Channel::CounterClosed(closed_channel) => (
None,
None,
None,
None,
None,
Some(closed_channel.closing_txid.to_string()),
),
Channel::Closing(closing_channel) => (
None,
None,
None,
None,
None,
Some(closing_channel.buffer_transaction.txid().to_string()),
),
_ => (None, None, None, None, None, None),
};
DlcChannelDetails {
dlc_channel_id: Some(channel.get_id()),
counter_party: to_secp_pk_30(channel.get_counter_party_id()),
channel_state: ChannelState::from(channel),
signed_channel_state: state.map(SignedChannelState::from),
update_idx,
fee_rate_per_vb,
funding_txid,
funding_tx_vout,
closing_txid,
}
}
}
impl From<Channel> for ChannelState {
fn from(value: Channel) -> Self {
match value {
Channel::Offered(_) => ChannelState::Offered,
Channel::Accepted(_) => ChannelState::Accepted,
Channel::Signed(_) => ChannelState::Signed,
Channel::Closing(_) => ChannelState::Closing,
Channel::SettledClosing(_) => ChannelState::SettledClosing,
Channel::Closed(_) => ChannelState::Closed,
Channel::CounterClosed(_) => ChannelState::CounterClosed,
Channel::ClosedPunished(_) => ChannelState::ClosedPunished,
Channel::CollaborativelyClosed(_) => ChannelState::CollaborativelyClosed,
Channel::FailedAccept(_) => ChannelState::FailedAccept,
Channel::FailedSign(_) => ChannelState::FailedSign,
Channel::Cancelled(_) => ChannelState::Cancelled,
}
}
}
impl From<dlc_manager::channel::signed_channel::SignedChannelState> for SignedChannelState {
fn from(value: dlc_manager::channel::signed_channel::SignedChannelState) -> Self {
use dlc_manager::channel::signed_channel::SignedChannelState::*;
match value {
Established { .. } => SignedChannelState::Established,
SettledOffered { .. } => SignedChannelState::SettledOffered,
SettledReceived { .. } => SignedChannelState::SettledReceived,
SettledAccepted { .. } => SignedChannelState::SettledAccepted,
SettledConfirmed { .. } => SignedChannelState::SettledConfirmed,
Settled { .. } => SignedChannelState::Settled,
RenewOffered { .. } => SignedChannelState::RenewOffered,
RenewAccepted { .. } => SignedChannelState::RenewAccepted,
RenewConfirmed { .. } => SignedChannelState::RenewConfirmed,
RenewFinalized { .. } => SignedChannelState::RenewFinalized,
Closing { .. } => SignedChannelState::Closing,
CollaborativeCloseOffered { .. } => SignedChannelState::CollaborativeCloseOffered,
SettledClosing { .. } => SignedChannelState::SettledClosing,
}
}
}
fn optional_channel_id_as_hex<S>(channel_id: &Option<DlcChannelId>, s: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match channel_id {
Some(channel_id) => s.serialize_str(&hex::encode(channel_id)),
None => s.serialize_none(),
}
}
fn pk_as_hex<S>(pk: &PublicKey, s: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
s.serialize_str(&pk.to_string())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/storage/memory.rs | crates/xxi-node/src/storage/memory.rs | use crate::storage::DlcStoreProvider;
use crate::storage::KeyValue;
use anyhow::Context;
use parking_lot::RwLock;
use std::collections::HashMap;
use std::sync::Arc;
#[derive(Clone)]
pub struct TenTenOneInMemoryStorage {
dlc_store: InMemoryDlcStoreProvider,
}
impl TenTenOneInMemoryStorage {
pub fn new() -> Self {
Self {
dlc_store: InMemoryDlcStoreProvider::new(),
}
}
}
impl Default for TenTenOneInMemoryStorage {
fn default() -> Self {
Self::new()
}
}
impl DlcStoreProvider for TenTenOneInMemoryStorage {
fn read(&self, kind: u8, key: Option<Vec<u8>>) -> anyhow::Result<Vec<KeyValue>> {
self.dlc_store.read(kind, key)
}
fn write(&self, kind: u8, key: Vec<u8>, value: Vec<u8>) -> anyhow::Result<()> {
self.dlc_store.write(kind, key, value)
}
fn delete(&self, kind: u8, key: Option<Vec<u8>>) -> anyhow::Result<()> {
self.dlc_store.delete(kind, key)
}
}
type InMemoryStore = Arc<RwLock<HashMap<u8, HashMap<Vec<u8>, Vec<u8>>>>>;
#[derive(Clone)]
pub struct InMemoryDlcStoreProvider {
memory: InMemoryStore,
}
impl Default for InMemoryDlcStoreProvider {
fn default() -> Self {
Self::new()
}
}
impl InMemoryDlcStoreProvider {
pub fn new() -> Self {
InMemoryDlcStoreProvider {
memory: Arc::new(RwLock::new(HashMap::new())),
}
}
}
impl DlcStoreProvider for InMemoryDlcStoreProvider {
fn read(&self, kind: u8, key: Option<Vec<u8>>) -> anyhow::Result<Vec<KeyValue>> {
let store = self.memory.read();
let store = match store.get(&kind) {
Some(store) => store,
None => return Ok(vec![]),
};
if let Some(key) = key {
let result = match store.get(&key) {
Some(value) => vec![KeyValue {
key,
value: value.clone(),
}],
None => vec![],
};
Ok(result)
} else {
Ok(store
.clone()
.into_iter()
.map(|e| KeyValue {
key: e.0,
value: e.1,
})
.collect())
}
}
fn write(&self, kind: u8, key: Vec<u8>, value: Vec<u8>) -> anyhow::Result<()> {
self.memory
.write()
.entry(kind)
.and_modify(|v| {
v.insert(key.clone(), value.clone());
})
.or_insert(HashMap::from([(key, value)]));
Ok(())
}
fn delete(&self, kind: u8, key: Option<Vec<u8>>) -> anyhow::Result<()> {
if let Some(key) = key {
self.memory
.write()
.get_mut(&kind)
.context("couldn't find map")?
.remove(&key);
} else {
self.memory.write().remove(&kind);
}
Ok(())
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/storage/mod.rs | crates/xxi-node/src/storage/mod.rs | use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use bitcoin::secp256k1::SecretKey;
use dlc_manager::chain_monitor::ChainMonitor;
use dlc_manager::channel::accepted_channel::AcceptedChannel;
use dlc_manager::channel::offered_channel::OfferedChannel;
use dlc_manager::channel::signed_channel::SignedChannel;
use dlc_manager::channel::signed_channel::SignedChannelState;
use dlc_manager::channel::signed_channel::SignedChannelStateType;
use dlc_manager::channel::Channel;
use dlc_manager::channel::ClosedChannel;
use dlc_manager::channel::ClosedPunishedChannel;
use dlc_manager::channel::ClosingChannel;
use dlc_manager::channel::FailedAccept;
use dlc_manager::channel::FailedSign;
use dlc_manager::channel::SettledClosingChannel;
use dlc_manager::contract::accepted_contract::AcceptedContract;
use dlc_manager::contract::offered_contract::OfferedContract;
use dlc_manager::contract::ser::Serializable;
use dlc_manager::contract::signed_contract::SignedContract;
use dlc_manager::contract::ClosedContract;
use dlc_manager::contract::Contract;
use dlc_manager::contract::FailedAcceptContract;
use dlc_manager::contract::FailedSignContract;
use dlc_manager::contract::PreClosedContract;
use dlc_manager::error::Error;
use dlc_manager::subchannel::SubChannel;
use dlc_manager::subchannel::SubChannelState;
use dlc_manager::ContractId;
use dlc_manager::DlcChannelId;
use dlc_manager::ReferenceId;
use lightning::ln::ChannelId;
use lightning::util::ser::Readable;
use lightning::util::ser::Writeable;
use std::convert::TryInto;
use std::io::Cursor;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::string::ToString;
use std::sync::mpsc;
pub mod memory;
pub mod sled;
pub use memory::TenTenOneInMemoryStorage;
// Kinds.
const CONTRACT: u8 = 1;
const CHANNEL: u8 = 2;
const CHAIN_MONITOR: u8 = 3;
const KEY_PAIR: u8 = 6;
const SUB_CHANNEL: u8 = 7;
const ACTION: u8 = 9;
const CHAIN_MONITOR_KEY: &str = "chain_monitor";
pub trait WalletStorage {
fn upsert_key_pair(&self, public_key: &PublicKey, privkey: &SecretKey) -> Result<()>;
fn get_priv_key_for_pubkey(&self, public_key: &PublicKey) -> Result<Option<SecretKey>>;
}
pub struct KeyValue {
pub key: Vec<u8>,
pub value: Vec<u8>,
}
pub trait DlcStoreProvider {
/// Read the object from a kv store by the given key
fn read(&self, kind: u8, key: Option<Vec<u8>>) -> Result<Vec<KeyValue>>;
fn write(&self, kind: u8, key: Vec<u8>, value: Vec<u8>) -> Result<()>;
fn delete(&self, kind: u8, key: Option<Vec<u8>>) -> Result<()>;
}
pub trait TenTenOneStorage: DlcStoreProvider + Sync + Send + Clone {}
impl<T> TenTenOneStorage for T where T: DlcStoreProvider + Sync + Send + Clone {}
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum DlcChannelEvent {
Offered(Option<ReferenceId>),
Accepted(Option<ReferenceId>),
Established(Option<ReferenceId>),
SettledOffered(Option<ReferenceId>),
SettledReceived(Option<ReferenceId>),
SettledAccepted(Option<ReferenceId>),
SettledConfirmed(Option<ReferenceId>),
Settled(Option<ReferenceId>),
SettledClosing(Option<ReferenceId>),
RenewOffered(Option<ReferenceId>),
RenewAccepted(Option<ReferenceId>),
RenewConfirmed(Option<ReferenceId>),
RenewFinalized(Option<ReferenceId>),
Closing(Option<ReferenceId>),
CollaborativeCloseOffered(Option<ReferenceId>),
Closed(Option<ReferenceId>),
CounterClosed(Option<ReferenceId>),
ClosedPunished(Option<ReferenceId>),
CollaborativelyClosed(Option<ReferenceId>),
FailedAccept(Option<ReferenceId>),
FailedSign(Option<ReferenceId>),
Cancelled(Option<ReferenceId>),
Deleted(Option<ReferenceId>),
}
impl DlcChannelEvent {
pub fn get_reference_id(&self) -> Option<ReferenceId> {
*match self {
DlcChannelEvent::Offered(reference_id) => reference_id,
DlcChannelEvent::Accepted(reference_id) => reference_id,
DlcChannelEvent::Established(reference_id) => reference_id,
DlcChannelEvent::SettledOffered(reference_id) => reference_id,
DlcChannelEvent::SettledReceived(reference_id) => reference_id,
DlcChannelEvent::SettledAccepted(reference_id) => reference_id,
DlcChannelEvent::SettledConfirmed(reference_id) => reference_id,
DlcChannelEvent::Settled(reference_id) => reference_id,
DlcChannelEvent::SettledClosing(reference_id) => reference_id,
DlcChannelEvent::RenewOffered(reference_id) => reference_id,
DlcChannelEvent::RenewAccepted(reference_id) => reference_id,
DlcChannelEvent::RenewConfirmed(reference_id) => reference_id,
DlcChannelEvent::RenewFinalized(reference_id) => reference_id,
DlcChannelEvent::Closing(reference_id) => reference_id,
DlcChannelEvent::CollaborativeCloseOffered(reference_id) => reference_id,
DlcChannelEvent::Closed(reference_id) => reference_id,
DlcChannelEvent::CounterClosed(reference_id) => reference_id,
DlcChannelEvent::ClosedPunished(reference_id) => reference_id,
DlcChannelEvent::CollaborativelyClosed(reference_id) => reference_id,
DlcChannelEvent::FailedAccept(reference_id) => reference_id,
DlcChannelEvent::FailedSign(reference_id) => reference_id,
DlcChannelEvent::Cancelled(reference_id) => reference_id,
DlcChannelEvent::Deleted(reference_id) => reference_id,
}
}
}
/// Implementation of the dlc storage interface.
pub struct DlcStorageProvider<K> {
store: K,
event_sender: mpsc::Sender<DlcChannelEvent>,
}
macro_rules! convertible_enum {
(enum $name:ident {
$($vname:ident $(= $val:expr)? $(; $subprefix:ident, $subfield:ident)?,)*;
$($tname:ident $(= $tval:expr)?,)*
}, $input:ident) => {
#[derive(Debug)]
enum $name {
$($vname $(= $val)?,)*
$($tname $(= $tval)?,)*
}
impl From<$name> for u8 {
fn from(prefix: $name) -> u8 {
prefix as u8
}
}
impl TryFrom<u8> for $name {
type Error = Error;
fn try_from(v: u8) -> Result<Self, Self::Error> {
match v {
$(x if x == u8::from($name::$vname) => Ok($name::$vname),)*
$(x if x == u8::from($name::$tname) => Ok($name::$tname),)*
x => Err(Error::StorageError(format!("Unknown prefix {}", x))),
}
}
}
impl $name {
fn get_prefix(input: &$input) -> u8 {
let prefix = match input {
$($input::$vname(_) => $name::$vname,)*
$($input::$tname{..} => $name::$tname,)*
};
prefix.into()
}
}
}
}
convertible_enum!(
enum ContractPrefix {
Offered = 1,
Accepted,
Signed,
Confirmed,
PreClosed,
Closed,
FailedAccept,
FailedSign,
Refunded,
Rejected,;
},
Contract
);
convertible_enum!(
enum ChannelPrefix {
Offered = 1,
Accepted,
Signed; SignedChannelPrefix, state,
Closing,
Closed,
CounterClosed,
ClosedPunished,
CollaborativelyClosed,
FailedAccept,
FailedSign,
Cancelled,
SettledClosing,;
},
Channel
);
convertible_enum!(
enum SignedChannelPrefix {;
Established = 1,
SettledOffered,
SettledReceived,
SettledAccepted,
SettledConfirmed,
Settled,
Closing,
CollaborativeCloseOffered,
RenewAccepted,
RenewOffered,
RenewConfirmed,
RenewFinalized,
SettledClosing,
},
SignedChannelStateType
);
convertible_enum!(
enum SubChannelPrefix {;
Offered = 1,
Accepted,
Confirmed,
Finalized,
Signed,
Closing,
OnChainClosed,
CounterOnChainClosed,
CloseOffered,
CloseAccepted,
CloseConfirmed,
OffChainClosed,
ClosedPunished,
Rejected,
},
SubChannelState
);
fn to_storage_error<T>(e: T) -> Error
where
T: std::fmt::Display,
{
Error::StorageError(e.to_string())
}
impl<K: DlcStoreProvider> DlcStorageProvider<K> {
/// Creates a new instance of a DlcStorageProvider
pub fn new(store: K, event_sender: mpsc::Sender<DlcChannelEvent>) -> Self {
DlcStorageProvider {
store,
event_sender,
}
}
fn insert_contract(
&self,
serialized: Vec<u8>,
contract: &Contract,
) -> Result<Option<Vec<u8>>, Error> {
match contract {
a @ Contract::Accepted(_) | a @ Contract::Signed(_) => {
self.store
.delete(CONTRACT, Some(a.get_temporary_id().to_vec()))
.map_err(to_storage_error)?;
}
_ => {}
};
self.store
.write(CONTRACT, contract.get_id().to_vec(), serialized.clone())
.map_err(to_storage_error)?;
Ok(Some(serialized))
}
fn get_data_with_prefix<T: Serializable>(
&self,
data: &[Vec<u8>],
prefix: &[u8],
consume: Option<u64>,
) -> Result<Vec<T>, Error> {
data.iter()
.filter_map(|value| {
let mut cursor = Cursor::new(value);
let mut pref = vec![0u8; prefix.len()];
cursor.read_exact(&mut pref).expect("Error reading prefix");
if pref == prefix {
if let Some(c) = consume {
cursor.set_position(cursor.position() + c);
}
match T::deserialize(&mut cursor) {
Ok(deserialized) => Some(Ok(deserialized)),
Err(e) => {
tracing::error!("Failed to deserialize data: {e}");
None
}
}
} else {
None
}
})
.collect()
}
fn get_raw_contracts(&self) -> Result<Vec<Vec<u8>>, Error> {
let contracts = self
.store
.read(CONTRACT, None)
.map_err(to_storage_error)?
.into_iter()
.map(|x| x.value)
.collect();
Ok(contracts)
}
}
impl<K: DlcStoreProvider> dlc_manager::Storage for DlcStorageProvider<K> {
fn get_contract(&self, contract_id: &ContractId) -> Result<Option<Contract>, Error> {
match self
.store
.read(CONTRACT, Some(contract_id.to_vec()))
.map_err(to_storage_error)?
.first()
{
Some(res) => Ok(Some(deserialize_contract(&res.value)?)),
None => Ok(None),
}
}
fn get_contracts(&self) -> Result<Vec<Contract>, Error> {
let contracts = self.store.read(CONTRACT, None).map_err(to_storage_error)?;
let contracts = contracts
.iter()
.filter_map(|x| match deserialize_contract(&x.value) {
Ok(contract) => Some(contract),
Err(e) => {
log::error!("Failed to deserialize contract: {e}");
None
}
})
.collect();
Ok(contracts)
}
fn create_contract(&self, contract: &OfferedContract) -> Result<(), Error> {
let serialized = serialize_contract(&Contract::Offered(contract.clone()))?;
self.store
.write(CONTRACT, contract.id.to_vec(), serialized)
.map_err(to_storage_error)
}
fn delete_contract(&self, contract_id: &ContractId) -> Result<(), Error> {
self.store
.delete(CONTRACT, Some(contract_id.to_vec()))
.map_err(to_storage_error)
}
fn update_contract(&self, contract: &Contract) -> Result<(), Error> {
let serialized = serialize_contract(contract)?;
match contract {
a @ Contract::Accepted(_) | a @ Contract::Signed(_) => {
self.store
.delete(CONTRACT, Some(a.get_temporary_id().to_vec()))
.map_err(to_storage_error)?;
}
_ => {}
};
self.store
.write(CONTRACT, contract.get_id().to_vec(), serialized)
.map_err(to_storage_error)
}
fn get_contract_offers(&self) -> Result<Vec<OfferedContract>, Error> {
let contracts = self.get_raw_contracts()?;
self.get_data_with_prefix(&contracts, &[ContractPrefix::Offered.into()], None)
}
fn get_signed_contracts(&self) -> Result<Vec<SignedContract>, Error> {
let contracts = self.get_raw_contracts()?;
self.get_data_with_prefix(&contracts, &[ContractPrefix::Signed.into()], None)
}
fn get_confirmed_contracts(&self) -> Result<Vec<SignedContract>, Error> {
let contracts = self.get_raw_contracts()?;
self.get_data_with_prefix(&contracts, &[ContractPrefix::Confirmed.into()], None)
}
fn get_preclosed_contracts(&self) -> Result<Vec<PreClosedContract>, Error> {
let contracts = self.get_raw_contracts()?;
self.get_data_with_prefix(&contracts, &[ContractPrefix::PreClosed.into()], None)
}
fn upsert_channel(&self, channel: Channel, contract: Option<Contract>) -> Result<(), Error> {
let serialized = serialize_channel(&channel)?;
let serialized_contract = match contract.as_ref() {
Some(c) => Some(serialize_contract(c)?),
None => None,
};
match &channel {
a @ Channel::Accepted(_) | a @ Channel::Signed(_) => {
self.store
.delete(CHANNEL, Some(a.get_temporary_id().to_vec()))
.map_err(to_storage_error)?;
}
_ => {}
};
self.store
.write(CHANNEL, channel.get_id().to_vec(), serialized)
.map_err(to_storage_error)?;
if let Some(contract) = contract.as_ref() {
self.insert_contract(
serialized_contract.expect("to have the serialized version"),
contract,
)?;
}
let dlc_channel_event = DlcChannelEvent::from(channel);
let _ = self.event_sender.send(dlc_channel_event);
Ok(())
}
fn delete_channel(&self, channel_id: &DlcChannelId) -> Result<(), Error> {
let channel = self.get_channel(channel_id)?;
self.store
.delete(CHANNEL, Some(channel_id.to_vec()))
.map_err(to_storage_error)?;
let dlc_channel_event =
DlcChannelEvent::Deleted(channel.and_then(|channel| channel.get_reference_id()));
let _ = self.event_sender.send(dlc_channel_event);
Ok(())
}
fn get_channel(&self, channel_id: &DlcChannelId) -> Result<Option<Channel>, Error> {
match self
.store
.read(CHANNEL, Some(channel_id.to_vec()))
.map_err(to_storage_error)?
.first()
{
Some(res) => Ok(Some(deserialize_channel(&res.value)?)),
None => Ok(None),
}
}
fn get_signed_channels(
&self,
channel_state: Option<SignedChannelStateType>,
) -> Result<Vec<SignedChannel>, Error> {
let (prefix, consume) = if let Some(state) = &channel_state {
(
vec![
ChannelPrefix::Signed.into(),
SignedChannelPrefix::get_prefix(state),
],
None,
)
} else {
(vec![ChannelPrefix::Signed.into()], Some(1))
};
let channels = self
.store
.read(CHANNEL, None)
.map_err(to_storage_error)?
.into_iter()
.map(|x| x.value)
.collect::<Vec<Vec<u8>>>();
let channels = self.get_data_with_prefix(&channels, &prefix, consume)?;
Ok(channels)
}
fn get_offered_channels(&self) -> Result<Vec<OfferedChannel>, Error> {
let channels = self
.store
.read(CHANNEL, None)
.map_err(to_storage_error)?
.into_iter()
.map(|x| x.value)
.collect::<Vec<Vec<u8>>>();
self.get_data_with_prefix(&channels, &[ChannelPrefix::Offered.into()], None)
}
fn get_settled_closing_channels(&self) -> Result<Vec<SettledClosingChannel>, Error> {
let channels = self
.store
.read(CHANNEL, None)
.map_err(to_storage_error)?
.into_iter()
.map(|x| x.value)
.collect::<Vec<Vec<u8>>>();
self.get_data_with_prefix(&channels, &[ChannelPrefix::SettledClosing.into()], None)
}
fn persist_chain_monitor(&self, monitor: &ChainMonitor) -> Result<(), Error> {
self.store
.write(
CHAIN_MONITOR,
CHAIN_MONITOR_KEY.to_string().into_bytes(),
monitor.serialize()?,
)
.map_err(|e| Error::StorageError(format!("Error writing chain monitor: {e}")))
}
fn get_chain_monitor(&self) -> Result<Option<ChainMonitor>, Error> {
let chain_monitors = self
.store
.read(
CHAIN_MONITOR,
Some(CHAIN_MONITOR_KEY.to_string().into_bytes()),
)
.map_err(|e| Error::StorageError(format!("Error reading chain monitor: {e}")))?;
let serialized = chain_monitors.first();
let deserialized = match serialized {
Some(s) => Some(
ChainMonitor::deserialize(&mut ::std::io::Cursor::new(s.value.clone()))
.map_err(to_storage_error)?,
),
None => None,
};
Ok(deserialized)
}
fn upsert_sub_channel(&self, subchannel: &SubChannel) -> Result<(), Error> {
let serialized = serialize_sub_channel(subchannel)?;
self.store
.write(SUB_CHANNEL, subchannel.channel_id.0.to_vec(), serialized)
.map_err(to_storage_error)
}
fn get_sub_channel(&self, channel_id: ChannelId) -> Result<Option<SubChannel>, Error> {
match self
.store
.read(SUB_CHANNEL, Some(channel_id.0.to_vec()))
.map_err(to_storage_error)?
.first()
{
Some(res) => Ok(Some(deserialize_sub_channel(&res.value)?)),
None => Ok(None),
}
}
fn get_sub_channels(&self) -> Result<Vec<SubChannel>, Error> {
Ok(self
.store
.read(SUB_CHANNEL, None)
.map_err(to_storage_error)?
.iter()
.filter_map(|x| match deserialize_sub_channel(&x.value) {
Ok(sub_channel) => Some(sub_channel),
Err(e) => {
tracing::error!("Failed to deserialize subchannel: {e}");
None
}
})
.collect::<Vec<SubChannel>>())
}
fn get_offered_sub_channels(&self) -> Result<Vec<SubChannel>, Error> {
let sub_channels = self
.store
.read(SUB_CHANNEL, None)
.map_err(to_storage_error)?
.into_iter()
.map(|x| x.value)
.collect::<Vec<Vec<u8>>>();
self.get_data_with_prefix(&sub_channels, &[SubChannelPrefix::Offered.into()], None)
}
fn save_sub_channel_actions(
&self,
actions: &[dlc_manager::sub_channel_manager::Action],
) -> Result<(), Error> {
let mut buf = Vec::new();
for action in actions {
action.write(&mut buf)?;
}
self.store
.write(ACTION, "action".to_string().into_bytes(), buf)
.map_err(to_storage_error)
}
fn get_sub_channel_actions(
&self,
) -> Result<Vec<dlc_manager::sub_channel_manager::Action>, Error> {
let actions = self.store.read(ACTION, None).map_err(to_storage_error)?;
let buf = match actions.first() {
Some(buf) if !buf.value.is_empty() => buf,
Some(_) | None => return Ok(Vec::new()),
};
debug_assert!(!buf.value.is_empty());
let len = buf.value.len();
let mut res = Vec::new();
let mut cursor = Cursor::new(buf.value.clone());
while (cursor.position() as usize) < len - 1 {
let action = Readable::read(&mut cursor).map_err(to_storage_error)?;
res.push(action);
}
Ok(res)
}
fn get_channels(&self) -> Result<Vec<Channel>, Error> {
Ok(self
.store
.read(CHANNEL, None)
.map_err(to_storage_error)?
.iter()
.filter_map(|x| match deserialize_channel(&x.value) {
Ok(channel) => Some(channel),
Err(e) => {
tracing::error!("Failed to deserialize dlc channel: {e}");
None
}
})
.collect::<Vec<Channel>>())
}
}
impl<K: DlcStoreProvider> WalletStorage for DlcStorageProvider<K> {
fn upsert_key_pair(&self, public_key: &PublicKey, privkey: &SecretKey) -> Result<()> {
self.store.write(
KEY_PAIR,
public_key.serialize().to_vec(),
privkey.secret_bytes().to_vec(),
)
}
fn get_priv_key_for_pubkey(&self, public_key: &PublicKey) -> Result<Option<SecretKey>> {
let priv_key = self
.store
.read(KEY_PAIR, None)?
.iter()
.filter_map(|x| {
if x.key == public_key.serialize().to_vec() {
Some(SecretKey::from_slice(&x.value).expect("a valid secret key"))
} else {
None
}
})
.collect::<Vec<SecretKey>>()
.first()
.cloned();
Ok(priv_key)
}
}
/// Serialize a contract, prepending a one-byte state prefix so that
/// `deserialize_contract` knows which variant to decode.
fn serialize_contract(contract: &Contract) -> Result<Vec<u8>, Error> {
    let body = match contract {
        Contract::Offered(o) | Contract::Rejected(o) => o.serialize(),
        Contract::Accepted(o) => o.serialize(),
        Contract::Signed(o) | Contract::Confirmed(o) | Contract::Refunded(o) => o.serialize(),
        Contract::FailedAccept(c) => c.serialize(),
        Contract::FailedSign(c) => c.serialize(),
        Contract::PreClosed(c) => c.serialize(),
        Contract::Closed(c) => c.serialize(),
    }?;

    let mut serialized = Vec::with_capacity(body.len() + 1);
    serialized.push(ContractPrefix::get_prefix(contract));
    serialized.extend_from_slice(&body);
    Ok(serialized)
}
/// Deserialize a contract previously produced by `serialize_contract`.
///
/// The first byte encodes the contract state ([`ContractPrefix`]); the rest of
/// the buffer is the state-specific payload.
fn deserialize_contract(buff: &Vec<u8>) -> Result<Contract, Error> {
    let mut cursor = ::std::io::Cursor::new(buff);
    // Read the one-byte state prefix first.
    let mut prefix = [0u8; 1];
    cursor.read_exact(&mut prefix)?;
    let contract_prefix: ContractPrefix = prefix[0].try_into()?;
    let contract = match contract_prefix {
        ContractPrefix::Offered => {
            Contract::Offered(OfferedContract::deserialize(&mut cursor).map_err(to_storage_error)?)
        }
        ContractPrefix::Accepted => Contract::Accepted(
            AcceptedContract::deserialize(&mut cursor).map_err(to_storage_error)?,
        ),
        ContractPrefix::Signed => {
            Contract::Signed(SignedContract::deserialize(&mut cursor).map_err(to_storage_error)?)
        }
        ContractPrefix::Confirmed => {
            Contract::Confirmed(SignedContract::deserialize(&mut cursor).map_err(to_storage_error)?)
        }
        ContractPrefix::PreClosed => Contract::PreClosed(
            PreClosedContract::deserialize(&mut cursor).map_err(to_storage_error)?,
        ),
        ContractPrefix::Closed => {
            Contract::Closed(ClosedContract::deserialize(&mut cursor).map_err(to_storage_error)?)
        }
        ContractPrefix::FailedAccept => Contract::FailedAccept(
            FailedAcceptContract::deserialize(&mut cursor).map_err(to_storage_error)?,
        ),
        ContractPrefix::FailedSign => Contract::FailedSign(
            FailedSignContract::deserialize(&mut cursor).map_err(to_storage_error)?,
        ),
        // Signed, Confirmed, Refunded and Rejected share their payload type
        // with other states; the prefix alone distinguishes them.
        ContractPrefix::Refunded => {
            Contract::Refunded(SignedContract::deserialize(&mut cursor).map_err(to_storage_error)?)
        }
        ContractPrefix::Rejected => {
            Contract::Rejected(OfferedContract::deserialize(&mut cursor).map_err(to_storage_error)?)
        }
    };
    Ok(contract)
}
/// Serialize a channel, prepending its one-byte state prefix and — for signed
/// channels only — an additional byte encoding the signed-channel sub-state.
fn serialize_channel(channel: &Channel) -> Result<Vec<u8>, ::std::io::Error> {
    let body = match channel {
        Channel::Offered(o) => o.serialize(),
        Channel::Accepted(a) => a.serialize(),
        Channel::Signed(s) => s.serialize(),
        Channel::FailedAccept(f) => f.serialize(),
        Channel::FailedSign(f) => f.serialize(),
        Channel::Closing(c) => c.serialize(),
        Channel::SettledClosing(c) => c.serialize(),
        Channel::Closed(c) | Channel::CounterClosed(c) | Channel::CollaborativelyClosed(c) => {
            c.serialize()
        }
        Channel::ClosedPunished(c) => c.serialize(),
        Channel::Cancelled(o) => o.serialize(),
    }?;

    let mut serialized = Vec::with_capacity(body.len() + 2);
    serialized.push(ChannelPrefix::get_prefix(channel));
    // Signed channels additionally record which signed sub-state they are in.
    if let Channel::Signed(s) = channel {
        serialized.push(SignedChannelPrefix::get_prefix(&s.state.get_type()));
    }
    serialized.extend_from_slice(&body);
    Ok(serialized)
}
/// Deserialize a channel previously produced by `serialize_channel`.
///
/// The first byte encodes the channel state ([`ChannelPrefix`]); for signed
/// channels a second prefix byte (the signed sub-state) follows and is skipped
/// here, since `SignedChannel::deserialize` recovers the state from the
/// payload itself.
fn deserialize_channel(buff: &Vec<u8>) -> Result<Channel, Error> {
    let mut cursor = ::std::io::Cursor::new(buff);
    let mut prefix = [0u8; 1];
    cursor.read_exact(&mut prefix)?;
    let channel_prefix: ChannelPrefix = prefix[0].try_into()?;
    let channel = match channel_prefix {
        ChannelPrefix::Offered => {
            Channel::Offered(OfferedChannel::deserialize(&mut cursor).map_err(to_storage_error)?)
        }
        ChannelPrefix::Accepted => {
            Channel::Accepted(AcceptedChannel::deserialize(&mut cursor).map_err(to_storage_error)?)
        }
        ChannelPrefix::Signed => {
            // Skip the channel state prefix.
            cursor.set_position(cursor.position() + 1);
            Channel::Signed(SignedChannel::deserialize(&mut cursor).map_err(to_storage_error)?)
        }
        ChannelPrefix::FailedAccept => {
            Channel::FailedAccept(FailedAccept::deserialize(&mut cursor).map_err(to_storage_error)?)
        }
        ChannelPrefix::FailedSign => {
            Channel::FailedSign(FailedSign::deserialize(&mut cursor).map_err(to_storage_error)?)
        }
        ChannelPrefix::Closing => {
            Channel::Closing(ClosingChannel::deserialize(&mut cursor).map_err(to_storage_error)?)
        }
        ChannelPrefix::SettledClosing => Channel::SettledClosing(
            SettledClosingChannel::deserialize(&mut cursor).map_err(to_storage_error)?,
        ),
        // Closed, CollaborativelyClosed and CounterClosed share the
        // `ClosedChannel` payload; the prefix alone distinguishes them.
        ChannelPrefix::Closed => {
            Channel::Closed(ClosedChannel::deserialize(&mut cursor).map_err(to_storage_error)?)
        }
        ChannelPrefix::CollaborativelyClosed => Channel::CollaborativelyClosed(
            ClosedChannel::deserialize(&mut cursor).map_err(to_storage_error)?,
        ),
        ChannelPrefix::CounterClosed => Channel::CounterClosed(
            ClosedChannel::deserialize(&mut cursor).map_err(to_storage_error)?,
        ),
        ChannelPrefix::ClosedPunished => Channel::ClosedPunished(
            ClosedPunishedChannel::deserialize(&mut cursor).map_err(to_storage_error)?,
        ),
        ChannelPrefix::Cancelled => {
            Channel::Cancelled(OfferedChannel::deserialize(&mut cursor).map_err(to_storage_error)?)
        }
    };
    Ok(channel)
}
/// Serialize a sub-channel, prepending its one-byte state prefix.
fn serialize_sub_channel(sub_channel: &SubChannel) -> Result<Vec<u8>, ::std::io::Error> {
    let mut serialized = vec![SubChannelPrefix::get_prefix(&sub_channel.state)];
    serialized.extend_from_slice(&sub_channel.serialize()?);
    Ok(serialized)
}
/// Deserialize a sub-channel previously produced by `serialize_sub_channel`.
fn deserialize_sub_channel(buff: &Vec<u8>) -> Result<SubChannel, Error> {
    let mut cursor = ::std::io::Cursor::new(buff);
    // Skip the one-byte `SubChannelPrefix`; the payload starts right after it.
    cursor.seek(SeekFrom::Start(1))?;
    SubChannel::deserialize(&mut cursor).map_err(to_storage_error)
}
/// Maps every channel state to its corresponding [`DlcChannelEvent`], carrying
/// over the channel's `reference_id`.
impl From<Channel> for DlcChannelEvent {
    fn from(value: Channel) -> Self {
        match value {
            Channel::Offered(OfferedChannel { reference_id, .. }) => {
                DlcChannelEvent::Offered(reference_id)
            }
            Channel::Accepted(AcceptedChannel { reference_id, .. }) => {
                DlcChannelEvent::Accepted(reference_id)
            }
            // Signed channels produce one event per signed sub-state.
            Channel::Signed(SignedChannel {
                reference_id,
                state,
                ..
            }) => match state {
                SignedChannelState::Established { .. } => {
                    DlcChannelEvent::Established(reference_id)
                }
                SignedChannelState::SettledOffered { .. } => {
                    DlcChannelEvent::SettledOffered(reference_id)
                }
                SignedChannelState::SettledReceived { .. } => {
                    DlcChannelEvent::SettledReceived(reference_id)
                }
                SignedChannelState::SettledAccepted { .. } => {
                    DlcChannelEvent::SettledAccepted(reference_id)
                }
                SignedChannelState::SettledConfirmed { .. } => {
                    DlcChannelEvent::SettledConfirmed(reference_id)
                }
                SignedChannelState::Settled { .. } => DlcChannelEvent::Settled(reference_id),
                SignedChannelState::RenewOffered { .. } => {
                    DlcChannelEvent::RenewOffered(reference_id)
                }
                SignedChannelState::RenewAccepted { .. } => {
                    DlcChannelEvent::RenewAccepted(reference_id)
                }
                SignedChannelState::RenewConfirmed { .. } => {
                    DlcChannelEvent::RenewConfirmed(reference_id)
                }
                SignedChannelState::RenewFinalized { .. } => {
                    DlcChannelEvent::RenewFinalized(reference_id)
                }
                SignedChannelState::Closing { .. } => DlcChannelEvent::Closing(reference_id),
                SignedChannelState::SettledClosing { .. } => {
                    DlcChannelEvent::SettledClosing(reference_id)
                }
                SignedChannelState::CollaborativeCloseOffered { .. } => {
                    DlcChannelEvent::CollaborativeCloseOffered(reference_id)
                }
            },
            Channel::Closing(ClosingChannel { reference_id, .. }) => {
                DlcChannelEvent::Closing(reference_id)
            }
            Channel::SettledClosing(SettledClosingChannel { reference_id, .. }) => {
                DlcChannelEvent::SettledClosing(reference_id)
            }
            Channel::Closed(ClosedChannel { reference_id, .. }) => {
                DlcChannelEvent::Closed(reference_id)
            }
            Channel::CounterClosed(ClosedChannel { reference_id, .. }) => {
                DlcChannelEvent::CounterClosed(reference_id)
            }
            Channel::ClosedPunished(ClosedPunishedChannel { reference_id, .. }) => {
                DlcChannelEvent::ClosedPunished(reference_id)
            }
            Channel::CollaborativelyClosed(ClosedChannel { reference_id, .. }) => {
                DlcChannelEvent::CollaborativelyClosed(reference_id)
            }
            Channel::FailedAccept(FailedAccept { reference_id, .. }) => {
                DlcChannelEvent::FailedAccept(reference_id)
            }
            Channel::FailedSign(FailedSign { reference_id, .. }) => {
                DlcChannelEvent::FailedSign(reference_id)
            }
            Channel::Cancelled(OfferedChannel { reference_id, .. }) => {
                DlcChannelEvent::Cancelled(reference_id)
            }
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::storage::memory::InMemoryDlcStoreProvider;
use dlc_manager::channel::signed_channel::SignedChannelState;
use dlc_manager::Storage;
fn deserialize_object<T>(serialized: &[u8]) -> T
where
T: Serializable,
{
let mut cursor = std::io::Cursor::new(&serialized);
T::deserialize(&mut cursor).unwrap()
}
#[test]
fn create_contract_can_be_retrieved() {
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | true |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/storage/sled.rs | crates/xxi-node/src/storage/sled.rs | use crate::storage::DlcStoreProvider;
use crate::storage::KeyValue;
use anyhow::Result;
use sled::Db;
/// Sled-backed implementation of [`DlcStoreProvider`], storing each `kind` in
/// its own sled tree.
#[derive(Clone)]
pub struct SledStorageProvider {
    // Handle to the underlying sled database.
    db: Db,
}
/// A single key/value pair exported from the sled storage, tagged with the
/// `kind` (tree name byte) it belongs to.
pub struct SledStorageExport {
    pub kind: u8,
    pub key: Vec<u8>,
    pub value: Vec<u8>,
}
impl SledStorageProvider {
    /// Opens (or creates) the sled database at `path`.
    ///
    /// Panics if the database cannot be opened.
    pub fn new(path: &str) -> Self {
        SledStorageProvider {
            db: sled::open(path).expect("valid path"),
        }
    }

    /// Exports all key value pairs from the sled storage
    pub fn export(&self) -> Vec<SledStorageExport> {
        let mut export = vec![];
        for (collection_type, collection_name, collection_iter) in self.db.export() {
            // Only tree collections hold our data; skip anything else.
            if collection_type != b"tree" {
                continue;
            }
            for mut kv in collection_iter {
                // Each exported entry is a [key, value] pair; pop in reverse order.
                let value = kv.pop().expect("failed to get value from tree export");
                let key = kv.pop().expect("failed to get key from tree export");
                // Trees are named by their single `kind` byte (see `open_tree([kind])`).
                let kind = collection_name
                    .first()
                    .expect("failed to get kind from tree export");
                export.push(SledStorageExport {
                    kind: *kind,
                    key,
                    value,
                });
            }
        }
        export
    }
}
impl DlcStoreProvider for SledStorageProvider {
    /// Read a single value by `key`, or every key/value pair of the given
    /// `kind` when no key is provided. Each `kind` maps to its own sled tree.
    fn read(&self, kind: u8, key: Option<Vec<u8>>) -> Result<Vec<KeyValue>> {
        let tree = self.db.open_tree([kind])?;

        match key {
            Some(key) => {
                let result = match tree.get(&key)? {
                    Some(value) => vec![KeyValue {
                        key,
                        value: value.to_vec(),
                    }],
                    None => vec![],
                };
                Ok(result)
            }
            None => {
                // Propagate iteration errors instead of panicking: the
                // previous implementation used `expect("to not fail")`, which
                // would crash the process on a failing read of the tree.
                let mut result = Vec::new();
                for entry in tree.iter() {
                    let (key, value) = entry?;
                    result.push(KeyValue {
                        key: key.to_vec(),
                        value: value.to_vec(),
                    });
                }
                Ok(result)
            }
        }
    }

    /// Insert (or overwrite) `value` under `key`, flushing to disk.
    fn write(&self, kind: u8, key: Vec<u8>, value: Vec<u8>) -> Result<()> {
        self.db.open_tree([kind])?.insert(key, value)?;
        self.db.flush()?;
        Ok(())
    }

    /// Delete a single entry by `key`, or clear the whole tree of the given
    /// `kind` when no key is provided, flushing to disk.
    fn delete(&self, kind: u8, key: Option<Vec<u8>>) -> Result<()> {
        let tree = self.db.open_tree([kind])?;
        if let Some(key) = key {
            tree.remove(key)?;
        } else {
            tree.clear()?;
        }
        self.db.flush()?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use crate::storage::sled::SledStorageProvider;
    use crate::storage::DlcStoreProvider;

    /// Runs `$body` against a fresh sled store in a test-specific directory
    /// and removes the directory afterwards.
    macro_rules! sled_test {
        ($name: ident, $body: expr) => {
            #[test]
            fn $name() {
                let path = format!("{}{}", "test_files/sleddb/", std::stringify!($name));
                {
                    let storage = SledStorageProvider::new(&path);
                    #[allow(clippy::redundant_closure_call)]
                    $body(storage);
                }
                std::fs::remove_dir_all(path).unwrap();
            }
        };
    }

    // Writing a key/value pair succeeds.
    sled_test!(write_key_and_value, |storage: SledStorageProvider| {
        let result = storage.write(
            1,
            "key".to_string().into_bytes(),
            "test".to_string().into_bytes(),
        );
        assert!(result.is_ok())
    });

    // Reading without a key returns all entries of the requested kind only.
    sled_test!(read_without_key, |storage: SledStorageProvider| {
        storage
            .write(
                1,
                "key".to_string().into_bytes(),
                "test".to_string().into_bytes(),
            )
            .unwrap();
        storage
            .write(
                1,
                "key2".to_string().into_bytes(),
                "test2".to_string().into_bytes(),
            )
            .unwrap();
        storage
            .write(
                2,
                "key3".to_string().into_bytes(),
                "test3".to_string().into_bytes(),
            )
            .unwrap();
        let result = storage.read(1, None).unwrap();
        assert_eq!(2, result.len());
    });

    // Reading with a key returns exactly the matching entry.
    sled_test!(read_with_key, |storage: SledStorageProvider| {
        storage
            .write(
                1,
                "key".to_string().into_bytes(),
                "test".to_string().into_bytes(),
            )
            .unwrap();
        storage
            .write(
                1,
                "key2".to_string().into_bytes(),
                "test2".to_string().into_bytes(),
            )
            .unwrap();
        storage
            .write(
                2,
                "key3".to_string().into_bytes(),
                "test3".to_string().into_bytes(),
            )
            .unwrap();
        let result = storage
            .read(1, Some("key2".to_string().into_bytes()))
            .unwrap();
        assert_eq!(1, result.len());
    });

    // A missing key yields an empty result rather than an error.
    sled_test!(
        read_with_non_existing_key,
        |storage: SledStorageProvider| {
            let result = storage
                .read(1, Some("non_existing".to_string().into_bytes()))
                .unwrap();
            assert_eq!(0, result.len())
        }
    );

    // Deleting without a key clears the whole kind.
    sled_test!(delete_without_key, |storage: SledStorageProvider| {
        storage
            .write(
                1,
                "key".to_string().into_bytes(),
                "test".to_string().into_bytes(),
            )
            .unwrap();
        storage
            .write(
                1,
                "key2".to_string().into_bytes(),
                "test2".to_string().into_bytes(),
            )
            .unwrap();
        storage
            .write(
                2,
                "key3".to_string().into_bytes(),
                "test3".to_string().into_bytes(),
            )
            .unwrap();
        let result = storage.read(1, None).unwrap();
        assert_eq!(2, result.len());
        let result = storage.delete(1, None);
        assert!(result.is_ok());
        let result = storage.read(1, None).unwrap();
        assert_eq!(0, result.len());
    });

    // Deleting with a key removes only that entry.
    sled_test!(delete_with_key, |storage: SledStorageProvider| {
        storage
            .write(
                1,
                "key".to_string().into_bytes(),
                "test".to_string().into_bytes(),
            )
            .unwrap();
        storage
            .write(
                1,
                "key2".to_string().into_bytes(),
                "test2".to_string().into_bytes(),
            )
            .unwrap();
        storage
            .write(
                2,
                "key3".to_string().into_bytes(),
                "test3".to_string().into_bytes(),
            )
            .unwrap();
        let result = storage.read(1, None).unwrap();
        assert_eq!(2, result.len());
        let result = storage.delete(1, Some("key2".to_string().into_bytes()));
        assert!(result.is_ok());
        let result = storage.read(1, None).unwrap();
        assert_eq!(1, result.len());
    });
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/commons/collab_revert.rs | crates/xxi-node/src/commons/collab_revert.rs | use bitcoin::secp256k1::ecdsa::Signature;
use bitcoin::Transaction;
use bitcoin::Txid;
use rust_decimal::Decimal;
use serde::Deserialize;
use serde::Serialize;
/// The information needed for the coordinator to kickstart the collaborative revert protocol.
#[derive(Deserialize, Serialize)]
pub struct CollaborativeRevertCoordinatorRequest {
    /// Channel to collaboratively revert.
    pub channel_id: String,
    /// Fee rate for the collaborative revert transaction, in sats per vbyte.
    pub fee_rate_sats_vb: u64,
    /// Amount to be paid out to the counterparty in sats.
    ///
    /// Note: the tx fee will be subtracted evenly between both parties.
    pub counter_payout: u64,
    /// The price at which the position has been closed.
    ///
    /// Note: this is just for informative purposes and is not used in any calculations.
    pub price: Decimal,
}
/// The information provided by the trader in response to a collaborative revert proposal.
#[derive(Deserialize, Serialize)]
pub struct CollaborativeRevertTraderResponse {
    /// Channel to collaboratively revert.
    pub channel_id: String,
    /// The unsigned collaborative revert transaction.
    pub transaction: Transaction,
    /// The trader's ECDSA signature on the collaborative revert transaction.
    pub signature: Signature,
}
/// The information needed for the coordinator to kickstart the _legacy_ collaborative revert
/// protocol.
#[derive(Deserialize, Serialize)]
pub struct LegacyCollaborativeRevertCoordinatorRequest {
    /// Channel to collaboratively revert.
    pub channel_id: String,
    /// The TXID of the LN funding transaction.
    pub txid: Txid,
    /// The vout corresponding to the funding TXO.
    pub vout: u32,
    /// How much the coordinator should get out of the collaborative revert transaction, without
    /// considering transaction fees.
    pub coordinator_amount: u64,
    /// Fee rate for the collaborative revert transaction, in sats per vbyte.
    pub fee_rate_sats_vb: u64,
    /// Price at which to settle the DLC channel.
    ///
    /// This price is purely informational for the trader, as the caller provides the
    /// `coordinator_amount` already.
    pub price: Decimal,
}
/// The information provided by the trader in response to a _legacy_ collaborative revert proposal.
#[derive(Deserialize, Serialize)]
pub struct LegacyCollaborativeRevertTraderResponse {
    /// Channel to collaboratively revert.
    pub channel_id: String,
    /// The unsigned collaborative revert transaction.
    pub transaction: Transaction,
    /// The trader's ECDSA signature on the collaborative revert transaction.
    pub signature: Signature,
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/commons/order_matching_fee.rs | crates/xxi-node/src/commons/order_matching_fee.rs | use rust_decimal::prelude::FromPrimitive;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use rust_decimal::RoundingStrategy;
/// Computes the order-matching fee, in BTC, for a trade of `quantity`
/// contracts at `price`.
///
/// The fee is `quantity / price * fee_per_cent` — a fraction of the trade's
/// notional value expressed in BTC — rounded midpoint-away-from-zero to
/// 8 decimal places (1 satoshi). A zero `price` yields a zero fee.
pub fn order_matching_fee(quantity: f32, price: Decimal, fee_per_cent: Decimal) -> bitcoin::Amount {
    let quantity = Decimal::from_f32(quantity).expect("quantity to fit in Decimal");

    // `if`/`else` instead of matching on a bool (clippy::match_bool); guard
    // against division by zero.
    let fee: f64 = if price == Decimal::ZERO {
        0.0
    } else {
        let fee = quantity * (Decimal::ONE / price) * fee_per_cent;
        fee.round_dp_with_strategy(8, RoundingStrategy::MidpointAwayFromZero)
            .to_f64()
            .expect("fee to fit in f64")
    };

    bitcoin::Amount::from_btc(fee).expect("fee to fit in bitcoin::Amount")
}
#[cfg(test)]
mod tests {
    use super::*;
    use rust_decimal_macros::dec;

    // 50 contracts at 30_209 with a 0.3% fee:
    // 50 / 30_209 * 0.003 BTC ~= 497 sats after rounding to 8 decimal places.
    #[test]
    fn calculate_order_matching_fee() {
        let price = Decimal::new(30209, 0);
        let fee = order_matching_fee(50.0, price, dec!(0.003));
        assert_eq!(fee.to_sat(), 497);
    }

    // A zero price must not divide by zero; the fee is simply zero.
    #[test]
    fn calculate_order_matching_fee_with_0() {
        let price = Decimal::new(0, 0);
        let fee = order_matching_fee(50.0, price, dec!(0.003));
        assert_eq!(fee.to_sat(), 0);
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/commons/pre_image.rs | crates/xxi-node/src/commons/pre_image.rs | use anyhow::bail;
use anyhow::Result;
use base64::engine::general_purpose;
use base64::Engine;
use rand::Rng;
use sha2::Digest;
use sha2::Sha256;
/// A random 32-byte pre-image together with the URL-safe base64 encoding of
/// its SHA-256 hash.
pub struct PreImage {
    // TODO(bonomat): instead of implementing `PreImage` we should create our
    // own `serialize` and `deserialize` and `to_string` which converts back and forth from a
    // url_safe string
    /// The raw 32-byte pre-image.
    pub pre_image: [u8; 32],
    /// URL-safe base64 encoding of the SHA-256 hash of `pre_image`.
    pub hash: String,
}
impl PreImage {
    /// Returns the pre-image encoded as a URL-safe base64 string.
    pub fn get_base64_encoded_pre_image(&self) -> String {
        general_purpose::URL_SAFE.encode(self.pre_image)
    }

    /// Reconstructs a [`PreImage`] — raw bytes plus hash — from a URL-safe
    /// base64 encoded pre-image.
    pub fn from_url_safe_encoded_pre_image(url_safe_pre_image: &str) -> Result<Self> {
        let decoded = general_purpose::URL_SAFE.decode(url_safe_pre_image)?;
        let pre_image = vec_to_hex_array(decoded)?;

        Ok(Self {
            hash: inner_hash_pre_image(&pre_image),
            pre_image,
        })
    }
}
/// Creates a fresh random pre-image together with its encoded hash.
pub fn create_pre_image() -> PreImage {
    let pre_image = inner_create_pre_image();

    PreImage {
        hash: inner_hash_pre_image(&pre_image),
        pre_image,
    }
}
/// Draws 32 random bytes from the thread-local RNG.
fn inner_create_pre_image() -> [u8; 32] {
    rand::thread_rng().gen()
}
/// Hashes the pre-image with SHA-256 and returns the digest encoded as a
/// URL-safe base64 string.
fn inner_hash_pre_image(pre_image: &[u8; 32]) -> String {
    let digest = Sha256::new().chain_update(pre_image).finalize();
    general_purpose::URL_SAFE.encode(digest)
}
fn vec_to_hex_array(pre_image: Vec<u8>) -> Result<[u8; 32]> {
let pre_image: [u8; 32] = match pre_image.try_into() {
Ok(array) => array,
Err(_) => {
bail!("Failed to parse pre-image");
}
};
Ok(pre_image)
}
#[cfg(test)]
pub mod tests {
    use crate::commons::pre_image::inner_hash_pre_image;

    // The hash of a fixed pre-image must be stable across versions, since
    // hashes are exchanged with counterparties.
    #[test]
    pub fn given_preimage_computes_deterministic_hash() {
        let pre_image = "92b1de6841db0cf46cc40be6fe80110a0264513ab27eb822ed71ca517ffe8fd9";
        let pre_image = hex::decode(pre_image).unwrap();
        let pre_image: [u8; 32] = pre_image
            .try_into()
            .expect("Failed to convert Vec<u8> to [u8; 32]");
        let hash = inner_hash_pre_image(&pre_image);
        assert_eq!(hash, "da63WurzUQibvu0OLClJFatzvT3kuZDrcCm5tl0bEBg=")
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/commons/signature.rs | crates/xxi-node/src/commons/signature.rs | use bitcoin::secp256k1::Message as SecpMessage;
use bitcoin::secp256k1::PublicKey;
use secp256k1::Secp256k1;
use secp256k1::SecretKey;
use secp256k1::VerifyOnly;
use serde::Deserialize;
use serde::Serialize;
use sha2::digest::FixedOutput;
use sha2::Digest;
use sha2::Sha256;
/// A value bundled with an ECDSA signature over it.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct SignedValue<T> {
    pub value: T,
    /// A signature over the SHA-256 hash of the JSON serialization of
    /// [`Self::value`].
    pub signature: secp256k1::ecdsa::Signature,
}
impl<T: Serialize> SignedValue<T> {
    /// Signs the SHA-256 hash of the JSON serialization of `value` with
    /// `secret_key`.
    pub fn new(value: T, secret_key: SecretKey) -> anyhow::Result<Self> {
        let serialized = serde_json::to_vec(&value)?;
        let signature = secret_key.sign_ecdsa(create_sign_message(serialized));

        Ok(Self { value, signature })
    }

    /// Verifies that `signature` matches the JSON serialization of `value`
    /// under the given `node_id`.
    pub fn verify(&self, secp: &Secp256k1<VerifyOnly>, node_id: &PublicKey) -> anyhow::Result<()> {
        let serialized = serde_json::to_vec(&self.value)?;
        secp.verify_ecdsa(&create_sign_message(serialized), &self.signature, node_id)?;

        Ok(())
    }
}
/// A public key paired with an ECDSA signature attributed to it.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub struct Signature {
    pub pubkey: PublicKey,
    pub signature: secp256k1::ecdsa::Signature,
}
/// Hashes `message` with SHA-256 and wraps the digest in a secp256k1
/// [`SecpMessage`].
pub fn create_sign_message(message: Vec<u8>) -> SecpMessage {
    let hashed_message = Sha256::new().chain_update(message).finalize_fixed();

    // A SHA-256 digest is always exactly 32 bytes, which is the only length
    // `from_slice` accepts, so this conversion cannot fail. (The previous
    // expect message claimed the message was static, which was not true.)
    SecpMessage::from_slice(hashed_message.as_slice())
        .expect("a SHA-256 digest to be a valid 32-byte message")
}
#[cfg(test)]
mod test {
    use crate::commons::signature::Signature;
    use crate::commons::SignedValue;
    use bitcoin::secp256k1::PublicKey;
    use bitcoin::secp256k1::SecretKey;
    use secp256k1::Secp256k1;
    use secp256k1::SECP256K1;
    use serde::Deserialize;
    use serde::Serialize;
    use std::str::FromStr;

    // Public key corresponding to the [0, 1, 2, ...] secret key used in
    // `test_serialize_signature` below.
    fn dummy_public_key() -> PublicKey {
        PublicKey::from_str("02bd998ebd176715fe92b7467cf6b1df8023950a4dd911db4c94dfc89cc9f5a655")
            .unwrap()
    }

    // The JSON representation of `Signature` must stay stable, since it is
    // exchanged between peers.
    #[test]
    fn test_serialize_signature() {
        let secret_key = SecretKey::from_slice(&[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
            24, 25, 26, 27, 27, 29, 30, 31,
        ])
        .unwrap();
        let sig = Signature {
            pubkey: secret_key.public_key(&secp256k1::Secp256k1::new()),
            signature: "3045022100ddd8e15dea994a3dd98c481d901fb46b7f3624bb25b4210ea10f8a00779c6f0e0220222235da47b1ba293184fa4a91b39999911c08020e069c9f4afa2d81586b23e1".parse().unwrap(),
        };
        let serialized = serde_json::to_string(&sig).unwrap();
        assert_eq!(
            serialized,
            r#"{"pubkey":"02bd998ebd176715fe92b7467cf6b1df8023950a4dd911db4c94dfc89cc9f5a655","signature":"3045022100ddd8e15dea994a3dd98c481d901fb46b7f3624bb25b4210ea10f8a00779c6f0e0220222235da47b1ba293184fa4a91b39999911c08020e069c9f4afa2d81586b23e1"}"#
        );
    }

    // Round-trip: the JSON above must deserialize back to the same value.
    #[test]
    fn test_deserialize_signature() {
        let sig = r#"{"pubkey":"02bd998ebd176715fe92b7467cf6b1df8023950a4dd911db4c94dfc89cc9f5a655","signature":"3045022100ddd8e15dea994a3dd98c481d901fb46b7f3624bb25b4210ea10f8a00779c6f0e0220222235da47b1ba293184fa4a91b39999911c08020e069c9f4afa2d81586b23e1"}"#;
        let serialized: Signature = serde_json::from_str(sig).unwrap();
        let signature = Signature {
            pubkey: dummy_public_key(),
            signature: "3045022100ddd8e15dea994a3dd98c481d901fb46b7f3624bb25b4210ea10f8a00779c6f0e0220222235da47b1ba293184fa4a91b39999911c08020e069c9f4afa2d81586b23e1".parse().unwrap(),
        };
        assert_eq!(serialized, signature);
    }

    // Deterministic key for the `SignedValue` tests; distinct from the key
    // behind `dummy_public_key`.
    fn test_secret_key() -> SecretKey {
        SecretKey::from_slice(&[
            32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
            24, 25, 26, 27, 27, 29, 30, 31,
        ])
        .unwrap()
    }

    #[derive(Serialize, Deserialize, Clone)]
    pub struct DummyModel {
        dummy1: String,
        dummy2: u64,
    }

    // Signing and verifying with the matching public key succeeds.
    #[test]
    fn test_signed_value_valid_signature() {
        let secret_key = test_secret_key();
        let value = DummyModel {
            dummy1: "10101".to_string(),
            dummy2: 10101,
        };
        let signed_value = SignedValue::new(value, secret_key).unwrap();
        signed_value
            .verify(
                &Secp256k1::verification_only(),
                &secret_key.public_key(SECP256K1),
            )
            .unwrap();
    }

    // Verifying against an unrelated public key must fail.
    #[test]
    #[should_panic(expected = "signature failed verification")]
    fn test_signed_value_invalid_signature() {
        let value = DummyModel {
            dummy1: "10101".to_string(),
            dummy2: 10101,
        };
        let signed_value = SignedValue::new(value.clone(), test_secret_key()).unwrap();
        signed_value
            .verify(&Secp256k1::verification_only(), &dummy_public_key())
            .unwrap();
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/commons/funding_fee_event.rs | crates/xxi-node/src/commons/funding_fee_event.rs | use crate::commons::to_nearest_hour_in_the_past;
use crate::commons::ContractSymbol;
use crate::commons::Direction;
use bitcoin::SignedAmount;
use rust_decimal::Decimal;
use serde::Deserialize;
use serde::Serialize;
use time::OffsetDateTime;
/// The funding rate for any position opened before the `end_date`, which remained open through the
/// `end_date`.
///
/// Construct via [`FundingRate::new`], which truncates both dates down to
/// whole hours.
#[derive(Serialize, Clone, Copy, Deserialize, Debug)]
pub struct FundingRate {
    /// A positive funding rate indicates that longs pay shorts; a negative funding rate indicates
    /// that shorts pay longs.
    rate: Decimal,
    /// The start date for the funding rate period. This value is only used for informational
    /// purposes.
    ///
    /// The `start_date` is always a whole hour.
    start_date: OffsetDateTime,
    /// The end date for the funding rate period. When the end date has passed, all active
    /// positions that were created before the end date should be charged a funding fee based
    /// on the `rate`.
    ///
    /// The `end_date` is always a whole hour.
    end_date: OffsetDateTime,
}
impl FundingRate {
    /// Builds a funding rate, truncating both dates down to the whole hour to
    /// uphold the struct's invariants.
    pub fn new(rate: Decimal, start_date: OffsetDateTime, end_date: OffsetDateTime) -> Self {
        Self {
            rate,
            start_date: to_nearest_hour_in_the_past(start_date),
            end_date: to_nearest_hour_in_the_past(end_date),
        }
    }

    pub fn rate(&self) -> Decimal {
        self.rate
    }

    pub fn start_date(&self) -> OffsetDateTime {
        self.start_date
    }

    pub fn end_date(&self) -> OffsetDateTime {
        self.end_date
    }
}
/// A single funding fee charge or credit applied to a position.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct FundingFeeEvent {
    pub contract_symbol: ContractSymbol,
    /// Number of contracts.
    pub contracts: Decimal,
    /// Direction of the trader's position.
    pub direction: Direction,
    #[serde(with = "rust_decimal::serde::float")]
    pub price: Decimal,
    /// A positive amount indicates that the trader pays the coordinator; a negative amount
    /// indicates that the coordinator pays the trader.
    #[serde(with = "bitcoin::amount::serde::as_sat")]
    pub fee: SignedAmount,
    pub due_date: OffsetDateTime,
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/commons/order.rs | crates/xxi-node/src/commons/order.rs | use crate::commons::ContractSymbol;
use crate::commons::Direction;
use anyhow::Result;
use bitcoin::hashes::sha256;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Amount;
use rust_decimal::Decimal;
use secp256k1::ecdsa::Signature;
use secp256k1::Message;
use secp256k1::VerifyOnly;
use serde::Deserialize;
use serde::Serialize;
use time::OffsetDateTime;
use uuid::Uuid;
/// A request to place a new order, authenticated by the trader's signature.
#[derive(Serialize, Deserialize, Clone)]
pub struct NewOrderRequest {
    pub value: NewOrder,
    /// An ECDSA signature over the digest produced by `NewOrder::message` for
    /// [`Self::value`].
    pub signature: Signature,
    /// Extra parameters for opening a DLC channel (see [`ChannelOpeningParams`]).
    pub channel_opening_params: Option<ChannelOpeningParams>,
}
impl NewOrderRequest {
    /// Checks that `signature` is a valid ECDSA signature by the order's
    /// trader over the order's message digest.
    pub fn verify(&self, secp: &secp256k1::Secp256k1<VerifyOnly>) -> Result<()> {
        secp.verify_ecdsa(
            &self.value.message(),
            &self.signature,
            &self.value.trader_id(),
        )?;
        Ok(())
    }
}
/// A new order submitted by a trader: either a market or a limit order.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum NewOrder {
    Market(NewMarketOrder),
    Limit(NewLimitOrder),
}
impl NewOrder {
    /// The message digest a trader signs for this order.
    pub fn message(&self) -> Message {
        match self {
            NewOrder::Market(order) => order.message(),
            NewOrder::Limit(order) => order.message(),
        }
    }

    /// The public key of the trader placing the order.
    pub fn trader_id(&self) -> PublicKey {
        match self {
            NewOrder::Market(order) => order.trader_id,
            NewOrder::Limit(order) => order.trader_id,
        }
    }

    /// The unique id of the order.
    pub fn id(&self) -> Uuid {
        match self {
            NewOrder::Market(order) => order.id,
            NewOrder::Limit(order) => order.id,
        }
    }

    /// Whether the order goes long or short.
    pub fn direction(&self) -> Direction {
        match self {
            NewOrder::Market(order) => order.direction,
            NewOrder::Limit(order) => order.direction,
        }
    }

    /// Human-readable price: the limit price, or `"Market"` for market orders.
    pub fn price(&self) -> String {
        match self {
            NewOrder::Market(_) => "Market".to_string(),
            NewOrder::Limit(order) => order.price.to_string(),
        }
    }

    /// Human-readable order type label.
    pub fn order_type(&self) -> String {
        let label = match self {
            NewOrder::Market(_) => "Market",
            NewOrder::Limit(_) => "Limit",
        };
        label.to_string()
    }
}
/// A new market order (no limit price).
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct NewMarketOrder {
    pub id: Uuid,
    pub contract_symbol: ContractSymbol,
    #[serde(with = "rust_decimal::serde::float")]
    pub quantity: Decimal,
    pub trader_id: PublicKey,
    pub direction: Direction,
    #[serde(with = "rust_decimal::serde::float")]
    pub leverage: Decimal,
    #[serde(with = "time::serde::timestamp")]
    pub expiry: OffsetDateTime,
    pub stable: bool,
}
/// A new limit order with an explicit limit price.
#[derive(Serialize, Deserialize, Clone, Copy, Debug)]
pub struct NewLimitOrder {
    pub id: Uuid,
    pub contract_symbol: ContractSymbol,
    /// The limit price of the order.
    #[serde(with = "rust_decimal::serde::float")]
    pub price: Decimal,
    #[serde(with = "rust_decimal::serde::float")]
    pub quantity: Decimal,
    pub trader_id: PublicKey,
    pub direction: Direction,
    #[serde(with = "rust_decimal::serde::float")]
    pub leverage: Decimal,
    #[serde(with = "time::serde::timestamp")]
    pub expiry: OffsetDateTime,
    pub stable: bool,
}
impl NewLimitOrder {
    /// Builds the message digest a trader signs to authenticate this order.
    ///
    /// The digest commits to id, expiry (unix seconds, little-endian), symbol,
    /// direction, quantity, price and leverage (the last three formatted with
    /// two decimal places). The byte layout must stay stable, since the
    /// counterparty recomputes it for signature verification.
    pub fn message(&self) -> Message {
        // `extend_from_slice` appends the same bytes as the previous
        // `append(&mut x.to_vec())` pattern without allocating an intermediate
        // Vec per field.
        let mut buf: Vec<u8> = vec![];
        buf.extend_from_slice(self.id.as_bytes());
        buf.extend_from_slice(&self.expiry.unix_timestamp().to_le_bytes());
        buf.extend_from_slice(self.contract_symbol.label().as_bytes());
        buf.extend_from_slice(self.direction.to_string().as_bytes());
        buf.extend_from_slice(format!("{:.2}", self.quantity).as_bytes());
        buf.extend_from_slice(format!("{:.2}", self.price).as_bytes());
        buf.extend_from_slice(format!("{:.2}", self.leverage).as_bytes());

        Message::from_hashed_data::<sha256::Hash>(buf.as_slice())
    }
}
impl NewMarketOrder {
    /// Builds the message digest a trader signs to authenticate this order.
    ///
    /// The digest commits to id, expiry (unix seconds, little-endian), symbol,
    /// direction, quantity and leverage (the last two formatted with two
    /// decimal places). The byte layout must stay stable, since the
    /// counterparty recomputes it for signature verification.
    pub fn message(&self) -> Message {
        // `extend_from_slice` appends the same bytes as the previous
        // `append(&mut x.to_vec())` pattern without allocating an intermediate
        // Vec per field.
        let mut buf: Vec<u8> = vec![];
        buf.extend_from_slice(self.id.as_bytes());
        buf.extend_from_slice(&self.expiry.unix_timestamp().to_le_bytes());
        buf.extend_from_slice(self.contract_symbol.label().as_bytes());
        buf.extend_from_slice(self.direction.to_string().as_bytes());
        buf.extend_from_slice(format!("{:.2}", self.quantity).as_bytes());
        buf.extend_from_slice(format!("{:.2}", self.leverage).as_bytes());

        Message::from_hashed_data::<sha256::Hash>(buf.as_slice())
    }
}
/// The type of an order: market or limit.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum OrderType {
    #[allow(dead_code)]
    Market,
    Limit,
}
impl OrderType {
    /// Human-readable label for the order type.
    pub fn label(self) -> String {
        match self {
            OrderType::Market => "Market".to_string(),
            OrderType::Limit => "Limit".to_string(),
        }
    }
}
/// Lifecycle state of an order.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub enum OrderState {
    Open,
    Matched,
    Taken,
    Failed,
    Expired,
    Deleted,
}
/// The reason an order was created.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub enum OrderReason {
    Manual,
    Expired,
    CoordinatorLiquidated,
    TraderLiquidated,
}
/// An order together with its lifecycle state and the reason it was created.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct Order {
    pub id: Uuid,
    #[serde(with = "rust_decimal::serde::float")]
    pub price: Decimal,
    pub leverage: f32,
    pub contract_symbol: ContractSymbol,
    pub trader_id: PublicKey,
    pub direction: Direction,
    #[serde(with = "rust_decimal::serde::float")]
    pub quantity: Decimal,
    pub order_type: OrderType,
    #[serde(with = "time::serde::rfc3339")]
    pub timestamp: OffsetDateTime,
    #[serde(with = "time::serde::rfc3339")]
    pub expiry: OffsetDateTime,
    pub order_state: OrderState,
    pub order_reason: OrderReason,
    pub stable: bool,
}
/// Extra information required to open a DLC channel, independent of the [`TradeParams`] associated
/// with the filled order.
///
/// [`TradeParams`]: commons::TradeParams
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct ChannelOpeningParams {
#[serde(with = "bitcoin::amount::serde::as_sat")]
pub trader_reserve: Amount,
#[serde(with = "bitcoin::amount::serde::as_sat")]
pub coordinator_reserve: Amount,
/// if set, the channel will be opened with funding only from the coordinator.
pub pre_image: Option<String>,
}
#[cfg(test)]
pub mod tests {
use crate::commons::ContractSymbol;
use crate::commons::Direction;
use crate::commons::NewLimitOrder;
use crate::commons::NewOrder;
use crate::commons::NewOrderRequest;
use secp256k1::rand;
use secp256k1::Secp256k1;
use secp256k1::SecretKey;
use secp256k1::SECP256K1;
use std::str::FromStr;
use time::ext::NumericalDuration;
use time::OffsetDateTime;
use uuid::Uuid;
#[test]
pub fn round_trip_signature_new_order() {
let secret_key = SecretKey::new(&mut rand::thread_rng());
let public_key = secret_key.public_key(SECP256K1);
let order = NewLimitOrder {
id: Default::default(),
contract_symbol: ContractSymbol::BtcUsd,
price: rust_decimal_macros::dec!(53_000),
quantity: rust_decimal_macros::dec!(2000),
trader_id: public_key,
direction: Direction::Long,
leverage: rust_decimal_macros::dec!(2.0),
expiry: OffsetDateTime::now_utc(),
stable: false,
};
let message = order.message();
let signature = secret_key.sign_ecdsa(message);
signature.verify(&message, &public_key).unwrap();
}
#[test]
pub fn round_trip_order_signature_verification() {
// setup
let secret_key =
SecretKey::from_str("01010101010101010001020304050607ffff0000ffff00006363636363636363")
.unwrap();
let public_key = secret_key.public_key(SECP256K1);
let original_order = NewLimitOrder {
id: Uuid::from_str("67e5504410b1426f9247bb680e5fe0c8").unwrap(),
contract_symbol: ContractSymbol::BtcUsd,
price: rust_decimal_macros::dec!(53_000),
quantity: rust_decimal_macros::dec!(2000),
trader_id: public_key,
direction: Direction::Long,
leverage: rust_decimal_macros::dec!(2.0),
// Note: the last 5 is too much as it does not get serialized
expiry: OffsetDateTime::UNIX_EPOCH + 1.1010101015.seconds(),
stable: false,
};
let message = original_order.clone().message();
let signature = secret_key.sign_ecdsa(message);
signature.verify(&message, &public_key).unwrap();
let original_request = NewOrderRequest {
value: NewOrder::Limit(original_order),
signature,
channel_opening_params: None,
};
let original_serialized_request = serde_json::to_string(&original_request).unwrap();
let serialized_msg = "{\"value\":{\"Limit\":{\"id\":\"67e55044-10b1-426f-9247-bb680e5fe0c8\",\"contract_symbol\":\"BtcUsd\",\"price\":53000.0,\"quantity\":2000.0,\"trader_id\":\"0218845781f631c48f1c9709e23092067d06837f30aa0cd0544ac887fe91ddd166\",\"direction\":\"Long\",\"leverage\":2.0,\"expiry\":1,\"stable\":false}},\"signature\":\"304402205024fd6aea64c02155bdc063cf9168d9cd24fc6d54d3da0db645372828df210e022062323c30a88b60ef647d6740a01ac38fccc7f306f1c380bd92715d8b2e39adb9\",\"channel_opening_params\":null}";
// replace the signature with the one from above to have the same string
let serialized_msg =
serialized_msg.replace("SIGNATURE_PLACEHOLDER", signature.to_string().as_str());
// act
let parsed_request: NewOrderRequest =
serde_json::from_str(serialized_msg.as_str()).unwrap();
// assert
// ensure that the two strings are the same, besides the signature (which has a random
// factor)
assert_eq!(original_serialized_request, serialized_msg);
assert_eq!(
original_request.value.message(),
parsed_request.value.message()
);
// Below would also fail but we don't even get there yet
let secp = Secp256k1::verification_only();
parsed_request.verify(&secp).unwrap();
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/commons/liquidity_option.rs | crates/xxi-node/src/commons/liquidity_option.rs | use rust_decimal::Decimal;
use serde::Deserialize;
use serde::Serialize;
use time::OffsetDateTime;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LiquidityOption {
pub id: i32,
pub rank: usize,
pub title: String,
/// amount the trader can trade up to in sats
pub trade_up_to_sats: u64,
/// min deposit in sats
pub min_deposit_sats: u64,
/// max deposit in sats
pub max_deposit_sats: u64,
/// min fee in sats
pub min_fee_sats: u64,
pub fee_percentage: f64,
pub coordinator_leverage: f32,
#[serde(with = "time::serde::rfc3339")]
pub created_at: OffsetDateTime,
#[serde(with = "time::serde::rfc3339")]
pub updated_at: OffsetDateTime,
pub active: bool,
}
impl LiquidityOption {
/// Get fees for the liquidity option on an amount in sats
pub fn get_fee(&self, amount_sats: Decimal) -> Decimal {
let fee = (amount_sats / Decimal::from(100))
* Decimal::try_from(self.fee_percentage).expect("to fit into decimal");
if fee < Decimal::from(self.min_fee_sats) {
return Decimal::from(self.min_fee_sats);
}
fee
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OnboardingParam {
pub target_node: String,
pub user_channel_id: String,
pub amount_sats: u64,
pub liquidity_option_id: i32,
}
#[cfg(test)]
mod test {
use crate::commons::liquidity_option::LiquidityOption;
use rust_decimal::Decimal;
use time::OffsetDateTime;
fn get_liquidity_option() -> LiquidityOption {
LiquidityOption {
id: 1,
rank: 1,
title: "test".to_string(),
trade_up_to_sats: 500_000,
min_deposit_sats: 50_000,
max_deposit_sats: 500_000,
min_fee_sats: 10_000,
fee_percentage: 1.0,
coordinator_leverage: 2.0,
created_at: OffsetDateTime::now_utc(),
updated_at: OffsetDateTime::now_utc(),
active: true,
}
}
#[test]
fn test_min_fee() {
let option = get_liquidity_option();
let fee = option.get_fee(Decimal::from(60_000));
assert_eq!(Decimal::from(10_000), fee)
}
#[test]
fn test_percentage_fee() {
let option = get_liquidity_option();
let fee = option.get_fee(Decimal::from(1_100_000));
assert_eq!(Decimal::from(11_000), fee)
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/commons/price.rs | crates/xxi-node/src/commons/price.rs | use crate::commons::order::Order;
use crate::commons::order::OrderState;
use crate::commons::ContractSymbol;
use crate::commons::Direction;
use rust_decimal::Decimal;
use serde::Deserialize;
use serde::Serialize;
use std::collections::HashMap;
use time::OffsetDateTime;
#[derive(Serialize, Deserialize, Default, Debug, Clone, PartialEq)]
pub struct Price {
pub bid: Decimal,
pub ask: Decimal,
}
impl Price {
/// Get the price for the direction
///
/// For going long we get the best ask price, for going short we get the best bid price.
pub fn get_price_for_direction(&self, direction: Direction) -> Decimal {
match direction {
Direction::Long => self.ask,
Direction::Short => self.bid,
}
}
}
pub type Prices = HashMap<ContractSymbol, BestPrice>;
#[derive(Serialize, Deserialize, Default, Debug, Clone, PartialEq)]
pub struct BestPrice {
pub bid: Option<Decimal>,
pub ask: Option<Decimal>,
}
/// Best prices across all current orders for given ContractSymbol in the orderbook
/// Taken orders are not included in the average
pub fn best_current_price(current_orders: &[Order]) -> Prices {
let mut prices = HashMap::new();
let mut add_price_for_symbol = |symbol| {
prices.insert(
symbol,
BestPrice {
bid: best_bid_price(current_orders, symbol),
ask: best_ask_price(current_orders, symbol),
},
);
};
add_price_for_symbol(ContractSymbol::BtcUsd);
prices
}
/// If you place a market order to go short/sell, the best/highest `Bid` price
///
/// Differently said, remember `buy high`, `sell low`!
/// Ask = high
/// Bid = low
///
/// The best `Ask` is the lowest of all `Asks`
/// The best `Bid` is the highest of all `Bids`
///
/// If you SELL, you ask and you get the best price someone is willing to buy at i.e. the highest
/// bid price.
pub fn best_bid_price(orders: &[Order], symbol: ContractSymbol) -> Option<Decimal> {
orders
.iter()
.filter(|o| {
o.order_state == OrderState::Open
&& o.direction == Direction::Long
&& o.contract_symbol == symbol
&& o.expiry > OffsetDateTime::now_utc()
})
.map(|o| o.price)
.max()
}
/// If you place a market order to go long/buy, you get the best/lowest `Ask` price
///
/// Differently said, remember `buy high`, `sell low`!
/// Ask = high
/// Bid = low
///
/// The best `Ask` is the lowest of all `Asks`
/// The best `Bid` is the highest of all `Bids`
///
/// If you BUY, you bid and you get the best price someone is willing to sell at i.e. the lowest ask
/// price.
pub fn best_ask_price(orders: &[Order], symbol: ContractSymbol) -> Option<Decimal> {
orders
.iter()
.filter(|o| {
o.order_state == OrderState::Open
&& o.direction == Direction::Short
&& o.contract_symbol == symbol
&& o.expiry > OffsetDateTime::now_utc()
})
.map(|o| o.price)
.min()
}
#[cfg(test)]
mod test {
use crate::commons::order::Order;
use crate::commons::order::OrderReason;
use crate::commons::order::OrderState;
use crate::commons::order::OrderType;
use crate::commons::price::best_ask_price;
use crate::commons::price::best_bid_price;
use crate::commons::ContractSymbol;
use crate::commons::Direction;
use bitcoin::secp256k1::PublicKey;
use rust_decimal::Decimal;
use rust_decimal_macros::dec;
use std::str::FromStr;
use time::Duration;
use time::OffsetDateTime;
use uuid::Uuid;
fn dummy_public_key() -> PublicKey {
PublicKey::from_str("02bd998ebd176715fe92b7467cf6b1df8023950a4dd911db4c94dfc89cc9f5a655")
.unwrap()
}
fn dummy_order(price: Decimal, direction: Direction, order_state: OrderState) -> Order {
Order {
id: Uuid::new_v4(),
price,
trader_id: dummy_public_key(),
direction,
leverage: 1.0,
contract_symbol: ContractSymbol::BtcUsd,
quantity: 100.into(),
order_type: OrderType::Market,
timestamp: OffsetDateTime::now_utc(),
expiry: OffsetDateTime::now_utc() + Duration::minutes(1),
order_state,
order_reason: OrderReason::Manual,
stable: false,
}
}
#[test]
fn test_best_bid_price() {
let current_orders = vec![
dummy_order(dec!(10_000), Direction::Long, OrderState::Open),
dummy_order(dec!(30_000), Direction::Long, OrderState::Open),
dummy_order(dec!(500_000), Direction::Long, OrderState::Taken), // taken
dummy_order(dec!(50_000), Direction::Short, OrderState::Open), // wrong direction
];
assert_eq!(
best_bid_price(¤t_orders, ContractSymbol::BtcUsd),
Some(dec!(30_000))
);
}
#[test]
fn test_best_ask_price() {
let current_orders = vec![
dummy_order(dec!(10_000), Direction::Short, OrderState::Open),
dummy_order(dec!(30_000), Direction::Short, OrderState::Open),
// ignored in the calculations - this order is taken
dummy_order(dec!(5_000), Direction::Short, OrderState::Taken),
// ignored in the calculations - it's the bid price
dummy_order(dec!(50_000), Direction::Long, OrderState::Open),
];
assert_eq!(
best_ask_price(¤t_orders, ContractSymbol::BtcUsd),
Some(dec!(10_000))
);
}
#[test]
fn test_no_price() {
let all_orders_taken = vec![
dummy_order(dec!(10_000), Direction::Short, OrderState::Taken),
dummy_order(dec!(30_000), Direction::Long, OrderState::Taken),
];
assert_eq!(
best_ask_price(&all_orders_taken, ContractSymbol::BtcUsd),
None
);
assert_eq!(
best_bid_price(&all_orders_taken, ContractSymbol::BtcUsd),
None
);
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/commons/reported_error.rs | crates/xxi-node/src/commons/reported_error.rs | use bitcoin::secp256k1::PublicKey;
use serde::Deserialize;
use serde::Serialize;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReportedError {
pub trader_pk: PublicKey,
pub msg: String,
pub version: Option<String>,
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/commons/polls.rs | crates/xxi-node/src/commons/polls.rs | use anyhow::bail;
use bitcoin::secp256k1::PublicKey;
use serde::Deserialize;
use serde::Serialize;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Poll {
pub id: i32,
pub poll_type: PollType,
pub question: String,
pub choices: Vec<Choice>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Choice {
pub id: i32,
pub value: String,
pub editable: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Answer {
pub choice_id: i32,
pub value: String,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum PollType {
SingleChoice,
}
impl TryFrom<&str> for PollType {
type Error = anyhow::Error;
fn try_from(value: &str) -> Result<Self, Self::Error> {
match value.to_lowercase().as_str() {
"single_choice" => Ok(PollType::SingleChoice),
_ => {
bail!("Unsupported poll type")
}
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PollAnswers {
pub poll_id: i32,
pub trader_pk: PublicKey,
pub answers: Vec<Answer>,
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/commons/mod.rs | crates/xxi-node/src/commons/mod.rs | use anyhow::bail;
use bitcoin::secp256k1::PublicKey;
use rust_decimal::Decimal;
use serde::Deserialize;
use serde::Serialize;
use std::fmt;
use std::str::FromStr;
use time::OffsetDateTime;
use time::Time;
mod backup;
mod collab_revert;
mod funding_fee_event;
mod liquidity_option;
mod message;
mod order;
mod order_matching_fee;
mod polls;
mod pre_image;
mod price;
mod reported_error;
mod rollover;
mod signature;
mod trade;
pub use crate::commons::trade::*;
pub use backup::*;
pub use collab_revert::*;
pub use funding_fee_event::*;
pub use liquidity_option::*;
pub use message::*;
pub use order::*;
pub use order_matching_fee::order_matching_fee;
pub use polls::*;
pub use pre_image::*;
pub use price::*;
pub use reported_error::ReportedError;
pub use rollover::*;
pub use signature::*;
pub const AUTH_SIGN_MESSAGE: &[u8; 19] = b"Hello it's me Mario";
/// Registration details for enrolling into the beta program
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RegisterParams {
pub pubkey: PublicKey,
pub contact: Option<String>,
pub nickname: Option<String>,
pub version: Option<String>,
pub os: Option<String>,
/// Entered referral code, i.e. this user was revered by using this referral code
pub referral_code: Option<String>,
}
/// Registration details for enrolling into the beta program
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateUsernameParams {
pub pubkey: PublicKey,
pub nickname: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct User {
pub pubkey: PublicKey,
pub contact: Option<String>,
pub nickname: Option<String>,
pub referral_code: String,
}
impl User {
pub fn new(
pubkey: PublicKey,
contact: Option<String>,
nickname: Option<String>,
referral_code: String,
) -> Self {
Self {
pubkey,
contact,
nickname,
referral_code,
}
}
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct HodlInvoiceParams {
pub trader_pubkey: PublicKey,
pub amt_sats: u64,
pub r_hash: String,
}
pub fn referral_from_pubkey(public_key: PublicKey) -> String {
let referral_code = public_key
.to_string()
.chars()
.rev()
.take(6)
.collect::<String>()
.chars()
.rev()
.collect::<String>()
.to_uppercase();
referral_code
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ReferralStatus {
/// your personal referral code
pub referral_code: String,
/// These are the referrals which have reached the tier's min trading volume
pub number_of_activated_referrals: usize,
/// Total number of referred users
pub number_of_total_referrals: usize,
/// The more the user refers, the higher the tier. Tier 0 means no referral
pub referral_tier: usize,
/// Activated bonus, a percentage to be subtracted from the matching fee.
#[serde(with = "rust_decimal::serde::float")]
pub referral_fee_bonus: Decimal,
/// The type of this referral status
pub bonus_status_type: Option<BonusStatusType>,
}
#[derive(Serialize, Deserialize, Clone, Debug, Copy, PartialEq, Eq)]
pub enum BonusStatusType {
/// The bonus is because he referred enough users
Referral,
/// The user has been referred and gets a bonus
Referent,
}
impl ReferralStatus {
pub fn new(trader_id: PublicKey) -> Self {
Self {
referral_code: referral_from_pubkey(trader_id),
number_of_activated_referrals: 0,
number_of_total_referrals: 0,
referral_tier: 0,
referral_fee_bonus: Default::default(),
bonus_status_type: None,
}
}
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum ContractSymbol {
BtcUsd,
}
impl ContractSymbol {
pub fn label(self) -> String {
match self {
ContractSymbol::BtcUsd => "btcusd".to_string(),
}
}
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum Direction {
Long,
Short,
}
impl Direction {
pub fn opposite(&self) -> Direction {
match self {
Direction::Long => Direction::Short,
Direction::Short => Direction::Long,
}
}
}
impl fmt::Display for Direction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let s = match self {
Direction::Long => "Long",
Direction::Short => "Short",
};
s.fmt(f)
}
}
impl FromStr for ContractSymbol {
type Err = anyhow::Error;
fn from_str(value: &str) -> Result<Self, Self::Err> {
match value.to_lowercase().as_str() {
"btcusd" => Ok(ContractSymbol::BtcUsd),
// BitMEX representation
"xbtusd" => Ok(ContractSymbol::BtcUsd),
unknown => bail!("Unknown contract symbol {unknown}"),
}
}
}
impl fmt::Display for ContractSymbol {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let symbol = match self {
ContractSymbol::BtcUsd => "btcusd",
};
symbol.to_string().fmt(f)
}
}
/// Remove minutes, seconds and nano seconds from a given [`OffsetDateTime`].
pub fn to_nearest_hour_in_the_past(start_date: OffsetDateTime) -> OffsetDateTime {
OffsetDateTime::new_utc(
start_date.date(),
Time::from_hms_nano(start_date.time().hour(), 0, 0, 0).expect("to be valid time"),
)
}
#[cfg(test)]
pub mod tests {
use super::*;
use secp256k1::PublicKey;
use std::str::FromStr;
#[test]
fn contract_symbol_from_str() {
assert_eq!(
ContractSymbol::from_str("btcusd").unwrap(),
ContractSymbol::BtcUsd
);
assert_eq!(
ContractSymbol::from_str("BTCUSD").unwrap(),
ContractSymbol::BtcUsd
);
assert_eq!(
ContractSymbol::from_str("xbtusd").unwrap(),
ContractSymbol::BtcUsd
);
assert!(ContractSymbol::from_str("dogeusd").is_err());
}
#[test]
pub fn test_referral_generation() {
let pk = PublicKey::from_str(
"0218845781f631c48f1c9709e23092067d06837f30aa0cd0544ac887fe91ddd166",
)
.unwrap();
let referral = referral_from_pubkey(pk);
assert_eq!(referral, "DDD166".to_string());
}
#[test]
fn test_remove_small_units() {
let start_date = OffsetDateTime::now_utc();
// Act
let result = to_nearest_hour_in_the_past(start_date);
// Assert
assert_eq!(result.hour(), start_date.time().hour());
assert_eq!(result.minute(), 0);
assert_eq!(result.second(), 0);
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/commons/backup.rs | crates/xxi-node/src/commons/backup.rs | use crate::commons::signature::create_sign_message;
use bitcoin::secp256k1::ecdsa::Signature;
use bitcoin::secp256k1::PublicKey;
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::VerifyOnly;
use serde::Deserialize;
use serde::Serialize;
/// A message to restore a key with its value.
#[derive(Serialize, Deserialize)]
pub struct Restore {
pub key: String,
pub value: Vec<u8>,
}
/// A message to backup a key with its value.
#[derive(Serialize, Deserialize)]
pub struct Backup {
pub key: String,
pub value: Vec<u8>,
/// A signature of the value using the nodes private key
pub signature: Signature,
}
impl Backup {
/// Verifies if the backup was from the given node id
pub fn verify(&self, secp: &Secp256k1<VerifyOnly>, node_id: &PublicKey) -> anyhow::Result<()> {
let message = create_sign_message(self.value.clone());
secp.verify_ecdsa(&message, &self.signature, node_id)?;
Ok(())
}
}
/// A message to delete a backup of a key
#[derive(Serialize, Deserialize)]
pub struct DeleteBackup {
pub key: String,
/// A signature of the requesting node id using the nodes private key
pub signature: Signature,
}
impl DeleteBackup {
pub fn verify(&self, secp: &Secp256k1<VerifyOnly>, node_id: &PublicKey) -> anyhow::Result<()> {
let message = node_id.to_string().as_bytes().to_vec();
let message = create_sign_message(message);
secp.verify_ecdsa(&message, &self.signature, node_id)?;
Ok(())
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/commons/rollover.rs | crates/xxi-node/src/commons/rollover.rs | use bitcoin::Network;
use time::macros::time;
use time::Duration;
use time::OffsetDateTime;
use time::Weekday;
/// Calculates the next expiry timestamp based on the given timestamp and the network.
pub fn calculate_next_expiry(timestamp: OffsetDateTime, network: Network) -> OffsetDateTime {
match network {
// Calculates the expiry timestamp at the next Sunday at 3 pm UTC from a given offset date
// time. If the argument falls in between Friday, 3 pm UTC and Sunday, 3pm UTC, the
// expiry will be calculated to next weeks Sunday at 3 pm
Network::Bitcoin => {
let days = if is_eligible_for_rollover(timestamp, network)
|| timestamp.weekday() == Weekday::Sunday
{
// if the provided timestamp is in the rollover weekend or on a sunday, we expire
// the sunday the week after.
7 - timestamp.weekday().number_from_monday() + 7
} else {
7 - timestamp.weekday().number_from_monday()
};
let time = timestamp
.date()
.with_hms(15, 0, 0)
.expect("to fit into time");
(time + Duration::days(days as i64)).assume_utc()
}
// Calculates the expiry timestamp on the same day at midnight unless its already in
// rollover then the next day midnight.
_ => {
if is_eligible_for_rollover(timestamp, network) {
let after_tomorrow = timestamp.date() + Duration::days(2);
after_tomorrow.midnight().assume_utc()
} else {
let tomorrow = timestamp.date() + Duration::days(1);
tomorrow.midnight().assume_utc()
}
}
}
}
/// Checks whether the provided expiry date is eligible for a rollover
pub fn is_eligible_for_rollover(timestamp: OffsetDateTime, network: Network) -> bool {
match network {
// Returns true if the given date falls in between Friday 15 pm UTC and Sunday 15 pm UTC
Network::Bitcoin => match timestamp.weekday() {
Weekday::Friday => timestamp.time() >= time!(15:00),
Weekday::Saturday => true,
Weekday::Sunday => timestamp.time() < time!(15:00),
_ => false,
},
// Returns true if the timestamp is less than 8 hours from now
_ => {
let midnight = (OffsetDateTime::now_utc().date() + Duration::days(1))
.midnight()
.assume_utc();
(midnight - timestamp) < Duration::hours(8)
}
}
}
#[cfg(test)]
mod test {
use crate::commons::rollover::calculate_next_expiry;
use crate::commons::rollover::is_eligible_for_rollover;
use bitcoin::Network;
use time::Duration;
use time::OffsetDateTime;
#[test]
fn test_is_not_eligible_for_rollover() {
// Wed Aug 09 2023 09:30:23 GMT+0000
let expiry = OffsetDateTime::from_unix_timestamp(1691573423).unwrap();
assert!(!is_eligible_for_rollover(expiry, Network::Bitcoin));
}
#[test]
fn test_is_just_eligible_for_rollover_friday() {
// Fri Aug 11 2023 15:00:00 GMT+0000
let expiry = OffsetDateTime::from_unix_timestamp(1691766000).unwrap();
assert!(is_eligible_for_rollover(expiry, Network::Bitcoin));
// Fri Aug 11 2023 15:00:01 GMT+0000
let expiry = OffsetDateTime::from_unix_timestamp(1691766001).unwrap();
assert!(is_eligible_for_rollover(expiry, Network::Bitcoin));
}
#[test]
fn test_is_eligible_for_rollover_saturday() {
// Sat Aug 12 2023 16:00:00 GMT+0000
let expiry = OffsetDateTime::from_unix_timestamp(1691856000).unwrap();
assert!(is_eligible_for_rollover(expiry, Network::Bitcoin));
}
#[test]
fn test_is_just_eligible_for_rollover_sunday() {
// Sun Aug 13 2023 14:59:59 GMT+0000
let expiry = OffsetDateTime::from_unix_timestamp(1691938799).unwrap();
assert!(is_eligible_for_rollover(expiry, Network::Bitcoin));
}
#[test]
fn test_is_just_not_eligible_for_rollover_sunday() {
// Sun Aug 13 2023 15:00:00 GMT+0000
let expiry = OffsetDateTime::from_unix_timestamp(1691938800).unwrap();
assert!(!is_eligible_for_rollover(expiry, Network::Bitcoin));
// Sun Aug 13 2023 15:00:01 GMT+0000
let expiry = OffsetDateTime::from_unix_timestamp(1691938801).unwrap();
assert!(!is_eligible_for_rollover(expiry, Network::Bitcoin));
}
#[test]
fn test_expiry_timestamp_before_friday_15pm() {
// Wed Aug 09 2023 09:30:23 GMT+0000
let from = OffsetDateTime::from_unix_timestamp(1691573423).unwrap();
let expiry = calculate_next_expiry(from, Network::Bitcoin);
// Sun Aug 13 2023 15:00:00 GMT+0000
assert_eq!(1691938800, expiry.unix_timestamp());
}
#[test]
fn test_expiry_timestamp_just_before_friday_15pm() {
// Fri Aug 11 2023 14:59:59 GMT+0000
let from = OffsetDateTime::from_unix_timestamp(1691765999).unwrap();
let expiry = calculate_next_expiry(from, Network::Bitcoin);
// Sun Aug 13 2023 15:00:00 GMT+0000
assert_eq!(1691938800, expiry.unix_timestamp());
}
#[test]
fn test_expiry_timestamp_just_after_friday_15pm() {
// Fri Aug 11 2023 15:00:01 GMT+0000
let from = OffsetDateTime::from_unix_timestamp(1691766001).unwrap();
let expiry = calculate_next_expiry(from, Network::Bitcoin);
// Sun Aug 20 2023 15:00:00 GMT+0000
assert_eq!(1692543600, expiry.unix_timestamp());
}
#[test]
fn test_expiry_timestamp_at_friday_15pm() {
// Fri Aug 11 2023 15:00:00 GMT+0000
let from = OffsetDateTime::from_unix_timestamp(1691766000).unwrap();
let expiry = calculate_next_expiry(from, Network::Bitcoin);
// Sun Aug 20 2023 15:00:00 GMT+0000
assert_eq!(1692543600, expiry.unix_timestamp());
}
#[test]
fn test_expiry_timestamp_after_sunday_15pm() {
// Sun Aug 06 2023 16:00:00 GMT+0000
let from = OffsetDateTime::from_unix_timestamp(1691337600).unwrap();
let expiry = calculate_next_expiry(from, Network::Bitcoin);
// Sun Aug 13 2023 15:00:00 GMT+0000
assert_eq!(1691938800, expiry.unix_timestamp());
}
#[test]
fn test_expiry_timestamp_on_saturday() {
// Sat Aug 12 2023 16:00:00 GMT+0000
let from = OffsetDateTime::from_unix_timestamp(1691856000).unwrap();
let expiry = calculate_next_expiry(from, Network::Bitcoin);
// Sun Aug 20 2023 15:00:00 GMT+0000
assert_eq!(1692543600, expiry.unix_timestamp());
}
#[test]
fn test_expiry_timestamp_regtest_midnight() {
// 12:00 on the current day
let timestamp = OffsetDateTime::now_utc().date().midnight() + Duration::hours(12);
let expiry = calculate_next_expiry(timestamp.assume_utc(), Network::Regtest);
let midnight = (OffsetDateTime::now_utc().date() + Duration::days(1))
.midnight()
.assume_utc();
assert_eq!(midnight, expiry);
}
#[test]
fn test_expiry_timestamp_regtest_next_midnight() {
// 20:00 on the current day
let timestamp = OffsetDateTime::now_utc().date().midnight() + Duration::hours(20);
let expiry = calculate_next_expiry(timestamp.assume_utc(), Network::Regtest);
let next_midnight = (timestamp.date() + Duration::days(2))
.midnight()
.assume_utc();
assert_eq!(next_midnight, expiry);
}
#[test]
fn test_is_not_eligable_for_rollover_regtest() {
let timestamp = OffsetDateTime::now_utc().date().midnight() + Duration::hours(16);
assert!(!is_eligible_for_rollover(
timestamp.assume_utc(),
Network::Regtest
))
}
#[test]
fn test_is_eligable_for_rollover_regtest() {
let timestamp = OffsetDateTime::now_utc().date().midnight() + Duration::hours(17);
assert!(is_eligible_for_rollover(
timestamp.assume_utc(),
Network::Regtest
))
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/commons/trade.rs | crates/xxi-node/src/commons/trade.rs | use crate::commons::ContractSymbol;
use crate::commons::Direction;
use bitcoin::secp256k1::PublicKey;
use bitcoin::secp256k1::XOnlyPublicKey;
use bitcoin::Amount;
use rust_decimal::Decimal;
use serde::Deserialize;
use serde::Serialize;
use time::OffsetDateTime;
use uuid::Uuid;
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct TradeAndChannelParams {
pub trade_params: TradeParams,
#[serde(with = "bitcoin::amount::serde::as_sat::opt")]
pub trader_reserve: Option<Amount>,
#[serde(with = "bitcoin::amount::serde::as_sat::opt")]
pub coordinator_reserve: Option<Amount>,
#[serde(with = "bitcoin::amount::serde::as_sat::opt")]
pub external_funding: Option<Amount>,
}
/// The trade parameters defining the trade execution.
///
/// Emitted by the orderbook when a match is found.
///
/// Both trading parties will receive trade params and then request trade execution with said trade
/// parameters from the coordinator.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct TradeParams {
/// The identity of the trader
pub pubkey: PublicKey,
/// The contract symbol for the trade to be set up
pub contract_symbol: ContractSymbol,
/// The leverage of the trader
///
/// This has to correspond to our order's leverage.
pub leverage: f32,
/// The quantity of the trader
///
/// For the trade set up with the coordinator it is the quantity of the contract.
/// This quantity may be the complete quantity of an order or a fraction.
pub quantity: f32,
/// The direction of the trader
///
/// The direction from the point of view of the trader.
/// The coordinator takes the counter-position when setting up the trade.
pub direction: Direction,
/// The filling information from the orderbook
///
/// This is used by the coordinator to be able to make sure both trading parties are acting.
/// The `quantity` has to match the cummed up quantities of the matches in `filled_with`.
pub filled_with: FilledWith,
}
impl TradeParams {
pub fn average_execution_price(&self) -> Decimal {
self.filled_with.average_execution_price()
}
pub fn order_matching_fee(&self) -> Amount {
self.filled_with.order_matching_fee()
}
}
/// A match for an order
///
/// The match defines the execution price and the quantity to be used of the order with the
/// corresponding order id.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub struct Match {
/// The id of the match
pub id: Uuid,
/// The id of the matched order defined by the orderbook
///
/// The identifier of the order as defined by the orderbook.
pub order_id: Uuid,
/// The quantity of the matched order to be used
///
/// This might be the complete quantity of the matched order, or a fraction.
#[serde(with = "rust_decimal::serde::float")]
pub quantity: Decimal,
/// Pubkey of the node which order was matched
pub pubkey: PublicKey,
/// The execution price as defined by the orderbook
///
/// The trade is to be executed at this price.
#[serde(with = "rust_decimal::serde::float")]
pub execution_price: Decimal,
#[serde(with = "bitcoin::amount::serde::as_sat")]
pub matching_fee: Amount,
}
impl From<Matches> for Match {
fn from(value: Matches) -> Self {
Match {
id: value.id,
order_id: value.order_id,
quantity: value.quantity,
pubkey: value.trader_id,
execution_price: value.execution_price,
matching_fee: value.matching_fee,
}
}
}
/// The match params for one order
///
/// This is emitted by the orderbook to the trader when an order gets filled.
/// This is emitted for one of the trader's orders, i.e. the `order_id` matches one of the orders
/// that the trader submitted to the orderbook. The matches define how this order was filled.
/// This information is used to request trade execution with the coordinator.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct FilledWith {
    /// The id of the order defined by the orderbook
    ///
    /// The identifier of the order as defined by the orderbook.
    ///
    /// TODO(holzeis): We might want to consider adding the order to the filled with struct. Having
    /// this separated doesn't make much sense anymore since, the filled with is not separately
    /// processed by the app anymore.
    pub order_id: Uuid,
    /// The expiry timestamp of the contract-to-be
    ///
    /// A timestamp that defines when the contract will expire.
    /// The orderbook defines the timestamp so that the systems using the trade params to set up
    /// the trade are aligned on one timestamp. The systems using the trade params should
    /// validate this timestamp against their trade settings. If the expiry timestamp is older
    /// than a defined threshold a system may discard the trade params as outdated.
    ///
    /// The oracle event-id is defined by contract symbol and the expiry timestamp.
    pub expiry_timestamp: OffsetDateTime,
    /// The public key of the oracle to be used
    ///
    /// The orderbook decides this when matching orders.
    /// The oracle_pk is used to define what oracle is to be used in the contract.
    /// This `oracle_pk` must correspond to one `oracle_pk` configured in the dlc-manager.
    /// It is possible to configure multiple oracles in the dlc-manager; this
    /// `oracle_pk` has to match one of them. This allows us to configure the dlc-managers
    /// using two oracles, where one oracle can be used as backup if the other oracle is not
    /// available. Eventually this can be changed to be a list of oracle PKs and a threshold of
    /// how many oracles have to agree on the attestation.
    pub oracle_pk: XOnlyPublicKey,
    /// The matches for the order
    pub matches: Vec<Match>,
}
impl FilledWith {
pub fn average_execution_price(&self) -> Decimal {
average_execution_price(self.matches.clone())
}
pub fn order_matching_fee(&self) -> Amount {
self.matches.iter().map(|m| m.matching_fee).sum()
}
}
/// Calculates the average execution price for inverse contracts.
///
/// The average execution price follows a simple formula:
/// `total_order_quantity / (quantity_trade_0 / execution_price_trade_0 + quantity_trade_1 /
/// execution_price_trade_1 )`
pub fn average_execution_price(matches: Vec<Match>) -> Decimal {
    // A single match trivially executes at its own price.
    if let [only_match] = matches.as_slice() {
        return only_match.execution_price;
    }
    // Accumulate the total quantity and the total nominal value (quantity / price) in one pass.
    let (total_quantity, total_nominal) =
        matches
            .iter()
            .fold((Decimal::ZERO, Decimal::ZERO), |(quantity, nominal), m| {
                (
                    quantity + m.quantity,
                    nominal + m.quantity / m.execution_price,
                )
            });
    total_quantity / total_nominal
}
/// Lifecycle state of a [`Matches`] record.
pub enum MatchState {
    /// The match exists but has not been completed yet.
    Pending,
    /// The match was filled.
    Filled,
    /// The match failed.
    Failed,
}
/// A persisted match record, pairing an order with the order on the other side of the match.
pub struct Matches {
    /// The id of the match.
    pub id: Uuid,
    /// Current lifecycle state of the match.
    pub match_state: MatchState,
    /// The id of the trader's order that was matched.
    pub order_id: Uuid,
    /// The trader whose order was matched.
    pub trader_id: PublicKey,
    /// The order on the other side of the match.
    // NOTE(review): inferred from the field name — confirm against the orderbook.
    pub match_order_id: Uuid,
    /// The trader on the other side of the match.
    // NOTE(review): inferred from the field name — confirm against the orderbook.
    pub match_trader_id: PublicKey,
    /// The price at which the match executes.
    pub execution_price: Decimal,
    /// The matched quantity.
    pub quantity: Decimal,
    /// When the match was created.
    pub created_at: OffsetDateTime,
    /// When the match was last updated.
    pub updated_at: OffsetDateTime,
    /// The fee charged for this match.
    pub matching_fee: Amount,
}
#[cfg(test)]
mod test {
    use crate::commons::trade::FilledWith;
    use crate::commons::trade::Match;
    use bitcoin::secp256k1::PublicKey;
    use bitcoin::secp256k1::XOnlyPublicKey;
    use bitcoin::Amount;
    use rust_decimal_macros::dec;
    use std::str::FromStr;
    use time::OffsetDateTime;
    use uuid::Uuid;

    /// A fixed public key for populating test matches.
    fn dummy_public_key() -> PublicKey {
        PublicKey::from_str("02bd998ebd176715fe92b7467cf6b1df8023950a4dd911db4c94dfc89cc9f5a655")
            .unwrap()
    }

    /// Builds a match at the given quantity and execution price.
    fn dummy_match(
        quantity: rust_decimal::Decimal,
        execution_price: rust_decimal::Decimal,
    ) -> Match {
        Match {
            id: Uuid::new_v4(),
            order_id: Default::default(),
            quantity,
            pubkey: dummy_public_key(),
            execution_price,
            matching_fee: Amount::from_sat(1000),
        }
    }

    #[test]
    fn test_average_execution_price() {
        let filled = FilledWith {
            order_id: Default::default(),
            expiry_timestamp: OffsetDateTime::now_utc(),
            oracle_pk: XOnlyPublicKey::from_str(
                "16f88cf7d21e6c0f46bcbc983a4e3b19726c6c98858cc31c83551a88fde171c0",
            )
            .expect("To be a valid pubkey"),
            matches: vec![
                dummy_match(dec!(1000), dec!(10_000)),
                dummy_match(dec!(2000), dec!(12_000)),
            ],
        };

        let average_execution_price = filled.average_execution_price();

        assert_eq!(average_execution_price.round_dp(2), dec!(11250.00));
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/commons/message.rs | crates/xxi-node/src/commons/message.rs | use crate::commons::order::Order;
use crate::commons::signature::Signature;
use crate::commons::FundingRate;
use crate::commons::LiquidityOption;
use crate::commons::NewLimitOrder;
use crate::commons::ReferralStatus;
use crate::FundingFeeEvent;
use anyhow::Result;
use bitcoin::address::NetworkUnchecked;
use bitcoin::Address;
use bitcoin::Amount;
use rust_decimal::Decimal;
use serde::Deserialize;
use serde::Serialize;
use std::fmt::Display;
use thiserror::Error;
use tokio_tungstenite_wasm as tungstenite;
use uuid::Uuid;
/// A 32-byte Lightning channel identifier.
pub type ChannelId = [u8; 32];
/// A 32-byte DLC channel identifier.
pub type DlcChannelId = [u8; 32];
/// Messages sent over the orderbook websocket, primarily orderbook -> client.
#[derive(Serialize, Clone, Deserialize, Debug)]
pub enum Message {
    /// Snapshot of all current orders.
    AllOrders(Vec<Order>),
    /// A new order was added to the orderbook.
    NewOrder(Order),
    /// The order with the given id was removed.
    DeleteOrder(Uuid),
    /// The given order was updated.
    Update(Order),
    /// Authentication failed; carries the reason.
    InvalidAuthentication(String),
    /// Authentication succeeded; carries the current configuration.
    Authenticated(TenTenOneConfig),
    /// Message used to collaboratively revert DLC channels.
    DlcChannelCollaborativeRevert {
        channel_id: DlcChannelId,
        coordinator_address: Address<NetworkUnchecked>,
        #[serde(with = "bitcoin::amount::serde::as_sat")]
        coordinator_amount: Amount,
        #[serde(with = "bitcoin::amount::serde::as_sat")]
        trader_amount: Amount,
        #[serde(with = "rust_decimal::serde::float")]
        execution_price: Decimal,
    },
    /// A trade for `order_id` failed.
    TradeError {
        order_id: Uuid,
        error: TradingError,
    },
    /// A lightning payment was received.
    // NOTE(review): inferred from the variant name — confirm against the sender.
    LnPaymentReceived {
        r_hash: String,
        #[serde(with = "bitcoin::amount::serde::as_sat")]
        amount: Amount,
    },
    /// Rolling over a position failed.
    RolloverError {
        error: TradingError,
    },
    /// A single funding fee event.
    FundingFeeEvent(FundingFeeEvent),
    /// Snapshot of all funding fee events.
    AllFundingFeeEvents(Vec<FundingFeeEvent>),
    /// The upcoming funding rate.
    NextFundingRate(FundingRate),
}
/// Errors that can occur while processing a trade, sent to the client via [`Message`].
#[derive(Serialize, Deserialize, Clone, Error, Debug, PartialEq)]
pub enum TradingError {
    /// The submitted order failed validation.
    #[error("Invalid order: {0}")]
    InvalidOrder(String),
    /// The orderbook could not find a match for the order.
    #[error("No match found: {0}")]
    NoMatchFound(String),
    /// Any other error; carries a pre-formatted message.
    #[error("{0}")]
    Other(String),
}
impl From<anyhow::Error> for TradingError {
fn from(value: anyhow::Error) -> Self {
TradingError::Other(format!("{value:#}"))
}
}
/// Configuration pushed to the client upon successful authentication
/// (see [`Message::Authenticated`]).
#[derive(Serialize, Clone, Deserialize, Debug)]
pub struct TenTenOneConfig {
    // The liquidity options for onboarding
    pub liquidity_options: Vec<LiquidityOption>,
    /// Minimum tradable quantity.
    pub min_quantity: u64,
    /// Maintenance margin rate.
    // NOTE(review): exact semantics not visible here — confirm against the coordinator.
    pub maintenance_margin_rate: f32,
    /// Fee rate applied when orders are matched.
    pub order_matching_fee_rate: f32,
    /// The user's referral program status.
    pub referral_status: ReferralStatus,
    /// Maximum allowed leverage.
    pub max_leverage: u8,
}
/// Requests a client can send to the orderbook over the websocket.
#[derive(Serialize, Clone, Deserialize, Debug)]
pub enum OrderbookRequest {
    /// Authenticate the websocket session.
    Authenticate {
        /// Push-notification token (presumably Firebase Cloud Messaging), if available.
        fcm_token: Option<String>,
        /// Client app version, if known.
        version: Option<String>,
        /// Client operating system, if known.
        os: Option<String>,
        /// Signature proving ownership of the client's key.
        signature: Signature,
    },
    /// Place a new limit order.
    InsertOrder(NewLimitOrder),
    /// Remove a previously placed order by its id.
    DeleteOrder(Uuid),
}
impl TryFrom<OrderbookRequest> for tungstenite::Message {
    type Error = anyhow::Error;

    /// Serialize the request as JSON and wrap it in a websocket text frame.
    fn try_from(request: OrderbookRequest) -> Result<Self> {
        let json = serde_json::to_string(&request)?;
        Ok(Self::Text(json))
    }
}
impl Display for Message {
    /// Writes the variant's name, used for logging/metrics.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let s = match self {
            Message::AllOrders(_) => "AllOrders",
            Message::NewOrder(_) => "NewOrder",
            Message::DeleteOrder(_) => "DeleteOrder",
            Message::Update(_) => "Update",
            Message::InvalidAuthentication(_) => "InvalidAuthentication",
            Message::Authenticated(_) => "Authenticated",
            Message::DlcChannelCollaborativeRevert { .. } => "DlcChannelCollaborativeRevert",
            Message::TradeError { .. } => "TradeError",
            Message::RolloverError { .. } => "RolloverError",
            Message::LnPaymentReceived { .. } => "LnPaymentReceived",
            Message::FundingFeeEvent(_) => "FundingFeeEvent",
            // Was "FundingFeeEvent", which made this variant indistinguishable from
            // `FundingFeeEvent` in logs.
            Message::AllFundingFeeEvents(_) => "AllFundingFeeEvents",
            Message::NextFundingRate(_) => "NextFundingRate",
        };
        f.write_str(s)
    }
}
/// All values are from the perspective of the coordinator
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum PositionMessage {
    /// The current position as seen from the coordinator
    CurrentPosition {
        /// if quantity is < 0 then coordinator is short, if > 0, then coordinator is long
        quantity: f32,
        /// The average entry price of the current position.
        average_entry_price: f32,
    },
    /// A new trade which was executed successfully
    NewTrade {
        /// The coordinator's total position
        ///
        /// if quantity is < 0 then coordinator is short, if > 0, then coordinator is long
        total_quantity: f32,
        /// The average entry price of the total position
        total_average_entry_price: f32,
        /// The quantity of the new trade
        ///
        /// if quantity is < 0 then coordinator is short, if > 0, then coordinator is long
        new_trade_quantity: f32,
        /// The average entry price of the new trade
        new_trade_average_entry_price: f32,
    },
    /// Authentication on the position feed succeeded.
    Authenticated,
    /// Authentication failed; carries the reason.
    InvalidAuthentication(String),
}
impl TryFrom<PositionMessage> for tungstenite::Message {
type Error = anyhow::Error;
fn try_from(request: PositionMessage) -> Result<Self> {
let msg = serde_json::to_string(&request)?;
Ok(tungstenite::Message::Text(msg))
}
}
impl TryFrom<PositionMessageRequest> for tungstenite::Message {
    type Error = anyhow::Error;

    /// Serialize the request as JSON and wrap it in a websocket text frame.
    fn try_from(request: PositionMessageRequest) -> Result<Self> {
        let json = serde_json::to_string(&request)?;
        Ok(tungstenite::Message::Text(json))
    }
}
/// Requests a client can send on the position websocket.
#[derive(Serialize, Clone, Deserialize, Debug)]
pub enum PositionMessageRequest {
    /// Authenticate the session by proving ownership of the client's key.
    Authenticate { signature: Signature },
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/node/oracle.rs | crates/xxi-node/src/node/oracle.rs | use crate::bitcoin_conversion::to_xonly_pk_29;
use crate::bitcoin_conversion::to_xonly_pk_30;
use crate::node::Node;
use crate::node::Storage;
use crate::on_chain_wallet::BdkStorage;
use crate::storage::TenTenOneStorage;
use bitcoin::secp256k1::XOnlyPublicKey;
use dlc_manager::Oracle;
use p2pd_oracle_client::P2PDOracleClient;
use serde::Deserialize;
use serde::Serialize;
/// Connection details of a DLC oracle.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct OracleInfo {
    /// Base URL of the oracle's endpoint, without a trailing slash
    /// (one is appended when building the client).
    pub endpoint: String,
    /// The oracle's x-only public key.
    pub public_key: XOnlyPublicKey,
}
impl From<OracleInfo> for P2PDOracleClient {
fn from(oracle: OracleInfo) -> Self {
P2PDOracleClient {
host: oracle.endpoint + "/",
public_key: to_xonly_pk_29(oracle.public_key),
}
}
}
impl<D: BdkStorage, S: TenTenOneStorage, N: Storage> Node<D, S, N> {
    /// Public keys of all oracles this node is configured with.
    pub fn oracle_pk(&self) -> Vec<XOnlyPublicKey> {
        // Iterate by reference instead of cloning the whole `Vec<Arc<_>>` first;
        // `get_public_key` only needs `&self`.
        self.oracles
            .iter()
            .map(|oracle| to_xonly_pk_30(oracle.get_public_key()))
            .collect()
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/node/event.rs | crates/xxi-node/src/node/event.rs | use crate::message_handler::TenTenOneMessage;
use crate::storage::DlcChannelEvent;
use bitcoin::secp256k1::PublicKey;
use std::sync::mpsc;
use std::sync::Arc;
use tokio::sync::broadcast;
use tokio::sync::broadcast::Receiver;
use tokio::task::spawn_blocking;
/// Events published on the node's internal [`NodeEventHandler`] bus.
#[derive(Clone, Debug)]
pub enum NodeEvent {
    /// A connection to `peer` has been established.
    Connected {
        peer: PublicKey,
    },
    /// Send the given DLC message to `peer`.
    SendDlcMessage {
        peer: PublicKey,
        msg: TenTenOneMessage,
    },
    /// Store the given DLC message for `peer` without sending it.
    // NOTE(review): semantics inferred from the variant name — confirm against the subscribers.
    StoreDlcMessage {
        peer: PublicKey,
        msg: TenTenOneMessage,
    },
    /// (Re-)send the last DLC message exchanged with `peer`.
    // NOTE(review): semantics inferred from the variant name — confirm against the subscribers.
    SendLastDlcMessage {
        peer: PublicKey,
    },
    /// A [`DlcChannelEvent`] forwarded from the DLC storage layer
    /// (see [`connect_node_event_handler_to_dlc_channel_events`]).
    DlcChannelEvent {
        dlc_channel_event: DlcChannelEvent,
    },
}
/// Fan-out bus for [`NodeEvent`]s, backed by a tokio broadcast channel.
#[derive(Clone)]
pub struct NodeEventHandler {
    // Broadcast sender; every call to `subscribe` creates a fresh receiver.
    sender: broadcast::Sender<NodeEvent>,
}
impl Default for NodeEventHandler {
fn default() -> Self {
Self::new()
}
}
impl NodeEventHandler {
pub fn new() -> Self {
let (sender, _) = broadcast::channel(100);
NodeEventHandler { sender }
}
pub fn subscribe(&self) -> Receiver<NodeEvent> {
self.sender.subscribe()
}
pub fn publish(&self, event: NodeEvent) {
if let Err(e) = self.sender.send(event) {
tracing::error!("Failed to send node event. Error {e:#}");
}
}
}
/// Forward every [`DlcChannelEvent`] from `dlc_event_receiver` onto the node event bus.
///
/// Spawns a blocking task that runs until the sending side of the channel is dropped.
pub fn connect_node_event_handler_to_dlc_channel_events(
    node_event_handler: Arc<NodeEventHandler>,
    dlc_event_receiver: mpsc::Receiver<DlcChannelEvent>,
) {
    spawn_blocking(move || loop {
        let dlc_channel_event = match dlc_event_receiver.recv() {
            Ok(event) => event,
            Err(e) => {
                tracing::error!("The dlc event channel has been closed. Error: {e:#}");
                break;
            }
        };
        node_event_handler.publish(NodeEvent::DlcChannelEvent { dlc_channel_event });
    });
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/node/connection.rs | crates/xxi-node/src/node/connection.rs | use crate::bitcoin_conversion::to_secp_pk_29;
use crate::networking;
use crate::node::Node;
use crate::node::NodeInfo;
use crate::node::Storage;
use crate::on_chain_wallet::BdkStorage;
use crate::storage::TenTenOneStorage;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use futures::Future;
use std::pin::Pin;
use std::time::Duration;
impl<D: BdkStorage, S: TenTenOneStorage + 'static, N: Storage + Sync + Send + 'static>
    Node<D, S, N>
{
    /// Establish a connection with a peer.
    ///
    /// # Returns
    ///
    /// If successful, a [`Future`] is returned which will be ready once the connection has been
    /// _lost_. This is meant to be used by the caller to know when to initiate a reconnect if they
    /// want to keep the connection alive.
    pub async fn connect(&self, peer: NodeInfo) -> Result<Pin<Box<impl Future<Output = ()>>>> {
        #[allow(clippy::async_yields_async)] // We want to poll this future in a loop elsewhere
        let connection_closed_future = tokio::time::timeout(Duration::from_secs(15), async {
            // Retry with exponential backoff (1s, 2s, 4s, ...) until the outer
            // 15-second timeout aborts the whole attempt.
            let mut round = 1;
            loop {
                tracing::debug!(%peer, "Setting up connection");
                if let Some(fut) =
                    networking::connect_outbound(self.peer_manager.clone(), peer).await
                {
                    return fut;
                };
                let retry_interval = Duration::from_secs(1) * round;
                tracing::debug!(%peer, ?retry_interval, "Connection setup failed; retrying");
                tokio::time::sleep(retry_interval).await;
                round *= 2;
            }
        })
        .await
        .with_context(|| format!("Failed to connect to peer: {peer}"))?;
        tracing::debug!(%peer, "Connection setup completed");
        let mut connection_closed_future = Box::pin(connection_closed_future);
        // The transport is up, but the peer-level handshake may still be in progress:
        // poll (capped at 30 seconds) until the peer manager reports the peer as
        // connected, bailing out early if the connection drops in the meantime.
        tokio::time::timeout(Duration::from_secs(30), async {
            while !self.is_connected(peer.pubkey) {
                if futures::poll!(&mut connection_closed_future).is_ready() {
                    bail!("Peer disconnected before we finished the handshake");
                }
                tracing::debug!(%peer, "Waiting to confirm established connection");
                tokio::time::sleep(Duration::from_secs(1)).await;
            }
            Ok(())
        })
        .await??;
        tracing::info!(%peer, "Connection established");
        Ok(connection_closed_future)
    }
    /// Establish a one-time connection with a peer.
    ///
    /// The caller is not interested in knowing if the connection is ever lost. If the caller does
    /// care about that, they should use `connect` instead.
    pub async fn connect_once(&self, peer: NodeInfo) -> Result<()> {
        let fut = self.connect(peer).await?;
        // The caller does not care if the connection is dropped eventually.
        drop(fut);
        Ok(())
    }
    /// Whether the peer manager currently reports a connection to `pubkey`.
    pub fn is_connected(&self, pubkey: PublicKey) -> bool {
        self.peer_manager
            .get_peer_node_ids()
            .iter()
            .any(|(id, _)| *id == to_secp_pk_29(pubkey))
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/node/dlc_manager.rs | crates/xxi-node/src/node/dlc_manager.rs | use crate::bitcoin_conversion::to_secp_pk_29;
use crate::dlc_wallet::DlcWallet;
use crate::fee_rate_estimator::FeeRateEstimator;
use crate::message_handler::TenTenOneMessage;
use crate::node::Node;
use crate::node::Storage;
use crate::on_chain_wallet::BdkStorage;
use crate::storage::DlcStorageProvider;
use crate::storage::TenTenOneStorage;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use dlc_manager::channel::signed_channel::SignedChannel;
use dlc_manager::channel::signed_channel::SignedChannelState;
use dlc_manager::Storage as DlcStorage;
use dlc_manager::SystemTimeProvider;
use p2pd_oracle_client::P2PDOracleClient;
use std::collections::HashMap;
use std::fs;
use std::path::Path;
use std::sync::Arc;
/// The concrete `rust-dlc` manager type used by this node: the node's DLC wallet
/// acts as both wallet and blockchain provider, combined with the node's storage,
/// oracle client, system clock and fee-rate estimator.
pub type DlcManager<D, S, N> = dlc_manager::manager::Manager<
    Arc<DlcWallet<D, S, N>>,
    Arc<DlcWallet<D, S, N>>,
    Arc<DlcStorageProvider<S>>,
    Arc<P2PDOracleClient>,
    Arc<SystemTimeProvider>,
    Arc<FeeRateEstimator>,
>;
/// Construct the [`DlcManager`] for this node.
///
/// Ensures the `offers` directory exists under `data_dir` and wires the wallet,
/// storage, oracles, clock and fee-rate estimator into the manager.
pub fn build<D: BdkStorage, S: TenTenOneStorage, N: Storage>(
    data_dir: &Path,
    wallet: Arc<DlcWallet<D, S, N>>,
    dlc_storage: Arc<DlcStorageProvider<S>>,
    p2pdoracles: Vec<Arc<P2PDOracleClient>>,
    fee_rate_estimator: Arc<FeeRateEstimator>,
) -> Result<DlcManager<D, S, N>> {
    fs::create_dir_all(data_dir.join("offers"))?;
    // Key each oracle client by its public key.
    let oracles: HashMap<_, _> = p2pdoracles
        .into_iter()
        .map(|oracle| (oracle.public_key, oracle))
        .collect();
    // FIXME: We need to do this to ensure that we can upgrade `Node`s from LDK 0.0.114 to 0.0.116.
    // We should remove this workaround as soon as possible.
    if let Err(e) = dlc_storage.get_chain_monitor() {
        tracing::error!("Failed to load DLC ChainMonitor from storage: {e:#}");
        tracing::info!("Overwriting old DLC ChainMonitor with empty one to be able to proceed");
        dlc_storage.persist_chain_monitor(&dlc_manager::chain_monitor::ChainMonitor::new(0))?;
    }
    DlcManager::new(
        wallet.clone(),
        wallet,
        dlc_storage,
        oracles,
        Arc::new(SystemTimeProvider {}),
        fee_rate_estimator,
    )
    .context("Failed to initialise DlcManager")
}
impl<D: BdkStorage, S: TenTenOneStorage + 'static, N: Storage + Sync + Send + 'static>
    Node<D, S, N>
{
    /// Feed an incoming [`TenTenOneMessage`] from `node_id` into the DLC manager and
    /// return the reply (if any) that should be sent back to the peer.
    pub fn process_tentenone_message(
        &self,
        message: TenTenOneMessage,
        node_id: PublicKey,
    ) -> Result<Option<TenTenOneMessage>> {
        let raw_response = self
            .dlc_manager
            .on_dlc_message(&message.clone().into(), to_secp_pk_29(node_id))?;
        let response = match raw_response {
            None => None,
            Some(inner) => {
                // Carry over the order id and reason of the triggering message.
                let reply = TenTenOneMessage::build_from_response(
                    inner,
                    message.get_order_id(),
                    message.get_order_reason(),
                )?;
                Some(reply)
            }
        };
        Ok(response)
    }
}
pub fn signed_channel_state_name(signed_channel: &SignedChannel) -> String {
let name = match signed_channel.state {
SignedChannelState::Established { .. } => "Established",
SignedChannelState::SettledOffered { .. } => "SettledOffered",
SignedChannelState::SettledReceived { .. } => "SettledReceived",
SignedChannelState::SettledAccepted { .. } => "SettledAccepted",
SignedChannelState::SettledConfirmed { .. } => "SettledConfirmed",
SignedChannelState::Settled { .. } => "Settled",
SignedChannelState::RenewOffered { .. } => "RenewOffered",
SignedChannelState::RenewAccepted { .. } => "RenewAccepted",
SignedChannelState::RenewConfirmed { .. } => "RenewConfirmed",
SignedChannelState::RenewFinalized { .. } => "RenewFinalized",
SignedChannelState::Closing { .. } => "Closing",
SignedChannelState::CollaborativeCloseOffered { .. } => "CollaborativeCloseOffered",
SignedChannelState::SettledClosing { .. } => "SettledClosing",
};
name.to_string()
}
impl<D: BdkStorage, S: TenTenOneStorage + 'static, N: Storage + Sync + Send + 'static>
    Node<D, S, N>
{
    /// Look up the signed DLC channel whose counterparty is `trader_id`.
    pub fn get_signed_channel_by_trader_id(&self, trader_id: PublicKey) -> Result<SignedChannel> {
        let counter_party = to_secp_pk_29(trader_id);
        self.list_signed_dlc_channels()?
            .into_iter()
            .find(|channel| channel.counter_party == counter_party)
            .context(format!(
                "Could not find a signed dlc channel for trader {trader_id}",
            ))
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/node/storage.rs | crates/xxi-node/src/node/storage.rs | use crate::transaction::Transaction;
use anyhow::Result;
use lightning::chain::transaction::OutPoint;
use lightning::sign::DelayedPaymentOutputDescriptor;
use lightning::sign::SpendableOutputDescriptor;
use lightning::sign::StaticPaymentOutputDescriptor;
use parking_lot::Mutex;
use std::collections::HashMap;
use std::sync::Arc;
/// Storage layer interface.
///
/// It exists so that consumers of [`crate::node::Node`] can define their own storage.
pub trait Storage {
// Spendable outputs
/// Add a new [`SpendableOutputDescriptor`] to the store.
fn insert_spendable_output(&self, descriptor: SpendableOutputDescriptor) -> Result<()>;
/// Get a [`SpendableOutputDescriptor`] by its [`OutPoint`].
///
/// # Returns
///
/// A [`SpendableOutputDescriptor`] if the [`OutPoint`] hash was found in the store; `Ok(None)`
/// if the [`OutPoint`] was not found in the store; an error if accessing the store failed.
fn get_spendable_output(
&self,
outpoint: &OutPoint,
) -> Result<Option<SpendableOutputDescriptor>>;
/// Delete a [`SpendableOutputDescriptor`] by its [`OutPoint`].
fn delete_spendable_output(&self, outpoint: &OutPoint) -> Result<()>;
/// Get all [`SpendableOutputDescriptor`]s stored.
fn all_spendable_outputs(&self) -> Result<Vec<SpendableOutputDescriptor>>;
// Transaction
/// Insert or update a transaction
fn upsert_transaction(&self, transaction: Transaction) -> Result<()>;
/// Get transaction by `txid`
fn get_transaction(&self, txid: &str) -> Result<Option<Transaction>>;
/// Get all transactions without fees
fn all_transactions_without_fees(&self) -> Result<Vec<Transaction>>;
}
/// An in-memory [`Storage`] implementation backed by mutex-guarded hash maps.
///
/// Nothing is persisted across restarts.
#[derive(Default, Clone)]
pub struct InMemoryStore {
    // Spendable outputs keyed by their outpoint.
    spendable_outputs: Arc<Mutex<HashMap<OutPoint, SpendableOutputDescriptor>>>,
    // Transactions keyed by their txid (string form).
    transactions: Arc<Mutex<HashMap<String, Transaction>>>,
}
impl Storage for InMemoryStore {
// Spendable outputs
fn insert_spendable_output(&self, descriptor: SpendableOutputDescriptor) -> Result<()> {
use SpendableOutputDescriptor::*;
let outpoint = match &descriptor {
// Static outputs don't need to be persisted because they pay directly to an address
// owned by the on-chain wallet
StaticOutput { .. } => return Ok(()),
DelayedPaymentOutput(DelayedPaymentOutputDescriptor { outpoint, .. }) => outpoint,
StaticPaymentOutput(StaticPaymentOutputDescriptor { outpoint, .. }) => outpoint,
};
self.spendable_outputs.lock().insert(*outpoint, descriptor);
Ok(())
}
fn get_spendable_output(
&self,
outpoint: &OutPoint,
) -> Result<Option<SpendableOutputDescriptor>> {
Ok(self.spendable_outputs.lock().get(outpoint).cloned())
}
fn delete_spendable_output(&self, outpoint: &OutPoint) -> Result<()> {
self.spendable_outputs.lock().remove(outpoint);
Ok(())
}
fn all_spendable_outputs(&self) -> Result<Vec<SpendableOutputDescriptor>> {
Ok(self.spendable_outputs.lock().values().cloned().collect())
}
// Transaction
fn upsert_transaction(&self, transaction: Transaction) -> Result<()> {
let txid = transaction.txid().to_string();
self.transactions.lock().insert(txid, transaction);
Ok(())
}
fn get_transaction(&self, txid: &str) -> Result<Option<Transaction>> {
let transaction = self.transactions.lock().get(txid).cloned();
Ok(transaction)
}
fn all_transactions_without_fees(&self) -> Result<Vec<Transaction>> {
Ok(self
.transactions
.lock()
.values()
.filter(|t| t.fee() == 0)
.cloned()
.collect())
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/node/mod.rs | crates/xxi-node/src/node/mod.rs | use crate::bitcoin_conversion::to_secp_pk_30;
use crate::blockchain::Blockchain;
use crate::dlc::TracingLogger;
use crate::dlc_custom_signer::CustomKeysManager;
use crate::dlc_wallet::DlcWallet;
use crate::fee_rate_estimator::FeeRateEstimator;
use crate::message_handler::TenTenOneMessageHandler;
use crate::node::event::connect_node_event_handler_to_dlc_channel_events;
use crate::node::event::NodeEventHandler;
use crate::on_chain_wallet::BdkStorage;
use crate::on_chain_wallet::FeeConfig;
use crate::on_chain_wallet::OnChainWallet;
use crate::seed::Bip39Seed;
use crate::shadow::Shadow;
use crate::storage::DlcChannelEvent;
use crate::storage::DlcStorageProvider;
use crate::storage::TenTenOneStorage;
use crate::PeerManager;
use anyhow::Result;
use bitcoin::address::NetworkUnchecked;
use bitcoin::secp256k1::PublicKey;
use bitcoin::secp256k1::XOnlyPublicKey;
use bitcoin::Address;
use bitcoin::Amount;
use bitcoin::Network;
use bitcoin::Txid;
use futures::future::RemoteHandle;
use futures::FutureExt;
use lightning::sign::KeysManager;
use p2pd_oracle_client::P2PDOracleClient;
use serde::Deserialize;
use serde::Serialize;
use serde_with::serde_as;
use serde_with::DurationSeconds;
use std::fmt;
use std::fmt::Display;
use std::fmt::Formatter;
use std::net::SocketAddr;
use std::path::Path;
use std::str::from_utf8;
use std::str::FromStr;
use std::sync::mpsc;
use std::sync::Arc;
use std::time::Duration;
use std::time::SystemTime;
use tokio::sync::RwLock;
use tokio::task::spawn_blocking;
mod connection;
mod dlc_manager;
mod oracle;
mod storage;
mod wallet;
pub mod dlc_channel;
pub mod event;
pub mod peer_manager;
pub use crate::message_handler::tentenone_message_name;
pub use ::dlc_manager as rust_dlc_manager;
use ::dlc_manager::ReferenceId;
use bdk_esplora::esplora_client::OutputStatus;
use bdk_esplora::esplora_client::Tx;
pub use dlc_manager::signed_channel_state_name;
pub use dlc_manager::DlcManager;
use lightning::ln::peer_handler::ErroringMessageHandler;
use lightning::ln::peer_handler::IgnoringMessageHandler;
use lightning::ln::peer_handler::MessageHandler;
pub use oracle::OracleInfo;
use secp256k1_zkp::SECP256K1;
pub use storage::InMemoryStore;
pub use storage::Storage;
use uuid::Uuid;
/// A node.
pub struct Node<D: BdkStorage, S: TenTenOneStorage, N: Storage> {
    /// Runtime-adjustable settings; see [`Node::update_settings`].
    pub settings: Arc<RwLock<XXINodeSettings>>,
    /// The bitcoin network this node operates on.
    pub network: Network,
    pub(crate) wallet: Arc<OnChainWallet<D>>,
    pub blockchain: Arc<Blockchain<N>>,
    // Making this public is only necessary because of the collaborative revert protocol.
    pub dlc_wallet: Arc<DlcWallet<D, S, N>>,
    pub peer_manager: Arc<PeerManager<D>>,
    pub keys_manager: Arc<CustomKeysManager<D>>,
    pub fee_rate_estimator: Arc<FeeRateEstimator>,
    pub logger: Arc<TracingLogger>,
    /// This node's identity and announcement address.
    pub info: NodeInfo,
    pub dlc_manager: Arc<DlcManager<D, S, N>>,
    /// All oracles clients the node is aware of.
    pub oracles: Vec<Arc<P2PDOracleClient>>,
    pub dlc_message_handler: Arc<TenTenOneMessageHandler>,
    /// The oracle pubkey used for proposing dlc channels
    pub oracle_pubkey: XOnlyPublicKey,
    pub event_handler: Arc<NodeEventHandler>,
    // storage
    // TODO(holzeis): The node storage should get extracted to the corresponding application
    // layers.
    pub node_storage: Arc<N>,
    pub dlc_storage: Arc<DlcStorageProvider<S>>,
    // fields below are needed only to start the node
    #[allow(dead_code)]
    listen_address: SocketAddr, // Irrelevant when using websockets
}
/// Connection information identifying a node on the network.
#[derive(Debug, Clone, Copy, Deserialize, Serialize)]
pub struct NodeInfo {
    /// The node's public key.
    pub pubkey: PublicKey,
    /// The address the node can be reached at.
    pub address: SocketAddr,
    /// Whether the node is reached via websockets rather than plain TCP.
    pub is_ws: bool,
}
/// Node is running until this struct is dropped
pub struct RunningNode {
    // Background task handles; dropping them stops the corresponding tasks
    // (see [`Node::start`]).
    _handles: Vec<RemoteHandle<()>>,
}
/// Periodic-task intervals for the node, (de)serialized as whole seconds.
#[serde_as]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
pub struct XXINodeSettings {
    /// How often we sync the off chain wallet
    #[serde_as(as = "DurationSeconds")]
    pub off_chain_sync_interval: Duration,
    /// How often we sync the BDK wallet
    #[serde_as(as = "DurationSeconds")]
    pub on_chain_sync_interval: Duration,
    /// How often we update the fee rate
    #[serde_as(as = "DurationSeconds")]
    pub fee_rate_sync_interval: Duration,
    /// How often we run the [`SubChannelManager`]'s periodic check.
    #[serde_as(as = "DurationSeconds")]
    pub sub_channel_manager_periodic_check_interval: Duration,
    /// How often we sync the shadow states
    #[serde_as(as = "DurationSeconds")]
    pub shadow_sync_interval: Duration,
}
impl<D: BdkStorage, S: TenTenOneStorage + 'static, N: Storage + Sync + Send + 'static>
    Node<D, S, N>
{
    /// Replace the node's runtime settings with `new_settings`.
    pub async fn update_settings(&self, new_settings: XXINodeSettings) {
        tracing::info!(?new_settings, "Updating LnDlcNode settings");
        *self.settings.write().await = new_settings;
    }
    /// Construct a [`Node`], wiring up wallet, blockchain client, DLC storage/manager,
    /// key manager and peer manager. Does not start any background tasks — call
    /// [`Node::start`] for that.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        alias: &str,
        network: Network,
        data_dir: &Path,
        storage: S,
        node_storage: Arc<N>,
        wallet_storage: D,
        announcement_address: SocketAddr,
        listen_address: SocketAddr,
        electrs_server_url: String,
        seed: Bip39Seed,
        ephemeral_randomness: [u8; 32],
        settings: XXINodeSettings,
        oracle_clients: Vec<P2PDOracleClient>,
        oracle_pubkey: XOnlyPublicKey,
        node_event_handler: Arc<NodeEventHandler>,
        dlc_event_sender: mpsc::Sender<DlcChannelEvent>,
    ) -> Result<Self> {
        // Used both to seed the key manager and as the peer manager's timestamp.
        let time_since_unix_epoch = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?;
        let logger = Arc::new(TracingLogger {
            alias: alias.to_string(),
        });
        let fee_rate_estimator = Arc::new(FeeRateEstimator::new(network));
        let on_chain_wallet = OnChainWallet::new(
            network,
            seed.wallet_seed(),
            wallet_storage,
            fee_rate_estimator.clone(),
        )?;
        let on_chain_wallet = Arc::new(on_chain_wallet);
        let blockchain = Blockchain::new(electrs_server_url.clone(), node_storage.clone())?;
        let blockchain = Arc::new(blockchain);
        let dlc_storage = Arc::new(DlcStorageProvider::new(storage.clone(), dlc_event_sender));
        let keys_manager = {
            Arc::new(CustomKeysManager::new(
                KeysManager::new(
                    &seed.lightning_seed(),
                    time_since_unix_epoch.as_secs(),
                    time_since_unix_epoch.subsec_nanos(),
                ),
                on_chain_wallet.clone(),
            ))
        };
        let oracle_clients: Vec<Arc<P2PDOracleClient>> =
            oracle_clients.into_iter().map(Arc::new).collect();
        let dlc_wallet = DlcWallet::new(
            on_chain_wallet.clone(),
            dlc_storage.clone(),
            blockchain.clone(),
        );
        let dlc_wallet = Arc::new(dlc_wallet);
        let dlc_manager = dlc_manager::build(
            data_dir,
            dlc_wallet.clone(),
            dlc_storage.clone(),
            oracle_clients.clone(),
            fee_rate_estimator.clone(),
        )?;
        let dlc_manager = Arc::new(dlc_manager);
        let dlc_message_handler =
            Arc::new(TenTenOneMessageHandler::new(node_event_handler.clone()));
        // The DLC message handler doubles as onion and custom message handler;
        // channel messages are rejected via `ErroringMessageHandler`.
        let peer_manager: Arc<PeerManager<D>> = Arc::new(PeerManager::new(
            MessageHandler {
                chan_handler: Arc::new(ErroringMessageHandler::new()),
                route_handler: Arc::new(IgnoringMessageHandler {}),
                onion_message_handler: dlc_message_handler.clone(),
                custom_message_handler: dlc_message_handler.clone(),
            },
            time_since_unix_epoch.as_secs() as u32,
            &ephemeral_randomness,
            logger.clone(),
            keys_manager.clone(),
        ));
        let node_id = keys_manager.get_node_secret_key().public_key(SECP256K1);
        let node_info = NodeInfo {
            pubkey: to_secp_pk_30(node_id),
            address: announcement_address,
            is_ws: false,
        };
        let settings = Arc::new(RwLock::new(settings));
        Ok(Self {
            network,
            wallet: on_chain_wallet,
            blockchain,
            dlc_wallet,
            peer_manager,
            keys_manager,
            logger,
            info: node_info,
            oracles: oracle_clients,
            dlc_message_handler,
            dlc_manager,
            dlc_storage,
            node_storage,
            fee_rate_estimator,
            settings,
            listen_address,
            oracle_pubkey,
            event_handler: node_event_handler,
        })
    }
    /// Starts the background handles - if the returned handles are dropped, the
    /// background tasks are stopped.
    // TODO: Consider having handles for *all* the tasks & threads for a clean shutdown.
    pub fn start(
        &self,
        dlc_event_receiver: mpsc::Receiver<DlcChannelEvent>,
    ) -> Result<RunningNode> {
        #[cfg(feature = "ln_net_tcp")]
        let handles = vec![spawn_connection_management(
            self.peer_manager.clone(),
            self.listen_address,
        )];
        #[cfg(not(feature = "ln_net_tcp"))]
        let mut handles = Vec::new();
        // Shadow sync runs on a dedicated OS thread; fee-rate updates on the tokio runtime.
        std::thread::spawn(shadow_sync_periodically(
            self.settings.clone(),
            self.node_storage.clone(),
            self.wallet.clone(),
        ));
        tokio::spawn(update_fee_rate_estimates(
            self.settings.clone(),
            self.fee_rate_estimator.clone(),
        ));
        connect_node_event_handler_to_dlc_channel_events(
            self.event_handler.clone(),
            dlc_event_receiver,
        );
        tracing::info!("Node started with node ID {}", self.info);
        Ok(RunningNode { _handles: handles })
    }
    /// Send the given `amount_sats` sats to the given unchecked, on-chain `address`.
    pub async fn send_to_address(
        &self,
        address: Address<NetworkUnchecked>,
        amount_sats: u64,
        fee_config: FeeConfig,
    ) -> Result<Txid> {
        let address = address.require_network(self.network)?;
        // Building the transaction is CPU/IO-bound work, so run it off the async runtime.
        let tx = spawn_blocking({
            let wallet = self.wallet.clone();
            move || {
                let tx = wallet.build_on_chain_payment_tx(&address, amount_sats, fee_config)?;
                anyhow::Ok(tx)
            }
        })
        .await
        .expect("task to complete")?;
        let txid = self.blockchain.broadcast_transaction_blocking(&tx)?;
        Ok(txid)
    }
    /// Public keys of all currently connected peers.
    pub fn list_peers(&self) -> Vec<PublicKey> {
        self.peer_manager
            .get_peer_node_ids()
            .into_iter()
            .map(|(peer, _)| to_secp_pk_30(peer))
            .collect()
    }
    /// List the transactions paying to `address` whose matching output is still unspent,
    /// together with the output's amount. Outputs whose spend status cannot be determined
    /// are logged and skipped.
    pub async fn get_unspent_txs(&self, address: &Address) -> Result<Vec<(Tx, Amount)>> {
        let txs = self.get_utxo_for_address(address).await?;
        let mut statuses = vec![];
        for tx in txs {
            if let Some(index) = tx
                .vout
                .iter()
                .position(|vout| vout.scriptpubkey == address.script_pubkey())
            {
                match self.get_status_for_vout(&tx.txid, index as u64).await {
                    Ok(Some(status)) => {
                        if status.spent {
                            tracing::warn!(
                                txid = tx.txid.to_string(),
                                vout = index,
                                "Ignoring output as it is already spent"
                            )
                        } else {
                            let amount =
                                Amount::from_sat(tx.vout.get(index).expect("to exist").value);
                            statuses.push((tx, amount));
                        }
                    }
                    Ok(None) => {
                        tracing::warn!(
                            txid = tx.txid.to_string(),
                            vout = index,
                            "No status found for tx"
                        );
                    }
                    Err(error) => {
                        tracing::error!(
                            txid = tx.txid.to_string(),
                            vout = index,
                            "Failed at checking tx status {error:?}"
                        );
                    }
                }
            } else {
                tracing::error!(
                    txid = tx.txid.to_string(),
                    address = address.to_string(),
                    "Output not found. This should not happen, but if it does, it indicates something is wrong.");
            }
        }
        Ok(statuses)
    }
    /// Fetch all transactions involving `address` from the esplora backend.
    async fn get_utxo_for_address(&self, address: &Address) -> Result<Vec<Tx>> {
        let vec = self
            .blockchain
            .esplora_client_async
            .scripthash_txs(&address.script_pubkey(), None)
            .await?;
        Ok(vec)
    }
    /// Fetch the spend status of output `vout` of `tx_id` from the esplora backend.
    async fn get_status_for_vout(&self, tx_id: &Txid, vout: u64) -> Result<Option<OutputStatus>> {
        let status = self
            .blockchain
            .esplora_client_async
            .get_output_status(tx_id, vout)
            .await?;
        Ok(status)
    }
}
async fn update_fee_rate_estimates(
settings: Arc<RwLock<XXINodeSettings>>,
fee_rate_estimator: Arc<FeeRateEstimator>,
) {
loop {
if let Err(err) = fee_rate_estimator.update().await {
tracing::error!("Failed to update fee rate estimates: {err:#}");
}
let interval = {
let guard = settings.read().await;
guard.fee_rate_sync_interval
};
tokio::time::sleep(interval).await;
}
}
fn shadow_sync_periodically<D: BdkStorage, N: Storage>(
settings: Arc<RwLock<XXINodeSettings>>,
node_storage: Arc<N>,
wallet: Arc<OnChainWallet<D>>,
) -> impl Fn() {
let handle = tokio::runtime::Handle::current();
let shadow = Shadow::new(node_storage, wallet);
move || loop {
if let Err(e) = shadow.sync_transactions() {
tracing::error!("Failed to sync transaction shadows. Error: {e:#}");
}
let interval = handle.block_on(async {
let guard = settings.read().await;
guard.shadow_sync_interval
});
std::thread::sleep(interval);
}
}
#[cfg(feature = "ln_net_tcp")]
fn spawn_connection_management<D: BdkStorage>(
peer_manager: Arc<PeerManager<D>>,
listen_address: SocketAddr,
) -> RemoteHandle<()> {
let (fut, remote_handle) = async move {
let mut connection_handles = Vec::new();
let listener = tokio::net::TcpListener::bind(listen_address)
.await
.expect("Failed to bind to listen port");
loop {
let peer_manager = peer_manager.clone();
let (tcp_stream, addr) = match listener.accept().await {
Ok(ret) => ret,
Err(e) => {
tracing::error!("Failed to accept incoming connection: {e:#}");
continue;
}
};
tracing::debug!(%addr, "Received inbound connection");
let (fut, connection_handle) = async move {
crate::networking::tcp::setup_inbound(
peer_manager.clone(),
tcp_stream.into_std().expect("Stream conversion to succeed"),
)
.await;
}
.remote_handle();
connection_handles.push(connection_handle);
tokio::spawn(fut);
}
}
.remote_handle();
tokio::spawn(fut);
tracing::info!("Listening on {listen_address}");
remote_handle
}
impl Display for NodeInfo {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
let scheme = if self.is_ws { "ws" } else { "tcp" };
format!("{scheme}://{}@{}", self.pubkey, self.address).fmt(f)
}
}
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct ProtocolId(Uuid);
impl ProtocolId {
pub fn new() -> Self {
ProtocolId(Uuid::new_v4())
}
pub fn to_uuid(&self) -> Uuid {
self.0
}
}
impl Default for ProtocolId {
fn default() -> Self {
Self::new()
}
}
impl Display for ProtocolId {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
self.0.to_string().fmt(f)
}
}
impl From<ProtocolId> for ReferenceId {
fn from(value: ProtocolId) -> Self {
let uuid = value.to_uuid();
// 16 bytes.
let uuid_bytes = uuid.as_bytes();
// 32-digit hex string.
let hex = hex::encode(uuid_bytes);
// Derived `ReferenceId`: 32-bytes.
let hex_bytes = hex.as_bytes();
let mut array = [0u8; 32];
array.copy_from_slice(hex_bytes);
array
}
}
impl TryFrom<ReferenceId> for ProtocolId {
type Error = anyhow::Error;
fn try_from(value: ReferenceId) -> Result<Self> {
// 32-digit hex string.
let hex = from_utf8(&value)?;
// 16 bytes.
let uuid_bytes = hex::decode(hex)?;
let uuid = Uuid::from_slice(&uuid_bytes)?;
Ok(ProtocolId(uuid))
}
}
impl From<Uuid> for ProtocolId {
fn from(value: Uuid) -> Self {
ProtocolId(value)
}
}
impl From<ProtocolId> for Uuid {
fn from(value: ProtocolId) -> Self {
value.0
}
}
impl FromStr for ProtocolId {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self> {
let uuid = Uuid::from_str(s)?;
Ok(ProtocolId(uuid))
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/node/wallet.rs | crates/xxi-node/src/node/wallet.rs | use crate::bitcoin_conversion::to_secp_sk_30;
use crate::node::Node;
use crate::node::Storage;
use crate::on_chain_wallet::BdkStorage;
use crate::on_chain_wallet::FeeConfig;
use crate::on_chain_wallet::OnChainWallet;
use crate::on_chain_wallet::TransactionDetails;
use crate::storage::TenTenOneStorage;
use anyhow::Context;
use anyhow::Result;
use bdk_esplora::EsploraAsyncExt;
use bitcoin::secp256k1::SecretKey;
use bitcoin::Address;
use bitcoin::Amount;
use bitcoin::OutPoint;
use bitcoin::ScriptBuf;
use bitcoin::TxOut;
use std::sync::Arc;
use tokio::task::spawn_blocking;
/// The number of parallel requests to be used during the on-chain sync.
///
/// This number was chosen arbitrarily.
const PARALLEL_REQUESTS_SYNC: usize = 5;
impl<D: BdkStorage, S: TenTenOneStorage, N: Storage + Send + Sync + 'static> Node<D, S, N> {
pub fn wallet(&self) -> Arc<OnChainWallet<D>> {
self.wallet.clone()
}
pub fn get_new_address(&self) -> Result<Address> {
self.wallet.get_new_address()
}
pub fn get_unused_address(&self) -> Result<Address> {
self.wallet.get_unused_address()
}
pub fn get_blockchain_height(&self) -> Result<u64> {
self.blockchain
.get_blockchain_tip()
.context("Failed to get blockchain height")
}
pub fn get_on_chain_balance(&self) -> bdk::wallet::Balance {
self.wallet.get_balance()
}
pub fn node_key(&self) -> SecretKey {
to_secp_sk_30(self.keys_manager.get_node_secret_key())
}
pub fn get_on_chain_history(&self) -> Vec<TransactionDetails> {
self.wallet.get_on_chain_history()
}
pub fn get_utxos(&self) -> Vec<(OutPoint, TxOut)> {
self.wallet.get_utxos()
}
pub fn is_mine(&self, script_pubkey: &ScriptBuf) -> bool {
self.wallet.is_mine(script_pubkey)
}
/// Estimate the fee for sending sats to the given `address` on-chain with the given fee
/// configuration.
pub fn estimate_fee(&self, address: Address, fee_config: FeeConfig) -> Result<Amount> {
self.wallet.estimate_fee(&address, fee_config)
}
/// Sync the state of the on-chain wallet against the blockchain.
pub async fn sync_on_chain_wallet(&self) -> Result<()> {
let client = &self.blockchain.esplora_client_async;
let (local_chain, unused_revealed_script_pubkeys, unconfirmed_txids, utxos) =
spawn_blocking({
let wallet = self.wallet.clone();
move || wallet.pre_sync_state()
})
.await
.expect("task to complete");
let graph_update = client
.sync(
unused_revealed_script_pubkeys,
unconfirmed_txids,
utxos,
PARALLEL_REQUESTS_SYNC,
)
.await?;
let chain_update = {
let missing_heights = graph_update.missing_heights(&local_chain);
client
.update_local_chain(local_chain.tip(), missing_heights)
.await?
};
let wallet_update = bdk::wallet::Update {
graph: graph_update.clone(),
chain: Some(chain_update),
..Default::default()
};
spawn_blocking({
let wallet = self.wallet.clone();
move || {
wallet.commit_wallet_update(wallet_update)?;
anyhow::Ok(())
}
})
.await
.expect("task to complete")?;
// On sync, we unlock our locked utxos, because we are syncing with the blockchain we will
// find already spent utxos and release those which were locked unnecessarily.
self.wallet.locked_utxos.lock().clear();
Ok(())
}
pub async fn full_sync(&self, stop_gap: usize) -> Result<()> {
let client = &self.blockchain.esplora_client_async;
tracing::info!("Running full sync of on-chain wallet");
let (local_chain, all_script_pubkeys) = spawn_blocking({
let wallet = self.wallet.clone();
move || {
let all_script_pubkeys = wallet.all_script_pubkeys();
let local_chain = wallet.local_chain();
(local_chain, all_script_pubkeys)
}
})
.await
.expect("task to complete");
let (graph_update, last_active_indices) = client
.full_scan(all_script_pubkeys, stop_gap, PARALLEL_REQUESTS_SYNC)
.await?;
let chain_update = {
let missing_heights = graph_update.missing_heights(&local_chain);
client
.update_local_chain(local_chain.tip(), missing_heights)
.await?
};
let wallet_update = bdk::wallet::Update {
graph: graph_update.clone(),
chain: Some(chain_update),
last_active_indices,
};
spawn_blocking({
let wallet = self.wallet.clone();
move || {
wallet.commit_wallet_update(wallet_update)?;
anyhow::Ok(())
}
})
.await
.expect("task to complete")?;
tracing::info!("Finished full sync of on-chain wallet");
Ok(())
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/node/peer_manager.rs | crates/xxi-node/src/node/peer_manager.rs | use anyhow::ensure;
pub fn alias_as_bytes(alias: &str) -> anyhow::Result<[u8; 32]> {
ensure!(
alias.len() <= 32,
"Node Alias can not be longer than 32 bytes"
);
let mut bytes = [0; 32];
bytes[..alias.len()].copy_from_slice(alias.as_bytes());
Ok(bytes)
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/node/dlc_channel.rs | crates/xxi-node/src/node/dlc_channel.rs | use crate::bitcoin_conversion::to_secp_pk_29;
use crate::bitcoin_conversion::to_secp_pk_30;
use crate::commons;
use crate::message_handler::FundingFeeEvent;
use crate::message_handler::TenTenOneCollaborativeCloseOffer;
use crate::message_handler::TenTenOneMessage;
use crate::message_handler::TenTenOneMessageHandler;
use crate::message_handler::TenTenOneOfferChannel;
use crate::message_handler::TenTenOneRenewOffer;
use crate::message_handler::TenTenOneRolloverOffer;
use crate::message_handler::TenTenOneSettleAccept;
use crate::message_handler::TenTenOneSettleOffer;
use crate::node::event::NodeEvent;
use crate::node::Node;
use crate::node::ProtocolId;
use crate::node::Storage as LnDlcStorage;
use crate::on_chain_wallet::BdkStorage;
use crate::storage::TenTenOneStorage;
use crate::PeerManager;
use anyhow::anyhow;
use anyhow::bail;
use anyhow::ensure;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Amount;
use dlc_manager::channel::signed_channel::SignedChannel;
use dlc_manager::channel::signed_channel::SignedChannelState;
use dlc_manager::channel::Channel;
use dlc_manager::contract::contract_input::ContractInput;
use dlc_manager::contract::Contract;
use dlc_manager::contract::ContractDescriptor;
use dlc_manager::ContractId;
use dlc_manager::DlcChannelId;
use dlc_manager::Oracle;
use dlc_manager::ReferenceId;
use dlc_manager::Storage;
use time::OffsetDateTime;
use tokio::task::spawn_blocking;
use uuid::Uuid;
impl<D: BdkStorage, S: TenTenOneStorage + 'static, N: LnDlcStorage + Sync + Send + 'static>
Node<D, S, N>
{
pub async fn propose_dlc_channel(
&self,
filled_with: commons::FilledWith,
contract_input: ContractInput,
counterparty: PublicKey,
protocol_id: ProtocolId,
fee_config: dlc::FeeConfig,
) -> Result<(ContractId, DlcChannelId)> {
tracing::info!(
trader_id = %counterparty,
oracles = ?contract_input.contract_infos[0].oracles,
"Sending DLC channel offer"
);
if let Some(channel) = self
.list_signed_dlc_channels()?
.iter()
.find(|channel| channel.counter_party == to_secp_pk_29(counterparty))
{
tracing::error!(
trader_id = %counterparty,
existing_channel_id = hex::encode(channel.channel_id),
existing_channel_state = %channel.state,
"We can't open a new channel because we still have an open dlc-channel"
);
bail!("Cant have more than one dlc channel.");
}
spawn_blocking({
let p2pd_oracles = self.oracles.clone();
let dlc_manager = self.dlc_manager.clone();
let oracles = contract_input.contract_infos[0].oracles.clone();
let event_id = oracles.event_id;
let event_handler = self.event_handler.clone();
move || {
let announcements: Vec<_> = p2pd_oracles
.into_iter()
.filter(|o| oracles.public_keys.contains(&o.public_key))
.filter_map(|oracle| oracle.get_announcement(&event_id).ok())
.collect();
ensure!(
!announcements.is_empty(),
format!("Can't propose dlc channel without oracles")
);
let offer_channel = dlc_manager.offer_channel(
&contract_input,
to_secp_pk_29(counterparty),
fee_config,
Some(protocol_id.into()),
)?;
let temporary_contract_id = offer_channel.temporary_contract_id;
let temporary_channel_id = offer_channel.temporary_channel_id;
event_handler.publish(NodeEvent::StoreDlcMessage {
peer: counterparty,
msg: TenTenOneMessage::Offer(TenTenOneOfferChannel {
offer_channel,
filled_with,
}),
});
Ok((temporary_contract_id, temporary_channel_id))
}
})
.await?
}
#[cfg(test)]
pub fn accept_dlc_channel_offer(
&self,
order_id: Uuid,
channel_id: &DlcChannelId,
) -> Result<()> {
use crate::message_handler::TenTenOneAcceptChannel;
let channel_id_hex = hex::encode(channel_id);
tracing::info!(channel_id = %channel_id_hex, "Accepting DLC channel offer");
let (accept_channel, _channel_id, _contract_id, counter_party) = self
.dlc_manager
.accept_channel(channel_id, dlc::FeeConfig::EvenSplit)?;
self.event_handler.publish(NodeEvent::SendDlcMessage {
peer: to_secp_pk_30(counter_party),
msg: TenTenOneMessage::Accept(TenTenOneAcceptChannel {
accept_channel,
order_id,
}),
});
Ok(())
}
pub async fn close_dlc_channel(
&self,
channel_id: DlcChannelId,
is_force_close: bool,
) -> Result<ProtocolId> {
let channel_id_hex = hex::encode(channel_id);
tracing::info!(
is_force_close,
channel_id = channel_id_hex,
"Closing DLC channel"
);
let channel = self
.get_signed_dlc_channel(|channel| channel.channel_id == channel_id)?
.context("DLC channel to close not found")?;
let protocol_id = ProtocolId::new();
if is_force_close {
self.force_close_dlc_channel(channel, protocol_id)?;
} else {
self.propose_dlc_channel_collaborative_close(channel, protocol_id)
.await?
}
Ok(protocol_id)
}
fn force_close_dlc_channel(
&self,
channel: SignedChannel,
protocol_id: ProtocolId,
) -> Result<()> {
let channel_id = channel.channel_id;
let channel_id_hex = hex::encode(channel_id);
tracing::info!(
channel_id = %channel_id_hex,
"Force closing DLC channel"
);
self.dlc_manager
.force_close_channel(&channel_id, Some(protocol_id.into()))?;
Ok(())
}
/// Close a DLC channel on-chain collaboratively, if there is no open position.
async fn propose_dlc_channel_collaborative_close(
&self,
channel: SignedChannel,
protocol_id: ProtocolId,
) -> Result<()> {
let counterparty = channel.counter_party;
match channel.state {
SignedChannelState::Settled { counter_payout, .. } => {
spawn_blocking({
let dlc_manager = self.dlc_manager.clone();
let event_handler = self.event_handler.clone();
move || {
tracing::info!(
counter_payout,
channel_id = hex::encode(channel.channel_id),
"Proposing collaborative close"
);
let settle_offer = dlc_manager
.offer_collaborative_close(
&channel.channel_id,
counter_payout,
Some(protocol_id.into()),
)
.context(
"Could not propose to collaboratively close the dlc channel.",
)?;
event_handler.publish(NodeEvent::SendDlcMessage {
peer: to_secp_pk_30(counterparty),
msg: TenTenOneMessage::CollaborativeCloseOffer(
TenTenOneCollaborativeCloseOffer {
collaborative_close_offer: settle_offer,
},
),
});
anyhow::Ok(())
}
})
.await??;
}
_ => {
tracing::error!( state = %channel.state, "Can't collaboratively close a channel which is not settled.");
bail!("Can't collaboratively close a channel which is not settled");
}
}
Ok(())
}
/// Collaboratively close a position within a DLC Channel
pub async fn propose_dlc_channel_collaborative_settlement(
&self,
order: commons::Order,
filled_with: commons::FilledWith,
channel_id: &DlcChannelId,
accept_settlement_amount: u64,
protocol_id: ProtocolId,
) -> Result<()> {
let channel_id_hex = hex::encode(channel_id);
tracing::info!(
channel_id = %channel_id_hex,
%accept_settlement_amount,
"Settling DLC in channel collaboratively"
);
spawn_blocking({
let dlc_manager = self.dlc_manager.clone();
let event_handler = self.event_handler.clone();
let channel_id = *channel_id;
move || {
let (settle_offer, counterparty) = dlc_manager.settle_offer(
&channel_id,
accept_settlement_amount,
Some(protocol_id.into()),
)?;
event_handler.publish(NodeEvent::StoreDlcMessage {
peer: to_secp_pk_30(counterparty),
msg: TenTenOneMessage::SettleOffer(TenTenOneSettleOffer {
settle_offer,
order,
filled_with,
}),
});
Ok(())
}
})
.await?
}
pub fn accept_dlc_channel_collaborative_close(&self, channel_id: &DlcChannelId) -> Result<()> {
let channel_id_hex = hex::encode(channel_id);
tracing::info!(channel_id = %channel_id_hex, "Accepting DLC channel collaborative close offer");
let dlc_manager = self.dlc_manager.clone();
dlc_manager.accept_collaborative_close(channel_id)?;
Ok(())
}
pub fn accept_dlc_channel_collaborative_settlement(
&self,
order_id: Uuid,
order_reason: commons::OrderReason,
channel_id: &DlcChannelId,
) -> Result<()> {
let channel_id_hex = hex::encode(channel_id);
tracing::info!(channel_id = %channel_id_hex, "Accepting DLC channel collaborative settlement");
let dlc_manager = self.dlc_manager.clone();
let (settle_accept, counterparty_pk) = dlc_manager.accept_settle_offer(channel_id)?;
self.event_handler.publish(NodeEvent::SendDlcMessage {
peer: to_secp_pk_30(counterparty_pk),
msg: TenTenOneMessage::SettleAccept(TenTenOneSettleAccept {
settle_accept,
order_id,
order_reason,
}),
});
Ok(())
}
/// Propose an update to the DLC channel based on the provided [`ContractInput`]. A
/// [`TenTenOneRenewOffer`] is sent to the counterparty, kickstarting the dlc renew protocol.
pub async fn propose_reopen_or_resize(
&self,
filled_with: commons::FilledWith,
dlc_channel_id: &DlcChannelId,
contract_input: ContractInput,
protocol_id: ProtocolId,
) -> Result<ContractId> {
tracing::info!(channel_id = %hex::encode(dlc_channel_id), "Proposing a DLC channel reopen or resize");
spawn_blocking({
let dlc_manager = self.dlc_manager.clone();
let dlc_channel_id = *dlc_channel_id;
let event_handler = self.event_handler.clone();
move || {
// Not actually needed. See https://github.com/p2pderivatives/rust-dlc/issues/149.
let counter_payout = 0;
let (renew_offer, counterparty_pubkey) = dlc_manager.renew_offer(
&dlc_channel_id,
counter_payout,
&contract_input,
Some(protocol_id.into()),
)?;
event_handler.publish(NodeEvent::StoreDlcMessage {
msg: TenTenOneMessage::RenewOffer(TenTenOneRenewOffer {
renew_offer,
filled_with,
}),
peer: to_secp_pk_30(counterparty_pubkey),
});
let offered_contracts = dlc_manager.get_store().get_contract_offers()?;
// We assume that the first `OfferedContract` we find here is the one we just
// proposed when renewing the DLC channel.
//
// TODO: Change `renew_offer` API to return the `temporary_contract_id`, like
// `offer_channel` does.
let offered_contract = offered_contracts
.iter()
.find(|contract| contract.counter_party == counterparty_pubkey)
.context(
"Could not find offered contract after proposing DLC channel update",
)?;
Ok(offered_contract.id)
}
})
.await
.map_err(|e| anyhow!("{e:#}"))?
}
/// Propose an update to the DLC channel based on the provided [`ContractInput`]. A
/// [`TenTenOneRolloverOffer`] is sent to the counterparty, kickstarting the dlc renew protocol.
pub async fn propose_rollover(
&self,
dlc_channel_id: &DlcChannelId,
contract_input: ContractInput,
protocol_id: ReferenceId,
funding_fee_events: Vec<FundingFeeEvent>,
) -> Result<ContractId> {
tracing::info!(channel_id = %hex::encode(dlc_channel_id), "Proposing a DLC channel rollover");
spawn_blocking({
let dlc_manager = self.dlc_manager.clone();
let dlc_channel_id = *dlc_channel_id;
let event_handler = self.event_handler.clone();
move || {
// Not actually needed. See https://github.com/p2pderivatives/rust-dlc/issues/149.
let counter_payout = 0;
let (renew_offer, counterparty_pubkey) = dlc_manager.renew_offer(
&dlc_channel_id,
counter_payout,
&contract_input,
Some(protocol_id),
)?;
event_handler.publish(NodeEvent::StoreDlcMessage {
msg: TenTenOneMessage::RolloverOffer(TenTenOneRolloverOffer {
renew_offer,
funding_fee_events,
}),
peer: to_secp_pk_30(counterparty_pubkey),
});
let offered_contracts = dlc_manager.get_store().get_contract_offers()?;
// We assume that the first `OfferedContract` we find here is the one we just
// proposed when renewing the DLC channel.
//
// TODO: Change `renew_offer` API to return the `temporary_contract_id`, like
// `offer_channel` does.
let offered_contract = offered_contracts
.iter()
.find(|contract| contract.counter_party == counterparty_pubkey)
.context(
"Could not find offered contract after proposing DLC channel update",
)?;
Ok(offered_contract.id)
}
})
.await
.map_err(|e| anyhow!("{e:#}"))?
}
#[cfg(test)]
/// Accept an update to the DLC channel. This can only succeed if we previously received a DLC
/// channel update offer from the the counterparty.
// The accept code has diverged on the app side (hence the #[cfg(test)]). Another hint that we
// should delete most of this crate soon.
pub fn accept_dlc_channel_update(
&self,
order_id: Uuid,
channel_id: &DlcChannelId,
) -> Result<()> {
use crate::message_handler::TenTenOneRenewAccept;
let channel_id_hex = hex::encode(channel_id);
tracing::info!(channel_id = %channel_id_hex, "Accepting DLC channel update offer");
let (renew_accept, counter_party) = self.dlc_manager.accept_renew_offer(channel_id)?;
send_dlc_message(
&self.dlc_message_handler,
&self.peer_manager,
to_secp_pk_30(counter_party),
TenTenOneMessage::RenewAccept(TenTenOneRenewAccept {
renew_accept,
order_id,
}),
);
Ok(())
}
/// Get the expiry for the [`SignedContract`] corresponding to the given [`DlcChannelId`].
///
/// Will return an error if the contract is not yet signed or confirmed on-chain.
pub fn get_expiry_for_confirmed_dlc_channel(
&self,
dlc_channel_id: &DlcChannelId,
) -> Result<OffsetDateTime> {
match self.get_contract_by_dlc_channel_id(dlc_channel_id)? {
Contract::Signed(contract) | Contract::Confirmed(contract) => {
let offered_contract = contract.accepted_contract.offered_contract;
let contract_info = offered_contract
.contract_info
.first()
.expect("contract info to exist on a signed contract");
let oracle_announcement = contract_info
.oracle_announcements
.first()
.expect("oracle announcement to exist on signed contract");
let expiry_timestamp = OffsetDateTime::from_unix_timestamp(
oracle_announcement.oracle_event.event_maturity_epoch as i64,
)?;
Ok(expiry_timestamp)
}
state => bail!(
"Confirmed contract not found for channel ID: {} which was in state {state:?}",
hex::encode(dlc_channel_id)
),
}
}
pub fn get_dlc_channel_by_reference_id(&self, reference_id: ReferenceId) -> Result<Channel> {
let channels = self.list_dlc_channels()?;
channels
.into_iter()
.find(|channel| channel.get_reference_id() == Some(reference_id))
.context("Couldn't find channel by reference id")
}
/// Get the DLC [`Channel`] by its [`DlcChannelId`].
pub fn get_dlc_channel_by_id(&self, dlc_channel_id: &DlcChannelId) -> Result<Channel> {
self.dlc_manager
.get_store()
.get_channel(dlc_channel_id)?
.with_context(|| {
format!(
"Couldn't find channel by id {}",
hex::encode(dlc_channel_id)
)
})
}
pub fn get_signed_dlc_channel_by_counterparty(
&self,
counterparty_pk: &PublicKey,
) -> Result<Option<SignedChannel>> {
self.get_signed_dlc_channel(|signed_channel| {
signed_channel.counter_party == to_secp_pk_29(*counterparty_pk)
})
}
pub fn get_contract_by_id(&self, contract_id: &ContractId) -> Result<Option<Contract>> {
let contract = self.dlc_manager.get_store().get_contract(contract_id)?;
Ok(contract)
}
/// Fetch the [`Contract`] corresponding to the given [`DlcChannelId`].
pub fn get_contract_by_dlc_channel_id(
&self,
dlc_channel_id: &DlcChannelId,
) -> Result<Contract> {
let channel = self.get_dlc_channel_by_id(dlc_channel_id)?;
let contract_id = channel
.get_contract_id()
.context("Could not find contract id")?;
self.dlc_manager
.get_store()
.get_contract(&contract_id)?
.with_context(|| {
format!(
"Couldn't find dlc channel with id: {}",
hex::encode(dlc_channel_id)
)
})
}
pub fn get_established_dlc_channel(&self, pubkey: &PublicKey) -> Result<Option<SignedChannel>> {
let matcher = |dlc_channel: &&SignedChannel| {
dlc_channel.counter_party == to_secp_pk_29(*pubkey)
&& matches!(&dlc_channel.state, SignedChannelState::Established { .. })
};
let dlc_channel = self.get_signed_dlc_channel(&matcher)?;
Ok(dlc_channel)
}
fn get_signed_dlc_channel(
&self,
matcher: impl FnMut(&&SignedChannel) -> bool,
) -> Result<Option<SignedChannel>> {
let dlc_channels = self.list_signed_dlc_channels()?;
let dlc_channel = dlc_channels.iter().find(matcher);
Ok(dlc_channel.cloned())
}
pub fn list_signed_dlc_channels(&self) -> Result<Vec<SignedChannel>> {
let dlc_channels = self.dlc_manager.get_store().get_signed_channels(None)?;
Ok(dlc_channels)
}
pub fn get_dlc_channel(
&self,
matcher: impl FnMut(&&Channel) -> bool,
) -> Result<Option<Channel>> {
let dlc_channels = self.list_dlc_channels()?;
let dlc_channel = dlc_channels.iter().find(matcher);
Ok(dlc_channel.cloned())
}
pub fn list_dlc_channels(&self) -> Result<Vec<Channel>> {
let dlc_channels = self.dlc_manager.get_store().get_channels()?;
Ok(dlc_channels)
}
/// Checks if the underlying contract of the signed channel has been confirmed. If not a
/// periodic check is run to ensure we are on the latest state and return the corresponding
/// response.
pub async fn check_if_signed_channel_is_confirmed(&self, trader: PublicKey) -> Result<bool> {
let signed_channel = self.get_signed_channel_by_trader_id(trader)?;
if !self.is_dlc_channel_confirmed(&signed_channel.channel_id)? {
self.sync_on_chain_wallet().await?;
spawn_blocking({
let dlc_manager = self.dlc_manager.clone();
move || dlc_manager.periodic_check()
})
.await
.expect("task to complete")?;
return self.is_dlc_channel_confirmed(&signed_channel.channel_id);
}
Ok(true)
}
fn is_contract_confirmed(&self, contract_id: &ContractId) -> Result<bool> {
let contract = self
.get_contract_by_id(contract_id)?
.context("Could not find contract for signed channel in state Established.")?;
Ok(matches!(contract, Contract::Confirmed { .. }))
}
fn is_dlc_channel_confirmed(&self, dlc_channel_id: &DlcChannelId) -> Result<bool> {
let channel = self.get_dlc_channel_by_id(dlc_channel_id)?;
let confirmed = match channel {
Channel::Signed(signed_channel) => match signed_channel.state {
SignedChannelState::Established {
signed_contract_id, ..
} => self.is_contract_confirmed(&signed_contract_id)?,
_ => true,
},
Channel::Offered(_)
| Channel::Accepted(_)
| Channel::FailedAccept(_)
| Channel::FailedSign(_)
| Channel::Cancelled(_) => false,
Channel::Closing(_)
| Channel::SettledClosing(_)
| Channel::Closed(_)
| Channel::CounterClosed(_)
| Channel::ClosedPunished(_)
| Channel::CollaborativelyClosed(_) => true,
};
Ok(confirmed)
}
/// Return the usable balance for all the DLC channels.
pub fn get_dlc_channels_usable_balance(&self) -> Result<Amount> {
self.list_signed_dlc_channels()?
.iter()
.try_fold(Amount::ZERO, |acc, channel| {
let balance = self.get_dlc_channel_usable_balance(&channel.channel_id)?;
Ok(acc + balance)
})
}
/// Return the usable counterparty balance for all the DLC channels.
pub fn get_dlc_channels_usable_balance_counterparty(&self) -> Result<Amount> {
self.list_signed_dlc_channels()?
.iter()
.try_fold(Amount::ZERO, |acc, channel| {
let balance =
self.get_dlc_channel_usable_balance_counterparty(&channel.channel_id)?;
Ok(acc + balance)
})
}
pub fn signed_dlc_channel_total_collateral(&self, channel_id: &DlcChannelId) -> Result<Amount> {
let channel = self.get_dlc_channel_by_id(channel_id)?;
match channel {
Channel::Signed(channel) => Ok(Amount::from_sat(
channel.own_params.collateral + channel.counter_params.collateral,
)),
_ => bail!("DLC channel {} not signed", hex::encode(channel_id)),
}
}
/// Return the usable balance for the DLC channel.
///
/// Usable balance excludes all balance which is being wagered in DLCs. It also excludes some
/// reserved funds to be used when the channel is closed on-chain.
pub fn get_dlc_channel_usable_balance(&self, channel_id: &DlcChannelId) -> Result<Amount> {
let dlc_channel = self.get_dlc_channel_by_id(channel_id)?;
let usable_balance = match dlc_channel {
Channel::Signed(SignedChannel {
state: SignedChannelState::Settled { own_payout, .. },
..
}) => {
// We settled the position inside the DLC channel.
Amount::from_sat(own_payout)
}
Channel::Signed(SignedChannel {
state: SignedChannelState::SettledOffered { counter_payout, .. },
own_params,
counter_params,
..
})
| Channel::Signed(SignedChannel {
state: SignedChannelState::SettledReceived { counter_payout, .. },
own_params,
counter_params,
..
})
| Channel::Signed(SignedChannel {
state: SignedChannelState::SettledAccepted { counter_payout, .. },
own_params,
counter_params,
..
})
| Channel::Signed(SignedChannel {
state: SignedChannelState::SettledConfirmed { counter_payout, .. },
own_params,
counter_params,
..
}) => {
// We haven't settled the DLC off-chain yet, but we are optimistic that the
// protocol will complete. Hence, the usable balance is the
// total collateral minus what the counterparty gets.
Amount::from_sat(own_params.collateral + counter_params.collateral - counter_payout)
}
Channel::Signed(SignedChannel {
state: SignedChannelState::CollaborativeCloseOffered { counter_payout, .. },
own_params,
counter_params,
..
}) => {
// The channel is not yet closed. Hence, we keep showing the channel balance.
Amount::from_sat(own_params.collateral + counter_params.collateral - counter_payout)
}
// For all other cases we can rely on the `Contract`, since
// `SignedChannelState::get_contract_id` will return a `ContractId` for
// them.
_ => self.get_contract_own_usable_balance(&dlc_channel)?,
};
Ok(usable_balance)
}
/// Return the usable balance for the DLC channel, for the counterparty.
///
/// Usable balance excludes all balance which is being wagered in DLCs. It also excludes some
/// reserved funds to be used when the channel is closed on-chain.
pub fn get_dlc_channel_usable_balance_counterparty(
    &self,
    channel_id: &DlcChannelId,
) -> Result<Amount> {
    let channel = self.get_dlc_channel_by_id(channel_id)?;

    // In every settled(-ish) state, and while a collaborative close is merely offered, the
    // counterparty's usable balance is exactly their payout under the current channel state.
    let usable_balance = match &channel {
        Channel::Signed(SignedChannel {
            state:
                // We settled the position inside the DLC channel.
                SignedChannelState::Settled { counter_payout, .. }
                // We haven't settled the DLC off-chain yet, but we are optimistic that the
                // protocol will complete.
                | SignedChannelState::SettledOffered { counter_payout, .. }
                | SignedChannelState::SettledReceived { counter_payout, .. }
                | SignedChannelState::SettledAccepted { counter_payout, .. }
                | SignedChannelState::SettledConfirmed { counter_payout, .. }
                // The channel is not yet closed.
                | SignedChannelState::CollaborativeCloseOffered { counter_payout, .. },
            ..
        }) => Amount::from_sat(*counter_payout),
        // For all other cases we can rely on the `Contract`, since
        // `SignedChannelState::get_contract_id` will return a `ContractId` for
        // them.
        _ => self.get_contract_counterparty_usable_balance(&channel)?,
    };

    Ok(usable_balance)
}
/// Usable balance derived from the channel's `Contract`, from our own perspective.
fn get_contract_own_usable_balance(&self, dlc_channel: &Channel) -> Result<Amount> {
    let for_self = true;
    self.get_contract_usable_balance(dlc_channel, for_self)
}
/// Usable balance derived from the channel's `Contract`, from the counterparty's perspective.
fn get_contract_counterparty_usable_balance(&self, dlc_channel: &Channel) -> Result<Amount> {
    let for_self = false;
    self.get_contract_usable_balance(dlc_channel, for_self)
}
fn get_contract_usable_balance(
&self,
dlc_channel: &Channel,
is_balance_being_calculated_for_self: bool,
) -> Result<Amount> {
let contract_id = match dlc_channel.get_contract_id() {
Some(contract_id) => contract_id,
None => return Ok(Amount::ZERO),
};
let contract = self
.dlc_manager
.get_store()
.get_contract(&contract_id)
.context("Could not find contract associated with channel to compute usable balance")?
.context("Could not find contract associated with channel to compute usable balance")?;
// We are only including contracts that are actually established.
//
// TODO: Model other kinds of balance (e.g. pending incoming, pending outgoing)
// to avoid situations where money appears to be missing.
let signed_contract = match contract {
Contract::Signed(signed_contract) | Contract::Confirmed(signed_contract) => {
signed_contract
}
_ => return Ok(Amount::ZERO),
};
let am_i_offer_party = signed_contract
.accepted_contract
.offered_contract
.is_offer_party;
let is_balance_being_calculated_for_offer_party = if is_balance_being_calculated_for_self {
am_i_offer_party
}
// If we want the counterparty balance, their role in the protocol (offer or accept) is the
// opposite of ours.
else {
!am_i_offer_party
};
let offered_contract = signed_contract.accepted_contract.offered_contract;
let total_collateral = offered_contract.total_collateral;
let usable_balance = match &offered_contract.contract_info[0].contract_descriptor {
ContractDescriptor::Enum(_) => {
unreachable!("We are not using DLCs with enumerated outcomes");
}
ContractDescriptor::Numerical(descriptor) => {
let payouts = descriptor
.get_payouts(total_collateral)
.expect("valid payouts");
// The minimum payout for each party determines how many coins are _not_ currently
// being wagered. Since they are not being wagered, they have the potential to be
// wagered (by renewing the channel, for example) and so they are usable.
let reserve = if is_balance_being_calculated_for_offer_party {
payouts
.iter()
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | true |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/tests/mod.rs | crates/xxi-node/src/tests/mod.rs | use crate::bitcoin_conversion::to_secp_pk_29;
use crate::bitcoin_conversion::to_xonly_pk_29;
use crate::commons;
use crate::node::dlc_channel::send_dlc_message;
use crate::node::event::NodeEvent;
use crate::node::event::NodeEventHandler;
use crate::node::InMemoryStore;
use crate::node::Node;
use crate::node::NodeInfo;
use crate::node::OracleInfo;
use crate::node::RunningNode;
use crate::node::XXINodeSettings;
use crate::on_chain_wallet;
use crate::seed::Bip39Seed;
use crate::storage::DlcChannelEvent;
use crate::storage::TenTenOneInMemoryStorage;
use anyhow::Result;
use bitcoin::secp256k1::XOnlyPublicKey;
use bitcoin::Amount;
use bitcoin::Network;
use dlc_manager::contract::contract_input::ContractInput;
use dlc_manager::contract::contract_input::ContractInputInfo;
use dlc_manager::contract::contract_input::OracleInput;
use dlc_manager::contract::numerical_descriptor::NumericalDescriptor;
use dlc_manager::contract::ContractDescriptor;
use dlc_manager::payout_curve::PayoutFunction;
use dlc_manager::payout_curve::PayoutFunctionPiece;
use dlc_manager::payout_curve::PayoutPoint;
use dlc_manager::payout_curve::PolynomialPayoutCurvePiece;
use dlc_manager::payout_curve::RoundingInterval;
use dlc_manager::payout_curve::RoundingIntervals;
use futures::Future;
use rand::distributions::Alphanumeric;
use rand::thread_rng;
use rand::Rng;
use rand::RngCore;
use secp256k1::PublicKey;
use std::collections::HashMap;
use std::env::temp_dir;
use std::net::TcpListener;
use std::path::PathBuf;
use std::str::FromStr;
use std::string::ToString;
use std::sync::mpsc;
use std::sync::Arc;
use std::sync::Once;
use std::time::Duration;
use time::OffsetDateTime;
mod bitcoind;
mod dlc_channel;
const ELECTRS_ORIGIN: &str = "http://localhost:3000";
const FAUCET_ORIGIN: &str = "http://localhost:8080";
const ORACLE_ORIGIN: &str = "http://localhost:8081";
const ORACLE_PUBKEY: &str = "16f88cf7d21e6c0f46bcbc983a4e3b19726c6c98858cc31c83551a88fde171c0";
/// Install a global tracing subscriber for tests.
///
/// Guarded by a `Once` so that several tests running in the same process only
/// initialise the subscriber a single time.
fn init_tracing() {
    static TRACING_TEST_SUBSCRIBER: Once = Once::new();

    TRACING_TEST_SUBSCRIBER.call_once(|| {
        // The `\` continuations strip the newline and leading whitespace, so the
        // effective filter is a single comma-separated directive list.
        let filter = "debug,\
        hyper=warn,\
        reqwest=warn,\
        rustls=warn,\
        bdk=debug,\
        lightning::ln::peer_handler=debug,\
        lightning=trace,\
        lightning_transaction_sync=warn,\
        sled=info,\
        ureq=info";

        tracing_subscriber::fmt()
            .with_env_filter(filter)
            .with_test_writer()
            .init()
    })
}
impl Node<on_chain_wallet::InMemoryStorage, TenTenOneInMemoryStorage, InMemoryStore> {
    /// Start an app (trader) node for tests, backed entirely by in-memory storage.
    fn start_test_app(name: &str) -> Result<(Arc<Self>, RunningNode)> {
        Self::start_test(
            name,
            ELECTRS_ORIGIN.to_string(),
            OracleInfo {
                endpoint: ORACLE_ORIGIN.to_string(),
                public_key: XOnlyPublicKey::from_str(ORACLE_PUBKEY)?,
            },
            Arc::new(InMemoryStore::default()),
            xxi_node_settings_app(),
        )
    }
    /// Start a coordinator node for tests with a fresh in-memory store.
    fn start_test_coordinator(name: &str) -> Result<(Arc<Self>, RunningNode)> {
        Self::start_test_coordinator_internal(
            name,
            Arc::new(InMemoryStore::default()),
            xxi_node_settings_coordinator(),
        )
    }
    /// Start a coordinator node with caller-provided storage and settings.
    fn start_test_coordinator_internal(
        name: &str,
        storage: Arc<InMemoryStore>,
        settings: XXINodeSettings,
    ) -> Result<(Arc<Self>, RunningNode)> {
        Self::start_test(
            name,
            ELECTRS_ORIGIN.to_string(),
            OracleInfo {
                endpoint: ORACLE_ORIGIN.to_string(),
                public_key: XOnlyPublicKey::from_str(ORACLE_PUBKEY)?,
            },
            storage,
            settings,
        )
    }
    /// Common test-node bootstrap: build the node, spawn a task that forwards
    /// `NodeEvent`s to the DLC message handler, and start the node.
    #[allow(clippy::too_many_arguments)]
    fn start_test(
        name: &str,
        electrs_origin: String,
        oracle: OracleInfo,
        node_storage: Arc<InMemoryStore>,
        settings: XXINodeSettings,
    ) -> Result<(Arc<Self>, RunningNode)> {
        // Each node gets its own throwaway data directory.
        let data_dir = random_tmp_dir().join(name);
        let seed = Bip39Seed::new().expect("A valid bip39 seed");
        let mut ephemeral_randomness = [0; 32];
        thread_rng().fill_bytes(&mut ephemeral_randomness);
        // Bind to port 0 to let the OS pick a free port for this node.
        let address = {
            let listener = TcpListener::bind("0.0.0.0:0").unwrap();
            listener.local_addr().expect("To get a free local address")
        };
        let storage = TenTenOneInMemoryStorage::new();
        let wallet_storage = on_chain_wallet::InMemoryStorage::new();
        let (dlc_event_sender, dlc_event_receiver) = mpsc::channel::<DlcChannelEvent>();
        let event_handler = Arc::new(NodeEventHandler::new());
        let node = Node::new(
            name,
            Network::Regtest,
            data_dir.as_path(),
            storage,
            node_storage,
            wallet_storage,
            address,
            address,
            electrs_origin,
            seed,
            ephemeral_randomness,
            settings,
            vec![oracle.into()],
            XOnlyPublicKey::from_str(ORACLE_PUBKEY)?,
            event_handler.clone(),
            dlc_event_sender,
        )?;
        let node = Arc::new(node);
        // Background task mimicking the production event plumbing: stored DLC
        // messages are queued per peer and (re-)sent on demand.
        tokio::spawn({
            let mut receiver = event_handler.subscribe();
            let node = node.clone();
            async move {
                // Last stored DLC message per peer; `SendLastDlcMessage` replays it.
                let mut queue = HashMap::new();
                loop {
                    match receiver.recv().await {
                        Ok(NodeEvent::StoreDlcMessage { peer, msg }) => {
                            queue.insert(peer, msg);
                        }
                        Ok(NodeEvent::SendLastDlcMessage { peer }) => match queue.get(&peer) {
                            Some(msg) => {
                                send_dlc_message(
                                    &node.dlc_message_handler,
                                    &node.peer_manager,
                                    peer,
                                    msg.clone(),
                                );
                            }
                            None => {
                                tracing::warn!(%peer, "No last dlc message found in queue.");
                            }
                        },
                        Ok(NodeEvent::SendDlcMessage { peer, msg }) => {
                            send_dlc_message(
                                &node.dlc_message_handler,
                                &node.peer_manager,
                                peer,
                                msg,
                            );
                        }
                        Ok(NodeEvent::Connected { .. }) => {} // ignored
                        Ok(NodeEvent::DlcChannelEvent { .. }) => {} // ignored
                        Err(_) => {
                            tracing::error!(
                                "Failed to receive message from node event handler channel."
                            );
                            break;
                        }
                    }
                }
            }
        });
        let running = node.start(dlc_event_receiver)?;
        tracing::debug!(%name, info = %node.info, "Node started");
        Ok((node, running))
    }
    /// Trigger on-chain and off-chain wallet syncs.
    ///
    /// We wrap the wallet sync with a `block_in_place` to avoid blocking the async task in
    /// `tokio::test`s.
    ///
    /// Because we use `block_in_place`, we must configure the `tokio::test`s with `flavor =
    /// "multi_thread"`.
    async fn sync_wallets(&self) -> Result<()> {
        self.sync_on_chain_wallet().await
    }
    /// Fund this node's on-chain wallet with `amount` split evenly across `n_utxos`
    /// UTXOs, then wait (up to 30s) until the confirmed balance reflects it.
    async fn fund(&self, amount: Amount, n_utxos: u64) -> Result<()> {
        let starting_balance = self.get_confirmed_balance();
        let expected_balance = starting_balance + amount.to_sat();
        // we mine blocks so that the internal wallet in bitcoind has enough utxos to fund the
        // wallet
        bitcoind::mine(n_utxos as u16 + 1).await?;
        for _ in 0..n_utxos {
            let address = self.wallet.get_new_address().unwrap();
            bitcoind::fund(
                address.to_string(),
                Amount::from_sat(amount.to_sat() / n_utxos),
            )
            .await?;
        }
        bitcoind::mine(1).await?;
        // Poll the wallet until the funding is confirmed or the timeout elapses.
        tokio::time::timeout(Duration::from_secs(30), async {
            while self.get_confirmed_balance() < expected_balance {
                let interval = Duration::from_millis(200);
                self.sync_wallets().await.unwrap();
                tokio::time::sleep(interval).await;
                tracing::debug!(
                    ?interval,
                    "Checking if wallet has been funded after interval"
                );
            }
        })
        .await?;
        Ok(())
    }
    /// Confirmed on-chain balance in satoshis.
    fn get_confirmed_balance(&self) -> u64 {
        self.get_on_chain_balance().confirmed
    }
    /// Disconnect from `peer` at the peer-manager level.
    pub fn disconnect(&self, peer: NodeInfo) {
        self.peer_manager
            .disconnect_by_node_id(to_secp_pk_29(peer.pubkey))
    }
    /// Disconnect from `peer`, wait a second, and connect again.
    pub async fn reconnect(&self, peer: NodeInfo) -> Result<()> {
        self.disconnect(peer);
        tokio::time::sleep(Duration::from_secs(1)).await;
        self.connect_once(peer).await?;
        Ok(())
    }
}
/// Create a unique temporary directory path for a test node.
///
/// On GitHub Actions the runner-provided `RUNNER_TEMP` directory is used as the
/// base; otherwise the system temporary directory is used. A random 10-character
/// alphanumeric suffix makes the path unique per invocation.
fn random_tmp_dir() -> PathBuf {
    let base = match std::env::var("RUNNER_TEMP") {
        Ok(tmp) => {
            tracing::debug!("Running test on github actions - using temporary directory at {tmp}");
            PathBuf::from(tmp)
        }
        Err(_) => temp_dir(),
    };

    let suffix: String = thread_rng()
        .sample_iter(&Alphanumeric)
        .take(10)
        .map(char::from)
        .collect();

    let tmp = base.join(suffix);

    tracing::debug!(
        path = %tmp.to_str().expect("to be a valid path"),
        "Generated temporary directory string"
    );

    tmp
}
/// Poll `predicate_fn` every 100ms until it yields `Some(value)` or `timeout` elapses.
///
/// Errors from the predicate as well as the timeout itself are propagated.
async fn wait_until<P, T, F>(timeout: Duration, predicate_fn: P) -> Result<T>
where
    P: Fn() -> F,
    F: Future<Output = Result<Option<T>>>,
{
    tokio::time::timeout(timeout, async {
        loop {
            if let Some(value) = predicate_fn().await? {
                return Ok(value);
            }

            tokio::time::sleep(Duration::from_millis(100)).await;
        }
    })
    .await?
}
/// `XXINodeSettings` used by the coordinator node under test.
fn xxi_node_settings_coordinator() -> XXINodeSettings {
    XXINodeSettings {
        off_chain_sync_interval: Duration::from_secs(5),
        fee_rate_sync_interval: Duration::from_secs(20),
        sub_channel_manager_periodic_check_interval: Duration::from_secs(30),
        on_chain_sync_interval: Duration::from_secs(300),
        shadow_sync_interval: Duration::from_secs(600),
    }
}
/// `XXINodeSettings` used by the app node under test.
///
/// Currently identical to the coordinator settings.
fn xxi_node_settings_app() -> XXINodeSettings {
    XXINodeSettings {
        off_chain_sync_interval: Duration::from_secs(5),
        fee_rate_sync_interval: Duration::from_secs(20),
        sub_channel_manager_periodic_check_interval: Duration::from_secs(30),
        on_chain_sync_interval: Duration::from_secs(300),
        shadow_sync_interval: Duration::from_secs(600),
    }
}
/// Build a `ContractInput` for a dummy BTCUSD contract used in tests.
///
/// The payout curve is a three-piece function over the oracle outcome: flat at 0
/// up to 50_000, linearly increasing to `total_collateral` between 50_000 and
/// 60_000, and flat at `total_collateral` up to the maximum representable
/// outcome (2^20 - 1). The event matures 7 days from now.
fn dummy_contract_input(
    offer_collateral: u64,
    accept_collateral: u64,
    oracle_pk: XOnlyPublicKey,
    fee_rate_sats_per_vbyte: Option<u64>,
) -> ContractInput {
    let total_collateral = offer_collateral + accept_collateral;
    // Round payouts on the sloped segment so that roughly `n_cets` CETs are generated.
    let n_cets = 100;
    let rounding_mod = total_collateral / (n_cets + 1);
    let maturity_time = OffsetDateTime::now_utc() + time::Duration::days(7);
    let maturity_time = maturity_time.unix_timestamp() as u64;
    ContractInput {
        offer_collateral,
        accept_collateral,
        // Default to 2 sat/vbyte when the caller does not pin a fee rate.
        fee_rate: fee_rate_sats_per_vbyte.unwrap_or(2),
        contract_infos: vec![ContractInputInfo {
            contract_descriptor: ContractDescriptor::Numerical(NumericalDescriptor {
                payout_function: PayoutFunction::new(vec![
                    // Outcomes [0, 50_000]: payout fixed at 0.
                    PayoutFunctionPiece::PolynomialPayoutCurvePiece(
                        PolynomialPayoutCurvePiece::new(vec![
                            PayoutPoint {
                                event_outcome: 0,
                                outcome_payout: 0,
                                extra_precision: 0,
                            },
                            PayoutPoint {
                                event_outcome: 50_000,
                                outcome_payout: 0,
                                extra_precision: 0,
                            },
                        ])
                        .unwrap(),
                    ),
                    // Outcomes [50_000, 60_000]: payout rises linearly to the
                    // total collateral.
                    PayoutFunctionPiece::PolynomialPayoutCurvePiece(
                        PolynomialPayoutCurvePiece::new(vec![
                            PayoutPoint {
                                event_outcome: 50_000,
                                outcome_payout: 0,
                                extra_precision: 0,
                            },
                            PayoutPoint {
                                event_outcome: 60_000,
                                outcome_payout: total_collateral,
                                extra_precision: 0,
                            },
                        ])
                        .unwrap(),
                    ),
                    // Outcomes [60_000, 2^20 - 1]: payout capped at the total collateral.
                    PayoutFunctionPiece::PolynomialPayoutCurvePiece(
                        PolynomialPayoutCurvePiece::new(vec![
                            PayoutPoint {
                                event_outcome: 60_000,
                                outcome_payout: total_collateral,
                                extra_precision: 0,
                            },
                            PayoutPoint {
                                event_outcome: 1048575,
                                outcome_payout: total_collateral,
                                extra_precision: 0,
                            },
                        ])
                        .unwrap(),
                    ),
                ])
                .unwrap(),
                // Only round on the sloped middle segment; the flat segments are exact.
                rounding_intervals: RoundingIntervals {
                    intervals: vec![
                        RoundingInterval {
                            begin_interval: 0,
                            rounding_mod: 1,
                        },
                        RoundingInterval {
                            begin_interval: 50_000,
                            rounding_mod,
                        },
                        RoundingInterval {
                            begin_interval: 60_000,
                            rounding_mod: 1,
                        },
                    ],
                },
                difference_params: None,
                // Base-2 digit decomposition with 20 digits => outcomes in [0, 2^20 - 1].
                oracle_numeric_infos: dlc_trie::OracleNumericInfo {
                    base: 2,
                    nb_digits: vec![20],
                },
            }),
            oracles: OracleInput {
                public_keys: vec![to_xonly_pk_29(oracle_pk)],
                event_id: format!("btcusd{maturity_time}"),
                threshold: 1,
            },
        }],
    }
}
/// Build a placeholder [`commons::Order`] for tests.
pub fn dummy_order() -> commons::Order {
    // Fixed trader public key; the concrete value is irrelevant to the tests.
    let trader_id = PublicKey::from_str(
        "02d5aa8fce495f6301b466594af056a46104dcdc6d735ec4793aa43108854cbd4a",
    )
    .unwrap();

    commons::Order {
        id: Default::default(),
        price: Default::default(),
        leverage: 0.0,
        contract_symbol: commons::ContractSymbol::BtcUsd,
        trader_id,
        direction: commons::Direction::Long,
        quantity: Default::default(),
        order_type: commons::OrderType::Market,
        timestamp: OffsetDateTime::now_utc(),
        expiry: OffsetDateTime::now_utc(),
        order_state: commons::OrderState::Open,
        order_reason: commons::OrderReason::Manual,
        stable: false,
    }
}
/// Build a placeholder [`commons::FilledWith`] for tests.
pub fn dummy_filled_with() -> commons::FilledWith {
    // Fixed oracle public key; the concrete value is irrelevant to the tests.
    let oracle_pk = XOnlyPublicKey::from_str(
        "cc8a4bc64d897bddc5fbc2f670f7a8ba0b386779106cf1223c6fc5d7cd6fc115",
    )
    .unwrap();

    commons::FilledWith {
        order_id: Default::default(),
        expiry_timestamp: OffsetDateTime::now_utc(),
        oracle_pk,
        matches: vec![],
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/tests/bitcoind.rs | crates/xxi-node/src/tests/bitcoind.rs | use crate::tests;
use anyhow::bail;
use anyhow::Result;
use bitcoin::Amount;
use reqwest::Response;
use serde::Deserialize;
use std::time::Duration;
use tests::FAUCET_ORIGIN;
/// Minimal shape of a `bitcoind` JSON-RPC response; only the `result` field is used.
#[derive(Deserialize, Debug)]
struct BitcoindResponse {
    result: String,
}
/// Ask the faucet's `bitcoind` to send `amount` to `address` via `sendtoaddress`.
pub async fn fund(address: String, amount: Amount) -> Result<Response> {
    let request = format!(
        r#"{{"jsonrpc": "1.0", "method": "sendtoaddress", "params": ["{}", "{}", "", "", false, false, null, null, false, 1.0]}}"#,
        address,
        amount.to_btc(),
    );

    query(request).await
}
/// Instructs `bitcoind` to generate to address.
///
/// Mines `n_blocks` to a freshly generated address, then sleeps briefly so the
/// new blocks can be picked up by subsequent wallet syncs.
///
/// # Errors
///
/// Fails if either RPC call fails or if the `getnewaddress` response cannot be
/// decoded.
pub async fn mine(n_blocks: u16) -> Result<()> {
    tracing::debug!(n_blocks, "Mining");
    let response =
        query(r#"{"jsonrpc": "1.0", "method": "getnewaddress", "params": []}"#.to_string()).await?;
    // Previously this `unwrap`ped the decode; propagate the error instead of
    // panicking the whole test process on a malformed response.
    let response: BitcoindResponse = response.json().await?;
    query(format!(
        r#"{{"jsonrpc": "1.0", "method": "generatetoaddress", "params": [{}, "{}"]}}"#,
        n_blocks, response.result
    ))
    .await?;
    // For the mined blocks to be picked up by the subsequent wallet
    // syncs
    tokio::time::sleep(Duration::from_secs(5)).await;
    Ok(())
}
/// POST a raw JSON-RPC `query` to the faucet's bitcoind proxy.
///
/// Returns an error containing the response body when the status is not 2xx.
async fn query(query: String) -> Result<Response> {
    let response = reqwest::Client::new()
        .post(format!("{FAUCET_ORIGIN}/bitcoin"))
        .body(query)
        .send()
        .await?;

    if !response.status().is_success() {
        bail!(response.text().await?)
    }

    Ok(response)
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/tests/dlc_channel.rs | crates/xxi-node/src/tests/dlc_channel.rs | use crate::bitcoin_conversion::to_secp_pk_29;
use crate::node::dlc_channel::estimated_dlc_channel_fee_reserve;
use crate::node::event::NodeEvent;
use crate::node::InMemoryStore;
use crate::node::Node;
use crate::node::ProtocolId;
use crate::node::RunningNode;
use crate::on_chain_wallet;
use crate::storage::TenTenOneInMemoryStorage;
use crate::tests::bitcoind::mine;
use crate::tests::dummy_contract_input;
use crate::tests::dummy_filled_with;
use crate::tests::dummy_order;
use crate::tests::init_tracing;
use crate::tests::wait_until;
use bitcoin::Amount;
use dlc_manager::channel::signed_channel::SignedChannel;
use dlc_manager::channel::signed_channel::SignedChannelState;
use dlc_manager::channel::signed_channel::SignedChannelStateType;
use dlc_manager::contract::Contract;
use dlc_manager::Storage;
use std::sync::Arc;
use std::time::Duration;
/// Open a DLC channel with a position, settle it, then renew the channel with a
/// new contract and drive the renew protocol to the `Established` state on both
/// sides, all off-chain.
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn can_open_and_settle_offchain() {
    init_tracing();
    let (
        (app, _running_app),
        (coordinator, _running_coordinator),
        app_signed_channel,
        coordinator_signed_channel,
    ) = set_up_channel_with_position().await;
    let oracle_pk = *coordinator.oracle_pk().first().unwrap();
    let contract_input = dummy_contract_input(15_000, 5_000, oracle_pk, None);
    let filled_with = dummy_filled_with();
    // Coordinator proposes to reopen/resize the settled channel with a new contract.
    coordinator
        .propose_reopen_or_resize(
            filled_with.clone(),
            &coordinator_signed_channel.channel_id,
            contract_input,
            ProtocolId::new(),
        )
        .await
        .unwrap();
    coordinator
        .event_handler
        .publish(NodeEvent::SendLastDlcMessage {
            peer: app.info.pubkey,
        });
    // Wait until the app sees the renew offer.
    wait_until(Duration::from_secs(10), || async {
        app.process_incoming_messages()?;
        let dlc_channels = app
            .dlc_manager
            .get_store()
            .get_signed_channels(Some(SignedChannelStateType::RenewOffered))?;
        Ok(dlc_channels
            .iter()
            .find(|dlc_channel| dlc_channel.counter_party == to_secp_pk_29(coordinator.info.pubkey))
            .cloned())
    })
    .await
    .unwrap();
    app.accept_dlc_channel_update(filled_with.order_id, &app_signed_channel.channel_id)
        .unwrap();
    // Coordinator confirms the renewal...
    wait_until(Duration::from_secs(10), || async {
        coordinator.process_incoming_messages()?;
        let dlc_channels = coordinator
            .dlc_manager
            .get_store()
            .get_signed_channels(Some(SignedChannelStateType::RenewConfirmed))?;
        Ok(dlc_channels
            .iter()
            .find(|dlc_channel| dlc_channel.counter_party == to_secp_pk_29(app.info.pubkey))
            .cloned())
    })
    .await
    .unwrap();
    // ... the app finalizes it ...
    wait_until(Duration::from_secs(10), || async {
        app.process_incoming_messages()?;
        let dlc_channels = app
            .dlc_manager
            .get_store()
            .get_signed_channels(Some(SignedChannelStateType::RenewFinalized))?;
        Ok(dlc_channels
            .iter()
            .find(|dlc_channel| dlc_channel.counter_party == to_secp_pk_29(coordinator.info.pubkey))
            .cloned())
    })
    .await
    .unwrap();
    // ... and both parties end up with an `Established` signed channel.
    wait_until(Duration::from_secs(10), || async {
        coordinator.process_incoming_messages()?;
        let dlc_channels = coordinator
            .dlc_manager
            .get_store()
            .get_signed_channels(Some(SignedChannelStateType::Established))?;
        Ok(dlc_channels
            .iter()
            .find(|dlc_channel| dlc_channel.counter_party == to_secp_pk_29(app.info.pubkey))
            .cloned())
    })
    .await
    .unwrap();
    wait_until(Duration::from_secs(10), || async {
        app.process_incoming_messages()?;
        let dlc_channels = app
            .dlc_manager
            .get_store()
            .get_signed_channels(Some(SignedChannelStateType::Established))?;
        Ok(dlc_channels
            .iter()
            .find(|dlc_channel| dlc_channel.counter_party == to_secp_pk_29(coordinator.info.pubkey))
            .cloned())
    })
    .await
    .unwrap();
}
/// Open a DLC channel with a settled position and close it collaboratively,
/// verifying that both parties' confirmed on-chain balances increase.
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn can_open_and_collaboratively_close_channel() {
    init_tracing();
    let (
        (app, _running_app),
        (coordinator, _running_coordinator),
        app_signed_channel,
        coordinator_signed_channel,
    ) = set_up_channel_with_position().await;
    let app_on_chain_balance_before_close = app.get_on_chain_balance();
    let coordinator_on_chain_balance_before_close = coordinator.get_on_chain_balance();
    tracing::debug!("Proposing to close dlc channel collaboratively");
    // `false` => collaborative close (as opposed to a force-close).
    coordinator
        .close_dlc_channel(app_signed_channel.channel_id, false)
        .await
        .unwrap();
    // Wait until the app sees the collaborative close offer.
    wait_until(Duration::from_secs(10), || async {
        app.process_incoming_messages()?;
        let dlc_channels = app
            .dlc_manager
            .get_store()
            .get_signed_channels(Some(SignedChannelStateType::CollaborativeCloseOffered))?;
        Ok(dlc_channels
            .iter()
            .find(|dlc_channel| dlc_channel.counter_party == to_secp_pk_29(coordinator.info.pubkey))
            .cloned())
    })
    .await
    .unwrap();
    tracing::debug!("Accepting collaborative close offer");
    app.accept_dlc_channel_collaborative_close(&coordinator_signed_channel.channel_id)
        .unwrap();
    // Mine and sync until the coordinator's confirmed balance grows beyond its
    // pre-close balance, i.e. the close transaction paid out on-chain.
    wait_until(Duration::from_secs(10), || async {
        mine(1).await.unwrap();
        coordinator.sync_wallets().await?;
        let coordinator_on_chain_balances_after_close = coordinator.get_on_chain_balance();
        let coordinator_balance_changed = coordinator_on_chain_balances_after_close.confirmed
            > coordinator_on_chain_balance_before_close.confirmed;
        if coordinator_balance_changed {
            tracing::debug!(
                old_balance = coordinator_on_chain_balance_before_close.confirmed,
                new_balance = coordinator_on_chain_balances_after_close.confirmed,
                "Balance updated"
            )
        }
        Ok(coordinator_balance_changed.then_some(true))
    })
    .await
    .unwrap();
    // Same check for the app side.
    wait_until(Duration::from_secs(10), || async {
        mine(1).await.unwrap();
        app.sync_wallets().await?;
        let app_on_chain_balances_after_close = app.get_on_chain_balance();
        let app_balance_changed = app_on_chain_balances_after_close.confirmed
            > app_on_chain_balance_before_close.confirmed;
        if app_balance_changed {
            tracing::debug!(
                old_balance = app_on_chain_balance_before_close.confirmed,
                new_balance = app_on_chain_balances_after_close.confirmed,
                "Balance updated"
            )
        }
        Ok(app_balance_changed.then_some(()))
    })
    .await
    .unwrap();
}
/// Open a DLC channel with a settled position and force-close it, verifying
/// that the coordinator's channel transitions into `SettledClosing`.
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn can_open_and_force_close_settled_channel() {
    init_tracing();
    let ((app, _running_app), (coordinator, _running_coordinator), _, coordinator_signed_channel) =
        set_up_channel_with_position().await;
    tracing::debug!("Force-closing DLC channel");
    // Make sure the coordinator has a signed channel with the app before closing.
    wait_until(Duration::from_secs(10), || async {
        mine(1).await.unwrap();
        let dlc_channels = coordinator
            .dlc_manager
            .get_store()
            .get_signed_channels(None)?;
        Ok(dlc_channels
            .iter()
            .find(|dlc_channel| dlc_channel.counter_party == to_secp_pk_29(app.info.pubkey))
            .cloned())
    })
    .await
    .unwrap();
    // `true` => force-close.
    coordinator
        .close_dlc_channel(coordinator_signed_channel.channel_id, true)
        .await
        .unwrap();
    wait_until(Duration::from_secs(10), || async {
        let dlc_channels = coordinator
            .dlc_manager
            .get_store()
            .get_signed_channels(None)?;
        Ok(dlc_channels
            .iter()
            .find(|dlc_channel| {
                dlc_channel.counter_party == to_secp_pk_29(app.info.pubkey)
                    && matches!(dlc_channel.state, SignedChannelState::SettledClosing { .. })
            })
            .cloned())
    })
    .await
    .unwrap();
}
/// Verify that the DLC channel funding transaction pays exactly the requested
/// fee rate (inputs minus outputs, divided by the transaction's virtual size).
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn funding_transaction_pays_expected_fees() {
    init_tracing();
    // Arrange
    let app_dlc_collateral = Amount::from_sat(10_000);
    let coordinator_dlc_collateral = Amount::from_sat(10_000);
    let fee_rate_sats_per_vb = 2;
    // Give enough funds to app and coordinator so that each party can have their own change output.
    // This is not currently enforced by `rust-dlc`, but it will be in the near future:
    // https://github.com/p2pderivatives/rust-dlc/pull/152.
    let (app, _running_app) = start_and_fund_app(app_dlc_collateral * 2, 1).await;
    // Fix: the coordinator used to be funded based on `app_dlc_collateral`. Fund it based on
    // its own collateral requirement instead; the two values happen to coincide today, but
    // they need not stay equal.
    let (coordinator, _running_coordinator) =
        start_and_fund_coordinator(coordinator_dlc_collateral * 2, 1).await;
    // Act
    let (app_signed_channel, _) = open_channel_and_position_and_settle_position(
        app.clone(),
        coordinator.clone(),
        app_dlc_collateral,
        coordinator_dlc_collateral,
        Some(fee_rate_sats_per_vb),
    )
    .await;
    // Assert
    // Total value of all funding transaction outputs.
    let fund_tx_outputs_amount = app_signed_channel
        .fund_tx
        .output
        .iter()
        .fold(Amount::ZERO, |acc, output| {
            acc + Amount::from_sat(output.value)
        });
    // Total value of both parties' contributed inputs.
    let fund_tx_inputs_amount = Amount::from_sat(
        app_signed_channel.own_params.input_amount + app_signed_channel.counter_params.input_amount,
    );
    let fund_tx_fee = fund_tx_inputs_amount - fund_tx_outputs_amount;
    let fund_tx_weight_wu = app_signed_channel.fund_tx.weight();
    // Weight units to vbytes (4 WU per vbyte).
    let fund_tx_weight_vb = (fund_tx_weight_wu / 4) as u64;
    let fund_tx_fee_rate_sats_per_vb = fund_tx_fee.to_sat() / fund_tx_weight_vb;
    assert_eq!(fund_tx_fee_rate_sats_per_vb, fee_rate_sats_per_vb);
}
/// Verify that the funding output of a DLC channel holds both parties'
/// collateral plus the estimated fee reserve, within a few sats of rounding.
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn dlc_channel_includes_expected_fee_reserve() {
    init_tracing();
    let app_dlc_collateral = Amount::from_sat(10_000);
    let coordinator_dlc_collateral = Amount::from_sat(10_000);
    // We must fix the fee rate so that we can predict how many sats `rust-dlc` will allocate
    // for transaction fees.
    let fee_rate_sats_per_vb = 2;
    let total_fee_reserve = estimated_dlc_channel_fee_reserve(fee_rate_sats_per_vb as f64);
    let expected_fund_output_amount =
        app_dlc_collateral + coordinator_dlc_collateral + total_fee_reserve;
    let (app, _running_app) = start_and_fund_app(app_dlc_collateral * 2, 1).await;
    let (coordinator, _running_coordinator) =
        start_and_fund_coordinator(coordinator_dlc_collateral * 2, 1).await;
    let (app_signed_channel, _) = open_channel_and_position_and_settle_position(
        app.clone(),
        coordinator.clone(),
        app_dlc_collateral,
        coordinator_dlc_collateral,
        Some(fee_rate_sats_per_vb),
    )
    .await;
    let fund_output_vout = app_signed_channel.fund_output_index;
    let fund_output_amount = &app_signed_channel.fund_tx.output[fund_output_vout].value;
    // We cannot easily assert equality because both `rust-dlc` and us have to round in several
    // spots.
    let epsilon = *fund_output_amount as i64 - expected_fund_output_amount.to_sat() as i64;
    assert!(
        epsilon.abs() < 5,
        "Error out of bounds: actual {fund_output_amount} != {}",
        expected_fund_output_amount.to_sat()
    );
}
/// Start a test app node and fund its on-chain wallet with `amount` split over
/// `n_utxos` UTXOs.
async fn start_and_fund_app(
    amount: Amount,
    n_utxos: u64,
) -> (
    Arc<Node<on_chain_wallet::InMemoryStorage, TenTenOneInMemoryStorage, InMemoryStore>>,
    RunningNode,
) {
    let (node, handle) = Node::start_test_app("app").unwrap();
    node.fund(amount, n_utxos).await.unwrap();
    (node, handle)
}
/// Start a test coordinator node and fund its on-chain wallet with `amount`
/// split over `n_utxos` UTXOs.
async fn start_and_fund_coordinator(
    amount: Amount,
    n_utxos: u64,
) -> (
    Arc<Node<on_chain_wallet::InMemoryStorage, TenTenOneInMemoryStorage, InMemoryStore>>,
    RunningNode,
) {
    let (node, handle) = Node::start_test_coordinator("coordinator").unwrap();
    node.fund(amount, n_utxos).await.unwrap();
    (node, handle)
}
/// Spin up a funded app and coordinator, open a DLC channel with a position
/// between them and settle that position off-chain.
///
/// Returns both nodes together with their running handles, plus each party's
/// view of the signed channel.
async fn set_up_channel_with_position() -> (
    (
        Arc<Node<on_chain_wallet::InMemoryStorage, TenTenOneInMemoryStorage, InMemoryStore>>,
        RunningNode,
    ),
    (
        Arc<Node<on_chain_wallet::InMemoryStorage, TenTenOneInMemoryStorage, InMemoryStore>>,
        RunningNode,
    ),
    SignedChannel,
    SignedChannel,
) {
    let app_dlc_collateral = Amount::from_sat(10_000);
    let coordinator_dlc_collateral = Amount::from_sat(10_000);

    // Both parties are funded with far more than their collateral so fees are never an issue.
    let wallet_funding = Amount::from_sat(10_000_000);
    let (app, running_app) = start_and_fund_app(wallet_funding, 10).await;
    let (coordinator, running_coordinator) = start_and_fund_coordinator(wallet_funding, 10).await;

    let (app_signed_channel, coordinator_signed_channel) =
        open_channel_and_position_and_settle_position(
            app.clone(),
            coordinator.clone(),
            app_dlc_collateral,
            coordinator_dlc_collateral,
            None,
        )
        .await;

    (
        (app, running_app),
        (coordinator, running_coordinator),
        app_signed_channel,
        coordinator_signed_channel,
    )
}
/// Open a DLC channel between `coordinator` (offer party) and `app` (accept
/// party), wait for the funding transaction to confirm, and then settle the
/// position collaboratively so both channels end up in the `Settled` state.
///
/// Returns the app's and the coordinator's view of the signed channel.
async fn open_channel_and_position_and_settle_position(
    app: Arc<Node<on_chain_wallet::InMemoryStorage, TenTenOneInMemoryStorage, InMemoryStore>>,
    coordinator: Arc<
        Node<on_chain_wallet::InMemoryStorage, TenTenOneInMemoryStorage, InMemoryStore>,
    >,
    app_dlc_collateral: Amount,
    coordinator_dlc_collateral: Amount,
    fee_rate_sats_per_vbyte: Option<u64>,
) -> (SignedChannel, SignedChannel) {
    app.connect_once(coordinator.info).await.unwrap();
    let app_balance_before_sat = app.get_on_chain_balance().confirmed;
    let coordinator_balance_before_sat = coordinator.get_on_chain_balance().confirmed;
    let oracle_pk = *coordinator.oracle_pk().first().unwrap();
    let contract_input = dummy_contract_input(
        app_dlc_collateral.to_sat(),
        coordinator_dlc_collateral.to_sat(),
        oracle_pk,
        fee_rate_sats_per_vbyte,
    );
    let filled_with = dummy_filled_with();
    // Coordinator proposes the channel; the stored DLC message is then flushed to the app.
    coordinator
        .propose_dlc_channel(
            filled_with.clone(),
            contract_input,
            app.info.pubkey,
            ProtocolId::new(),
            dlc::FeeConfig::EvenSplit,
        )
        .await
        .unwrap();
    coordinator
        .event_handler
        .publish(NodeEvent::SendLastDlcMessage {
            peer: app.info.pubkey,
        });
    // Wait for the offer to arrive at the app.
    let offered_channel = wait_until(Duration::from_secs(30), || async {
        app.process_incoming_messages()?;
        let dlc_channels = app.dlc_manager.get_store().get_offered_channels()?;
        Ok(dlc_channels
            .iter()
            .find(|dlc_channel| dlc_channel.counter_party == to_secp_pk_29(coordinator.info.pubkey))
            .cloned())
    })
    .await
    .unwrap();
    app.accept_dlc_channel_offer(filled_with.order_id, &offered_channel.temporary_channel_id)
        .unwrap();
    // Both parties should now hold a signed channel referencing the other.
    let coordinator_signed_channel = wait_until(Duration::from_secs(30), || async {
        coordinator.process_incoming_messages()?;
        let dlc_channels = coordinator
            .dlc_manager
            .get_store()
            .get_signed_channels(None)?;
        Ok(dlc_channels
            .iter()
            .find(|dlc_channel| dlc_channel.counter_party == to_secp_pk_29(app.info.pubkey))
            .cloned())
    })
    .await
    .unwrap();
    let app_signed_channel = wait_until(Duration::from_secs(30), || async {
        app.process_incoming_messages()?;
        let dlc_channels = app.dlc_manager.get_store().get_signed_channels(None)?;
        Ok(dlc_channels
            .iter()
            .find(|dlc_channel| dlc_channel.counter_party == to_secp_pk_29(coordinator.info.pubkey))
            .cloned())
    })
    .await
    .unwrap();
    // Confirm the funding transaction on-chain.
    mine(dlc_manager::manager::NB_CONFIRMATIONS as u16)
        .await
        .unwrap();
    // Both on-chain balances should have decreased by at least the respective collateral.
    wait_until(Duration::from_secs(30), || async {
        app.sync_wallets().await.unwrap();
        let app_balance_after_open_sat = app.get_on_chain_balance().confirmed;
        // We don't aim to account for transaction fees exactly.
        Ok(
            (app_balance_after_open_sat <= app_balance_before_sat - app_dlc_collateral.to_sat())
                .then_some(()),
        )
    })
    .await
    .unwrap();
    wait_until(Duration::from_secs(30), || async {
        coordinator.sync_wallets().await.unwrap();
        let coordinator_balance_after_open_sat = coordinator.get_on_chain_balance().confirmed;
        // We don't aim to account for transaction fees exactly.
        Ok((coordinator_balance_after_open_sat
            <= coordinator_balance_before_sat - coordinator_dlc_collateral.to_sat())
        .then_some(()))
    })
    .await
    .unwrap();
    // Wait until both sides see the contract as `Confirmed` via their periodic checks.
    wait_until(Duration::from_secs(30), || async {
        if let Err(e) = app.dlc_manager.periodic_check() {
            tracing::error!("Failed to run DLC manager periodic check: {e:#}");
        };
        let contract = app
            .dlc_manager
            .get_store()
            .get_contract(&app_signed_channel.get_contract_id().unwrap())
            .unwrap();
        Ok(matches!(contract, Some(Contract::Confirmed(_))).then_some(()))
    })
    .await
    .unwrap();
    wait_until(Duration::from_secs(30), || async {
        coordinator.dlc_manager.periodic_check().unwrap();
        let contract = coordinator
            .dlc_manager
            .get_store()
            .get_contract(&coordinator_signed_channel.get_contract_id().unwrap())
            .unwrap();
        Ok(matches!(contract, Some(Contract::Confirmed(_))).then_some(()))
    })
    .await
    .unwrap();
    tracing::info!("DLC channel is on-chain");
    let order = dummy_order();
    // Coordinator proposes to settle the position collaboratively.
    coordinator
        .propose_dlc_channel_collaborative_settlement(
            order.clone(),
            filled_with.clone(),
            &coordinator_signed_channel.channel_id,
            coordinator_dlc_collateral.to_sat() / 2,
            ProtocolId::new(),
        )
        .await
        .unwrap();
    coordinator
        .event_handler
        .publish(NodeEvent::SendLastDlcMessage {
            peer: app.info.pubkey,
        });
    tracing::debug!("Waiting for settle offer...");
    let app_signed_channel = wait_until(Duration::from_secs(30), || async {
        app.process_incoming_messages()?;
        let dlc_channels = app
            .dlc_manager
            .get_store()
            .get_signed_channels(Some(SignedChannelStateType::SettledReceived))?;
        Ok(dlc_channels
            .iter()
            .find(|dlc_channel| dlc_channel.counter_party == to_secp_pk_29(coordinator.info.pubkey))
            .cloned())
    })
    .await
    .unwrap();
    tracing::debug!("Accepting settle offer and waiting for being settled...");
    app.accept_dlc_channel_collaborative_settlement(
        filled_with.order_id,
        order.order_reason,
        &app_signed_channel.channel_id,
    )
    .unwrap();
    app.event_handler.publish(NodeEvent::SendLastDlcMessage {
        peer: coordinator.info.pubkey,
    });
    // Drive the settle protocol: SettledAccepted (app) -> SettledConfirmed
    // (coordinator) -> Settled on both sides.
    wait_until(Duration::from_secs(10), || async {
        app.process_incoming_messages()?;
        let dlc_channels = app
            .dlc_manager
            .get_store()
            .get_signed_channels(Some(SignedChannelStateType::SettledAccepted))?;
        Ok(dlc_channels
            .iter()
            .find(|dlc_channel| dlc_channel.counter_party == to_secp_pk_29(coordinator.info.pubkey))
            .cloned())
    })
    .await
    .unwrap();
    wait_until(Duration::from_secs(10), || async {
        coordinator.process_incoming_messages()?;
        let dlc_channels = coordinator
            .dlc_manager
            .get_store()
            .get_signed_channels(Some(SignedChannelStateType::SettledConfirmed))?;
        Ok(dlc_channels
            .iter()
            .find(|dlc_channel| dlc_channel.counter_party == to_secp_pk_29(app.info.pubkey))
            .cloned())
    })
    .await
    .unwrap();
    wait_until(Duration::from_secs(10), || async {
        app.process_incoming_messages()?;
        let dlc_channels = app
            .dlc_manager
            .get_store()
            .get_signed_channels(Some(SignedChannelStateType::Settled))?;
        Ok(dlc_channels
            .iter()
            .find(|dlc_channel| dlc_channel.counter_party == to_secp_pk_29(coordinator.info.pubkey))
            .cloned())
    })
    .await
    .unwrap();
    wait_until(Duration::from_secs(10), || async {
        coordinator.process_incoming_messages()?;
        let dlc_channels = coordinator
            .dlc_manager
            .get_store()
            .get_signed_channels(Some(SignedChannelStateType::Settled))?;
        Ok(dlc_channels
            .iter()
            .find(|dlc_channel| dlc_channel.counter_party == to_secp_pk_29(app.info.pubkey))
            .cloned())
    })
    .await
    .unwrap();
    (app_signed_channel, coordinator_signed_channel)
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/networking/axum.rs | crates/xxi-node/src/networking/axum.rs | use crate::networking::DynamicSocketDescriptor;
use anyhow::Context;
use axum::extract::ws::Message;
use axum::extract::ws::WebSocket;
use futures::future::Either;
use futures::StreamExt;
use lightning::ln::peer_handler;
use lightning::ln::peer_handler::APeerManager;
use std::future;
use std::hash::Hash;
use std::hash::Hasher;
use std::net::SocketAddr;
use std::ops::ControlFlow;
use std::ops::Deref;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use tokio::sync::mpsc;
use tokio::sync::mpsc::UnboundedReceiver;
use tracing::error;
static ID_COUNTER: AtomicU64 = AtomicU64::new(0);
pub async fn setup_inbound<PM: Deref + 'static + Send + Sync + Clone>(
peer_manager: PM,
mut ws: WebSocket,
remote: SocketAddr,
) where
PM::Target: APeerManager<Descriptor = DynamicSocketDescriptor>,
{
let (task_tx, mut task_rx) = mpsc::unbounded_channel();
let mut descriptor = DynamicSocketDescriptor::Axum(SocketDescriptor {
tx: task_tx,
id: ID_COUNTER.fetch_add(1, Ordering::AcqRel),
});
if peer_manager
.as_ref()
.new_inbound_connection(descriptor.clone(), Some(remote.into()))
.is_ok()
{
let mut emit_read_events = true;
loop {
match process_messages(
&peer_manager,
&mut task_rx,
&mut ws,
&mut descriptor,
&mut emit_read_events,
)
.await
{
Ok(ControlFlow::Break(())) => break,
Ok(ControlFlow::Continue(())) => (),
Err(err) => {
error!("Disconnecting websocket with error: {err}");
peer_manager.as_ref().socket_disconnected(&descriptor);
peer_manager.as_ref().process_events();
break;
}
}
}
let _ = ws.close().await;
}
}
async fn process_messages<PM>(
peer_manager: &PM,
task_rx: &mut UnboundedReceiver<BgTaskMessage>,
ws: &mut WebSocket,
descriptor: &mut DynamicSocketDescriptor,
emit_read_events: &mut bool,
) -> Result<ControlFlow<()>, anyhow::Error>
where
PM: Deref + 'static + Send + Sync + Clone,
PM::Target: APeerManager<Descriptor = DynamicSocketDescriptor>,
{
let ws_next = if *emit_read_events {
Either::Left(ws.next())
} else {
Either::Right(future::pending())
};
tokio::select! {
task_msg = task_rx.recv() => match task_msg.context("rust-lightning SocketDescriptor dropped")? {
BgTaskMessage::SendData { data, resume_read } => {
if resume_read {
*emit_read_events = true;
}
ws.send(Message::Binary(data)).await?;
},
BgTaskMessage::Close => {
return Ok(ControlFlow::Break(()))
},
},
ws_msg = ws_next => {
let data = ws_msg.context("WS returned no data")??.into_data();
if let Ok(true) = peer_manager.as_ref().read_event(descriptor, &data) {
*emit_read_events = false; // Pause read events
}
peer_manager.as_ref().process_events();
}
}
Ok(ControlFlow::Continue(()))
}
enum BgTaskMessage {
SendData { data: Vec<u8>, resume_read: bool },
Close,
}
#[derive(Clone)]
pub struct SocketDescriptor {
tx: mpsc::UnboundedSender<BgTaskMessage>,
id: u64,
}
impl Eq for SocketDescriptor {}
impl PartialEq for SocketDescriptor {
fn eq(&self, o: &Self) -> bool {
self.id == o.id
}
}
impl Hash for SocketDescriptor {
fn hash<H: Hasher>(&self, state: &mut H) {
self.id.hash(state);
}
}
impl peer_handler::SocketDescriptor for SocketDescriptor {
fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
// See the TODO in tungstenite.rs
let _ = self.tx.send(BgTaskMessage::SendData {
data: data.to_vec(),
resume_read,
});
data.len()
}
fn disconnect_socket(&mut self) {
let _ = self.tx.send(BgTaskMessage::Close);
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/networking/tcp.rs | crates/xxi-node/src/networking/tcp.rs | // This code comes from https://github.com/bonomat/rust-lightning-p2p-derivatives/blob/main/lightning-net-tokio/src/lib.rs
// (revision fd2464374b2e826a77582c511eb65bece4403be4) and is under following license. Please
// interpret 'visible in version control' to refer to the version control of the
// rust-lightning-p2p-derivatives repository, NOT the 10101 repository. It has been modified for
// use with 10101.
//
// Original license follows:
// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! A socket handling library for those running in Tokio environments who wish to use
//! rust-lightning with native [`TcpStream`]s.
//!
//! Designed to be as simple as possible, the high-level usage is almost as simple as "hand over a
//! [`TcpStream`] and a reference to a [`PeerManager`] and the rest is handled".
//!
//! The [`PeerManager`], due to the fire-and-forget nature of this logic, must be a reference,
//! (e.g. an [`Arc`]) and must use the [`SocketDescriptor`] provided here as the [`PeerManager`]'s
//! `SocketDescriptor` implementation.
//!
//! Three methods are exposed to register a new connection for handling in [`tokio::spawn`] calls;
//! see their individual docs for details.
//!
//! [`PeerManager`]: lightning::ln::peer_handler::PeerManager
// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
#![allow(clippy::unwrap_used)]
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(rustdoc::private_intra_doc_links)]
#![deny(missing_docs)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
use crate::bitcoin_conversion::to_secp_pk_29;
use crate::networking::DynamicSocketDescriptor;
use bitcoin::secp256k1::PublicKey;
use lightning::ln::msgs::SocketAddress;
use lightning::ln::peer_handler;
use lightning::ln::peer_handler::APeerManager;
use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
use std::future::Future;
use std::hash::Hash;
use std::net::SocketAddr;
use std::net::TcpStream as StdTcpStream;
use std::ops::Deref;
use std::pin::Pin;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::sync::Mutex;
use std::task::Poll;
use std::task::{self};
use std::time::Duration;
use tokio::io;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWrite;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
use tokio::sync::mpsc;
use tokio::time;
static ID_COUNTER: AtomicU64 = AtomicU64::new(0);
// We only need to select over multiple futures in one place, and taking on the full `tokio/macros`
// dependency tree in order to do so (which has broken our MSRV before) is excessive. Instead, we
// define a trivial two- and three- select macro with the specific types we need and just use that.
pub(crate) enum SelectorOutput {
A(Option<()>),
B,
C(tokio::io::Result<usize>),
}
pub(crate) struct TwoSelector<
A: Future<Output = Option<()>> + Unpin,
B: Future<Output = Option<()>> + Unpin,
> {
pub a: A,
pub b: B,
}
impl<A: Future<Output = Option<()>> + Unpin, B: Future<Output = Option<()>> + Unpin> Future
for TwoSelector<A, B>
{
type Output = SelectorOutput;
fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<SelectorOutput> {
match Pin::new(&mut self.a).poll(ctx) {
Poll::Ready(res) => {
return Poll::Ready(SelectorOutput::A(res));
}
Poll::Pending => {}
}
match Pin::new(&mut self.b).poll(ctx) {
Poll::Ready(_) => {
return Poll::Ready(SelectorOutput::B);
}
Poll::Pending => {}
}
Poll::Pending
}
}
pub(crate) struct ThreeSelector<
A: Future<Output = Option<()>> + Unpin,
B: Future<Output = Option<()>> + Unpin,
C: Future<Output = tokio::io::Result<usize>> + Unpin,
> {
pub a: A,
pub b: B,
pub c: C,
}
impl<
A: Future<Output = Option<()>> + Unpin,
B: Future<Output = Option<()>> + Unpin,
C: Future<Output = tokio::io::Result<usize>> + Unpin,
> Future for ThreeSelector<A, B, C>
{
type Output = SelectorOutput;
fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<SelectorOutput> {
match Pin::new(&mut self.a).poll(ctx) {
Poll::Ready(res) => {
return Poll::Ready(SelectorOutput::A(res));
}
Poll::Pending => {}
}
match Pin::new(&mut self.b).poll(ctx) {
Poll::Ready(_) => {
return Poll::Ready(SelectorOutput::B);
}
Poll::Pending => {}
}
match Pin::new(&mut self.c).poll(ctx) {
Poll::Ready(res) => {
return Poll::Ready(SelectorOutput::C(res));
}
Poll::Pending => {}
}
Poll::Pending
}
}
/// Connection contains all our internal state for a connection - we hold a reference to the
/// Connection object (in an Arc<Mutex<>>) in each SocketDescriptor we create as well as in the
/// read future (which is returned by schedule_read).
struct Connection {
writer: Option<io::WriteHalf<TcpStream>>,
// Because our PeerManager is templated by user-provided types, and we can't (as far as I can
// tell) have a const RawWakerVTable built out of templated functions, we need some indirection
// between being woken up with write-ready and calling PeerManager::write_buffer_space_avail.
// This provides that indirection, with a Sender which gets handed to the PeerManager Arc on
// the schedule_read stack.
//
// An alternative (likely more effecient) approach would involve creating a RawWakerVTable at
// runtime with functions templated by the Arc<PeerManager> type, calling
// write_buffer_space_avail directly from tokio's write wake, however doing so would require
// more unsafe voodo than I really feel like writing.
write_avail: mpsc::Sender<()>,
// When we are told by rust-lightning to pause read (because we have writes backing up), we do
// so by setting read_paused. At that point, the read task will stop reading bytes from the
// socket. To wake it up (without otherwise changing its state, we can push a value into this
// Sender.
read_waker: mpsc::Sender<()>,
read_paused: bool,
rl_requested_disconnect: bool,
id: u64,
}
impl Connection {
async fn poll_event_process<PM: Deref + 'static + Send + Sync>(
peer_manager: PM,
mut event_receiver: mpsc::Receiver<()>,
) where
PM::Target: APeerManager<Descriptor = DynamicSocketDescriptor>,
{
loop {
if event_receiver.recv().await.is_none() {
return;
}
peer_manager.as_ref().process_events();
}
}
async fn schedule_read<PM: Deref + 'static + Send + Sync + Clone>(
peer_manager: PM,
us: Arc<Mutex<Self>>,
mut reader: io::ReadHalf<TcpStream>,
mut read_wake_receiver: mpsc::Receiver<()>,
mut write_avail_receiver: mpsc::Receiver<()>,
) where
PM::Target: APeerManager<Descriptor = DynamicSocketDescriptor>,
{
// Create a waker to wake up poll_event_process, above
let (event_waker, event_receiver) = mpsc::channel(1);
tokio::spawn(Self::poll_event_process(
peer_manager.clone(),
event_receiver,
));
// 4KiB is nice and big without handling too many messages all at once, giving other peers
// a chance to do some work.
let mut buf = [0; 4096];
let mut our_descriptor = DynamicSocketDescriptor::Tcp(SocketDescriptor::new(us.clone()));
// An enum describing why we did/are disconnecting:
enum Disconnect {
// Rust-Lightning told us to disconnect, either by returning an Err or by calling
// SocketDescriptor::disconnect_socket.
// In this case, we do not call peer_manager.socket_disconnected() as Rust-Lightning
// already knows we're disconnected.
CloseConnection,
// The connection was disconnected for some other reason, ie because the socket was
// closed.
// In this case, we do need to call peer_manager.socket_disconnected() to inform
// Rust-Lightning that the socket is gone.
PeerDisconnected,
}
let disconnect_type = loop {
let read_paused = {
let us_lock = us.lock().unwrap();
if us_lock.rl_requested_disconnect {
break Disconnect::CloseConnection;
}
us_lock.read_paused
};
// TODO: Drop the Box'ing of the futures once Rust has pin-on-stack support.
let select_result = if read_paused {
TwoSelector {
a: Box::pin(write_avail_receiver.recv()),
b: Box::pin(read_wake_receiver.recv()),
}
.await
} else {
ThreeSelector {
a: Box::pin(write_avail_receiver.recv()),
b: Box::pin(read_wake_receiver.recv()),
c: Box::pin(reader.read(&mut buf)),
}
.await
};
match select_result {
SelectorOutput::A(v) => {
assert!(v.is_some()); // We can't have dropped the sending end, its in the us Arc!
if peer_manager
.as_ref()
.write_buffer_space_avail(&mut our_descriptor)
.is_err()
{
break Disconnect::CloseConnection;
}
}
SelectorOutput::B => {}
SelectorOutput::C(read) => match read {
Ok(0) => break Disconnect::PeerDisconnected,
Ok(len) => {
let read_res = peer_manager
.as_ref()
.read_event(&mut our_descriptor, &buf[0..len]);
let mut us_lock = us.lock().unwrap();
match read_res {
Ok(pause_read) => {
if pause_read {
us_lock.read_paused = true;
}
}
Err(_) => break Disconnect::CloseConnection,
}
}
Err(_) => break Disconnect::PeerDisconnected,
},
}
let _ = event_waker.try_send(());
// At this point we've processed a message or two, and reset the ping timer for this
// peer, at least in the "are we still receiving messages" context, if we don't give up
// our timeslice to another task we may just spin on this peer, starving other peers
// and eventually disconnecting them for ping timeouts. Instead, we explicitly yield
// here.
let _ = tokio::task::yield_now().await;
};
let writer_option = us.lock().unwrap().writer.take();
if let Some(mut writer) = writer_option {
// If the socket is already closed, shutdown() will fail, so just ignore it.
let _ = writer.shutdown().await;
}
if let Disconnect::PeerDisconnected = disconnect_type {
peer_manager.as_ref().socket_disconnected(&our_descriptor);
peer_manager.as_ref().process_events();
}
}
fn new(
stream: StdTcpStream,
) -> (
io::ReadHalf<TcpStream>,
mpsc::Receiver<()>,
mpsc::Receiver<()>,
Arc<Mutex<Self>>,
) {
// We only ever need a channel of depth 1 here: if we returned a non-full write to the
// PeerManager, we will eventually get notified that there is room in the socket to write
// new bytes, which will generate an event. That event will be popped off the queue before
// we call write_buffer_space_avail, ensuring that we have room to push a new () if, during
// the write_buffer_space_avail() call, send_data() returns a non-full write.
let (write_avail, write_receiver) = mpsc::channel(1);
// Similarly here - our only goal is to make sure the reader wakes up at some point after
// we shove a value into the channel which comes after we've reset the read_paused bool to
// false.
let (read_waker, read_receiver) = mpsc::channel(1);
stream.set_nonblocking(true).unwrap();
let (reader, writer) = io::split(TcpStream::from_std(stream).unwrap());
(
reader,
write_receiver,
read_receiver,
Arc::new(Mutex::new(Self {
writer: Some(writer),
write_avail,
read_waker,
read_paused: false,
rl_requested_disconnect: false,
id: ID_COUNTER.fetch_add(1, Ordering::AcqRel),
})),
)
}
}
fn get_addr_from_stream(stream: &StdTcpStream) -> Option<SocketAddress> {
match stream.peer_addr() {
Ok(SocketAddr::V4(sockaddr)) => Some(SocketAddress::TcpIpV4 {
addr: sockaddr.ip().octets(),
port: sockaddr.port(),
}),
Ok(SocketAddr::V6(sockaddr)) => Some(SocketAddress::TcpIpV6 {
addr: sockaddr.ip().octets(),
port: sockaddr.port(),
}),
Err(_) => None,
}
}
/// Process incoming messages and feed outgoing messages on the provided socket generated by
/// accepting an incoming connection.
///
/// The returned future will complete when the peer is disconnected and associated handling
/// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
/// not need to poll the provided future in order to make progress.
pub fn setup_inbound<PM: Deref + 'static + Send + Sync + Clone>(
peer_manager: PM,
stream: StdTcpStream,
) -> impl Future<Output = ()>
where
PM::Target: APeerManager<Descriptor = DynamicSocketDescriptor>,
{
let remote_addr = get_addr_from_stream(&stream);
let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
#[cfg(test)]
let last_us = Arc::clone(&us);
let descriptor = DynamicSocketDescriptor::Tcp(SocketDescriptor::new(us.clone()));
let handle_opt = if peer_manager
.as_ref()
.new_inbound_connection(descriptor, remote_addr)
.is_ok()
{
Some(tokio::spawn(Connection::schedule_read(
peer_manager,
us,
reader,
read_receiver,
write_receiver,
)))
} else {
// Note that we will skip socket_disconnected here, in accordance with the PeerManager
// requirements.
None
};
async move {
if let Some(handle) = handle_opt {
if let Err(e) = handle.await {
assert!(e.is_cancelled());
} else {
// This is certainly not guaranteed to always be true - the read loop may exit
// while there are still pending write wakers that need to be woken up after the
// socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
// keep too many wakers around, this makes sense. The race should be rare (we do
// some work after shutdown()) and an error would be a major memory leak.
#[cfg(test)]
debug_assert!(Arc::try_unwrap(last_us).is_ok());
}
}
}
}
/// Process incoming messages and feed outgoing messages on the provided socket generated by
/// making an outbound connection which is expected to be accepted by a peer with the given
/// public key. The relevant processing is set to run free (via tokio::spawn).
///
/// The returned future will complete when the peer is disconnected and associated handling
/// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
/// not need to poll the provided future in order to make progress.
pub fn setup_outbound<PM: Deref + 'static + Send + Sync + Clone>(
peer_manager: PM,
their_node_id: PublicKey,
stream: StdTcpStream,
) -> impl Future<Output = ()>
where
PM::Target: APeerManager<Descriptor = DynamicSocketDescriptor>,
{
let remote_addr = get_addr_from_stream(&stream);
let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
#[cfg(test)]
let last_us = Arc::clone(&us);
let descriptor = DynamicSocketDescriptor::Tcp(SocketDescriptor::new(us.clone()));
let handle_opt = if let Ok(initial_send) = peer_manager.as_ref().new_outbound_connection(
to_secp_pk_29(their_node_id),
descriptor,
remote_addr,
) {
Some(tokio::spawn(async move {
// We should essentially always have enough room in a TCP socket buffer to send the
// initial 10s of bytes. However, tokio running in single-threaded mode will always
// fail writes and wake us back up later to write. Thus, we handle a single
// std::task::Poll::Pending but still expect to write the full set of bytes at once
// and use a relatively tight timeout.
if let Ok(Ok(())) = tokio::time::timeout(Duration::from_millis(100), async {
loop {
match SocketDescriptor::new(us.clone()).send_data(&initial_send, true) {
v if v == initial_send.len() => break Ok(()),
0 => {
write_receiver.recv().await;
// In theory we could check for if we've been instructed to disconnect
// the peer here, but its OK to just skip it - we'll check for it in
// schedule_read prior to any relevant calls into RL.
}
_ => {
tracing::error!("Failed to write first full message to socket!");
let descriptor = DynamicSocketDescriptor::Tcp(SocketDescriptor::new(
Arc::clone(&us),
));
peer_manager.as_ref().socket_disconnected(&descriptor);
break Err(());
}
}
}
})
.await
{
Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)
.await;
}
}))
} else {
// Note that we will skip socket_disconnected here, in accordance with the PeerManager
// requirements.
None
};
async move {
if let Some(handle) = handle_opt {
if let Err(e) = handle.await {
assert!(e.is_cancelled());
} else {
// This is certainly not guaranteed to always be true - the read loop may exit
// while there are still pending write wakers that need to be woken up after the
// socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
// keep too many wakers around, this makes sense. The race should be rare (we do
// some work after shutdown()) and an error would be a major memory leak.
#[cfg(test)]
debug_assert!(Arc::try_unwrap(last_us).is_ok());
}
}
}
}
/// Process incoming messages and feed outgoing messages on a new connection made to the given
/// socket address which is expected to be accepted by a peer with the given public key (by
/// scheduling futures with tokio::spawn).
///
/// Shorthand for TcpStream::connect(addr) with a timeout followed by setup_outbound().
///
/// Returns a future (as the fn is async) which needs to be polled to complete the connection and
/// connection setup. That future then returns a future which will complete when the peer is
/// disconnected and associated handling futures are freed, though, because all processing in said
/// futures are spawned with tokio::spawn, you do not need to poll the second future in order to
/// make progress.
pub async fn connect_outbound<PM: Deref + 'static + Send + Sync + Clone>(
peer_manager: PM,
their_node_id: PublicKey,
addr: SocketAddr,
) -> Option<impl Future<Output = ()>>
where
PM::Target: APeerManager<Descriptor = DynamicSocketDescriptor>,
{
if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), async {
TcpStream::connect(&addr)
.await
.map(|s| s.into_std().unwrap())
})
.await
{
Some(setup_outbound(peer_manager, their_node_id, stream))
} else {
None
}
}
const SOCK_WAKER_VTABLE: task::RawWakerVTable = task::RawWakerVTable::new(
clone_socket_waker,
wake_socket_waker,
wake_socket_waker_by_ref,
drop_socket_waker,
);
fn clone_socket_waker(orig_ptr: *const ()) -> task::RawWaker {
write_avail_to_waker(orig_ptr as *const mpsc::Sender<()>)
}
// When waking, an error should be fine. Most likely we got two send_datas in a row, both of which
// failed to fully write, but we only need to call write_buffer_space_avail() once. Otherwise, the
// sending thread may have already gone away due to a socket close, in which case there's nothing
// to wake up anyway.
fn wake_socket_waker(orig_ptr: *const ()) {
let sender = unsafe { &mut *(orig_ptr as *mut mpsc::Sender<()>) };
let _ = sender.try_send(());
drop_socket_waker(orig_ptr);
}
fn wake_socket_waker_by_ref(orig_ptr: *const ()) {
let sender_ptr = orig_ptr as *const mpsc::Sender<()>;
let sender = unsafe { (*sender_ptr).clone() };
let _ = sender.try_send(());
}
fn drop_socket_waker(orig_ptr: *const ()) {
let _orig_box = unsafe { Box::from_raw(orig_ptr as *mut mpsc::Sender<()>) };
// _orig_box is now dropped
}
fn write_avail_to_waker(sender: *const mpsc::Sender<()>) -> task::RawWaker {
let new_box = Box::leak(Box::new(unsafe { (*sender).clone() }));
let new_ptr = new_box as *const mpsc::Sender<()>;
task::RawWaker::new(new_ptr as *const (), &SOCK_WAKER_VTABLE)
}
/// The SocketDescriptor used to refer to sockets by a PeerHandler. This is pub only as it is a
/// type in the template of PeerHandler.
pub struct SocketDescriptor {
conn: Arc<Mutex<Connection>>,
id: u64,
}
impl SocketDescriptor {
fn new(conn: Arc<Mutex<Connection>>) -> Self {
let id = conn.lock().unwrap().id;
Self { conn, id }
}
}
impl peer_handler::SocketDescriptor for SocketDescriptor {
fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
// To send data, we take a lock on our Connection to access the WriteHalf of the TcpStream,
// writing to it if there's room in the kernel buffer, or otherwise create a new Waker with
// a SocketDescriptor in it which can wake up the write_avail Sender, waking up the
// processing future which will call write_buffer_space_avail and we'll end up back here.
let mut us = self.conn.lock().unwrap();
if us.writer.is_none() {
// The writer gets take()n when it is time to shut down, so just fast-return 0 here.
return 0;
}
if resume_read && us.read_paused {
// The schedule_read future may go to lock up but end up getting woken up by there
// being more room in the write buffer, dropping the other end of this Sender
// before we get here, so we ignore any failures to wake it up.
us.read_paused = false;
let _ = us.read_waker.try_send(());
}
if data.is_empty() {
return 0;
}
let waker = unsafe { task::Waker::from_raw(write_avail_to_waker(&us.write_avail)) };
let mut ctx = task::Context::from_waker(&waker);
let mut written_len = 0;
loop {
match std::pin::Pin::new(us.writer.as_mut().unwrap())
.poll_write(&mut ctx, &data[written_len..])
{
task::Poll::Ready(Ok(res)) => {
// The tokio docs *seem* to indicate this can't happen, and I certainly don't
// know how to handle it if it does (cause it should be a Poll::Pending
// instead):
assert_ne!(res, 0);
written_len += res;
if written_len == data.len() {
return written_len;
}
}
task::Poll::Ready(Err(e)) => {
// The tokio docs *seem* to indicate this can't happen, and I certainly don't
// know how to handle it if it does (cause it should be a Poll::Pending
// instead):
assert_ne!(e.kind(), io::ErrorKind::WouldBlock);
// Probably we've already been closed, just return what we have and let the
// read thread handle closing logic.
return written_len;
}
task::Poll::Pending => {
// We're queued up for a write event now, but we need to make sure we also
// pause read given we're now waiting on the remote end to ACK (and in
// accordance with the send_data() docs).
us.read_paused = true;
// Further, to avoid any current pending read causing a `read_event` call, wake
// up the read_waker and restart its loop.
let _ = us.read_waker.try_send(());
return written_len;
}
}
}
}
fn disconnect_socket(&mut self) {
let mut us = self.conn.lock().unwrap();
us.rl_requested_disconnect = true;
// Wake up the sending thread, assuming it is still alive
let _ = us.write_avail.try_send(());
}
}
impl Clone for SocketDescriptor {
fn clone(&self) -> Self {
Self {
conn: Arc::clone(&self.conn),
id: self.id,
}
}
}
impl Eq for SocketDescriptor {}
impl PartialEq for SocketDescriptor {
fn eq(&self, o: &Self) -> bool {
self.id == o.id
}
}
impl Hash for SocketDescriptor {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.id.hash(state);
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/xxi-node/src/networking/tungstenite.rs | crates/xxi-node/src/networking/tungstenite.rs | use crate::bitcoin_conversion::to_secp_pk_29;
use crate::networking::DynamicSocketDescriptor;
use crate::node::NodeInfo;
use anyhow::Context;
use futures::future::Either;
use futures::SinkExt;
use futures::StreamExt;
use lightning::ln::peer_handler;
use lightning::ln::peer_handler::APeerManager;
use std::future;
use std::future::Future;
use std::hash::Hash;
use std::hash::Hasher;
use std::ops::ControlFlow;
use std::ops::Deref;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use tokio::sync::mpsc;
use tokio::sync::mpsc::UnboundedReceiver;
use tokio_tungstenite_wasm::Message;
use tokio_tungstenite_wasm::WebSocketStream;
use tracing::error;
static ID_COUNTER: AtomicU64 = AtomicU64::new(0);
pub async fn connect_outbound<PM>(
peer_manager: PM,
node_info: NodeInfo,
) -> Option<impl Future<Output = ()>>
where
PM: Deref + 'static + Send + Sync + Clone,
PM::Target: APeerManager<Descriptor = DynamicSocketDescriptor>,
{
let url = &format!(
"ws://{}:{}",
node_info.address.ip(),
node_info.address.port()
);
let mut ws = tokio_tungstenite_wasm::connect(url)
.await
.map_err(|err| error!("error connecting to peer over websocket: {err:#?}"))
.ok()?;
let (task_tx, mut task_rx) = mpsc::unbounded_channel();
let mut descriptor = DynamicSocketDescriptor::Tungstenite(SocketDescriptor {
tx: task_tx,
id: ID_COUNTER.fetch_add(1, Ordering::AcqRel),
});
if let Ok(initial_send) = peer_manager.as_ref().new_outbound_connection(
to_secp_pk_29(node_info.pubkey),
descriptor.clone(),
Some(node_info.address.into()),
) {
ws.send(Message::Binary(initial_send))
.await
.map_err(|err| error!("error sending initial data over websocket: {err:#?}"))
.ok()?;
Some(async move {
let mut emit_read_events = true;
loop {
match process_messages(
&peer_manager,
&mut task_rx,
&mut ws,
&mut descriptor,
&mut emit_read_events,
)
.await
{
Ok(ControlFlow::Break(())) => break,
Ok(ControlFlow::Continue(())) => (),
Err(err) => {
error!("Disconnecting websocket with error: {err}");
let _ = ws.close().await;
peer_manager.as_ref().socket_disconnected(&descriptor);
peer_manager.as_ref().process_events();
break;
}
}
}
})
} else {
None
}
}
async fn process_messages<PM>(
peer_manager: &PM,
task_rx: &mut UnboundedReceiver<BgTaskMessage>,
ws: &mut WebSocketStream,
descriptor: &mut DynamicSocketDescriptor,
emit_read_events: &mut bool,
) -> Result<ControlFlow<()>, anyhow::Error>
where
PM: Deref + 'static + Send + Sync + Clone,
PM::Target: APeerManager<Descriptor = DynamicSocketDescriptor>,
{
let ws_next = if *emit_read_events {
Either::Left(ws.next())
} else {
Either::Right(future::pending())
};
tokio::select! {
task_msg = task_rx.recv() => match task_msg.context("rust-lightning SocketDescriptor dropped")? {
BgTaskMessage::SendData { data, resume_read } => {
if resume_read {
*emit_read_events = true;
}
ws.send(Message::Binary(data)).await?;
},
BgTaskMessage::Close => {
let _ = ws.close().await;
return Ok(ControlFlow::Break(()))
},
},
ws_msg = ws_next => {
let data = ws_msg.context("WS returned no data")??.into_data();
if let Ok(true) = peer_manager.as_ref().read_event(descriptor, &data) {
*emit_read_events = false; // Pause reading
}
peer_manager.as_ref().process_events();
}
}
Ok(ControlFlow::Continue(()))
}
enum BgTaskMessage {
SendData { data: Vec<u8>, resume_read: bool },
Close,
}
#[derive(Clone)]
pub struct SocketDescriptor {
tx: mpsc::UnboundedSender<BgTaskMessage>,
id: u64,
}
impl Eq for SocketDescriptor {}
impl PartialEq for SocketDescriptor {
fn eq(&self, o: &Self) -> bool {
self.id == o.id
}
}
impl Hash for SocketDescriptor {
fn hash<H: Hasher>(&self, state: &mut H) {
self.id.hash(state);
}
}
impl peer_handler::SocketDescriptor for SocketDescriptor {
    /// Queues `data` to be written by the background websocket task and
    /// reports the whole buffer as sent.
    ///
    /// NOTE(review): always claiming `data.len()` bytes were sent means
    /// backpressure towards rust-lightning is effectively disabled here; see
    /// the TODO below.
    fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
        // TODO(ws):
        // This isn't so great as we should be waiting for this to be sent before returning the
        // amount of data sent (which implies that the send operation is done). This is so that the
        // backpressure stuff works properly
        //
        // It's a little more complicated than it may seem. If we don't send all the data when
        // calling send_data then there is a function we need to call of the peer manager
        // which results in send_data being called again. At first glance you'd think you
        // can just make the 2nd a no-op, but there could be more data that's waiting to be
        // sent by then. Therefore, we need to keep track of how much we promised to send
        // and how much extra must be sent.
        // A send error means the background task is gone, i.e. the connection
        // is already dead; nothing useful to do with the error here.
        let _ = self.tx.send(BgTaskMessage::SendData {
            data: data.to_vec(),
            resume_read,
        });
        data.len()
    }
    /// Asks the background task to close the websocket. Errors are ignored
    /// for the same reason as in `send_data`.
    fn disconnect_socket(&mut self) {
        let _ = self.tx.send(BgTaskMessage::Close);
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/payout_curve/src/lib.rs | crates/payout_curve/src/lib.rs | use anyhow::ensure;
use anyhow::Context;
use anyhow::Result;
use bitcoin::Amount;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use serde::Deserialize;
use serde::Serialize;
use xxi_node::cfd::calculate_pnl;
use xxi_node::cfd::BTCUSD_MAX_PRICE;
use xxi_node::commons::Direction;
/// Factor by which we can multiply the total margin being wagered in order to get consistent
/// rounding in the middle (non-constant) part of the payout function.
///
/// E.g. with a value of 0.01 and a total margin of 20_000 sats would get payout jumps of 200 sats,
/// for a total of ~100 intervals.
///
/// TODO: We should not use the same rounding for all non-constant parts of the payout function,
/// because not all intervals are equally as likely. That way we can avoid excessive CET generation.
pub const ROUNDING_PERCENT: f32 = 0.01;
/// Number of intervals which we want to use to discretize the payout function.
const PAYOUT_CURVE_DISCRETIZATION_INTERVALS: u64 = 200;
/// A payout point representing a payout for a given outcome.
///
/// NOTE(review): this appears to mirror the payout point representation of
/// `dlc_manager::payout_curve` (see [`build_inverse_payout_function`]) —
/// confirm the `extra_precision` semantics against `rust-dlc`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct PayoutPoint {
    /// The event outcome (price, for an inverse BTCUSD contract).
    pub event_outcome: u64,
    /// The payout for the outcome, in sats.
    pub outcome_payout: u64,
    /// Extra precision to use when computing the payout.
    pub extra_precision: u16,
}
/// Margin and collateral reserve of one party to the contract.
#[derive(Clone, Copy)]
pub struct PartyParams {
    /// How many coins the party is wagering, in sats.
    margin: u64,
    /// How many coins the party is excluding from the bet, in sats.
    ///
    /// If the party gets liquidated, they get back exactly this much, in sats.
    collateral_reserve: u64,
}
impl PartyParams {
    /// Build [`PartyParams`] from [`Amount`]s; both values are stored in sats.
    pub fn new(margin: Amount, collateral_reserve: Amount) -> Self {
        Self {
            margin: margin.to_sat(),
            collateral_reserve: collateral_reserve.to_sat(),
        }
    }
    /// The party's wagered margin, in sats.
    pub fn margin(&self) -> u64 {
        self.margin
    }
    /// The sum of all the coins that the party is wagering and reserving, in sats.
    ///
    /// The separation between margin and collateral may seem superfluous, but it is necessary
    /// because this code is used in a DLC channel where all of the coins are stored in a DLC
    /// output, but where all the coins are not always meant to be at stake.
    pub fn total_collateral(&self) -> u64 {
        self.margin + self.collateral_reserve
    }
}
/// The price parameters needed to build the payout curve.
#[derive(Clone, Copy)]
pub struct PriceParams {
    // The price at the time the contract is set up.
    initial_price: Decimal,
    /// The price at which the party going long gets liquidated.
    ///
    /// This is _lower_ than the initial price.
    long_liquidation_price: Decimal,
    /// The price at which the party going short gets liquidated.
    ///
    /// This is _higher_ than the initial price.
    short_liquidation_price: Decimal,
}
impl PriceParams {
    /// Build validated [`PriceParams`] for an inverse BTCUSD contract,
    /// capping the short liquidation price at [`BTCUSD_MAX_PRICE`].
    ///
    /// NB: this takes `long_liquidation` *before* `short_liquidation`,
    /// whereas the private `new` constructor takes them in the opposite
    /// order — the swap in the call below is intentional.
    pub fn new_btc_usd(
        initial: Decimal,
        long_liquidation: Decimal,
        short_liquidation: Decimal,
    ) -> Result<Self> {
        // We cap the short liquidation at the maximum possible value of Bitcoin w.r.t to USD that
        // we support.
        let short_liquidation = short_liquidation.min(Decimal::from(BTCUSD_MAX_PRICE));
        Self::new(initial, short_liquidation, long_liquidation)
    }
    /// Construct after validating the invariant
    /// `long_liquidation <= initial <= short_liquidation`.
    fn new(
        initial: Decimal,
        short_liquidation: Decimal,
        long_liquidation: Decimal,
    ) -> Result<Self> {
        ensure!(
            long_liquidation <= initial,
            "Long liquidation price should not be greater than the initial price"
        );
        ensure!(
            initial <= short_liquidation,
            "Short liquidation price should not be smaller than the initial price"
        );
        Ok(Self {
            initial_price: initial,
            short_liquidation_price: short_liquidation,
            long_liquidation_price: long_liquidation,
        })
    }
}
/// Build a discretized payout function for an inverse perpetual future (e.g. BTCUSD) from the
/// perspective of the offer party.
///
/// Returns a `Vec<(PayoutPoint, PayoutPoint)>`, with the first element of the tuple being the start
/// of the interval and the second element of the tuple being the end of the interval.
///
/// Each tuple is meant to map to one [`dlc_manager::payout_curve::PolynomialPayoutCurvePiece`] when
/// building the corresponding [`dlc_manager::payout_curve::PayoutFunction`].
pub fn build_inverse_payout_function(
    // The number of contracts.
    quantity: f32,
    offer_party: PartyParams,
    accept_party: PartyParams,
    price_params: PriceParams,
    offer_party_direction: Direction,
) -> Result<Vec<(PayoutPoint, PayoutPoint)>> {
    let mut pieces = vec![];
    let total_collateral = offer_party.total_collateral() + accept_party.total_collateral();
    // Map the per-party reserves onto the long/short sides, depending on
    // which side the offer party takes.
    let (collateral_reserve_long, collateral_reserve_short) = match offer_party_direction {
        Direction::Long => (
            offer_party.collateral_reserve,
            accept_party.collateral_reserve,
        ),
        Direction::Short => (
            accept_party.collateral_reserve,
            offer_party.collateral_reserve,
        ),
    };
    // Flat interval [0, long_liquidation_price]: the long side is liquidated.
    let (long_liquidation_interval_start, long_liquidation_interval_end) =
        calculate_long_liquidation_interval_payouts(
            offer_party_direction,
            total_collateral,
            price_params.long_liquidation_price,
            collateral_reserve_long,
        )?;
    pieces.push((
        long_liquidation_interval_start,
        long_liquidation_interval_end,
    ));
    // Flat interval [short_liquidation_price, BTCUSD_MAX_PRICE]: the short
    // side is liquidated.
    let (short_liquidation_interval_start, short_liquidation_interval_end) =
        calculate_short_liquidation_interval_payouts(
            offer_party_direction,
            total_collateral,
            price_params.short_liquidation_price,
            collateral_reserve_short,
        )?;
    // Discretized PnL-tracking intervals between the two liquidation prices.
    let mid_range = calculate_mid_range_payouts(
        offer_party,
        accept_party,
        price_params.initial_price,
        &long_liquidation_interval_end,
        &short_liquidation_interval_start,
        offer_party_direction,
        quantity,
    )?;
    for (lower, upper) in mid_range.iter() {
        pieces.push((*lower, *upper));
    }
    pieces.push((
        short_liquidation_interval_start,
        short_liquidation_interval_end,
    ));
    // Connect the intervals `[(X, A), (Y, A))]` and `[(Y, B), (Z, B)]` by introducing a step-up
    // interval in between:
    //
    // `[(X, A), (Y - 1, A))]`, `[(Y - 1, A), (Y, B)]`, `[(Y, B), (Z, D)]`.
    //
    // E.g. converting
    //
    // `[($100, 60 sats), ($200, 60 sats)]`, `[($200, 30 sats), ($300, 30 sats)]` into
    //
    // `[($100, 60 sats), ($199, 60 sats)]`, `[($199, 60 sats), ($200, 30 sats)]`, `[($200, 30
    // sats), ($300, 30 sats)]`.
    let pieces_minus_first = pieces.iter().skip(1);
    let mut pieces = pieces
        .iter()
        .zip(pieces_minus_first)
        .flat_map(|((a, b), (c, _))| {
            let shared_point = PayoutPoint {
                event_outcome: b.event_outcome - 1,
                ..*b
            };
            vec![(*a, shared_point), (shared_point, *c)]
        })
        .collect::<Vec<(PayoutPoint, PayoutPoint)>>();
    // The last interval is dropped by zipping an iterator of length `L` with an iterator of length
    // `L-1`, dropping the last element implicitly. Therefore, we need to reintroduce the last
    // element.
    pieces.push((
        short_liquidation_interval_start,
        short_liquidation_interval_end,
    ));
    // Drop degenerate intervals whose start and end fall on the same outcome;
    // `rust-dlc` requires strictly ascending event outcome values.
    let pieces = pieces
        .into_iter()
        .filter(|(start, end)| start.event_outcome != end.event_outcome)
        .collect::<Vec<_>>();
    Ok(pieces)
}
/// Calculate the payout points for the interval where the party going long gets liquidated, from
/// the perspective of the offer party.
///
/// The price ranges from 0 to the `long_liquidation_price`.
///
/// # Errors
///
/// Fails if `liquidation_price_long` cannot be represented as a `u64`.
fn calculate_long_liquidation_interval_payouts(
    offer_direction: Direction,
    total_collateral: u64,
    liquidation_price_long: Decimal,
    collateral_reserve_long: u64,
) -> Result<(PayoutPoint, PayoutPoint)> {
    // Return an error instead of panicking if the price cannot be represented
    // as a `u64` (e.g. a negative `Decimal`); the function already returns
    // `Result`, so callers can handle this gracefully.
    let liquidation_price_long = liquidation_price_long
        .to_u64()
        .context("Long liquidation price should fit into a u64")?;
    let (lower, upper) = match offer_direction {
        // If the offer party is short and the long party gets liquidated, the offer party gets all
        // the collateral minus the long party's collateral reserve.
        Direction::Short => {
            let outcome_payout = total_collateral - collateral_reserve_long;
            (
                PayoutPoint {
                    event_outcome: 0,
                    outcome_payout,
                    extra_precision: 0,
                },
                PayoutPoint {
                    event_outcome: liquidation_price_long,
                    outcome_payout,
                    extra_precision: 0,
                },
            )
        }
        // If the offer party is long and they get liquidated, they get their collateral reserve.
        Direction::Long => (
            PayoutPoint {
                event_outcome: 0,
                outcome_payout: collateral_reserve_long,
                extra_precision: 0,
            },
            PayoutPoint {
                event_outcome: liquidation_price_long,
                outcome_payout: collateral_reserve_long,
                extra_precision: 0,
            },
        ),
    };
    Ok((lower, upper))
}
/// Calculates the payout points for the interval between the `long_liquidation_price` and the
/// `short_liquidation_price`.
///
/// Returns tuples of payout points, first item is lower point, next item is higher point of two
/// points on the payout curve.
///
/// # Errors
///
/// Fails if the short liquidation price is below the long liquidation price, or if the PnL
/// computation fails.
fn calculate_mid_range_payouts(
    offer_party: PartyParams,
    accept_party: PartyParams,
    initial_price: Decimal,
    // The end of the price interval within which the party going long gets liquidated. This is the
    // highest of the two points in terms of price.
    long_liquidation_interval_end_payout: &PayoutPoint,
    // The start of the price interval within which the party going short gets liquidated. This is
    // the lowest of the two points in terms of price.
    short_liquidation_interval_start_payout: &PayoutPoint,
    offer_direction: Direction,
    quantity: f32,
) -> Result<Vec<(PayoutPoint, PayoutPoint)>> {
    let long_liquidation_price = long_liquidation_interval_end_payout.event_outcome;
    let short_liquidation_price = short_liquidation_interval_start_payout.event_outcome;
    // The offer party never gets less than its own reserve.
    let min_payout_offer_party = offer_party.collateral_reserve;
    // This excludes the collateral reserve of the accept party.
    let max_payout_offer_party = offer_party.total_collateral() + accept_party.margin;
    let (long_margin, short_margin) = match offer_direction {
        Direction::Long => (offer_party.margin, accept_party.margin),
        Direction::Short => (accept_party.margin, offer_party.margin),
    };
    let step = {
        let diff = short_liquidation_price
            .checked_sub(long_liquidation_price)
            // Fixed: the failure case of `checked_sub` is the short
            // liquidation price being *below* the long one; the old message
            // described the opposite.
            .context("Short liquidation price smaller than long liquidation price")?;
        // Clamp to at least 1: if the liquidation prices are fewer than
        // `PAYOUT_CURVE_DISCRETIZATION_INTERVALS` apart, integer division
        // yields 0 and `step_by(0)` below would panic.
        (diff / PAYOUT_CURVE_DISCRETIZATION_INTERVALS).max(1)
    };
    let pieces = (long_liquidation_price..short_liquidation_price)
        .step_by(step as usize)
        .map(|interval_start_price| {
            // The whole interval is priced at its midpoint.
            let interval_mid_price = interval_start_price + step / 2;
            let pnl = calculate_pnl(
                initial_price,
                Decimal::from(interval_mid_price),
                quantity,
                offer_direction,
                long_margin,
                short_margin,
            )?;
            // If this is the start of the middle interval after the long liquidation interval.
            let interval_payout = offer_party.total_collateral() as i64 + pnl;
            // Payout cannot be below min.
            let interval_payout = interval_payout.max(min_payout_offer_party as i64);
            // Payout cannot be above max.
            let interval_payout = interval_payout.min(max_payout_offer_party as i64);
            let interval_start_payout_point = PayoutPoint {
                event_outcome: interval_start_price,
                outcome_payout: interval_payout as u64,
                extra_precision: 0,
            };
            // Cap the last piece so it does not overshoot into the short
            // liquidation interval.
            let interval_end_price = (interval_start_price + step).min(short_liquidation_price);
            let interval_end_payout_point = PayoutPoint {
                event_outcome: interval_end_price,
                outcome_payout: interval_payout as u64,
                extra_precision: 0,
            };
            Ok((interval_start_payout_point, interval_end_payout_point))
        })
        .collect::<Result<Vec<(_, _)>>>()?;
    Ok(pieces)
}
/// Calculate the payout points for the interval where the party going short gets liquidated, from
/// the perspective of the offer party.
///
/// The price ranges from the `short_liquidation_price` to `BTCUSD_MAX_PRICE`.
///
/// # Errors
///
/// Fails if `liquidation_price_short` cannot be represented as a `u64`.
fn calculate_short_liquidation_interval_payouts(
    offer_direction: Direction,
    total_collateral: u64,
    liquidation_price_short: Decimal,
    collateral_reserve_short: u64,
) -> Result<(PayoutPoint, PayoutPoint)> {
    let liquidation_price_short = {
        // Return an error instead of panicking if the price cannot be
        // represented as a `u64`; the function already returns `Result`.
        let price = liquidation_price_short
            .to_u64()
            .context("Short liquidation price should fit into a u64")?;
        // We cannot end up generating an interval with a constant price, because `rust-dlc` says
        // that `Payout points must have ascending event outcome value`.
        if price == BTCUSD_MAX_PRICE {
            price - 1
        } else {
            price
        }
    };
    let (lower, upper) = match offer_direction {
        // If the offer party is long and the short party gets liquidated, the offer party gets all
        // the collateral minus the short party's collateral reserve.
        Direction::Long => {
            let outcome_payout = total_collateral - collateral_reserve_short;
            let interval_start = PayoutPoint {
                event_outcome: liquidation_price_short,
                outcome_payout,
                extra_precision: 0,
            };
            let interval_end = PayoutPoint {
                event_outcome: BTCUSD_MAX_PRICE,
                outcome_payout,
                extra_precision: 0,
            };
            (interval_start, interval_end)
        }
        // If the offer party is short and they get liquidated, they get their collateral reserve.
        Direction::Short => {
            let outcome_payout = collateral_reserve_short;
            let interval_start = PayoutPoint {
                event_outcome: liquidation_price_short,
                outcome_payout,
                extra_precision: 0,
            };
            let interval_end = PayoutPoint {
                event_outcome: BTCUSD_MAX_PRICE,
                outcome_payout,
                extra_precision: 0,
            };
            (interval_start, interval_end)
        }
    };
    Ok((lower, upper))
}
#[cfg(test)]
mod tests {
use super::*;
use insta::assert_debug_snapshot;
use proptest::prelude::*;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal_macros::dec;
use std::fs::File;
use std::ops::Mul;
use xxi_node::cfd::calculate_long_bankruptcy_price;
use xxi_node::cfd::calculate_margin;
use xxi_node::cfd::calculate_short_bankruptcy_price;
/// set this to true to export test data to csv files
/// An example gnuplot file has been provided in [`payout_curve.gp`]
const PRINT_CSV: bool = false;
// When the offer party is long and gets liquidated, it is paid out exactly
// its collateral reserve at both interval ends.
// NOTE(review): the `_then_gets_zero` suffix looks stale — the assertions
// check the reserve, not zero — and the sibling `_then_gets_zero_plus_reserve`
// test is essentially identical; consider consolidating.
#[test]
fn calculate_lower_range_payout_points_when_offerer_long_then_gets_zero() {
    // setup
    // we take 2 BTC so that all tests have nice numbers
    let total_collateral = Amount::ONE_BTC.to_sat() * 2;
    let bound = dec!(20_000);
    let collateral_reserve_long = 300_000;
    // act
    let (lower_payout_lower, lower_payout_upper) = calculate_long_liquidation_interval_payouts(
        Direction::Long,
        total_collateral,
        bound,
        collateral_reserve_long,
    )
    .unwrap();
    // assert
    assert_eq!(lower_payout_lower.event_outcome, 0);
    assert_eq!(lower_payout_lower.outcome_payout, collateral_reserve_long);
    assert_eq!(lower_payout_upper.event_outcome, bound.to_u64().unwrap());
    assert_eq!(lower_payout_upper.outcome_payout, collateral_reserve_long);
    if PRINT_CSV {
        let file = File::create("src/payout_curve/lower_range_long.csv").unwrap();
        let mut wtr = csv::WriterBuilder::new().delimiter(b';').from_writer(file);
        wtr.serialize(lower_payout_lower)
            .expect("to be able to write");
        wtr.serialize(lower_payout_upper)
            .expect("to be able to write");
        wtr.flush().unwrap();
    }
}
// Same scenario as `calculate_lower_range_payout_points_when_offerer_long_then_gets_zero`:
// the liquidated long offer party receives only its collateral reserve.
// NOTE(review): this test is byte-for-byte identical to that sibling apart
// from its name and one comment — likely a copy-paste leftover.
#[test]
fn calculate_lower_range_payout_points_when_offerer_long_then_gets_zero_plus_reserve() {
    // setup
    // we take 2 BTC so that all tests have nice numbers
    let total_collateral = Amount::ONE_BTC.to_sat() * 2;
    let bound = dec!(20_000);
    // 0.003 BTC
    let collateral_reserve_long = 300_000;
    // act
    let (lower_payout_lower, lower_payout_upper) = calculate_long_liquidation_interval_payouts(
        Direction::Long,
        total_collateral,
        bound,
        collateral_reserve_long,
    )
    .unwrap();
    // assert
    assert_eq!(lower_payout_lower.event_outcome, 0);
    assert_eq!(lower_payout_lower.outcome_payout, collateral_reserve_long);
    assert_eq!(lower_payout_upper.event_outcome, bound.to_u64().unwrap());
    assert_eq!(lower_payout_upper.outcome_payout, collateral_reserve_long);
    if PRINT_CSV {
        let file = File::create("src/payout_curve/lower_range_long.csv").unwrap();
        let mut wtr = csv::WriterBuilder::new().delimiter(b';').from_writer(file);
        wtr.serialize(lower_payout_lower)
            .expect("to be able to write");
        wtr.serialize(lower_payout_upper)
            .expect("to be able to write");
        wtr.flush().unwrap();
    }
}
// When the offer party is short and the long side gets liquidated, the offer
// party receives the total collateral minus the long side's reserve.
#[test]
fn calculate_lower_range_payout_points_when_offer_short_then_gets_all() {
    // setup
    // we take 2 BTC so that all tests have nice numbers
    let total_collateral = Amount::ONE_BTC.to_sat() * 2;
    let bound = dec!(20_000);
    let collateral_reserve_long = 300_000;
    // act
    let (lower_payout_lower, lower_payout_upper) = calculate_long_liquidation_interval_payouts(
        Direction::Short,
        total_collateral,
        bound,
        collateral_reserve_long,
    )
    .unwrap();
    // assert
    assert_eq!(lower_payout_lower.event_outcome, 0);
    assert_eq!(
        lower_payout_lower.outcome_payout,
        total_collateral - collateral_reserve_long
    );
    assert_eq!(lower_payout_upper.event_outcome, bound.to_u64().unwrap());
    assert_eq!(
        lower_payout_upper.outcome_payout,
        total_collateral - collateral_reserve_long
    );
    // print to csv
    if PRINT_CSV {
        let file = File::create("src/payout_curve/lower_range_short.csv").unwrap();
        let mut wtr = csv::WriterBuilder::new().delimiter(b';').from_writer(file);
        wtr.serialize(lower_payout_lower)
            .expect("to be able to write");
        wtr.serialize(lower_payout_upper)
            .expect("to be able to write");
        wtr.flush().unwrap();
    }
}
// Snapshot test (via `insta`) pinning the exact payout function generated for
// a fixed short-offer scenario; any change to the curve construction shows up
// as a snapshot diff.
#[test]
fn payout_function_snapshot() {
    let quantity = 60_000.0;
    let initial_price = dec!(30_000);
    let leverage_long = Decimal::TWO;
    let leverage_short = Decimal::TWO;
    let collateral_reserve_offer = Amount::from_sat(300_000);
    let collateral_reserve_accept = Amount::ZERO;
    let offer_party_direction = Direction::Short;
    let (leverage_offer, leverage_accept) = match offer_party_direction {
        Direction::Long => (leverage_long, leverage_short),
        Direction::Short => (leverage_short, leverage_long),
    };
    let margin_offer =
        calculate_margin(initial_price, quantity, leverage_offer.to_f32().unwrap());
    let margin_accept =
        calculate_margin(initial_price, quantity, leverage_accept.to_f32().unwrap());
    let offer_party = PartyParams {
        margin: margin_offer.to_sat(),
        collateral_reserve: collateral_reserve_offer.to_sat(),
    };
    let accept_party = PartyParams {
        margin: margin_accept.to_sat(),
        collateral_reserve: collateral_reserve_accept.to_sat(),
    };
    let long_liquidation_price = calculate_long_bankruptcy_price(leverage_long, initial_price);
    let short_liquidation_price =
        calculate_short_bankruptcy_price(leverage_short, initial_price);
    // Bypasses `PriceParams::new_btc_usd` validation; fields are set directly.
    let price_params = PriceParams {
        initial_price,
        long_liquidation_price,
        short_liquidation_price,
    };
    let payout_function = build_inverse_payout_function(
        quantity,
        offer_party,
        accept_party,
        price_params,
        offer_party_direction,
    )
    .unwrap();
    assert_debug_snapshot!(payout_function);
}
// With a 1x short leverage the short liquidation price is unbounded/huge;
// this checks that no mid-range interval ever exceeds `BTCUSD_MAX_PRICE`.
#[test]
fn ensure_all_bounds_smaller_or_equal_max_btc_price() {
    // setup
    let quantity = 19.0;
    let initial_price = dec!(36780);
    let long_leverage = 2.0;
    let short_leverage = 1.0;
    let offer_margin = calculate_margin(initial_price, quantity, long_leverage);
    let accept_margin = calculate_margin(initial_price, quantity, short_leverage);
    let collateral_reserve_offer = Amount::from_sat(155);
    let long_liquidation_price = calculate_long_bankruptcy_price(
        Decimal::from_f32(long_leverage).expect("to fit into f32"),
        initial_price,
    );
    let short_liquidation_price = calculate_short_bankruptcy_price(
        Decimal::from_f32(short_leverage).expect("to fit into f32"),
        initial_price,
    );
    let party_params_offer = PartyParams::new(offer_margin, collateral_reserve_offer);
    let party_params_accept = PartyParams::new(accept_margin, Amount::ZERO);
    // act: offer long
    let offer_direction = Direction::Long;
    let mid_range_payouts_offer_long = calculate_mid_range_payouts(
        party_params_offer,
        party_params_accept,
        initial_price,
        &PayoutPoint {
            event_outcome: long_liquidation_price.to_u64().unwrap(),
            outcome_payout: party_params_offer.collateral_reserve,
            extra_precision: 0,
        },
        &PayoutPoint {
            event_outcome: short_liquidation_price.to_u64().unwrap(),
            outcome_payout: party_params_offer.total_collateral()
                + party_params_accept.margin(),
            extra_precision: 0,
        },
        offer_direction,
        quantity,
    )
    .expect("To be able to compute mid range");
    for (lower, upper) in &mid_range_payouts_offer_long {
        assert!(
            lower.event_outcome <= BTCUSD_MAX_PRICE,
            "{} > {}",
            lower.event_outcome,
            BTCUSD_MAX_PRICE
        );
        assert!(
            upper.event_outcome <= BTCUSD_MAX_PRICE,
            "{} > {}",
            upper.event_outcome,
            BTCUSD_MAX_PRICE
        );
    }
}
// When the offer party is short and gets liquidated, it is paid out exactly
// its collateral reserve across the whole upper interval.
#[test]
fn calculate_upper_range_payout_points_when_offer_short_then_gets_reserve() {
    // setup
    // we take 2 BTC so that all tests have nice numbers
    let total_collateral = Amount::ONE_BTC.to_sat() * 2;
    let liquidation_price_short = dec!(60_000);
    let collateral_reserve_offer = 300_000;
    // act
    let offer_direction = Direction::Short;
    let (lower, upper) = calculate_short_liquidation_interval_payouts(
        offer_direction,
        total_collateral,
        liquidation_price_short,
        collateral_reserve_offer,
    )
    .unwrap();
    // assert
    assert_eq!(lower.event_outcome, 60_000);
    assert_eq!(lower.outcome_payout, collateral_reserve_offer);
    assert_eq!(upper.event_outcome, BTCUSD_MAX_PRICE);
    assert_eq!(upper.outcome_payout, collateral_reserve_offer);
    if PRINT_CSV {
        let file = File::create("src/payout_curve/upper_range_short.csv").unwrap();
        let mut wtr = csv::WriterBuilder::new().delimiter(b';').from_writer(file);
        wtr.serialize(lower).expect("to be able to write");
        wtr.serialize(upper).expect("to be able to write");
        wtr.flush().unwrap();
    }
}
// When the offer party is long and the short side gets liquidated, the offer
// party receives the total collateral minus the short side's reserve.
#[test]
fn calculate_upper_range_payout_points_when_offer_long_then_gets_everything() {
    // setup
    // we take 2 BTC so that all tests have nice numbers
    let total_collateral = Amount::ONE_BTC.to_sat() * 2;
    let liquidation_price_short = dec!(60_000);
    let collateral_reserve_accept = 50_000;
    // act
    let offer_direction = Direction::Long;
    let (lower, upper) = calculate_short_liquidation_interval_payouts(
        offer_direction,
        total_collateral,
        liquidation_price_short,
        collateral_reserve_accept,
    )
    .unwrap();
    // assert
    assert_eq!(lower.event_outcome, 60_000);
    assert_eq!(
        lower.outcome_payout,
        total_collateral - collateral_reserve_accept
    );
    assert_eq!(upper.event_outcome, BTCUSD_MAX_PRICE);
    assert_eq!(
        upper.outcome_payout,
        total_collateral - collateral_reserve_accept
    );
    if PRINT_CSV {
        let file = File::create("src/payout_curve/upper_range_long.csv").unwrap();
        let mut wtr = csv::WriterBuilder::new().delimiter(b';').from_writer(file);
        wtr.serialize(lower).expect("to be able to write");
        wtr.serialize(upper).expect("to be able to write");
        wtr.flush().unwrap();
    }
}
// When the short liquidation price equals `BTCUSD_MAX_PRICE`, the interval
// start is decremented by one so the interval keeps ascending outcomes.
#[test]
fn upper_range_price_always_below_max_btc_price() {
    // setup
    let total_collateral = Amount::ONE_BTC.to_sat() * 2;
    let collateral_reserve_accept = 300_000;
    // act
    let offer_direction = Direction::Long;
    let (lower, upper) = calculate_short_liquidation_interval_payouts(
        offer_direction,
        total_collateral,
        Decimal::from(BTCUSD_MAX_PRICE),
        collateral_reserve_accept,
    )
    .unwrap();
    // assert
    assert_eq!(lower.event_outcome, BTCUSD_MAX_PRICE - 1);
    assert_eq!(upper.event_outcome, BTCUSD_MAX_PRICE);
}
// Flattened pair of payout points, mirroring the fields of two
// `PayoutPoint`s, serializable for CSV export.
// NOTE(review): not referenced in the visible part of this file — presumably
// used by CSV-exporting tests further down; confirm before removing.
#[derive(Serialize, Deserialize)]
struct PayoutCouple {
    lower_event_outcome: u64,
    lower_outcome_payout: u64,
    lower_extra_precision: u16,
    upper_event_outcome: u64,
    upper_outcome_payout: u64,
    upper_extra_precision: u16,
}
// Expected payouts at a given interval start, serializable for CSV export.
// NOTE(review): not referenced in the visible part of this file — presumably
// used by tests further down; confirm before removing.
#[derive(Serialize, Deserialize, Debug)]
struct ShouldPayout {
    start: u64,
    payout_offer: u64,
    payout_accept: u64,
    collateral_reserve_offer: u64,
}
//******* Proptests *******//
// Property: for any total collateral and long liquidation bound, computing
// the long-liquidation interval with a *short* offer party never panics and
// always pays out `total_collateral - reserve` at both interval ends.
proptest! {
    #[test]
    fn calculating_lower_bound_doesnt_crash_offer_short(total_collateral in 1u64..100_000_000_000, bound in 1u64..100_000) {
        let bound = Decimal::from_u64(bound).expect("to be able to parse bound");
        let collateral_reserve_long = total_collateral / 5;
        // act:
        let (lower_payout_lower, lower_payout_upper) =
            calculate_long_liquidation_interval_payouts(Direction::Short, total_collateral, bound, collateral_reserve_long).unwrap();
        // assert
        prop_assert_eq!(lower_payout_lower.event_outcome, 0);
        prop_assert_eq!(lower_payout_lower.outcome_payout, total_collateral - collateral_reserve_long);
        prop_assert_eq!(lower_payout_upper.event_outcome, bound.to_u64().unwrap());
        prop_assert_eq!(lower_payout_upper.outcome_payout, total_collateral - collateral_reserve_long);
    }
}
// Property: for any total collateral and long liquidation bound, computing
// the long-liquidation interval with a *long* offer party never panics and
// always pays out exactly the long side's collateral reserve.
proptest! {
    #[test]
    fn calculating_lower_bound_doesnt_crash_offer_long(total_collateral in 1u64..100_000_000_000, bound in 1u64..100_000) {
        let bound = Decimal::from_u64(bound).expect("to be able to parse bound");
        let collateral_reserve_long = total_collateral / 5;
        // act:
        // Fixed: exercise the `Direction::Long` branch to match the test name.
        // Previously this passed `Direction::Short`, making it a verbatim
        // duplicate of `calculating_lower_bound_doesnt_crash_offer_short`.
        let (lower_payout_lower, lower_payout_upper) =
            calculate_long_liquidation_interval_payouts(Direction::Long, total_collateral, bound, collateral_reserve_long).unwrap();
        // assert: the liquidated long offer party only gets its reserve back.
        prop_assert_eq!(lower_payout_lower.event_outcome, 0);
        prop_assert_eq!(lower_payout_lower.outcome_payout, collateral_reserve_long);
        prop_assert_eq!(lower_payout_upper.event_outcome, bound.to_u64().unwrap());
        prop_assert_eq!(lower_payout_upper.outcome_payout, collateral_reserve_long);
    }
}
// Property: for any total collateral and short liquidation bound, computing
// the short-liquidation interval with a *short* offer party never panics and
// always pays out exactly the short side's collateral reserve.
proptest! {
    #[test]
    fn calculating_upper_bound_doesnt_crash_offer_short(total_collateral in 1u64..100_000_000_000, bound in 1u64..100_000) {
        let collateral_reserve_short = total_collateral / 5;
        // act
        let offer_direction = Direction::Short;
        let (lower, upper) =
            calculate_short_liquidation_interval_payouts(offer_direction, total_collateral, Decimal::from(bound), collateral_reserve_short).unwrap();
        // assert
        prop_assert_eq!(lower.event_outcome, bound);
        prop_assert_eq!(lower.outcome_payout, collateral_reserve_short);
        prop_assert_eq!(upper.event_outcome, BTCUSD_MAX_PRICE);
        prop_assert_eq!(upper.outcome_payout, collateral_reserve_short);
    }
}
// Property: for any total collateral and short liquidation bound, computing
// the short-liquidation interval with a *long* offer party never panics and
// pays out `total_collateral - reserve` at both interval ends.
proptest! {
    #[test]
    fn calculating_upper_bound_doesnt_crash_offer_long(total_collateral in 1u64..100_000_000_000, bound in 1u64..100_000) {
        let collateral_reserve_short = total_collateral / 5;
        // act
        let offer_direction = Direction::Long;
        let (lower, upper) =
            calculate_short_liquidation_interval_payouts(offer_direction, total_collateral, Decimal::from(bound), collateral_reserve_short).unwrap();
        // assert
        // Use `prop_assert_eq!` (not plain `assert_eq!`) so proptest can
        // report and shrink the failing input, consistent with the sibling
        // proptests in this module.
        prop_assert_eq!(lower.event_outcome, bound);
        prop_assert_eq!(lower.outcome_payout, total_collateral - collateral_reserve_short);
        prop_assert_eq!(upper.event_outcome, BTCUSD_MAX_PRICE);
        prop_assert_eq!(upper.outcome_payout, total_collateral - collateral_reserve_short);
    }
}
proptest! {
#[test]
fn midrange_always_positive(initial_price in 20_000i32..50_000, short_leverage in 1i32..5) {
// setup
let quantity = 1000.0;
let initial_price = Decimal::from_i32(initial_price).expect("to be able to parse");
let long_leverage = 2.0;
let short_leverage = short_leverage as f32;
let offer_margin =
calculate_margin(initial_price, quantity, long_leverage);
let accept_margin =
calculate_margin(initial_price, quantity, short_leverage);
// Collateral reserve for the offer party based on a fee calculation.
let collateral_reserve_offer = {
let collateral_reserve = dec!(0.003) * Decimal::from_f32(quantity).expect("to be able to parse into dec")
/ initial_price;
let collateral_reserve = collateral_reserve
.mul(dec!(100_000_000))
.to_u64()
.expect("to fit into u64");
Amount::from_sat(collateral_reserve)
};
let long_liquidation_price = calculate_long_bankruptcy_price(
Decimal::from_f32(long_leverage).expect("to fit into f32"),
initial_price,
);
let short_liquidation_price = calculate_short_bankruptcy_price(
Decimal::from_f32(short_leverage).expect("to fit into f32"),
initial_price,
);
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | true |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/payout_curve/tests/integration_proptests.rs | crates/payout_curve/tests/integration_proptests.rs | #![allow(clippy::unwrap_used)]
use anyhow::Context;
use anyhow::Result;
use bitcoin::Amount;
use dlc_manager::payout_curve::PayoutFunction;
use dlc_manager::payout_curve::PayoutFunctionPiece;
use dlc_manager::payout_curve::PolynomialPayoutCurvePiece;
use dlc_manager::payout_curve::RoundingInterval;
use dlc_manager::payout_curve::RoundingIntervals;
use payout_curve::build_inverse_payout_function;
use payout_curve::PartyParams;
use payout_curve::PriceParams;
use proptest::prelude::*;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::Decimal;
use rust_decimal_macros::dec;
use std::fs::File;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;
use xxi_node::cfd::calculate_long_bankruptcy_price;
use xxi_node::cfd::calculate_margin;
use xxi_node::cfd::calculate_short_bankruptcy_price;
use xxi_node::commons::Direction;
/// set this to true to export test data to csv files
const PRINT_CSV: bool = false;
/// Taken from a past crash.
///
/// Regression test: computing the payout curve with these exact historical
/// parameters must not panic.
#[test]
fn calculating_payout_curve_doesnt_crash_1() {
    let direction_coordinator = Direction::Short;
    let price = Decimal::from_u64(26986).unwrap();
    let quantity = 1.0;
    let (leverage_coordinator, leverage_trader) = (3.0, 3.0);
    let reserve_offer = 0;

    let margin_coordinator = calculate_margin(price, quantity, leverage_coordinator);
    let margin_trader = calculate_margin(price, quantity, leverage_trader);

    // The coordinator is short, so the trader takes the long side.
    let (leverage_long, leverage_short) = match direction_coordinator {
        Direction::Long => (leverage_coordinator, leverage_trader),
        Direction::Short => (leverage_trader, leverage_coordinator),
    };
    let liquidation_price_long = calculate_long_bankruptcy_price(
        Decimal::from_f32(leverage_long).expect("to be able to parse f32"),
        price,
    );
    let liquidation_price_short = calculate_short_bankruptcy_price(
        Decimal::from_f32(leverage_short).expect("to be able to parse f32"),
        price,
    );

    // act: we only test that this does not panic
    computed_payout_curve(
        quantity,
        margin_coordinator.to_sat(),
        margin_trader.to_sat(),
        price,
        reserve_offer,
        direction_coordinator,
        liquidation_price_long,
        liquidation_price_short,
    )
    .unwrap();
}
/// Taken from a past crash.
///
/// Regression test: computing the payout curve with these exact historical
/// parameters must not panic.
#[test]
fn calculating_payout_curve_doesnt_crash_2() {
    let direction_coordinator = Direction::Short;
    let price = dec!(30_000.0);
    let quantity = 10.0;
    let (leverage_coordinator, leverage_trader) = (1.0, 1.0);
    let reserve_offer = 0;

    let collateral_coordinator = calculate_margin(price, quantity, leverage_coordinator);
    let collateral_trader = calculate_margin(price, quantity, leverage_trader);

    // The coordinator is short, so the trader takes the long side.
    let (leverage_long, leverage_short) = match direction_coordinator {
        Direction::Long => (leverage_coordinator, leverage_trader),
        Direction::Short => (leverage_trader, leverage_coordinator),
    };
    let liquidation_price_long = calculate_long_bankruptcy_price(
        Decimal::from_f32(leverage_long).expect("to be able to parse f32"),
        price,
    );
    let liquidation_price_short = calculate_short_bankruptcy_price(
        Decimal::from_f32(leverage_short).expect("to be able to parse f32"),
        price,
    );

    // act: we only test that this does not panic
    computed_payout_curve(
        quantity,
        collateral_coordinator.to_sat(),
        collateral_trader.to_sat(),
        price,
        reserve_offer,
        direction_coordinator,
        liquidation_price_long,
        liquidation_price_short,
    )
    .unwrap();
}
/// Taken from a past crash.
///
/// Regression test: computing the payout curve with these exact historical
/// parameters must not panic.
#[test]
fn calculating_payout_curve_doesnt_crash_3() {
    let direction_coordinator = Direction::Short;
    let price = dec!(34586);
    let quantity = 1.0;
    let (leverage_coordinator, leverage_trader) = (2.0, 2.0);
    let reserve_offer = 0;

    let collateral_coordinator = calculate_margin(price, quantity, leverage_coordinator);
    let collateral_trader = calculate_margin(price, quantity, leverage_trader);

    // The coordinator is short, so the trader takes the long side.
    let (leverage_long, leverage_short) = match direction_coordinator {
        Direction::Long => (leverage_coordinator, leverage_trader),
        Direction::Short => (leverage_trader, leverage_coordinator),
    };
    let liquidation_price_long = calculate_long_bankruptcy_price(
        Decimal::from_f32(leverage_long).expect("to be able to parse f32"),
        price,
    );
    let liquidation_price_short = calculate_short_bankruptcy_price(
        Decimal::from_f32(leverage_short).expect("to be able to parse f32"),
        price,
    );

    // act: we only test that this does not panic
    computed_payout_curve(
        quantity,
        collateral_coordinator.to_sat(),
        collateral_trader.to_sat(),
        price,
        reserve_offer,
        direction_coordinator,
        liquidation_price_long,
        liquidation_price_short,
    )
    .unwrap();
}
// Property: computing the full payout curve must not panic for any trader
// leverage in [1, 5) and either coordinator direction. Logs inputs and
// timings around the computation to aid debugging shrunk failures.
proptest! {
    #[test]
    fn calculating_lower_bound_doesnt_crash(
        leverage_trader in 1u8..5,
        direction in 0..2,
    ) {
        init_tracing_for_test();
        let leverage_trader = leverage_trader as f32;
        // 0 maps to Short, 1 to Long.
        let coordinator_direction = if direction == 0 {
            Direction::Short
        }
        else {
            Direction::Long
        };
        let initial_price = dec!(30_000.0);
        let leverage_coordinator = 2.0;
        let quantity = 10.0;
        let fee = 0;
        let coordinator_margin = calculate_margin(initial_price, quantity, leverage_coordinator);
        let trader_margin = calculate_margin(initial_price, quantity, leverage_trader);
        let (leverage_long, leverage_short) = match coordinator_direction {
            Direction::Long => (leverage_coordinator, leverage_trader),
            Direction::Short => (leverage_trader, leverage_coordinator),
        };
        let long_liquidation_price = calculate_long_bankruptcy_price(
            Decimal::from_f32(leverage_long).expect("to be able to parse f32"),
            initial_price,
        );
        let short_liquidation_price = calculate_short_bankruptcy_price(
            Decimal::from_f32(leverage_short).expect("to be able to parse f32"),
            initial_price,
        );
        // Log all inputs before the act step so a panic can be reproduced.
        tracing::info!(
            leverage_trader,
            ?coordinator_direction,
            initial_price = initial_price.to_string(),
            leverage_coordinator,
            quantity,
            fee,
            %coordinator_margin,
            %trader_margin,
            ?long_liquidation_price,
            ?short_liquidation_price,
            "Started computing payout curve"
        );
        // act: we only test that this does not panic
        let now = std::time::Instant::now();
        computed_payout_curve(
            quantity,
            coordinator_margin.to_sat(),
            trader_margin.to_sat(),
            initial_price,
            fee,
            coordinator_direction,
            long_liquidation_price,
            short_liquidation_price,
        ).unwrap();
        tracing::info!(
            elapsed_ms = %now.elapsed().as_millis(),
            "Computed payout curve"
        );
    }
}
/// Build the full payout function for the given trade parameters and verify
/// that `to_range_payouts` succeeds.
///
/// Shared helper for the crash-regression tests and the property test above.
/// Returns an error instead of panicking so callers can surface failures via
/// `unwrap`.
///
/// If `PRINT_CSV` is enabled, the computed payout points are additionally
/// dumped to a `testrun-<millis>.csv` file for manual inspection.
#[allow(clippy::too_many_arguments)]
fn computed_payout_curve(
    quantity: f32,
    coordinator_margin: u64,
    trader_margin: u64,
    initial_price: Decimal,
    coordinator_collateral_reserve: u64,
    coordinator_direction: Direction,
    long_liquidation_price: Decimal,
    short_liquidation_price: Decimal,
) -> Result<()> {
    let price_params = PriceParams::new_btc_usd(
        initial_price,
        long_liquidation_price,
        short_liquidation_price,
    )?;

    let party_params_coordinator = PartyParams::new(
        Amount::from_sat(coordinator_margin),
        Amount::from_sat(coordinator_collateral_reserve),
    );
    // The trader holds no collateral reserve in these tests.
    let party_params_trader = PartyParams::new(Amount::from_sat(trader_margin), Amount::ZERO);

    let payout_points = build_inverse_payout_function(
        quantity,
        party_params_coordinator,
        party_params_trader,
        price_params,
        coordinator_direction,
    )?;

    // Every pair of payout points becomes one linear piece of the payout
    // function.
    let mut pieces = vec![];
    for (lower, upper) in &payout_points {
        let lower_range = PolynomialPayoutCurvePiece::new(vec![
            dlc_manager::payout_curve::PayoutPoint {
                event_outcome: lower.event_outcome,
                outcome_payout: lower.outcome_payout,
                extra_precision: lower.extra_precision,
            },
            dlc_manager::payout_curve::PayoutPoint {
                event_outcome: upper.event_outcome,
                outcome_payout: upper.outcome_payout,
                extra_precision: upper.extra_precision,
            },
        ])?;

        pieces.push(PayoutFunctionPiece::PolynomialPayoutCurvePiece(lower_range));
    }

    if PRINT_CSV {
        // Only compute the timestamp when it is actually needed for the file
        // name; it was previously computed unconditionally.
        let now = SystemTime::now().duration_since(UNIX_EPOCH)?;

        let file = File::create(format!("./testrun-{}.csv", now.as_millis()))?;
        let mut wtr = csv::WriterBuilder::new().delimiter(b';').from_writer(file);
        wtr.write_record(["lower", "upper", "lower payout", "upper payout"])
            .context("to be able to write record")?;
        for (lower, upper) in payout_points {
            wtr.write_record([
                lower.event_outcome.to_string(),
                upper.event_outcome.to_string(),
                lower.outcome_payout.to_string(),
                upper.outcome_payout.to_string(),
            ])?;
        }
        wtr.flush()?;
    }

    let payout_function =
        PayoutFunction::new(pieces).context("could not create payout function")?;

    let total_collateral =
        party_params_coordinator.total_collateral() + party_params_trader.total_collateral();

    // We only care that this call succeeds; the actual range payouts are
    // discarded.
    let _ = payout_function.to_range_payouts(
        total_collateral,
        &RoundingIntervals {
            intervals: vec![RoundingInterval {
                begin_interval: 0,
                rounding_mod: 1,
            }],
        },
    )?;

    Ok(())
}
/// Initialise tracing for tests.
///
/// Safe to call from multiple tests: the subscriber is only installed once.
#[cfg(test)]
pub(crate) fn init_tracing_for_test() {
    static INIT: std::sync::Once = std::sync::Once::new();

    INIT.call_once(|| {
        let subscriber = tracing_subscriber::fmt()
            .with_test_writer()
            .with_env_filter("debug");
        subscriber.init()
    })
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.