repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/payout_curve/examples/payout_curve_csv.rs | crates/payout_curve/examples/payout_curve_csv.rs | #![allow(clippy::unwrap_used)]
use anyhow::Context;
use anyhow::Result;
use bitcoin::Amount;
use dlc_manager::payout_curve::PayoutFunction;
use dlc_manager::payout_curve::PayoutFunctionPiece;
use dlc_manager::payout_curve::PolynomialPayoutCurvePiece;
use dlc_manager::payout_curve::RoundingInterval;
use dlc_manager::payout_curve::RoundingIntervals;
use payout_curve::build_inverse_payout_function;
use payout_curve::PartyParams;
use payout_curve::PayoutPoint;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use rust_decimal_macros::dec;
use std::fs::File;
use std::ops::Mul;
use xxi_node::cfd::calculate_long_bankruptcy_price;
use xxi_node::cfd::calculate_margin;
use xxi_node::cfd::calculate_pnl;
use xxi_node::cfd::calculate_short_bankruptcy_price;
use xxi_node::commons::Direction;
/// The example below will export the computed payout curve and how it should look like as CSV.
///
/// Use gnuplot to create a chart for it. An example gnuplot file has been provided
/// [`payout_curve.pg`]
/// Exports the computed payout curves (long and short offer direction) and their expected
/// ("should") counterparts as CSV files under `crates/payout_curve/examples/`.
fn main() -> Result<()> {
    // Example parameters: BTCUSD at 30k, 30k contracts, 2x leverage on both sides.
    let initial_price = dec!(30_000);
    let quantity = 30_000.0;
    let leverage_short = 2.0;
    let leverage_long = 2.0;
    // Bundle the initial price with both liquidation (bankruptcy) prices.
    let price_params = {
        let short_liquidation_price = calculate_short_bankruptcy_price(
            Decimal::from_f32(leverage_short).expect("to be able to parse f32"),
            initial_price,
        );
        let long_liquidation_price = calculate_long_bankruptcy_price(
            Decimal::from_f32(leverage_long).expect("to be able to parse f32"),
            initial_price,
        );
        payout_curve::PriceParams::new_btc_usd(
            initial_price,
            long_liquidation_price,
            short_liquidation_price,
        )?
    };
    // Fee is e.g. 0.3% * quantity / initial_price = 0.003 BTC = 300_000 sats.
    //
    // We compute it here so that we can easily adjust the example.
    let fee_offer = {
        let fee = dec!(0.3) * Decimal::from_f32(quantity).expect("to be able to parse into dec")
            / initial_price;
        // Convert from BTC to sats.
        let fee = fee
            .mul(dec!(100_000_000))
            .to_u64()
            .expect("to fit into u64");
        Amount::from_sat(fee)
    };
    let margin_short = calculate_margin(initial_price, quantity, leverage_short);
    let margin_long = calculate_margin(initial_price, quantity, leverage_long);
    // First pass: the offer party goes long. Only the offer party pays the fee.
    let direction_offer = Direction::Long;
    let (party_params_offer, party_params_accept) = match direction_offer {
        Direction::Long => (
            payout_curve::PartyParams::new(margin_long, fee_offer),
            payout_curve::PartyParams::new(margin_short, Amount::ZERO),
        ),
        Direction::Short => (
            payout_curve::PartyParams::new(margin_short, fee_offer),
            payout_curve::PartyParams::new(margin_long, Amount::ZERO),
        ),
    };
    let total_collateral =
        party_params_offer.total_collateral() + party_params_accept.total_collateral();
    let payout_points_offer_long = build_inverse_payout_function(
        quantity,
        party_params_offer,
        party_params_accept,
        price_params,
        direction_offer,
    )?;
    discretized_payouts_as_csv(
        "./crates/payout_curve/examples/discretized_long.csv",
        payout_points_offer_long.clone(),
        total_collateral,
    )?;
    // Second pass: the offer party goes short. Shadows the bindings from the first pass.
    let direction_offer = Direction::Short;
    let (party_params_offer, party_params_accept) = match direction_offer {
        Direction::Long => (
            payout_curve::PartyParams::new(margin_long, fee_offer),
            payout_curve::PartyParams::new(margin_short, Amount::ZERO),
        ),
        Direction::Short => (
            payout_curve::PartyParams::new(margin_short, fee_offer),
            payout_curve::PartyParams::new(margin_long, Amount::ZERO),
        ),
    };
    let total_collateral =
        party_params_offer.total_collateral() + party_params_accept.total_collateral();
    let payout_points_offer_short = build_inverse_payout_function(
        quantity,
        party_params_offer,
        party_params_accept,
        price_params,
        direction_offer,
    )?;
    discretized_payouts_as_csv(
        "./crates/payout_curve/examples/discretized_short.csv",
        payout_points_offer_short.clone(),
        total_collateral,
    )?;
    // Run the discretized points through rust-dlc to obtain the final range payouts.
    computed_payout_curve(
        party_params_offer,
        party_params_accept,
        "./crates/payout_curve/examples/computed_payout_long.csv",
        payout_points_offer_long,
    )?;
    computed_payout_curve(
        party_params_accept,
        party_params_offer,
        "./crates/payout_curve/examples/computed_payout_short.csv",
        payout_points_offer_short,
    )?;
    let leverage_long = Decimal::from_f32(leverage_long).context("to be able to parse f32")?;
    let leverage_short = Decimal::from_f32(leverage_short).context("to be able to parse f32")?;
    // Reference ("should") curves computed directly from margin + pnl.
    should_payouts_as_csv_short(
        margin_short.to_sat(),
        total_collateral,
        leverage_long,
        leverage_short,
        quantity,
        initial_price,
        "./crates/payout_curve/examples/should_short.csv",
        fee_offer.to_sat() as i64,
    )?;
    // NOTE(review): the arguments below are (leverage_short, leverage_long) while the
    // parameters are declared (leverage_long, leverage_short). Harmless while both are 2.0,
    // but confirm the intended order.
    should_payouts_as_csv_long(
        margin_long.to_sat(),
        total_collateral,
        leverage_short,
        leverage_long,
        quantity,
        initial_price,
        "./crates/payout_curve/examples/should_long.csv",
        fee_offer.to_sat() as i64,
    )?;
    Ok(())
}
/// This is the discretized payout curve thrown into `to_range_payouts` from rust-dlc, i.e. our
/// DLCs will be based on these points.
///
/// Converts the `(lower, upper)` payout point pairs into a rust-dlc [`PayoutFunction`] and
/// writes the resulting range payouts to `csv_path` (`;`-delimited: price, offer, accept).
fn computed_payout_curve(
    party_params_coordinator: PartyParams,
    party_params_trader: PartyParams,
    csv_path: &str,
    payout_points: Vec<(PayoutPoint, PayoutPoint)>,
) -> Result<()> {
    // Each (lower, upper) pair becomes a two-point polynomial piece understood by rust-dlc.
    let mut pieces = vec![];
    for (lower, upper) in payout_points {
        let lower_range = PolynomialPayoutCurvePiece::new(vec![
            dlc_manager::payout_curve::PayoutPoint {
                event_outcome: lower.event_outcome,
                outcome_payout: lower.outcome_payout,
                extra_precision: lower.extra_precision,
            },
            dlc_manager::payout_curve::PayoutPoint {
                event_outcome: upper.event_outcome,
                outcome_payout: upper.outcome_payout,
                extra_precision: upper.extra_precision,
            },
        ])?;
        pieces.push(PayoutFunctionPiece::PolynomialPayoutCurvePiece(lower_range));
    }
    let payout_function =
        PayoutFunction::new(pieces).context("could not create payout function")?;
    let total_collateral =
        party_params_coordinator.total_collateral() + party_params_trader.total_collateral();
    // A single rounding interval with modulus 1 starting at 0, i.e. effectively no rounding.
    let range_payouts = payout_function.to_range_payouts(
        total_collateral,
        &RoundingIntervals {
            intervals: vec![RoundingInterval {
                begin_interval: 0,
                rounding_mod: 1,
            }],
        },
    )?;
    let file = File::create(csv_path)?;
    let mut wtr = csv::WriterBuilder::new().delimiter(b';').from_writer(file);
    wtr.write_record(["price", "payout_offer", "trader"])
        .context("to be able to write record")?;
    for payout in &range_payouts {
        wtr.write_record([
            payout.start.to_string(),
            payout.payout.offer.to_string(),
            payout.payout.accept.to_string(),
        ])?;
    }
    wtr.flush()?;
    Ok(())
}
/// This is our approach to discretize the payout, i.e. we only call our internal library.
///
/// Writes one `;`-delimited row (price, offer payout, trader payout) per lower payout point,
/// plus the final upper point, to `csv_path`.
fn discretized_payouts_as_csv(
    csv_path: &str,
    payout_points: Vec<(PayoutPoint, PayoutPoint)>,
    total_collateral: u64,
) -> Result<()> {
    let file = File::create(csv_path)?;
    let mut wtr = csv::WriterBuilder::new().delimiter(b';').from_writer(file);
    wtr.write_record(["price", "payout_offer", "trader"])
        .context("to be able to write record")?;
    // Each interval contributes its lower point; the upper point of one interval is the lower
    // point of the next, so emitting only lower points avoids duplicates.
    for (lower, _upper) in &payout_points {
        wtr.write_record([
            lower.event_outcome.to_string(),
            lower.outcome_payout.to_string(),
            (total_collateral - lower.outcome_payout).to_string(),
        ])?;
    }
    // Need to add the last upper point because it was ignored explicitly above. Using `last()`
    // instead of indexing avoids a panic when `payout_points` is empty.
    if let Some((_, last)) = payout_points.last() {
        wtr.write_record([
            last.event_outcome.to_string(),
            last.outcome_payout.to_string(),
            (total_collateral - last.outcome_payout).to_string(),
        ])?;
    }
    wtr.flush()?;
    Ok(())
}
/// Writes the expected ("should") payout curve for a coordinator in the short direction to CSV,
/// computed directly from margin + pnl. Serves as a reference to compare the discretized and
/// computed curves against.
#[allow(clippy::too_many_arguments)]
pub fn should_payouts_as_csv_short(
    coordinator_margin: u64,
    total_collateral: u64,
    leverage_long: Decimal,
    leverage_short: Decimal,
    quantity: f32,
    initial_price: Decimal,
    csv_path: &str,
    coordinator_collateral_reserve: i64,
) -> Result<()> {
    let coordinator_direction = Direction::Short;
    // Work in i64 because `calculate_pnl` can be negative.
    let total_collateral = total_collateral as i64;
    let file = File::create(csv_path)?;
    let mut wtr = csv::WriterBuilder::new().delimiter(b';').from_writer(file);
    let long_liquidation_price = calculate_long_bankruptcy_price(leverage_long, initial_price);
    let short_liquidation_price = calculate_short_bankruptcy_price(leverage_short, initial_price);
    wtr.write_record(["price", "payout_offer", "trader"])?;
    // From price 0 up to the long liquidation price the short coordinator receives everything.
    wtr.write_record(&[0.to_string(), total_collateral.to_string(), 0.to_string()])?;
    wtr.write_record(&[
        long_liquidation_price.to_string(),
        total_collateral.to_string(),
        0.to_string(),
    ])?;
    let long_liquidation_price_i32 = long_liquidation_price
        .to_i32()
        .expect("to be able to convert");
    let short_liquidation_price_i32 = short_liquidation_price
        .to_i32()
        .expect("to be able to convert");
    let leverage_long = leverage_long.to_f32().expect("to be able to convert");
    let leverage_short = leverage_short.to_f32().expect("to be able to convert");
    let long_margin = calculate_margin(initial_price, quantity, leverage_long);
    let short_margin = calculate_margin(initial_price, quantity, leverage_short);
    // One row per whole-dollar price between the two liquidation prices. The coordinator
    // payout is margin + pnl + reserve, capped at the total collateral.
    for price in long_liquidation_price_i32..short_liquidation_price_i32 {
        let coordinator_payout = (((coordinator_margin as i64)
            + calculate_pnl(
                initial_price,
                Decimal::from(price),
                quantity,
                coordinator_direction,
                long_margin.to_sat(),
                short_margin.to_sat(),
            )?)
            + coordinator_collateral_reserve)
            .min(total_collateral);
        // The trader gets whatever the coordinator does not.
        let trader_payout = total_collateral - coordinator_payout;
        wtr.write_record(&[
            price.to_string(),
            coordinator_payout.to_string(),
            trader_payout.to_string(),
        ])?;
    }
    {
        // upper liquidation range end
        let coordinator_payout = (((coordinator_margin as i64)
            + calculate_pnl(
                initial_price,
                short_liquidation_price,
                quantity,
                coordinator_direction,
                long_margin.to_sat(),
                short_margin.to_sat(),
            )?)
            + coordinator_collateral_reserve)
            .min(total_collateral);
        let trader_payout = total_collateral - coordinator_payout;
        wtr.write_record(&[
            short_liquidation_price.to_string(),
            coordinator_payout.to_string(),
            trader_payout.to_string(),
        ])?;
    }
    {
        // upper range end to get to 100k
        let coordinator_payout = (((coordinator_margin as i64)
            + calculate_pnl(
                initial_price,
                Decimal::from(100_000),
                quantity,
                coordinator_direction,
                long_margin.to_sat(),
                short_margin.to_sat(),
            )?)
            + coordinator_collateral_reserve)
            .min(total_collateral);
        let trader_payout = total_collateral - coordinator_payout;
        wtr.write_record(&[
            100_000.to_string(),
            coordinator_payout.to_string(),
            trader_payout.to_string(),
        ])?;
    }
    wtr.flush()?;
    Ok(())
}
/// Writes the expected ("should") payout curve for a coordinator in the long direction to CSV,
/// computed directly from margin + pnl. Counterpart of [`should_payouts_as_csv_short`].
#[allow(clippy::too_many_arguments)]
pub fn should_payouts_as_csv_long(
    coordinator_margin: u64,
    total_collateral: u64,
    leverage_long: Decimal,
    leverage_short: Decimal,
    quantity: f32,
    initial_price: Decimal,
    csv_path: &str,
    coordinator_collateral_reserve: i64,
) -> Result<()> {
    let coordinator_direction = Direction::Long;
    // Work in i64 because `calculate_pnl` can be negative.
    let total_collateral = total_collateral as i64;
    let file = File::create(csv_path)?;
    let mut wtr = csv::WriterBuilder::new().delimiter(b';').from_writer(file);
    let long_liquidation_price = calculate_long_bankruptcy_price(leverage_long, initial_price);
    let short_liquidation_price = calculate_short_bankruptcy_price(leverage_short, initial_price);
    wtr.write_record(["price", "payout_offer", "trader"])?;
    // From price 0 up to the long liquidation price the long coordinator only keeps its
    // collateral reserve.
    wtr.write_record(&[
        0.to_string(),
        coordinator_collateral_reserve.to_string(),
        (total_collateral - coordinator_collateral_reserve).to_string(),
    ])?;
    wtr.write_record(&[
        long_liquidation_price.to_string(),
        coordinator_collateral_reserve.to_string(),
        (total_collateral - coordinator_collateral_reserve).to_string(),
    ])?;
    let long_liquidation_price_i32 = long_liquidation_price
        .to_i32()
        .expect("to be able to convert");
    let short_liquidation_price_i32 = short_liquidation_price
        .to_i32()
        .expect("to be able to convert");
    let leverage_long = leverage_long.to_f32().expect("to be able to convert");
    let leverage_short = leverage_short.to_f32().expect("to be able to convert");
    let long_margin = calculate_margin(initial_price, quantity, leverage_long);
    let short_margin = calculate_margin(initial_price, quantity, leverage_short);
    // One row per whole-dollar price between the two liquidation prices. The coordinator
    // payout is margin + pnl + reserve, capped at the total collateral.
    for price in long_liquidation_price_i32..short_liquidation_price_i32 {
        let coordinator_payout = (((coordinator_margin as i64)
            + calculate_pnl(
                initial_price,
                Decimal::from(price),
                quantity,
                coordinator_direction,
                long_margin.to_sat(),
                short_margin.to_sat(),
            )?)
            + coordinator_collateral_reserve)
            .min(total_collateral);
        let trader_payout = total_collateral - coordinator_payout;
        // Clamp the trader payout into [0, total_collateral] for the CSV output.
        wtr.write_record(&[
            price.to_string(),
            coordinator_payout.to_string(),
            trader_payout.min(total_collateral).max(0).to_string(),
        ])?;
    }
    {
        // upper range end to upper liquidation point
        let coordinator_payout = (((coordinator_margin as i64)
            + calculate_pnl(
                initial_price,
                short_liquidation_price,
                quantity,
                coordinator_direction,
                long_margin.to_sat(),
                short_margin.to_sat(),
            )?)
            + coordinator_collateral_reserve)
            .min(total_collateral);
        let trader_payout = (total_collateral - coordinator_payout).max(0);
        // NOTE(review): `trader_payout` is already clamped to >= 0 above, so the `.max(0)`
        // below is redundant (harmless).
        wtr.write_record(&[
            short_liquidation_price.to_string(),
            coordinator_payout.to_string(),
            trader_payout.max(0).min(total_collateral).to_string(),
        ])?;
    }
    {
        // upper range end to get to 100k
        let coordinator_payout = (((coordinator_margin as i64)
            + calculate_pnl(
                initial_price,
                Decimal::from(100_000),
                quantity,
                coordinator_direction,
                long_margin.to_sat(),
                short_margin.to_sat(),
            )?)
            + coordinator_collateral_reserve)
            .min(total_collateral);
        let trader_payout = total_collateral - coordinator_payout;
        wtr.write_record(&[
            100_000.to_string(),
            coordinator_payout.to_string(),
            (trader_payout).min(total_collateral).max(0).to_string(),
        ])?;
    }
    wtr.flush()?;
    Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/dev-maker/src/logger.rs | crates/dev-maker/src/logger.rs | use anyhow::Context;
use anyhow::Result;
use time::macros::format_description;
use tracing::metadata::LevelFilter;
use tracing_subscriber::filter::Directive;
use tracing_subscriber::fmt::time::UtcTime;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::Layer;
/// Name of the environment variable holding additional log directives.
const RUST_LOG_ENV: &str = "RUST_LOG";
/// Initializes the global tracing subscriber, logging to stderr with UTC timestamps.
///
/// Does nothing when `level` is [`LevelFilter::OFF`]. Directives from the `RUST_LOG`
/// environment variable are applied on top of the base filter; unparsable directives are
/// reported on stdout and skipped.
pub fn init_tracing(level: LevelFilter) -> Result<()> {
    if level == LevelFilter::OFF {
        return Ok(());
    }

    // Base filter: the requested level plus a quieter `hyper`.
    let mut filter = EnvFilter::new("")
        .add_directive(Directive::from(level))
        .add_directive("hyper=warn".parse()?);

    // Layer any additional directives from the environment on top.
    if let Some(Ok(env)) = std::env::var_os(RUST_LOG_ENV).map(|s| s.into_string()) {
        for directive in env.split(',') {
            #[allow(clippy::print_stdout)]
            match directive.parse() {
                Ok(d) => filter = filter.add_directive(d),
                Err(e) => println!("WARN ignoring log directive: `{directive}`: {e}"),
            };
        }
    }

    let fmt_layer = tracing_subscriber::fmt::layer()
        .with_writer(std::io::stderr)
        .with_ansi(true)
        .with_timer(UtcTime::new(format_description!(
            "[year]-[month]-[day] [hour]:[minute]:[second]"
        )))
        .boxed();

    tracing_subscriber::registry()
        .with(filter)
        .with(fmt_layer)
        .try_init()
        .context("Failed to init tracing")?;

    tracing::info!("Initialized logger");

    Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/dev-maker/src/orderbook_client.rs | crates/dev-maker/src/orderbook_client.rs | use anyhow::Result;
use reqwest::Client;
use reqwest::Url;
use secp256k1::SecretKey;
use uuid::Uuid;
use xxi_node::commons::ChannelOpeningParams;
use xxi_node::commons::NewOrder;
use xxi_node::commons::NewOrderRequest;
/// HTTP client for the 10101 orderbook REST API.
#[derive(Clone)]
pub struct OrderbookClient {
    /// Base URL of the orderbook, e.g. `http://localhost:8000`.
    url: Url,
    client: Client,
}
impl OrderbookClient {
    /// Creates a client with a 10 second request timeout.
    pub fn new(url: Url) -> Self {
        let client = Client::builder()
            .timeout(std::time::Duration::from_secs(10))
            .build()
            .expect("Failed to build reqwest client");
        Self { url, client }
    }

    /// Signs `order` with `secret_key` and posts it to the orderbook.
    pub(crate) async fn post_new_order(
        &self,
        order: NewOrder,
        channel_opening_params: Option<ChannelOpeningParams>,
        secret_key: SecretKey,
    ) -> Result<()> {
        let url = self.url.join("/api/orderbook/orders")?;
        tracing::info!(
            id = order.id().to_string(),
            direction = order.direction().to_string(),
            price = order.price().to_string(),
            "Posting order"
        );
        // The orderbook verifies this signature against the order's trader pubkey.
        let message = order.message();
        let signature = secret_key.sign_ecdsa(message);
        let new_order_request = NewOrderRequest {
            value: order,
            signature,
            channel_opening_params,
        };
        let response = self
            .client
            .post(url)
            .json(&new_order_request)
            .send()
            .await?;
        response.error_for_status()?;
        Ok(())
    }

    /// Deletes the order with the given id via the admin API.
    pub async fn delete_order(&self, order_id: &Uuid) -> Result<()> {
        tracing::debug!(
            order_id = order_id.to_string(),
            "Deleting order from orderbook"
        );
        // `format!` already yields a `String`; the former `.to_string()` was redundant.
        let url = self
            .url
            .join(format!("/api/admin/orderbook/orders/{}", order_id).as_str())?;
        let response = self.client.delete(url).send().await?;
        response.error_for_status()?;
        Ok(())
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/dev-maker/src/main.rs | crates/dev-maker/src/main.rs | use crate::logger::init_tracing;
use crate::orderbook_client::OrderbookClient;
use anyhow::Result;
use clap::Parser;
use reqwest::Url;
use rust_decimal::Decimal;
use secp256k1::rand;
use secp256k1::PublicKey;
use secp256k1::SecretKey;
use secp256k1::SECP256K1;
use std::str::FromStr;
use std::time::Duration;
use time::OffsetDateTime;
use tokio::time::sleep;
use tracing::metadata::LevelFilter;
use uuid::Uuid;
use xxi_node::commons::ContractSymbol;
use xxi_node::commons::Direction;
use xxi_node::commons::NewLimitOrder;
use xxi_node::commons::NewOrder;
mod historic_rates;
mod logger;
mod orderbook_client;
/// Lifetime of each posted order in seconds; the main loop sleeps one second less so there is
/// always a live order.
const ORDER_EXPIRY: u64 = 30;
#[tokio::main]
async fn main() -> Result<()> {
    init_tracing(LevelFilter::DEBUG)?;
    let opts: Opts = Opts::parse();
    // Either replay historic opening rates (sorted by timestamp) or quote a single fixed price.
    let rates: Vec<Decimal> = match opts.sub_command() {
        SubCommand::Historic => {
            let mut historic_rates = historic_rates::read();
            historic_rates.sort_by(|a, b| a.timestamp.cmp(&b.timestamp));
            historic_rates.into_iter().map(|rate| rate.open).collect()
        }
        SubCommand::Fixed(Fixed { price }) => vec![Decimal::try_from(price)?],
    };
    let client = OrderbookClient::new(Url::from_str("http://localhost:8000")?);
    // Fresh ephemeral maker identity per run.
    let secret_key = SecretKey::new(&mut rand::thread_rng());
    let public_key = secret_key.public_key(SECP256K1);
    tracing::info!(pubkey = public_key.to_string(), "Starting new dev-maker");
    let mut past_ids = vec![];
    loop {
        for rate in &rates {
            let mut tmp_ids = vec![];
            // Post 5 short orders one dollar above and 5 long orders one dollar below the rate.
            for _ in 0..5 {
                tmp_ids.push(
                    post_order(
                        client.clone(),
                        secret_key,
                        public_key,
                        Direction::Short,
                        rate + Decimal::ONE,
                        ORDER_EXPIRY,
                    )
                    .await,
                );
                tmp_ids.push(
                    post_order(
                        client.clone(),
                        secret_key,
                        public_key,
                        Direction::Long,
                        rate - Decimal::ONE,
                        ORDER_EXPIRY,
                    )
                    .await,
                );
            }
            // Clean up the previous batch now that the new orders are in place; deletion
            // failures are logged but do not abort the loop.
            for old_id in &past_ids {
                if let Err(err) = client.delete_order(old_id).await {
                    tracing::error!(
                        "Could not delete old order with id {old_id} because of {err:?}"
                    );
                }
            }
            past_ids.clear();
            past_ids.extend(tmp_ids);
            // we sleep a bit shorter than the last order expires to ensure always having an order
            sleep(Duration::from_secs(ORDER_EXPIRY - 1)).await;
        }
    }
}
/// Posts a single 5000-contract, 2x-leverage BTCUSD limit order and returns its id.
///
/// The caller applies the spread (e.g. adding/subtracting from `historic_rate.open`) before
/// passing `price` — remove or modify that spread at the call site to get some instant
/// profits :)
///
/// Posting failures are logged; the generated id is returned either way.
async fn post_order(
    client: OrderbookClient,
    secret_key: SecretKey,
    public_key: PublicKey,
    direction: Direction,
    price: Decimal,
    order_expiry_seconds: u64,
) -> Uuid {
    let order_id = Uuid::new_v4();

    let expiry =
        OffsetDateTime::now_utc() + time::Duration::seconds(order_expiry_seconds as i64);
    let order = NewOrder::Limit(NewLimitOrder {
        id: order_id,
        contract_symbol: ContractSymbol::BtcUsd,
        price,
        quantity: Decimal::from(5000),
        trader_id: public_key,
        direction,
        leverage: Decimal::from(2),
        expiry,
        stable: false,
    });

    let result = client.post_new_order(order, None, secret_key).await;
    if let Err(err) = result {
        tracing::error!("Failed posting new order {err:?}");
    }

    order_id
}
// Command line options for the dev-maker. (Plain comments on purpose: doc comments would
// change clap's generated --help output.)
#[derive(Parser)]
struct Opts {
    #[clap(subcommand)]
    subcmd: Option<SubCommand>,
}
impl Opts {
    /// Returns the chosen subcommand, defaulting to a fixed price of 50k when none was given.
    fn sub_command(&self) -> SubCommand {
        match &self.subcmd {
            Some(cmd) => cmd.clone(),
            None => SubCommand::Fixed(Fixed { price: 50_000.0 }),
        }
    }
}
// Price source for the maker's quotes. (Plain comments on purpose: doc comments would change
// clap's generated --help output.)
#[derive(Parser, Clone)]
enum SubCommand {
    // Replay historic hourly rates from the bundled JSON file.
    Historic,
    // Quote around a single fixed price.
    Fixed(Fixed),
}
#[derive(Parser, Clone)]
struct Fixed {
    // Price to quote around, in USD.
    #[clap(default_value = "50000.0")]
    price: f32,
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/dev-maker/src/historic_rates.rs | crates/dev-maker/src/historic_rates.rs | use rust_decimal::Decimal;
use serde::Deserialize;
use std::fs::File;
use std::io::BufReader;
use time::OffsetDateTime;
/// A single historic rate entry as deserialized from `bitmex_hourly_rates.json`.
#[derive(Deserialize, Debug)]
pub(crate) struct HistoricRate {
    // Serialized as an RFC 3339 timestamp.
    #[serde(with = "time::serde::rfc3339")]
    pub timestamp: OffsetDateTime,
    // Presumably the opening price of the period — TODO confirm against the data file.
    pub open: Decimal,
}
/// Loads the historic hourly BitMEX rates bundled with the crate.
///
/// # Panics
///
/// Panics when the JSON file is missing or cannot be deserialized.
pub fn read() -> Vec<HistoricRate> {
    let path = "./crates/dev-maker/bitmex_hourly_rates.json";
    let file = File::open(path).expect("To be able to find this file");
    let reader = BufReader::new(file);
    serde_json::from_reader(reader).expect("to be able to deserialize from json")
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/orderbook-client/src/lib.rs | crates/orderbook-client/src/lib.rs | use anyhow::anyhow;
use anyhow::Context;
use anyhow::Result;
use async_stream::stream;
use futures::stream::SplitSink;
use futures::SinkExt;
use futures::Stream;
use futures::StreamExt;
use secp256k1::Message;
use tokio_tungstenite_wasm as tungstenite;
use tokio_tungstenite_wasm::WebSocketStream;
use xxi_node::commons::create_sign_message;
use xxi_node::commons::OrderbookRequest;
use xxi_node::commons::Signature;
use xxi_node::commons::AUTH_SIGN_MESSAGE;
/// Connects to the 10101 orderbook WebSocket API.
///
/// If the connection needs authentication please use `subscribe_with_authentication` instead.
///
/// Returns the write half of the socket plus a stream of incoming text messages.
pub async fn subscribe(
    url: String,
) -> Result<(
    SplitSink<WebSocketStream, tungstenite::Message>,
    impl Stream<Item = Result<String, anyhow::Error>> + Unpin,
)> {
    // No signature, FCM token, version or OS metadata for unauthenticated connections.
    subscribe_impl(None, url, None, None, None).await
}
/// Connects to the orderbook WebSocket API with authentication.
///
/// It subscribes and yields all messages.
///
/// The `authenticate` closure signs the static authentication challenge; `fcm_token`,
/// `version` and `os` are optional client metadata forwarded to the server.
pub async fn subscribe_with_authentication(
    url: String,
    authenticate: impl Fn(Message) -> Signature,
    fcm_token: Option<String>,
    version: Option<String>,
    os: Option<String>,
) -> Result<(
    SplitSink<WebSocketStream, tungstenite::Message>,
    impl Stream<Item = Result<String, anyhow::Error>> + Unpin,
)> {
    let signature = create_auth_message_signature(authenticate);
    subscribe_impl(Some(signature), url, fcm_token, version, os).await
}
/// Signs the static authentication message with the provided signing closure.
pub fn create_auth_message_signature(authenticate: impl Fn(Message) -> Signature) -> Signature {
    let message = create_sign_message(AUTH_SIGN_MESSAGE.to_vec());
    authenticate(message)
}
/// Connects to the orderbook WebSocket API and yields all messages.
///
/// When `signature` is provided, an `Authenticate` message is sent right after connecting.
async fn subscribe_impl(
    signature: Option<Signature>,
    url: String,
    fcm_token: Option<String>,
    version: Option<String>,
    os: Option<String>,
) -> Result<(
    SplitSink<WebSocketStream, tungstenite::Message>,
    impl Stream<Item = Result<String>> + Unpin,
)> {
    tracing::debug!("Connecting to orderbook API");
    let mut connection = tokio_tungstenite_wasm::connect(url.clone())
        .await
        .context("Could not connect to websocket")?;
    tracing::info!("Connected to orderbook realtime API");
    if let Some(signature) = signature {
        // Best effort: the send result is deliberately ignored.
        let _ = connection
            .send(tungstenite::Message::try_from(
                OrderbookRequest::Authenticate {
                    fcm_token,
                    version,
                    signature,
                    os,
                },
            )?)
            .await;
    }
    let (sink, mut stream) = connection.split();
    // Adapt the raw websocket stream into a stream of text payloads: "pong" and non-text
    // frames are filtered out, errors and clean closure end the stream.
    let stream = stream! {
        loop {
            tokio::select! {
                msg = stream.next() => {
                    let msg = match msg {
                        Some(Ok(msg)) => {
                            msg
                        },
                        None => {
                            // Stream closed cleanly.
                            return;
                        }
                        Some(Err(e)) => {
                            yield Err(anyhow!(e));
                            return;
                        }
                    };
                    match msg {
                        tungstenite::Message::Text(s) if s.eq_ignore_ascii_case("pong") => {
                            tracing::trace!("Received pong");
                            continue;
                        }
                        tungstenite::Message::Text(text) => {
                            yield Ok(text);
                        }
                        other => {
                            tracing::trace!("Unsupported message: {:?}", other);
                            continue;
                        }
                    }
                }
            }
        }
    };
    Ok((sink, stream.boxed()))
}
#[cfg(test)]
mod test {
    use crate::create_sign_message;
    use secp256k1::SecretKey;
    use secp256k1::SECP256K1;
    use std::str::FromStr;
    /// Signing a fixed message with a fixed key must yield a stable, known signature.
    #[test]
    fn test_signature_get() {
        let secret_key = test_secret_key();
        let message = create_sign_message(b"Hello it's me Mario".to_vec());
        let signature = secret_key.sign_ecdsa(message);
        let should_signature = secp256k1::ecdsa::Signature::from_str(
            "304402202f2545f818a5dac9311157d75065156b141e5a6437e817d1d75f9fab084e46940220757bb6f0916f83b2be28877a0d6b05c45463794e3c8c99f799b774443575910d",
        )
        .unwrap();
        assert_eq!(signature, should_signature);
    }
    /// Deterministic 32-byte key used by both tests.
    fn test_secret_key() -> SecretKey {
        SecretKey::from_slice(&[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
            24, 25, 26, 27, 27, 29, 30, 31,
        ])
        .unwrap()
    }
    /// A signature produced here must verify against the corresponding public key.
    #[test]
    fn test_verify_signature() {
        let secret_key = test_secret_key();
        let message = create_sign_message(b"Hello it's me Mario".to_vec());
        let signature = secret_key.sign_ecdsa(message);
        let pubkey = secret_key.public_key(SECP256K1);
        let msg = create_sign_message(b"Hello it's me Mario".to_vec());
        signature.verify(&msg, &pubkey).unwrap();
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/orderbook-client/examples/authenticated.rs | crates/orderbook-client/examples/authenticated.rs | use anyhow::Result;
use futures::never::Never;
use futures::TryStreamExt;
use secp256k1::SecretKey;
use secp256k1::SECP256K1;
use std::time::Duration;
use xxi_node::commons::Signature;
/// Example: connect to a local orderbook with authentication and log every event, reconnecting
/// forever (hence the `Never` return type).
#[tokio::main]
async fn main() -> Result<Never> {
    tracing_subscriber::fmt()
        .with_env_filter("info,orderbook_client=trace")
        .init();
    // Deterministic 32-byte example key — not for real use.
    let secret_key =
        SecretKey::from_slice(&b"bring sally up, bring sally down"[..]).expect("valid secret key");
    let url = "ws://localhost:8000/api/orderbook/websocket".to_string();
    // Closure the client uses to sign the authentication challenge message.
    let authenticate = move |msg| {
        let signature = secret_key.sign_ecdsa(msg);
        Signature {
            pubkey: secret_key.public_key(SECP256K1),
            signature,
        }
    };
    // Subscribe, drain events until the stream ends or errors, wait 2s, then reconnect.
    loop {
        let (_, mut stream) = orderbook_client::subscribe_with_authentication(
            url.clone(),
            &authenticate,
            None,
            None,
            None,
        )
        .await?;
        loop {
            match stream.try_next().await {
                Ok(Some(event)) => tracing::info!(%event, "Event received"),
                Ok(None) => {
                    tracing::error!("Stream ended");
                    break;
                }
                Err(error) => {
                    tracing::error!(%error, "Stream ended");
                    break;
                }
            }
        }
        tokio::time::sleep(Duration::from_secs(2)).await;
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/lnd-bridge/src/lib.rs | crates/lnd-bridge/src/lib.rs | use anyhow::anyhow;
use anyhow::Context;
use anyhow::Result;
use async_stream::stream;
use futures::Stream;
use futures::StreamExt;
use reqwest::Method;
use serde::Deserialize;
use serde::Serialize;
use serde::Serializer;
use tokio_tungstenite::tungstenite;
use tokio_tungstenite::tungstenite::client::IntoClientRequest;
/// Thin REST/websocket client for lnd's invoice API.
#[derive(Clone)]
pub struct LndBridge {
    pub client: reqwest::Client,
    /// Host and port of the lnd REST endpoint, without scheme.
    pub endpoint: String,
    /// Macaroon sent verbatim in the `Grpc-Metadata-macaroon` header.
    pub macaroon: String,
    /// Whether to use https/wss instead of http/ws.
    pub secure: bool,
}
/// Wrapper around the `result` field of lnd's invoice-subscription messages.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct InvoiceResult {
    pub result: Invoice,
}
/// An lnd invoice. lnd's REST API encodes u64 fields as strings, hence the custom
/// (de)serializers.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Invoice {
    pub memo: String,
    #[serde(deserialize_with = "string_as_u64", serialize_with = "u64_as_string")]
    pub expiry: u64,
    #[serde(deserialize_with = "string_as_u64", serialize_with = "u64_as_string")]
    pub amt_paid_sat: u64,
    pub state: InvoiceState,
    pub payment_request: String,
    pub r_hash: String,
    #[serde(deserialize_with = "string_as_u64", serialize_with = "u64_as_string")]
    pub add_index: u64,
    #[serde(deserialize_with = "string_as_u64", serialize_with = "u64_as_string")]
    pub settle_index: u64,
}
/// Request body for creating a hodl invoice (`/v2/invoices/hodl`).
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct InvoiceParams {
    pub value: u64,
    pub memo: String,
    pub expiry: u64,
    /// Payment hash; presumably base64-encoded as produced by the example code — verify.
    pub hash: String,
}
/// Response returned by lnd when creating an invoice.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct InvoiceResponse {
    #[serde(deserialize_with = "string_as_u64", serialize_with = "u64_as_string")]
    pub add_index: u64,
    pub payment_addr: String,
    pub payment_request: String,
}
/// Request body for settling a hodl invoice with its preimage.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct SettleInvoice {
    pub preimage: String,
}
/// Request body for canceling an invoice by payment hash.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct CancelInvoice {
    pub payment_hash: String,
}
/// Lifecycle state of an lnd invoice, matching lnd's upper-case wire representation.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub enum InvoiceState {
    #[serde(rename = "OPEN")]
    Open,
    #[serde(rename = "SETTLED")]
    Settled,
    #[serde(rename = "CANCELED")]
    Canceled,
    #[serde(rename = "ACCEPTED")]
    Accepted,
}
fn string_as_u64<'de, T, D>(de: D) -> Result<T, D::Error>
where
D: serde::Deserializer<'de>,
T: std::str::FromStr,
<T as std::str::FromStr>::Err: std::fmt::Display,
{
String::deserialize(de)?
.parse()
.map_err(serde::de::Error::custom)
}
/// Serializes a `u64` as its decimal string representation, as expected by lnd's REST API.
pub fn u64_as_string<S>(x: &u64, serializer: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    let text = x.to_string();
    serializer.serialize_str(text.as_str())
}
impl LndBridge {
pub fn new(endpoint: String, macaroon: String, secure: bool) -> Self {
Self {
client: reqwest::Client::new(),
endpoint,
macaroon,
secure,
}
}
pub async fn settle_invoice(&self, preimage: String) -> Result<()> {
let builder = self.client.request(
Method::POST,
format!(
"{}://{}/v2/invoices/settle",
if self.secure { "https" } else { "http" },
self.endpoint
),
);
let resp = builder
.header("content-type", "application/json")
.header("Grpc-Metadata-macaroon", self.macaroon.clone())
.json(&SettleInvoice { preimage })
.send()
.await?;
resp.error_for_status()?.text().await?;
Ok(())
}
pub async fn create_invoice(&self, params: InvoiceParams) -> Result<InvoiceResponse> {
let builder = self.client.request(
Method::POST,
format!(
"{}://{}/v2/invoices/hodl",
if self.secure { "https" } else { "http" },
self.endpoint
),
);
let resp = builder
.header("content-type", "application/json")
.header("Grpc-Metadata-macaroon", self.macaroon.clone())
.json(¶ms)
.send()
.await?;
let invoice: InvoiceResponse = resp.error_for_status()?.json().await?;
Ok(invoice)
}
pub async fn cancel_invoice(&self, r_hash: String) -> Result<()> {
let builder = self.client.request(
Method::POST,
format!(
"{}://{}/v2/invoices/cancel",
if self.secure { "https" } else { "http" },
self.endpoint
),
);
let resp = builder
.header("content-type", "application/json")
.header("Grpc-Metadata-macaroon", self.macaroon.clone())
.json(&CancelInvoice {
payment_hash: r_hash,
})
.send()
.await?;
resp.error_for_status()?.text().await?;
Ok(())
}
/// Subscribes to an invoice update for a given `r_hash` to the lnd api.
pub fn subscribe_to_invoice(
&self,
r_hash: String,
) -> impl Stream<Item = Result<Invoice>> + Unpin + '_ {
let stream = stream! {
tracing::debug!("Connecting to lnd websocket API");
let url_str = &*format!("{}://{}/v2/invoices/subscribe/{r_hash}", if self.secure { "wss" } else { "ws" }, self.endpoint);
let url = url::Url::parse(url_str)?;
let mut req = url.into_client_request()?;
let headers = req.headers_mut();
headers.insert("Grpc-Metadata-macaroon", self.macaroon.parse().map_err(|e| anyhow!(format!("{e:#}")))?);
let (mut connection, _) = tokio_tungstenite::connect_async(req)
.await
.context("Could not connect to websocket")?;
tracing::info!("Connected to lnd websocket API");
loop {
match connection.next().await {
Some(Ok(msg)) => match msg {
tungstenite::Message::Text(text) => {
match serde_json::from_str::<InvoiceResult>(&text) {
Ok(invoice) => yield Ok(invoice.result),
Err(e) => yield Err(anyhow!(format!("{text}. Error: {e:#}")))
}
}
tungstenite::Message::Ping(_) => {
tracing::trace!("Received ping from lnd");
}
other => {
tracing::trace!("Unsupported message: {:?}", other);
continue;
}
},
None => return,
Some(Err(e)) => {
yield Err(anyhow!(e));
return;
}
}
}
};
stream.boxed()
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/lnd-bridge/examples/create_invoice_api.rs | crates/lnd-bridge/examples/create_invoice_api.rs | use anyhow::Result;
use base64::engine::general_purpose;
use base64::Engine;
use lnd_bridge::InvoiceParams;
use rand::Rng;
use sha2::Digest;
use sha2::Sha256;
#[tokio::main]
async fn main() -> Result<()> {
tracing_subscriber::fmt()
.with_env_filter("info,lnd_bridge=trace")
.init();
let macaroon = "[enter macroon here]".to_string();
let lnd_bridge = lnd_bridge::LndBridge::new("localhost:18080".to_string(), macaroon, false);
let mut rng = rand::thread_rng();
let pre_image: [u8; 32] = rng.gen();
tracing::info!("{pre_image:?}");
let mut hasher = Sha256::new();
hasher.update(pre_image);
let r_hash = hasher.finalize();
let r_hash = general_purpose::STANDARD.encode(r_hash);
let pre_image = general_purpose::URL_SAFE.encode(pre_image);
tracing::info!("pre_image: {pre_image}");
tracing::info!("r_hash: {r_hash}");
let params = InvoiceParams {
value: 10101,
memo: "Fund your 10101 position".to_string(),
expiry: 5 * 60, // 5 minutes
hash: r_hash,
};
let response = lnd_bridge.create_invoice(params).await?;
tracing::info!("Response: {response:?}");
Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/lnd-bridge/examples/cancel_invoice_api.rs | crates/lnd-bridge/examples/cancel_invoice_api.rs | use anyhow::Result;
#[tokio::main]
async fn main() -> Result<()> {
tracing_subscriber::fmt()
.with_env_filter("info,lnd_bridge=trace")
.init();
let macaroon = "[enter macroon here]".to_string();
let lnd_bridge = lnd_bridge::LndBridge::new("localhost:18080".to_string(), macaroon, false);
let r_hash = "".to_string();
lnd_bridge.cancel_invoice(r_hash).await?;
Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/lnd-bridge/examples/subscribe_invoice_api.rs | crates/lnd-bridge/examples/subscribe_invoice_api.rs | use anyhow::Result;
use futures_util::TryStreamExt;
#[tokio::main]
async fn main() -> Result<()> {
tracing_subscriber::fmt()
.with_env_filter("info,lnd_bridge=trace")
.init();
let macaroon = "[enter macaroon here]".to_string();
let lnd_bridge = lnd_bridge::LndBridge::new("localhost:18080".to_string(), macaroon, false);
let r_hash = "UPJS32pkCZlzhMAYsEYnPkMq0AD8Vnnd6BnHcGQnvBw=".to_string();
let mut stream = lnd_bridge.subscribe_to_invoice(r_hash);
while let Some(result) = stream.try_next().await? {
tracing::info!("{result:?}");
}
Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/lnd-bridge/examples/settle_invoice_api.rs | crates/lnd-bridge/examples/settle_invoice_api.rs | use anyhow::Result;
use base64::engine::general_purpose;
use base64::Engine;
use sha2::Digest;
use sha2::Sha256;
#[tokio::main]
async fn main() -> Result<()> {
tracing_subscriber::fmt()
.with_env_filter("info,lnd_bridge=trace")
.init();
let macaroon = "[enter macaroon here]".to_string();
let lnd_bridge = lnd_bridge::LndBridge::new("localhost:18080".to_string(), macaroon, false);
let pre_image = "5PfDXnydoLscQ2qk-0WR94TY9zWAXMcN8A2-0NW2RJw=".to_string();
let mut hasher = Sha256::new();
hasher.update(pre_image.clone());
let hash = hasher.finalize();
let hash = general_purpose::STANDARD.encode(hash);
tracing::info!("r_hash: {hash}");
lnd_bridge.settle_invoice(pre_image).await?;
Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/lnd-bridge/examples/mock.rs | crates/lnd-bridge/examples/mock.rs | use anyhow::Context;
use anyhow::Result;
use axum::extract::ws::Message;
use axum::extract::ws::WebSocketUpgrade;
use axum::extract::Extension;
use axum::extract::Json;
use axum::extract::Query;
use axum::extract::State;
use axum::http::HeaderMap;
use axum::http::StatusCode;
use axum::response::IntoResponse;
use axum::response::Response;
use axum::routing::get;
use axum::routing::post;
use axum::Router;
use base64::engine::general_purpose;
use base64::Engine;
use lnd_bridge::CancelInvoice;
use lnd_bridge::Invoice;
use lnd_bridge::InvoiceParams;
use lnd_bridge::InvoiceResponse;
use lnd_bridge::InvoiceResult;
use lnd_bridge::InvoiceState;
use lnd_bridge::SettleInvoice;
use serde::Deserialize;
use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::RwLock;
use time::macros::format_description;
use tokio::sync::broadcast;
use tower::ServiceBuilder;
use tower_http::trace::TraceLayer;
use tracing::level_filters::LevelFilter;
use tracing_subscriber::filter::Directive;
use tracing_subscriber::fmt::time::UtcTime;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::Layer;
#[tokio::main]
async fn main() -> Result<()> {
init_tracing(LevelFilter::DEBUG)?;
let (tx, _rx) = broadcast::channel::<String>(100);
// Build the Axum router
let app = Router::new()
.route("/v2/invoices/subscribe/:r_hash", get(subscribe_invoices))
.route("/v2/invoices/hodl", post(create_invoice))
.route("/v2/invoices/settle", post(settle_invoice))
.route("/v2/invoices/cancel", post(cancel_invoice))
.route("/pay_invoice", post(pay_invoice))
.layer(
ServiceBuilder::new()
.layer(TraceLayer::new_for_http())
.layer(Extension(tx.clone())),
)
.with_state(Arc::new(RwLock::new("".to_string())));
let addr = SocketAddr::from(([0, 0, 0, 0], 18080));
tracing::info!("Listening on http://{}", addr);
axum::Server::bind(&addr)
.serve(app.into_make_service())
.await?;
Ok(())
}
async fn create_invoice(
Extension(tx): Extension<broadcast::Sender<String>>,
State(state): State<Arc<RwLock<String>>>,
headers: HeaderMap,
Json(params): Json<InvoiceParams>,
) -> impl IntoResponse {
match headers.get("Grpc-Metadata-macaroon") {
Some(_) => {
let payment_request = "lntbs101010n1pnyw6wppp59sgej6qrv25s6k7y4eg2e43kqjywhlfd9knk0kvuuluv0jzlx4jqdp8ge6kuepq09hh2u3qxycrzvp3ypcx7umfw35k7mscqzzsxqzfvsp5zuekkq7kfall8gkfu4a9f8d90nma7z2hhe026kka4k7tfnpekamq9qxpqysgq265wu2x0hrujk2lyuhftqa9drpte8tp69gd5jehjxqyq526c9ayzy2zyx9eeacj0zvmnz874e59th37un8w280q8dyc5y2pjyy6c6ngqgp78j3".to_string();
let payment_addr = "mFIzgKvND7dKGkKdSeEqkR29c22dULKJrQZ-RHP4_I4=".to_string();
let response = InvoiceResponse {
add_index: 1,
payment_addr: payment_addr.clone(),
payment_request: payment_request.clone(),
};
let result = InvoiceResult {
result: Invoice {
memo: params.memo,
expiry: params.expiry,
amt_paid_sat: 0,
state: InvoiceState::Open,
payment_request,
r_hash: params.hash.clone(),
add_index: 1,
settle_index: 2,
},
};
let hash = general_purpose::URL_SAFE
.decode(¶ms.hash)
.expect("to decode");
*state.write().expect("") = general_purpose::STANDARD.encode(hash);
let message = serde_json::to_string(&result).expect("to serialize");
let _ = tx.send(message);
(StatusCode::OK, Json(response)).into_response()
}
None => Response::builder()
.status(StatusCode::UNAUTHORIZED)
.body::<String>("Missing macaroon".into())
.expect("body")
.into_response(),
}
}
async fn settle_invoice(
Extension(tx): Extension<broadcast::Sender<String>>,
State(state): State<Arc<RwLock<String>>>,
headers: HeaderMap,
Json(_): Json<SettleInvoice>,
) -> impl IntoResponse {
match headers.get("Grpc-Metadata-macaroon") {
Some(_) => {
let message = serde_json::to_string(&InvoiceResult {
result: Invoice {
memo: "".to_string(),
expiry: 0,
amt_paid_sat: 0,
state: InvoiceState::Settled,
payment_request: "".to_string(),
r_hash: state.read().expect("").to_string(),
add_index: 0,
settle_index: 0,
},
})
.expect("to serialize");
let _ = tx.send(message);
(StatusCode::OK, Json(())).into_response()
}
None => Response::builder()
.status(StatusCode::UNAUTHORIZED)
.body::<String>("Missing macaroon".into())
.expect("body")
.into_response(),
}
}
async fn cancel_invoice(
Extension(tx): Extension<broadcast::Sender<String>>,
State(state): State<Arc<RwLock<String>>>,
headers: HeaderMap,
Json(_): Json<CancelInvoice>,
) -> impl IntoResponse {
match headers.get("Grpc-Metadata-macaroon") {
Some(_) => {
let message = serde_json::to_string(&InvoiceResult {
result: Invoice {
memo: "".to_string(),
expiry: 0,
amt_paid_sat: 0,
state: InvoiceState::Canceled,
payment_request: "".to_string(),
r_hash: state.read().expect("").to_string(),
add_index: 0,
settle_index: 0,
},
})
.expect("to serialize");
let _ = tx.send(message);
(StatusCode::OK, Json(())).into_response()
}
None => Response::builder()
.status(StatusCode::UNAUTHORIZED)
.body::<String>("Missing macaroon".into())
.expect("body")
.into_response(),
}
}
async fn pay_invoice(
Extension(tx): Extension<broadcast::Sender<String>>,
State(state): State<Arc<RwLock<String>>>,
) -> impl IntoResponse {
let message = serde_json::to_string(&InvoiceResult {
result: Invoice {
memo: "".to_string(),
expiry: 0,
amt_paid_sat: 0,
state: InvoiceState::Accepted,
payment_request: "".to_string(),
r_hash: state.read().expect("").to_string(),
add_index: 0,
settle_index: 0,
},
})
.expect("to serialize");
let _ = tx.send(message);
StatusCode::OK
}
#[derive(Deserialize)]
struct SubscribeQuery {
settle_index: Option<u64>,
}
async fn subscribe_invoices(
ws: WebSocketUpgrade,
Query(params): Query<SubscribeQuery>,
Extension(tx): Extension<broadcast::Sender<String>>,
headers: HeaderMap,
) -> impl IntoResponse {
match headers.get("Grpc-Metadata-macaroon") {
Some(_) => ws.on_upgrade(move |socket| handle_socket(socket, tx, params.settle_index)),
None => Response::builder()
.status(StatusCode::UNAUTHORIZED)
.body::<String>("Missing macaroon".into())
.expect("body")
.into_response(),
}
}
async fn handle_socket(
mut socket: axum::extract::ws::WebSocket,
tx: broadcast::Sender<String>,
_settle_index: Option<u64>,
) {
let mut rx = tx.subscribe();
tokio::spawn({
async move {
while let Ok(msg) = rx.recv().await {
match serde_json::from_str::<InvoiceResult>(&msg) {
Ok(invoice) => {
if let Err(e) = socket.send(Message::Text(msg)).await {
tracing::error!("Failed to send msg on socket. Error: {e:#}");
}
if matches!(
invoice.result.state,
InvoiceState::Canceled | InvoiceState::Settled
) {
return;
}
}
Err(e) => {
tracing::error!("Failed to parse msg. Error: {e:#}");
}
}
}
}
});
}
const RUST_LOG_ENV: &str = "RUST_LOG";
pub fn init_tracing(level: LevelFilter) -> Result<()> {
if level == LevelFilter::OFF {
return Ok(());
}
let mut filter = EnvFilter::new("")
.add_directive(Directive::from(level))
.add_directive("hyper=warn".parse()?);
// Parse additional log directives from env variable
let filter = match std::env::var_os(RUST_LOG_ENV).map(|s| s.into_string()) {
Some(Ok(env)) => {
for directive in env.split(',') {
#[allow(clippy::print_stdout)]
match directive.parse() {
Ok(d) => filter = filter.add_directive(d),
Err(e) => println!("WARN ignoring log directive: `{directive}`: {e}"),
};
}
filter
}
_ => filter,
};
let fmt_layer = tracing_subscriber::fmt::layer()
.with_writer(std::io::stderr)
.with_ansi(true);
let fmt_layer = fmt_layer
.with_timer(UtcTime::new(format_description!(
"[year]-[month]-[day] [hour]:[minute]:[second]"
)))
.boxed();
tracing_subscriber::registry()
.with(filter)
.with(fmt_layer)
.try_init()
.context("Failed to init tracing")?;
tracing::info!("Initialized logger");
Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/bitmex-client/src/lib.rs | crates/bitmex-client/src/lib.rs | pub mod client;
pub mod models;
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/bitmex-client/src/client.rs | crates/bitmex-client/src/client.rs | use crate::models::ContractSymbol;
use crate::models::ContractSymbol::XbtUsd;
use crate::models::GetInstrumentRequest;
use crate::models::GetPositionRequest;
use crate::models::Instrument;
use crate::models::Network;
use crate::models::OrdType;
use crate::models::Order;
use crate::models::Position;
use crate::models::PostOrderRequest;
use crate::models::Request;
use crate::models::Side;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use hex::encode as hexify;
use reqwest;
use reqwest::Method;
use reqwest::Response;
use reqwest::Url;
use ring::hmac;
use serde::de::DeserializeOwned;
use serde::Deserialize;
use serde::Serialize;
use serde_json::from_str;
use serde_json::to_string as to_jstring;
use serde_urlencoded::to_string as to_ustring;
use std::ops::Add;
use std::time::Duration;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;
#[derive(Clone)]
pub struct Client {
url: String,
credentials: Option<Credentials>,
client: reqwest::Client,
}
impl Client {
pub fn new(network: Network) -> Self {
Self {
client: reqwest::Client::new(),
url: network.to_url(),
credentials: None,
}
}
pub fn with_credentials(self, api_key: impl ToString, secret: impl ToString) -> Self {
Self {
credentials: Some(Credentials::new(api_key.to_string(), secret.to_string())),
..self
}
}
pub fn is_signed_in(&self) -> bool {
self.credentials.is_some()
}
pub async fn create_order(
&self,
symbol: ContractSymbol,
quantity: i32,
side: Side,
text: Option<String>,
) -> Result<Order> {
let order = self
.send_request(PostOrderRequest {
symbol,
side: Some(side),
order_qty: Some(quantity),
ord_type: Some(OrdType::Market),
text,
})
.await?;
Ok(order)
}
/// Retrieve the position information for all contract symbols.
pub async fn positions(&self) -> Result<Vec<Position>> {
let positions = self.send_request(GetPositionRequest).await?;
Ok(positions)
}
/// Returns the latest instrument
pub async fn latest_instrument(&self) -> Result<Instrument> {
let instruments = self
.send_request(GetInstrumentRequest {
symbol: Some(XbtUsd),
count: Some(1),
end_time: None,
start_time: None,
reverse: Some(true),
})
.await?
.first()
.cloned();
let instrument = instruments.context("No instrument found")?;
Ok(instrument)
}
async fn send_request<R>(&self, req: R) -> Result<R::Response>
where
R: Request,
R::Response: DeserializeOwned,
{
let url = format!("{}{}", self.url, R::ENDPOINT);
let mut url = Url::parse(&url)?;
if matches!(R::METHOD, Method::GET | Method::DELETE) && R::HAS_PAYLOAD {
url.set_query(Some(&to_ustring(&req)?));
}
let body = match R::METHOD {
Method::PUT | Method::POST => to_jstring(&req)?,
_ => "".to_string(),
};
let mut builder = self.client.request(R::METHOD, url.clone());
if R::SIGNED {
let credentials = match &self.credentials {
None => {
bail!("Bitmex client not signed in")
}
Some(credentials) => credentials,
};
let start = SystemTime::now();
let expires = start
.duration_since(UNIX_EPOCH)
.expect("Time went backwards")
.add(Duration::from_secs(5))
.as_secs();
let (key, signature) = credentials.signature(R::METHOD, expires, &url, &body);
builder = builder
.header("api-expires", expires)
.header("api-key", key)
.header("api-signature", signature)
}
let resp = builder
.header("content-type", "application/json")
.body(body)
.send()
.await?;
let response = self.handle_response(resp).await?;
Ok(response)
}
async fn handle_response<T: DeserializeOwned>(&self, resp: Response) -> Result<T> {
let status = resp.status();
let content = resp.text().await?;
if status.is_success() {
match from_str::<T>(&content) {
Ok(ret) => Ok(ret),
Err(e) => {
bail!("Cannot deserialize '{}'. '{}'", content, e);
}
}
} else {
match from_str::<BitMEXErrorResponse>(&content) {
Ok(ret) => bail!("Bitmex error: {:?}", ret),
Err(e) => {
bail!("Cannot deserialize error '{}'. '{}'", content, e);
}
}
}
}
}
#[derive(Clone)]
struct Credentials {
api_key: String,
secret: String,
}
impl Credentials {
fn new(api_key: impl Into<String>, secret: impl Into<String>) -> Self {
Self {
api_key: api_key.into(),
secret: secret.into(),
}
}
fn signature(&self, method: Method, expires: u64, url: &Url, body: &str) -> (&str, String) {
// Signature: hex(HMAC_SHA256(apiSecret, verb + path + expires + data))
let signed_key = hmac::Key::new(hmac::HMAC_SHA256, self.secret.as_bytes());
let sign_message = match url.query() {
Some(query) => format!(
"{}{}?{}{}{}",
method.as_str(),
url.path(),
query,
expires,
body
),
None => format!("{}{}{}{}", method.as_str(), url.path(), expires, body),
};
let signature = hexify(hmac::sign(&signed_key, sign_message.as_bytes()));
(self.api_key.as_str(), signature)
}
}
#[cfg(test)]
mod test {
use super::Credentials;
use anyhow::Result;
use reqwest::Method;
use reqwest::Url;
#[test]
fn test_signature_get() -> Result<()> {
let tr = Credentials::new(
"LAqUlngMIQkIUjXMUreyu3qn",
"chNOOS4KvNXR_Xq4k4c9qsfoKWvnDecLATCRlcBwyKDYnWgO",
);
let (_, sig) = tr.signature(
Method::GET,
1518064236,
&Url::parse("http://a.com/api/v1/instrument")?,
"",
);
assert_eq!(
sig,
"c7682d435d0cfe87c16098df34ef2eb5a549d4c5a3c2b1f0f77b8af73423bf00"
);
Ok(())
}
#[test]
fn test_signature_get_param() -> Result<()> {
let tr = Credentials::new(
"LAqUlngMIQkIUjXMUreyu3qn",
"chNOOS4KvNXR_Xq4k4c9qsfoKWvnDecLATCRlcBwyKDYnWgO",
);
let (_, sig) = tr.signature(
Method::GET,
1518064237,
&Url::parse_with_params(
"http://a.com/api/v1/instrument",
&[("filter", r#"{"symbol": "XBTM15"}"#)],
)?,
"",
);
assert_eq!(
sig,
"e2f422547eecb5b3cb29ade2127e21b858b235b386bfa45e1c1756eb3383919f"
);
Ok(())
}
#[test]
fn test_signature_post() -> Result<()> {
let credentials = Credentials::new(
"LAqUlngMIQkIUjXMUreyu3qn",
"chNOOS4KvNXR_Xq4k4c9qsfoKWvnDecLATCRlcBwyKDYnWgO",
);
let (_, sig) = credentials.signature(
Method::POST,
1518064238,
&Url::parse("http://a.com/api/v1/order")?,
r#"{"symbol":"XBTM15","price":219.0,"clOrdID":"mm_bitmex_1a/oemUeQ4CAJZgP3fjHsA","orderQty":98}"#,
);
assert_eq!(
sig,
"1749cd2ccae4aa49048ae09f0b95110cee706e0944e6a14ad0b3a8cb45bd336b"
);
Ok(())
}
}
// The error response from bitmex;
#[derive(Deserialize, Serialize, Debug, Clone)]
pub(crate) struct BitMEXErrorResponse {
pub(crate) error: BitMEXErrorMessage,
}
#[derive(Deserialize, Serialize, Debug, Clone)]
pub(crate) struct BitMEXErrorMessage {
pub(crate) message: String,
pub(crate) name: String,
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/bitmex-client/src/models.rs | crates/bitmex-client/src/models.rs | use reqwest::Method;
use serde::de::DeserializeOwned;
use serde::Deserialize;
use serde::Serialize;
use time::OffsetDateTime;
use uuid::Uuid;
pub enum Network {
Mainnet,
Testnet,
}
impl Network {
pub fn to_url(&self) -> String {
match self {
Network::Mainnet => "https://www.bitmex.com/api/v1".to_string(),
Network::Testnet => "https://testnet.bitmex.com/api/v1".to_string(),
}
}
}
pub trait Request: Serialize {
const METHOD: Method;
const SIGNED: bool = false;
const ENDPOINT: &'static str;
const HAS_PAYLOAD: bool = true;
type Response: DeserializeOwned;
#[inline]
fn no_payload(&self) -> bool {
!Self::HAS_PAYLOAD
}
}
/// Placement, Cancellation, Amending, and History
#[derive(Clone, Debug, Deserialize)]
pub struct Order {
#[serde(rename = "orderID")]
pub order_id: Uuid,
pub account: Option<i64>,
pub symbol: Option<String>,
pub side: Option<Side>,
#[serde(rename = "orderQty")]
pub order_qty: Option<i64>,
pub price: Option<f64>,
#[serde(rename = "displayQty")]
pub display_qty: Option<i64>,
#[serde(rename = "pegPriceType")]
pub peg_price_type: Option<PegPriceType>,
#[serde(rename = "ordType")]
pub ord_type: Option<OrdType>,
#[serde(rename = "ordStatus")]
pub ord_status: Option<OrderStatus>,
pub text: Option<String>,
#[serde(rename = "transactTime", with = "time::serde::rfc3339::option")]
pub transact_time: Option<OffsetDateTime>,
#[serde(with = "time::serde::rfc3339::option")]
pub timestamp: Option<OffsetDateTime>,
}
#[derive(Clone, Copy, Debug, Deserialize, Serialize, PartialEq, Eq)]
pub enum Side {
Buy,
Sell,
#[serde(rename = "")]
Unknown, // BitMEX sometimes has empty side due to unknown reason
}
#[derive(Clone, Copy, Debug, Deserialize, Serialize, PartialEq, Eq)]
pub enum OrderStatus {
Filled,
Open,
New,
#[serde(other)]
Unknown,
}
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Eq)]
pub enum ExecType {
Funding,
Trade,
#[serde(other)]
Unknown,
}
/// http://fixwiki.org/fixwiki/PegPriceType
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum PegPriceType {
LastPeg,
OpeningPeg,
MidPricePeg,
MarketPeg,
PrimaryPeg,
PegToVWAP,
TrailingStopPeg,
PegToLimitPrice,
ShortSaleMinPricePeg,
#[serde(rename = "")]
Unknown, // BitMEX sometimes has empty due to unknown reason
}
#[derive(Clone, Copy, Debug, Deserialize, Serialize, PartialEq, Eq)]
pub enum OrdType {
Market,
Limit,
Stop,
StopLimit,
MarketIfTouched,
LimitIfTouched,
MarketWithLeftOverAsLimit,
Pegged,
}
/// https://www.onixs.biz/fix-dictionary/5.0.SP2/tagNum_59.html
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum TimeInForce {
Day,
GoodTillCancel,
AtTheOpening,
ImmediateOrCancel,
FillOrKill,
GoodTillCrossing,
GoodTillDate,
AtTheClose,
GoodThroughCrossing,
AtCrossing,
}
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum ExecInst {
ParticipateDoNotInitiate,
AllOrNone,
MarkPrice,
IndexPrice,
LastPrice,
Close,
ReduceOnly,
Fixed,
#[serde(rename = "")]
Unknown, // BitMEX sometimes has empty due to unknown reason
}
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum ContingencyType {
OneCancelsTheOther,
OneTriggersTheOther,
OneUpdatesTheOtherAbsolute,
OneUpdatesTheOtherProportional,
#[serde(rename = "")]
Unknown, // BitMEX sometimes has empty due to unknown reason
}
/// Create a new order.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct PostOrderRequest {
/// Instrument symbol. e.g. 'XBTUSD'.
pub symbol: ContractSymbol,
/// Order side. Valid options: Buy, Sell. Defaults to 'Buy' unless `orderQty` is negative.
pub side: Option<Side>,
/// Order quantity in units of the instrument (i.e. contracts).
#[serde(rename = "orderQty", skip_serializing_if = "Option::is_none")]
pub order_qty: Option<i32>,
/// Order type. Valid options: Market, Limit, Stop, StopLimit, MarketIfTouched, LimitIfTouched,
/// Pegged. Defaults to 'Limit' when `price` is specified. Defaults to 'Stop' when `stopPx` is
/// specified. Defaults to 'StopLimit' when `price` and `stopPx` are specified.
#[serde(rename = "ordType", skip_serializing_if = "Option::is_none")]
pub ord_type: Option<OrdType>,
/// Optional order annotation. e.g. 'Take profit'.
#[serde(skip_serializing_if = "Option::is_none")]
pub text: Option<String>,
}
impl Request for PostOrderRequest {
const METHOD: Method = Method::POST;
const SIGNED: bool = true;
const ENDPOINT: &'static str = "/order";
const HAS_PAYLOAD: bool = true;
type Response = Order;
}
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Hash, Eq)]
pub enum ContractSymbol {
#[serde(rename = "XBTUSD")]
XbtUsd,
}
/// Get your positions.
#[derive(Clone, Debug, Serialize, Default)]
pub struct GetPositionRequest;
impl Request for GetPositionRequest {
const METHOD: Method = Method::GET;
const SIGNED: bool = true;
const ENDPOINT: &'static str = "/position";
const HAS_PAYLOAD: bool = true;
type Response = Vec<Position>;
}
/// Summary of Open and Closed Positions
#[derive(Clone, Debug, Deserialize)]
pub struct Position {
pub account: i64,
pub symbol: ContractSymbol,
pub currency: String,
pub underlying: Option<String>,
#[serde(rename = "quoteCurrency")]
pub quote_currency: Option<String>,
pub leverage: Option<f64>,
#[serde(rename = "crossMargin")]
pub cross_margin: Option<bool>,
#[serde(rename = "currentQty")]
pub current_qty: Option<i64>,
#[serde(rename = "maintMargin")]
pub maint_margin: Option<i64>,
#[serde(rename = "unrealisedPnl")]
pub unrealised_pnl: Option<i64>,
#[serde(rename = "liquidationPrice")]
pub liquidation_price: Option<f64>,
#[serde(with = "time::serde::rfc3339::option")]
pub timestamp: Option<OffsetDateTime>,
}
#[derive(Clone, Debug, Serialize)]
pub struct GetInstrumentRequest {
pub symbol: Option<ContractSymbol>,
/// Number of results to fetch.
pub count: Option<u64>,
/// If true, will sort results newest first.
pub reverse: Option<bool>,
/// Starting date filter for results.
#[serde(with = "time::serde::rfc3339::option", rename = "startTime")]
pub start_time: Option<OffsetDateTime>,
/// Ending date filter for results.
#[serde(with = "time::serde::rfc3339::option", rename = "endTime")]
pub end_time: Option<OffsetDateTime>,
}
impl Request for GetInstrumentRequest {
const METHOD: Method = Method::GET;
const SIGNED: bool = false;
const ENDPOINT: &'static str = "/instrument";
const HAS_PAYLOAD: bool = true;
type Response = Vec<Instrument>;
}
/// Note: only relevant fields have been added
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Instrument {
pub symbol: ContractSymbol,
#[serde(rename = "fundingTimestamp", with = "time::serde::rfc3339")]
pub funding_timestamp: OffsetDateTime,
#[serde(rename = "fundingInterval", with = "time::serde::rfc3339")]
pub funding_interval: OffsetDateTime,
#[serde(rename = "fundingRate")]
pub funding_rate: f64,
/// Predicted funding rate for the the next interval after funding_timestamp
#[serde(rename = "indicativeFundingRate")]
pub indicative_funding_rate: f64,
#[serde(with = "time::serde::rfc3339")]
pub timestamp: OffsetDateTime,
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/bitmex-client/examples/example.rs | crates/bitmex-client/examples/example.rs | use bitmex_client::client::Client;
use bitmex_client::models::ContractSymbol;
use bitmex_client::models::Network;
use bitmex_client::models::Side;
#[tokio::main]
async fn main() {
let api_key = "some_api_key";
let api_secret = "some_secret";
let client = Client::new(Network::Testnet).with_credentials(api_key, api_secret);
let _order = client
.create_order(
ContractSymbol::XbtUsd,
100,
Side::Buy,
Some("example".to_string()),
)
.await
.expect("To be able to post order");
let _positions = client
.positions()
.await
.expect("To be able to get positions");
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/src/setup.rs | crates/tests-e2e/src/setup.rs | use crate::app::refresh_wallet_info;
use crate::app::run_app;
use crate::app::submit_channel_opening_order;
use crate::app::AppHandle;
use crate::bitcoind::Bitcoind;
use crate::coordinator::Coordinator;
use crate::http::init_reqwest;
use crate::logger::init_tracing;
use crate::wait_until;
use bitcoin::address::NetworkUnchecked;
use bitcoin::Address;
use bitcoin::Amount;
use native::api;
use native::api::ContractSymbol;
use native::trade::order::api::NewOrder;
use native::trade::order::api::OrderType;
use native::trade::position::PositionState;
use xxi_node::node::rust_dlc_manager::manager::NB_CONFIRMATIONS;
pub struct TestSetup {
pub app: AppHandle,
pub coordinator: Coordinator,
pub bitcoind: Bitcoind,
}
impl TestSetup {
pub async fn new() -> Self {
init_tracing();
let client = init_reqwest();
let bitcoind = Bitcoind::new_local(client.clone());
// Coordinator setup
let coordinator = Coordinator::new_local(client.clone());
assert!(coordinator.is_running().await);
// App setup
let app = run_app(None).await;
assert_eq!(
app.rx.wallet_info().unwrap().balances.on_chain,
0,
"App should start with empty on-chain wallet"
);
assert_eq!(
app.rx.wallet_info().unwrap().balances.off_chain,
Some(0),
"App should start with empty off-chain wallet"
);
Self {
app,
coordinator,
bitcoind,
}
}
/// Funds the coordinator with [`amount`/`n_utxos`] utxos
///
/// E.g. if amount = 3 BTC, and n_utxos = 3, it would create 3 UTXOs a 1 BTC
pub async fn fund_coordinator(&self, amount: Amount, n_utxos: u64) {
// Ensure that the coordinator has a free UTXO available.
let address = self
.coordinator
.get_new_address()
.await
.unwrap()
.assume_checked();
let sats_per_fund = amount.to_sat() / n_utxos;
for _ in 0..n_utxos {
self.bitcoind
.send_to_address(&address, Amount::from_sat(sats_per_fund))
.await
.unwrap();
}
self.bitcoind.mine(1).await.unwrap();
self.sync_coordinator().await;
// TODO: Get coordinator balance to verify this claim.
tracing::info!("Successfully funded coordinator");
}
pub async fn fund_app(&self, fund_amount: Amount) {
let address = api::get_new_address().unwrap();
let address: Address<NetworkUnchecked> = address.parse().unwrap();
self.bitcoind
.send_to_address(&address.assume_checked(), fund_amount)
.await
.unwrap();
self.bitcoind.mine(1).await.unwrap();
wait_until!({
refresh_wallet_info();
self.app.rx.wallet_info().unwrap().balances.on_chain >= fund_amount.to_sat()
});
let on_chain_balance = self.app.rx.wallet_info().unwrap().balances.on_chain;
tracing::info!(%fund_amount, %on_chain_balance, "Successfully funded app");
}
/// Start test with a running app and a funded wallet.
pub async fn new_after_funding() -> Self {
let setup = Self::new().await;
setup.fund_coordinator(Amount::ONE_BTC, 2).await;
setup.fund_app(Amount::ONE_BTC).await;
setup
}
/// Start test with a running app with a funded wallet and an open position.
pub async fn new_with_open_position() -> Self {
let order = dummy_order();
Self::new_with_open_position_custom(order, 0, 0).await
}
/// Start test with a running app with a funded wallet and an open position based on a custom
/// [`NewOrder`].
pub async fn new_with_open_position_custom(
order: NewOrder,
coordinator_collateral_reserve: u64,
trader_collateral_reserve: u64,
) -> Self {
let setup = Self::new_after_funding().await;
let rx = &setup.app.rx;
tracing::info!(
?order,
%coordinator_collateral_reserve,
%trader_collateral_reserve,
"Opening a position"
);
submit_channel_opening_order(
order.clone(),
coordinator_collateral_reserve,
trader_collateral_reserve,
);
wait_until!(rx.order().is_some());
wait_until!(rx.position().is_some());
wait_until!(rx.position().unwrap().position_state == PositionState::Open);
// Wait for coordinator to open position.
tokio::time::sleep(std::time::Duration::from_secs(10)).await;
if NB_CONFIRMATIONS == 0 {
// No confirmations are required to get the channel/contract `Confirmed`, but the change
// output won't be added to the on-chain balance until we get one confirmation because
// of https://github.com/get10101/10101/issues/2286.
//
// We need to know about funding transaction change outputs so that we can accurately
// assert on on-chain balance changes after DLC channels are closed on-chain.
setup.bitcoind.mine(1).await.unwrap();
} else {
setup.bitcoind.mine(NB_CONFIRMATIONS as u16).await.unwrap();
}
tokio::time::sleep(std::time::Duration::from_secs(10)).await;
// Includes on-chain sync and DLC channels sync.
refresh_wallet_info();
setup.sync_coordinator().await;
setup
}
async fn sync_coordinator(&self) {
if let Err(e) = self.coordinator.sync_node().await {
tracing::error!("Got error from coordinator sync: {e:#}");
};
}
}
pub fn dummy_order() -> NewOrder {
NewOrder {
leverage: 2.0,
contract_symbol: ContractSymbol::BtcUsd,
direction: api::Direction::Long,
quantity: 1000.0,
order_type: Box::new(OrderType::Market),
stable: false,
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/src/app.rs | crates/tests-e2e/src/app.rs | use crate::bitcoind::Bitcoind;
use crate::test_subscriber::TestSubscriber;
use crate::test_subscriber::ThreadSafeSenders;
use crate::wait_until;
use native::api;
use native::api::DlcChannel;
use native::trade::order::api::NewOrder;
use tempfile::TempDir;
use tokio::task::block_in_place;
pub struct AppHandle {
pub rx: TestSubscriber,
_app_dir: TempDir,
_seed_dir: TempDir,
_handle: tokio::task::JoinHandle<()>,
_tx: ThreadSafeSenders,
}
impl AppHandle {
pub fn stop(&self) {
self._handle.abort()
}
}
pub async fn run_app(seed_phrase: Option<Vec<String>>) -> AppHandle {
let app_dir = TempDir::new().unwrap();
let seed_dir = TempDir::new().unwrap();
tracing::debug!(?app_dir, ?seed_dir, "Starting 10101 backend");
let _app_handle = {
let as_string = |dir: &TempDir| dir.path().to_str().unwrap().to_string();
let app_dir = as_string(&app_dir);
let seed_dir = as_string(&seed_dir);
native::api::set_config(test_config(), app_dir, seed_dir.clone()).unwrap();
if let Some(seed_phrase) = seed_phrase {
tokio::task::spawn_blocking({
let seed_dir = seed_dir.clone();
move || {
api::restore_from_seed_phrase(
seed_phrase.join(" "),
format!("{seed_dir}/regtest/seed"),
)
.unwrap();
}
})
.await
.unwrap();
}
tokio::task::spawn_blocking(move || native::api::run_in_test(seed_dir).unwrap())
};
let (rx, tx) = TestSubscriber::new().await;
let app = AppHandle {
_app_dir: app_dir,
_seed_dir: seed_dir,
_handle: _app_handle,
rx,
_tx: tx.clone(),
};
native::event::subscribe(tx);
wait_until!(app.rx.init_msg() == Some("10101 is ready.".to_string()));
wait_until!(app.rx.wallet_info().is_some()); // wait for initial wallet sync
block_in_place(move || {
api::register_beta("hello@10101.finance".to_string(), None).expect("to work")
});
app
}
/// Refresh the app's wallet information.
///
/// To call this make sure that you are either outside of a runtime or in a multi-threaded runtime
/// (i.e. use `flavor = "multi_thread"` in a `tokio::test`).
pub fn refresh_wallet_info() {
// We must `block_in_place` because calling `refresh_wallet_info` starts a new runtime and that
// cannot happen within another runtime.
block_in_place(move || api::refresh_wallet_info().unwrap());
}
/// Force close DLC channel.
///
/// To call this make sure that you are either outside of a runtime or in a multi-threaded runtime
/// (i.e. use `flavor = "multi_thread"` in a `tokio::test`).
pub async fn force_close_dlc_channel(bitcoind: &Bitcoind) {
// We need to move the blockchain time forward to be able to publish the buffer transaction
// (there is a timestamp timelock on it).
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
bitcoind.mine(10).await.unwrap();
// We must `block_in_place` because calling `force_close_channel` starts a new runtime and that
// cannot happen within another runtime.
block_in_place(move || api::force_close_channel().unwrap());
}
/// Get the ID of the currently open DLC channel, if there is one.
///
/// To call this make sure that you are either outside of a runtime or in a multi-threaded runtime
/// (i.e. use `flavor = "multi_thread"` in a `tokio::test`).
pub fn get_dlc_channel_id() -> Option<String> {
block_in_place(move || api::get_dlc_channel_id().unwrap())
}
pub fn get_dlc_channels() -> Vec<DlcChannel> {
block_in_place(move || api::list_dlc_channels().unwrap())
}
pub fn submit_order(order: NewOrder) -> String {
block_in_place(move || api::submit_order(order).unwrap())
}
pub fn submit_unfunded_channel_opening_order(
order: NewOrder,
coordinator_reserve: u64,
trader_reserve: u64,
estimated_margin: u64,
order_matching_fee: u64,
) -> anyhow::Result<()> {
block_in_place(move || {
api::submit_unfunded_channel_opening_order(
order,
coordinator_reserve,
trader_reserve,
estimated_margin,
order_matching_fee,
)
})?;
Ok(())
}
pub fn submit_channel_opening_order(
order: NewOrder,
coordinator_reserve: u64,
trader_reserve: u64,
) {
block_in_place(move || {
api::submit_channel_opening_order(order, coordinator_reserve, trader_reserve).unwrap()
});
}
// Values mostly taken from `environment.dart`
fn test_config() -> native::config::api::Config {
native::config::api::Config {
coordinator_pubkey: "02dd6abec97f9a748bf76ad502b004ce05d1b2d1f43a9e76bd7d85e767ffb022c9"
.to_string(),
electrs_endpoint: "http://127.0.0.1:3000".to_string(),
host: "127.0.0.1".to_string(),
p2p_port: 9045,
http_port: 8000,
network: "regtest".to_string(),
oracle_endpoint: "http://127.0.0.1:8081".to_string(),
oracle_pubkey: "16f88cf7d21e6c0f46bcbc983a4e3b19726c6c98858cc31c83551a88fde171c0"
.to_string(),
health_check_interval_secs: 1, // We want to measure health more often in tests
meme_endpoint: "https://localhost:8080/memes/".to_string(),
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/src/lib.rs | crates/tests-e2e/src/lib.rs | #![allow(clippy::unwrap_used)]
pub mod app;
pub mod bitcoind;
pub mod coordinator;
pub mod http;
pub mod lnd_mock;
pub mod logger;
pub mod setup;
pub mod test_flow;
pub mod test_subscriber;
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/src/http.rs | crates/tests-e2e/src/http.rs | use reqwest::Client;
pub fn init_reqwest() -> Client {
Client::builder()
.timeout(std::time::Duration::from_secs(30))
.build()
.unwrap()
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/src/lnd_mock.rs | crates/tests-e2e/src/lnd_mock.rs | use anyhow::Context;
use anyhow::Result;
use reqwest::Client;
use serde::Serialize;
/// A wrapper over the lnd mock HTTP API.
///
/// It does not aim to be complete, functionality will be added as needed.
pub struct LndMock {
client: Client,
host: String,
}
impl LndMock {
pub fn new(client: Client, host: &str) -> Self {
Self {
client,
host: host.to_string(),
}
}
pub fn new_local(client: Client) -> Self {
Self::new(client, "http://localhost:18080")
}
pub async fn pay_invoice(&self) -> Result<reqwest::Response> {
self.post::<()>("/pay_invoice", None).await
}
async fn post<T: Serialize>(&self, path: &str, body: Option<T>) -> Result<reqwest::Response> {
let request = self.client.post(format!("{0}{path}", self.host));
let request = match body {
Some(ref body) => {
let body = serde_json::to_string(body)?;
request
.header("Content-Type", "application/json")
.body(body)
}
None => request,
};
request
.send()
.await
.context("Could not send POST request to lnd mock")?
.error_for_status()
.context("Lnd mock did not return 200 OK")
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/src/test_subscriber.rs | crates/tests-e2e/src/test_subscriber.rs | use native::api::ContractSymbol;
use native::event::api::WalletInfo;
use native::event::subscriber::Subscriber;
use native::event::EventType;
use native::health::Service;
use native::health::ServiceStatus;
use native::health::ServiceUpdate;
use native::trade::order::Order;
use native::trade::position::Position;
use parking_lot::Mutex;
use rust_decimal::Decimal;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::watch;
pub struct Senders {
wallet_info: watch::Sender<Option<WalletInfo>>,
order: watch::Sender<Option<Order>>,
position: watch::Sender<Option<Position>>,
/// Init messages are simple strings
init_msg: watch::Sender<Option<String>>,
ask_price: watch::Sender<Option<Decimal>>,
bid_price: watch::Sender<Option<Decimal>>,
position_close: watch::Sender<Option<ContractSymbol>>,
service: watch::Sender<Option<ServiceUpdate>>,
}
/// Subscribes to events destined for the frontend (typically Flutter app) and
/// provides a convenient way to access the current state.
pub struct TestSubscriber {
wallet_info: watch::Receiver<Option<WalletInfo>>,
order: watch::Receiver<Option<Order>>,
position: watch::Receiver<Option<Position>>,
init_msg: watch::Receiver<Option<String>>,
ask_price: watch::Receiver<Option<Decimal>>,
bid_price: watch::Receiver<Option<Decimal>>,
position_close: watch::Receiver<Option<ContractSymbol>>,
services: Arc<Mutex<HashMap<Service, ServiceStatus>>>,
_service_map_updater: tokio::task::JoinHandle<()>,
}
impl TestSubscriber {
pub async fn new() -> (Self, ThreadSafeSenders) {
let (wallet_info_tx, wallet_info_rx) = watch::channel(None);
let (order_tx, order_rx) = watch::channel(None);
let (position_tx, position_rx) = watch::channel(None);
let (init_msg_tx, init_msg_rx) = watch::channel(None);
let (ask_prices_tx, ask_prices_rx) = watch::channel(None);
let (bid_prices_tx, bid_prices_rx) = watch::channel(None);
let (position_close_tx, position_close_rx) = watch::channel(None);
let (service_tx, mut service_rx) = watch::channel(None);
let senders = Senders {
wallet_info: wallet_info_tx,
order: order_tx,
position: position_tx,
init_msg: init_msg_tx,
ask_price: ask_prices_tx,
bid_price: bid_prices_tx,
position_close: position_close_tx,
service: service_tx,
};
let services = Arc::new(Mutex::new(HashMap::new()));
let _service_map_updater = {
let services = services.clone();
tokio::spawn(async move {
while let Ok(()) = service_rx.changed().await {
if let Some(ServiceUpdate { service, status }) = *service_rx.borrow() {
tracing::debug!(?service, ?status, "Updating status in the services map");
services.lock().insert(service, status);
}
}
panic!("service_rx channel closed");
})
};
let subscriber = Self {
wallet_info: wallet_info_rx,
order: order_rx,
position: position_rx,
init_msg: init_msg_rx,
ask_price: ask_prices_rx,
bid_price: bid_prices_rx,
position_close: position_close_rx,
services,
_service_map_updater,
};
(subscriber, ThreadSafeSenders(Arc::new(Mutex::new(senders))))
}
pub fn wallet_info(&self) -> Option<WalletInfo> {
self.wallet_info.borrow().as_ref().cloned()
}
pub fn order(&self) -> Option<Order> {
self.order.borrow().as_ref().cloned()
}
pub fn position(&self) -> Option<Position> {
self.position.borrow().as_ref().cloned()
}
pub fn init_msg(&self) -> Option<String> {
self.init_msg.borrow().as_ref().cloned()
}
pub fn ask_price(&self) -> Option<Decimal> {
self.ask_price.borrow().as_ref().cloned()
}
pub fn bid_price(&self) -> Option<Decimal> {
self.bid_price.borrow().as_ref().cloned()
}
pub fn position_close(&self) -> Option<ContractSymbol> {
self.position_close.borrow().as_ref().cloned()
}
pub fn status(&self, service: Service) -> ServiceStatus {
self.services
.lock()
.get(&service)
.copied()
.unwrap_or_default()
}
}
impl Subscriber for Senders {
fn notify(&self, event: &native::event::EventInternal) {
if let Err(e) = self.handle_event(event) {
tracing::error!(?e, ?event, "Failed to handle event");
}
}
fn events(&self) -> Vec<EventType> {
vec![
EventType::Init,
EventType::WalletInfoUpdateNotification,
EventType::OrderUpdateNotification,
EventType::PositionUpdateNotification,
EventType::PositionClosedNotification,
EventType::AskPriceUpdateNotification,
EventType::BidPriceUpdateNotification,
EventType::ServiceHealthUpdate,
EventType::ChannelStatusUpdate,
]
}
}
impl Senders {
fn handle_event(&self, event: &native::event::EventInternal) -> anyhow::Result<()> {
tracing::trace!(?event, "Received event");
match event {
native::event::EventInternal::Init(init) => {
self.init_msg.send(Some(init.to_string()))?;
}
native::event::EventInternal::Log(_log) => {
// Ignore log events for now
}
native::event::EventInternal::OrderUpdateNotification(order) => {
self.order.send(Some(order.clone()))?;
}
native::event::EventInternal::WalletInfoUpdateNotification(wallet_info) => {
self.wallet_info.send(Some(wallet_info.clone()))?;
}
native::event::EventInternal::PositionUpdateNotification(position) => {
self.position.send(Some(*position))?;
}
native::event::EventInternal::PositionCloseNotification(contract_symbol) => {
self.position_close.send(Some(*contract_symbol))?;
}
native::event::EventInternal::AskPriceUpdateNotification(price) => {
self.ask_price.send(Some(*price))?;
}
native::event::EventInternal::BidPriceUpdateNotification(price) => {
self.bid_price.send(Some(*price))?;
}
native::event::EventInternal::ServiceHealthUpdate(update) => {
self.service.send(Some(update.clone()))?;
}
native::event::EventInternal::BackgroundNotification(_task) => {
// ignored
}
native::event::EventInternal::SpendableOutputs => {
unreachable!("SpendableOutputs event should not be sent to the subscriber");
}
native::event::EventInternal::Authenticated(_) => {
// ignored
}
native::event::EventInternal::DlcChannelEvent(_) => {
// ignored
}
native::event::EventInternal::FundingChannelNotification(_) => {
// ignored
}
native::event::EventInternal::LnPaymentReceived { .. } => {
// ignored
}
native::event::EventInternal::NewTrade(_) => {
// ignored
}
native::event::EventInternal::FundingFeeEvent(_) => {
// ignored
}
native::event::EventInternal::NextFundingRate(_) => {
// ignored
}
}
Ok(())
}
}
// This is so cumbersome because of EventHub requiring a Send + Sync + Clone subscriber
#[derive(Clone)]
pub struct ThreadSafeSenders(Arc<Mutex<Senders>>);
impl Subscriber for ThreadSafeSenders {
fn notify(&self, event: &native::event::EventInternal) {
self.0.lock().notify(event)
}
fn events(&self) -> Vec<EventType> {
self.0.lock().events()
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/src/logger.rs | crates/tests-e2e/src/logger.rs | use std::sync::Once;
pub fn init_tracing() {
static TRACING_TEST_SUBSCRIBER: Once = Once::new();
TRACING_TEST_SUBSCRIBER.call_once(|| {
tracing_subscriber::fmt()
.with_env_filter(
"debug,\
hyper=warn,\
reqwest=warn,\
rustls=warn,\
bdk=info,\
lightning::ln::peer_handler=debug,\
lightning=trace,\
sled=info,\
ureq=info",
)
.with_test_writer()
.init()
})
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/src/bitcoind.rs | crates/tests-e2e/src/bitcoind.rs | use anyhow::bail;
use anyhow::Result;
use bitcoin::address::NetworkUnchecked;
use bitcoin::Address;
use bitcoin::Amount;
use reqwest::Client;
use reqwest::Response;
use serde::Deserialize;
use serde_json::json;
use std::time::Duration;
/// A wrapper over the bitcoind HTTP API
///
/// It does not aim to be complete, functionality will be added as needed
pub struct Bitcoind {
client: Client,
host: String,
}
impl Bitcoind {
pub fn new(client: Client, host: String) -> Self {
Self { client, host }
}
pub fn new_local(client: Client) -> Self {
let host = "http://localhost:8080/bitcoin".to_string();
Self::new(client, host)
}
/// Instructs `bitcoind` to generate to address.
pub async fn mine(&self, n: u16) -> Result<()> {
tracing::info!(n, "Mining blocks");
let response: GetNewAddressResponse = self
.client
.post(&self.host)
.body(r#"{"jsonrpc": "1.0", "method": "getnewaddress", "params": []}"#.to_string())
.send()
.await?
.json()
.await?;
self.client
.post(&self.host)
.body(format!(
r#"{{"jsonrpc": "1.0", "method": "generatetoaddress", "params": [{}, "{}"]}}"#,
n, response.result
))
.send()
.await?;
// For the mined blocks to be picked up by the subsequent wallet syncs
tokio::time::sleep(Duration::from_secs(5)).await;
Ok(())
}
/// An alias for send_to_address
pub async fn fund(&self, address: &Address, amount: Amount) -> Result<Response> {
self.send_to_address(address, amount).await
}
pub async fn send_to_address(&self, address: &Address, amount: Amount) -> Result<Response> {
let response = self
.client
.post(&self.host)
.body(format!(
r#"{{"jsonrpc": "1.0", "method": "sendtoaddress", "params": ["{}", "{}", "", "", false, false, null, null, false, 1.0]}}"#,
address,
amount.to_btc(),
))
.send()
.await?;
Ok(response)
}
pub async fn send_multiple_utxos_to_address<F>(
&self,
address_fn: F,
utxo_amount: Amount,
n_utxos: u64,
) -> Result<()>
where
F: Fn() -> Address<NetworkUnchecked>,
{
let total_amount = utxo_amount * n_utxos;
let response: ListUnspentResponse = self
.client
.post(&self.host)
.body(r#"{"jsonrpc": "1.0", "method": "listunspent", "params": []}"#)
.send()
.await?
.json()
.await?;
let utxo = response
.result
.iter()
// We try to find one UTXO that can cover the whole transaction. We could cover the
// amount with multiple UTXOs too, but this is simpler and will probably succeed.
.find(|utxo| utxo.spendable && utxo.amount >= total_amount)
.expect("to find UTXO to cover multi-payment");
let mut outputs = serde_json::value::Map::new();
for _ in 0..n_utxos {
let address = address_fn();
outputs.insert(
address.assume_checked().to_string(),
json!(utxo_amount.to_btc()),
);
}
let create_raw_tx_request = json!(
{
"jsonrpc": "1.0",
"method": "createrawtransaction",
"params":
[
[ {"txid": utxo.txid, "vout": utxo.vout} ],
outputs
]
}
);
let create_raw_tx_response: CreateRawTransactionResponse = self
.client
.post(&self.host)
.json(&create_raw_tx_request)
.send()
.await?
.json()
.await?;
let sign_raw_tx_with_wallet_request = json!(
{
"jsonrpc": "1.0",
"method": "signrawtransactionwithwallet",
"params": [ create_raw_tx_response.result ]
}
);
let sign_raw_tx_with_wallet_response: SignRawTransactionWithWalletResponse = self
.client
.post(&self.host)
.json(&sign_raw_tx_with_wallet_request)
.send()
.await?
.json()
.await?;
let send_raw_tx_request = json!(
{
"jsonrpc": "1.0",
"method": "sendrawtransaction",
"params": [ sign_raw_tx_with_wallet_response.result.hex, 0 ]
}
);
let send_raw_tx_response: SendRawTransactionResponse = self
.client
.post(&self.host)
.json(&send_raw_tx_request)
.send()
.await?
.json()
.await?;
tracing::info!(
txid = %send_raw_tx_response.result,
%utxo_amount,
%n_utxos,
"Published multi-utxo transaction"
);
Ok(())
}
pub async fn post(&self, endpoint: &str, body: Option<String>) -> Result<Response> {
let mut builder = self.client.post(endpoint.to_string());
if let Some(body) = body {
builder = builder.body(body);
}
let response = builder.send().await?;
if !response.status().is_success() {
bail!(response.text().await?)
}
Ok(response)
}
}
#[derive(Deserialize, Debug)]
struct GetNewAddressResponse {
result: String,
}
#[derive(Deserialize, Debug)]
struct ListUnspentResponse {
result: Vec<Utxo>,
}
#[derive(Deserialize, Debug)]
struct Utxo {
txid: String,
vout: usize,
#[serde(with = "bitcoin::amount::serde::as_btc")]
amount: Amount,
spendable: bool,
}
#[derive(Deserialize, Debug)]
struct CreateRawTransactionResponse {
result: String,
}
#[derive(Deserialize, Debug)]
struct SignRawTransactionWithWalletResponse {
result: SignRawTransactionWithWalletResponseBody,
}
#[derive(Deserialize, Debug)]
struct SignRawTransactionWithWalletResponseBody {
hex: String,
}
#[derive(Deserialize, Debug)]
struct SendRawTransactionResponse {
result: String,
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/src/test_flow.rs | crates/tests-e2e/src/test_flow.rs | /// Waits until the specified condition is met
#[macro_export]
macro_rules! wait_until {
($expr:expr) => {
// Waiting time for the time on the watch channel before returning error
let next_wait_time: std::time::Duration = std::time::Duration::from_secs(120);
let result = tokio::time::timeout(next_wait_time, async {
let mut wait_time = std::time::Duration::from_millis(10);
loop {
if $expr {
break;
}
tokio::time::sleep(wait_time).await;
wait_time *= 2; // Increase wait time exponentially
}
})
.await;
match result {
Ok(_) => {
tracing::debug!("Expression satisfied: {}", quote::quote!($expr));
}
Err(_) => {
panic!(
"Expression timed out after {}s. Expression: {}",
next_wait_time.as_secs(),
quote::quote!($expr)
);
}
}
};
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/src/coordinator.rs | crates/tests-e2e/src/coordinator.rs | use anyhow::Context;
use anyhow::Result;
use bitcoin::address::NetworkUnchecked;
use bitcoin::Address;
use native::api::ContractSymbol;
use reqwest::Client;
use rust_decimal::Decimal;
use serde::Deserialize;
use serde::Serialize;
use time::OffsetDateTime;
/// A wrapper over the coordinator HTTP API.
///
/// It does not aim to be complete, functionality will be added as needed.
pub struct Coordinator {
client: Client,
host: String,
db_host: String,
}
impl Coordinator {
pub fn new(client: Client, host: &str, db_host: &str) -> Self {
Self {
client,
host: host.to_string(),
db_host: db_host.to_string(),
}
}
pub fn new_local(client: Client) -> Self {
Self::new(client, "http://localhost:8000", "http://localhost:3002")
}
/// Check whether the coordinator is running.
pub async fn is_running(&self) -> bool {
self.get(format!("{}/health", self.host)).await.is_ok()
}
pub async fn sync_node(&self) -> Result<()> {
self.post::<()>(format!("{}/api/admin/sync", self.host), None)
.await?;
Ok(())
}
pub async fn get_balance(&self) -> Result<Balance> {
let balance = self
.get(format!("{}/api/admin/wallet/balance", self.host))
.await?
.json()
.await?;
Ok(balance)
}
pub async fn get_new_address(&self) -> Result<Address<NetworkUnchecked>> {
Ok(self
.get(format!("{}/api/newaddress", self.host))
.await?
.text()
.await?
.parse()?)
}
pub async fn get_dlc_channels(&self) -> Result<Vec<DlcChannelDetails>> {
Ok(self
.get(format!("{}/api/admin/dlc_channels", self.host))
.await?
.json()
.await?)
}
pub async fn rollover(&self, dlc_channel_id: &str) -> Result<reqwest::Response> {
self.post::<()>(
format!("{}/api/admin/rollover/{dlc_channel_id}", self.host),
None,
)
.await
}
pub async fn get_positions(&self, trader_pubkey: &str) -> Result<Vec<Position>> {
let positions = self
.get(format!(
"{}/positions?trader_pubkey=eq.{trader_pubkey}",
self.db_host
))
.await?
.json()
.await?;
Ok(positions)
}
pub async fn collaborative_revert(
&self,
request: CollaborativeRevertCoordinatorRequest,
) -> Result<()> {
self.post(
format!("{}/api/admin/channels/revert", self.host),
Some(request),
)
.await?;
Ok(())
}
pub async fn post_funding_rates(&self, request: FundingRates) -> Result<()> {
self.post(
format!("{}/api/admin/funding-rates", self.host),
Some(request),
)
.await?;
Ok(())
}
/// Modify the `creation_timestamp` of the trader positions stored in the coordinator database.
///
/// This can be used together with `post_funding_rates` to force the coordinator to generate a
/// funding fee event for a given position.
pub async fn modify_position_creation_timestamp(
&self,
timestamp: OffsetDateTime,
trader_pubkey: &str,
) -> Result<()> {
#[derive(Serialize)]
struct Request {
#[serde(with = "time::serde::rfc3339")]
creation_timestamp: OffsetDateTime,
}
self.patch(
format!(
"{}/positions?trader_pubkey=eq.{trader_pubkey}",
self.db_host
),
Some(Request {
creation_timestamp: timestamp,
}),
)
.await?;
Ok(())
}
pub async fn get_funding_fee_events(
&self,
trader_pubkey: &str,
position_id: u64,
) -> Result<Vec<FundingFeeEvent>> {
let funding_fee_events = self
.get(format!(
"{}/funding_fee_events?trader_pubkey=eq.{trader_pubkey}&position_id=eq.{position_id}",
self.db_host
))
.await?
.json()
.await?;
Ok(funding_fee_events)
}
async fn get(&self, path: String) -> Result<reqwest::Response> {
self.client
.get(path)
.send()
.await
.context("Could not send GET request to coordinator")?
.error_for_status()
.context("Coordinator did not return 200 OK")
}
async fn post<T: Serialize>(&self, path: String, body: Option<T>) -> Result<reqwest::Response> {
let request = self.client.post(path);
let request = match body {
Some(ref body) => {
let body = serde_json::to_string(body)?;
request
.header("Content-Type", "application/json")
.body(body)
}
None => request,
};
request
.send()
.await
.context("Could not send POST request to coordinator")?
.error_for_status()
.context("Coordinator did not return 200 OK")
}
async fn patch<T: Serialize>(
&self,
path: String,
body: Option<T>,
) -> Result<reqwest::Response> {
let request = self.client.patch(path);
let request = match body {
Some(ref body) => {
let body = serde_json::to_string(body)?;
request
.header("Content-Type", "application/json")
.body(body)
}
None => request,
};
request
.send()
.await
.context("Could not send PATCH request to coordinator")?
.error_for_status()
.context("Coordinator did not return 200 OK")
}
}
#[derive(Deserialize, Debug)]
pub struct Balance {
pub onchain: u64,
pub dlc_channel: u64,
}
#[derive(Deserialize, Debug)]
pub struct DlcChannels {
#[serde(flatten)]
pub channels: Vec<DlcChannelDetails>,
}
#[derive(Deserialize, Debug)]
pub struct DlcChannelDetails {
pub dlc_channel_id: Option<String>,
pub counter_party: String,
pub channel_state: ChannelState,
pub signed_channel_state: Option<SignedChannelState>,
pub update_idx: Option<u64>,
}
#[derive(Deserialize, Debug)]
pub enum ChannelState {
Offered,
Accepted,
Signed,
Closing,
Closed,
CounterClosed,
ClosedPunished,
CollaborativelyClosed,
FailedAccept,
FailedSign,
Cancelled,
}
#[derive(Deserialize, Debug, PartialEq)]
pub enum SignedChannelState {
Established,
SettledOffered,
SettledReceived,
SettledAccepted,
SettledConfirmed,
Settled,
RenewOffered,
RenewAccepted,
RenewConfirmed,
RenewFinalized,
Closing,
CollaborativeCloseOffered,
}
#[derive(Serialize)]
pub struct CollaborativeRevertCoordinatorRequest {
pub channel_id: String,
pub fee_rate_sats_vb: u64,
pub counter_payout: u64,
pub price: Decimal,
}
#[derive(Debug, Deserialize, Clone)]
// For `insta`.
#[derive(Serialize)]
pub struct Position {
pub id: u64,
pub contract_symbol: ContractSymbol,
pub trader_leverage: Decimal,
pub quantity: Decimal,
pub trader_direction: Direction,
pub average_entry_price: Decimal,
pub trader_liquidation_price: Decimal,
pub position_state: PositionState,
pub coordinator_margin: u64,
#[serde(with = "time::serde::rfc3339")]
pub creation_timestamp: OffsetDateTime,
#[serde(with = "time::serde::rfc3339")]
pub expiry_timestamp: OffsetDateTime,
#[serde(with = "time::serde::rfc3339")]
pub update_timestamp: OffsetDateTime,
pub trader_pubkey: String,
pub temporary_contract_id: Option<String>,
pub trader_realized_pnl_sat: Option<i64>,
pub trader_unrealized_pnl_sat: Option<i64>,
pub closing_price: Option<Decimal>,
pub coordinator_leverage: Decimal,
pub trader_margin: i64,
pub coordinator_liquidation_price: Decimal,
pub order_matching_fees: i64,
}
#[derive(Debug, Deserialize, PartialEq, Clone, Copy)]
#[serde(rename_all = "lowercase")]
// For `insta`.
#[derive(Serialize)]
pub enum Direction {
Long,
Short,
}
#[derive(Debug, Deserialize, PartialEq, Clone, Copy)]
// For `insta`.
#[derive(Serialize)]
pub enum PositionState {
Proposed,
Open,
Closing,
Rollover,
Closed,
Failed,
Resizing,
}
#[derive(Debug, Serialize)]
pub struct FundingRates(pub Vec<FundingRate>);
#[derive(Debug, Serialize)]
pub struct FundingRate {
pub rate: Decimal,
#[serde(with = "time::serde::rfc3339")]
pub start_date: OffsetDateTime,
#[serde(with = "time::serde::rfc3339")]
pub end_date: OffsetDateTime,
}
#[derive(Debug, Deserialize)]
pub struct FundingFeeEvent {
pub amount_sats: i64,
pub trader_pubkey: String,
#[serde(with = "time::serde::rfc3339::option")]
pub paid_date: Option<OffsetDateTime>,
pub position_id: u64,
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/tests/e2e_close_position.rs | crates/tests-e2e/tests/e2e_close_position.rs | #![allow(clippy::unwrap_used)]
use native::api;
use native::api::ContractSymbol;
use native::trade::order::api::NewOrder;
use native::trade::order::api::OrderType;
use native::trade::position::PositionState;
use tests_e2e::app::submit_order;
use tests_e2e::coordinator::SignedChannelState;
use tests_e2e::setup;
use tests_e2e::setup::dummy_order;
use tests_e2e::wait_until;
// Comments are based on a fixed price of 40_000.
// TODO: Add assertions when the maker price can be fixed.
#[tokio::test(flavor = "multi_thread")]
#[ignore = "need to be run with 'just e2e' command"]
async fn can_open_close_open_close_position() {
let test = setup::TestSetup::new_with_open_position().await;
// - App margin is 1_250_000 sats.
// - Opening fee of 7_500 paid to coordinator collateral reserve from app on-chain balance.
// - App off-chain balance is 0 (first trade uses full DLC channel collateral for now).
let app_off_chain_balance = test
.app
.rx
.wallet_info()
.unwrap()
.balances
.off_chain
.unwrap();
tracing::info!(%app_off_chain_balance, "Opened first position");
let closing_order = {
let mut order = dummy_order();
order.direction = api::Direction::Short;
order
};
tracing::info!("Closing first position");
submit_order(closing_order.clone());
wait_until!(test.app.rx.position_close().is_some());
tokio::time::sleep(std::time::Duration::from_secs(10)).await;
// - App off-chain balance is 1_242_500 sats (margin minus 7_500 fee).
let app_off_chain_balance = test
.app
.rx
.wallet_info()
.unwrap()
.balances
.off_chain
.unwrap();
tracing::info!(%app_off_chain_balance, "Closed first position");
tracing::info!("Opening second position");
let order = NewOrder {
leverage: 2.0,
contract_symbol: ContractSymbol::BtcUsd,
direction: api::Direction::Long,
quantity: 500.0,
order_type: Box::new(OrderType::Market),
stable: false,
};
submit_order(order.clone());
wait_until!(test.app.rx.position().is_some());
wait_until!(test.app.rx.position().unwrap().position_state == PositionState::Open);
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
// - App margin is 625_000 sats.
// - Opening fee of 3_750 paid to coordinator collateral reserve from app off-chain balance.
// - App off-chain balance is 613_750.
let app_off_chain_balance = test
.app
.rx
.wallet_info()
.unwrap()
.balances
.off_chain
.unwrap();
tracing::info!(%app_off_chain_balance, "Opened second position");
// rolling over before closing the second position
tracing::info!("Rollover second position");
let coordinator = test.coordinator;
let app_pubkey = api::get_node_id().0;
let dlc_channels = coordinator.get_dlc_channels().await.unwrap();
let dlc_channel = dlc_channels
.into_iter()
.find(|chan| chan.counter_party == app_pubkey)
.unwrap();
coordinator
.rollover(&dlc_channel.dlc_channel_id.unwrap())
.await
.unwrap();
wait_until!(test
.app
.rx
.position()
.map(|p| PositionState::Rollover == p.position_state)
.unwrap_or(false));
wait_until!(test
.app
.rx
.position()
.map(|p| PositionState::Open == p.position_state)
.unwrap_or(false));
tracing::info!("Closing second position");
let closing_order = NewOrder {
direction: api::Direction::Short,
..order
};
submit_order(closing_order);
wait_until!(test.app.rx.position_close().is_some());
wait_until!({
let dlc_channels = coordinator.get_dlc_channels().await.unwrap();
let dlc_channel = dlc_channels
.into_iter()
.find(|chan| chan.counter_party == app_pubkey)
.unwrap();
Some(SignedChannelState::Settled) == dlc_channel.signed_channel_state
});
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
// - App off-chain balance is 1_235_000 sats (reserve + margin - 3_750 fee).
let app_off_chain_balance = test
.app
.rx
.wallet_info()
.unwrap()
.balances
.off_chain
.unwrap();
tracing::info!(%app_off_chain_balance, "Closed second position");
// TODO: Assert that the position is closed in the coordinator
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/tests/e2e_force_close_settled_channel.rs | crates/tests-e2e/tests/e2e_force_close_settled_channel.rs | #![allow(clippy::unwrap_used)]
use native::api::ChannelState;
use native::api::Direction;
use native::api::SignedChannelState;
use tests_e2e::app::force_close_dlc_channel;
use tests_e2e::app::get_dlc_channels;
use tests_e2e::app::refresh_wallet_info;
use tests_e2e::app::submit_order;
use tests_e2e::setup;
use tests_e2e::setup::dummy_order;
use tests_e2e::wait_until;
#[tokio::test(flavor = "multi_thread")]
#[ignore = "need to be run with 'just e2e' command"]
async fn can_force_close_settled_channel() {
let setup = setup::TestSetup::new_with_open_position().await;
let closing_order = {
let mut order = dummy_order();
order.direction = Direction::Short;
order
};
submit_order(closing_order.clone());
wait_until!(setup.app.rx.position_close().is_some());
let app_balance_before = setup.app.rx.wallet_info().unwrap().balances.on_chain;
let coordinator_balance_before = setup.coordinator.get_balance().await.unwrap();
force_close_dlc_channel(&setup.bitcoind).await;
let channels = get_dlc_channels();
let channel = channels.first().unwrap();
wait_until!(matches!(
channel.channel_state,
ChannelState::Signed {
state: SignedChannelState::SettledClosing,
..
}
));
setup.bitcoind.mine(288).await.unwrap();
refresh_wallet_info();
setup.coordinator.sync_node().await.unwrap();
let channels = get_dlc_channels();
let channel = channels.first().unwrap();
wait_until!(matches!(
channel.channel_state,
ChannelState::SettledClosing { .. }
));
wait_until!({
setup.bitcoind.mine(1).await.unwrap();
refresh_wallet_info();
setup.coordinator.sync_node().await.unwrap();
let app_balance_after = setup.app.rx.wallet_info().unwrap().balances.on_chain;
let coordinator_balance_after = setup.coordinator.get_balance().await.unwrap();
// TODO: Verify that the wallets are paid the expected amounts.
app_balance_before < app_balance_after
&& coordinator_balance_before.onchain < coordinator_balance_after.onchain
});
let channels = get_dlc_channels();
let channel = channels.first().unwrap();
wait_until!(matches!(channel.channel_state, ChannelState::Closed { .. }));
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/tests/e2e_basic.rs | crates/tests-e2e/tests/e2e_basic.rs | use anyhow::Result;
use tests_e2e::setup::TestSetup;
#[tokio::test(flavor = "multi_thread")]
#[ignore = "need to be run with 'just e2e' command"]
async fn app_can_be_funded_with_bitcoind() -> Result<()> {
TestSetup::new_after_funding().await;
Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/tests/e2e_collaborative_close_channel.rs | crates/tests-e2e/tests/e2e_collaborative_close_channel.rs | #![allow(clippy::unwrap_used)]
use native::api;
use tests_e2e::app::refresh_wallet_info;
use tests_e2e::setup;
use tests_e2e::setup::dummy_order;
use tests_e2e::wait_until;
use tokio::task::spawn_blocking;
#[tokio::test(flavor = "multi_thread")]
#[ignore = "need to be run with 'just e2e' command"]
async fn can_open_and_collab_close_channel() {
// Setup
let test = setup::TestSetup::new_with_open_position().await;
let app_off_chain_balance = test
.app
.rx
.wallet_info()
.unwrap()
.balances
.off_chain
.unwrap();
tracing::info!(%app_off_chain_balance, "Opened position");
let closing_order = {
let mut order = dummy_order();
order.direction = api::Direction::Short;
order
};
tracing::info!("Closing first position");
spawn_blocking(move || api::submit_order(closing_order).unwrap())
.await
.unwrap();
wait_until!(test.app.rx.position_close().is_some());
tokio::time::sleep(std::time::Duration::from_secs(10)).await;
let app_on_chain_balance = test.app.rx.wallet_info().unwrap().balances.on_chain;
let app_off_chain_balance = test
.app
.rx
.wallet_info()
.unwrap()
.balances
.off_chain
.unwrap();
tracing::info!(%app_off_chain_balance, "Closed first position");
// Act
spawn_blocking(move || api::close_channel().unwrap())
.await
.unwrap();
// wait until there is no balance off-chain anymore
wait_until!({
test.bitcoind.mine(1).await.unwrap();
refresh_wallet_info();
let app_balance = test.app.rx.wallet_info().unwrap().balances;
tracing::info!(
off_chain = app_balance.off_chain,
on_chain = app_balance.on_chain,
"Balance while waiting"
);
app_balance.off_chain.unwrap() == 0
});
// Assert
let wallet_info = test.app.rx.wallet_info().unwrap();
assert_eq!(
wallet_info.balances.on_chain,
app_on_chain_balance + app_off_chain_balance
);
// TODO: Assert that the coordinator's balance
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/tests/e2e_collaborative_revert.rs | crates/tests-e2e/tests/e2e_collaborative_revert.rs | #![allow(clippy::unwrap_used)]
use native::api::PaymentFlow;
use native::api::WalletHistoryItemType;
use rust_decimal_macros::dec;
use tests_e2e::app::get_dlc_channel_id;
use tests_e2e::app::refresh_wallet_info;
use tests_e2e::coordinator::CollaborativeRevertCoordinatorRequest;
use tests_e2e::setup;
use tests_e2e::wait_until;
// Use `flavor = "multi_thread"` to be able to call `block_in_place`.
#[tokio::test(flavor = "multi_thread")]
#[ignore = "need to be run with 'just e2e' command"]
async fn can_revert_channel() {
// Arrange
let test = setup::TestSetup::new_with_open_position().await;
let coordinator = &test.coordinator;
let bitcoin = &test.bitcoind;
let app = &test.app;
let position = app.rx.position().unwrap();
let app_margin = position.collateral;
let dlc_channel_id = get_dlc_channel_id().unwrap();
let app_balance_before = app.rx.wallet_info().unwrap().balances.on_chain;
// Act
let collaborative_revert_app_payout = app_margin / 2;
coordinator
.collaborative_revert(CollaborativeRevertCoordinatorRequest {
channel_id: dlc_channel_id,
counter_payout: collaborative_revert_app_payout,
price: dec!(40_000),
fee_rate_sats_vb: 1,
})
.await
.unwrap();
// Assert
wait_until!({
bitcoin.mine(1).await.unwrap();
refresh_wallet_info();
let app_balance = app.rx.wallet_info().unwrap().balances.on_chain;
tracing::debug!(
before = %app_balance_before,
now = %app_balance,
"Checking on-chain balance"
);
app_balance > app_balance_before
});
let wallet_info = app.rx.wallet_info().unwrap();
let collab_revert_entry = wallet_info
.history
.iter()
.filter(|entry| {
matches!(entry.flow, PaymentFlow::Inbound)
&& matches!(entry.wallet_type, WalletHistoryItemType::OnChain { .. })
})
.max_by(|a, b| a.timestamp.cmp(&b.timestamp))
.unwrap();
let total_tx_fee = match collab_revert_entry.wallet_type {
WalletHistoryItemType::OnChain {
fee_sats: Some(fee_sats),
..
} => fee_sats,
_ => unreachable!(),
};
// The transaction fee for the collaborative revert transaction is split evenly among the two
// parties.
let tx_fee = total_tx_fee / 2;
let expected_payout = collaborative_revert_app_payout - tx_fee;
assert_eq!(collab_revert_entry.amount_sats, expected_payout);
// TODO: Check coordinator balance too.
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/tests/e2e_open_position.rs | crates/tests-e2e/tests/e2e_open_position.rs | use native::api;
use native::api::ContractSymbol;
use native::health::Service;
use native::health::ServiceStatus;
use native::trade::order::api::NewOrder;
use native::trade::order::api::OrderType;
use native::trade::position::PositionState;
use tests_e2e::app::submit_channel_opening_order;
use tests_e2e::setup::TestSetup;
use tests_e2e::wait_until;
#[tokio::test(flavor = "multi_thread")]
#[ignore = "need to be run with 'just e2e' command"]
async fn can_open_position() {
let test = TestSetup::new_after_funding().await;
let app = &test.app;
let order = NewOrder {
leverage: 2.0,
contract_symbol: ContractSymbol::BtcUsd,
direction: api::Direction::Long,
quantity: 1.0,
order_type: Box::new(OrderType::Market),
stable: false,
};
submit_channel_opening_order(order.clone(), 10_000, 10_000);
assert_eq!(app.rx.status(Service::Orderbook), ServiceStatus::Online);
assert_eq!(app.rx.status(Service::Coordinator), ServiceStatus::Online);
// Assert that the order was posted
wait_until!(app.rx.order().is_some());
assert_eq!(app.rx.order().unwrap().quantity, order.quantity);
assert_eq!(app.rx.order().unwrap().direction, order.direction);
assert_eq!(
app.rx.order().unwrap().contract_symbol,
order.contract_symbol
);
assert_eq!(app.rx.order().unwrap().leverage, order.leverage);
// Assert that the position is opened in the app
wait_until!(app.rx.position().is_some());
assert_eq!(app.rx.position().unwrap().quantity, order.quantity);
assert_eq!(app.rx.position().unwrap().direction, order.direction);
assert_eq!(
app.rx.position().unwrap().contract_symbol,
order.contract_symbol
);
assert_eq!(app.rx.position().unwrap().leverage, order.leverage);
wait_until!(app.rx.position().unwrap().position_state == PositionState::Open);
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/tests/e2e_restore_from_backup.rs | crates/tests-e2e/tests/e2e_restore_from_backup.rs | use native::api;
use native::trade::position::PositionState;
use tests_e2e::app::run_app;
use tests_e2e::app::submit_order;
use tests_e2e::logger::init_tracing;
use tests_e2e::setup;
use tests_e2e::setup::dummy_order;
use tests_e2e::wait_until;
use tokio::task::spawn_blocking;
#[tokio::test(flavor = "multi_thread")]
#[ignore = "need to be run with 'just e2e' command"]
async fn app_can_be_restored_from_a_backup() {
init_tracing();
let test = setup::TestSetup::new_with_open_position().await;
let seed_phrase = api::get_seed_phrase();
let off_chain = test.app.rx.wallet_info().unwrap().balances.off_chain;
// Give any pending backup time to complete.
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
// kill the app
test.app.stop();
tracing::info!("Shutting down app!");
let app = run_app(Some(seed_phrase.0)).await;
assert_eq!(app.rx.wallet_info().unwrap().balances.off_chain, off_chain);
let positions = spawn_blocking(|| api::get_positions().unwrap())
.await
.unwrap();
assert_eq!(1, positions.len());
// Test if full backup is running without errors
spawn_blocking(|| api::full_backup().unwrap())
.await
.unwrap();
let closing_order = {
let mut order = dummy_order();
order.direction = api::Direction::Short;
order
};
tracing::info!("Closing a position");
submit_order(closing_order);
wait_until!(test.app.rx.position().unwrap().position_state == PositionState::Closing);
wait_until!(test.app.rx.position_close().is_some());
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/tests/e2e_force_close_position.rs | crates/tests-e2e/tests/e2e_force_close_position.rs | #![allow(clippy::unwrap_used)]
use native::api::ChannelState;
use native::api::SignedChannelState;
use tests_e2e::app::force_close_dlc_channel;
use tests_e2e::app::get_dlc_channels;
use tests_e2e::setup;
use tests_e2e::wait_until;
#[tokio::test(flavor = "multi_thread")]
#[ignore = "need to be run with 'just e2e' command"]
async fn can_force_close_position() {
let setup = setup::TestSetup::new_with_open_position().await;
force_close_dlc_channel(&setup.bitcoind).await;
let channels = get_dlc_channels();
let channel = channels.first().unwrap();
wait_until!(matches!(
channel.channel_state,
ChannelState::Signed {
state: SignedChannelState::Closing { .. },
..
}
));
// TODO: Assert that the position is closed in the app and that the DLC is claimed correctly
// on-chain.
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/tests/e2e_rollover_position.rs | crates/tests-e2e/tests/e2e_rollover_position.rs | #![allow(clippy::unwrap_used)]
use bitcoin::Network;
use native::api;
use native::api::ChannelState;
use native::api::SignedChannelState;
use native::trade::position;
use position::PositionState;
use rust_decimal_macros::dec;
use tests_e2e::app::force_close_dlc_channel;
use tests_e2e::app::get_dlc_channels;
use tests_e2e::app::AppHandle;
use tests_e2e::coordinator;
use tests_e2e::coordinator::FundingRate;
use tests_e2e::coordinator::FundingRates;
use tests_e2e::setup;
use tests_e2e::wait_until;
use time::ext::NumericalDuration;
use time::OffsetDateTime;
use xxi_node::commons;
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn can_rollover_position() {
let test = setup::TestSetup::new_with_open_position().await;
let coordinator = &test.coordinator;
let dlc_channels = coordinator.get_dlc_channels().await.unwrap();
let app_pubkey = api::get_node_id().0;
let position_coordinator_before =
coordinator.get_positions(&app_pubkey).await.unwrap()[0].clone();
tracing::info!("{:?}", dlc_channels);
let dlc_channel = dlc_channels
.into_iter()
.find(|chan| chan.counter_party == app_pubkey)
.unwrap();
let new_expiry = commons::calculate_next_expiry(OffsetDateTime::now_utc(), Network::Regtest);
generate_outstanding_funding_fee_event(&test, &app_pubkey, position_coordinator_before.id)
.await;
coordinator
.rollover(&dlc_channel.dlc_channel_id.unwrap())
.await
.unwrap();
wait_until!(check_rollover_position(&test.app, new_expiry));
wait_until!(test
.app
.rx
.position()
.map(|p| PositionState::Open == p.position_state)
.unwrap_or(false));
wait_until_funding_fee_event_is_paid(&test, &app_pubkey, position_coordinator_before.id).await;
let position_coordinator_after =
coordinator.get_positions(&app_pubkey).await.unwrap()[0].clone();
verify_coordinator_position_after_rollover(
&position_coordinator_before,
&position_coordinator_after,
new_expiry,
);
// Once the rollover is complete, we also want to verify that the channel can still be
// force-closed. This should be tested in `rust-dlc`, but we recently encountered a bug in our
// branch: https://github.com/get10101/10101/pull/2079.
force_close_dlc_channel(&test.bitcoind).await;
let channels = get_dlc_channels();
let channel = channels.first().unwrap();
wait_until!(matches!(
channel.channel_state,
ChannelState::Signed {
state: SignedChannelState::Closing { .. },
..
}
));
}
fn check_rollover_position(app: &AppHandle, new_expiry: OffsetDateTime) -> bool {
let position = app.rx.position().unwrap();
tracing::debug!(
"expect {:?} to be {:?}",
position.position_state,
PositionState::Rollover
);
tracing::debug!(
"expect {} to be {}",
position.expiry.unix_timestamp(),
new_expiry.unix_timestamp()
);
PositionState::Rollover == position.position_state
&& new_expiry.unix_timestamp() == position.expiry.unix_timestamp()
}
/// Verify the coordinator's position after executing a rollover, given that a funding fee was paid
/// from the trader to the coordinator.
fn verify_coordinator_position_after_rollover(
before: &coordinator::Position,
after: &coordinator::Position,
new_expiry: OffsetDateTime,
) {
assert_eq!(after.position_state, coordinator::PositionState::Open);
assert_eq!(before.quantity, after.quantity);
assert_eq!(before.trader_direction, after.trader_direction);
assert_eq!(before.average_entry_price, after.average_entry_price);
assert_eq!(before.coordinator_leverage, after.coordinator_leverage);
assert_eq!(
before.coordinator_liquidation_price,
after.coordinator_liquidation_price
);
assert_eq!(before.coordinator_margin, after.coordinator_margin);
assert_eq!(before.contract_symbol, after.contract_symbol);
assert_eq!(before.order_matching_fees, after.order_matching_fees);
assert_eq!(after.expiry_timestamp, new_expiry);
insta::assert_json_snapshot!(after, {
".id" => "[u64]".to_string(),
".creation_timestamp" => "[timestamp]".to_string(),
".update_timestamp" => "[timestamp]".to_string(),
".expiry_timestamp" => "[timestamp]".to_string(),
".trader_pubkey" => "[public-key]".to_string(),
".temporary_contract_id" => "[public-key]".to_string(),
});
}
async fn generate_outstanding_funding_fee_event(
test: &setup::TestSetup,
node_id_app: &str,
position_id: u64,
) {
let end_date = OffsetDateTime::now_utc() - 1.minutes();
let start_date = end_date - 8.hours();
// Let coordinator know about past funding rate.
test.coordinator
.post_funding_rates(FundingRates(vec![FundingRate {
// The trader will owe the coordinator.
rate: dec!(0.001),
start_date,
end_date,
}]))
.await
.unwrap();
// Make the coordinator think that the trader's position was created before the funding period
// ended.
test.coordinator
.modify_position_creation_timestamp(end_date - 1.hours(), node_id_app)
.await
.unwrap();
wait_until_funding_fee_event_is_created(test, node_id_app, position_id).await;
}
async fn wait_until_funding_fee_event_is_created(
test: &setup::TestSetup,
node_id_app: &str,
position_id: u64,
) {
wait_until!({
test.coordinator
.get_funding_fee_events(node_id_app, position_id)
.await
.unwrap()
.first()
.is_some()
});
}
async fn wait_until_funding_fee_event_is_paid(
test: &setup::TestSetup,
node_id_app: &str,
position_id: u64,
) {
wait_until!({
let funding_fee_events = test
.coordinator
.get_funding_fee_events(node_id_app, position_id)
.await
.unwrap();
funding_fee_events
.iter()
.all(|event| event.paid_date.is_some())
});
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/tests/e2e_open_position_small_utxos.rs | crates/tests-e2e/tests/e2e_open_position_small_utxos.rs | use bitcoin::Amount;
use native::api;
use native::api::calculate_margin;
use native::api::ContractSymbol;
use native::trade::order::api::NewOrder;
use native::trade::order::api::OrderType;
use native::trade::position::PositionState;
use rust_decimal::prelude::ToPrimitive;
use std::str::FromStr;
use tests_e2e::app::refresh_wallet_info;
use tests_e2e::app::submit_channel_opening_order;
use tests_e2e::setup::TestSetup;
use tests_e2e::wait_until;
#[tokio::test(flavor = "multi_thread")]
#[ignore = "need to be run with 'just e2e' command"]
async fn can_open_position_with_multiple_small_utxos() {
// Arrange
let setup = TestSetup::new().await;
setup.fund_coordinator(Amount::ONE_BTC, 2).await;
let app = &setup.app;
// Fund app with multiple small UTXOs that can cover the required margin.
let order = NewOrder {
leverage: 2.0,
contract_symbol: ContractSymbol::BtcUsd,
direction: api::Direction::Long,
quantity: 100.0,
order_type: Box::new(OrderType::Market),
stable: false,
};
// We take the ask price because the app is going long.
let ask_price = app.rx.ask_price().unwrap().to_f32().unwrap();
let margin_app = calculate_margin(ask_price, order.quantity, order.leverage).0;
// We want to use small UTXOs.
let utxo_size = 1_000;
let n_utxos = margin_app / utxo_size;
// Double the number of UTXOs to cover costs beyond the margin i.e. fees.
let n_utxos = 2 * n_utxos;
let address_fn = || bitcoin::Address::from_str(&api::get_new_address().unwrap()).unwrap();
setup
.bitcoind
.send_multiple_utxos_to_address(address_fn, Amount::from_sat(utxo_size), n_utxos)
.await
.unwrap();
let fund_amount = n_utxos * utxo_size;
setup.bitcoind.mine(1).await.unwrap();
wait_until!({
refresh_wallet_info();
app.rx.wallet_info().unwrap().balances.on_chain >= fund_amount
});
// Act
submit_channel_opening_order(order.clone(), 0, 0);
// Assert
wait_until!(matches!(
app.rx.position(),
Some(native::trade::position::Position {
position_state: PositionState::Open,
..
})
));
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/tests/e2e_resize_position.rs | crates/tests-e2e/tests/e2e_resize_position.rs | #![allow(clippy::unwrap_used)]
use native::api::ContractSymbol;
use native::api::Direction;
use native::api::PaymentFlow;
use native::api::WalletHistoryItemType;
use native::trade::order::api::NewOrder;
use native::trade::order::api::OrderType;
use tests_e2e::app::submit_order;
use tests_e2e::setup;
use tests_e2e::setup::TestSetup;
use tests_e2e::wait_until;
#[tokio::test(flavor = "multi_thread")]
#[ignore = "need to be run with 'just e2e' command"]
async fn can_resize_position() {
let position_direction = Direction::Short;
let order = NewOrder {
leverage: 2.0,
quantity: 250.0,
contract_symbol: ContractSymbol::BtcUsd,
direction: position_direction,
order_type: Box::new(OrderType::Market),
stable: false,
};
let test =
setup::TestSetup::new_with_open_position_custom(order.clone(), 1_000_000, 1_000_000).await;
let off_chain_balance_after_open = test
.app
.rx
.wallet_info()
.unwrap()
.balances
.off_chain
.unwrap();
// We start with 1_000_000 sats in the reserve and 250_005 sats as DLC margin.
tracing::info!(
app_off_chain_balance = %off_chain_balance_after_open,
position = ?test.app.rx.position(),
"Opened position"
);
let increasing_order = NewOrder {
quantity: 250.0,
..order.clone()
};
tracing::info!(?increasing_order, "Increasing position");
let order_id = submit_order(increasing_order);
wait_until_position_matches(&test, 500, Direction::Short).await;
// To increase the position, we must increase the margin (and decrease the reserve).
//
// 250_005 [extra margin] = 250 [order contracts] / (49_999 [price] * 2 [leverage])
//
// 1_500 [fee] = 250 [order contracts] * (1 / 49_999 [price]) * 0.0030 [fee coefficient]
//
// 748_495 [new reserve] = 1_000_000 [reserve] - 250_005 [extra margin] - 1_500 [fee]
let expected_off_chain_balance = 748_495;
wait_until_balance_equals(&test, expected_off_chain_balance).await;
// -251_505 [trade amount] = -250_005 [extra margin] - 1_500 [fee]
check_trade(
&test,
&order_id,
Direction::Short,
250,
None,
1_500,
-251_505,
);
tracing::info!(
app_off_chain_balance = %expected_off_chain_balance,
position = ?test.app.rx.position(),
"Increased position"
);
let decreasing_order = NewOrder {
quantity: 400.0,
direction: order.direction.opposite(),
..order.clone()
};
tracing::info!(?decreasing_order, "Decreasing position");
let order_id = submit_order(decreasing_order);
wait_until_position_matches(&test, 100, Direction::Short).await;
// To decrease the position, we must decrease the margin (and increase the reserve).
//
// 400_008 [margin reduction] = 400 [order contracts] / (49_999 [opening price] * 2 [leverage])
//
// 2_400 [fee] = 400 [order contracts] * (1 / 50_001 [closing price]) * 0.0030 [fee coefficient]
//
// -32 [pnl] = (400 [order contracts] / 50_001 [closing price]) - (400 [order contracts] /
// 49_999 [opening price])
//
// 1_146_071 [new reserve] = 748_495 [reserve] + 400_008 [margin reduction] - 2_400 [fee] - 32
// [pnl]
let expected_off_chain_balance = 1_146_071;
wait_until_balance_equals(&test, expected_off_chain_balance).await;
// 397_576 [trade amount] = 400_008 [margin reduction] - 2_400 [fee] - 32 [pnl]
check_trade(
&test,
&order_id,
Direction::Long,
400,
Some(-32),
2_400,
397_576,
);
tracing::info!(
app_off_chain_balance = %expected_off_chain_balance,
position = ?test.app.rx.position(),
"Decreased position"
);
let direction_changing_order = NewOrder {
quantity: 300.0,
direction: order.direction.opposite(),
..order
};
tracing::info!(?direction_changing_order, "Changing position direction");
let order_id = submit_order(direction_changing_order);
wait_until_position_matches(&test, 200, Direction::Long).await;
// To change direction, we must decrease the margin to 0 and then increase it. The total effect
// depends on the specific order executed.
//
// 100_002 [closed margin] = 100 [close contracts] / (49_999 [opening price] * 2 [leverage])
//
// -8 [pnl] = (100 [close contracts] / 50_001 [closing price]) - (100 [close contracts] / 49_999
// [opening price])
//
// 199_996 [new margin] = 200 [remaining contracts] / (50_001 [price] * 2 [leverage])
//
// 1_800 [fee] = 300 [total contracts] * (1 / 50_001 [closing price]) * 0.0030 [fee
// coefficient]
//
// 1_044_269 [new reserve] = 1_146_071 [reserve] + 100_002 [closed margin] - 199_996 [new
// margin] - 1_800 [fee] - 8 [pnl]
let expected_off_chain_balance = 1_044_269;
wait_until_balance_equals(&test, expected_off_chain_balance).await;
// The direction changing order is split into two trades: one to close the short position and
// another to open the long position.
// Close short position.
//
// 99_394 [1st trade amount] = 100_002 [closed margin] - 600 [fee] - 8 [pnl]
check_trade(
&test,
&order_id,
Direction::Long,
100,
Some(-8),
600,
99_394,
);
// Open long position, threfore no PNL.
//
// -201_196 [2nd trade amount] = -199_996 [new margin] - 1_200 [fee]
check_trade(
&test,
&order_id,
Direction::Long,
200,
None,
1_200,
-201_196,
);
tracing::info!(
app_off_chain_balance = %expected_off_chain_balance,
position = ?test.app.rx.position(),
"Changed position direction"
);
}
async fn wait_until_position_matches(test: &TestSetup, contracts: u64, direction: Direction) {
wait_until!(matches!(
test.app
.rx
.position()
.map(|p| p.quantity == contracts as f32 && p.direction == direction),
Some(true)
));
}
async fn wait_until_balance_equals(test: &TestSetup, target: u64) {
wait_until!(
target
== test
.app
.rx
.wallet_info()
.unwrap()
.balances
.off_chain
.unwrap()
);
}
#[track_caller]
fn check_trade(
test: &TestSetup,
order_id: &str,
direction: Direction,
contracts: u64,
pnl: Option<i64>,
fee_sat: u64,
// Positive if the trader received coins; negative if the trader paid coins.
amount_sat: i64,
) {
let can_find_trade = test
.app
.rx
.wallet_info()
.unwrap()
.history
.iter()
.any(|item| match &item.wallet_type {
WalletHistoryItemType::Trade {
order_id: trade_order_id,
fee_sat: trade_fee_sat,
pnl: trade_pnl,
contracts: trade_contracts,
direction: trade_direction,
} => {
if trade_order_id == order_id {
tracing::debug!(?item, "Checking trade values");
let relative_amount_sat = relative_amount_sat(item.amount_sats, item.flow);
*trade_fee_sat == fee_sat
&& trade_pnl == &pnl
&& *trade_contracts == contracts
&& trade_direction == &direction.to_string()
&& relative_amount_sat == amount_sat
} else {
false
}
}
_ => false,
});
assert!(can_find_trade)
}
fn relative_amount_sat(amount_sat: u64, flow: PaymentFlow) -> i64 {
let amount_sat = amount_sat as i64;
match flow {
PaymentFlow::Inbound => amount_sat,
PaymentFlow::Outbound => -amount_sat,
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/tests/e2e_reject_offer.rs | crates/tests-e2e/tests/e2e_reject_offer.rs | use bitcoin::Amount;
use native::api;
use native::api::ContractSymbol;
use native::health::Service;
use native::health::ServiceStatus;
use native::trade::order::api::NewOrder;
use native::trade::order::api::OrderType;
use native::trade::order::OrderState;
use native::trade::position::PositionState;
use std::time::Duration;
use tests_e2e::app::submit_channel_opening_order;
use tests_e2e::setup::TestSetup;
use tests_e2e::wait_until;
#[tokio::test(flavor = "multi_thread")]
#[ignore = "need to be run with 'just e2e' command"]
async fn reject_offer() {
let test = TestSetup::new().await;
test.fund_coordinator(Amount::ONE_BTC, 2).await;
test.fund_app(Amount::from_sat(250_000)).await;
let app = &test.app;
let invalid_order = NewOrder {
leverage: 1.0,
contract_symbol: ContractSymbol::BtcUsd,
direction: api::Direction::Long,
quantity: 2000.0,
order_type: Box::new(OrderType::Market),
stable: false,
};
// submit order for which the app does not have enough liquidity. will fail with `Failed to
// accept dlc channel offer. Invalid state: Not enough UTXOs for amount`
submit_channel_opening_order(invalid_order.clone(), 0, 0);
assert_eq!(app.rx.status(Service::Orderbook), ServiceStatus::Online);
assert_eq!(app.rx.status(Service::Coordinator), ServiceStatus::Online);
// Assert that the order was posted
wait_until!(app.rx.order().is_some());
assert_eq!(app.rx.order().unwrap().quantity, invalid_order.quantity);
assert_eq!(app.rx.order().unwrap().direction, invalid_order.direction);
assert_eq!(
app.rx.order().unwrap().contract_symbol,
invalid_order.contract_symbol
);
assert_eq!(app.rx.order().unwrap().leverage, invalid_order.leverage);
// Assert that the order failed
wait_until!(matches!(
app.rx.order().unwrap().state,
OrderState::Failed { .. }
));
// Assert that no position has been opened
wait_until!(app.rx.position().is_none());
// Retry with a smaller order
let order = NewOrder {
leverage: 2.0,
contract_symbol: ContractSymbol::BtcUsd,
direction: api::Direction::Long,
quantity: 100.0,
order_type: Box::new(OrderType::Market),
stable: false,
};
// give the coordinator some time to process the reject message, before submitting the next
// order.
tokio::time::sleep(Duration::from_secs(5)).await;
tracing::info!("Retry channel opening order");
submit_channel_opening_order(order.clone(), 0, 0);
// Assert that the order was posted
wait_until!(app.rx.order().is_some());
assert_eq!(app.rx.order().unwrap().quantity, order.quantity);
assert_eq!(app.rx.order().unwrap().direction, order.direction);
assert_eq!(
app.rx.order().unwrap().contract_symbol,
order.contract_symbol
);
assert_eq!(app.rx.order().unwrap().leverage, order.leverage);
// Assert that the position is opened in the app
wait_until!(app.rx.position().is_some());
assert_eq!(app.rx.position().unwrap().quantity, order.quantity);
assert_eq!(app.rx.position().unwrap().direction, order.direction);
assert_eq!(
app.rx.position().unwrap().contract_symbol,
order.contract_symbol
);
assert_eq!(app.rx.position().unwrap().leverage, order.leverage);
wait_until!(app.rx.position().unwrap().position_state == PositionState::Open);
// TODO(holzeis): Add reject tests for SettleOffer and RenewOffer.
// Unfortunately its not easy to provoke a reject for a settle offer or renew offer from a grey
// box integration test.
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/tests-e2e/tests/e2e_open_position_with_external_funding.rs | crates/tests-e2e/tests/e2e_open_position_with_external_funding.rs | use bitcoin::Amount;
use native::api;
use native::api::ContractSymbol;
use native::health::Service;
use native::health::ServiceStatus;
use native::trade::order::api::NewOrder;
use native::trade::order::api::OrderType;
use native::trade::position::PositionState;
use std::time::Duration;
use tests_e2e::app::submit_unfunded_channel_opening_order;
use tests_e2e::http::init_reqwest;
use tests_e2e::lnd_mock::LndMock;
use tests_e2e::setup::TestSetup;
use tests_e2e::wait_until;
#[tokio::test(flavor = "multi_thread")]
#[ignore = "need to be run with 'just e2e' command"]
async fn can_open_position_with_external_lightning_funding() {
    // End-to-end test: the channel-opening order is funded by an external
    // Lightning payment (via the mocked LND node paying the funding invoice)
    // instead of from the app's own on-chain wallet.
    let test = TestSetup::new().await;
    test.fund_coordinator(Amount::ONE_BTC, 2).await;
    let app = &test.app;
    let order = NewOrder {
        leverage: 2.0,
        contract_symbol: ContractSymbol::BtcUsd,
        direction: api::Direction::Long,
        quantity: 1.0,
        order_type: Box::new(OrderType::Market),
        stable: false,
    };
    // NOTE(review): the numeric arguments are presumably sat amounts / fee
    // parameters for the unfunded channel opening — confirm against the
    // signature of `submit_unfunded_channel_opening_order`.
    submit_unfunded_channel_opening_order(order.clone(), 10_000, 10_000, 5_000, 1_000).unwrap();
    let client = init_reqwest();
    let lnd_mock = LndMock::new_local(client.clone());
    // wait for the watchers before paying the invoice.
    tokio::time::sleep(Duration::from_secs(1)).await;
    tracing::info!("Paying invoice");
    lnd_mock.pay_invoice().await.unwrap();
    assert_eq!(app.rx.status(Service::Orderbook), ServiceStatus::Online);
    assert_eq!(app.rx.status(Service::Coordinator), ServiceStatus::Online);
    // Assert that the order was posted and mirrors what we submitted.
    wait_until!(app.rx.order().is_some());
    assert_eq!(app.rx.order().unwrap().quantity, order.quantity);
    assert_eq!(app.rx.order().unwrap().direction, order.direction);
    assert_eq!(
        app.rx.order().unwrap().contract_symbol,
        order.contract_symbol
    );
    assert_eq!(app.rx.order().unwrap().leverage, order.leverage);
    // Assert that the position is opened in the app with the same parameters.
    wait_until!(app.rx.position().is_some());
    assert_eq!(app.rx.position().unwrap().quantity, order.quantity);
    assert_eq!(app.rx.position().unwrap().direction, order.direction);
    assert_eq!(
        app.rx.position().unwrap().contract_symbol,
        order.contract_symbol
    );
    assert_eq!(app.rx.position().unwrap().leverage, order.leverage);
    // Finally the position must fully transition to the Open state.
    wait_until!(app.rx.position().unwrap().position_state == PositionState::Open);
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/bitmex-stream/src/lib.rs | crates/bitmex-stream/src/lib.rs | use crate::tungstenite::http::Method;
use anyhow::anyhow;
use anyhow::Context;
use anyhow::Result;
use async_stream::stream;
use futures::SinkExt;
use futures::Stream;
use futures::StreamExt;
use serde::ser::SerializeTuple;
use serde::Serialize;
use serde_json::to_string;
use std::ops::Add;
use std::time::Duration;
use std::time::Instant;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;
use tokio_tungstenite::tungstenite;
use tracing::Instrument;
use url::Url;
/// Connects to the BitMex websocket API.
///
/// Subscribes to the given topics and yields every received text message.
/// If the topics need authentication please use `subscribe_with_credentials` instead.
pub fn subscribe<const N: usize>(
    topics: [String; N],
    network: Network,
) -> impl Stream<Item = Result<String>> + Unpin {
    subscribe_impl(topics, network, None)
}
/// Connects to the BitMex websocket API with authentication.
///
/// Subscribes to the given topics and yields every received text message.
/// If invalid credentials have been provided but a topic was provided which needs authentication
/// the stream will be closed.
pub fn subscribe_with_credentials<const N: usize>(
    topics: [String; N],
    network: Network,
    credentials: Credentials,
) -> impl Stream<Item = Result<String>> + Unpin {
    subscribe_impl(topics, network, Some(credentials))
}
/// Connects to the BitMex websocket API, subscribes to the specified topics and
/// yields all messages.
///
/// To keep the connection alive, a websocket `Ping` is sent every 5 seconds in case no other
/// message was received in-between. This is according to BitMex's API documentation: https://www.bitmex.com/app/wsAPI#Heartbeats
fn subscribe_impl<const N: usize>(
    topics: [String; N],
    network: Network,
    credentials: Option<Credentials>,
) -> impl Stream<Item = Result<String>> + Unpin {
    let url = network.to_url();
    let url = format!("wss://{url}/realtime");
    let stream = stream! {
        tracing::debug!("Connecting to BitMex realtime API");
        let (mut connection, _) = tokio_tungstenite::connect_async(url.clone())
            .await.context("Could not connect to websocket")?;
        tracing::info!("Connected to BitMex realtime API");
        if let Some(credentials) = credentials {
            // Authenticate before subscribing: sign a GET over the websocket
            // URL with an expiry 5 seconds in the future (`authKeyExpires`).
            let start = SystemTime::now();
            let expires = start
                .duration_since(UNIX_EPOCH)?
                .add(Duration::from_secs(5))
                .as_secs();
            let signature = credentials.sign(Method::GET, expires, &Url::parse(url.as_str())?, "");
            // Send errors are ignored; bad credentials surface when BitMex
            // closes the stream (see the function docs above).
            let _ = connection
                .send(tungstenite::Message::try_from(Command::from(signature))?)
                .await;
        }
        let _ = connection
            .send(tungstenite::Message::try_from(Command::Subscribe(
                topics.to_vec(),
            ))?)
            .await;
        // Timestamp of the last message received from BitMex; used to detect a
        // dead connection despite our periodic pings.
        let mut last_bitmex_message = Instant::now();
        loop {
            tokio::select! {
                _ = tokio::time::sleep(Duration::from_secs(5)) => {
                    // Nothing heard for 20 seconds even though we ping every 5:
                    // treat the connection as dead and end the stream.
                    if last_bitmex_message.elapsed() > Duration::from_secs(20) {
                        yield Err(anyhow!("BitMex websocket timed out"));
                        return;
                    }

                    let span = tracing::trace_span!("Ping BitMex");
                    span.in_scope(|| tracing::trace!("No message from BitMex in the last 5 seconds, pinging"));
                    let res = connection
                        .send(tungstenite::Message::Ping([0u8; 32].to_vec()))
                        .instrument(span)
                        .await;

                    // A failed ping means the connection is gone.
                    if let Err(e) = res {
                        yield Err(anyhow!(e));
                        return;
                    }
                },
                msg = connection.next() => {
                    last_bitmex_message = Instant::now();

                    let msg = match msg {
                        Some(Ok(msg)) => {
                            msg
                        },
                        // `None` means the websocket was closed cleanly.
                        None => {
                            return;
                        }
                        Some(Err(e)) => {
                            yield Err(anyhow!(e));
                            return;
                        }
                    };

                    match msg {
                        tungstenite::Message::Pong(_) => {
                            tracing::trace!("Received pong");
                            continue;
                        }
                        // Only text frames are forwarded to the consumer.
                        tungstenite::Message::Text(text) => {
                            yield Ok(text);
                        }
                        other => {
                            tracing::trace!("Unsupported message: {:?}", other);
                            continue;
                        }
                    }
                }
            }
        }
    };

    stream.boxed()
}
/// The BitMex deployment to connect to.
#[derive(Debug, Copy, Clone)]
pub enum Network {
    Mainnet,
    Testnet,
}

impl Network {
    /// Returns the websocket host name for this network.
    pub fn to_url(&self) -> String {
        let host = match self {
            Network::Mainnet => "ws.bitmex.com",
            Network::Testnet => "ws.testnet.bitmex.com",
        };

        host.to_string()
    }
}
/// Commands sent to BitMex over the websocket.
///
/// Serializes to `{"op": "...", "args": [...]}` as the API expects.
#[derive(Debug, Serialize)]
#[serde(tag = "op", content = "args")]
#[serde(rename_all = "camelCase")]
pub enum Command {
    /// Subscribe to the given topics.
    Subscribe(Vec<String>),
    /// Authenticate the connection; BitMex names this op `authKeyExpires`.
    #[serde(rename = "authKeyExpires")]
    Authenticate(Signature),
}
impl TryFrom<Command> for tungstenite::Message {
    type Error = anyhow::Error;

    /// Serializes the command as JSON and wraps it in a websocket text frame.
    fn try_from(command: Command) -> Result<Self> {
        let json = to_string(&command)?;

        Ok(tungstenite::Message::Text(json))
    }
}
/// BitMex API credentials used to sign authenticated websocket requests.
#[derive(Clone, Debug, Serialize)]
pub struct Credentials {
    pub api_key: String,
    /// API secret; used as the HMAC key when signing (see `Credentials::sign`).
    pub secret: String,
}
/// A computed authentication payload for the `authKeyExpires` command.
///
/// Serialized as the tuple `[api_key, expires, signature]` (see the manual
/// `Serialize` impl below).
#[derive(Debug)]
pub struct Signature {
    api_key: String,
    /// Hex-encoded HMAC-SHA256 over the request, keyed with the API secret.
    signature: String,
    /// Unix timestamp (seconds) after which this signature is no longer valid.
    expires: u64,
}
impl Credentials {
    /// Creates credentials from an API key and secret.
    pub fn new(api_key: impl Into<String>, secret: impl Into<String>) -> Self {
        Self {
            api_key: api_key.into(),
            secret: secret.into(),
        }
    }

    /// Computes the HMAC-SHA256 signature BitMex expects for authenticated
    /// requests.
    ///
    /// The signed payload is `<verb><path>[?<query>]<expires><body>`, hashed
    /// with the API secret as key and hex-encoded.
    fn sign(&self, method: Method, expires: u64, url: &Url, body: &str) -> Signature {
        let key = ring::hmac::Key::new(ring::hmac::HMAC_SHA256, self.secret.as_bytes());

        // Build the message to sign piece by piece; the query string is only
        // included (with its leading '?') when present.
        let mut message = String::new();
        message.push_str(method.as_str());
        message.push_str(url.path());
        if let Some(query) = url.query() {
            message.push('?');
            message.push_str(query);
        }
        message.push_str(&expires.to_string());
        message.push_str(body);

        let signature = hex::encode(ring::hmac::sign(&key, message.as_bytes()));

        Signature {
            api_key: self.api_key.clone(),
            signature,
            expires,
        }
    }
}
impl From<Signature> for Command {
fn from(sig: Signature) -> Self {
Command::Authenticate(sig)
}
}
impl Serialize for Signature {
    /// Serializes as the 3-element tuple `[api_key, expires, signature]` — the
    /// exact argument order BitMex expects for `authKeyExpires`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let mut tup = serializer.serialize_tuple(3)?;
        tup.serialize_element(&self.api_key)?;
        tup.serialize_element(&self.expires)?;
        tup.serialize_element(&self.signature)?;
        tup.end()
    }
}
#[cfg(test)]
mod test {
    use super::*;

    // NOTE(review): the key/secret pairs and expected signatures below appear
    // to be the reference vectors from BitMex's API signing examples — confirm
    // against their documentation when updating.

    // GET request without query string and without body.
    #[test]
    fn test_signature_get() -> Result<()> {
        let tr = Credentials::new(
            "LAqUlngMIQkIUjXMUreyu3qn",
            "chNOOS4KvNXR_Xq4k4c9qsfoKWvnDecLATCRlcBwyKDYnWgO",
        );
        let Signature { signature, .. } = tr.sign(
            Method::GET,
            1518064236,
            &Url::parse("http://a.com/api/v1/instrument")?,
            "",
        );
        assert_eq!(
            signature,
            "c7682d435d0cfe87c16098df34ef2eb5a549d4c5a3c2b1f0f77b8af73423bf00"
        );
        Ok(())
    }

    // GET request with a query string: the '?' and query must be signed too.
    #[test]
    fn test_signature_get_param() -> Result<()> {
        let tr = Credentials::new(
            "LAqUlngMIQkIUjXMUreyu3qn",
            "chNOOS4KvNXR_Xq4k4c9qsfoKWvnDecLATCRlcBwyKDYnWgO",
        );
        let Signature { signature, .. } = tr.sign(
            Method::GET,
            1518064237,
            &Url::parse_with_params(
                "http://a.com/api/v1/instrument",
                &[("filter", r#"{"symbol": "XBTM15"}"#)],
            )?,
            "",
        );
        assert_eq!(
            signature,
            "e2f422547eecb5b3cb29ade2127e21b858b235b386bfa45e1c1756eb3383919f"
        );
        Ok(())
    }

    // POST request: the request body is part of the signed payload.
    #[test]
    fn test_signature_post() -> Result<()> {
        let credentials = Credentials::new(
            "LAqUlngMIQkIUjXMUreyu3qn",
            "chNOOS4KvNXR_Xq4k4c9qsfoKWvnDecLATCRlcBwyKDYnWgO",
        );
        let Signature { signature, .. } = credentials.sign(
            Method::POST,
            1518064238,
            &Url::parse("http://a.com/api/v1/order")?,
            r#"{"symbol":"XBTM15","price":219.0,"clOrdID":"mm_bitmex_1a/oemUeQ4CAJZgP3fjHsA","orderQty":98}"#,
        );
        assert_eq!(
            signature,
            "1749cd2ccae4aa49048ae09f0b95110cee706e0944e6a14ad0b3a8cb45bd336b"
        );
        Ok(())
    }

    // The wire format must be [api_key, expires, signature], in that order.
    #[test]
    fn test_serialize_signature() {
        let sig = Signature {
            api_key: "api_key123".to_string(),
            signature: "signature0x42".to_string(),
            expires: 42,
        };

        let serialized = to_string(&sig).unwrap();

        assert_eq!(serialized, r#"["api_key123",42,"signature0x42"]"#);
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/bitmex-stream/examples/xbt_usd_quote_testnet.rs | crates/bitmex-stream/examples/xbt_usd_quote_testnet.rs | use anyhow::Result;
use bitmex_stream::Network;
use futures::TryStreamExt;
/// Example: stream one-minute XBTUSD quote bins from BitMex testnet and log
/// every message until the stream ends or errors out.
#[tokio::main]
async fn main() -> Result<()> {
    // Log at INFO by default but show all TRACE output from the stream crate.
    tracing_subscriber::fmt()
        .with_env_filter("info,bitmex_stream=trace")
        .init();

    let mut quotes = bitmex_stream::subscribe(["quoteBin1m:XBTUSD".to_owned()], Network::Testnet);

    while let Some(message) = quotes.try_next().await? {
        tracing::info!("{message}");
    }

    Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/bitmex-stream/examples/authenticated_api.rs | crates/bitmex-stream/examples/authenticated_api.rs | use anyhow::Result;
use bitmex_stream::Credentials;
use bitmex_stream::Network;
use futures::TryStreamExt;
/// Example: stream the authenticated `execution` topic from BitMex testnet.
///
/// The placeholder credentials must be replaced with real API keys for the
/// subscription to actually succeed.
#[tokio::main]
async fn main() -> Result<()> {
    // Log at INFO by default but show all TRACE output from the stream crate.
    tracing_subscriber::fmt()
        .with_env_filter("info,bitmex_stream=trace")
        .init();

    let credentials = Credentials {
        api_key: "some_api_key".to_string(),
        secret: "some_secret".to_string(),
    };

    let mut executions = bitmex_stream::subscribe_with_credentials(
        ["execution".to_owned()],
        Network::Testnet,
        credentials,
    );

    while let Some(message) = executions.try_next().await? {
        tracing::info!("{message}");
    }

    Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/bitmex-stream/examples/xbt_usd_quote.rs | crates/bitmex-stream/examples/xbt_usd_quote.rs | use anyhow::Result;
use bitmex_stream::Network;
use futures::TryStreamExt;
/// Example: stream one-minute XBTUSD quote bins from BitMex mainnet and log
/// every message until the stream ends or errors out.
#[tokio::main]
async fn main() -> Result<()> {
    // Log at INFO by default but show all TRACE output from the stream crate.
    tracing_subscriber::fmt()
        .with_env_filter("info,bitmex_stream=trace")
        .init();

    let mut quotes = bitmex_stream::subscribe(["quoteBin1m:XBTUSD".to_owned()], Network::Mainnet);

    while let Some(message) = quotes.try_next().await? {
        tracing::info!("{message}");
    }

    Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/crates/mempool/src/lib.rs | crates/mempool/src/lib.rs | use anyhow::Result;
use serde::Deserialize;
const MEMPOOL_FEE_RATE_URL_MAINNET: &str = "https://mempool.space";
const MEMPOOL_FEE_RATE_URL_SIGNET: &str = "https://mempool.space/signet";
const MEMPOOL_FEE_RATE_URL_TESTNET: &str = "https://mempool.space/testnet";
/// Recommended fee rates, matching the JSON shape of mempool.space's
/// `/api/v1/fees/recommended` endpoint.
///
/// NOTE(review): values are presumably sat/vB per the mempool.space API —
/// confirm before doing fee arithmetic with them.
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct FeeRate {
    pub fastest_fee: usize,
    pub half_hour_fee: usize,
    pub hour_fee: usize,
    pub economy_fee: usize,
    pub minimum_fee: usize,
}
impl FeeRate {
    /// Fixed fee rates used for local regtest setups, where mempool.space is
    /// not queried.
    ///
    /// The values intentionally differ per priority so that clients asking for
    /// different priorities can observe an effect.
    fn local_fee_rate() -> Self {
        FeeRate {
            minimum_fee: 1,
            economy_fee: 2,
            hour_fee: 3,
            half_hour_fee: 4,
            fastest_fee: 5,
        }
    }
}
/// The Bitcoin network the fee-rate estimator should target.
#[derive(PartialEq)]
pub enum Network {
    Mainnet,
    Signet,
    Testnet,
    /// We assume a local regtest setup and will not perform any request to mempool.space
    Local,
}
/// Fetches recommended fee rates from mempool.space, or returns fixed values
/// for local regtest setups.
pub struct MempoolFeeRateEstimator {
    /// Base URL of the mempool.space instance for the configured network.
    url: String,
    network: Network,
}
impl MempoolFeeRateEstimator {
    /// Creates an estimator for the given network, pointing at the matching
    /// mempool.space endpoint (or a dummy URL for `Local`, which is never hit).
    pub fn new(network: Network) -> Self {
        let url = match network {
            Network::Mainnet => MEMPOOL_FEE_RATE_URL_MAINNET.to_string(),
            Network::Signet => MEMPOOL_FEE_RATE_URL_SIGNET.to_string(),
            Network::Testnet => MEMPOOL_FEE_RATE_URL_TESTNET.to_string(),
            Network::Local => "http://thereisnosuchthingasabitcoinmempool.com".to_string(),
        };

        Self { url, network }
    }

    /// Fetches the recommended fee rates.
    ///
    /// For `Network::Local` no request is performed and fixed local rates are
    /// returned instead.
    pub async fn fetch_fee(&self) -> Result<FeeRate> {
        if self.network == Network::Local {
            return Ok(FeeRate::local_fee_rate());
        }

        let url = format!("{}/api/v1/fees/recommended", self.url);
        let response = reqwest::Client::new().get(url).send().await?;

        Ok(response.json().await?)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // we keep this test running on CI even though it connects to the internet. This allows us to
    // be notified if the API ever changes
    #[tokio::test]
    pub async fn test_fetching_fee_rate_from_mempool() {
        let _testnet_fee_rate = MempoolFeeRateEstimator::new(Network::Testnet)
            .fetch_fee()
            .await
            .unwrap();

        let _mainnet_fee_rate = MempoolFeeRateEstimator::new(Network::Mainnet)
            .fetch_fee()
            .await
            .unwrap();

        let _signet_fee_rate = MempoolFeeRateEstimator::new(Network::Signet)
            .fetch_fee()
            .await
            .unwrap();

        // Local must succeed without any network access.
        let _local_fee_rate = MempoolFeeRateEstimator::new(Network::Local)
            .fetch_fee()
            .await
            .unwrap();
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/build.rs | coordinator/build.rs | use std::process::Command;
/// Build script: embeds the current git commit hash and branch name into the
/// binary via the `COMMIT_HASH` and `BRANCH_NAME` environment variables
/// (readable with `env!` at compile time).
fn main() {
    let git_hash = git_stdout(&["rev-parse", "HEAD"]);
    let branch_name = git_stdout(&["rev-parse", "--abbrev-ref", "HEAD"]);

    println!("cargo:rustc-env=COMMIT_HASH={}", git_hash);
    println!("cargo:rustc-env=BRANCH_NAME={}", branch_name);
}

/// Runs `git` with the given arguments and returns its stdout with surrounding
/// whitespace (in particular the trailing newline) removed.
///
/// Panics if `git` cannot be spawned, exits unsuccessfully, or prints invalid
/// UTF-8: failing the build is preferable to silently embedding bogus or empty
/// version information.
fn git_stdout(args: &[&str]) -> String {
    let output = Command::new("git")
        .args(args)
        .output()
        .expect("To be able to run git");

    assert!(
        output.status.success(),
        "git {:?} failed: {}",
        args,
        String::from_utf8_lossy(&output.stderr)
    );

    String::from_utf8(output.stdout)
        .expect("To be a valid string")
        .trim()
        .to_string()
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/dlc_protocol.rs | coordinator/src/dlc_protocol.rs | use crate::db;
use crate::funding_fee::insert_protocol_funding_fee_event;
use crate::funding_fee::mark_funding_fee_event_as_paid;
use crate::position::models::PositionState;
use crate::trade::models::NewTrade;
use crate::trade::websocket::InternalPositionUpdateMessage;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Amount;
use bitcoin::SignedAmount;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::Pool;
use diesel::result::Error::RollbackTransaction;
use diesel::Connection;
use diesel::PgConnection;
use diesel::QueryResult;
use dlc_manager::ContractId;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use time::OffsetDateTime;
use tokio::sync::broadcast::Sender;
use xxi_node::cfd::calculate_pnl;
use xxi_node::commons;
use xxi_node::commons::Direction;
use xxi_node::node::rust_dlc_manager::DlcChannelId;
use xxi_node::node::ProtocolId;
/// A DLC protocol instance as tracked by the coordinator.
pub struct DlcProtocol {
    pub id: ProtocolId,
    /// The protocol that preceded this one on the same channel, if any.
    pub previous_id: Option<ProtocolId>,
    pub timestamp: OffsetDateTime,
    /// The DLC channel this protocol runs on.
    pub channel_id: DlcChannelId,
    /// The contract associated with this protocol. `None` for protocols that
    /// do not involve a contract, e.g. a collaborative channel close.
    pub contract_id: Option<ContractId>,
    /// Node ID of the trader taking part in the protocol.
    pub trader: PublicKey,
    pub protocol_state: DlcProtocolState,
    pub protocol_type: DlcProtocolType,
}
/// Trade parameters stored alongside a trade-executing DLC protocol.
///
/// This is the coordinator's storage model; the wire-level counterpart is
/// [`commons::TradeParams`].
#[derive(Clone, Copy, Debug)]
pub struct TradeParams {
    pub protocol_id: ProtocolId,
    pub trader: PublicKey,
    pub quantity: f32,
    pub leverage: f32,
    pub average_price: f32,
    pub direction: Direction,
    /// Order-matching fee charged for this trade.
    pub matching_fee: Amount,
    /// The trader's realized PnL. Only set by callers that realize PnL during
    /// the protocol (position resizing); `None` otherwise.
    pub trader_pnl: Option<SignedAmount>,
}
impl TradeParams {
    /// Builds the storage model from the wire-level trade parameters.
    ///
    /// `trader_pnl` is only provided for protocols that realize PnL (see
    /// `start_resize_protocol`); all other callers pass `None`.
    fn new(
        trade_params: &commons::TradeParams,
        protocol_id: ProtocolId,
        trader_pnl: Option<SignedAmount>,
    ) -> Self {
        Self {
            protocol_id,
            trader: trade_params.pubkey,
            quantity: trade_params.quantity,
            leverage: trade_params.leverage,
            // Panics if the average execution price cannot be represented as
            // an f32.
            average_price: trade_params
                .average_execution_price()
                .to_f32()
                .expect("to fit"),
            direction: trade_params.direction,
            matching_fee: trade_params.order_matching_fee(),
            trader_pnl,
        }
    }
}
/// Parameters of a rollover DLC protocol (extending a position's expiry).
#[derive(Clone, Debug)]
pub struct RolloverParams {
    pub protocol_id: ProtocolId,
    pub trader_pubkey: PublicKey,
    pub margin_coordinator: Amount,
    pub margin_trader: Amount,
    pub leverage_coordinator: Decimal,
    pub leverage_trader: Decimal,
    pub liquidation_price_coordinator: Decimal,
    pub liquidation_price_trader: Decimal,
    /// Expiry of the contract after the rollover.
    pub expiry_timestamp: OffsetDateTime,
}
/// Lifecycle state of a DLC protocol.
pub enum DlcProtocolState {
    /// The protocol has been started but has not completed yet.
    Pending,
    /// The protocol completed successfully.
    Success,
    /// The protocol failed.
    Failed,
}
/// The kind of operation a DLC protocol performs.
#[derive(Clone, Debug)]
pub enum DlcProtocolType {
    /// Opening a channel also opens a position.
    OpenChannel {
        trade_params: TradeParams,
    },
    /// Opening a position on an already existing channel.
    OpenPosition {
        trade_params: TradeParams,
    },
    /// Changing the size of an existing position.
    ResizePosition {
        trade_params: TradeParams,
    },
    /// Extending a position to a new expiry.
    Rollover {
        rollover_params: RolloverParams,
    },
    /// Closing a position while keeping the channel open.
    Settle {
        trade_params: TradeParams,
    },
    /// Collaboratively closing the channel.
    Close {
        trader: PublicKey,
    },
    /// Unilaterally closing the channel.
    ForceClose {
        trader: PublicKey,
    },
}
/// Persists the lifecycle of DLC protocols (start, success, failure) and keeps
/// the related trading tables in sync.
pub struct DlcProtocolExecutor {
    pool: Pool<ConnectionManager<PgConnection>>,
}
impl DlcProtocolExecutor {
pub fn new(pool: Pool<ConnectionManager<PgConnection>>) -> Self {
DlcProtocolExecutor { pool }
}
/// Persist a new open-channel protocol in state pending, together with its
/// trade params, in a single database transaction.
#[allow(clippy::too_many_arguments)]
pub fn start_open_channel_protocol(
    &self,
    protocol_id: ProtocolId,
    temporary_contract_id: &ContractId,
    temporary_channel_id: &DlcChannelId,
    trade_params: &commons::TradeParams,
) -> Result<()> {
    let mut conn = self.pool.get()?;
    conn.transaction(|conn| {
        let trader_pubkey = trade_params.pubkey;
        db::dlc_protocols::create(
            conn,
            protocol_id,
            // An open-channel protocol is always the first one on a channel,
            // so there is no previous protocol.
            None,
            Some(temporary_contract_id),
            temporary_channel_id,
            db::dlc_protocols::DlcProtocolType::OpenChannel,
            &trader_pubkey,
        )?;
        db::trade_params::insert(conn, &TradeParams::new(trade_params, protocol_id, None))?;
        diesel::result::QueryResult::Ok(())
    })?;

    Ok(())
}
/// Persist a new open-position protocol in state pending, together with its
/// trade params, in a single database transaction.
#[allow(clippy::too_many_arguments)]
pub fn start_open_position_protocol(
    &self,
    protocol_id: ProtocolId,
    previous_protocol_id: Option<ProtocolId>,
    temporary_contract_id: &ContractId,
    channel_id: &DlcChannelId,
    trade_params: &commons::TradeParams,
) -> Result<()> {
    let mut conn = self.pool.get()?;
    conn.transaction(|conn| {
        let trader_pubkey = trade_params.pubkey;
        db::dlc_protocols::create(
            conn,
            protocol_id,
            previous_protocol_id,
            Some(temporary_contract_id),
            channel_id,
            db::dlc_protocols::DlcProtocolType::OpenPosition,
            &trader_pubkey,
        )?;
        db::trade_params::insert(conn, &TradeParams::new(trade_params, protocol_id, None))?;
        diesel::result::QueryResult::Ok(())
    })?;

    Ok(())
}
/// Persist a new resize-position protocol in state pending, together with its
/// trade params (including any PnL realized by the resize) and the funding fee
/// events settled by this protocol, in a single database transaction.
#[allow(clippy::too_many_arguments)]
pub fn start_resize_protocol(
    &self,
    protocol_id: ProtocolId,
    previous_protocol_id: Option<ProtocolId>,
    temporary_contract_id: Option<&ContractId>,
    channel_id: &DlcChannelId,
    trade_params: &commons::TradeParams,
    realized_pnl: Option<SignedAmount>,
    funding_fee_event_ids: Vec<i32>,
) -> Result<()> {
    let mut conn = self.pool.get()?;
    conn.transaction(|conn| {
        let trader_pubkey = trade_params.pubkey;
        db::dlc_protocols::create(
            conn,
            protocol_id,
            previous_protocol_id,
            temporary_contract_id,
            channel_id,
            db::dlc_protocols::DlcProtocolType::ResizePosition,
            &trader_pubkey,
        )?;
        // Link the funding fee events that will be marked as paid once this
        // protocol finishes successfully.
        insert_protocol_funding_fee_event(conn, protocol_id, &funding_fee_event_ids)?;
        db::trade_params::insert(
            conn,
            &TradeParams::new(trade_params, protocol_id, realized_pnl),
        )?;
        diesel::result::QueryResult::Ok(())
    })?;

    Ok(())
}
/// Persist a new settle protocol in state pending, together with its trade
/// params and the funding fee events settled by this protocol, in a single
/// database transaction.
pub fn start_settle_protocol(
    &self,
    protocol_id: ProtocolId,
    previous_protocol_id: Option<ProtocolId>,
    contract_id: &ContractId,
    channel_id: &DlcChannelId,
    trade_params: &commons::TradeParams,
    funding_fee_event_ids: Vec<i32>,
) -> Result<()> {
    let mut conn = self.pool.get()?;
    conn.transaction(|conn| {
        let trader_pubkey = trade_params.pubkey;
        db::dlc_protocols::create(
            conn,
            protocol_id,
            previous_protocol_id,
            Some(contract_id),
            channel_id,
            db::dlc_protocols::DlcProtocolType::Settle,
            &trader_pubkey,
        )?;
        // Link the funding fee events that will be marked as paid once this
        // protocol finishes successfully.
        insert_protocol_funding_fee_event(conn, protocol_id, &funding_fee_event_ids)?;
        db::trade_params::insert(conn, &TradeParams::new(trade_params, protocol_id, None))?;
        diesel::result::QueryResult::Ok(())
    })?;

    Ok(())
}
/// Persist a new rollover protocol and update technical tables in a single
/// transaction.
///
/// Also links the funding fee events that will be marked as paid once the
/// rollover finishes successfully.
pub fn start_rollover(
    &self,
    protocol_id: ProtocolId,
    previous_protocol_id: Option<ProtocolId>,
    temporary_contract_id: &ContractId,
    channel_id: &DlcChannelId,
    rollover_params: RolloverParams,
    funding_fee_event_ids: Vec<i32>,
) -> Result<()> {
    let mut conn = self.pool.get()?;
    conn.transaction(|conn| {
        let trader_pubkey = rollover_params.trader_pubkey;
        db::dlc_protocols::create(
            conn,
            protocol_id,
            previous_protocol_id,
            Some(temporary_contract_id),
            channel_id,
            db::dlc_protocols::DlcProtocolType::Rollover,
            &trader_pubkey,
        )?;
        insert_protocol_funding_fee_event(conn, protocol_id, &funding_fee_event_ids)?;
        db::rollover_params::insert(conn, &rollover_params)?;
        diesel::result::QueryResult::Ok(())
    })?;

    Ok(())
}
/// Persist a new collaborative-close protocol in state pending.
///
/// Unlike the other `start_*` methods there are no trade or rollover params to
/// store, so a single insert (and no explicit transaction) suffices.
///
/// Note: the stale `#[allow(clippy::too_many_arguments)]` was removed — this
/// function only takes five parameters, well under clippy's default threshold,
/// so the allow only hid future regressions.
pub fn start_close_channel_protocol(
    &self,
    protocol_id: ProtocolId,
    previous_protocol_id: Option<ProtocolId>,
    channel_id: &DlcChannelId,
    trader_id: &PublicKey,
) -> Result<()> {
    let mut conn = self.pool.get()?;
    db::dlc_protocols::create(
        &mut conn,
        protocol_id,
        previous_protocol_id,
        // No contract is involved when collaboratively closing the channel.
        None,
        channel_id,
        db::dlc_protocols::DlcProtocolType::Close,
        trader_id,
    )?;

    Ok(())
}
/// Mark the given DLC protocol as failed in the database.
pub fn fail_dlc_protocol(&self, protocol_id: ProtocolId) -> Result<()> {
    let mut connection = self.pool.get()?;
    db::dlc_protocols::set_dlc_protocol_state_to_failed(&mut connection, protocol_id)?;

    Ok(())
}
/// Update the state of the database and the position feed based on the completion of a DLC
/// protocol.
///
/// First finalizes the protocol-specific bookkeeping in a single database
/// transaction, then — after the transaction committed — notifies the internal
/// position feed if the protocol executed a trade.
pub fn finish_dlc_protocol(
    &self,
    protocol_id: ProtocolId,
    trader_id: &PublicKey,
    contract_id: Option<ContractId>,
    channel_id: &DlcChannelId,
    tx_position_feed: Sender<InternalPositionUpdateMessage>,
) -> Result<()> {
    let mut conn = self.pool.get()?;
    let dlc_protocol = db::dlc_protocols::get_dlc_protocol(&mut conn, protocol_id)?;
    conn.transaction(|conn| {
        match &dlc_protocol.protocol_type {
            DlcProtocolType::OpenChannel { trade_params }
            | DlcProtocolType::OpenPosition { trade_params } => {
                // These protocols must produce a new contract; roll back if
                // the caller did not supply one.
                let contract_id = contract_id
                    .context("missing contract id")
                    .map_err(|_| RollbackTransaction)?;

                self.finish_open_position_dlc_protocol(
                    conn,
                    trade_params,
                    protocol_id,
                    &contract_id,
                    channel_id,
                )
            }
            DlcProtocolType::ResizePosition { trade_params } => {
                let contract_id = contract_id
                    .context("missing contract id")
                    .map_err(|_| RollbackTransaction)?;

                self.finish_resize_position_dlc_protocol(
                    conn,
                    trade_params,
                    protocol_id,
                    &contract_id,
                    channel_id,
                )
            }
            DlcProtocolType::Settle { trade_params } => {
                let settled_contract = dlc_protocol.contract_id;

                self.finish_settle_dlc_protocol(
                    conn,
                    trade_params,
                    protocol_id,
                    // If the contract got settled, we do not get a new contract id, hence we
                    // copy the contract id of the settled contract.
                    settled_contract.as_ref(),
                    channel_id,
                )
            }
            DlcProtocolType::Rollover { rollover_params } => {
                let contract_id = contract_id
                    .context("missing contract id")
                    .map_err(|_| RollbackTransaction)?;

                self.finish_rollover_dlc_protocol(
                    conn,
                    trader_id,
                    protocol_id,
                    &contract_id,
                    channel_id,
                    rollover_params,
                )
            }
            DlcProtocolType::Close { .. } => {
                self.finish_close_channel_dlc_protocol(conn, trader_id, protocol_id, channel_id)
            }
            DlcProtocolType::ForceClose { .. } => {
                // A force close is never "finished" through this code path.
                debug_assert!(false, "Finishing unexpected dlc protocol types");
                Ok(())
            }
        }
    })?;

    // Only trade-executing protocols are broadcast to the position feed. A
    // send failure is logged but does not fail the (already committed)
    // protocol completion.
    match &dlc_protocol.protocol_type {
        DlcProtocolType::OpenChannel { trade_params }
        | DlcProtocolType::OpenPosition { trade_params }
        | DlcProtocolType::ResizePosition { trade_params }
        | DlcProtocolType::Settle { trade_params } => {
            if let Err(e) = {
                tx_position_feed.send(InternalPositionUpdateMessage::NewTrade {
                    quantity: if trade_params.direction == Direction::Short {
                        trade_params.quantity
                    } else {
                        // We want to reflect the quantity as seen by the coordinator
                        trade_params.quantity * -1.0
                    },
                    average_entry_price: trade_params.average_price,
                })
            } {
                tracing::error!("Could not notify channel about finished trade {e:#}");
            }
        }
        _ => {
            // A trade only happens in `OpenChannel`, `OpenPosition`, `ResizePosition` and
            // `Settle`.
        }
    }

    Ok(())
}
/// Complete the settle DLC protocol as successful and update the 10101 metadata accordingly in
/// a single database transaction.
///
/// - Set settle DLC protocol to success.
///
/// - Calculate the PNL and update the `[PositionState::Closing`] to `[PositionState::Closed`].
///
/// - Create and insert new trade.
///
/// - Mark relevant funding fee events as paid.
fn finish_settle_dlc_protocol(
    &self,
    conn: &mut PgConnection,
    trade_params: &TradeParams,
    protocol_id: ProtocolId,
    settled_contract: Option<&ContractId>,
    channel_id: &DlcChannelId,
) -> QueryResult<()> {
    db::dlc_protocols::set_dlc_protocol_state_to_success(
        conn,
        protocol_id,
        settled_contract,
        channel_id,
    )?;

    // TODO(holzeis): We are still updating the position based on the position state. This
    // will change once we only have a single position per user and representing
    // the position only as view on multiple trades.
    let position = match db::positions::Position::get_position_by_trader(
        conn,
        trade_params.trader,
        vec![
            // The price doesn't matter here.
            PositionState::Closing { closing_price: 0.0 },
        ],
    )? {
        Some(position) => position,
        None => {
            // Settling requires a position in state Closing; anything else is
            // inconsistent, so roll back the whole transaction.
            tracing::error!("No position in state Closing found.");
            return Err(RollbackTransaction);
        }
    };

    tracing::debug!(
        ?position,
        trader_id = %trade_params.trader,
        "Finalize closing position",
    );

    let trader_realized_pnl_sat = {
        let trader_position_direction = position.trader_direction;

        // `calculate_pnl` takes the margins by side (long/short), so map the
        // trader/coordinator margins according to the trader's direction.
        let (initial_margin_long, initial_margin_short) = match trader_position_direction {
            Direction::Long => (position.trader_margin, position.coordinator_margin),
            Direction::Short => (position.coordinator_margin, position.trader_margin),
        };

        match calculate_pnl(
            Decimal::from_f32(position.average_entry_price).expect("to fit into decimal"),
            Decimal::from_f32(trade_params.average_price).expect("to fit into decimal"),
            trade_params.quantity,
            trader_position_direction,
            initial_margin_long.to_sat(),
            initial_margin_short.to_sat(),
        ) {
            Ok(pnl) => pnl,
            Err(e) => {
                tracing::error!("Failed to calculate pnl. Error: {e:#}");
                return Err(RollbackTransaction);
            }
        }
    };

    let closing_price =
        Decimal::try_from(trade_params.average_price).expect("to fit into decimal");

    db::positions::Position::set_position_to_closed_with_pnl(
        conn,
        position.id,
        trader_realized_pnl_sat,
        closing_price,
    )?;

    let order_matching_fee = trade_params.matching_fee;

    // Record the closing trade, including the trader's realized PnL.
    let new_trade = NewTrade {
        position_id: position.id,
        contract_symbol: position.contract_symbol,
        trader_pubkey: trade_params.trader,
        quantity: trade_params.quantity,
        trader_leverage: trade_params.leverage,
        trader_direction: trade_params.direction,
        average_price: trade_params.average_price,
        order_matching_fee,
        trader_realized_pnl_sat: Some(trader_realized_pnl_sat),
    };

    db::trades::insert(conn, new_trade)?;

    mark_funding_fee_event_as_paid(conn, protocol_id)?;

    Ok(())
}
/// Complete a DLC protocol that opens a position, by updating several database tables in a
/// single transaction.
///
/// Specifically, we:
///
/// - Set DLC protocol to success.
/// - Update the position state to [`PositionState::Open`].
/// - Create and insert the new trade.
fn finish_open_position_dlc_protocol(
    &self,
    conn: &mut PgConnection,
    trade_params: &TradeParams,
    protocol_id: ProtocolId,
    contract_id: &ContractId,
    channel_id: &DlcChannelId,
) -> QueryResult<()> {
    db::dlc_protocols::set_dlc_protocol_state_to_success(
        conn,
        protocol_id,
        Some(contract_id),
        channel_id,
    )?;

    // TODO(holzeis): We are still updating the position based on the position state. This
    // will change once we only have a single position per user and representing
    // the position only as view on multiple trades.
    let position = db::positions::Position::update_position_state(
        conn,
        trade_params.trader.to_string(),
        vec![PositionState::Proposed],
        PositionState::Open,
    )?;

    let order_matching_fee = trade_params.matching_fee;

    // Record the opening trade; no PnL is realized when opening.
    let new_trade = NewTrade {
        position_id: position.id,
        contract_symbol: position.contract_symbol,
        trader_pubkey: trade_params.trader,
        quantity: trade_params.quantity,
        trader_leverage: trade_params.leverage,
        trader_direction: trade_params.direction,
        average_price: trade_params.average_price,
        order_matching_fee,
        trader_realized_pnl_sat: None,
    };

    db::trades::insert(conn, new_trade)?;

    Ok(())
}
/// Complete a DLC protocol that resizes a position, by updating several database tables in a
/// single transaction.
///
/// Sets the protocol to success, moves the position from `Resizing` back to
/// `Open`, records the trade (including any PnL realized by the resize) and
/// marks the linked funding fee events as paid.
fn finish_resize_position_dlc_protocol(
    &self,
    conn: &mut PgConnection,
    trade_params: &TradeParams,
    protocol_id: ProtocolId,
    contract_id: &ContractId,
    channel_id: &DlcChannelId,
) -> QueryResult<()> {
    db::dlc_protocols::set_dlc_protocol_state_to_success(
        conn,
        protocol_id,
        Some(contract_id),
        channel_id,
    )?;

    // TODO(holzeis): We are still updating the position based on the position state. This
    // will change once we only have a single position per user and representing
    // the position only as view on multiple trades.
    let position = db::positions::Position::update_position_state(
        conn,
        trade_params.trader.to_string(),
        vec![PositionState::Resizing],
        PositionState::Open,
    )?;

    let order_matching_fee = trade_params.matching_fee;

    let new_trade = NewTrade {
        position_id: position.id,
        contract_symbol: position.contract_symbol,
        trader_pubkey: trade_params.trader,
        quantity: trade_params.quantity,
        trader_leverage: trade_params.leverage,
        trader_direction: trade_params.direction,
        average_price: trade_params.average_price,
        order_matching_fee,
        trader_realized_pnl_sat: trade_params.trader_pnl.map(|pnl| pnl.to_sat()),
    };

    db::trades::insert(conn, new_trade)?;

    mark_funding_fee_event_as_paid(conn, protocol_id)?;

    Ok(())
}
/// Complete the rollover DLC protocol as successful and update the 10101 metadata accordingly,
/// in a single database transaction.
///
/// Updates the position with the new contract id, margins, leverages and
/// liquidation prices, and marks the linked funding fee events as paid.
fn finish_rollover_dlc_protocol(
    &self,
    conn: &mut PgConnection,
    trader: &PublicKey,
    protocol_id: ProtocolId,
    contract_id: &ContractId,
    channel_id: &DlcChannelId,
    rollover_params: &RolloverParams,
) -> QueryResult<()> {
    tracing::debug!(%trader, %protocol_id, "Finalizing rollover");

    db::dlc_protocols::set_dlc_protocol_state_to_success(
        conn,
        protocol_id,
        Some(contract_id),
        channel_id,
    )?;

    db::positions::Position::finish_rollover_protocol(
        conn,
        trader.to_string(),
        *contract_id,
        rollover_params.leverage_coordinator,
        rollover_params.margin_coordinator,
        rollover_params.liquidation_price_coordinator,
        rollover_params.leverage_trader,
        rollover_params.margin_trader,
        rollover_params.liquidation_price_trader,
    )?;

    mark_funding_fee_event_as_paid(conn, protocol_id)?;

    Ok(())
}
/// Completes the collab close dlc protocol as successful.
///
/// No contract is involved in a collaborative close, so only the protocol row
/// is updated.
fn finish_close_channel_dlc_protocol(
    &self,
    conn: &mut PgConnection,
    trader: &PublicKey,
    protocol_id: ProtocolId,
    channel_id: &DlcChannelId,
) -> QueryResult<()> {
    tracing::debug!(%trader, %protocol_id, "Finalizing channel close");

    db::dlc_protocols::set_dlc_protocol_state_to_success(conn, protocol_id, None, channel_id)
}
}
#[cfg(test)]
mod test {
    use crate::dlc_protocol::ProtocolId;
    use dlc_manager::ReferenceId;

    // A ProtocolId must survive conversion to the rust-dlc ReferenceId and
    // back without losing information.
    #[test]
    fn test_protocol_id_roundtrip() {
        let protocol_id_0 = ProtocolId::new();
        let reference_id = ReferenceId::from(protocol_id_0);
        let protocol_id_1 = ProtocolId::try_from(reference_id).unwrap();

        assert_eq!(protocol_id_0, protocol_id_1)
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/leaderboard.rs | coordinator/src/leaderboard.rs | use crate::db;
use crate::position::models::Position;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::PooledConnection;
use diesel::PgConnection;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::Decimal;
use serde::Deserialize;
use serde::Serialize;
use std::collections::HashMap;
use time::OffsetDateTime;
/// A ranked list of traders, serialized for the leader board API response.
#[derive(Serialize)]
pub struct LeaderBoard {
    pub(crate) entries: Vec<LeaderBoardEntry>,
}
/// A single row of the leader board.
#[derive(Serialize, Clone)]
pub struct LeaderBoardEntry {
    pub trader: PublicKey,
    /// Display name; empty if the user has not set a nickname.
    pub nickname: String,
    /// Sum of the trader's realized PnL (sats) over the considered positions.
    pub pnl: Decimal,
    /// Sum of the traded quantity over the considered positions.
    pub volume: Decimal,
    /// 1-based position in the sorted board (0 until ranks are assigned).
    pub rank: usize,
}
/// Query parameters accepted by the leader board endpoint.
///
/// See [`generate_leader_board`] for the defaults applied when a field is
/// absent.
#[derive(Debug, Deserialize)]
pub struct LeaderBoardQueryParams {
    /// How many traders to return.
    pub(crate) top: Option<usize>,
    /// If `true`, return the traders with the lowest values instead.
    pub(crate) reverse: Option<bool>,
    /// Metric to rank by.
    pub(crate) category: Option<LeaderBoardCategory>,
    // Time window boundaries; parsed from strings by the handler — TODO
    // confirm the expected format at the call site.
    pub(crate) start: Option<String>,
    pub(crate) end: Option<String>,
}
/// The metric the leader board is ranked by.
#[derive(Debug, Deserialize, Clone)]
pub enum LeaderBoardCategory {
    Pnl,
    Volume,
}
/// Returns the traders
///
/// Optional arguments:
/// - `[top]` defines how many traders are returned, default to 5
/// - `[category]` can be `PnL` or `Volume`, default is `PnL`
/// - `[reverse]` will return the traders with the lowest values, default is `false`
pub(crate) fn generate_leader_board(
    conn: &mut PooledConnection<ConnectionManager<PgConnection>>,
    top: usize,
    category: LeaderBoardCategory,
    reverse: bool,
    start: OffsetDateTime,
    end: OffsetDateTime,
) -> Result<Vec<LeaderBoardEntry>> {
    let positions = load_positions(conn, start, end)?;
    let ranked = sort_leader_board(top, category, reverse, positions);

    // Resolve display names only for the traders that made it onto the board.
    // A failed or missing user lookup degrades to an empty nickname.
    let mut board = Vec::with_capacity(ranked.len());
    for entry in ranked {
        let nickname = db::user::get_user(conn, &entry.trader)
            .unwrap_or_default()
            .and_then(|user| user.nickname)
            .unwrap_or_default();
        board.push(LeaderBoardEntry { nickname, ..entry });
    }

    Ok(board)
}
/// Aggregate closed positions per trader, rank them by `category` and return
/// at most `top` entries.
///
/// `reverse = false` ranks highest values first, `reverse = true` lowest
/// first. Nicknames are intentionally left empty; the caller resolves them.
fn sort_leader_board(
    top: usize,
    category: LeaderBoardCategory,
    reverse: bool,
    positions: HashMap<PublicKey, Vec<Position>>,
) -> Vec<LeaderBoardEntry> {
    // One entry per trader: sum realized PnL (sats) and traded quantity.
    let mut leader_board = positions
        .into_iter()
        .map(|(trader, positions)| LeaderBoardEntry {
            trader,
            nickname: "".to_string(),
            pnl: positions
                .iter()
                .map(|p| Decimal::from(p.trader_realized_pnl_sat.unwrap_or_default()))
                .sum(),
            volume: positions
                .iter()
                .map(|p| Decimal::from_f32(p.quantity).expect("to fit into decimal"))
                .sum(),
            // Ranks are assigned after sorting.
            rank: 0,
        })
        .collect::<Vec<LeaderBoardEntry>>();

    // Sort ascending when `reverse` is set, descending otherwise. The single
    // comparator with `Ordering::reverse` replaces the duplicated match arms.
    leader_board.sort_by(|a, b| {
        let ascending = match category {
            LeaderBoardCategory::Pnl => a.pnl.cmp(&b.pnl),
            LeaderBoardCategory::Volume => a.volume.cmp(&b.volume),
        };
        if reverse {
            ascending
        } else {
            ascending.reverse()
        }
    });

    // Keep at most `top` entries. `truncate` is a no-op when `top` exceeds the
    // number of traders, which replaces the manual bounds check and the extra
    // slice-to-vec copy of the previous implementation.
    leader_board.truncate(top);

    // Ranks are 1-based.
    for (index, entry) in leader_board.iter_mut().enumerate() {
        entry.rank = index + 1;
    }

    leader_board
}
/// Load all closed positions created strictly within `(start, end)` and group
/// them by trader.
///
/// Note: both window boundaries are exclusive, and filtering happens in
/// memory after loading every closed position from the database.
fn load_positions(
    conn: &mut PooledConnection<ConnectionManager<PgConnection>>,
    start: OffsetDateTime,
    end: OffsetDateTime,
) -> Result<HashMap<PublicKey, Vec<Position>>> {
    let positions = db::positions::Position::get_all_closed_positions(conn)?;

    let mut positions_by_trader: HashMap<PublicKey, Vec<Position>> = HashMap::new();
    // Stay lazy: no need to collect the filtered positions into an
    // intermediate `Vec` before grouping.
    for position in positions
        .into_iter()
        .filter(|pos| pos.creation_timestamp > start && pos.creation_timestamp < end)
    {
        positions_by_trader
            .entry(position.trader)
            .or_default()
            .push(position);
    }

    Ok(positions_by_trader)
}
#[cfg(test)]
pub mod tests {
    use crate::leaderboard::sort_leader_board;
    use crate::leaderboard::LeaderBoardCategory;
    use crate::position::models::Position;
    use crate::position::models::PositionState;
    use bitcoin::secp256k1::PublicKey;
    use bitcoin::Amount;
    use rust_decimal_macros::dec;
    use std::collections::HashMap;
    use std::str::FromStr;
    use time::OffsetDateTime;
    use xxi_node::commons::ContractSymbol;
    use xxi_node::commons::Direction;

    /// Fixture: trader_0 has two positions (pnl 100 each), trader_1 one
    /// break-even position, trader_2 one losing position. Per-trader PnL is
    /// therefore 200 / 0 / -100.
    #[test]
    pub fn given_3_leaders_sort_by_pnl() {
        let trader_0 = leader_0();
        let trader_1 = leader_1();
        let trader_2 = leader_2();
        let pos_0 = create_dummy_position(trader_0, 100, 100.0);
        let pos_1 = create_dummy_position(trader_0, 100, 100.0);
        let pos_2 = create_dummy_position(trader_1, 0, 100.0);
        let pos_3 = create_dummy_position(trader_2, -100, 300.0);
        let positions: HashMap<PublicKey, Vec<Position>> = [
            (trader_0, vec![pos_0, pos_1]),
            (trader_1, vec![pos_2]),
            (trader_2, vec![pos_3]),
        ]
        .into();
        // Descending by PnL: 200, 0, -100.
        let leader_board = sort_leader_board(3, LeaderBoardCategory::Pnl, false, positions.clone());
        assert_eq!(leader_board.first().unwrap().pnl, dec!(200));
        assert_eq!(leader_board.first().unwrap().rank, 1);
        assert_eq!(leader_board.first().unwrap().trader, trader_0);
        assert_eq!(leader_board.get(1).unwrap().pnl, dec!(0));
        assert_eq!(leader_board.get(1).unwrap().rank, 2);
        assert_eq!(leader_board.get(1).unwrap().trader, trader_1);
        assert_eq!(leader_board.get(2).unwrap().pnl, dec!(-100));
        assert_eq!(leader_board.get(2).unwrap().rank, 3);
        assert_eq!(leader_board.get(2).unwrap().trader, trader_2);
        // Reversed: ascending by PnL, ranks re-assigned from 1.
        let leader_board = sort_leader_board(3, LeaderBoardCategory::Pnl, true, positions);
        assert_eq!(leader_board.first().unwrap().pnl, dec!(-100));
        assert_eq!(leader_board.first().unwrap().rank, 1);
        assert_eq!(leader_board.first().unwrap().trader, trader_2);
        assert_eq!(leader_board.get(1).unwrap().pnl, dec!(0));
        assert_eq!(leader_board.get(1).unwrap().rank, 2);
        assert_eq!(leader_board.get(1).unwrap().trader, trader_1);
        assert_eq!(leader_board.get(2).unwrap().pnl, dec!(200));
        assert_eq!(leader_board.get(2).unwrap().rank, 3);
        assert_eq!(leader_board.get(2).unwrap().trader, trader_0);
    }

    /// Same fixture, ranked by volume (200 / 100 / 300 per trader) and
    /// truncated to the top 2 entries.
    #[test]
    pub fn given_3_take_2_leaders_sort_by_volume() {
        let trader_0 = leader_0();
        let trader_1 = leader_1();
        let trader_2 = leader_2();
        let pos_0 = create_dummy_position(trader_0, 100, 100.0);
        let pos_1 = create_dummy_position(trader_0, 100, 100.0);
        let pos_2 = create_dummy_position(trader_1, 0, 100.0);
        let pos_3 = create_dummy_position(trader_2, -100, 300.0);
        let positions: HashMap<PublicKey, Vec<Position>> = [
            (trader_0, vec![pos_0, pos_1]),
            (trader_1, vec![pos_2]),
            (trader_2, vec![pos_3]),
        ]
        .into();
        // Descending by volume, top 2: 300 then 200.
        let leader_board =
            sort_leader_board(2, LeaderBoardCategory::Volume, false, positions.clone());
        assert_eq!(leader_board.len(), 2);
        assert_eq!(leader_board.first().unwrap().volume, dec!(300));
        assert_eq!(leader_board.first().unwrap().rank, 1);
        assert_eq!(leader_board.first().unwrap().trader, trader_2);
        assert_eq!(leader_board.get(1).unwrap().volume, dec!(200));
        assert_eq!(leader_board.get(1).unwrap().rank, 2);
        assert_eq!(leader_board.get(1).unwrap().trader, trader_0);
        // Ascending by volume, top 2: 100 then 200.
        let leader_board = sort_leader_board(2, LeaderBoardCategory::Volume, true, positions);
        assert_eq!(leader_board.len(), 2);
        assert_eq!(leader_board.first().unwrap().volume, dec!(100));
        assert_eq!(leader_board.first().unwrap().rank, 1);
        assert_eq!(leader_board.first().unwrap().trader, trader_1);
        assert_eq!(leader_board.get(1).unwrap().volume, dec!(200));
        assert_eq!(leader_board.get(1).unwrap().rank, 2);
        assert_eq!(leader_board.get(1).unwrap().trader, trader_0);
    }

    /// Minimal closed position; only `trader`, `pnl` and `quantity` matter
    /// for the leader board aggregation.
    fn create_dummy_position(trader: PublicKey, pnl: i64, quantity: f32) -> Position {
        Position {
            id: 0,
            contract_symbol: ContractSymbol::BtcUsd,
            trader_leverage: 0.0,
            quantity,
            trader_direction: Direction::Long,
            average_entry_price: 0.0,
            trader_liquidation_price: 0.0,
            coordinator_liquidation_price: 0.0,
            position_state: PositionState::Closed { pnl: 0 },
            coordinator_margin: Amount::ZERO,
            creation_timestamp: OffsetDateTime::now_utc(),
            expiry_timestamp: OffsetDateTime::now_utc(),
            update_timestamp: OffsetDateTime::now_utc(),
            trader,
            coordinator_leverage: 0.0,
            temporary_contract_id: None,
            closing_price: None,
            trader_margin: Amount::ZERO,
            stable: false,
            trader_realized_pnl_sat: Some(pnl),
            order_matching_fees: Amount::ZERO,
        }
    }

    fn leader_2() -> PublicKey {
        PublicKey::from_str("02d5aa8fce495f6301b466594af056a46104dcdc6d735ec4793aa43108854cbd4a")
            .unwrap()
    }

    fn leader_1() -> PublicKey {
        PublicKey::from_str("03b6fbc0de09815e2eb508feb8288ba6ac7f24aa27bd63435f6247d010334eaff2")
            .unwrap()
    }

    fn leader_0() -> PublicKey {
        PublicKey::from_str("0218845781f631c48f1c9709e23092067d06837f30aa0cd0544ac887fe91ddd166")
            .unwrap()
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/settings.rs | coordinator/src/settings.rs | use crate::funding_fee::IndexPriceSource;
use crate::node::NodeSettings;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use serde::Deserialize;
use serde::Serialize;
use std::path::Path;
use std::path::PathBuf;
use tokio::fs;
use tokio::io::AsyncWriteExt;
use xxi_node::node::XXINodeSettings;
const SETTINGS_FILE_NAME: &str = "coordinator-settings.toml";
/// Top-level settings.
#[derive(Debug, Clone, Serialize)]
pub struct Settings {
    /// Whether traders are currently allowed to open new positions.
    pub new_positions_enabled: bool,
    /// Settings forwarded to the underlying xxi node.
    pub xxi: XXINodeSettings,
    // We don't want the doc block below to be auto-formatted.
    #[rustfmt::skip]
    /// A cron syntax for sending notifications about the rollover window being open.
    ///
    /// The format is:
    /// sec   min   hour   day of month   month   day of week   year
    /// *     *     *      *              *       *             *
    pub rollover_window_open_scheduler: String,
    // We don't want the doc block below to be auto-formatted.
    #[rustfmt::skip]
    /// A cron syntax for sending notifications about the rollover window closing.
    ///
    /// The format is:
    /// sec   min   hour   day of month   month   day of week   year
    /// *     *     *      *              *       *             *
    pub rollover_window_close_scheduler: String,
    // We don't want the doc block below to be auto-formatted.
    #[rustfmt::skip]
    /// A cron syntax for sending notifications to close an expired position.
    ///
    /// The format is:
    /// sec   min   hour   day of month   month   day of week   year
    /// *     *     *      *              *       *             *
    pub close_expired_position_scheduler: String,
    // We don't want the doc block below to be auto-formatted.
    #[rustfmt::skip]
    /// A cron syntax for sending notifications to close a liquidated position.
    ///
    /// The format is:
    /// sec   min   hour   day of month   month   day of week   year
    /// *     *     *      *              *       *             *
    pub close_liquidated_position_scheduler: String,
    // We don't want the doc block below to be auto-formatted.
    #[rustfmt::skip]
    /// A cron syntax for updating users bonus status.
    ///
    /// The format is:
    /// sec   min   hour   day of month   month   day of week   year
    /// *     *     *      *              *       *             *
    pub update_user_bonus_status_scheduler: String,
    // We don't want the doc block below to be auto-formatted.
    #[rustfmt::skip]
    /// A cron syntax for collecting metrics
    ///
    /// The format is :
    /// sec   min   hour   day of month   month   day of week   year
    /// *     *     *      *              *       *             *
    pub collect_metrics_scheduler: String,
    /// A cron syntax for generating funding fee events.
    ///
    /// The format is:
    /// sec   min   hour   day of month   month   day of week   year
    /// *     *     *      *              *       *             *
    pub generate_funding_fee_events_scheduler: String,
    // Location of the settings file in the file system.
    path: PathBuf,
    /// If enabled, only makers in [`whitelisted_makers`] are allowed to post limit orders
    pub whitelist_enabled: bool,
    /// A list of makers who are allowed to post limit orders. This is to prevent spam.
    pub whitelisted_makers: Vec<PublicKey>,
    /// The min quantity that we accept to be traded with.
    pub min_quantity: u64,
    /// The maintenance margin in percent, defining the required margin in the position. If the
    /// margin drops below that the position gets liquidated.
    pub maintenance_margin_rate: f32,
    /// The order matching fee rate, which is charged for matching an order. Note, this is at the
    /// moment applied for taker and maker orders.
    pub order_matching_fee_rate: f32,
    /// Where to get the index price from. This value is used to calculate funding fees.
    pub index_price_source: IndexPriceSource,
    /// The max leverage a trader can take
    pub max_leverage: u8,
}
impl Settings {
    /// Read `coordinator-settings.toml` from `data_dir` and parse it.
    pub async fn new(data_dir: &Path) -> Result<Self> {
        let settings_path = data_dir.join(SETTINGS_FILE_NAME);

        let raw = fs::read_to_string(&settings_path)
            .await
            .with_context(|| format!("Failed to read settings at {settings_path:?}"))?;

        let file = toml::from_str::<SettingsFile>(&raw).context("Unable to parse settings file")?;
        let settings = Self::from_file(file, settings_path);

        tracing::info!(?settings, "Read settings from file system");

        Ok(settings)
    }

    /// Serialize the current settings and overwrite the file they were loaded
    /// from.
    pub async fn write_to_file(&self) -> Result<()> {
        let serialized = toml::to_string_pretty(&SettingsFile::from(self.clone()))
            .context("Unable to serialize settings to TOML format")?;

        let mut file = fs::File::create(&self.path).await?;
        file.write_all(serialized.as_bytes()).await?;
        file.flush().await?;

        Ok(())
    }

    /// Return the node settings part of the settings file
    pub fn to_node_settings(&self) -> NodeSettings {
        NodeSettings {
            allow_opening_positions: self.new_positions_enabled,
            maintenance_margin_rate: self.maintenance_margin_rate,
            order_matching_fee_rate: self.order_matching_fee_rate,
        }
    }

    /// Replace the in-memory settings with `file`, keeping the on-disk path.
    pub fn update(&mut self, file: SettingsFile) {
        *self = Self::from_file(file, self.path.clone());
    }

    /// Combine a parsed settings file with the path it was loaded from.
    fn from_file(file: SettingsFile, path: PathBuf) -> Self {
        Self {
            path,
            new_positions_enabled: file.new_positions_enabled,
            xxi: file.xxi,
            rollover_window_open_scheduler: file.rollover_window_open_scheduler,
            rollover_window_close_scheduler: file.rollover_window_close_scheduler,
            close_expired_position_scheduler: file.close_expired_position_scheduler,
            close_liquidated_position_scheduler: file.close_liquidated_position_scheduler,
            update_user_bonus_status_scheduler: file.update_user_bonus_status_scheduler,
            collect_metrics_scheduler: file.collect_metrics_scheduler,
            generate_funding_fee_events_scheduler: file.generate_funding_fee_events_scheduler,
            whitelist_enabled: file.whitelist_enabled,
            whitelisted_makers: file.whitelisted_makers,
            min_quantity: file.min_quantity,
            maintenance_margin_rate: file.maintenance_margin_rate,
            order_matching_fee_rate: file.order_matching_fee_rate,
            index_price_source: file.index_price_source,
            max_leverage: file.max_leverage,
        }
    }
}
/// On-disk (TOML) representation of [`Settings`].
///
/// Mirrors [`Settings`] field by field, minus the `path` the file was loaded
/// from.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
pub struct SettingsFile {
    new_positions_enabled: bool,
    xxi: XXINodeSettings,
    rollover_window_open_scheduler: String,
    rollover_window_close_scheduler: String,
    close_expired_position_scheduler: String,
    close_liquidated_position_scheduler: String,
    update_user_bonus_status_scheduler: String,
    collect_metrics_scheduler: String,
    generate_funding_fee_events_scheduler: String,
    whitelist_enabled: bool,
    whitelisted_makers: Vec<PublicKey>,
    min_quantity: u64,
    maintenance_margin_rate: f32,
    order_matching_fee_rate: f32,
    index_price_source: IndexPriceSource,
    max_leverage: u8,
}
impl From<Settings> for SettingsFile {
    /// Convert the in-memory [`Settings`] back into the serializable file
    /// representation (everything except the on-disk `path`).
    fn from(value: Settings) -> Self {
        Self {
            new_positions_enabled: value.new_positions_enabled,
            xxi: value.xxi,
            rollover_window_open_scheduler: value.rollover_window_open_scheduler,
            rollover_window_close_scheduler: value.rollover_window_close_scheduler,
            close_expired_position_scheduler: value.close_expired_position_scheduler,
            close_liquidated_position_scheduler: value.close_liquidated_position_scheduler,
            update_user_bonus_status_scheduler: value.update_user_bonus_status_scheduler,
            collect_metrics_scheduler: value.collect_metrics_scheduler,
            generate_funding_fee_events_scheduler: value.generate_funding_fee_events_scheduler,
            // Bug fix: this was hard-coded to `false`, which silently disabled
            // the maker whitelist every time the settings were written back to
            // disk via `write_to_file`. Preserve the configured value instead.
            whitelist_enabled: value.whitelist_enabled,
            whitelisted_makers: value.whitelisted_makers,
            min_quantity: value.min_quantity,
            maintenance_margin_rate: value.maintenance_margin_rate,
            order_matching_fee_rate: value.order_matching_fee_rate,
            index_price_source: value.index_price_source,
            max_leverage: value.max_leverage,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::str::FromStr;

    /// Serializing a [`SettingsFile`] to TOML and parsing it back must yield
    /// an identical value.
    #[test]
    fn toml_serde_roundtrip() {
        let settings_file = SettingsFile {
            new_positions_enabled: true,
            xxi: XXINodeSettings {
                off_chain_sync_interval: std::time::Duration::from_secs(1),
                on_chain_sync_interval: std::time::Duration::from_secs(1),
                fee_rate_sync_interval: std::time::Duration::from_secs(1),
                sub_channel_manager_periodic_check_interval: std::time::Duration::from_secs(1),
                shadow_sync_interval: std::time::Duration::from_secs(1),
            },
            rollover_window_open_scheduler: "foo".to_string(),
            rollover_window_close_scheduler: "bar".to_string(),
            close_expired_position_scheduler: "baz".to_string(),
            close_liquidated_position_scheduler: "baz".to_string(),
            update_user_bonus_status_scheduler: "bazinga".to_string(),
            collect_metrics_scheduler: "42".to_string(),
            generate_funding_fee_events_scheduler: "qux".to_string(),
            whitelist_enabled: false,
            whitelisted_makers: vec![PublicKey::from_str(
                "0218845781f631c48f1c9709e23092067d06837f30aa0cd0544ac887fe91ddd166",
            )
            .unwrap()],
            min_quantity: 1,
            maintenance_margin_rate: 0.1,
            order_matching_fee_rate: 0.003,
            index_price_source: IndexPriceSource::Bitmex,
            max_leverage: 5,
        };

        let toml_string = toml::to_string_pretty(&settings_file).unwrap();
        let parsed: SettingsFile = toml::from_str(&toml_string).unwrap();

        assert_eq!(settings_file, parsed);
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/referrals.rs | coordinator/src/referrals.rs | use crate::db;
use crate::db::bonus_status::BonusType;
use crate::db::bonus_tiers::BonusTier;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::PooledConnection;
use diesel::PgConnection;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::Decimal;
use std::str::FromStr;
use time::OffsetDateTime;
use xxi_node::commons::ReferralStatus;
/// When updating a referral status we only want to look at users which had a login in the last 48h.
const DAYS_SINCE_LAST_LOGIN: i64 = 2;
/// Compute a trader's current [`ReferralStatus`].
///
/// The most generous active bonus (highest `fee_rebate`) wins. If the trader
/// has no active bonus, a plain default status is returned.
///
/// # Errors
///
/// Fails if the database lookups fail or the user does not exist.
pub fn get_referral_status(
    trader_pubkey: PublicKey,
    connection: &mut PooledConnection<ConnectionManager<PgConnection>>,
) -> Result<ReferralStatus> {
    let mut bonus_status = db::bonus_status::active_status_for_user(connection, &trader_pubkey)?;
    let user = db::user::get_user(connection, &trader_pubkey)?.context("User not found")?;

    // Sort by fee rebate, highest first. `f32::total_cmp` provides a total
    // order and cannot panic — unlike the previous
    // `partial_cmp(..).expect(..)`, which would have panicked if a NaN rebate
    // ever made it into the database.
    bonus_status.sort_by(|a, b| b.fee_rebate.total_cmp(&a.fee_rebate));

    // Pick the bonus with the highest rebate, if any.
    if let Some(bonus) = bonus_status.first() {
        let referrals =
            db::bonus_tiers::all_referrals_by_referring_user(connection, &trader_pubkey)?;
        return Ok(ReferralStatus {
            referral_code: user.referral_code,
            // A referral only counts as activated once the referred user has
            // traded a whole unit of quantity.
            number_of_activated_referrals: referrals
                .iter()
                .filter(|referral| referral.referred_user_total_quantity.floor() > 0.0)
                .count(),
            number_of_total_referrals: referrals.len(),
            referral_tier: bonus.tier_level as usize,
            referral_fee_bonus: Decimal::from_f32(bonus.fee_rebate).expect("to fit"),
            bonus_status_type: Some(bonus.bonus_type.into()),
        });
    }

    // None of the above, user is a boring normal user
    Ok(ReferralStatus::new(trader_pubkey))
}
/// Refresh the referral status of every user that logged in within the last
/// [`DAYS_SINCE_LAST_LOGIN`] days. Returns how many users were considered.
pub fn update_referral_status(
    connection: &mut PooledConnection<ConnectionManager<PgConnection>>,
) -> Result<usize> {
    let cutoff = OffsetDateTime::now_utc() - time::Duration::days(DAYS_SINCE_LAST_LOGIN);
    let users = db::user::all_with_login_date(connection, cutoff)?;
    let considered = users.len();

    // Failures are logged per user and do not abort the batch.
    for user in users {
        let trader_pubkey = user.pubkey.clone();
        if let Err(err) = update_referral_status_for_user(connection, user.pubkey) {
            tracing::error!(
                trader_pubkey,
                "Failed at updating referral status for user {err}"
            )
        }
    }

    Ok(considered)
}
/// Updates the referral status for a user based on data in the database
///
/// Recomputes the user's referral tier from their activated referrals and
/// persists a new bonus status if they qualify. Returns the better of the
/// existing and the newly computed status (by `referral_fee_bonus`).
///
/// NOTE(review): panics if `trader_pubkey_str` is not a valid public key —
/// acceptable only because the string comes from our own database.
pub fn update_referral_status_for_user(
    connection: &mut PooledConnection<ConnectionManager<PgConnection>>,
    trader_pubkey_str: String,
) -> Result<ReferralStatus> {
    let trader_pubkey =
        PublicKey::from_str(trader_pubkey_str.as_str()).expect("to be a valid pubkey");
    tracing::debug!(
        trader_pubkey = trader_pubkey_str,
        "Updating referral status"
    );
    // First fetch the existing status so it can be compared against the newly
    // computed one further down.
    let existing_status = get_referral_status(trader_pubkey, connection)?;
    // Next, work out whether the user qualifies for a referral bonus.
    let user = db::user::get_user(connection, &trader_pubkey)?.context("User not found")?;
    let referrals = db::bonus_tiers::all_referrals_by_referring_user(connection, &trader_pubkey)?;
    let bonus_tiers = db::bonus_tiers::all_active_by_type(connection, vec![BonusType::Referral])?;
    let total_referrals = referrals.len();
    // A referral counts once the referred user traded any quantity.
    // NOTE(review): `get_referral_status` uses `.floor() > 0.0` here instead;
    // the two activation thresholds differ for quantities in (0, 1) — confirm
    // which one is intended.
    let active_referrals = referrals
        .iter()
        .filter(|referrals| referrals.referred_user_total_quantity > 0.0)
        .count();
    let referral_code = user.referral_code;
    if active_referrals > 0 {
        // Persist the freshly computed tier for this user.
        let referral_tier = calculate_bonus_status_inner(bonus_tiers.clone(), active_referrals)?;
        let status = db::bonus_status::insert(
            connection,
            &trader_pubkey,
            referral_tier,
            BonusType::Referral,
        )?;
        tracing::debug!(
            trader_pubkey = trader_pubkey.to_string(),
            tier_level = status.tier_level,
            bonus_type = ?status.bonus_type,
            activation_timestamp = status.activation_timestamp.to_string(),
            deactivation_timestamp = status.deactivation_timestamp.to_string(),
            "Updated user's bonus status"
        );
        let maybe_bonus_tier = bonus_tiers
            .into_iter()
            .find(|tier| tier.tier_level == referral_tier)
            .context("Calculated bonus tier does not exist")?;
        tracing::debug!(
            trader_pubkey = trader_pubkey.to_string(),
            tier_level = maybe_bonus_tier.tier_level,
            bonus_tier_type = ?maybe_bonus_tier.bonus_tier_type,
            total_referrals,
            active_referrals,
            "Trader has referral status"
        );
        let referral_fee_bonus = Decimal::from_f32(maybe_bonus_tier.fee_rebate).expect("to fit");
        // If the status already on record grants a higher rebate, keep it.
        if existing_status.referral_fee_bonus > referral_fee_bonus {
            tracing::debug!(
                trader_pubkey = trader_pubkey_str,
                bonus_tier = existing_status.referral_tier,
                bonus_level = ?existing_status.bonus_status_type,
                "User has active bonus status"
            );
            return Ok(existing_status);
        }
        return Ok(ReferralStatus {
            referral_code,
            number_of_activated_referrals: active_referrals,
            number_of_total_referrals: total_referrals,
            referral_tier: maybe_bonus_tier.tier_level as usize,
            referral_fee_bonus,
            bonus_status_type: Some(maybe_bonus_tier.bonus_tier_type.into()),
        });
    }
    // No active referrals: nothing changes, report the existing status.
    tracing::debug!(
        trader_pubkey = trader_pubkey_str,
        bonus_tier = existing_status.referral_tier,
        bonus_status_type = ?existing_status.bonus_status_type,
        "User's bonus status"
    );
    Ok(existing_status)
}
/// Returns the tier_level of the calculated tier.
///
/// e.g. user has 10 referrals, first 5 have already traded
/// bonus_tier_0 needs 3 referrals
/// bonus_tier_1 needs 5 referrals
/// bonus_tier_2 needs 10 referrals
///
/// each referral only counts if they have traded at least once.
///
/// In this case, we should return tier 1.
fn calculate_bonus_status_inner(
    mut bonus_tiers: Vec<BonusTier>,
    referred_users: usize,
) -> Result<i32> {
    // Highest threshold first, so the first tier whose minimum is met is the
    // best tier the user qualifies for.
    bonus_tiers.sort_by_key(|tier| std::cmp::Reverse(tier.min_users_to_refer));

    let tier_level = bonus_tiers
        .iter()
        .find(|tier| tier.min_users_to_refer <= referred_users as i32)
        .map(|tier| tier.tier_level)
        // No threshold reached: the user stays at the base tier.
        .unwrap_or(0);

    Ok(tier_level)
}
#[cfg(test)]
pub mod tests {
    use crate::db::bonus_status::BonusType;
    use crate::db::bonus_tiers::BonusTier;
    use crate::referrals::calculate_bonus_status_inner;

    // Fixture thresholds: tier 0 needs 0 referrals, tier 1 needs 10,
    // tier 2 needs 20, tier 3 needs 30.

    #[test]
    pub fn given_no_referred_users_then_tier_level_0() {
        let referral_tier = calculate_bonus_status_inner(create_dummy_tiers(), 0).unwrap();
        assert_eq!(referral_tier, 0);
    }

    #[test]
    pub fn given_tier_1_referred_users_then_tier_level_1() {
        let referral_tier = calculate_bonus_status_inner(create_dummy_tiers(), 10).unwrap();
        assert_eq!(referral_tier, 1);
    }

    #[test]
    pub fn given_tier_2_referred_users_then_tier_level_2() {
        let referral_tier = calculate_bonus_status_inner(create_dummy_tiers(), 20).unwrap();
        assert_eq!(referral_tier, 2);
    }

    // 15 referrals sits between tier 1 (10) and tier 2 (20): tier 1 applies.
    #[test]
    pub fn given_tier_1_and_not_enough_tier_2_referred_users_then_tier_level_1() {
        let referral_tier = calculate_bonus_status_inner(create_dummy_tiers(), 15).unwrap();
        assert_eq!(referral_tier, 1);
    }

    #[test]
    pub fn given_enough_tier_3_referred_users_then_tier_level_3() {
        let referral_tier = calculate_bonus_status_inner(create_dummy_tiers(), 30).unwrap();
        assert_eq!(referral_tier, 3);
    }

    /// Four referral tiers with ascending thresholds and rebates.
    fn create_dummy_tiers() -> Vec<BonusTier> {
        vec![
            BonusTier {
                id: 0,
                tier_level: 0,
                min_users_to_refer: 0,
                fee_rebate: 0.0,
                bonus_tier_type: BonusType::Referral,
                active: true,
            },
            BonusTier {
                id: 1,
                tier_level: 1,
                min_users_to_refer: 10,
                fee_rebate: 0.2,
                bonus_tier_type: BonusType::Referral,
                active: true,
            },
            BonusTier {
                id: 2,
                tier_level: 2,
                min_users_to_refer: 20,
                fee_rebate: 0.3,
                bonus_tier_type: BonusType::Referral,
                active: true,
            },
            BonusTier {
                id: 3,
                tier_level: 3,
                min_users_to_refer: 30,
                fee_rebate: 0.3,
                bonus_tier_type: BonusType::Referral,
                active: true,
            },
        ]
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/node.rs | coordinator/src/node.rs | use crate::db;
use crate::dlc_protocol;
use crate::message::OrderbookMessage;
use crate::node::storage::NodeStorage;
use crate::position::models::PositionState;
use crate::storage::CoordinatorTenTenOneStorage;
use crate::trade::websocket::InternalPositionUpdateMessage;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::Pool;
use diesel::PgConnection;
use dlc_manager::channel::signed_channel::SignedChannel;
use dlc_manager::channel::signed_channel::SignedChannelState;
use dlc_manager::channel::Channel;
use dlc_messages::channel::AcceptChannel;
use dlc_messages::channel::Reject;
use dlc_messages::channel::RenewFinalize;
use dlc_messages::channel::SettleFinalize;
use dlc_messages::channel::SignChannel;
use lnd_bridge::LndBridge;
use std::sync::Arc;
use tokio::sync::broadcast::Sender;
use tokio::sync::mpsc;
use tokio::sync::RwLock;
use xxi_node::bitcoin_conversion::to_secp_pk_29;
use xxi_node::bitcoin_conversion::to_secp_pk_30;
use xxi_node::commons::Message::RolloverError;
use xxi_node::commons::Message::TradeError;
use xxi_node::commons::TradingError;
use xxi_node::dlc_message::DlcMessage;
use xxi_node::dlc_message::SerializedDlcMessage;
use xxi_node::message_handler::TenTenOneAcceptChannel;
use xxi_node::message_handler::TenTenOneCollaborativeCloseOffer;
use xxi_node::message_handler::TenTenOneMessage;
use xxi_node::message_handler::TenTenOneMessageType;
use xxi_node::message_handler::TenTenOneReject;
use xxi_node::message_handler::TenTenOneRenewFinalize;
use xxi_node::message_handler::TenTenOneRolloverFinalize;
use xxi_node::message_handler::TenTenOneSettleFinalize;
use xxi_node::message_handler::TenTenOneSignChannel;
use xxi_node::node;
use xxi_node::node::event::NodeEvent;
use xxi_node::node::tentenone_message_name;
use xxi_node::node::ProtocolId;
use xxi_node::node::RunningNode;
pub mod channel;
pub mod expired_positions;
pub mod invoice;
pub mod liquidated_positions;
pub mod rollover;
pub mod storage;
pub mod unrealized_pnl;
/// Runtime-tunable settings of the coordinator node.
#[derive(Debug, Clone)]
pub struct NodeSettings {
    // At times, we want to disallow opening new positions (e.g. before
    // scheduled upgrade)
    pub allow_opening_positions: bool,
    /// Margin rate below which a position gets liquidated.
    pub maintenance_margin_rate: f32,
    /// Fee rate charged for matching an order.
    pub order_matching_fee_rate: f32,
}
/// Coordinator-side wrapper around the inner xxi node, bundling database
/// access, runtime settings and the channels used to notify traders.
#[derive(Clone)]
pub struct Node {
    /// The underlying xxi node doing the on-chain/DLC heavy lifting.
    pub inner: Arc<
        node::Node<
            bdk_file_store::Store<bdk::wallet::ChangeSet>,
            CoordinatorTenTenOneStorage,
            NodeStorage,
        >,
    >,
    // Kept alive for the lifetime of the node; dropping it would stop the
    // background tasks of the running node.
    _running: Arc<RunningNode>,
    /// Postgres connection pool.
    pub pool: Pool<ConnectionManager<PgConnection>>,
    /// Runtime-tunable settings, shared behind an async RwLock.
    pub settings: Arc<RwLock<NodeSettings>>,
    /// Broadcast channel for internal position updates.
    pub tx_position_feed: Sender<InternalPositionUpdateMessage>,
    // Channel used to push trade/rollover errors to traders.
    trade_notifier: mpsc::Sender<OrderbookMessage>,
    pub lnd_bridge: LndBridge,
}
impl Node {
/// Build the coordinator [`Node`] wrapper around an already-running inner
/// node.
pub fn new(
    inner: Arc<
        node::Node<
            bdk_file_store::Store<bdk::wallet::ChangeSet>,
            CoordinatorTenTenOneStorage,
            NodeStorage,
        >,
    >,
    running: RunningNode,
    pool: Pool<ConnectionManager<PgConnection>>,
    settings: NodeSettings,
    tx_position_feed: Sender<InternalPositionUpdateMessage>,
    trade_notifier: mpsc::Sender<OrderbookMessage>,
    lnd_bridge: LndBridge,
) -> Self {
    Self {
        inner,
        // Keep the running node alive for as long as this wrapper exists.
        _running: Arc::new(running),
        pool,
        settings: Arc::new(RwLock::new(settings)),
        tx_position_feed,
        trade_notifier,
        lnd_bridge,
    }
}
/// Returns true or false, whether the given peer_id is connected with us.
pub fn is_connected(&self, peer_id: PublicKey) -> bool {
    // Convert once, then scan the currently connected peers.
    let target = to_secp_pk_29(peer_id);
    self.inner
        .peer_manager
        .get_peer_node_ids()
        .into_iter()
        .any(|(id, _)| id == target)
}
/// Drain and process all DLC messages received from peers.
///
/// For every message that fails to process, the associated DLC protocol is
/// marked as failed and — for trade/expire/liquidate/rollover messages — an
/// error is pushed to the affected trader via the orderbook channel.
pub fn process_incoming_dlc_messages(&self) {
    // Cheap early-out when nothing is queued.
    if !self
        .inner
        .dlc_message_handler
        .has_pending_messages_to_process()
    {
        return;
    }
    let messages = self
        .inner
        .dlc_message_handler
        .get_and_clear_received_messages();
    for (node_id, msg) in messages {
        let msg_name = tentenone_message_name(&msg);
        if let Err(e) = self.process_dlc_message(to_secp_pk_30(node_id), &msg) {
            // Best effort: mark the protocol as failed even if notifying the
            // trader fails afterwards.
            if let Err(e) = self.set_dlc_protocol_to_failed(&msg) {
                tracing::error!(
                    from = %node_id,
                    "Failed to set dlc protocol to failed. {e:#}"
                );
            }
            // Notify the trader asynchronously; the message kind decides
            // whether it maps to a trade error, a rollover error, or nothing.
            tokio::spawn({
                let trade_notifier = self.trade_notifier.clone();
                let error = TradingError::Other(format!("{e:#}"));
                async move {
                    let message = match msg.get_tentenone_message_type() {
                        TenTenOneMessageType::Trade
                        | TenTenOneMessageType::Expire
                        | TenTenOneMessageType::Liquidate => {
                            if let Some(order_id) = msg.get_order_id() {
                                OrderbookMessage::TraderMessage {
                                    trader_id: to_secp_pk_30(node_id),
                                    message: TradeError { order_id, error },
                                    notification: None,
                                }
                            } else {
                                tracing::warn!("Could not send trade error to user due to missing order id");
                                return;
                            }
                        }
                        TenTenOneMessageType::Rollover => OrderbookMessage::TraderMessage {
                            trader_id: to_secp_pk_30(node_id),
                            message: RolloverError { error },
                            notification: None,
                        },
                        TenTenOneMessageType::Other => {
                            tracing::debug!("Not sending errors to the app unrelated to a trade or rollover.");
                            return;
                        }
                    };
                    if let Err(e) = trade_notifier.send(message).await {
                        tracing::error!("Failed to send trade error to user. Error: {e:#}");
                    }
                }
            });
            tracing::error!(
                from = %node_id,
                kind = %msg_name,
                "Failed to process DLC message: {e:#}"
            );
        }
    }
}
/// Mark the DLC protocol referenced by `msg` as failed, if the message
/// carries a reference id; messages without one are ignored.
fn set_dlc_protocol_to_failed(&self, msg: &TenTenOneMessage) -> Result<()> {
    let Some(reference_id) = msg.get_reference_id() else {
        // Nothing to fail: the message is not tied to a protocol.
        return Ok(());
    };

    let protocol_id = ProtocolId::try_from(reference_id)?;
    dlc_protocol::DlcProtocolExecutor::new(self.pool.clone()).fail_dlc_protocol(protocol_id)?;

    Ok(())
}
/// Process an incoming [`TenTenOneMessage`] and update the 10101 position accordingly.
///
/// - Any other kind of message will be ignored.
/// - Any message that has already been processed will be skipped.
///
/// Offers such as [`TenTenOneMessage::Offer`], [`TenTenOneMessage::SettleOffer`],
/// [`TenTenOneMessage::RolloverOffer`], [`TenTenOneMessage::CollaborativeCloseOffer`] and
/// [`TenTenOneMessage::RenewOffer`] are automatically accepted. Unless the maturity date of
/// the offer is already outdated.
///
/// FIXME(holzeis): This function manipulates different data objects from different data sources
/// and should use a transaction to make all changes atomic. Not doing so risks ending up in an
/// inconsistent state. One way of fixing that could be to: (1) use a single data source for the
/// 10101 data and the `rust-dlc` data; (2) wrap the function into a DB transaction which can be
/// atomically rolled back on error or committed on success.
pub fn process_dlc_message(&self, node_id: PublicKey, msg: &TenTenOneMessage) -> Result<()> {
tracing::info!(
from = %node_id,
kind = %tentenone_message_name(msg),
"Processing message"
);
let protocol_id = match msg.get_reference_id() {
Some(reference_id) => Some(ProtocolId::try_from(reference_id)?),
None => None,
};
tracing::debug!(
from = %node_id,
?protocol_id,
"Received message"
);
let inbound_msg = {
let mut conn = self.pool.get()?;
let serialized_inbound_message = SerializedDlcMessage::try_from(msg)?;
let inbound_msg = DlcMessage::new(node_id, serialized_inbound_message, true)?;
match db::dlc_messages::get(&mut conn, &inbound_msg.message_hash)? {
Some(_) => {
tracing::debug!(%node_id, kind=%tentenone_message_name(msg), "Received message that has already been processed, skipping.");
return Ok(());
}
None => inbound_msg,
}
};
self.verify_collab_close_offer(&node_id, msg)?;
let resp = self
.inner
.process_tentenone_message(msg.clone(), node_id)
.with_context(|| {
format!(
"Failed to handle {} dlc message from {node_id}",
tentenone_message_name(msg)
)
})?;
if let Some(msg) = resp.clone() {
// store dlc message immediately so we do not lose the response if something
// goes wrong afterwards.
self.inner
.event_handler
.publish(NodeEvent::StoreDlcMessage { peer: node_id, msg });
}
{
let mut conn = self.pool.get()?;
db::dlc_messages::insert(&mut conn, inbound_msg)?;
}
match msg {
TenTenOneMessage::RenewFinalize(TenTenOneRenewFinalize {
renew_finalize:
RenewFinalize {
channel_id,
reference_id,
..
},
..
})
| TenTenOneMessage::RolloverFinalize(TenTenOneRolloverFinalize {
renew_finalize:
RenewFinalize {
channel_id,
reference_id,
..
},
}) => {
let channel_id_hex_string = hex::encode(channel_id);
let reference_id = match reference_id {
Some(reference_id) => *reference_id,
// If the app did not yet update to the latest version, it will not
// send us the reference id in the message. In that case we will
// have to look up the reference id ourselves from the channel.
// TODO(holzeis): Remove this fallback handling once not needed
// anymore.
None => self
.inner
.get_dlc_channel_by_id(channel_id)?
.get_reference_id()
.context("missing reference id")?,
};
let protocol_id = ProtocolId::try_from(reference_id)?;
tracing::info!(
channel_id = channel_id_hex_string,
node_id = node_id.to_string(),
%protocol_id,
"DLC channel renew protocol was finalized"
);
let channel = self.inner.get_dlc_channel_by_id(channel_id)?;
let protocol_executor = dlc_protocol::DlcProtocolExecutor::new(self.pool.clone());
protocol_executor.finish_dlc_protocol(
protocol_id,
&node_id,
channel.get_contract_id(),
channel_id,
self.tx_position_feed.clone(),
)?;
}
TenTenOneMessage::SettleFinalize(TenTenOneSettleFinalize {
settle_finalize:
SettleFinalize {
channel_id,
reference_id,
..
},
..
}) => {
let channel_id_hex_string = hex::encode(channel_id);
let reference_id = match reference_id {
Some(reference_id) => *reference_id,
// If the app did not yet update to the latest version, it will not
// send us the reference id in the message. In that case we will
// have to look up the reference id ourselves from the channel.
// TODO(holzeis): Remove this fallback handling once not needed
// anymore.
None => self
.inner
.get_dlc_channel_by_id(channel_id)?
.get_reference_id()
.context("missing reference id")?,
};
let protocol_id = ProtocolId::try_from(reference_id)?;
tracing::info!(
channel_id = channel_id_hex_string,
node_id = node_id.to_string(),
%protocol_id,
"DLC channel settle protocol was finalized"
);
let protocol_executor = dlc_protocol::DlcProtocolExecutor::new(self.pool.clone());
protocol_executor.finish_dlc_protocol(
protocol_id,
&node_id,
// the settled signed channel does not have a contract
None,
channel_id,
self.tx_position_feed.clone(),
)?;
}
TenTenOneMessage::CollaborativeCloseOffer(TenTenOneCollaborativeCloseOffer {
collaborative_close_offer: close_offer,
}) => {
tracing::info!(
channel_id = hex::encode(close_offer.channel_id),
node_id = node_id.to_string(),
"Accepting offer to collaboratively close a channel"
);
self.inner
.accept_dlc_channel_collaborative_close(&close_offer.channel_id)?;
}
TenTenOneMessage::Accept(TenTenOneAcceptChannel {
accept_channel:
AcceptChannel {
temporary_channel_id,
reference_id,
..
},
..
}) => {
let channel_id = match resp {
Some(TenTenOneMessage::Sign(TenTenOneSignChannel {
sign_channel: SignChannel { channel_id, .. },
..
})) => channel_id,
_ => *temporary_channel_id,
};
let reference_id = match reference_id {
Some(reference_id) => *reference_id,
// If the app did not yet update to the latest version, it will not
// send us the reference id in the message. In that case we will
// have to look up the reference id ourselves from the channel.
// TODO(holzeis): Remove this fallback handling once not needed
// anymore.
None => self
.inner
.get_dlc_channel_by_id(&channel_id)?
.get_reference_id()
.context("missing reference id")?,
};
let protocol_id = ProtocolId::try_from(reference_id)?;
tracing::info!(
channel_id = hex::encode(channel_id),
node_id = node_id.to_string(),
%protocol_id,
"DLC channel open protocol was finalized"
);
let channel = self.inner.get_dlc_channel_by_id(&channel_id)?;
let protocol_executor = dlc_protocol::DlcProtocolExecutor::new(self.pool.clone());
protocol_executor.finish_dlc_protocol(
protocol_id,
&node_id,
channel.get_contract_id(),
&channel_id,
self.tx_position_feed.clone(),
)?;
}
TenTenOneMessage::Reject(TenTenOneReject {
reject:
Reject {
channel_id,
reference_id,
..
},
}) => {
let channel_id_hex_string = hex::encode(channel_id);
let reference_id = match reference_id {
Some(reference_id) => *reference_id,
// If the app did not yet update to the latest version, it will not
// send us the reference id in the message. In that case we will
// have to look up the reference id ourselves from the channel.
// TODO(holzeis): Remove this fallback handling once not needed
// anymore.
None => self
.inner
.get_dlc_channel_by_id(channel_id)?
.get_reference_id()
.context("missing reference id")?,
};
let protocol_id = ProtocolId::try_from(reference_id)?;
let protocol_executor = dlc_protocol::DlcProtocolExecutor::new(self.pool.clone());
protocol_executor.fail_dlc_protocol(protocol_id)?;
let channel = self.inner.get_dlc_channel_by_id(channel_id)?;
let mut connection = self.pool.get()?;
match channel {
Channel::Cancelled(_) => {
tracing::info!(
channel_id = channel_id_hex_string,
node_id = node_id.to_string(),
"DLC Channel offer has been rejected. Setting position to failed."
);
db::positions::Position::update_position_state(
&mut connection,
node_id.to_string(),
vec![PositionState::Proposed],
PositionState::Failed,
)?;
}
Channel::Signed(SignedChannel {
state: SignedChannelState::Established { .. },
..
}) => {
// TODO(holzeis): Reverting the position state back from `Closing`
// to `Open` only works as long as we do not support resizing. This
// logic needs to be adapted when we implement resize.
tracing::info!(
channel_id = channel_id_hex_string,
node_id = node_id.to_string(),
"DLC Channel settle offer has been rejected. Setting position to back to open."
);
db::positions::Position::update_position_state(
&mut connection,
node_id.to_string(),
vec![
// the closing price doesn't matter here.
PositionState::Closing { closing_price: 0.0 },
PositionState::Rollover,
],
PositionState::Open,
)?;
}
Channel::Signed(SignedChannel {
state: SignedChannelState::Settled { .. },
..
}) => {
tracing::info!(
channel_id = channel_id_hex_string,
node_id = node_id.to_string(),
"DLC Channel renew offer has been rejected. Setting position to failed."
);
db::positions::Position::update_position_state(
&mut connection,
node_id.to_string(),
vec![PositionState::Proposed],
PositionState::Failed,
)?;
}
_ => {}
}
}
_ => {}
};
if let Some(msg) = resp {
// Everything has been processed successfully, we can safely send the last dlc message,
// that has been stored before.
tracing::info!(
to = %node_id,
kind = %tentenone_message_name(&msg),
"Sending message"
);
self.inner
.event_handler
.publish(NodeEvent::SendLastDlcMessage { peer: node_id });
}
Ok(())
}
/// TODO(holzeis): We need to intercept the collaborative close offer before
/// processing it in `rust-dlc` as it would otherwise overwrite the `own_payout`
/// amount, which would prevent us from verifying the proposed payout amount.
///
/// If the expected own payout amount does not match the offered own payout amount,
/// we will simply ignore the offer.
fn verify_collab_close_offer(&self, node_id: &PublicKey, msg: &TenTenOneMessage) -> Result<()> {
let close_offer = match msg {
TenTenOneMessage::CollaborativeCloseOffer(TenTenOneCollaborativeCloseOffer {
collaborative_close_offer: close_offer,
}) => close_offer,
_ => return Ok(()),
};
let channel = self.inner.get_dlc_channel_by_id(&close_offer.channel_id)?;
match channel {
Channel::Signed(SignedChannel {
state: SignedChannelState::Established { .. },
channel_id,
..
}) => {
let channel_id_hex = hex::encode(channel_id);
tracing::debug!(%node_id, channel_id = %channel_id_hex, "Ignoring dlc channel collaborative close offer");
bail!("channel_id = {channel_id_hex}, node_id = {node_id}, state = Established Received DLC channel \
collaborative close offer in an unexpected signed channel state");
}
Channel::Signed(SignedChannel {
state:
SignedChannelState::Settled {
own_payout: expected_own_payout,
..
},
channel_id,
..
}) => {
let offered_own_payout = close_offer.counter_payout;
if expected_own_payout != offered_own_payout {
let channel_id_hex = hex::encode(channel_id);
// TODO(holzeis): Implement reject collaborative close offer flow https://github.com/get10101/10101/issues/2019
tracing::debug!(%node_id, channel_id = %channel_id_hex, "Ignoring dlc channel collaborative close offer");
bail!("node_id = {node_id}, channel_id = {channel_id_hex}, offered_own_payout = {offered_own_payout}, \
expected_own_payout = {expected_own_payout}, Received DLC channel collaborative close offer with an invalid payout");
}
}
_ => {}
};
let protocol_id = close_offer.reference_id.context("Missing reference id")?;
let protocol_id = ProtocolId::try_from(protocol_id)?;
let previous_id = channel
.get_reference_id()
.map(ProtocolId::try_from)
.transpose()?;
let protocol_executor = dlc_protocol::DlcProtocolExecutor::new(self.pool.clone());
protocol_executor.start_close_channel_protocol(
protocol_id,
previous_id,
&channel.get_id(),
node_id,
)?;
Ok(())
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/lib.rs | coordinator/src/lib.rs | use anyhow::anyhow;
use anyhow::Result;
use axum::http::StatusCode;
use axum::response::IntoResponse;
use axum::response::Response;
use axum::Json;
use bitcoin::Amount;
use diesel::PgConnection;
use diesel_migrations::embed_migrations;
use diesel_migrations::EmbeddedMigrations;
use diesel_migrations::MigrationHarness;
use dlc_manager::DlcChannelId;
use hex::FromHex;
use lightning::ln::ChannelId;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use serde_json::json;
use xxi_node::commons;
mod collaborative_revert;
mod emergency_kit;
mod leaderboard;
mod payout_curve;
pub mod backup;
pub mod campaign;
pub mod check_version;
pub mod cli;
pub mod db;
pub mod dlc_handler;
pub mod dlc_protocol;
pub mod funding_fee;
pub mod logger;
pub mod message;
mod metrics;
pub mod node;
pub mod notifications;
pub mod orderbook;
pub mod position;
pub mod referrals;
pub mod routes;
pub mod routing_fee;
pub mod scheduler;
pub mod schema;
pub mod settings;
pub mod storage;
pub mod trade;
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!();
pub fn run_migration(conn: &mut PgConnection) {
conn.run_pending_migrations(MIGRATIONS)
.expect("migrations to succeed");
}
/// Our app's top level error type.
#[derive(Debug)]
pub enum AppError {
InternalServerError(String),
BadRequest(String),
ServiceUnavailable(String),
Unauthorized,
}
impl IntoResponse for AppError {
fn into_response(self) -> Response {
let (status, error_message) = match self {
AppError::InternalServerError(msg) => (StatusCode::INTERNAL_SERVER_ERROR, msg),
AppError::BadRequest(msg) => (StatusCode::BAD_REQUEST, msg),
AppError::ServiceUnavailable(msg) => (StatusCode::SERVICE_UNAVAILABLE, msg),
AppError::Unauthorized => (StatusCode::UNAUTHORIZED, "".to_string()),
};
let body = Json(json!({
"error": error_message,
}));
(status, body).into_response()
}
}
pub fn parse_channel_id(channel_id: &str) -> Result<ChannelId> {
let channel_id = hex::decode(channel_id)?
.try_into()
.map_err(|_| anyhow!("Could not parse channel ID"))?;
Ok(ChannelId(channel_id))
}
pub fn parse_dlc_channel_id(channel_id: &str) -> Result<DlcChannelId> {
Ok(DlcChannelId::from_hex(channel_id)?)
}
pub fn compute_relative_contracts(contracts: Decimal, direction: &commons::Direction) -> Decimal {
match direction {
commons::Direction::Long => contracts,
commons::Direction::Short => -contracts,
}
}
#[track_caller]
pub fn decimal_from_f32(float: f32) -> Decimal {
Decimal::from_f32(float).expect("f32 to fit into Decimal")
}
#[track_caller]
pub fn f32_from_decimal(decimal: Decimal) -> f32 {
decimal.to_f32().expect("Decimal to fit into f32")
}
#[derive(Clone, Copy, Debug)]
pub struct ChannelOpeningParams {
pub trader_reserve: Amount,
pub coordinator_reserve: Amount,
pub external_funding: Option<Amount>,
}
#[derive(Debug, Clone, Copy)]
pub enum FundingFee {
Zero,
CoordinatorPays(Amount),
TraderPays(Amount),
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/check_version.rs | coordinator/src/check_version.rs | use crate::db;
use crate::db::user::User;
use anyhow::ensure;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use diesel::PgConnection;
use semver::Comparator;
use semver::Op;
use semver::Version;
use semver::VersionReq;
pub fn check_version(conn: &mut PgConnection, trader_id: &PublicKey) -> Result<()> {
let user: User = db::user::get_user(conn, trader_id)?.context("Couldn't find user")?;
let app_version = user.version.context("No version found")?;
let app_version = Version::parse(app_version.as_str())?;
let coordinator_version = env!("CARGO_PKG_VERSION").to_string();
let coordinator_version = Version::parse(coordinator_version.as_str())?;
check_compatibility(app_version, coordinator_version)?;
Ok(())
}
fn check_compatibility(app_version: Version, coordinator_version: Version) -> Result<()> {
let req = VersionReq {
comparators: vec![
Comparator {
op: Op::Exact,
major: coordinator_version.major,
minor: Some(coordinator_version.minor),
patch: None,
pre: Default::default(),
},
// We introduced sematic versioning only after this version, hence, we say each app
// needs to have at least this version.
Comparator {
op: Op::GreaterEq,
major: 2,
minor: Some(0),
patch: Some(6),
pre: Default::default(),
},
],
};
ensure!(
req.matches(&app_version),
format!("Please upgrade to the latest version: {coordinator_version}")
);
Ok(())
}
#[cfg(test)]
pub mod tests {
use crate::check_version::check_compatibility;
use semver::Version;
#[test]
pub fn same_versions_are_compatible() {
let app_version = Version::new(2, 1, 1);
let coordinator_version = Version::new(2, 1, 1);
assert!(check_compatibility(app_version, coordinator_version).is_ok());
}
#[test]
pub fn same_minor_and_major_are_compatible() {
let app_version = Version::new(2, 1, 1);
let coordinator_version = Version::new(2, 1, 3);
assert!(check_compatibility(app_version, coordinator_version).is_ok());
}
#[test]
pub fn different_minor_are_incompatible() {
let app_version = Version::new(2, 1, 1);
let coordinator_version = Version::new(2, 2, 1);
assert!(check_compatibility(app_version, coordinator_version).is_err());
}
#[test]
pub fn different_major_are_incompatible() {
let app_version = Version::new(2, 1, 1);
let coordinator_version = Version::new(3, 1, 1);
assert!(check_compatibility(app_version, coordinator_version).is_err());
}
#[test]
pub fn if_version_smaller_than_2_0_6_then_error() {
let app_version = Version::new(2, 0, 5);
let coordinator_version = Version::new(2, 0, 7);
assert!(check_compatibility(app_version, coordinator_version).is_err());
}
#[test]
pub fn if_version_greater_equal_than_2_0_6_then_ok() {
let app_version = Version::new(2, 0, 6);
let coordinator_version = Version::new(2, 0, 7);
assert!(check_compatibility(app_version, coordinator_version).is_ok());
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/payout_curve.rs | coordinator/src/payout_curve.rs | use anyhow::ensure;
use anyhow::Context;
use anyhow::Result;
use bitcoin::Amount;
use dlc_manager::contract::numerical_descriptor::NumericalDescriptor;
use dlc_manager::contract::ContractDescriptor;
use dlc_manager::payout_curve::PayoutFunction;
use dlc_manager::payout_curve::PayoutFunctionPiece;
use dlc_manager::payout_curve::PayoutPoint;
use dlc_manager::payout_curve::PolynomialPayoutCurvePiece;
use dlc_manager::payout_curve::RoundingInterval;
use dlc_manager::payout_curve::RoundingIntervals;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::Decimal;
use tracing::instrument;
use xxi_node::cfd::calculate_long_bankruptcy_price;
use xxi_node::cfd::calculate_short_bankruptcy_price;
use xxi_node::commons::ContractSymbol;
use xxi_node::commons::Direction;
/// Builds the contract descriptor from the point of view of the coordinator.
///
/// It's the direction of the coordinator because the coordinator is always proposing.
#[instrument]
#[allow(clippy::too_many_arguments)]
pub fn build_contract_descriptor(
initial_price: Decimal,
coordinator_margin: Amount,
trader_margin: Amount,
leverage_coordinator: f32,
leverage_trader: f32,
coordinator_direction: Direction,
coordinator_collateral_reserve: Amount,
trader_collateral_reserve: Amount,
quantity: f32,
symbol: ContractSymbol,
) -> Result<ContractDescriptor> {
ensure!(
symbol == ContractSymbol::BtcUsd,
"We only support BTCUSD at the moment. \
For other symbols we will need a different payout curve"
);
tracing::info!("Building contract descriptor");
let (payout_function, rounding_intervals) = build_inverse_payout_function(
coordinator_margin,
trader_margin,
initial_price,
leverage_trader,
leverage_coordinator,
coordinator_collateral_reserve,
trader_collateral_reserve,
coordinator_direction,
quantity,
)?;
Ok(ContractDescriptor::Numerical(NumericalDescriptor {
payout_function,
rounding_intervals,
difference_params: None,
oracle_numeric_infos: dlc_trie::OracleNumericInfo {
base: 2,
nb_digits: vec![20],
},
}))
}
/// Build a [`PayoutFunction`] for an inverse perpetual future e.g. BTCUSD. Perspective is always
/// from the person who offers, i.e. in our case from the coordinator.
///
/// Additionally returns the [`RoundingIntervals`] to indicate how it should be discretized.
#[allow(clippy::too_many_arguments)]
fn build_inverse_payout_function(
// TODO: The `coordinator_margin` and `trader_margin` are _not_ orthogonal to the other
// arguments passed in.
coordinator_margin: Amount,
trader_margin: Amount,
initial_price: Decimal,
leverage_trader: f32,
leverage_coordinator: f32,
coordinator_collateral_reserve: Amount,
trader_collateral_reserve: Amount,
coordinator_direction: Direction,
quantity: f32,
) -> Result<(PayoutFunction, RoundingIntervals)> {
let leverage_coordinator =
Decimal::from_f32(leverage_coordinator).expect("to fit into decimal");
let leverage_trader = Decimal::from_f32(leverage_trader).expect("to fit into decimal");
let (coordinator_liquidation_price, trader_liquidation_price) = get_liquidation_prices(
initial_price,
coordinator_direction,
leverage_coordinator,
leverage_trader,
);
let (long_liquidation_price, short_liquidation_price) = match coordinator_direction {
Direction::Long => (coordinator_liquidation_price, trader_liquidation_price),
Direction::Short => (trader_liquidation_price, coordinator_liquidation_price),
};
let price_params = payout_curve::PriceParams::new_btc_usd(
initial_price,
long_liquidation_price,
short_liquidation_price,
)?;
let party_params_coordinator =
payout_curve::PartyParams::new(coordinator_margin, coordinator_collateral_reserve);
let party_params_trader =
payout_curve::PartyParams::new(trader_margin, trader_collateral_reserve);
let payout_points = payout_curve::build_inverse_payout_function(
quantity,
party_params_coordinator,
party_params_trader,
price_params,
coordinator_direction,
)?;
let mut pieces = vec![];
for (lower, upper) in payout_points {
let lower_range = PolynomialPayoutCurvePiece::new(vec![
PayoutPoint {
event_outcome: lower.event_outcome,
outcome_payout: lower.outcome_payout,
extra_precision: lower.extra_precision,
},
PayoutPoint {
event_outcome: upper.event_outcome,
outcome_payout: upper.outcome_payout,
extra_precision: upper.extra_precision,
},
])?;
pieces.push(PayoutFunctionPiece::PolynomialPayoutCurvePiece(lower_range));
}
let payout_function =
PayoutFunction::new(pieces).context("could not create payout function")?;
let rounding_intervals = RoundingIntervals {
intervals: vec![RoundingInterval {
begin_interval: 0,
// No rounding needed because we are giving `rust-dlc` a step function already.
rounding_mod: 1,
}],
};
Ok((payout_function, rounding_intervals))
}
/// Returns the liquidation price for `(coordinator, maker)` with a maintenance margin of 0%. also
/// known as the bankruptcy price.
fn get_liquidation_prices(
initial_price: Decimal,
coordinator_direction: Direction,
leverage_coordinator: Decimal,
leverage_trader: Decimal,
) -> (Decimal, Decimal) {
let (coordinator_liquidation_price, trader_liquidation_price) = match coordinator_direction {
Direction::Long => (
calculate_long_bankruptcy_price(leverage_coordinator, initial_price),
calculate_short_bankruptcy_price(leverage_trader, initial_price),
),
Direction::Short => (
calculate_short_bankruptcy_price(leverage_coordinator, initial_price),
calculate_long_bankruptcy_price(leverage_trader, initial_price),
),
};
(coordinator_liquidation_price, trader_liquidation_price)
}
#[cfg(test)]
mod tests {
use super::*;
use proptest::prelude::*;
use rust_decimal_macros::dec;
use xxi_node::cfd::calculate_margin;
#[test]
fn payout_price_range_is_below_max_price() {
let initial_price = dec!(36780);
let quantity = 19.0;
let leverage_coordinator = 2.0;
let coordinator_margin = calculate_margin(initial_price, quantity, leverage_coordinator);
let leverage_trader = 1.0;
let trader_margin = calculate_margin(initial_price, quantity, leverage_trader);
let coordinator_direction = Direction::Long;
let coordinator_collateral_reserve = Amount::from_sat(1000);
let trader_collateral_reserve = Amount::from_sat(1000);
let total_collateral = coordinator_margin
+ trader_margin
+ coordinator_collateral_reserve
+ trader_collateral_reserve;
let symbol = ContractSymbol::BtcUsd;
let descriptor = build_contract_descriptor(
initial_price,
coordinator_margin,
trader_margin,
leverage_coordinator,
leverage_trader,
coordinator_direction,
coordinator_collateral_reserve,
trader_collateral_reserve,
quantity,
symbol,
)
.unwrap();
let range_payouts = match descriptor {
ContractDescriptor::Enum(_) => unreachable!(),
ContractDescriptor::Numerical(numerical) => numerical
.get_range_payouts(total_collateral.to_sat())
.unwrap(),
};
let max_price = 2usize.pow(20);
for range_payout in &range_payouts {
assert!(
range_payout.start + range_payout.count <= max_price,
"{} + {} = {} > {}",
range_payout.start,
range_payout.count,
range_payout.start + range_payout.count,
max_price
);
}
}
#[test]
/// We check that the generated payout function takes into account the provided collateral
/// reserves. A party's collateral reserve is their coins in the DLC channel that are not being
/// wagered. As such, we expect _any_ of their payouts to be _at least_ their collateral
/// reserve.
fn payout_function_respects_collateral_reserve() {
// Arrange
let initial_price = dec!(28_251);
let quantity = 500.0;
let leverage_offer = 2.0;
let margin_offer = calculate_margin(initial_price, quantity, leverage_offer);
let leverage_accept = 2.0;
let margin_accept = calculate_margin(initial_price, quantity, leverage_accept);
let direction_offer = Direction::Short;
let collateral_reserve_offer = Amount::from_sat(2_120_386);
let collateral_reserve_accept = Amount::from_sat(5_115_076);
let total_collateral =
margin_offer + margin_accept + collateral_reserve_offer + collateral_reserve_accept;
let symbol = ContractSymbol::BtcUsd;
// Act
let descriptor = build_contract_descriptor(
initial_price,
margin_offer,
margin_accept,
leverage_offer,
leverage_accept,
direction_offer,
collateral_reserve_offer,
collateral_reserve_accept,
quantity,
symbol,
)
.unwrap();
// Assert
// Extract the payouts from the generated `ContractDescriptor`.
let range_payouts = match descriptor {
ContractDescriptor::Enum(_) => unreachable!(),
ContractDescriptor::Numerical(numerical) => numerical
.get_range_payouts(total_collateral.to_sat())
.unwrap(),
};
// The offer party gets liquidated when they get the minimum amount of sats as a payout.
let liquidation_payout_offer = range_payouts
.iter()
.min_by(|a, b| a.payout.offer.cmp(&b.payout.offer))
.unwrap()
.payout
.offer;
// The minimum amount the offer party can get as a payout is their collateral reserve.
assert_eq!(liquidation_payout_offer, collateral_reserve_offer.to_sat());
// The accept party gets liquidated when they get the minimum amount of sats as a payout.
let liquidation_payout_accept = range_payouts
.iter()
.min_by(|a, b| a.payout.accept.cmp(&b.payout.accept))
.unwrap()
.payout
.accept;
// The minimum amount the accept party can get as a payout is their collateral reserve.
assert_eq!(
liquidation_payout_accept,
collateral_reserve_accept.to_sat()
);
}
proptest! {
#[test]
fn payout_function_always_respects_reserves(
quantity in 1.0f32..10_000.0,
initial_price in 20_000u32..80_000,
leverage_coordinator in 1u32..5,
leverage_trader in 1u32..5,
is_coordinator_long in proptest::bool::ANY,
collateral_reserve_coordinator in 0u64..1_000_000,
collateral_reserve_trader in 0u64..1_000_000,
) {
let initial_price = Decimal::from(initial_price);
let leverage_coordinator = leverage_coordinator as f32;
let leverage_trader = leverage_trader as f32;
let margin_coordinator = calculate_margin(initial_price, quantity, leverage_coordinator);
let margin_trader = calculate_margin(initial_price, quantity, leverage_trader);
let coordinator_direction = if is_coordinator_long {
Direction::Long
} else {
Direction::Short
};
let collateral_reserve_coordinator = Amount::from_sat(collateral_reserve_coordinator);
let collateral_reserve_trader = Amount::from_sat(collateral_reserve_trader);
let total_collateral = margin_coordinator
+ margin_trader
+ collateral_reserve_coordinator
+ collateral_reserve_trader;
let symbol = ContractSymbol::BtcUsd;
let descriptor = build_contract_descriptor(
initial_price,
margin_coordinator,
margin_trader,
leverage_coordinator,
leverage_trader,
coordinator_direction,
collateral_reserve_coordinator,
collateral_reserve_trader,
quantity,
symbol,
)
.unwrap();
let range_payouts = match descriptor {
ContractDescriptor::Enum(_) => unreachable!(),
ContractDescriptor::Numerical(numerical) => numerical
.get_range_payouts(total_collateral.to_sat())
.unwrap(),
};
let liquidation_payout_offer = range_payouts
.iter()
.min_by(|a, b| a.payout.offer.cmp(&b.payout.offer))
.unwrap()
.payout
.offer;
assert_eq!(liquidation_payout_offer, collateral_reserve_coordinator.to_sat());
let liquidation_payout_accept = range_payouts
.iter()
.min_by(|a, b| a.payout.accept.cmp(&b.payout.accept))
.unwrap()
.payout
.accept;
assert_eq!(liquidation_payout_accept, collateral_reserve_trader.to_sat());
}
}
#[test]
fn calculate_liquidation_price_coordinator_long() {
let initial_price = dec!(30_000);
let coordinator_direction = Direction::Long;
let leverage_coordinator = dec!(2.0);
let leverage_trader = dec!(3.0);
let (coordinator, maker) = get_liquidation_prices(
initial_price,
coordinator_direction,
leverage_coordinator,
leverage_trader,
);
assert_eq!(coordinator, dec!(20_000));
assert_eq!(maker, dec!(45_000));
}
#[test]
fn calculate_liquidation_price_coordinator_short() {
let initial_price = dec!(30_000);
let coordinator_direction = Direction::Short;
let leverage_coordinator = dec!(2.0);
let leverage_trader = dec!(3.0);
let (coordinator, maker) = get_liquidation_prices(
initial_price,
coordinator_direction,
leverage_coordinator,
leverage_trader,
);
assert_eq!(coordinator, dec!(60_000));
assert_eq!(maker, dec!(22_500));
}
#[test]
fn build_contract_descriptor_does_not_panic() {
let initial_price = dec!(36404.5);
let quantity = 20.0;
let leverage_coordinator = 2.0;
let coordinator_margin = Amount::from_sat(18_313);
let leverage_trader = 3.0;
let trader_margin = Amount::from_sat(27_469);
let coordinator_direction = Direction::Short;
let coordinator_collateral_reserve = Amount::ZERO;
let trader_collateral_reserve = Amount::ZERO;
let symbol = ContractSymbol::BtcUsd;
let _descriptor = build_contract_descriptor(
initial_price,
coordinator_margin,
trader_margin,
leverage_coordinator,
leverage_trader,
coordinator_direction,
coordinator_collateral_reserve,
trader_collateral_reserve,
quantity,
symbol,
)
.unwrap();
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/logger.rs | coordinator/src/logger.rs | use anyhow::Context;
use anyhow::Result;
use time::macros::format_description;
use tracing::metadata::LevelFilter;
use tracing_subscriber::filter::Directive;
use tracing_subscriber::fmt::time::UtcTime;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::Layer;
const RUST_LOG_ENV: &str = "RUST_LOG";
// Configure and initialise tracing subsystem
pub fn init_tracing(level: LevelFilter, json_format: bool, tokio_console: bool) -> Result<()> {
if level == LevelFilter::OFF {
return Ok(());
}
let is_terminal = atty::is(atty::Stream::Stderr);
let filter = EnvFilter::new("")
.add_directive(Directive::from(level))
.add_directive("hyper=warn".parse()?)
.add_directive("rustls=warn".parse()?)
.add_directive("sled=warn".parse()?)
.add_directive("bdk=warn".parse()?) // bdk is quite spamy on debug
.add_directive("lightning_transaction_sync=warn".parse()?)
.add_directive("lightning::ln::peer_handler=debug".parse()?)
.add_directive("lightning=trace".parse()?)
.add_directive("ureq=info".parse()?);
let mut filter = if tokio_console {
filter
.add_directive("tokio=trace".parse()?)
.add_directive("runtime=trace".parse()?)
} else {
filter
};
let console_layer = if tokio_console {
Some(
console_subscriber::ConsoleLayer::builder()
.server_addr(([0, 0, 0, 0], 6669))
.spawn(),
)
} else {
None
};
// Parse additional log directives from env variable
let filter = match std::env::var_os(RUST_LOG_ENV).map(|s| s.into_string()) {
Some(Ok(env)) => {
for directive in env.split(',') {
#[allow(clippy::print_stdout)]
match directive.parse() {
Ok(d) => filter = filter.add_directive(d),
Err(e) => println!("WARN ignoring log directive: `{directive}`: {e}"),
};
}
filter
}
_ => filter,
};
let fmt_layer = tracing_subscriber::fmt::layer()
.with_writer(std::io::stderr)
.with_ansi(is_terminal);
let fmt_layer = if json_format {
fmt_layer.json().with_timer(UtcTime::rfc_3339()).boxed()
} else {
fmt_layer
.with_timer(UtcTime::new(format_description!(
"[year]-[month]-[day] [hour]:[minute]:[second]"
)))
.boxed()
};
tracing_subscriber::registry()
.with(filter)
.with(console_layer)
.with(fmt_layer)
.try_init()
.context("Failed to init tracing")?;
tracing::info!("Initialized logger");
Ok(())
}
/// Initialise tracing for tests
#[cfg(test)]
pub(crate) fn init_tracing_for_test() {
static TRACING_TEST_SUBSCRIBER: std::sync::Once = std::sync::Once::new();
TRACING_TEST_SUBSCRIBER.call_once(|| {
tracing_subscriber::fmt()
.with_env_filter("debug")
.with_test_writer()
.init()
})
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/emergency_kit.rs | coordinator/src/emergency_kit.rs | use crate::node::Node;
use crate::orderbook::db;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use bitcoin_old::secp256k1::SecretKey;
use dlc_manager::Signer;
use dlc_messages::channel::RenewRevoke;
use lightning::ln::chan_utils::build_commitment_secret;
use xxi_node::commons::OrderState;
use xxi_node::message_handler::TenTenOneMessage;
use xxi_node::message_handler::TenTenOneRenewRevoke;
use xxi_node::node::event::NodeEvent;
impl Node {
pub fn resend_renew_revoke_message_internal(&self, trader: PublicKey) -> Result<()> {
tracing::warn!("Executing emergency kit! Resending renew revoke message");
let signed_channel = self.inner.get_signed_channel_by_trader_id(trader)?;
let per_update_seed_pk = signed_channel.own_per_update_seed;
let per_update_seed = self
.inner
.dlc_wallet
.get_secret_key_for_pubkey(&per_update_seed_pk)?;
let prev_per_update_secret = SecretKey::from_slice(&build_commitment_secret(
per_update_seed.as_ref(),
signed_channel.update_idx + 1,
))?;
let mut conn = self.pool.clone().get()?;
// We assume the last taken order to be the relevant order.
let order = db::orders::get_by_trader_id_and_state(&mut conn, trader, OrderState::Taken)?
.with_context(|| {
format!("Couldn't find last order in state taken. trader_id={trader}")
})?;
let msg = TenTenOneMessage::RenewRevoke(TenTenOneRenewRevoke {
order_id: order.id,
renew_revoke: RenewRevoke {
channel_id: signed_channel.channel_id,
per_update_secret: prev_per_update_secret,
reference_id: signed_channel.reference_id,
},
});
self.inner.event_handler.publish(NodeEvent::SendDlcMessage {
peer: trader,
msg: msg.clone(),
});
Ok(())
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/campaign.rs | coordinator/src/campaign.rs | use crate::notifications::Notification;
use crate::notifications::NotificationKind;
use crate::routes::AppState;
use crate::AppError;
use axum::extract::State;
use axum::Json;
use bitcoin::secp256k1::PublicKey;
use serde::Deserialize;
use serde::Serialize;
use std::sync::Arc;
use tracing::instrument;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PushCampaignParams {
pub node_ids: Vec<PublicKey>,
pub title: String,
pub message: String,
pub dry_run: Option<bool>,
}
#[instrument(skip_all, err(Debug))]
pub async fn post_push_campaign(
State(state): State<Arc<AppState>>,
params: Json<PushCampaignParams>,
) -> Result<String, AppError> {
let params = params.0;
tracing::info!(?params, "Sending campaign with push notifications");
let notification_kind = NotificationKind::Custom {
title: params.title.clone(),
message: params.message.clone(),
};
tracing::info!(
params.title,
params.message,
receivers = params.node_ids.len(),
"Sending push notification campaign",
);
if params.dry_run.unwrap_or(true) {
tracing::debug!("Not sending push notification campaign because of dry run flag.");
} else {
state
.notification_sender
.send(Notification::new_batch(
params.clone().node_ids,
notification_kind,
))
.await
.map_err(|e| {
AppError::InternalServerError(format!("Failed to send push notifications: {e:#}"))
})?;
}
Ok(format!(
"Sending push notification campaign (title: {}, message: {} to {} users",
params.title,
params.message,
params.node_ids.len(),
))
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/cli.rs | coordinator/src/cli.rs | use anyhow::Result;
use bitcoin::secp256k1::XOnlyPublicKey;
use clap::Parser;
use std::env::current_dir;
use std::net::SocketAddr;
use std::path::PathBuf;
use std::str::FromStr;
use xxi_node::node::OracleInfo;
#[derive(Parser)]
pub struct Opts {
/// The address to listen on for the lightning and dlc peer2peer API.
#[clap(long, default_value = "0.0.0.0:9045")]
pub p2p_address: SocketAddr,
/// The IP address to listen on for the HTTP API.
#[clap(long, default_value = "0.0.0.0:8000")]
pub http_address: SocketAddr,
/// Where to permanently store data, defaults to the current working directory.
#[clap(long)]
data_dir: Option<PathBuf>,
#[clap(value_enum, default_value = "regtest")]
pub network: Network,
/// If enabled logs will be in json format
#[clap(short, long)]
pub json: bool,
/// The address where to find the database including username and password
#[clap(
long,
default_value = "postgres://postgres:mysecretpassword@localhost:5432/orderbook"
)]
pub database: String,
/// The address to connect to the Electrs API.
#[clap(long, default_value = "http://localhost:3000", aliases = ["esplora"])]
pub electrs: String,
/// If enabled, tokio runtime can be locally debugged with tokio_console
#[clap(long)]
pub tokio_console: bool,
/// If specified, metrics will be printed at the given interval
#[clap(long)]
pub tokio_metrics_interval_seconds: Option<u64>,
/// Server API key for the LSP notification service.
/// If not specified, the notifications will not be sent.
#[clap(long, default_value = "")]
pub fcm_api_key: String,
/// The endpoint of the p2p-derivatives oracle
#[arg(num_args(0..))]
#[clap(
long,
default_value = "16f88cf7d21e6c0f46bcbc983a4e3b19726c6c98858cc31c83551a88fde171c0@http://localhost:8081"
)]
oracle: Vec<String>,
/// Defines the default oracle to be used for propose a dlc channel. Note this pubkey has to be
/// included in the oracle arguments.
///
/// FIXME(holzeis): Remove this argument once we have migrated successfully from the old oracle
/// to the new one. This is needed to instruct the coordinator to use only the new oracle for
/// proposing dlc channels.
#[clap(
long,
default_value = "16f88cf7d21e6c0f46bcbc983a4e3b19726c6c98858cc31c83551a88fde171c0"
)]
pub oracle_pubkey: String,
/// The endpoint of the lnd rest api
#[clap(long, default_value = "localhost:18080")]
pub lnd_endpoint: String,
/// Defines the macaroon to be used for the lnd http api.
#[clap(long, default_value = "")]
pub macaroon: String,
/// If enabled the coordinator will try to connect to lnd via https, wss.
#[clap(short, long)]
pub secure_lnd: bool,
}
#[derive(Debug, Clone, Copy, clap::ValueEnum)]
pub enum Network {
Regtest,
Signet,
Testnet,
Mainnet,
}
impl From<Network> for bitcoin::Network {
fn from(network: Network) -> Self {
match network {
Network::Regtest => bitcoin::Network::Regtest,
Network::Signet => bitcoin::Network::Signet,
Network::Testnet => bitcoin::Network::Testnet,
Network::Mainnet => bitcoin::Network::Bitcoin,
}
}
}
impl Opts {
// use this method to parse the options from the cli.
pub fn read() -> Opts {
Opts::parse()
}
pub fn network(&self) -> bitcoin::Network {
self.network.into()
}
pub fn get_oracle_infos(&self) -> Vec<OracleInfo> {
self.oracle
.iter()
.map(|oracle| {
let oracle: Vec<&str> = oracle.split('@').collect();
OracleInfo {
public_key: XOnlyPublicKey::from_str(
oracle.first().expect("public key to be set"),
)
.expect("Valid oracle public key"),
endpoint: oracle.get(1).expect("endpoint to be set").to_string(),
}
})
.collect()
}
pub fn data_dir(&self) -> Result<PathBuf> {
let data_dir = match self.data_dir.clone() {
None => current_dir()?.join("data"),
Some(path) => path,
}
.join("coordinator");
Ok(data_dir)
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/funding_fee.rs | coordinator/src/funding_fee.rs | use crate::decimal_from_f32;
use crate::message::OrderbookMessage;
use crate::FundingFee;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Amount;
use bitcoin::SignedAmount;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::Pool;
use diesel::PgConnection;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use rust_decimal::RoundingStrategy;
use std::time::Duration;
use time::ext::NumericalDuration;
use time::format_description;
use time::OffsetDateTime;
use tokio::sync::broadcast;
use tokio::task::block_in_place;
use tokio_cron_scheduler::JobScheduler;
use xxi_node::commons::ContractSymbol;
use xxi_node::commons::Direction;
use xxi_node::commons::FundingRate;
use xxi_node::commons::Message;
mod db;
pub use db::get_funding_fee_events_for_active_trader_positions;
pub use db::get_next_funding_rate;
pub use db::get_outstanding_funding_fee_events;
pub use db::insert_protocol_funding_fee_event;
pub use db::mark_funding_fee_event_as_paid;
const RETRY_INTERVAL: Duration = Duration::from_secs(5);
/// A record that a funding fee is owed between the coordinator and a trader.
#[derive(Clone, Copy, Debug)]
pub struct FundingFeeEvent {
pub id: i32,
/// A positive amount indicates that the trader pays the coordinator; a negative amount
/// indicates that the coordinator pays the trader.
pub amount: SignedAmount,
pub trader_pubkey: PublicKey,
pub position_id: i32,
pub due_date: OffsetDateTime,
pub price: Decimal,
pub funding_rate: Decimal,
pub paid_date: Option<OffsetDateTime>,
}
impl From<FundingFeeEvent> for xxi_node::message_handler::FundingFeeEvent {
fn from(value: FundingFeeEvent) -> Self {
Self {
due_date: value.due_date,
funding_rate: value.funding_rate,
price: value.price,
funding_fee: value.amount,
}
}
}
#[derive(Clone, Copy, Debug, serde::Serialize, serde::Deserialize, PartialEq)]
pub enum IndexPriceSource {
Bitmex,
/// The index price will be hard-coded for testing.
Test,
}
pub async fn generate_funding_fee_events_periodically(
scheduler: &JobScheduler,
pool: Pool<ConnectionManager<PgConnection>>,
auth_users_notifier: tokio::sync::mpsc::Sender<OrderbookMessage>,
schedule: String,
index_price_source: IndexPriceSource,
) -> Result<()> {
scheduler
.add(tokio_cron_scheduler::Job::new(
schedule.as_str(),
move |_, _| {
let mut attempts_left = 10;
// We want to retry
while let (Err(e), true) = (
generate_funding_fee_events(
&pool,
index_price_source,
auth_users_notifier.clone(),
),
attempts_left > 0,
) {
attempts_left -= 1;
tracing::error!(
retry_interval = ?RETRY_INTERVAL,
attempts_left,
"Failed to generate funding fee events: {e:#}. \
Trying again"
);
std::thread::sleep(RETRY_INTERVAL);
}
},
)?)
.await?;
scheduler.start().await?;
Ok(())
}
/// Generate [`FundingFeeEvent`]s for all active positions.
///
/// When called, a [`FundingFeeEvent`] will be generated for an active position if:
///
/// - We can get a [`FundingRate`] that is at most 1 hour old from the DB.
/// - We can get a BitMEX index price for the `end_date` of the [`FundingRate`].
/// - There is no other [`FundingFeeEvent`] in the DB with the same `position_id` and `end_date`.
/// - The position was created _before_ the `end_date` of the [`FundingRate`].
///
/// This function should be safe to retry. Retry should come in handy if the index price is
/// not available.
fn generate_funding_fee_events(
pool: &Pool<ConnectionManager<PgConnection>>,
index_price_source: IndexPriceSource,
auth_users_notifier: tokio::sync::mpsc::Sender<OrderbookMessage>,
) -> Result<()> {
let mut conn = pool.get()?;
tracing::trace!("Generating funding fee events");
let funding_rate = db::funding_rates::get_funding_rate_charged_in_the_last_hour(&mut conn)?;
let funding_rate = match funding_rate {
Some(funding_rate) => funding_rate,
None => {
tracing::trace!("No current funding rate for this hour");
return Ok(());
}
};
// TODO: Funding rates should be specific to contract symbols.
let contract_symbol = ContractSymbol::BtcUsd;
let index_price = match index_price_source {
IndexPriceSource::Bitmex => block_in_place(move || {
let current_index_price =
get_bitmex_index_price(&contract_symbol, funding_rate.end_date())?;
anyhow::Ok(current_index_price)
})?,
IndexPriceSource::Test => {
#[cfg(not(debug_assertions))]
panic!("Cannot use a test index price in release mode");
#[cfg(debug_assertions)]
rust_decimal_macros::dec!(50_000)
}
};
if index_price.is_zero() {
bail!("Cannot generate funding fee events with zero index price");
}
// We exclude active positions which were open after this funding period ended.
let positions = crate::db::positions::Position::get_all_active_positions_open_before(
&mut conn,
funding_rate.end_date(),
)?;
for position in positions {
let amount = calculate_funding_fee(
position.quantity,
funding_rate.rate(),
index_price,
position.trader_direction,
);
if let Some(funding_fee_event) = db::funding_fee_events::insert(
&mut conn,
amount,
position.trader,
position.id,
funding_rate.end_date(),
index_price,
funding_rate.rate(),
)
.context("Failed to insert funding fee event")?
{
block_in_place(|| {
auth_users_notifier
.blocking_send(OrderbookMessage::TraderMessage {
trader_id: position.trader,
message: Message::FundingFeeEvent(xxi_node::FundingFeeEvent {
contract_symbol,
contracts: decimal_from_f32(position.quantity),
direction: position.trader_direction,
price: funding_fee_event.price,
fee: funding_fee_event.amount,
due_date: funding_fee_event.due_date,
}),
notification: None,
})
.map_err(anyhow::Error::new)
.context("Could not send pending funding fee event to trader")
})?;
tracing::debug!(
position_id = %position.id,
trader_pubkey = %position.trader,
fee_amount = ?amount,
?funding_rate,
"Generated funding fee event"
);
}
}
anyhow::Ok(())
}
/// Calculate the funding fee.
///
/// We assume that the `index_price` is not zero. Otherwise, the function panics.
fn calculate_funding_fee(
quantity: f32,
// Positive means longs pay shorts; negative means shorts pay longs.
funding_rate: Decimal,
index_price: Decimal,
trader_direction: Direction,
) -> SignedAmount {
// Transform the funding rate from a global perspective (longs and shorts) to a local
// perspective (the coordinator-trader position).
let funding_rate = match trader_direction {
Direction::Long => funding_rate,
Direction::Short => -funding_rate,
};
let quantity = Decimal::try_from(quantity).expect("to fit");
// E.g. 500 [$] / 20_000 [$/BTC] = 0.025 [BTC]
let mark_value = quantity / index_price;
let funding_fee_btc = mark_value * funding_rate;
let funding_fee_btc = funding_fee_btc
.round_dp_with_strategy(8, RoundingStrategy::MidpointAwayFromZero)
.to_f64()
.expect("to fit");
SignedAmount::from_btc(funding_fee_btc).expect("to fit")
}
fn get_bitmex_index_price(
contract_symbol: &ContractSymbol,
timestamp: OffsetDateTime,
) -> Result<Decimal> {
let symbol = bitmex_symbol(contract_symbol);
let time_format = format_description::parse("[year]-[month]-[day] [hour]:[minute]")?;
// Ideally we get the price indicated by `timestamp`, but if it is not available we are happy to
// take a price up to 1 minute in the past.
let start_time = (timestamp - 1.minutes()).format(&time_format)?;
let end_time = timestamp.format(&time_format)?;
let mut url = reqwest::Url::parse("https://www.bitmex.com/api/v1/instrument/compositeIndex")?;
url.query_pairs_mut()
.append_pair("symbol", &format!(".{symbol}"))
.append_pair(
"filter",
// The `reference` is set to `BMI` to get the _composite_ index.
&format!("{{\"symbol\": \".{symbol}\", \"startTime\": \"{start_time}\", \"endTime\": \"{end_time}\", \"reference\": \"BMI\"}}"),
)
.append_pair("columns", "lastPrice,timestamp,reference")
// Reversed to get the latest one.
.append_pair("reverse", "true")
// Only need one index.
.append_pair("count", "1");
let indices = reqwest::blocking::get(url)?.json::<Vec<Index>>()?;
let index = &indices[0];
let index_price = Decimal::try_from(index.last_price)?;
Ok(index_price)
}
fn bitmex_symbol(contract_symbol: &ContractSymbol) -> &str {
match contract_symbol {
ContractSymbol::BtcUsd => "BXBT",
}
}
#[derive(serde::Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Index {
#[serde(with = "time::serde::rfc3339")]
#[serde(rename = "timestamp")]
_timestamp: OffsetDateTime,
last_price: f64,
#[serde(rename = "reference")]
_reference: String,
}
pub fn funding_fee_from_funding_fee_events(events: &[FundingFeeEvent]) -> FundingFee {
let funding_fee_amount = events
.iter()
.fold(SignedAmount::ZERO, |acc, e| acc + e.amount);
match funding_fee_amount.to_sat() {
0 => FundingFee::Zero,
n if n.is_positive() => FundingFee::TraderPays(Amount::from_sat(n.unsigned_abs())),
n => FundingFee::CoordinatorPays(Amount::from_sat(n.unsigned_abs())),
}
}
pub fn insert_funding_rates(
conn: &mut PgConnection,
tx_orderbook_feed: broadcast::Sender<Message>,
funding_rates: &[FundingRate],
) -> Result<()> {
db::insert_funding_rates(conn, funding_rates)?;
// There is no guarantee that the next funding rate has changed, but sending the message
// unconditionally is simpler and should cause no problems.
let next_funding_rate = get_next_funding_rate(conn)?;
if let Some(next_funding_rate) = next_funding_rate {
if let Err(e) = tx_orderbook_feed.send(Message::NextFundingRate(next_funding_rate)) {
tracing::error!("Failed to notify traders about next funding rate: {e}");
}
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use insta::assert_debug_snapshot;
use rust_decimal_macros::dec;
#[test]
fn calculate_funding_fee_test() {
assert_debug_snapshot!(calculate_funding_fee(
500.0,
dec!(0.003),
dec!(20_000),
Direction::Long
));
assert_debug_snapshot!(calculate_funding_fee(
500.0,
dec!(0.003),
dec!(20_000),
Direction::Short
));
assert_debug_snapshot!(calculate_funding_fee(
500.0,
dec!(-0.003),
dec!(20_000),
Direction::Long
));
assert_debug_snapshot!(calculate_funding_fee(
500.0,
dec!(-0.003),
dec!(20_000),
Direction::Short
));
assert_debug_snapshot!(calculate_funding_fee(
500.0,
dec!(0.003),
dec!(40_000),
Direction::Long
));
assert_debug_snapshot!(calculate_funding_fee(
500.0,
dec!(0.003),
dec!(40_000),
Direction::Short
));
assert_debug_snapshot!(calculate_funding_fee(
100.0,
dec!(0.003),
dec!(20_000),
Direction::Long
));
assert_debug_snapshot!(calculate_funding_fee(
100.0,
dec!(0.003),
dec!(20_000),
Direction::Short
));
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/storage.rs | coordinator/src/storage.rs | use std::fs;
use std::path::PathBuf;
use std::sync::Arc;
use xxi_node::storage::sled::SledStorageProvider;
use xxi_node::storage::DlcStoreProvider;
use xxi_node::storage::KeyValue;
#[derive(Clone)]
pub struct CoordinatorTenTenOneStorage {
pub dlc_storage: Arc<SledStorageProvider>,
pub data_dir: String,
}
impl CoordinatorTenTenOneStorage {
pub fn new(data_dir: String) -> CoordinatorTenTenOneStorage {
let data_dir = PathBuf::from(data_dir);
if !data_dir.exists() {
fs::create_dir_all(data_dir.as_path()).expect("Failed to create data dir");
}
let data_dir = data_dir.to_string_lossy().to_string();
let dlc_storage = Arc::new(SledStorageProvider::new(&data_dir));
CoordinatorTenTenOneStorage {
dlc_storage,
data_dir,
}
}
}
impl DlcStoreProvider for CoordinatorTenTenOneStorage {
fn read(&self, kind: u8, key: Option<Vec<u8>>) -> anyhow::Result<Vec<KeyValue>> {
self.dlc_storage.read(kind, key)
}
fn write(&self, kind: u8, key: Vec<u8>, value: Vec<u8>) -> anyhow::Result<()> {
self.dlc_storage.write(kind, key, value)
}
fn delete(&self, kind: u8, key: Option<Vec<u8>>) -> anyhow::Result<()> {
self.dlc_storage.delete(kind, key)
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/schema.rs | coordinator/src/schema.rs | // @generated automatically by Diesel CLI.
pub mod sql_types {
#[derive(diesel::sql_types::SqlType)]
#[diesel(postgres_type(name = "BonusStatus_Type"))]
pub struct BonusStatusType;
#[derive(diesel::sql_types::SqlType)]
#[diesel(postgres_type(name = "ChannelState_Type"))]
pub struct ChannelStateType;
#[derive(diesel::sql_types::SqlType)]
#[diesel(postgres_type(name = "ContractSymbol_Type"))]
pub struct ContractSymbolType;
#[derive(diesel::sql_types::SqlType)]
#[diesel(postgres_type(name = "Direction_Type"))]
pub struct DirectionType;
#[derive(diesel::sql_types::SqlType)]
#[diesel(postgres_type(name = "Dlc_Channel_State_Type"))]
pub struct DlcChannelStateType;
#[derive(diesel::sql_types::SqlType)]
#[diesel(postgres_type(name = "Htlc_Status_Type"))]
pub struct HtlcStatusType;
#[derive(diesel::sql_types::SqlType)]
#[diesel(postgres_type(name = "InvoiceState_Type"))]
pub struct InvoiceStateType;
#[derive(diesel::sql_types::SqlType)]
#[diesel(postgres_type(name = "MatchState_Type"))]
pub struct MatchStateType;
#[derive(diesel::sql_types::SqlType)]
#[diesel(postgres_type(name = "Message_Type_Type"))]
pub struct MessageTypeType;
#[derive(diesel::sql_types::SqlType)]
#[diesel(postgres_type(name = "OrderReason_Type"))]
pub struct OrderReasonType;
#[derive(diesel::sql_types::SqlType)]
#[diesel(postgres_type(name = "OrderState_Type"))]
pub struct OrderStateType;
#[derive(diesel::sql_types::SqlType)]
#[diesel(postgres_type(name = "OrderType_Type"))]
pub struct OrderTypeType;
#[derive(diesel::sql_types::SqlType)]
#[diesel(postgres_type(name = "Payment_Flow_Type"))]
pub struct PaymentFlowType;
#[derive(diesel::sql_types::SqlType)]
#[diesel(postgres_type(name = "Poll_Type_Type"))]
pub struct PollTypeType;
#[derive(diesel::sql_types::SqlType)]
#[diesel(postgres_type(name = "PositionState_Type"))]
pub struct PositionStateType;
#[derive(diesel::sql_types::SqlType)]
#[diesel(postgres_type(name = "Protocol_State_Type"))]
pub struct ProtocolStateType;
#[derive(diesel::sql_types::SqlType)]
#[diesel(postgres_type(name = "Protocol_Type_Type"))]
pub struct ProtocolTypeType;
}
diesel::table! {
answers (id) {
id -> Int4,
choice_id -> Int4,
trader_pubkey -> Text,
value -> Text,
creation_timestamp -> Timestamptz,
}
}
diesel::table! {
use diesel::sql_types::*;
use super::sql_types::BonusStatusType;
bonus_status (id) {
id -> Int4,
trader_pubkey -> Text,
tier_level -> Int4,
fee_rebate -> Float4,
bonus_type -> BonusStatusType,
activation_timestamp -> Timestamptz,
deactivation_timestamp -> Timestamptz,
}
}
diesel::table! {
use diesel::sql_types::*;
use super::sql_types::BonusStatusType;
bonus_tiers (id) {
id -> Int4,
tier_level -> Int4,
min_users_to_refer -> Int4,
fee_rebate -> Float4,
bonus_tier_type -> BonusStatusType,
active -> Bool,
}
}
diesel::table! {
channel_opening_params (order_id) {
order_id -> Text,
coordinator_reserve -> Int8,
trader_reserve -> Int8,
created_at -> Int8,
external_funding -> Nullable<Int8>,
}
}
diesel::table! {
use diesel::sql_types::*;
use super::sql_types::ChannelStateType;
channels (user_channel_id) {
user_channel_id -> Text,
channel_id -> Nullable<Text>,
inbound_sats -> Int8,
outbound_sats -> Int8,
funding_txid -> Nullable<Text>,
channel_state -> ChannelStateType,
counterparty_pubkey -> Text,
created_at -> Timestamptz,
updated_at -> Timestamptz,
liquidity_option_id -> Nullable<Int4>,
fee_sats -> Nullable<Int8>,
}
}
diesel::table! {
choices (id) {
id -> Int4,
poll_id -> Int4,
value -> Text,
editable -> Bool,
}
}
diesel::table! {
collaborative_reverts (id) {
id -> Int4,
channel_id -> Text,
trader_pubkey -> Text,
price -> Float4,
coordinator_address -> Text,
coordinator_amount_sats -> Int8,
trader_amount_sats -> Int8,
timestamp -> Timestamptz,
}
}
diesel::table! {
use diesel::sql_types::*;
use super::sql_types::DlcChannelStateType;
dlc_channels (id) {
id -> Int4,
open_protocol_id -> Uuid,
channel_id -> Text,
trader_pubkey -> Text,
channel_state -> DlcChannelStateType,
trader_reserve_sats -> Int8,
coordinator_reserve_sats -> Int8,
funding_txid -> Nullable<Text>,
close_txid -> Nullable<Text>,
settle_txid -> Nullable<Text>,
buffer_txid -> Nullable<Text>,
claim_txid -> Nullable<Text>,
punish_txid -> Nullable<Text>,
created_at -> Timestamptz,
updated_at -> Timestamptz,
coordinator_funding_sats -> Int8,
trader_funding_sats -> Int8,
}
}
diesel::table! {
use diesel::sql_types::*;
use super::sql_types::MessageTypeType;
dlc_messages (message_hash) {
message_hash -> Text,
inbound -> Bool,
peer_id -> Text,
message_type -> MessageTypeType,
timestamp -> Timestamptz,
}
}
diesel::table! {
use diesel::sql_types::*;
use super::sql_types::ProtocolStateType;
use super::sql_types::ProtocolTypeType;
dlc_protocols (id) {
id -> Int4,
protocol_id -> Uuid,
previous_protocol_id -> Nullable<Uuid>,
channel_id -> Text,
contract_id -> Nullable<Text>,
protocol_state -> ProtocolStateType,
trader_pubkey -> Text,
timestamp -> Timestamptz,
protocol_type -> ProtocolTypeType,
}
}
diesel::table! {
use diesel::sql_types::*;
use super::sql_types::InvoiceStateType;
hodl_invoices (id) {
id -> Int4,
trader_pubkey -> Text,
r_hash -> Text,
amount_sats -> Int8,
pre_image -> Nullable<Text>,
created_at -> Timestamptz,
updated_at -> Nullable<Timestamptz>,
invoice_state -> InvoiceStateType,
order_id -> Nullable<Uuid>,
}
}
diesel::table! {
funding_fee_events (id) {
id -> Int4,
amount_sats -> Int8,
trader_pubkey -> Text,
position_id -> Int4,
due_date -> Timestamptz,
price -> Float4,
funding_rate -> Float4,
paid_date -> Nullable<Timestamptz>,
timestamp -> Timestamptz,
}
}
diesel::table! {
funding_rates (id) {
id -> Int4,
start_date -> Timestamptz,
end_date -> Timestamptz,
rate -> Float4,
timestamp -> Timestamptz,
}
}
diesel::table! {
last_outbound_dlc_messages (peer_id) {
peer_id -> Text,
message_hash -> Text,
message -> Text,
timestamp -> Timestamptz,
}
}
diesel::table! {
legacy_collaborative_reverts (id) {
id -> Int4,
channel_id -> Text,
trader_pubkey -> Text,
price -> Float4,
coordinator_address -> Text,
coordinator_amount_sats -> Int8,
trader_amount_sats -> Int8,
funding_txid -> Text,
funding_vout -> Int4,
timestamp -> Timestamptz,
}
}
diesel::table! {
liquidity_options (id) {
id -> Int4,
rank -> Int2,
title -> Text,
trade_up_to_sats -> Int8,
min_deposit_sats -> Int8,
max_deposit_sats -> Int8,
min_fee_sats -> Nullable<Int8>,
fee_percentage -> Float8,
coordinator_leverage -> Float4,
active -> Bool,
created_at -> Timestamptz,
updated_at -> Timestamptz,
}
}
diesel::table! {
liquidity_request_logs (id) {
id -> Int4,
trader_pk -> Text,
timestamp -> Timestamptz,
requested_amount_sats -> Int8,
liquidity_option -> Int4,
successfully_requested -> Bool,
}
}
diesel::table! {
use diesel::sql_types::*;
use super::sql_types::MatchStateType;
matches (id) {
id -> Uuid,
match_state -> MatchStateType,
order_id -> Uuid,
trader_id -> Text,
match_order_id -> Uuid,
match_trader_id -> Text,
execution_price -> Float4,
quantity -> Float4,
created_at -> Timestamptz,
updated_at -> Timestamptz,
matching_fee_sats -> Int8,
}
}
diesel::table! {
metrics (id) {
id -> Int4,
created_at -> Timestamptz,
on_chain_balance_sats -> Int8,
}
}
diesel::table! {
use diesel::sql_types::*;
use super::sql_types::DirectionType;
use super::sql_types::OrderTypeType;
use super::sql_types::OrderStateType;
use super::sql_types::ContractSymbolType;
use super::sql_types::OrderReasonType;
orders (id) {
id -> Int4,
trader_order_id -> Uuid,
price -> Float4,
trader_id -> Text,
direction -> DirectionType,
quantity -> Float4,
timestamp -> Timestamptz,
order_type -> OrderTypeType,
expiry -> Timestamptz,
order_state -> OrderStateType,
contract_symbol -> ContractSymbolType,
leverage -> Float4,
order_reason -> OrderReasonType,
stable -> Bool,
}
}
diesel::table! {
use diesel::sql_types::*;
use super::sql_types::HtlcStatusType;
use super::sql_types::PaymentFlowType;
payments (id) {
id -> Int4,
payment_hash -> Text,
preimage -> Nullable<Text>,
secret -> Nullable<Text>,
htlc_status -> HtlcStatusType,
amount_msat -> Nullable<Int8>,
flow -> PaymentFlowType,
payment_timestamp -> Timestamptz,
created_at -> Timestamptz,
updated_at -> Timestamptz,
description -> Text,
invoice -> Nullable<Text>,
fee_msat -> Nullable<Int8>,
}
}
diesel::table! {
use diesel::sql_types::*;
use super::sql_types::PollTypeType;
polls (id) {
id -> Int4,
poll_type -> PollTypeType,
question -> Text,
active -> Bool,
creation_timestamp -> Timestamptz,
whitelisted -> Bool,
}
}
diesel::table! {
polls_whitelist (id) {
id -> Int4,
poll_id -> Int4,
trader_pubkey -> Text,
}
}
diesel::table! {
use diesel::sql_types::*;
use super::sql_types::ContractSymbolType;
use super::sql_types::DirectionType;
use super::sql_types::PositionStateType;
positions (id) {
id -> Int4,
contract_symbol -> ContractSymbolType,
trader_leverage -> Float4,
quantity -> Float4,
trader_direction -> DirectionType,
average_entry_price -> Float4,
trader_liquidation_price -> Float4,
position_state -> PositionStateType,
coordinator_margin -> Int8,
creation_timestamp -> Timestamptz,
expiry_timestamp -> Timestamptz,
update_timestamp -> Timestamptz,
trader_pubkey -> Text,
temporary_contract_id -> Nullable<Text>,
trader_realized_pnl_sat -> Nullable<Int8>,
trader_unrealized_pnl_sat -> Nullable<Int8>,
closing_price -> Nullable<Float4>,
coordinator_leverage -> Float4,
trader_margin -> Int8,
stable -> Bool,
coordinator_liquidation_price -> Float4,
order_matching_fees -> Int8,
}
}
diesel::table! {
protocol_funding_fee_events (id) {
id -> Int4,
protocol_id -> Uuid,
funding_fee_event_id -> Int4,
timestamp -> Timestamptz,
}
}
diesel::table! {
reported_errors (id) {
id -> Int4,
trader_pubkey -> Text,
error -> Text,
timestamp -> Timestamptz,
version -> Text,
}
}
diesel::table! {
rollover_params (id) {
id -> Int4,
protocol_id -> Uuid,
trader_pubkey -> Text,
margin_coordinator_sat -> Int8,
margin_trader_sat -> Int8,
leverage_coordinator -> Float4,
leverage_trader -> Float4,
liquidation_price_coordinator -> Float4,
liquidation_price_trader -> Float4,
expiry_timestamp -> Timestamptz,
}
}
diesel::table! {
routing_fees (id) {
id -> Int4,
amount_msats -> Int8,
prev_channel_id -> Nullable<Text>,
next_channel_id -> Nullable<Text>,
created_at -> Timestamptz,
}
}
diesel::table! {
spendable_outputs (id) {
id -> Int4,
txid -> Text,
vout -> Int4,
descriptor -> Text,
}
}
diesel::table! {
use diesel::sql_types::*;
use super::sql_types::DirectionType;
trade_params (id) {
id -> Int4,
protocol_id -> Uuid,
trader_pubkey -> Text,
quantity -> Float4,
leverage -> Float4,
average_price -> Float4,
direction -> DirectionType,
matching_fee -> Int8,
trader_pnl_sat -> Nullable<Int8>,
}
}
diesel::table! {
use diesel::sql_types::*;
use super::sql_types::ContractSymbolType;
use super::sql_types::DirectionType;
trades (id) {
id -> Int4,
position_id -> Int4,
contract_symbol -> ContractSymbolType,
trader_pubkey -> Text,
quantity -> Float4,
trader_leverage -> Float4,
direction -> DirectionType,
average_price -> Float4,
timestamp -> Timestamptz,
order_matching_fee_sat -> Int8,
trader_realized_pnl_sat -> Nullable<Int8>,
}
}
diesel::table! {
transactions (txid) {
txid -> Text,
fee -> Int8,
created_at -> Timestamptz,
updated_at -> Timestamptz,
raw -> Text,
}
}
diesel::table! {
users (id) {
id -> Int4,
pubkey -> Text,
contact -> Text,
timestamp -> Timestamptz,
fcm_token -> Text,
last_login -> Timestamptz,
nickname -> Nullable<Text>,
version -> Nullable<Text>,
referral_code -> Text,
used_referral_code -> Nullable<Text>,
os -> Nullable<Text>,
}
}
diesel::joinable!(answers -> choices (choice_id));
diesel::joinable!(choices -> polls (poll_id));
diesel::joinable!(funding_fee_events -> positions (position_id));
diesel::joinable!(last_outbound_dlc_messages -> dlc_messages (message_hash));
diesel::joinable!(liquidity_request_logs -> liquidity_options (liquidity_option));
diesel::joinable!(polls_whitelist -> polls (poll_id));
diesel::joinable!(protocol_funding_fee_events -> funding_fee_events (funding_fee_event_id));
diesel::joinable!(trades -> positions (position_id));
diesel::allow_tables_to_appear_in_same_query!(
answers,
bonus_status,
bonus_tiers,
channel_opening_params,
channels,
choices,
collaborative_reverts,
dlc_channels,
dlc_messages,
dlc_protocols,
funding_fee_events,
funding_rates,
hodl_invoices,
last_outbound_dlc_messages,
legacy_collaborative_reverts,
liquidity_options,
liquidity_request_logs,
matches,
metrics,
orders,
payments,
polls,
polls_whitelist,
positions,
protocol_funding_fee_events,
reported_errors,
rollover_params,
routing_fees,
spendable_outputs,
trade_params,
trades,
transactions,
users,
);
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/backup.rs | coordinator/src/backup.rs | use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use sled::Db;
use xxi_node::commons::Backup;
use xxi_node::commons::DeleteBackup;
use xxi_node::commons::Restore;
const BACKUPS_DIRECTORY: &str = "user_backups";
/// Holds the user backups in a sled database
///
/// TODO(holzeis): This is fine for now, once we grow we should consider moving that into a dedicate
/// KV database, potentially to a managed service.
pub struct SledBackup {
    // Handle to the sled key-value store; one tree per user, named after the node id.
    db: Db,
}
impl SledBackup {
    /// Opens (or creates) the sled database at `<data_dir>/user_backups`.
    pub fn new(data_dir: String) -> Self {
        let path = format!("{data_dir}/{BACKUPS_DIRECTORY}");
        SledBackup {
            db: sled::open(path).expect("valid path"),
        }
    }

    /// Returns every stored key/value pair for the given node.
    pub fn restore(&self, node_id: PublicKey) -> Result<Vec<Restore>> {
        tracing::debug!(%node_id, "Restoring backup");

        let tree = self.db.open_tree(node_id.to_string())?;

        tree.into_iter()
            .map(|entry| {
                let (raw_key, raw_value) = entry?;
                Ok(Restore {
                    key: String::from_utf8(raw_key.to_vec())?,
                    value: raw_value.to_vec(),
                })
            })
            .collect()
    }

    /// Stores a single key/value pair for the given node and flushes it to disk.
    pub async fn back_up(&self, node_id: PublicKey, backup: Backup) -> Result<()> {
        tracing::debug!(%node_id, backup.key, "Create user backup");

        let tree = self.db.open_tree(node_id.to_string())?;
        tree.insert(backup.key, backup.value)?;
        tree.flush()?;

        Ok(())
    }

    /// Removes a single backup entry for the given node and flushes the change to disk.
    pub fn delete(&self, node_id: PublicKey, backup: DeleteBackup) -> Result<()> {
        tracing::debug!(%node_id, key = backup.key, "Deleting user backup");

        let tree = self.db.open_tree(node_id.to_string())?;
        tree.remove(backup.key)?;
        tree.flush()?;

        Ok(())
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/metrics.rs | coordinator/src/metrics.rs | use crate::db;
use crate::node::Node;
use anyhow::Result;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::PooledConnection;
use diesel::PgConnection;
/// Records a snapshot of the coordinator's on-chain wallet balance as a metrics entry.
pub fn collect_metrics(
    mut conn: PooledConnection<ConnectionManager<PgConnection>>,
    node: Node,
) -> Result<()> {
    let balance = node.inner.wallet().get_balance();

    // Sum every component of the on-chain balance, spendable or not.
    let total_on_chain =
        balance.confirmed + balance.untrusted_pending + balance.trusted_pending + balance.immature;

    db::metrics::create_metrics_entry(&mut conn, total_on_chain)?;

    // TODO: also collect LN balance

    Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/collaborative_revert.rs | coordinator/src/collaborative_revert.rs | use crate::db;
use crate::db::positions::Position;
use crate::message::OrderbookMessage;
use crate::node::storage::NodeStorage;
use crate::notifications::NotificationKind;
use crate::position;
use crate::storage::CoordinatorTenTenOneStorage;
use anyhow::ensure;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::ecdsa::Signature;
use bitcoin::Address;
use bitcoin::Amount;
use bitcoin::Transaction;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::Pool;
use diesel::r2d2::PooledConnection;
use diesel::PgConnection;
use dlc::util::tx_weight_to_fee;
use dlc_manager::channel::ClosedChannel;
use dlc_manager::DlcChannelId;
use dlc_manager::Signer;
use dlc_manager::Storage;
use rust_decimal::Decimal;
use std::sync::Arc;
use time::OffsetDateTime;
use tokio::sync::mpsc;
use xxi_node::bitcoin_conversion::to_ecdsa_signature_29;
use xxi_node::bitcoin_conversion::to_secp_pk_30;
use xxi_node::bitcoin_conversion::to_tx_29;
use xxi_node::bitcoin_conversion::to_tx_30;
use xxi_node::bitcoin_conversion::to_txid_29;
use xxi_node::commons::Message;
use xxi_node::node::Node;
/// The weight for the collaborative revert transaction. The transaction is expected to have 1 input
/// (the funding TXO) and 2 outputs, one for each party.
///
/// If either party were to _not_ have an output, we would be overestimating the weight of the
/// transaction and would end up paying higher fees than necessary.
// NOTE(review): presumably in weight units, as consumed by `dlc::util::tx_weight_to_fee` — confirm.
const COLLABORATIVE_REVERT_TX_WEIGHT: usize = 672;
/// Propose to collaboratively revert the channel identified by `channel_id`.
///
/// A collaborative revert involves signing a new transaction spending from the funding output
/// directly. This can be used to circumvent bugs related to position and DLC channel state.
///
/// This API will only work if the DLC [`Channel`] is in state [`Channel::Signed`].
#[allow(clippy::too_many_arguments)]
pub async fn propose_collaborative_revert(
    node: Arc<
        Node<
            bdk_file_store::Store<bdk::wallet::ChangeSet>,
            CoordinatorTenTenOneStorage,
            NodeStorage,
        >,
    >,
    pool: Pool<ConnectionManager<PgConnection>>,
    sender: mpsc::Sender<OrderbookMessage>,
    channel_id: DlcChannelId,
    fee_rate_sats_vb: u64,
    trader_amount_sats: u64,
    closing_price: Decimal,
) -> Result<()> {
    let channel_id_hex = hex::encode(channel_id);

    let dlc_channels = node
        .list_signed_dlc_channels()
        .context("Could not get list of subchannels")?;

    let channel = dlc_channels
        .iter()
        .find(|c| c.channel_id == channel_id)
        .context("Could not find signed DLC channel")?;

    let peer_id = channel.counter_party;

    let fund_tx_output = channel
        .fund_tx
        .output
        .get(channel.fund_output_index)
        .expect("to be the correct index");

    // The coordinator gets whatever is left of the funding output once the trader's share has been
    // carved out.
    let coordinator_amount_sats = fund_tx_output
        .value
        .checked_sub(trader_amount_sats)
        .context("Could not substract trader amount from total value without overflow")?;

    let fee = tx_weight_to_fee(COLLABORATIVE_REVERT_TX_WEIGHT, fee_rate_sats_vb)
        .context("Could not calculate fee")?;

    // The transaction fee is split evenly between both parties.
    let fee_half = fee.checked_div(2).context("Could not divide fee")?;

    let coordinator_address = node.get_new_address()?;
    let coordinator_amount = Amount::from_sat(
        coordinator_amount_sats
            .checked_sub(fee_half)
            .context("Could not subtract fee from coordinator amount")?,
    );
    let trader_amount = Amount::from_sat(
        trader_amount_sats
            .checked_sub(fee_half)
            .context("Could not subtract fee from trader amount")?,
    );

    tracing::info!(
        channel_id = channel_id_hex,
        coordinator_address = %coordinator_address,
        coordinator_amount = %coordinator_amount,
        trader_amount = %trader_amount,
        "Proposing collaborative revert"
    );

    {
        // Record the pending proposal so it can be matched against the trader's response later
        // (see `confirm_collaborative_revert`).
        let mut conn = pool.get().context("Could not acquire DB lock")?;
        db::collaborative_reverts::insert(
            &mut conn,
            position::models::CollaborativeRevert {
                channel_id,
                trader_pubkey: to_secp_pk_30(peer_id),
                coordinator_address: coordinator_address.clone(),
                coordinator_amount_sats: coordinator_amount,
                trader_amount_sats: trader_amount,
                timestamp: OffsetDateTime::now_utc(),
                price: closing_price,
            },
        )
        .context("Could not insert new collaborative revert")?
    };

    // Ask the trader to sign off on the proposed revert, via websocket with a push-notification
    // fallback.
    sender
        .send(OrderbookMessage::TraderMessage {
            trader_id: to_secp_pk_30(peer_id),
            message: Message::DlcChannelCollaborativeRevert {
                channel_id,
                coordinator_address: Address::new(
                    coordinator_address.network,
                    coordinator_address.payload,
                ),
                coordinator_amount,
                trader_amount,
                execution_price: closing_price,
            },
            notification: Some(NotificationKind::CollaborativeRevert),
        })
        .await
        .context("Failed to notify user")?;

    Ok(())
}
/// Complete the collaborative revert protocol by:
///
/// 1. Verifying the contents of the transaction sent by the counterparty.
/// 2. Signing the transaction.
/// 3. Broadcasting the signed transaction.
pub fn confirm_collaborative_revert(
    node: Arc<
        Node<
            bdk_file_store::Store<bdk::wallet::ChangeSet>,
            CoordinatorTenTenOneStorage,
            NodeStorage,
        >,
    >,
    conn: &mut PooledConnection<ConnectionManager<PgConnection>>,
    channel_id: DlcChannelId,
    revert_transaction: Transaction,
    counterparty_signature: Signature,
) -> Result<Transaction> {
    let channel_id_hex = hex::encode(channel_id);

    // A matching proposal must have been stored by `propose_collaborative_revert`.
    let record = db::collaborative_reverts::get_by_channel_id(conn, &channel_id, node.network)?
        .with_context(|| {
            format!(
                "No matching record to confirm collaborative revert for channel {channel_id_hex}"
            )
        })?;

    tracing::info!(
        collaborative_revert_record = ?record,
        "Confirming collaborative revert"
    );

    // TODO: Check if provided amounts are as expected.

    // Sanity check: refuse to sign a transaction that does not pay the coordinator at all.
    let does_revert_pay_to_coordinator = revert_transaction
        .output
        .iter()
        .any(|output| node.is_mine(&output.script_pubkey));

    ensure!(
        does_revert_pay_to_coordinator,
        "Proposed collaborative revert transaction doesn't pay the coordinator"
    );

    let signed_channels = node
        .list_signed_dlc_channels()
        .context("Failed to list signed DLC channels")?;

    let signed_channel = signed_channels
        .iter()
        .find(|c| c.channel_id == channel_id)
        .context("DLC channel to be reverted not found")?;

    let fund_out_amount = signed_channel.fund_tx.output[signed_channel.fund_output_index].value;

    let own_fund_sk = node
        .dlc_wallet
        .get_secret_key_for_pubkey(&signed_channel.own_params.fund_pubkey)?;

    // Add our signature (alongside the counterparty's) to the multisig funding input.
    // NOTE(review): the `to_tx_29`/`to_tx_30` round trip converts between `bitcoin` crate versions
    // because the `dlc` crate still uses the older types.
    let mut revert_transaction = to_tx_29(revert_transaction);
    dlc::util::sign_multi_sig_input(
        &bitcoin_old::secp256k1::Secp256k1::new(),
        &mut revert_transaction,
        &to_ecdsa_signature_29(counterparty_signature),
        &signed_channel.counter_params.fund_pubkey,
        &own_fund_sk,
        &signed_channel.fund_script_pubkey,
        fund_out_amount,
        0,
    )?;
    let revert_transaction = to_tx_30(revert_transaction);

    tracing::info!(
        txid = revert_transaction.txid().to_string(),
        "Broadcasting collaborative revert transaction"
    );

    node.blockchain
        .broadcast_transaction_blocking(&revert_transaction)
        .context("Could not broadcast transaction")?;

    // TODO: We should probably not modify the state until the transaction has been confirmed.

    let position = Position::get_position_by_trader(conn, record.trader_pubkey, vec![])?
        .with_context(|| format!("Could not load position for subchannel {channel_id_hex}"))?;

    Position::set_position_to_closed(conn, position.id)
        .context("Could not set position to closed")?;

    // The proposal has been fulfilled; drop the pending record.
    db::collaborative_reverts::delete(conn, channel_id)?;

    // Mark the DLC channel as collaboratively closed in the DLC store.
    node.dlc_manager.get_store().upsert_channel(
        dlc_manager::channel::Channel::CollaborativelyClosed(ClosedChannel {
            counter_party: signed_channel.counter_party,
            temporary_channel_id: signed_channel.temporary_channel_id,
            channel_id: signed_channel.channel_id,
            reference_id: None,
            closing_txid: to_txid_29(revert_transaction.txid()),
        }),
        // The contract doesn't matter anymore.
        None,
    )?;

    Ok(revert_transaction)
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/message.rs | coordinator/src/message.rs | use crate::notifications::Notification;
use crate::notifications::NotificationKind;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use futures::future::RemoteHandle;
use futures::FutureExt;
use parking_lot::RwLock;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::broadcast;
use tokio::sync::broadcast::error::RecvError;
use tokio::sync::mpsc;
use tokio::sync::mpsc::Sender;
use xxi_node::commons::Message;
/// Capacity of the [`OrderbookMessage`] channel buffer.
///
/// This value is arbitrarily set to 100 and defines the number of messages the channel can hold
/// before senders are back-pressured.
const NOTIFICATION_BUFFER_SIZE: usize = 100;
/// Message sent to users via the websocket.
#[derive(Debug)]
pub enum OrderbookMessage {
    TraderMessage {
        // Pubkey of the trader the message is addressed to.
        trader_id: PublicKey,
        message: Message,
        // Optional push notification used as fallback if websocket delivery fails.
        notification: Option<NotificationKind>,
    },
}
/// Announces a newly authenticated websocket user together with the sender used to reach them.
#[derive(Clone)]
pub struct NewUserMessage {
    pub new_user: PublicKey,
    pub sender: Sender<Message>,
}
/// Spawns the message-delivery machinery and returns a handle plus the message sender.
///
/// Two tasks are started:
/// 1. A detached task that tracks newly authenticated users (from `tx_user_feed`) together with
///    their websocket sender.
/// 2. A task (kept alive via the returned [`RemoteHandle`]) that forwards [`OrderbookMessage`]s to
///    the matching user, with a push-notification fallback.
pub fn spawn_delivering_messages_to_authenticated_users(
    notification_sender: Sender<Notification>,
    tx_user_feed: broadcast::Sender<NewUserMessage>,
) -> (RemoteHandle<()>, Sender<OrderbookMessage>) {
    let (sender, mut receiver) = mpsc::channel::<OrderbookMessage>(NOTIFICATION_BUFFER_SIZE);

    // Map of trader pubkey -> websocket sender for all users seen on the user feed.
    let authenticated_users = Arc::new(RwLock::new(HashMap::new()));

    tokio::task::spawn({
        let traders = authenticated_users.clone();
        async move {
            let mut user_feed = tx_user_feed.subscribe();
            loop {
                match user_feed.recv().await {
                    Ok(new_user_msg) => {
                        // NOTE(review): entries are only ever inserted, never removed; a
                        // reconnecting user overwrites their previous sender.
                        traders
                            .write()
                            .insert(new_user_msg.new_user, new_user_msg.sender);
                    }
                    Err(RecvError::Closed) => {
                        tracing::error!("New user message sender died! Channel closed");
                        break;
                    }
                    Err(RecvError::Lagged(skip)) => {
                        tracing::warn!(%skip, "Lagging behind on new user message")
                    }
                }
            }
        }
    });

    let (fut, remote_handle) = {
        async move {
            // Drain orderbook messages until all senders are dropped.
            while let Some(notification) = receiver.recv().await {
                if let Err(e) = process_orderbook_message(
                    &authenticated_users,
                    &notification_sender,
                    notification,
                )
                .await
                {
                    tracing::error!("Failed to process orderbook message: {e:#}");
                }
            }

            tracing::error!("Channel closed");
        }
        .remote_handle()
    };

    tokio::spawn(fut);

    (remote_handle, sender)
}
/// Dispatches a single [`OrderbookMessage`] to its target trader.
///
/// Delivery is attempted via the trader's websocket first; if the trader is offline or the send
/// fails, the optional push notification is sent instead.
async fn process_orderbook_message(
    authenticated_users: &RwLock<HashMap<PublicKey, Sender<Message>>>,
    notification_sender: &Sender<Notification>,
    notification: OrderbookMessage,
) -> Result<()> {
    let OrderbookMessage::TraderMessage {
        trader_id,
        message,
        notification,
    } = notification;

    tracing::info!(%trader_id, ?message, "Sending trader message");

    // Clone the sender out of the map so the read lock is not held across an await point.
    let websocket_sender = authenticated_users.read().get(&trader_id).cloned();

    match websocket_sender {
        Some(sender) => match sender.send(message).await {
            Ok(()) => {
                tracing::trace!(
                    %trader_id,
                    "Skipping optional push notifications as the user was successfully \
                     notified via the websocket"
                );
                return Ok(());
            }
            Err(e) => tracing::warn!(%trader_id, "Connection lost to trader: {e:#}"),
        },
        None => tracing::warn!(%trader_id, "Trader is not connected"),
    }

    // Websocket delivery failed; fall back to a push notification if one was requested.
    if let Some(notification_kind) = notification {
        tracing::debug!(%trader_id, "Sending push notification to user");
        notification_sender
            .send(Notification::new(trader_id, notification_kind))
            .await
            .with_context(|| format!("Failed to send push notification to trader {trader_id}"))?;
    }

    Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/routes.rs | coordinator/src/routes.rs | use crate::backup::SledBackup;
use crate::campaign::post_push_campaign;
use crate::collaborative_revert::confirm_collaborative_revert;
use crate::db;
use crate::db::user;
use crate::db::user::User;
use crate::leaderboard::generate_leader_board;
use crate::leaderboard::LeaderBoard;
use crate::leaderboard::LeaderBoardCategory;
use crate::leaderboard::LeaderBoardQueryParams;
use crate::message::NewUserMessage;
use crate::message::OrderbookMessage;
use crate::node::invoice;
use crate::node::Node;
use crate::notifications::Notification;
use crate::orderbook::trading::NewOrderMessage;
use crate::parse_dlc_channel_id;
use crate::routes::admin::post_funding_rates;
use crate::settings::Settings;
use crate::trade::websocket::InternalPositionUpdateMessage;
use crate::AppError;
use admin::close_channel;
use admin::collaborative_revert;
use admin::delete_dlc_channel;
use admin::get_balance;
use admin::get_fee_rate_estimation;
use admin::get_settings;
use admin::get_user_referral_status;
use admin::get_utxos;
use admin::is_connected;
use admin::list_dlc_channels;
use admin::list_on_chain_transactions;
use admin::list_peers;
use admin::migrate_dlc_channels;
use admin::post_sync;
use admin::resend_renew_revoke_message;
use admin::roll_back_dlc_channel;
use admin::rollover;
use admin::update_settings;
use anyhow::anyhow;
use anyhow::Context;
use anyhow::Result;
use axum::extract::ConnectInfo;
use axum::extract::DefaultBodyLimit;
use axum::extract::Path;
use axum::extract::Query;
use axum::extract::State;
use axum::extract::WebSocketUpgrade;
use axum::response::IntoResponse;
use axum::routing::delete;
use axum::routing::get;
use axum::routing::post;
use axum::routing::put;
use axum::Json;
use axum::Router;
use bitcoin::consensus::encode::serialize_hex;
use bitcoin::secp256k1::ecdsa::Signature;
use bitcoin::secp256k1::PublicKey;
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::VerifyOnly;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::Pool;
use diesel::PgConnection;
use lnd_bridge::InvoiceParams;
use lnd_bridge::LndBridge;
use orderbook::delete_order;
use orderbook::get_order;
use orderbook::get_orders;
use orderbook::post_order;
use orderbook::websocket_handler;
use serde::Serialize;
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::Arc;
use time::macros::format_description;
use time::Date;
use time::OffsetDateTime;
use tokio::sync::broadcast;
use tokio::sync::mpsc;
use tokio::sync::RwLock;
use tokio::task::spawn_blocking;
use tracing::instrument;
use xxi_node::commons;
use xxi_node::commons::Backup;
use xxi_node::commons::CollaborativeRevertTraderResponse;
use xxi_node::commons::DeleteBackup;
use xxi_node::commons::Message;
use xxi_node::commons::Poll;
use xxi_node::commons::PollAnswers;
use xxi_node::commons::RegisterParams;
use xxi_node::commons::ReportedError;
use xxi_node::commons::Restore;
use xxi_node::commons::SignedValue;
use xxi_node::commons::UpdateUsernameParams;
use xxi_node::node::NodeInfo;
mod admin;
mod orderbook;
/// Shared state injected into every HTTP/websocket route handler.
pub struct AppState {
    pub node: Node,
    // Channel used to send messages to all connected clients.
    pub tx_orderbook_feed: broadcast::Sender<Message>,
    /// A channel used to send messages about position updates
    pub tx_position_feed: broadcast::Sender<InternalPositionUpdateMessage>,
    // Announces newly authenticated websocket users.
    pub tx_user_feed: broadcast::Sender<NewUserMessage>,
    // Submits new orders for matching/execution.
    pub trading_sender: mpsc::Sender<NewOrderMessage>,
    pub pool: Pool<ConnectionManager<PgConnection>>,
    pub settings: RwLock<Settings>,
    pub node_alias: String,
    // Delivers messages to authenticated users (websocket first, push fallback).
    pub auth_users_notifier: mpsc::Sender<OrderbookMessage>,
    pub notification_sender: mpsc::Sender<Notification>,
    pub user_backup: SledBackup,
    // Verification-only secp context for checking client signatures.
    pub secp: Secp256k1<VerifyOnly>,
    pub lnd_bridge: LndBridge,
}
/// Builds the axum [`Router`] wiring up all public, admin and websocket routes around a shared
/// [`AppState`].
#[allow(clippy::too_many_arguments)]
pub fn router(
    node: Node,
    pool: Pool<ConnectionManager<PgConnection>>,
    settings: Settings,
    node_alias: &str,
    trading_sender: mpsc::Sender<NewOrderMessage>,
    tx_orderbook_feed: broadcast::Sender<Message>,
    tx_position_feed: broadcast::Sender<InternalPositionUpdateMessage>,
    tx_user_feed: broadcast::Sender<NewUserMessage>,
    auth_users_notifier: mpsc::Sender<OrderbookMessage>,
    notification_sender: mpsc::Sender<Notification>,
    user_backup: SledBackup,
    lnd_bridge: LndBridge,
) -> Router {
    // Signature verification only; no signing keys live in the web layer.
    let secp = Secp256k1::verification_only();

    let app_state = Arc::new(AppState {
        node,
        pool,
        settings: RwLock::new(settings),
        tx_orderbook_feed,
        tx_position_feed,
        tx_user_feed,
        trading_sender,
        node_alias: node_alias.to_string(),
        auth_users_notifier,
        notification_sender,
        user_backup,
        secp,
        lnd_bridge,
    });

    Router::new()
        .route("/", get(lightning_peer_ws_handler))
        .route("/api/version", get(version))
        .route("/api/polls", post(post_poll_answer))
        .route("/api/polls/:node_id", get(get_polls))
        .route(
            "/api/fee_rate_estimate/:target",
            get(get_fee_rate_estimation),
        )
        .route("/api/backup/:node_id", post(back_up).delete(delete_backup))
        .route("/api/restore/:node_id", get(restore))
        .route("/api/newaddress", get(get_unused_address))
        .route("/api/node", get(get_node_info))
        .route("/api/orderbook/orders", get(get_orders).post(post_order))
        .route("/api/orderbook/orders/:order_id", get(get_order))
        .route("/api/orderbook/websocket", get(websocket_handler))
        .route("/api/invoice", post(create_invoice))
        .route("/api/users", post(post_register))
        .route("/api/users/:trader_pubkey", get(get_user))
        .route("/api/users/nickname", put(update_nickname))
        .route("/api/report-error", post(post_error))
        // TODO: we should move this back into public once we add signing to this function
        .route(
            "/api/admin/orderbook/orders/:order_id",
            delete(delete_order),
        )
        .route("/api/admin/rollover/:dlc_channel_id", post(rollover))
        .route("/api/admin/wallet/balance", get(get_balance))
        .route("/api/admin/wallet/utxos", get(get_utxos))
        .route("/api/admin/channels/:channel_id", delete(close_channel))
        .route("/api/admin/peers", get(list_peers))
        .route("/api/admin/dlc_channels", get(list_dlc_channels))
        .route(
            "/api/admin/dlc_channels/:channel_id",
            delete(delete_dlc_channel),
        )
        .route(
            "/api/admin/dlc_channels/rollback/:channel_id",
            post(roll_back_dlc_channel),
        )
        .route("/api/admin/transactions", get(list_on_chain_transactions))
        .route("/api/admin/channels/revert", post(collaborative_revert))
        .route(
            "/api/channels/confirm-collab-revert",
            post(collaborative_revert_confirm),
        )
        .route("/api/admin/is_connected/:target_pubkey", get(is_connected))
        .route(
            "/api/admin/settings",
            get(get_settings).put(update_settings),
        )
        .route("/api/admin/sync", post(post_sync))
        .route("/api/admin/campaign/push", post(post_push_campaign))
        .route(
            "/api/admin/resend_renew_revoke_message/:trader_pubkey",
            post(resend_renew_revoke_message),
        )
        .route(
            "/api/admin/migrate_dlc_channels",
            post(migrate_dlc_channels),
        )
        .route(
            "/api/admin/users/:trader_pubkey/referrals",
            get(get_user_referral_status),
        )
        .route("/api/admin/funding-rates", post(post_funding_rates))
        .route("/health", get(get_health))
        .route("/api/leaderboard", get(get_leaderboard))
        .route(
            "/api/admin/trade/websocket",
            get(crate::trade::websocket::websocket_handler),
        )
        // NOTE(review): both a `disable` and a `max(50 KiB)` body-limit layer are applied here;
        // with axum layering, the layer added first sits closest to the handler — confirm which
        // limit actually takes effect.
        .layer(DefaultBodyLimit::disable())
        .layer(DefaultBodyLimit::max(50 * 1024))
        .with_state(app_state)
}
/// Trivial JSON body returned when `/` is hit without a websocket upgrade.
#[derive(serde::Serialize)]
struct HelloWorld {
    hello: String,
}
/// Root handler: upgrades websocket requests into inbound lightning peer connections; plain HTTP
/// requests get a hello-world JSON response.
pub async fn lightning_peer_ws_handler(
    ws: Option<WebSocketUpgrade>,
    ConnectInfo(addr): ConnectInfo<SocketAddr>,
    State(state): State<Arc<AppState>>,
) -> impl IntoResponse {
    if let Some(ws) = ws {
        let peer_manager = state.node.inner.peer_manager.clone();
        ws.on_upgrade(move |socket| {
            xxi_node::networking::axum::setup_inbound(peer_manager, socket, addr)
        })
        .into_response()
    } else {
        Json(HelloWorld {
            hello: "world".to_string(),
        })
        .into_response()
    }
}
/// Returns a fresh on-chain address from the coordinator's wallet.
pub async fn get_unused_address(
    State(app_state): State<Arc<AppState>>,
) -> Result<String, AppError> {
    match app_state.node.inner.get_unused_address() {
        Ok(address) => Ok(address.to_string()),
        Err(e) => Err(AppError::InternalServerError(format!(
            "Could not get unused address: {e:#}"
        ))),
    }
}
/// Returns the coordinator's node info.
#[instrument(skip_all, err(Debug))]
pub async fn get_node_info(
    State(app_state): State<Arc<AppState>>,
) -> Result<Json<NodeInfo>, AppError> {
    Ok(Json(app_state.node.inner.info))
}
/// Registers (or re-registers) an app user by upserting their record.
#[instrument(skip_all, err(Debug))]
pub async fn post_register(
    State(state): State<Arc<AppState>>,
    params: Json<RegisterParams>,
) -> Result<(), AppError> {
    let register_params = params.0;
    // NOTE(review): this logs before the upsert has actually happened.
    tracing::info!(?register_params, "Registered new user");
    spawn_blocking(move || {
        let mut conn = state.pool.get()?;
        user::upsert_user(
            &mut conn,
            register_params.pubkey,
            register_params.contact.clone(),
            register_params.nickname.clone(),
            register_params.version.clone(),
            register_params.os,
            register_params.referral_code,
        )
        .map_err(|e| anyhow!(e))
    })
    .await
    .expect("task to finish")
    .map_err(|e| AppError::InternalServerError(format!("Could not upsert user: {e:#}")))?;

    Ok(())
}
/// Updates the nickname of an existing user.
#[instrument(skip_all, err(Debug))]
pub async fn update_nickname(
    State(state): State<Arc<AppState>>,
    params: Json<UpdateUsernameParams>,
) -> Result<(), AppError> {
    let register_params = params.0;
    tracing::info!(?register_params, "Updating user's nickname");

    // Database work happens off the async executor.
    let update = move || {
        let mut conn = state.pool.get().context("Could not get connection")?;
        user::update_nickname(&mut conn, register_params.pubkey, register_params.nickname)
            .map_err(|e| anyhow!(e))
    };

    spawn_blocking(update)
        .await
        .expect("task to finish")
        .map_err(|e| AppError::InternalServerError(format!("Could not update nickname: {e:#}")))?;

    Ok(())
}
impl TryFrom<User> for commons::User {
    type Error = AppError;

    /// Maps a DB user row to its API representation.
    fn try_from(value: User) -> Result<Self, Self::Error> {
        Ok(commons::User {
            pubkey: PublicKey::from_str(&value.pubkey).map_err(|_| {
                AppError::InternalServerError("Could not parse user pubkey".to_string())
            })?,
            // An empty contact string is treated as "no contact".
            contact: Some(value.contact).filter(|s| !s.is_empty()),
            nickname: value.nickname,
            referral_code: value.referral_code,
        })
    }
}
/// Looks up a registered user by their trader pubkey.
#[instrument(skip_all, err(Debug))]
pub async fn get_user(
    State(state): State<Arc<AppState>>,
    Path(trader_pubkey): Path<String>,
) -> Result<Json<commons::User>, AppError> {
    let trader_pubkey = PublicKey::from_str(trader_pubkey.as_str())
        .map_err(|_| AppError::BadRequest("Invalid trader id provided".to_string()))?;

    let option = spawn_blocking(move || {
        let mut conn = state.pool.get().context("Could not get connection")?;
        user::get_user(&mut conn, &trader_pubkey)
    })
    .await
    .expect("task to finish")
    .map_err(|e| AppError::InternalServerError(format!("Could not load users: {e:#}")))?;

    match option {
        None => Err(AppError::BadRequest("No user found".to_string())),
        Some(user) => Ok(Json(user.try_into()?)),
    }
}
/// Liveness probe.
pub async fn get_health() -> Result<Json<String>, AppError> {
    // TODO: Implement any health check logic we'd need
    // So far this just returns if the server is running
    let status = "Server is healthy".to_string();
    Ok(Json(status))
}
/// Build information reported by the `/api/version` endpoint.
#[derive(Serialize)]
pub struct Version {
    version: String,
    commit_hash: String,
    branch: String,
}
/// Returns the coordinator's build information (crate version, commit hash and branch).
pub async fn version() -> Result<Json<Version>, AppError> {
    Ok(Json(Version {
        version: env!("CARGO_PKG_VERSION").to_string(),
        // NOTE(review): COMMIT_HASH and BRANCH_NAME are presumably injected at build time
        // (e.g. by a build script) — confirm.
        commit_hash: env!("COMMIT_HASH").to_string(),
        branch: env!("BRANCH_NAME").to_string(),
    }))
}
/// Returns the polls that are currently active for the given node.
pub async fn get_polls(
    Path(node_id): Path<String>,
    State(state): State<Arc<AppState>>,
) -> Result<Json<Vec<Poll>>, AppError> {
    let node_id = PublicKey::from_str(&node_id)
        .map_err(|e| AppError::BadRequest(format!("Invalid node id provided. {e:#}")))?;
    let polls = spawn_blocking(move || {
        let mut connection = state.pool.get().context("Could not get db connection")?;
        db::polls::active(&mut connection, &node_id).map_err(|e| anyhow!(e))
    })
    .await
    .expect("task to finish")
    .map_err(|error| AppError::InternalServerError(format!("Could not fetch new polls {error}")))?;
    Ok(Json(polls))
}
/// Persists a trader's answers to a poll.
pub async fn post_poll_answer(
    State(state): State<Arc<AppState>>,
    poll_answer: Json<PollAnswers>,
) -> Result<(), AppError> {
    tracing::trace!(
        poll_id = poll_answer.poll_id,
        trader_pk = poll_answer.trader_pk.to_string(),
        answers = ?poll_answer.answers,
        "Received new answer");

    spawn_blocking(move || {
        let mut connection = state.pool.get().context("Could not get db connection")?;
        db::polls::add_answer(&mut connection, poll_answer.0)
    })
    .await
    .expect("to finish task")
    .map_err(|error| {
        AppError::InternalServerError(format!("Could not save answer in db: {error:?}"))
    })?;

    Ok(())
}
/// Confirms a collaborative revert previously proposed to the trader: verifies, signs and
/// broadcasts the revert transaction, returning it hex-encoded.
#[instrument(skip_all, err(Debug))]
pub async fn collaborative_revert_confirm(
    State(state): State<Arc<AppState>>,
    revert_params: Json<CollaborativeRevertTraderResponse>,
) -> Result<Json<String>, AppError> {
    let channel_id_string = revert_params.channel_id.clone();
    let channel_id = parse_dlc_channel_id(channel_id_string.as_str()).map_err(|error| {
        tracing::error!(
            channel_id = channel_id_string,
            "Invalid channel id provided. {error:#}"
        );
        AppError::BadRequest("Invalid channel id provided".to_string())
    })?;

    tracing::info!(
        channel_id = channel_id_string,
        "Confirming collaborative channel revert"
    );

    let inner_node = state.node.inner.clone();

    // Signing and broadcasting is blocking work, so run it off the async executor.
    let raw_tx = spawn_blocking(move || {
        let mut conn = state
            .pool
            .clone()
            .get()
            .context("Could not acquire db lock")?;
        confirm_collaborative_revert(
            inner_node,
            &mut conn,
            channel_id,
            revert_params.transaction.clone(),
            revert_params.signature,
        )
    })
    .await
    .expect("task to finish")
    .map_err(|error| {
        tracing::error!(
            channel_id = channel_id_string,
            "Could not confirm collaborative revert: {error:#}"
        );
        AppError::InternalServerError("Could not confirm collaborative revert".to_string())
    })?;

    Ok(Json(serialize_hex(&raw_tx)))
}
// TODO(holzeis): There is no reason the backup and restore api has to run on the coordinator. On
// the contrary it would be much more reasonable to have the backup and restore api run separately.

/// Stores a single backup entry for the given node.
///
/// The payload must be signed by the node's key; otherwise `401 Unauthorized` is returned.
#[instrument(skip_all, err(Debug))]
pub async fn back_up(
    Path(node_id): Path<String>,
    State(state): State<Arc<AppState>>,
    backup: Json<Backup>,
) -> Result<(), AppError> {
    let node_id = PublicKey::from_str(&node_id)
        .map_err(|e| AppError::BadRequest(format!("Invalid node id provided. {e:#}")))?;

    // Only the owner of the backup may write it.
    backup
        .verify(&state.secp, &node_id)
        .map_err(|_| AppError::Unauthorized)?;

    state
        .user_backup
        .back_up(node_id, backup.0)
        .await
        .map_err(|e| AppError::InternalServerError(e.to_string()))
}
/// Deletes a single backup entry for the given node.
///
/// The payload must be signed by the node's key; otherwise `401 Unauthorized` is returned.
#[instrument(skip_all, err(Debug))]
pub async fn delete_backup(
    Path(node_id): Path<String>,
    State(state): State<Arc<AppState>>,
    backup: Json<DeleteBackup>,
) -> Result<(), AppError> {
    let node_id = PublicKey::from_str(&node_id)
        .map_err(|e| AppError::BadRequest(format!("Invalid node id provided. {e:#}")))?;

    // Only the owner of the backup may delete it.
    backup
        .verify(&state.secp, &node_id)
        .map_err(|_| AppError::Unauthorized)?;

    state
        .user_backup
        .delete(node_id, backup.0)
        .map_err(|e| AppError::InternalServerError(e.to_string()))
}
/// Returns all backup entries for the given node.
///
/// The caller proves ownership by providing a signature over their own node id.
#[instrument(skip_all, err(Debug))]
async fn restore(
    Path(node_id): Path<String>,
    State(state): State<Arc<AppState>>,
    signature: Json<Signature>,
) -> Result<Json<Vec<Restore>>, AppError> {
    let node_id = PublicKey::from_str(&node_id)
        .map_err(|e| AppError::BadRequest(format!("Invalid node id provided. {e:#}")))?;

    // The signed message is the node id itself.
    let message = node_id.to_string().as_bytes().to_vec();
    let message = commons::create_sign_message(message);
    state
        .secp
        .verify_ecdsa(&message, &signature, &node_id)
        .map_err(|_| AppError::Unauthorized)?;

    let backup = state
        .user_backup
        .restore(node_id)
        .map_err(|e| AppError::InternalServerError(format!("Failed to restore backup. {e:#}")))?;

    Ok(Json(backup))
}
/// Parses a `YYYY-MM-DD` string into a UTC timestamp at midnight of that day.
///
/// An empty string is treated as "not provided" and yields `Ok(None)`.
fn parse_offset_datetime(date_str: String) -> Result<Option<OffsetDateTime>> {
    if date_str.is_empty() {
        return Ok(None);
    }

    let format = format_description!("[year]-[month]-[day]");
    let parsed = Date::parse(date_str.as_str(), &format)?;

    Ok(Some(parsed.midnight().assume_utc()))
}
/// Returns the leader board for the requested window.
///
/// Query parameters:
/// - `top`: number of entries (default 5).
/// - `reverse`: invert the ranking order (default false).
/// - `start`/`end`: date window as `YYYY-MM-DD`; defaults to the UNIX epoch and now respectively.
/// - `category`: ranking category (default PnL).
#[instrument(skip_all, err(Debug))]
pub async fn get_leaderboard(
    State(state): State<Arc<AppState>>,
    params: Query<LeaderBoardQueryParams>,
) -> Result<Json<LeaderBoard>, AppError> {
    let reverse = params.reverse.unwrap_or_default();
    let top = params.top.unwrap_or(5);

    let start = params.start.clone().unwrap_or_default();
    let start = parse_offset_datetime(start.clone())
        .map_err(|err| {
            AppError::BadRequest(format!(
                "Invalid start date provided `{err}`. String provided {start}"
            ))
        })?
        .unwrap_or(OffsetDateTime::UNIX_EPOCH);

    let end = params.end.clone().unwrap_or_default();
    let end = parse_offset_datetime(end.clone())
        .map_err(|err| {
            // Bug fix: this error previously said "Invalid start date" for an invalid end date
            // (copy-paste from the `start` branch above).
            AppError::BadRequest(format!(
                "Invalid end date provided `{err}`. String provided {end}"
            ))
        })?
        .unwrap_or(OffsetDateTime::now_utc());

    let category = params.category.clone().unwrap_or(LeaderBoardCategory::Pnl);

    let leader_board = spawn_blocking(move || {
        let mut conn = state.pool.get().context("Could not access db")?;
        generate_leader_board(&mut conn, top, category, reverse, start, end)
    })
    .await
    .expect("task to finish")
    .map_err(|error| {
        AppError::InternalServerError(format!("Could not build leaderboard {error}"))
    })?;

    Ok(Json(LeaderBoard {
        entries: leader_board,
    }))
}
/// Persists an error reported by an app instance so it can be inspected later.
#[instrument(skip_all, err(Debug))]
async fn post_error(
    State(state): State<Arc<AppState>>,
    app_error: Json<ReportedError>,
) -> Result<(), AppError> {
    spawn_blocking(move || {
        let mut conn = state.pool.get().context("Could not get connection")?;
        db::reported_errors::insert(&mut conn, app_error.0).map_err(|e| anyhow!(e))
    })
    .await
    .expect("task to finish")
    .map_err(|e| AppError::InternalServerError(format!("Could not save error in db: {e}")))?;

    Ok(())
}
/// Creates a hodl invoice via LND for funding a position, records it and starts watching it.
///
/// The request must be signed by the trader; returns the BOLT11 payment request.
#[instrument(skip_all, err(Debug))]
async fn create_invoice(
    State(state): State<Arc<AppState>>,
    Json(invoice_params): Json<SignedValue<commons::HodlInvoiceParams>>,
) -> Result<Json<String>, AppError> {
    let public_key = invoice_params.value.trader_pubkey;

    // Only the trader themselves may request an invoice in their name.
    invoice_params
        .verify(&state.secp, &public_key)
        .map_err(|_| AppError::Unauthorized)?;

    let invoice_params = invoice_params.value;
    let invoice_amount = invoice_params.amt_sats;
    let r_hash = invoice_params.r_hash.clone();

    let response = state
        .lnd_bridge
        .create_invoice(InvoiceParams {
            value: invoice_amount,
            memo: "Fund your 10101 position".to_string(),
            expiry: 5 * 60, // 5 minutes
            hash: r_hash.clone(),
        })
        .await
        .map_err(|e| AppError::InternalServerError(format!("{e:#}")))?;

    // Track the invoice in our database, keyed by its payment hash.
    spawn_blocking({
        let pool = state.pool.clone();
        let r_hash = r_hash.clone();
        move || {
            let mut connection = pool.get().context("Could not get db connection")?;
            db::hodl_invoice::create_hodl_invoice(
                &mut connection,
                r_hash.as_str(),
                public_key,
                invoice_amount,
            )
            .context("Could not create hodl invoice")
        }
    })
    .await
    .expect("to finish task")
    .map_err(|error| {
        AppError::InternalServerError(format!("Could not process hodl invoice {error:?}"))
    })?;

    // watch for the created hodl invoice
    invoice::spawn_invoice_watch(
        state.pool.clone(),
        state.auth_users_notifier.clone(),
        state.lnd_bridge.clone(),
        invoice_params,
    );

    tracing::info!(
        trader_pubkey = public_key.to_string(),
        r_hash,
        amount_sats = invoice_amount,
        "Started watching for hodl invoice"
    );

    Ok(Json(response.payment_request))
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/notifications.rs | coordinator/src/notifications.rs | use crate::db;
use anyhow::ensure;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::Pool;
use diesel::PgConnection;
use std::fmt::Display;
use tokio::sync::mpsc;
/// Types of notification that can be sent to 10101 app users
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum NotificationKind {
RolloverWindowOpen,
PositionSoonToExpire,
PositionExpired,
CollaborativeRevert,
Custom { title: String, message: String },
}
impl Display for NotificationKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
NotificationKind::PositionSoonToExpire => write!(f, "PositionSoonToExpire"),
NotificationKind::PositionExpired => write!(f, "PositionExpired"),
NotificationKind::RolloverWindowOpen => write!(f, "RolloverWindowOpen"),
NotificationKind::CollaborativeRevert => write!(f, "CollaborativeRevertPending"),
NotificationKind::Custom { .. } => write!(f, "Custom"),
}
}
}
#[derive(Debug, Clone)]
pub struct Notification {
trader_ids: Vec<PublicKey>,
notification_kind: NotificationKind,
}
impl Notification {
pub fn new(trader_id: PublicKey, notification_kind: NotificationKind) -> Self {
Self {
notification_kind,
trader_ids: vec![trader_id],
}
}
pub fn new_batch(trader_ids: Vec<PublicKey>, notification_kind: NotificationKind) -> Self {
Self {
notification_kind,
trader_ids,
}
}
}
/// Actor managing the notifications
pub struct NotificationService {
notification_sender: mpsc::Sender<Notification>,
}
impl NotificationService {
/// Start the notification service
///
/// If an empty string is passed in the constructor, the service will not send any notification.
/// It will only log the notification that it would have sent.
pub fn new(fcm_api_key: String, pool: Pool<ConnectionManager<PgConnection>>) -> Self {
if fcm_api_key.is_empty() {
// Log it as error, as in production it should always be set
tracing::error!("FCM API key is empty. No notifications will not be sent.");
}
let (notification_sender, mut notification_receiver) = mpsc::channel(100);
// TODO: use RAII here
tokio::spawn({
let client = fcm::Client::new();
async move {
while let Some(Notification {
trader_ids,
notification_kind,
}) = notification_receiver.recv().await
{
let result = tokio::task::spawn_blocking({
let pool = pool.clone();
move || {
let mut conn = pool.get()?;
let users = db::user::get_users(&mut conn, trader_ids)?;
anyhow::Ok(users)
}
})
.await
.expect("task to complete");
let users = match result {
Ok(users) => users,
Err(e) => {
tracing::error!("Failed to fetch users. Error: {e:#}");
continue;
}
};
let fcm_tokens = users
.iter()
.map(|user| user.fcm_token.clone())
.filter(|token| !token.is_empty() && token != "unavailable")
.map(FcmToken::new)
.filter_map(Result::ok)
.collect::<Vec<_>>();
for user_fcm_token in fcm_tokens {
tracing::info!(%notification_kind, %user_fcm_token, "Sending notification");
if !fcm_api_key.is_empty() {
let notification = build_notification(¬ification_kind);
if let Err(e) = send_notification(
&client,
&fcm_api_key,
&user_fcm_token,
notification,
)
.await
{
tracing::error!("Could not send notification to FCM: {:?}", e);
}
}
}
}
}
});
Self {
notification_sender,
}
}
/// Constructs a new sender. Use a sender to send notification from any part of the system.
pub fn get_sender(&self) -> mpsc::Sender<Notification> {
self.notification_sender.clone()
}
}
/// Prepares the notification text
fn build_notification(kind: &NotificationKind) -> fcm::Notification<'_> {
let mut notification_builder = fcm::NotificationBuilder::new();
match kind {
NotificationKind::PositionSoonToExpire => {
notification_builder.title("Your position is about to expire ⏳");
notification_builder
.body("Open your app to roll over your position for the next cycle.");
}
NotificationKind::PositionExpired => {
notification_builder.title("Your position has expired 🥴");
notification_builder.body("Open your app to execute the expiration.");
}
NotificationKind::RolloverWindowOpen => {
notification_builder.title("Rollover window is open 🪟");
notification_builder
.body("Open your app to roll over your position for the next cycle.");
}
NotificationKind::CollaborativeRevert => {
notification_builder.title("Error detected");
notification_builder.body("Please open your app to recover your funds.");
}
NotificationKind::Custom { title, message } => {
notification_builder.title(title);
notification_builder.body(message);
}
}
notification_builder.finalize()
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct FcmToken(String);
impl FcmToken {
pub fn new(token: String) -> Result<Self> {
ensure!(!token.is_empty(), "FCM token cannot be empty");
Ok(Self(token))
}
pub fn get(&self) -> &str {
&self.0
}
}
impl Display for FcmToken {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", &self.0)
}
}
async fn send_notification<'a>(
client: &fcm::Client,
api_key: &str,
fcm_token: &FcmToken,
notification: fcm::Notification<'a>,
) -> Result<()> {
ensure!(!api_key.is_empty(), "FCM API key is empty");
let mut message_builder = fcm::MessageBuilder::new(api_key, fcm_token.get());
message_builder.notification(notification);
let message = message_builder.finalize();
let response = client
.send(message)
.await
.context("Could not send FCM notification")?;
tracing::debug!("Sent notification. Response: {:?}", response);
Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/scheduler.rs | coordinator/src/scheduler.rs | use crate::db;
use crate::metrics::collect_metrics;
use crate::node::Node;
use crate::notifications::Notification;
use crate::notifications::NotificationKind;
use crate::orderbook;
use crate::referrals;
use crate::settings::Settings;
use anyhow::Result;
use bitcoin::Network;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::Pool;
use diesel::PgConnection;
use time::OffsetDateTime;
use tokio::sync::mpsc;
use tokio_cron_scheduler::Job;
use tokio_cron_scheduler::JobScheduler;
use tokio_cron_scheduler::JobSchedulerError;
use xxi_node::commons;
pub struct NotificationScheduler {
pub scheduler: JobScheduler,
sender: mpsc::Sender<Notification>,
settings: Settings,
network: Network,
node: Node,
}
impl NotificationScheduler {
pub async fn new(
sender: mpsc::Sender<Notification>,
settings: Settings,
network: Network,
node: Node,
) -> Self {
let scheduler = JobScheduler::new()
.await
.expect("To be able to start the scheduler");
Self {
scheduler,
sender,
settings,
network,
node,
}
}
pub async fn update_bonus_status_for_users(
&self,
pool: Pool<ConnectionManager<PgConnection>>,
) -> Result<()> {
let schedule = self.settings.update_user_bonus_status_scheduler.clone();
let uuid = self
.scheduler
.add(build_update_bonus_status_job(schedule.as_str(), pool)?)
.await?;
tracing::debug!(
job_id = uuid.to_string(),
"Started new job to update users bonus status"
);
Ok(())
}
pub async fn add_collect_metrics_job(
&self,
pool: Pool<ConnectionManager<PgConnection>>,
) -> Result<()> {
let schedule = self.settings.collect_metrics_scheduler.clone();
let uuid = self
.scheduler
.add(build_metrics_collector_job(
schedule.as_str(),
pool,
self.node.clone(),
)?)
.await?;
tracing::debug!(
job_id = uuid.to_string(),
"Started new job to collect metrics"
);
Ok(())
}
pub async fn add_reminder_to_close_expired_position_job(
&self,
pool: Pool<ConnectionManager<PgConnection>>,
) -> Result<()> {
let sender = self.sender.clone();
let schedule = self.settings.close_expired_position_scheduler.clone();
let uuid = self
.scheduler
.add(build_remind_to_close_expired_position_notification_job(
schedule.as_str(),
sender,
pool,
)?)
.await?;
tracing::debug!(
job_id = uuid.to_string(),
"Started new job to remind to close an expired position"
);
Ok(())
}
pub async fn add_reminder_to_close_liquidated_position_job(
&self,
pool: Pool<ConnectionManager<PgConnection>>,
) -> Result<()> {
let sender = self.sender.clone();
let schedule = self.settings.close_liquidated_position_scheduler.clone();
let uuid = self
.scheduler
.add(build_remind_to_close_liquidated_position_notification_job(
schedule.as_str(),
sender,
pool,
)?)
.await?;
tracing::debug!(
job_id = uuid.to_string(),
"Started new job to remind to close a liquidated position"
);
Ok(())
}
pub async fn add_rollover_window_reminder_job(
&self,
pool: Pool<ConnectionManager<PgConnection>>,
) -> Result<()> {
let schedule = self.settings.rollover_window_open_scheduler.clone();
let network = self.network;
let node = self.node.clone();
let sender = self.sender.clone();
let uuid = self
.scheduler
.add(build_rollover_notification_job(
schedule.as_str(),
pool,
network,
NotificationKind::RolloverWindowOpen,
node,
sender,
)?)
.await?;
tracing::debug!(
job_id = uuid.to_string(),
"Started new job to remind rollover window is open"
);
Ok(())
}
pub async fn add_rollover_window_close_reminder_job(
&self,
pool: Pool<ConnectionManager<PgConnection>>,
) -> Result<()> {
let schedule = self.settings.rollover_window_close_scheduler.clone();
let network = self.network;
let node = self.node.clone();
let sender = self.sender.clone();
let uuid = self
.scheduler
.add(build_rollover_notification_job(
schedule.as_str(),
pool,
network,
NotificationKind::PositionSoonToExpire,
node,
sender,
)?)
.await?;
tracing::debug!(
job_id = uuid.to_string(),
"Started new job to remind rollover window is closing"
);
Ok(())
}
pub async fn start(&self) -> Result<()> {
self.scheduler.start().await?;
Ok(())
}
}
fn build_rollover_notification_job(
schedule: &str,
pool: Pool<ConnectionManager<PgConnection>>,
network: Network,
notification: NotificationKind,
node: Node,
notifier: mpsc::Sender<Notification>,
) -> Result<Job, JobSchedulerError> {
Job::new_async(schedule, move |_, _| {
let notifier = notifier.clone();
let mut conn = match pool.get() {
Ok(conn) => conn,
Err(e) => {
return Box::pin(async move {
tracing::error!("Failed to get connection. Error: {e:#}")
});
}
};
if !commons::is_eligible_for_rollover(OffsetDateTime::now_utc(), network) {
return Box::pin(async move {
tracing::warn!("Rollover window hasn't started yet. Job schedule seems to be miss-aligned with the rollover window. Skipping user notifications.");
});
}
// calculates the expiry of the next rollover window. positions which have an
// expiry before that haven't rolled over yet, and need to be reminded.
let expiry = commons::calculate_next_expiry(OffsetDateTime::now_utc(), network);
match db::positions::Position::get_all_open_positions_with_expiry_before(&mut conn, expiry)
{
Ok(positions) => Box::pin({
tracing::debug!(
nr_of_positions = positions.len(),
"Found positions to rollover"
);
let notification = notification.clone();
let node = node.clone();
async move {
for position in positions {
if let Err(e) = node
.check_rollover(
&mut conn,
position,
node.inner.network,
¬ifier,
Some(notification.clone()),
)
.await
{
tracing::error!(trader_id=%position.trader, "Failed to check rollover. {e:#}");
}
}
}
}),
Err(error) => Box::pin(async move {
tracing::error!("Could not load positions with fcm token {error:#}")
}),
}
})
}
fn build_update_bonus_status_job(
schedule: &str,
pool: Pool<ConnectionManager<PgConnection>>,
) -> Result<Job, JobSchedulerError> {
Job::new_async(schedule, move |_, _| {
let mut conn = match pool.get() {
Ok(conn) => conn,
Err(e) => {
return Box::pin(async move {
tracing::error!("Failed to get connection. Error: {e:#}")
});
}
};
match referrals::update_referral_status(&mut conn) {
Ok(number_of_updated_users) => Box::pin({
async move {
tracing::debug!(
number_of_updated_users,
"Successfully updated users bonus status."
)
}
}),
Err(error) => {
Box::pin(
async move { tracing::error!("Could not load update bonus status {error:#}") },
)
}
}
})
}
fn build_remind_to_close_expired_position_notification_job(
schedule: &str,
notification_sender: mpsc::Sender<Notification>,
pool: Pool<ConnectionManager<PgConnection>>,
) -> Result<Job, JobSchedulerError> {
Job::new_async(schedule, move |_, _| {
let notification_sender = notification_sender.clone();
let mut conn = match pool.get() {
Ok(conn) => conn,
Err(e) => {
return Box::pin(async move {
tracing::error!("Failed to get connection. Error: {e:#}")
});
}
};
// Note, positions that are expired longer than
// [`crate::node::expired_positions::EXPIRED_POSITION_TIMEOUT`] are set to closing, hence
// those positions will not get notified anymore afterwards.
match orderbook::db::orders::get_all_matched_market_orders_by_order_reason(
&mut conn,
vec![commons::OrderReason::Expired],
) {
Ok(orders) => Box::pin({
async move {
for order in orders {
tracing::debug!(trader_id=%order.trader_id, "Sending reminder to close expired position.");
if let Err(e) = notification_sender
.send(Notification::new(
order.trader_id,
NotificationKind::PositionExpired,
))
.await
{
tracing::error!(
"Failed to send {:?} notification: {e:?}",
NotificationKind::PositionExpired
);
}
}
}
}),
Err(error) => Box::pin(async move {
tracing::error!("Could not load positions with fcm token {error:#}")
}),
}
})
}
fn build_remind_to_close_liquidated_position_notification_job(
schedule: &str,
notification_sender: mpsc::Sender<Notification>,
pool: Pool<ConnectionManager<PgConnection>>,
) -> Result<Job, JobSchedulerError> {
Job::new_async(schedule, move |_, _| {
let notification_sender = notification_sender.clone();
let mut conn = match pool.get() {
Ok(conn) => conn,
Err(e) => {
return Box::pin(async move {
tracing::error!("Failed to get connection. Error: {e:#}")
});
}
};
// Note, positions that are liquidated longer than
// [`crate::node::liquidated_positions::LIQUIDATED_POSITION_TIMEOUT`] are set to closing,
// hence those positions will not get notified anymore afterwards.
match orderbook::db::orders::get_all_matched_market_orders_by_order_reason(
&mut conn,
vec![
commons::OrderReason::TraderLiquidated,
commons::OrderReason::CoordinatorLiquidated,
],
) {
Ok(orders) => Box::pin({
async move {
for order in orders {
tracing::debug!(trader_id=%order.trader_id, "Sending reminder to close liquidated position.");
let notification_kind = NotificationKind::Custom {
title: "Pending liquidation 💸".to_string(),
message: "Open your app to execute the liquidation ".to_string(),
};
if let Err(e) = notification_sender
.send(Notification::new(
order.trader_id,
notification_kind.clone(),
))
.await
{
tracing::error!(
"Failed to send {:?} notification: {e:?}",
notification_kind
);
}
}
}
}),
Err(error) => Box::pin(async move {
tracing::error!("Could not load orders with fcm token {error:#}")
}),
}
})
}
fn build_metrics_collector_job(
schedule: &str,
pool: Pool<ConnectionManager<PgConnection>>,
node: Node,
) -> Result<Job, JobSchedulerError> {
Job::new_async(schedule, move |_, _| {
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => {
return Box::pin(async move {
tracing::error!("Failed to get connection. Error: {e:#}")
});
}
};
let node = node.clone();
Box::pin({
async move {
match collect_metrics(conn, node) {
Ok(_) => {}
Err(error) => {
tracing::error!("Failed collecting metrics {error:#}");
}
}
}
})
})
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/dlc_handler.rs | coordinator/src/dlc_handler.rs | use crate::db;
use crate::node::storage::NodeStorage;
use crate::storage::CoordinatorTenTenOneStorage;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::Pool;
use diesel::PgConnection;
use dlc_manager::channel::signed_channel::SignedChannel;
use dlc_manager::channel::signed_channel::SignedChannelState;
use futures::future::RemoteHandle;
use futures::FutureExt;
use std::sync::Arc;
use tokio::sync::broadcast;
use tokio::sync::broadcast::error::RecvError;
use xxi_node::bitcoin_conversion::to_secp_pk_29;
use xxi_node::dlc_message::DlcMessage;
use xxi_node::dlc_message::SerializedDlcMessage;
use xxi_node::message_handler::TenTenOneMessage;
use xxi_node::node::dlc_channel::send_dlc_message;
use xxi_node::node::event::NodeEvent;
use xxi_node::node::Node;
/// The DlcHandler is responsible for sending dlc messages and marking received ones as
/// processed. It's main purpose is to ensure the following.
///
/// 1. Mark all received inbound messages as processed.
/// 2. Save the last outbound dlc message, so it can be resend on the next reconnect.
/// 3. Check if a receive message has already been processed and if so inform to skip the message.
#[derive(Clone)]
pub struct DlcHandler {
node: Arc<
Node<
bdk_file_store::Store<bdk::wallet::ChangeSet>,
CoordinatorTenTenOneStorage,
NodeStorage,
>,
>,
pool: Pool<ConnectionManager<PgConnection>>,
}
impl DlcHandler {
pub fn new(
pool: Pool<ConnectionManager<PgConnection>>,
node: Arc<
Node<
bdk_file_store::Store<bdk::wallet::ChangeSet>,
CoordinatorTenTenOneStorage,
NodeStorage,
>,
>,
) -> Self {
DlcHandler { node, pool }
}
}
/// [`spawn_handling_outbound_dlc_messages`] handles sending outbound dlc messages as well as
/// keeping track of what dlc messages have already been processed and what was the last outbound
/// dlc message so it can be resend on reconnect.
pub fn spawn_handling_outbound_dlc_messages(
dlc_handler: DlcHandler,
mut receiver: broadcast::Receiver<NodeEvent>,
) -> RemoteHandle<()> {
let (fut, remote_handle) = async move {
loop {
match receiver.recv().await {
Ok(NodeEvent::Connected { peer }) => {
if let Err(e) = dlc_handler.on_connect(peer) {
tracing::error!(peer=%peer, "Failed to process on connect event. {e:#}");
}
}
Ok(NodeEvent::SendDlcMessage { peer, msg }) => {
if let Err(e) = dlc_handler.send_dlc_message(peer, msg) {
tracing::error!(peer=%peer, "Failed to process send dlc message event. {e:#}");
}
}
Ok(NodeEvent::StoreDlcMessage { peer, msg }) => {
if let Err(e) = dlc_handler.store_dlc_message(peer, msg) {
tracing::error!(peer=%peer, "Failed to store dlc message. {e:#}");
}
}
Ok(NodeEvent::SendLastDlcMessage { peer }) => {
if let Err(e) = dlc_handler.send_last_dlc_message(peer) {
tracing::error!(peer=%peer, "Failed to send last dlc message. {e:#}")
}
}
Ok(NodeEvent::DlcChannelEvent { .. }) => {} // ignored
Err(RecvError::Lagged(skipped)) => {
tracing::warn!("Skipped {skipped} messages");
}
Err(RecvError::Closed) => {
tracing::error!("Lost connection to sender!");
break;
}
}
}
}.remote_handle();
tokio::spawn(fut);
remote_handle
}
impl DlcHandler {
pub fn send_dlc_message(&self, peer: PublicKey, msg: TenTenOneMessage) -> Result<()> {
self.store_dlc_message(peer, msg.clone())?;
send_dlc_message(
&self.node.dlc_message_handler,
&self.node.peer_manager,
peer,
msg,
);
Ok(())
}
pub fn store_dlc_message(&self, peer: PublicKey, msg: TenTenOneMessage) -> Result<()> {
let mut conn = self.pool.get()?;
let serialized_outbound_message = SerializedDlcMessage::try_from(&msg)?;
let outbound_msg = DlcMessage::new(peer, serialized_outbound_message.clone(), false)?;
db::dlc_messages::insert(&mut conn, outbound_msg)?;
db::last_outbound_dlc_message::upsert(&mut conn, &peer, serialized_outbound_message)
}
pub fn send_last_dlc_message(&self, peer: PublicKey) -> Result<()> {
let mut conn = self.pool.get()?;
let last_serialized_message = db::last_outbound_dlc_message::get(&mut conn, &peer)?;
if let Some(last_serialized_message) = last_serialized_message {
let message = TenTenOneMessage::try_from(&last_serialized_message)?;
send_dlc_message(
&self.node.dlc_message_handler,
&self.node.peer_manager,
peer,
message,
);
} else {
tracing::debug!(%peer, "No last dlc message found. Nothing todo.");
}
Ok(())
}
pub fn on_connect(&self, peer: PublicKey) -> Result<()> {
let signed_dlc_channels = self.node.list_signed_dlc_channels()?;
if let Some(SignedChannel {
channel_id,
state:
SignedChannelState::CollaborativeCloseOffered {
is_offer: false, ..
},
..
}) = signed_dlc_channels
.iter()
.find(|c| c.counter_party == to_secp_pk_29(peer))
{
tracing::info!("Accepting pending dlc channel close offer.");
// Pending dlc channel close offer with the intend to close the dlc channel
// on-chain
// TODO(bonomat): we should verify that the proposed amount is acceptable
self.node
.accept_dlc_channel_collaborative_close(channel_id)?;
return Ok(());
}
self.send_last_dlc_message(peer)?;
Ok(())
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/routing_fee/mod.rs | coordinator/src/routing_fee/mod.rs | pub mod models;
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/routing_fee/models.rs | coordinator/src/routing_fee/models.rs | use lightning::ln::ChannelId;
use time::OffsetDateTime;
#[derive(Debug)]
pub struct NewRoutingFee {
pub amount_msats: u64,
pub prev_channel_id: Option<ChannelId>,
pub next_channel_id: Option<ChannelId>,
}
#[derive(Debug)]
pub struct RoutingFee {
pub id: i32,
pub amount_msats: u64,
pub prev_channel_id: Option<ChannelId>,
pub next_channel_id: Option<ChannelId>,
pub created_at: OffsetDateTime,
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/node/expired_positions.rs | coordinator/src/node/expired_positions.rs | use crate::db;
use crate::node::Node;
use crate::orderbook;
use crate::orderbook::db::orders;
use crate::orderbook::trading::NewOrderMessage;
use crate::position::models::Position;
use crate::position::models::PositionState;
use anyhow::anyhow;
use anyhow::Context;
use anyhow::Result;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::Decimal;
use std::ops::Add;
use time::Duration;
use time::OffsetDateTime;
use tokio::sync::mpsc;
use xxi_node::commons::average_execution_price;
use xxi_node::commons::Match;
use xxi_node::commons::MatchState;
use xxi_node::commons::NewMarketOrder;
use xxi_node::commons::OrderReason;
use xxi_node::commons::OrderState;
/// The timeout before we give up on closing an expired position collaboratively. This value should
/// not be larger than our refund transaction time lock.
pub const EXPIRED_POSITION_TIMEOUT: Duration = Duration::days(7);
pub async fn close(node: Node, trading_sender: mpsc::Sender<NewOrderMessage>) -> Result<()> {
let mut conn = node.pool.get()?;
let positions = db::positions::Position::get_all_open_positions(&mut conn)
.context("Failed to fetch open positions")?;
let positions = positions
.into_iter()
.filter(|p| {
p.position_state == PositionState::Open
&& OffsetDateTime::now_utc().ge(&p.expiry_timestamp)
})
.collect::<Vec<Position>>();
for position in positions.into_iter() {
if let Some(order) = orderbook::db::orders::get_by_trader_id_and_state(
&mut conn,
position.trader,
OrderState::Matched,
)? {
let trader_id = order.trader_id.to_string();
let order_id = order.id.to_string();
if order.expiry < OffsetDateTime::now_utc() {
tracing::warn!(trader_id, order_id, "Matched order expired! Giving up on that position, looks like the corresponding dlc channel has to get force closed.");
orderbook::db::orders::set_order_state(&mut conn, order.id, OrderState::Expired)?;
orderbook::db::matches::set_match_state_by_order_id(
&mut conn,
order.id,
MatchState::Failed,
)?;
let matches = orderbook::db::matches::get_matches_by_order_id(&mut conn, order.id)?;
let matches: Vec<Match> = matches.into_iter().map(Match::from).collect();
db::positions::Position::set_open_position_to_closing(
&mut conn,
&position.trader,
Some(average_execution_price(matches)),
)?;
continue;
} else {
tracing::trace!(trader_id, order_id, "Skipping expired position as match has already been found. Waiting for trader to come online to execute the trade.");
continue;
}
}
tracing::debug!(trader_pk=%position.trader, %position.expiry_timestamp, "Attempting to close expired position");
let new_order = NewMarketOrder {
id: uuid::Uuid::new_v4(),
contract_symbol: position.contract_symbol,
quantity: Decimal::try_from(position.quantity).expect("to fit into decimal"),
trader_id: position.trader,
direction: position.trader_direction.opposite(),
leverage: Decimal::from_f32(position.trader_leverage).expect("to fit into decimal"),
// This order can basically not expire, but if the user does not come back online within
// a certain time period we can assume the channel to be abandoned and we should force
// close.
expiry: OffsetDateTime::now_utc().add(EXPIRED_POSITION_TIMEOUT),
stable: position.stable,
};
let order = orders::insert_market_order(&mut conn, new_order.clone(), OrderReason::Expired)
.map_err(|e| anyhow!(e))
.context("Failed to insert expired order into DB")?;
let message = NewOrderMessage {
order,
channel_opening_params: None,
order_reason: OrderReason::Expired,
};
if let Err(e) = trading_sender.send(message).await {
tracing::error!(order_id=%new_order.id, trader_id=%new_order.trader_id, "Failed to submit new order for closing expired position. Error: {e:#}");
continue;
}
}
Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/node/storage.rs | coordinator/src/node/storage.rs | use crate::db;
use anyhow::anyhow;
use anyhow::Result;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::Pool;
use diesel::PgConnection;
use lightning::chain::transaction::OutPoint;
use lightning::sign::SpendableOutputDescriptor;
use xxi_node::node;
use xxi_node::transaction::Transaction;
#[derive(Clone)]
pub struct NodeStorage {
pool: Pool<ConnectionManager<PgConnection>>,
}
impl NodeStorage {
pub fn new(pool: Pool<ConnectionManager<PgConnection>>) -> Self {
Self { pool }
}
}
impl node::Storage for NodeStorage {
// Spendable outputs
fn insert_spendable_output(&self, output: SpendableOutputDescriptor) -> Result<()> {
let mut conn = self.pool.get()?;
db::spendable_outputs::insert(&mut conn, output)?;
Ok(())
}
fn get_spendable_output(
&self,
outpoint: &OutPoint,
) -> Result<Option<SpendableOutputDescriptor>> {
let mut conn = self.pool.get()?;
db::spendable_outputs::get(&mut conn, outpoint)
}
fn delete_spendable_output(&self, outpoint: &OutPoint) -> Result<()> {
let mut conn = self.pool.get()?;
db::spendable_outputs::delete(&mut conn, outpoint)
}
fn all_spendable_outputs(&self) -> Result<Vec<SpendableOutputDescriptor>> {
let mut conn = self.pool.get()?;
db::spendable_outputs::get_all(&mut conn)
}
// Transaction
fn upsert_transaction(&self, transaction: Transaction) -> Result<()> {
let mut conn = self.pool.get()?;
db::transactions::upsert(transaction.into(), &mut conn)
}
fn get_transaction(&self, txid: &str) -> Result<Option<Transaction>> {
let mut conn = self.pool.get()?;
let transaction = db::transactions::get(txid, &mut conn)
.map_err(|e| anyhow!("{e:#}"))?
.map(|t| t.into());
Ok(transaction)
}
fn all_transactions_without_fees(&self) -> Result<Vec<Transaction>> {
let mut conn = self.pool.get()?;
let transactions = db::transactions::get_all_without_fees(&mut conn)?
.into_iter()
.map(|t| t.into())
.collect::<Vec<_>>();
Ok(transactions)
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/node/invoice.rs | coordinator/src/node/invoice.rs | use crate::db;
use crate::message::OrderbookMessage;
use crate::notifications::NotificationKind;
use bitcoin::Amount;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::Pool;
use diesel::PgConnection;
use futures_util::TryStreamExt;
use lnd_bridge::InvoiceState;
use lnd_bridge::LndBridge;
use tokio::sync::mpsc;
use tokio::task::spawn_blocking;
use xxi_node::commons;
use xxi_node::commons::Message;
/// Watches a hodl invoice with the given r_hash
pub fn spawn_invoice_watch(
pool: Pool<ConnectionManager<PgConnection>>,
trader_sender: mpsc::Sender<OrderbookMessage>,
lnd_bridge: LndBridge,
invoice_params: commons::HodlInvoiceParams,
) {
tokio::spawn(async move {
let trader_pubkey = invoice_params.trader_pubkey;
let r_hash = invoice_params.r_hash;
tracing::info!(r_hash, "Subscribing to invoice updates");
let mut stream = lnd_bridge.subscribe_to_invoice(r_hash.clone());
loop {
match stream.try_next().await {
Ok(Some(invoice)) => match invoice.state {
InvoiceState::Open => {
tracing::debug!(%trader_pubkey, r_hash, "Watching hodl invoice.");
continue;
}
InvoiceState::Settled => {
tracing::info!(%trader_pubkey, r_hash, "Accepted hodl invoice has been settled.");
if let Err(e) = spawn_blocking({
let r_hash = r_hash.clone();
move || {
let mut conn = pool.get()?;
db::hodl_invoice::update_hodl_invoice_to_settled(
&mut conn, r_hash,
)?;
anyhow::Ok(())
}
})
.await
.expect("task to finish")
{
tracing::error!(
r_hash,
"Failed to set hodl invoice to failed. Error: {e:#}"
);
}
break;
}
InvoiceState::Canceled => {
tracing::warn!(%trader_pubkey, r_hash, "Pending hodl invoice has been canceled.");
if let Err(e) = spawn_blocking({
let r_hash = r_hash.clone();
move || {
let mut conn = pool.get()?;
db::hodl_invoice::update_hodl_invoice_to_canceled(
&mut conn, r_hash,
)?;
anyhow::Ok(())
}
})
.await
.expect("task to finish")
{
tracing::error!(
r_hash,
"Failed to set hodl invoice to failed. Error: {e:#}"
);
}
break;
}
InvoiceState::Accepted => {
tracing::info!(%trader_pubkey, r_hash, "Pending hodl invoice has been accepted.");
if let Err(e) = trader_sender.send(OrderbookMessage::TraderMessage {
trader_id: trader_pubkey,
message: Message::LnPaymentReceived {
r_hash: r_hash.clone(),
amount: Amount::from_sat(invoice.amt_paid_sat),
},
notification: Some(NotificationKind::Custom { title: "Open your DLC channel now!".to_string(), message: "Pending payment received, open the app to open your DLC channel.".to_string() }),
}).await {
tracing::error!(%trader_pubkey, r_hash, "Failed to send payment received event to app. Error: {e:#}")
}
continue;
}
},
Ok(None) => {
tracing::error!(%trader_pubkey, r_hash, "Websocket sender died.");
break;
}
Err(e) => {
tracing::error!(%trader_pubkey, r_hash, "Websocket closed the connection. Error: {e:#}");
break;
}
}
}
tracing::info!(%trader_pubkey, r_hash, "Stopping hodl invoice watch.");
});
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/node/unrealized_pnl.rs | coordinator/src/node/unrealized_pnl.rs | use crate::db;
use crate::node::Node;
use crate::position::models::Position;
use anyhow::Context;
use anyhow::Result;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::PooledConnection;
use diesel::PgConnection;
use time::OffsetDateTime;
use xxi_node::bitmex_client::BitmexClient;
use xxi_node::bitmex_client::Quote;
pub async fn sync(node: Node) -> Result<()> {
let mut conn = node.pool.get()?;
let positions = db::positions::Position::get_all_open_or_closing_positions(&mut conn)?;
// TODO(holzeis): we should not use the bitmex quote here, but rather our own orderbook.
let current_quote = BitmexClient::get_quote(&node.inner.network, &OffsetDateTime::now_utc())
.await
.context("Failed to fetch quote from BitMEX")?;
for position in positions.iter() {
if let Err(e) = sync_position(&mut conn, position, current_quote.clone()) {
tracing::error!(position_id=%position.id, ?current_quote, "Failed to update position's unrealized pnl in database: {e:#}")
}
}
Ok(())
}
/// Persist the trader's unrealized PnL for a single position.
///
/// The trade is zero-sum between coordinator and trader, so the trader's PnL
/// is the negation of the coordinator's PnL.
fn sync_position(
    conn: &mut PooledConnection<ConnectionManager<PgConnection>>,
    position: &Position,
    quote: Quote,
) -> Result<()> {
    // Whatever the coordinator gains, the trader loses, and vice versa.
    let trader_pnl = -position.calculate_coordinator_pnl(quote)?;

    db::positions::Position::update_unrealized_pnl(conn, position.id, trader_pnl)
        .context("Failed to update unrealized pnl in db")
        .map(|_| ())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/node/rollover.rs | coordinator/src/node/rollover.rs | use crate::check_version::check_version;
use crate::db;
use crate::db::positions;
use crate::decimal_from_f32;
use crate::dlc_protocol;
use crate::dlc_protocol::RolloverParams;
use crate::funding_fee::funding_fee_from_funding_fee_events;
use crate::funding_fee::get_outstanding_funding_fee_events;
use crate::node::Node;
use crate::notifications::Notification;
use crate::notifications::NotificationKind;
use crate::payout_curve::build_contract_descriptor;
use crate::position::models::Position;
use crate::position::models::PositionState;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Network;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::Pool;
use diesel::r2d2::PooledConnection;
use diesel::PgConnection;
use dlc_manager::contract::contract_input::ContractInput;
use dlc_manager::contract::contract_input::ContractInputInfo;
use dlc_manager::contract::contract_input::OracleInput;
use dlc_manager::contract::Contract;
use dlc_manager::DlcChannelId;
use futures::future::RemoteHandle;
use futures::FutureExt;
use rust_decimal::Decimal;
use time::OffsetDateTime;
use tokio::sync::broadcast;
use tokio::sync::broadcast::error::RecvError;
use tokio::sync::mpsc;
use tokio::task::spawn_blocking;
use xxi_node::commons;
use xxi_node::node::event::NodeEvent;
use xxi_node::node::ProtocolId;
pub fn monitor(
pool: Pool<ConnectionManager<PgConnection>>,
mut receiver: broadcast::Receiver<NodeEvent>,
notifier: mpsc::Sender<Notification>,
network: Network,
node: Node,
) -> RemoteHandle<()> {
let (fut, remote_handle) = async move {
loop {
match receiver.recv().await {
Ok(NodeEvent::Connected { peer }) => {
tokio::spawn({
let notifier = notifier.clone();
let node = node.clone();
let pool = pool.clone();
async move {
if let Err(e) = node
.check_if_eligible_for_rollover(pool, notifier, peer, network)
.await
{
tracing::error!(
trader_id = peer.to_string(),
"Failed to check if eligible for rollover. Error: {e:#}"
);
}
}
});
}
Ok(_) => {} // ignoring other node events
Err(RecvError::Closed) => {
tracing::error!("Node event sender died! Channel closed.");
break;
}
Err(RecvError::Lagged(skip)) => tracing::warn!(%skip,
"Lagging behind on node events."
),
}
}
}
.remote_handle();
tokio::spawn(fut);
remote_handle
}
impl Node {
    /// Check whether `trader_id` holds a position that may be rolled over and, if so, run the
    /// rollover check for it.
    ///
    /// Traders on an outdated app version are skipped, as are traders without an `Open` or
    /// `Rollover` position.
    async fn check_if_eligible_for_rollover(
        &self,
        pool: Pool<ConnectionManager<PgConnection>>,
        notifier: mpsc::Sender<Notification>,
        trader_id: PublicKey,
        network: Network,
    ) -> Result<()> {
        // Getting a connection from the pool can block; keep it off the async executor.
        let mut conn = spawn_blocking(move || pool.get())
            .await
            .expect("task to complete")?;

        tracing::debug!(%trader_id, "Checking if the user's position is eligible for rollover");

        // An outdated client may not support the current rollover protocol.
        if check_version(&mut conn, &trader_id).is_err() {
            tracing::info!(
                %trader_id,
                "User is not on the latest version. \
                 Will not check if their position is eligible for rollover"
            );
            return Ok(());
        }

        // A rollover applies to open positions and to positions whose earlier rollover is still
        // in flight.
        let position = match positions::Position::get_position_by_trader(
            &mut conn,
            trader_id,
            vec![PositionState::Open, PositionState::Rollover],
        )? {
            Some(position) => position,
            // No position: nothing to roll over.
            None => return Ok(()),
        };

        self.check_rollover(&mut conn, position, network, &notifier, None)
            .await
    }
    /// Propose a rollover for `position` if now is inside the rollover window and the position
    /// has not been rolled over to the next cycle yet.
    ///
    /// If `notification` is provided the trader is additionally push-notified. The rollover
    /// proposal itself is only sent when the trader is currently connected; otherwise it will
    /// be retried on their next connect.
    pub async fn check_rollover(
        &self,
        connection: &mut PooledConnection<ConnectionManager<PgConnection>>,
        position: Position,
        network: Network,
        notifier: &mpsc::Sender<Notification>,
        notification: Option<NotificationKind>,
    ) -> Result<()> {
        let trader_id = position.trader;
        let expiry_timestamp = position.expiry_timestamp;

        let signed_channel = self.inner.get_signed_channel_by_trader_id(trader_id)?;

        if commons::is_eligible_for_rollover(OffsetDateTime::now_utc(), network)
            // not expired
            && OffsetDateTime::now_utc() < expiry_timestamp
        {
            let next_expiry = commons::calculate_next_expiry(OffsetDateTime::now_utc(), network);
            // A position that already expires at (or after) the next cycle's expiry has been
            // rolled over before; nothing to do.
            if expiry_timestamp >= next_expiry {
                tracing::trace!(%trader_id, "Position has already been rolled over");
                return Ok(());
            }

            tracing::debug!(%trader_id, "Push notifying user about rollover");

            if let Some(notification) = notification {
                // Best effort: a failed push notification does not stop the rollover proposal.
                if let Err(e) = notifier
                    .send(Notification::new(trader_id, notification))
                    .await
                {
                    tracing::warn!("Failed to push notify trader. Error: {e:#}");
                }
            }

            if self.is_connected(trader_id) {
                tracing::info!(%trader_id, "Proposing to rollover DLC channel");

                self.propose_rollover(
                    connection,
                    &signed_channel.channel_id,
                    position,
                    self.inner.network,
                )
                .await?;
            } else {
                // The check runs again on the trader's next connect.
                tracing::warn!(%trader_id, "Skipping rollover, user is not connected.");
            }
        }

        Ok(())
    }
    /// Initiates the rollover protocol with the app.
    ///
    /// Builds a new contract input from the existing confirmed contract (same oracle and fee
    /// rate, next expiry), applies any outstanding funding fees to the position and the channel
    /// reserves, records the protocol start in the database and marks the position as rolling
    /// over.
    pub async fn propose_rollover(
        &self,
        conn: &mut PooledConnection<ConnectionManager<PgConnection>>,
        dlc_channel_id: &DlcChannelId,
        position: Position,
        network: Network,
    ) -> Result<()> {
        let trader_pubkey = position.trader;
        let next_expiry = commons::calculate_next_expiry(OffsetDateTime::now_utc(), network);

        // Reuse the oracle and the fee rate of the contract that is being rolled over.
        let (oracle_pk, contract_tx_fee_rate) = {
            let old_contract = self.inner.get_contract_by_dlc_channel_id(dlc_channel_id)?;
            let old_offered_contract = match old_contract {
                Contract::Confirmed(contract) => contract.accepted_contract.offered_contract,
                _ => bail!("Cannot rollover a contract that is not confirmed"),
            };

            let contract_info = old_offered_contract
                .contract_info
                .first()
                .context("contract info to exist on a signed contract")?;
            let oracle_announcement = contract_info
                .oracle_announcements
                .first()
                .context("oracle announcement to exist on signed contract")?;

            (
                oracle_announcement.oracle_public_key,
                old_offered_contract.fee_rate_per_vb,
            )
        };

        // Scoped read so the settings lock is released immediately.
        let maintenance_margin_rate = { self.settings.read().await.maintenance_margin_rate };
        let maintenance_margin_rate =
            Decimal::try_from(maintenance_margin_rate).expect("to fit into decimal");

        let funding_fee_events =
            get_outstanding_funding_fee_events(conn, trader_pubkey, position.id)?;

        let funding_fee = funding_fee_from_funding_fee_events(&funding_fee_events);

        // Outstanding funding fees change margins and liquidation prices before the new
        // contract is constructed.
        let position = position.apply_funding_fee(funding_fee, maintenance_margin_rate);

        let (collateral_reserve_coordinator, collateral_reserve_trader) =
            self.apply_funding_fee_to_channel(*dlc_channel_id, funding_fee)?;

        let Position {
            coordinator_margin: margin_coordinator,
            trader_margin: margin_trader,
            coordinator_leverage: leverage_coordinator,
            trader_leverage: leverage_trader,
            coordinator_liquidation_price: liquidation_price_coordinator,
            trader_liquidation_price: liquidation_price_trader,
            ..
        } = position;

        let contract_descriptor = build_contract_descriptor(
            Decimal::try_from(position.average_entry_price).expect("to fit"),
            margin_coordinator,
            margin_trader,
            leverage_coordinator,
            leverage_trader,
            position.trader_direction,
            collateral_reserve_coordinator,
            collateral_reserve_trader,
            position.quantity,
            position.contract_symbol,
        )
        .context("Could not build contract descriptor")?;

        // Oracle event ID for the next cycle, e.g. symbol + unix timestamp of the new expiry.
        let next_event_id = format!(
            "{}{}",
            position.contract_symbol,
            next_expiry.unix_timestamp()
        );

        let new_contract_input = ContractInput {
            // Each party's collateral is their margin plus their reserve.
            offer_collateral: (margin_coordinator + collateral_reserve_coordinator).to_sat(),
            accept_collateral: (margin_trader + collateral_reserve_trader).to_sat(),
            fee_rate: contract_tx_fee_rate,
            contract_infos: vec![ContractInputInfo {
                contract_descriptor,
                oracles: OracleInput {
                    public_keys: vec![oracle_pk],
                    event_id: next_event_id,
                    threshold: 1,
                },
            }],
        };

        let protocol_id = ProtocolId::new();

        tracing::debug!(
            %trader_pubkey,
            %protocol_id,
            ?funding_fee,
            "DLC channel rollover"
        );

        // Link the new protocol to the channel's previous protocol, if any.
        let channel = self.inner.get_dlc_channel_by_id(dlc_channel_id)?;
        let previous_id = match channel.get_reference_id() {
            Some(reference_id) => Some(ProtocolId::try_from(reference_id)?),
            None => None,
        };

        // Keep the event IDs so they can be marked as processed once the protocol is recorded.
        let funding_fee_event_ids = funding_fee_events
            .iter()
            .map(|event| event.id)
            .collect::<Vec<_>>();

        let funding_fee_events = funding_fee_events
            .into_iter()
            .map(xxi_node::message_handler::FundingFeeEvent::from)
            .collect();

        let temporary_contract_id = self
            .inner
            .propose_rollover(
                dlc_channel_id,
                new_contract_input,
                protocol_id.into(),
                funding_fee_events,
            )
            .await?;

        let protocol_executor = dlc_protocol::DlcProtocolExecutor::new(self.pool.clone());
        protocol_executor
            .start_rollover(
                protocol_id,
                previous_id,
                &temporary_contract_id,
                dlc_channel_id,
                RolloverParams {
                    protocol_id,
                    trader_pubkey,
                    margin_coordinator,
                    margin_trader,
                    leverage_coordinator: decimal_from_f32(leverage_coordinator),
                    leverage_trader: decimal_from_f32(leverage_trader),
                    liquidation_price_coordinator: decimal_from_f32(liquidation_price_coordinator),
                    liquidation_price_trader: decimal_from_f32(liquidation_price_trader),
                    expiry_timestamp: next_expiry,
                },
                funding_fee_event_ids,
            )
            .context("Failed to insert start of rollover protocol in dlc_protocols table")?;

        db::positions::Position::rollover_position(conn, trader_pubkey, &next_expiry)
            .context("Failed to set position state to rollover")?;

        // Trigger (re-)delivery of the last DLC message to the trader.
        self.inner
            .event_handler
            .publish(NodeEvent::SendLastDlcMessage {
                peer: trader_pubkey,
            });

        Ok(())
    }
pub fn is_in_rollover(&self, trader_id: PublicKey) -> Result<bool> {
let mut conn = self.pool.get()?;
let position = db::positions::Position::get_position_by_trader(
&mut conn,
trader_id,
vec![PositionState::Rollover],
)?;
Ok(position.is_some())
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/node/channel.rs | coordinator/src/node/channel.rs | use crate::db;
use crate::dlc_protocol;
use crate::dlc_protocol::DlcProtocolType;
use crate::node::Node;
use crate::position::models::PositionState;
use crate::FundingFee;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Amount;
use bitcoin::ScriptBuf;
use bitcoin::Txid;
use bitcoin_old::Transaction;
use diesel::PgConnection;
use dlc_manager::channel::signed_channel::SignedChannel;
use dlc_manager::channel::signed_channel::SignedChannelState;
use dlc_manager::channel::Channel;
use dlc_manager::channel::ClosedChannel;
use dlc_manager::channel::ClosedPunishedChannel;
use dlc_manager::channel::ClosingChannel;
use dlc_manager::channel::SettledClosingChannel;
use dlc_manager::contract::ClosedContract;
use dlc_manager::contract::Contract;
use dlc_manager::contract::PreClosedContract;
use dlc_manager::DlcChannelId;
use dlc_manager::ReferenceId;
use rust_decimal::Decimal;
use time::OffsetDateTime;
use tokio::sync::broadcast::error::RecvError;
use xxi_node::bitcoin_conversion::to_secp_pk_30;
use xxi_node::bitcoin_conversion::to_txid_30;
use xxi_node::node::event::NodeEvent;
use xxi_node::node::ProtocolId;
use xxi_node::storage::DlcChannelEvent;
/// Lifecycle of a DLC channel as tracked in the coordinator's database.
///
/// Transitions are driven by [`Node::process_dlc_channel_event`].
pub enum DlcChannelState {
    /// The channel has been offered but is not established yet.
    Pending,
    /// The channel is established and usable.
    Open,
    /// A collaborative or force close has been initiated.
    Closing,
    /// The channel has been closed.
    Closed,
    /// The channel protocol failed (failed accept/sign, or the channel was deleted).
    Failed,
    /// The channel offer was cancelled.
    Cancelled,
}
/// Database representation of a DLC channel between the coordinator and a trader, including the
/// transactions observed over its lifetime.
pub struct DlcChannel {
    pub channel_id: DlcChannelId,
    /// The trader's node public key.
    pub trader: PublicKey,
    pub channel_state: DlcChannelState,
    /// The trader's collateral reserve (their usable channel balance outside the margin).
    pub trader_reserve_sats: Amount,
    /// The coordinator's collateral reserve.
    pub coordinator_reserve_sats: Amount,
    /// Collateral the coordinator contributed when funding the channel.
    pub coordinator_funding_sats: Amount,
    /// Collateral the trader contributed when funding the channel.
    pub trader_funding_sats: Amount,
    pub funding_txid: Option<Txid>,
    /// Transaction that closed the channel (collaboratively or unilaterally), if any.
    pub close_txid: Option<Txid>,
    /// Settle transaction of a force close from a settled state, if any.
    pub settle_txid: Option<Txid>,
    /// Buffer transaction of a force close with an open position, if any.
    pub buffer_txid: Option<Txid>,
    pub claim_txid: Option<Txid>,
    /// Punishment transaction, set if a counterparty published a revoked state.
    pub punish_txid: Option<Txid>,
    pub created_at: OffsetDateTime,
    pub updated_at: OffsetDateTime,
}
impl Node {
pub async fn force_close_dlc_channel(&self, channel_id: DlcChannelId) -> Result<()> {
self.inner.close_dlc_channel(channel_id, true).await?;
Ok(())
}
    /// Collaboratively close the given DLC channel and record the start of the close-channel
    /// protocol in the database.
    pub async fn close_dlc_channel(&self, channel_id: DlcChannelId) -> Result<()> {
        let channel = self.inner.get_dlc_channel_by_id(&channel_id)?;

        // Link the new protocol to the channel's previous protocol, if any.
        let previous_id = channel
            .get_reference_id()
            .map(ProtocolId::try_from)
            .transpose()?;

        // `false`: collaborative close (in contrast to `force_close_dlc_channel`).
        let protocol_id = self.inner.close_dlc_channel(channel_id, false).await?;

        let protocol_executor = dlc_protocol::DlcProtocolExecutor::new(self.pool.clone());
        protocol_executor.start_close_channel_protocol(
            protocol_id,
            previous_id,
            &channel.get_id(),
            &to_secp_pk_30(channel.get_counter_party_id()),
        )?;

        Ok(())
    }
    /// Spawn a background task that forwards every [`NodeEvent::DlcChannelEvent`] to
    /// [`Self::process_dlc_channel_event`].
    ///
    /// Processing errors are logged and the watcher keeps running; it only stops once the
    /// event channel closes.
    pub fn spawn_watch_dlc_channel_events_task(&self) {
        let mut receiver = self.inner.event_handler.subscribe();

        tokio::spawn({
            let node = self.clone();
            async move {
                loop {
                    match receiver.recv().await {
                        Ok(NodeEvent::DlcChannelEvent { dlc_channel_event }) => {
                            if let Err(e) = node.process_dlc_channel_event(dlc_channel_event) {
                                tracing::error!(
                                    ?dlc_channel_event,
                                    "Failed to process DLC channel event. Error: {e:#}"
                                );
                            }
                        }
                        Ok(NodeEvent::Connected { .. })
                        | Ok(NodeEvent::SendDlcMessage { .. })
                        | Ok(NodeEvent::StoreDlcMessage { .. })
                        | Ok(NodeEvent::SendLastDlcMessage { .. }) => {} // ignored
                        Err(RecvError::Lagged(skipped)) => {
                            tracing::warn!("Skipped {skipped} messages");
                        }
                        Err(RecvError::Closed) => {
                            tracing::error!("Lost connection to sender!");
                            break;
                        }
                    }
                }
            }
        });
    }
    /// Mirror a DLC channel state change into the coordinator's `dlc_channels` database table.
    ///
    /// Events without a reference (protocol) ID cannot be attributed to a channel and are
    /// rejected. Intermediate protocol states are ignored; only states that change what we
    /// persist about the channel are handled.
    pub fn process_dlc_channel_event(&self, dlc_channel_event: DlcChannelEvent) -> Result<()> {
        let mut conn = self.pool.get()?;

        let protocol_id = match dlc_channel_event.get_reference_id() {
            Some(reference_id) => reference_id,
            None => {
                bail!("Can't process dlc channel event without reference id. dlc_channel_event = {dlc_channel_event:?}");
            }
        };

        if let DlcChannelEvent::Deleted(_) = dlc_channel_event {
            // we need to handle the delete event here, as the corresponding channel isn't existing
            // anymore.
            let protocol_id = ProtocolId::try_from(protocol_id)?;
            db::dlc_channels::set_channel_failed(&mut conn, &protocol_id)?;
            return Ok(());
        }

        // For every other event the channel can still be looked up.
        let channel = &self.inner.get_dlc_channel_by_reference_id(protocol_id)?;

        match dlc_channel_event {
            DlcChannelEvent::Offered(_) => {
                // A new channel offer: record it as pending.
                let open_protocol_id = ProtocolId::try_from(protocol_id)?;
                db::dlc_channels::insert_pending_dlc_channel(
                    &mut conn,
                    &open_protocol_id,
                    &channel.get_id(),
                    &to_secp_pk_30(channel.get_counter_party_id()),
                )?;
            }
            DlcChannelEvent::Established(_) | DlcChannelEvent::Settled(_) => {
                let signed_channel = match channel {
                    Channel::Signed(signed_channel) => signed_channel,
                    channel => {
                        bail!("Dlc channel in unexpected state. dlc_channel = {channel:?}");
                    }
                };

                // Snapshot both parties' usable balances and funding contributions.
                let trader_reserve = self
                    .inner
                    .get_dlc_channel_usable_balance_counterparty(&signed_channel.channel_id)?;
                let coordinator_reserve = self
                    .inner
                    .get_dlc_channel_usable_balance(&signed_channel.channel_id)?;

                let coordinator_funding = Amount::from_sat(signed_channel.own_params.collateral);
                let trader_funding = Amount::from_sat(signed_channel.counter_params.collateral);

                let protocol_id = ProtocolId::try_from(protocol_id)?;
                let dlc_protocol = db::dlc_protocols::get_dlc_protocol(&mut conn, protocol_id)?;
                // What to persist depends on which protocol produced this state.
                match dlc_protocol.protocol_type {
                    DlcProtocolType::OpenChannel { .. } => {
                        // First establishment: the channel becomes open.
                        db::dlc_channels::set_dlc_channel_open(
                            &mut conn,
                            &protocol_id,
                            &channel.get_id(),
                            to_txid_30(signed_channel.fund_tx.txid()),
                            coordinator_reserve,
                            trader_reserve,
                            coordinator_funding,
                            trader_funding,
                        )?;
                    }
                    DlcProtocolType::OpenPosition { .. }
                    | DlcProtocolType::Settle { .. }
                    | DlcProtocolType::Rollover { .. }
                    | DlcProtocolType::ResizePosition { .. } => {
                        // The channel already exists; only the reserves change.
                        db::dlc_channels::update_channel(
                            &mut conn,
                            &channel.get_id(),
                            coordinator_reserve,
                            trader_reserve,
                        )?;
                    }
                    DlcProtocolType::Close { .. } | DlcProtocolType::ForceClose { .. } => {} /* ignored */
                }
            }
            DlcChannelEvent::SettledClosing(_) => {
                // Force close from a settled state: record settle (and, if present, claim) tx.
                let (settle_transaction, claim_transaction) = match channel {
                    Channel::Signed(SignedChannel {
                        state:
                            SignedChannelState::SettledClosing {
                                settle_transaction, ..
                            },
                        ..
                    }) => (settle_transaction, None),
                    Channel::SettledClosing(SettledClosingChannel {
                        settle_transaction,
                        claim_transaction,
                        ..
                    }) => (settle_transaction, Some(claim_transaction)),
                    channel => {
                        bail!("DLC channel in unexpected state. dlc_channel = {channel:?}")
                    }
                };

                db::dlc_channels::set_channel_force_closing_settled(
                    &mut conn,
                    &channel.get_id(),
                    to_txid_30(settle_transaction.txid()),
                    claim_transaction.map(|tx| to_txid_30(tx.txid())),
                )?;
            }
            DlcChannelEvent::Closing(_) => self.handle_closing_event(&mut conn, channel)?,
            DlcChannelEvent::ClosedPunished(_) => {
                let punish_txid = match channel {
                    Channel::ClosedPunished(ClosedPunishedChannel { punish_txid, .. }) => {
                        punish_txid
                    }
                    channel => {
                        bail!("DLC channel in unexpected state. dlc_channel = {channel:?}")
                    }
                };
                db::dlc_channels::set_channel_punished(
                    &mut conn,
                    &channel.get_id(),
                    to_txid_30(*punish_txid),
                )?;
            }
            DlcChannelEvent::CollaborativeCloseOffered(_) => {
                let close_transaction = match channel {
                    Channel::Signed(SignedChannel {
                        state: SignedChannelState::CollaborativeCloseOffered { close_tx, .. },
                        ..
                    }) => close_tx,
                    channel => {
                        bail!("DLC channel in unexpected state. dlc_channel = {channel:?}")
                    }
                };

                db::dlc_channels::set_channel_collab_closing(
                    &mut conn,
                    &channel.get_id(),
                    to_txid_30(close_transaction.txid()),
                )?;
            }
            DlcChannelEvent::Closed(_) | DlcChannelEvent::CounterClosed(_) => {
                self.handle_force_closed_event(&mut conn, channel, protocol_id)?
            }
            DlcChannelEvent::CollaborativelyClosed(_) => {
                self.handle_collaboratively_closed_event(&mut conn, channel, protocol_id)?
            }
            DlcChannelEvent::FailedAccept(_) | DlcChannelEvent::FailedSign(_) => {
                let protocol_id = ProtocolId::try_from(protocol_id)?;
                db::dlc_channels::set_channel_failed(&mut conn, &protocol_id)?;
            }
            DlcChannelEvent::Cancelled(_) => {
                let protocol_id = ProtocolId::try_from(protocol_id)?;
                db::dlc_channels::set_channel_cancelled(&mut conn, &protocol_id)?;
            }
            DlcChannelEvent::Deleted(_) => {} // delete is handled above.
            DlcChannelEvent::Accepted(_)
            | DlcChannelEvent::SettledOffered(_)
            | DlcChannelEvent::SettledReceived(_)
            | DlcChannelEvent::SettledAccepted(_)
            | DlcChannelEvent::SettledConfirmed(_)
            | DlcChannelEvent::RenewOffered(_)
            | DlcChannelEvent::RenewAccepted(_)
            | DlcChannelEvent::RenewConfirmed(_)
            | DlcChannelEvent::RenewFinalized(_) => {} // intermediate state changes are ignored
        }

        Ok(())
    }
    /// Compute the new collateral reserves of both parties after applying `funding_fee` to the
    /// given DLC channel.
    ///
    /// Returns `(collateral_reserve_coordinator, collateral_reserve_trader)`. The paying
    /// party's reserve is unchanged (they pay out of their margin); the earning party's reserve
    /// grows by the fee.
    pub fn apply_funding_fee_to_channel(
        &self,
        dlc_channel_id: DlcChannelId,
        funding_fee: FundingFee,
    ) -> Result<(Amount, Amount)> {
        let collateral_reserve_coordinator =
            self.inner.get_dlc_channel_usable_balance(&dlc_channel_id)?;
        let collateral_reserve_trader = self
            .inner
            .get_dlc_channel_usable_balance_counterparty(&dlc_channel_id)?;

        // The party earning the funding fee adds it to their collateral reserve.
        // Conversely, the party paying the funding fee subtracts it from their margin.
        let reserves = match funding_fee {
            FundingFee::Zero => (collateral_reserve_coordinator, collateral_reserve_trader),
            FundingFee::CoordinatorPays(funding_fee) => {
                // Signed arithmetic to stay safe around zero.
                let funding_fee = funding_fee.to_signed().expect("to fit");

                let collateral_reserve_trader =
                    collateral_reserve_trader.to_signed().expect("to fit");

                let new_collateral_reserve_trader = collateral_reserve_trader + funding_fee;
                let new_collateral_reserve_trader =
                    new_collateral_reserve_trader.to_unsigned().expect("to fit");

                (
                    // The coordinator pays the funding fee using their margin. Thus, their
                    // collateral reserve remains unchanged.
                    collateral_reserve_coordinator,
                    new_collateral_reserve_trader,
                )
            }
            FundingFee::TraderPays(funding_fee) => {
                let funding_fee = funding_fee.to_signed().expect("to fit");

                let collateral_reserve_coordinator =
                    collateral_reserve_coordinator.to_signed().expect("to fit");

                let new_collateral_reserve_coordinator =
                    collateral_reserve_coordinator + funding_fee;
                let new_collateral_reserve_coordinator = new_collateral_reserve_coordinator
                    .to_unsigned()
                    .expect("to fit");

                (
                    new_collateral_reserve_coordinator,
                    // The trader pays the funding fee using their margin. Thus, their
                    // collateral reserve remains unchanged.
                    collateral_reserve_trader,
                )
            }
        };

        Ok(reserves)
    }
    /// Handle a `Closing` channel event: mark the trader's open position as closing and record
    /// the buffer transaction of the force close.
    fn handle_closing_event(&self, conn: &mut PgConnection, channel: &Channel) -> Result<()> {
        // If a channel is set to closing it means the buffer transaction got broadcasted,
        // which will only happen if the channel got force closed while the
        // user had an open position.
        let trader_id = channel.get_counter_party_id();

        // we do not know the price yet, since we have to wait for the position to expire.
        if db::positions::Position::set_open_position_to_closing(
            conn,
            &to_secp_pk_30(trader_id),
            None,
        )? > 0
        {
            tracing::info!(%trader_id, "Set open position to closing after the dlc channel got force closed.");
        }

        // The buffer transaction lives in different places depending on how far the close has
        // progressed.
        let buffer_transaction = match channel {
            Channel::Signed(SignedChannel {
                state:
                    SignedChannelState::Closing {
                        buffer_transaction, ..
                    },
                ..
            }) => buffer_transaction,
            Channel::Closing(ClosingChannel {
                buffer_transaction, ..
            }) => buffer_transaction,
            channel => {
                bail!("DLC channel in unexpected state. dlc_channel = {channel:?}")
            }
        };

        db::dlc_channels::set_channel_force_closing(
            conn,
            &channel.get_id(),
            to_txid_30(buffer_transaction.txid()),
        )?;

        Ok(())
    }
    /// Finalize a force close: derive the closing price and the trader's realized PnL from the
    /// closed contract, close the position in the database and record the closing transaction.
    fn handle_force_closed_event(
        &self,
        conn: &mut PgConnection,
        channel: &Channel,
        reference_id: ReferenceId,
    ) -> Result<()> {
        let protocol_id = ProtocolId::try_from(reference_id)?;
        let dlc_protocol = db::dlc_protocols::get_dlc_protocol(conn, protocol_id)?;
        let contract_id = &dlc_protocol.contract_id.context("Missing contract id")?;
        let trader_id = dlc_protocol.trader;

        let contract = self
            .inner
            .get_contract_by_id(contract_id)?
            .context("Missing contract")?;

        let position = db::positions::Position::get_position_by_trader(
            conn,
            trader_id,
            /* the closing price doesn't matter here. */
            vec![PositionState::Closing { closing_price: 0.0 }],
        )?
        .with_context(|| {
            format!("Couldn't find closing position for trader. trader_id = {trader_id}")
        })?;

        let (closing_price, trader_realized_pnl_sat) = match contract {
            Contract::PreClosed(PreClosedContract {
                // We assume a closed contract does always have an attestation
                attestations: Some(attestations),
                signed_cet,
                ..
            })
            | Contract::Closed(ClosedContract {
                // We assume a closed contract does always have an attestation
                attestations: Some(attestations),
                signed_cet: Some(signed_cet),
                ..
            }) => {
                let trader_realized_pnl_sat = self.calculate_trader_realized_pnl_from_cet(
                    conn,
                    &dlc_protocol.channel_id,
                    signed_cet,
                )?;
                // The oracle outcomes are digits; joined and parsed base 2 they yield the
                // attested closing price.
                let closing_price = Decimal::from_str_radix(
                    &attestations
                        .first()
                        .context("at least one attestation")?
                        .outcomes
                        .join(""),
                    2,
                )?;
                (closing_price, trader_realized_pnl_sat)
            }
            contract => {
                bail!("Contract in unexpected state. Expected PreClosed or Closed Got: {:?}, trader_id = {trader_id}", contract)
            }
        };

        tracing::debug!(
            ?position,
            %trader_id,
            "Finalize closing position after force closure",
        );

        if db::positions::Position::set_position_to_closed_with_pnl(
            conn,
            position.id,
            trader_realized_pnl_sat,
            closing_price,
        )? > 0
        {
            tracing::info!(%trader_id, "Set closing position to closed after the dlc channel got force closed.");
        } else {
            tracing::warn!(%trader_id, "Failed to set closing position to closed after the dlc channel got force closed.");
        }

        let close_txid = match channel {
            Channel::Closed(ClosedChannel { closing_txid, .. }) => closing_txid,
            Channel::CounterClosed(ClosedChannel { closing_txid, .. }) => closing_txid,
            channel => {
                bail!("DLC channel in unexpected state. dlc_channel = {channel:?}")
            }
        };

        db::dlc_channels::set_channel_closed(conn, &channel.get_id(), to_txid_30(*close_txid))?;

        Ok(())
    }
    /// Finalize a collaborative close: finish the associated DLC protocol and record the
    /// closing transaction.
    fn handle_collaboratively_closed_event(
        &self,
        conn: &mut PgConnection,
        channel: &Channel,
        reference_id: ReferenceId,
    ) -> Result<()> {
        let protocol_executor = dlc_protocol::DlcProtocolExecutor::new(self.pool.clone());
        protocol_executor.finish_dlc_protocol(
            ProtocolId::try_from(reference_id)?,
            &to_secp_pk_30(channel.get_counter_party_id()),
            None,
            &channel.get_id(),
            self.tx_position_feed.clone(),
        )?;

        let close_txid = match channel {
            Channel::CollaborativelyClosed(ClosedChannel { closing_txid, .. }) => closing_txid,
            channel => {
                bail!("DLC channel in unexpected state. dlc_channel = {channel:?}")
            }
        };

        db::dlc_channels::set_channel_closed(conn, &channel.get_id(), to_txid_30(*close_txid))?;

        Ok(())
    }
/// Calculates the trader realized pnl from the cet outputs which do not belong to us.
/// 1. Sum the trader payouts
/// 2. Subtract the trader reserve sats from the trader payout
fn calculate_trader_realized_pnl_from_cet(
&self,
conn: &mut PgConnection,
channel_id: &DlcChannelId,
signed_cet: Transaction,
) -> Result<i64> {
let trader_payout: u64 = signed_cet
.output
.iter()
.filter(|output| {
!self
.inner
.is_mine(&ScriptBuf::from_bytes(output.script_pubkey.to_bytes()))
})
.map(|output| output.value)
.sum();
let dlc_channel =
db::dlc_channels::get_dlc_channel(conn, channel_id)?.with_context(|| {
format!("Couldn't find dlc channel by channel id = {:?}", channel_id)
})?;
let trader_realized_pnl_sat =
trader_payout as i64 - dlc_channel.trader_reserve_sats.to_sat() as i64;
Ok(trader_realized_pnl_sat)
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/node/liquidated_positions.rs | coordinator/src/node/liquidated_positions.rs | use crate::db;
use crate::funding_fee::funding_fee_from_funding_fee_events;
use crate::funding_fee::get_outstanding_funding_fee_events;
use crate::node::Node;
use crate::orderbook;
use crate::orderbook::db::orders;
use crate::orderbook::trading::NewOrderMessage;
use anyhow::Result;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::Decimal;
use std::ops::Add;
use time::Duration;
use time::OffsetDateTime;
use tokio::sync::mpsc;
use xxi_node::commons::average_execution_price;
use xxi_node::commons::BestPrice;
use xxi_node::commons::ContractSymbol;
use xxi_node::commons::Direction;
use xxi_node::commons::Match;
use xxi_node::commons::MatchState;
use xxi_node::commons::NewMarketOrder;
use xxi_node::commons::OrderReason;
use xxi_node::commons::OrderState;
/// The timeout before we give up on closing a liquidated position collaboratively. This value
/// should not be larger than our refund transaction time lock.
///
/// Also used as the expiry of the liquidation market order generated in
/// `check_if_positions_need_to_get_liquidated`.
pub const LIQUIDATION_POSITION_TIMEOUT: Duration = Duration::days(7);
/// Run one liquidation check over all open positions, logging (but not propagating) any error.
pub async fn monitor(node: Node, trading_sender: mpsc::Sender<NewOrderMessage>) {
    match check_if_positions_need_to_get_liquidated(trading_sender.clone(), node.clone()).await {
        Ok(()) => {}
        Err(e) => {
            tracing::error!("Failed to check if positions need to get liquidated. Error: {e:#}");
        }
    }
}
/// For all open positions, check if the maintenance margin has been reached. Send a liquidation
/// async match to the traders whose positions have been liquidated.
///
/// Before the check, each position is updated with its outstanding funding fees, since those
/// shift the liquidation prices. Positions that already have a matched liquidation order are
/// skipped (or expired and set to closing if the match timed out), and positions whose channel
/// is not yet confirmed on-chain are postponed.
async fn check_if_positions_need_to_get_liquidated(
    trading_sender: mpsc::Sender<NewOrderMessage>,
    node: Node,
) -> Result<()> {
    let mut conn = node.pool.get()?;
    let open_positions = db::positions::Position::get_all_open_positions(&mut conn)?;
    let best_current_price =
        orderbook::db::orders::get_best_price(&mut conn, ContractSymbol::BtcUsd)?;

    // Scoped read so the settings lock is released immediately.
    let maintenance_margin_rate =
        { Decimal::try_from(node.settings.read().await.maintenance_margin_rate).expect("to fit") };

    for position in open_positions {
        // Update position based on the outstanding funding fee events _before_ considering
        // liquidation.
        let funding_fee_events =
            get_outstanding_funding_fee_events(&mut conn, position.trader, position.id)?;

        let funding_fee = funding_fee_from_funding_fee_events(&funding_fee_events);

        let position = position.apply_funding_fee(funding_fee, maintenance_margin_rate);

        let coordinator_liquidation_price =
            Decimal::try_from(position.coordinator_liquidation_price).expect("to fit into decimal");
        let trader_liquidation_price =
            Decimal::try_from(position.trader_liquidation_price).expect("to fit into decimal");

        // Either side of the position can get liquidated: the trader in their direction, the
        // coordinator in the opposite direction.
        let trader_liquidation = check_if_position_needs_to_get_liquidated(
            position.trader_direction,
            &best_current_price,
            trader_liquidation_price,
        );
        let coordinator_liquidation = check_if_position_needs_to_get_liquidated(
            position.trader_direction.opposite(),
            &best_current_price,
            coordinator_liquidation_price,
        );

        if trader_liquidation || coordinator_liquidation {
            // If a liquidation order was already matched, don't submit another one.
            if let Some(order) = orderbook::db::orders::get_by_trader_id_and_state(
                &mut conn,
                position.trader,
                OrderState::Matched,
            )? {
                let trader_id = order.trader_id.to_string();
                let order_id = order.id.to_string();

                if order.expiry < OffsetDateTime::now_utc() {
                    // The trader never came online to execute the match: expire it and mark
                    // the position as closing at the matched price.
                    tracing::warn!(trader_id, order_id, "Matched order expired! Giving up on that position, looks like the corresponding dlc channel has to get force closed.");
                    orderbook::db::orders::set_order_state(
                        &mut conn,
                        order.id,
                        OrderState::Expired,
                    )?;

                    orderbook::db::matches::set_match_state_by_order_id(
                        &mut conn,
                        order.id,
                        MatchState::Failed,
                    )?;

                    let matches =
                        orderbook::db::matches::get_matches_by_order_id(&mut conn, order.id)?;
                    let matches: Vec<Match> = matches.into_iter().map(Match::from).collect();
                    let closing_price = average_execution_price(matches);

                    db::positions::Position::set_open_position_to_closing(
                        &mut conn,
                        &position.trader,
                        Some(closing_price),
                    )?;
                    continue;
                } else {
                    tracing::trace!(trader_id, order_id, "Skipping liquidated position as match has already been found. Waiting for trader to come online to execute the trade.");
                    continue;
                }
            }

            tracing::info!(trader_id=%position.trader, ?best_current_price, position_id=%position.id, "Attempting to close liquidated position");

            // Ensure that the users channel is confirmed on-chain before continuing with the
            // liquidation.
            match node
                .inner
                .check_if_signed_channel_is_confirmed(position.trader)
                .await
            {
                Ok(true) => {
                    tracing::debug!(trader_id=%position.trader, "Traders dlc channel is confirmed. Continuing with the liquidation");
                }
                Ok(false) => {
                    tracing::warn!(trader_id=%position.trader, "Can't liquidated users position as the underlying channel is not yet confirmed");
                    continue;
                }
                Err(e) => {
                    tracing::error!(trader_id=%position.trader, "Failed to determine signed channel status. Skipping liquidation. Error: {e:#}");
                    continue;
                }
            }

            // Counter-order closing the full position in the opposite direction.
            let new_order = NewMarketOrder {
                id: uuid::Uuid::new_v4(),
                contract_symbol: position.contract_symbol,
                quantity: Decimal::try_from(position.quantity).expect("to fit into decimal"),
                trader_id: position.trader,
                direction: position.trader_direction.opposite(),
                leverage: Decimal::from_f32(position.trader_leverage).expect("to fit into decimal"),
                // This order can basically not expire, but if the user does not come back online
                // within a certain time period we can assume the channel to be
                // abandoned and we should force close.
                expiry: OffsetDateTime::now_utc().add(LIQUIDATION_POSITION_TIMEOUT),
                stable: position.stable,
            };

            let order_reason = match trader_liquidation {
                true => OrderReason::TraderLiquidated,
                false => OrderReason::CoordinatorLiquidated,
            };

            let order = match orders::insert_market_order(
                &mut conn,
                new_order.clone(),
                order_reason.clone(),
            ) {
                Ok(order) => order,
                Err(e) => {
                    tracing::error!("Failed to insert liquidation order into DB. Error: {e:#}");
                    continue;
                }
            };

            let message = NewOrderMessage {
                order,
                channel_opening_params: None,
                order_reason,
            };

            // Hand the order over to the trading task for matching/execution.
            if let Err(e) = trading_sender.send(message).await {
                tracing::error!(order_id=%new_order.id, trader_id=%new_order.trader_id, "Failed to submit new order for closing liquidated position. Error: {e:#}");
                continue;
            }
        }
    }

    Ok(())
}
/// Decide whether a position in the given `direction` must be liquidated at the current best
/// price.
///
/// A short is liquidated once the best ask reaches the liquidation price from below; a long is
/// liquidated once the best bid reaches it from above. Without a price on the relevant side of
/// the book no liquidation is triggered.
fn check_if_position_needs_to_get_liquidated(
    direction: Direction,
    best_current_price: &BestPrice,
    liquidation_price: Decimal,
) -> bool {
    match direction {
        // A short position loses as the price rises.
        Direction::Short => {
            matches!(best_current_price.ask, Some(ask) if ask >= liquidation_price)
        }
        // A long position loses as the price falls.
        Direction::Long => {
            matches!(best_current_price.bid, Some(bid) if bid <= liquidation_price)
        }
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/user.rs | coordinator/src/db/user.rs | use crate::db::bonus_status;
use crate::db::bonus_status::BonusType;
use crate::db::bonus_tiers;
use crate::schema;
use crate::schema::users;
use anyhow::bail;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use diesel::prelude::*;
use serde::Deserialize;
use serde::Serialize;
use time::OffsetDateTime;
use xxi_node::commons::referral_from_pubkey;
use xxi_node::commons::RegisterParams;
#[derive(Queryable, Identifiable, Debug, Clone, Serialize, Deserialize)]
#[diesel(primary_key(id))]
pub struct User {
#[diesel(deserialize_as = i32)]
pub id: Option<i32>,
pub pubkey: String,
pub contact: String,
pub timestamp: OffsetDateTime,
pub fcm_token: String,
pub last_login: OffsetDateTime,
pub nickname: Option<String>,
// TODO(holzeis): Version is only optional for the first upgrade. Afterwards we should make it
// mandatory.
pub version: Option<String>,
/// personal referral code
pub referral_code: String,
/// The referral code referred by
pub used_referral_code: Option<String>,
pub os: Option<String>,
}
#[derive(Insertable, Debug, Clone, Serialize, Deserialize)]
#[diesel(primary_key(id), table_name = users)]
pub struct NewUser {
#[diesel(deserialize_as = i32)]
pub id: Option<i32>,
pub pubkey: String,
pub contact: String,
pub timestamp: OffsetDateTime,
pub fcm_token: String,
pub last_login: OffsetDateTime,
pub nickname: Option<String>,
// TODO(holzeis): Version is only optional for the first upgrade. Afterwards we should make it
// mandatory.
pub version: Option<String>,
/// This user was referred by this code
pub used_referral_code: Option<String>,
pub os: Option<String>,
}
impl From<RegisterParams> for User {
fn from(value: RegisterParams) -> Self {
let referral_code = referral_from_pubkey(value.pubkey);
User {
id: None,
pubkey: value.pubkey.to_string(),
contact: value.contact.unwrap_or("".to_owned()),
nickname: value.nickname,
timestamp: OffsetDateTime::now_utc(),
fcm_token: "".to_owned(),
last_login: OffsetDateTime::now_utc(),
version: value.version,
// TODO: this is not ideal, we shouldn't need to do this as it's autogenerated in the db
// However, this is needed here because we convert from `RegisteredUser` to `User`. We
// should not do this anymore.
os: value.os,
referral_code,
used_referral_code: value.referral_code,
}
}
}
pub fn all(conn: &mut PgConnection) -> QueryResult<Vec<User>> {
users::dsl::users.load(conn)
}
pub fn by_id(conn: &mut PgConnection, id: String) -> QueryResult<Option<User>> {
let x = users::table
.filter(users::pubkey.eq(id))
.first(conn)
.optional()?;
Ok(x)
}
/// Returns all users which have logged in since the `cut_off`
pub fn all_with_login_date(
conn: &mut PgConnection,
cut_off: OffsetDateTime,
) -> QueryResult<Vec<User>> {
users::dsl::users
.filter(users::last_login.ge(cut_off))
.load(conn)
}
pub fn upsert_user(
conn: &mut PgConnection,
trader_id: PublicKey,
contact: Option<String>,
nickname: Option<String>,
version: Option<String>,
os: Option<String>,
used_referral_code: Option<String>,
) -> QueryResult<User> {
// If no name or contact has been provided we default to empty string
let contact = contact.unwrap_or_default();
let timestamp = OffsetDateTime::now_utc();
let user: User = diesel::insert_into(users::table)
.values(NewUser {
id: None,
pubkey: trader_id.to_string(),
contact: contact.clone(),
nickname: nickname.clone(),
timestamp,
fcm_token: "".to_owned(),
last_login: timestamp,
version: version.clone(),
os: os.clone(),
used_referral_code: used_referral_code.clone(),
})
.on_conflict(schema::users::pubkey)
.do_update()
.set((
users::contact.eq(&contact),
users::nickname.eq(&nickname),
users::last_login.eq(timestamp),
users::version.eq(version),
))
.get_result(conn)?;
if let Some(referral_code) = used_referral_code {
// we need to check if this referral code is sane
if let Ok(Some(_)) = get_user_for_referral(conn, referral_code.as_str()) {
let bonus_tier = bonus_tiers::all_active_by_type(conn, vec![BonusType::Referent])?;
let bonus_tier = bonus_tier.first().expect("to have at least one tier");
bonus_status::insert(conn, &trader_id, bonus_tier.tier_level, BonusType::Referent)?;
tracing::info!(
referral_code,
trader_pubkey = trader_id.to_string(),
"New user has been referred and got referral bonus"
)
} else {
tracing::warn!(
referral_code,
trader_pubkey = trader_id.to_string(),
"User tried to register with invalid referral code"
)
}
}
Ok(user)
}
pub fn update_nickname(
conn: &mut PgConnection,
trader_id: PublicKey,
nickname: Option<String>,
) -> QueryResult<()> {
let nickname = nickname.unwrap_or_default();
let updated_rows = diesel::update(users::table)
.filter(users::pubkey.eq(trader_id.to_string()))
.set(users::nickname.eq(nickname.clone()))
.execute(conn)?;
if updated_rows == 0 {
tracing::warn!(
trader_id = trader_id.to_string(),
nickname,
"No username updated"
)
}
Ok(())
}
pub fn login_user(
conn: &mut PgConnection,
trader_id: PublicKey,
token: String,
version: Option<String>,
os: Option<String>,
) -> Result<()> {
tracing::debug!(%trader_id, token, "Updating token for client.");
let last_login = OffsetDateTime::now_utc();
let affected_rows = diesel::insert_into(users::table)
.values(NewUser {
id: None,
pubkey: trader_id.to_string(),
contact: "".to_owned(),
nickname: None,
timestamp: OffsetDateTime::now_utc(),
fcm_token: token.clone(),
version: version.clone(),
os: os.clone(),
last_login,
// TODO: this breaks the used referral code
used_referral_code: None,
})
.on_conflict(schema::users::pubkey)
.do_update()
.set((
users::fcm_token.eq(&token),
users::last_login.eq(last_login),
users::version.eq(version),
users::os.eq(os),
))
.execute(conn)?;
if affected_rows == 0 {
bail!("Could not update FCM token for node ID {trader_id}.");
} else {
tracing::debug!(%trader_id, %affected_rows, "Updated FCM token in DB.");
}
Ok(())
}
pub fn get_user(conn: &mut PgConnection, trader_id: &PublicKey) -> Result<Option<User>> {
let maybe_user = users::table
.filter(users::pubkey.eq(trader_id.to_string()))
.first(conn)
.optional()?;
Ok(maybe_user)
}
pub fn get_users(conn: &mut PgConnection, trader_ids: Vec<PublicKey>) -> Result<Vec<User>> {
let users = users::table
.filter(users::pubkey.eq_any(trader_ids.iter().map(|id| id.to_string())))
.load(conn)?;
Ok(users)
}
pub fn get_referred_users(conn: &mut PgConnection, referral_code: String) -> Result<Vec<User>> {
let users = users::table
.filter(users::used_referral_code.eq(referral_code))
.load(conn)?;
Ok(users)
}
pub fn get_user_for_referral(conn: &mut PgConnection, referral_code: &str) -> Result<Option<User>> {
let user = users::table
.filter(users::referral_code.eq(referral_code))
.first(conn)
.optional()?;
Ok(user)
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/spendable_outputs.rs | coordinator/src/db/spendable_outputs.rs | use crate::schema::spendable_outputs;
use anyhow::anyhow;
use anyhow::ensure;
use anyhow::Result;
use diesel::prelude::*;
use lightning::chain::transaction::OutPoint;
use lightning::sign::DelayedPaymentOutputDescriptor;
use lightning::sign::SpendableOutputDescriptor;
use lightning::sign::StaticPaymentOutputDescriptor;
use lightning::util::ser::Readable;
use lightning::util::ser::Writeable;
pub(crate) fn insert(
conn: &mut PgConnection,
output: SpendableOutputDescriptor,
) -> QueryResult<()> {
diesel::insert_into(spendable_outputs::table)
.values(NewSpendableOutput::from(output))
.execute(conn)?;
Ok(())
}
pub fn get(
conn: &mut PgConnection,
outpoint: &OutPoint,
) -> Result<Option<SpendableOutputDescriptor>> {
let output: Option<SpendableOutput> = spendable_outputs::table
.filter(spendable_outputs::txid.eq(outpoint.txid.to_string()))
.first(conn)
.optional()?;
let output = output
.map(|output| anyhow::Ok(output.try_into()?))
.transpose()?;
Ok(output)
}
pub fn delete(conn: &mut PgConnection, outpoint: &OutPoint) -> Result<()> {
let affected_rows = diesel::delete(
spendable_outputs::table.filter(spendable_outputs::txid.eq(outpoint.txid.to_string())),
)
.execute(conn)?;
ensure!(affected_rows > 0, "Could not delete spendable output");
Ok(())
}
pub fn get_all(conn: &mut PgConnection) -> Result<Vec<SpendableOutputDescriptor>> {
let outputs: Vec<SpendableOutput> = spendable_outputs::table.load(conn)?;
outputs
.into_iter()
.map(SpendableOutputDescriptor::try_from)
.collect()
}
#[derive(Insertable, Debug, Clone)]
#[diesel(table_name = spendable_outputs)]
struct NewSpendableOutput {
txid: String,
vout: i32,
descriptor: String,
}
#[derive(Queryable, Debug, Clone)]
#[diesel(table_name = spendable_outputs)]
struct SpendableOutput {
#[diesel(column_name = "id")]
_id: i32,
#[diesel(column_name = "txid")]
_txid: String,
#[diesel(column_name = "vout")]
_vout: i32,
descriptor: String,
}
impl From<SpendableOutputDescriptor> for NewSpendableOutput {
fn from(descriptor: SpendableOutputDescriptor) -> Self {
use SpendableOutputDescriptor::*;
let outpoint = match &descriptor {
StaticOutput { outpoint, .. } => outpoint,
DelayedPaymentOutput(DelayedPaymentOutputDescriptor { outpoint, .. }) => outpoint,
StaticPaymentOutput(StaticPaymentOutputDescriptor { outpoint, .. }) => outpoint,
};
let descriptor = hex::encode(descriptor.encode());
Self {
txid: outpoint.txid.to_string(),
vout: outpoint.index as i32,
descriptor,
}
}
}
impl TryFrom<SpendableOutput> for SpendableOutputDescriptor {
type Error = anyhow::Error;
fn try_from(value: SpendableOutput) -> Result<Self, Self::Error> {
let bytes = hex::decode(value.descriptor)?;
let descriptor = Self::read(&mut lightning::io::Cursor::new(bytes))
.map_err(|e| anyhow!("Failed to decode spendable output descriptor: {e}"))?;
Ok(descriptor)
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/trade_params.rs | coordinator/src/db/trade_params.rs | use crate::dlc_protocol;
use crate::orderbook::db::custom_types::Direction;
use crate::schema::trade_params;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Amount;
use bitcoin::SignedAmount;
use diesel::ExpressionMethods;
use diesel::PgConnection;
use diesel::QueryDsl;
use diesel::QueryResult;
use diesel::Queryable;
use diesel::RunQueryDsl;
use std::str::FromStr;
use uuid::Uuid;
use xxi_node::commons;
use xxi_node::node::ProtocolId;
#[derive(Queryable, Debug)]
#[diesel(table_name = trade_params)]
#[allow(dead_code)] // We have to allow dead code here because diesel needs the fields to be able to derive queryable.
pub(crate) struct TradeParams {
pub id: i32,
pub protocol_id: Uuid,
pub trader_pubkey: String,
pub quantity: f32,
pub leverage: f32,
pub average_price: f32,
pub direction: Direction,
pub matching_fee: i64,
pub trader_pnl: Option<i64>,
}
pub(crate) fn insert(
conn: &mut PgConnection,
params: &dlc_protocol::TradeParams,
) -> QueryResult<()> {
let affected_rows = diesel::insert_into(trade_params::table)
.values(&(
trade_params::protocol_id.eq(params.protocol_id.to_uuid()),
trade_params::quantity.eq(params.quantity),
trade_params::leverage.eq(params.leverage),
trade_params::trader_pubkey.eq(params.trader.to_string()),
trade_params::direction.eq(Direction::from(params.direction)),
trade_params::average_price.eq(params.average_price),
trade_params::matching_fee.eq(params.matching_fee.to_sat() as i64),
trade_params::trader_pnl_sat.eq(params.trader_pnl.map(|pnl| pnl.to_sat())),
))
.execute(conn)?;
if affected_rows == 0 {
return Err(diesel::result::Error::NotFound);
}
Ok(())
}
pub(crate) fn get(
conn: &mut PgConnection,
protocol_id: ProtocolId,
) -> QueryResult<dlc_protocol::TradeParams> {
let trade_params: TradeParams = trade_params::table
.filter(trade_params::protocol_id.eq(protocol_id.to_uuid()))
.first(conn)?;
Ok(dlc_protocol::TradeParams::from(trade_params))
}
impl From<TradeParams> for dlc_protocol::TradeParams {
fn from(value: TradeParams) -> Self {
Self {
protocol_id: value.protocol_id.into(),
trader: PublicKey::from_str(&value.trader_pubkey).expect("valid pubkey"),
quantity: value.quantity,
leverage: value.leverage,
average_price: value.average_price,
direction: commons::Direction::from(value.direction),
matching_fee: Amount::from_sat(value.matching_fee as u64),
trader_pnl: value.trader_pnl.map(SignedAmount::from_sat),
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/rollover_params.rs | coordinator/src/db/rollover_params.rs | use crate::dlc_protocol;
use crate::schema::rollover_params;
use bitcoin::Amount;
use diesel::prelude::*;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use time::OffsetDateTime;
use uuid::Uuid;
use xxi_node::node::ProtocolId;
#[derive(Queryable, Debug)]
#[diesel(table_name = rollover_params)]
struct RolloverParams {
#[diesel(column_name = "id")]
_id: i32,
protocol_id: Uuid,
trader_pubkey: String,
margin_coordinator_sat: i64,
margin_trader_sat: i64,
leverage_coordinator: f32,
leverage_trader: f32,
liquidation_price_coordinator: f32,
liquidation_price_trader: f32,
expiry_timestamp: OffsetDateTime,
}
pub(crate) fn insert(
conn: &mut PgConnection,
params: &dlc_protocol::RolloverParams,
) -> QueryResult<()> {
let dlc_protocol::RolloverParams {
protocol_id,
trader_pubkey,
margin_coordinator,
margin_trader,
leverage_coordinator,
leverage_trader,
liquidation_price_coordinator,
liquidation_price_trader,
expiry_timestamp,
} = params;
let affected_rows = diesel::insert_into(rollover_params::table)
.values(&(
rollover_params::protocol_id.eq(protocol_id.to_uuid()),
rollover_params::trader_pubkey.eq(trader_pubkey.to_string()),
rollover_params::margin_coordinator_sat.eq(margin_coordinator.to_sat() as i64),
rollover_params::margin_trader_sat.eq(margin_trader.to_sat() as i64),
rollover_params::leverage_coordinator
.eq(leverage_coordinator.to_f32().expect("to fit")),
rollover_params::leverage_trader.eq(leverage_trader.to_f32().expect("to fit")),
rollover_params::liquidation_price_coordinator
.eq(liquidation_price_coordinator.to_f32().expect("to fit")),
rollover_params::liquidation_price_trader
.eq(liquidation_price_trader.to_f32().expect("to fit")),
rollover_params::expiry_timestamp.eq(expiry_timestamp),
))
.execute(conn)?;
if affected_rows == 0 {
return Err(diesel::result::Error::NotFound);
}
Ok(())
}
pub(crate) fn get(
conn: &mut PgConnection,
protocol_id: ProtocolId,
) -> QueryResult<dlc_protocol::RolloverParams> {
let RolloverParams {
_id,
trader_pubkey,
protocol_id,
margin_coordinator_sat: margin_coordinator,
margin_trader_sat: margin_trader,
leverage_coordinator,
leverage_trader,
liquidation_price_coordinator,
liquidation_price_trader,
expiry_timestamp,
} = rollover_params::table
.filter(rollover_params::protocol_id.eq(protocol_id.to_uuid()))
.first(conn)?;
Ok(dlc_protocol::RolloverParams {
protocol_id: protocol_id.into(),
trader_pubkey: trader_pubkey.parse().expect("valid pubkey"),
margin_coordinator: Amount::from_sat(margin_coordinator as u64),
margin_trader: Amount::from_sat(margin_trader as u64),
leverage_coordinator: Decimal::try_from(leverage_coordinator).expect("to fit"),
leverage_trader: Decimal::try_from(leverage_trader).expect("to fit"),
liquidation_price_coordinator: Decimal::try_from(liquidation_price_coordinator)
.expect("to fit"),
liquidation_price_trader: Decimal::try_from(liquidation_price_trader).expect("to fit"),
expiry_timestamp,
})
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/transactions.rs | coordinator/src/db/transactions.rs | use crate::schema;
use crate::schema::transactions;
use anyhow::ensure;
use anyhow::Result;
use bitcoin::Txid;
use diesel::AsChangeset;
use diesel::ExpressionMethods;
use diesel::Insertable;
use diesel::OptionalExtension;
use diesel::PgConnection;
use diesel::QueryDsl;
use diesel::QueryResult;
use diesel::Queryable;
use diesel::QueryableByName;
use diesel::RunQueryDsl;
use std::str::FromStr;
use time::OffsetDateTime;
#[derive(Insertable, QueryableByName, Queryable, Debug, Clone, PartialEq, AsChangeset)]
#[diesel(table_name = transactions)]
pub(crate) struct Transaction {
pub txid: String,
pub fee: i64,
pub created_at: OffsetDateTime,
pub updated_at: OffsetDateTime,
pub raw: String,
}
pub(crate) fn get(txid: &str, conn: &mut PgConnection) -> QueryResult<Option<Transaction>> {
transactions::table
.filter(transactions::txid.eq(txid))
.first(conn)
.optional()
}
pub(crate) fn get_all_without_fees(conn: &mut PgConnection) -> QueryResult<Vec<Transaction>> {
transactions::table
.filter(transactions::fee.eq(0))
.load(conn)
}
pub(crate) fn upsert(tx: Transaction, conn: &mut PgConnection) -> Result<()> {
let affected_rows = diesel::insert_into(transactions::table)
.values(tx.clone())
.on_conflict(schema::transactions::txid)
.do_update()
.set(&tx)
.execute(conn)?;
ensure!(affected_rows > 0, "Could not upsert transaction");
Ok(())
}
impl From<xxi_node::transaction::Transaction> for Transaction {
fn from(value: xxi_node::transaction::Transaction) -> Self {
Transaction {
txid: value.txid().to_string(),
fee: value.fee() as i64,
created_at: value.created_at(),
updated_at: value.updated_at(),
raw: value.raw(),
}
}
}
impl From<Transaction> for xxi_node::transaction::Transaction {
fn from(value: Transaction) -> Self {
xxi_node::transaction::Transaction::new(
Txid::from_str(&value.txid).expect("valid txid"),
value.fee as u64,
value.created_at,
value.updated_at,
value.raw,
)
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/reported_errors.rs | coordinator/src/db/reported_errors.rs | use crate::schema::reported_errors;
use diesel::prelude::*;
use xxi_node::commons::ReportedError;
#[derive(Insertable, Debug, Clone)]
#[diesel(table_name = reported_errors)]
struct NewReportedError {
trader_pubkey: String,
error: String,
version: String,
}
pub(crate) fn insert(conn: &mut PgConnection, error: ReportedError) -> QueryResult<()> {
diesel::insert_into(reported_errors::table)
.values(NewReportedError::from(error))
.execute(conn)?;
Ok(())
}
impl From<ReportedError> for NewReportedError {
fn from(value: ReportedError) -> Self {
Self {
trader_pubkey: value.trader_pk.to_string(),
error: value.msg,
version: value.version.unwrap_or("<2.3.1".to_owned()),
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/dlc_messages.rs | coordinator/src/db/dlc_messages.rs | use crate::schema;
use crate::schema::dlc_messages;
use crate::schema::sql_types::MessageTypeType;
use anyhow::ensure;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use diesel::query_builder::QueryId;
use diesel::AsChangeset;
use diesel::AsExpression;
use diesel::ExpressionMethods;
use diesel::FromSqlRow;
use diesel::Insertable;
use diesel::OptionalExtension;
use diesel::PgConnection;
use diesel::QueryDsl;
use diesel::QueryResult;
use diesel::Queryable;
use diesel::QueryableByName;
use diesel::RunQueryDsl;
use std::any::TypeId;
use std::str::FromStr;
use time::OffsetDateTime;
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = MessageTypeType)]
pub(crate) enum MessageType {
Offer,
Accept,
Sign,
SettleOffer,
SettleAccept,
SettleConfirm,
SettleFinalize,
RenewOffer,
RenewAccept,
RenewConfirm,
RenewFinalize,
RenewRevoke,
RolloverOffer,
RolloverAccept,
RolloverConfirm,
RolloverFinalize,
RolloverRevoke,
CollaborativeCloseOffer,
Reject,
}
impl QueryId for MessageTypeType {
type QueryId = MessageTypeType;
const HAS_STATIC_QUERY_ID: bool = false;
fn query_id() -> Option<TypeId> {
None
}
}
#[derive(Insertable, QueryableByName, Queryable, Debug, Clone, PartialEq, AsChangeset)]
#[diesel(table_name = dlc_messages)]
pub(crate) struct DlcMessage {
pub message_hash: String,
pub inbound: bool,
pub peer_id: String,
pub message_type: MessageType,
pub timestamp: OffsetDateTime,
}
pub(crate) fn get(conn: &mut PgConnection, message_hash: &str) -> QueryResult<Option<DlcMessage>> {
dlc_messages::table
.filter(dlc_messages::message_hash.eq(message_hash.to_string()))
.first::<DlcMessage>(conn)
.optional()
}
pub(crate) fn insert(
conn: &mut PgConnection,
dlc_message: xxi_node::dlc_message::DlcMessage,
) -> Result<()> {
let affected_rows = diesel::insert_into(schema::dlc_messages::table)
.values(DlcMessage::from(dlc_message))
.execute(conn)?;
ensure!(affected_rows > 0, "Could not insert dlc message");
Ok(())
}
impl From<xxi_node::dlc_message::DlcMessage> for DlcMessage {
fn from(value: xxi_node::dlc_message::DlcMessage) -> Self {
Self {
message_hash: value.message_hash,
peer_id: value.peer_id.to_string(),
message_type: MessageType::from(value.message_type),
timestamp: value.timestamp,
inbound: value.inbound,
}
}
}
impl From<xxi_node::dlc_message::DlcMessageType> for MessageType {
fn from(value: xxi_node::dlc_message::DlcMessageType) -> Self {
match value {
xxi_node::dlc_message::DlcMessageType::Offer => Self::Offer,
xxi_node::dlc_message::DlcMessageType::Accept => Self::Accept,
xxi_node::dlc_message::DlcMessageType::Sign => Self::Sign,
xxi_node::dlc_message::DlcMessageType::SettleOffer => Self::SettleOffer,
xxi_node::dlc_message::DlcMessageType::SettleAccept => Self::SettleAccept,
xxi_node::dlc_message::DlcMessageType::SettleConfirm => Self::SettleConfirm,
xxi_node::dlc_message::DlcMessageType::SettleFinalize => Self::SettleFinalize,
xxi_node::dlc_message::DlcMessageType::RenewOffer => Self::RenewOffer,
xxi_node::dlc_message::DlcMessageType::RenewAccept => Self::RenewAccept,
xxi_node::dlc_message::DlcMessageType::RenewConfirm => Self::RenewConfirm,
xxi_node::dlc_message::DlcMessageType::RenewFinalize => Self::RenewFinalize,
xxi_node::dlc_message::DlcMessageType::RenewRevoke => Self::RenewRevoke,
xxi_node::dlc_message::DlcMessageType::RolloverOffer => Self::RolloverOffer,
xxi_node::dlc_message::DlcMessageType::RolloverAccept => Self::RolloverAccept,
xxi_node::dlc_message::DlcMessageType::RolloverConfirm => Self::RolloverConfirm,
xxi_node::dlc_message::DlcMessageType::RolloverFinalize => Self::RolloverFinalize,
xxi_node::dlc_message::DlcMessageType::RolloverRevoke => Self::RolloverRevoke,
xxi_node::dlc_message::DlcMessageType::CollaborativeCloseOffer => {
Self::CollaborativeCloseOffer
}
xxi_node::dlc_message::DlcMessageType::Reject => Self::Reject,
}
}
}
impl From<DlcMessage> for xxi_node::dlc_message::DlcMessage {
fn from(value: DlcMessage) -> Self {
Self {
message_hash: value.message_hash,
inbound: value.inbound,
message_type: xxi_node::dlc_message::DlcMessageType::from(value.message_type),
peer_id: PublicKey::from_str(&value.peer_id).expect("valid public key"),
timestamp: value.timestamp,
}
}
}
impl From<MessageType> for xxi_node::dlc_message::DlcMessageType {
fn from(value: MessageType) -> Self {
match value {
MessageType::Offer => xxi_node::dlc_message::DlcMessageType::Offer,
MessageType::Accept => xxi_node::dlc_message::DlcMessageType::Accept,
MessageType::Sign => xxi_node::dlc_message::DlcMessageType::Sign,
MessageType::SettleOffer => xxi_node::dlc_message::DlcMessageType::SettleOffer,
MessageType::SettleAccept => xxi_node::dlc_message::DlcMessageType::SettleAccept,
MessageType::SettleConfirm => xxi_node::dlc_message::DlcMessageType::SettleConfirm,
MessageType::SettleFinalize => xxi_node::dlc_message::DlcMessageType::SettleFinalize,
MessageType::RenewOffer => xxi_node::dlc_message::DlcMessageType::RenewOffer,
MessageType::RenewAccept => xxi_node::dlc_message::DlcMessageType::RenewAccept,
MessageType::RenewConfirm => xxi_node::dlc_message::DlcMessageType::RenewConfirm,
MessageType::RenewFinalize => xxi_node::dlc_message::DlcMessageType::RenewFinalize,
MessageType::RenewRevoke => xxi_node::dlc_message::DlcMessageType::RenewRevoke,
MessageType::RolloverOffer => xxi_node::dlc_message::DlcMessageType::RolloverOffer,
MessageType::RolloverAccept => xxi_node::dlc_message::DlcMessageType::RolloverAccept,
MessageType::RolloverConfirm => xxi_node::dlc_message::DlcMessageType::RolloverConfirm,
MessageType::RolloverFinalize => {
xxi_node::dlc_message::DlcMessageType::RolloverFinalize
}
MessageType::RolloverRevoke => xxi_node::dlc_message::DlcMessageType::RolloverRevoke,
MessageType::CollaborativeCloseOffer => {
xxi_node::dlc_message::DlcMessageType::CollaborativeCloseOffer
}
MessageType::Reject => xxi_node::dlc_message::DlcMessageType::Reject,
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/bonus_tiers.rs | coordinator/src/db/bonus_tiers.rs | use crate::db::bonus_status::BonusType;
use crate::schema::bonus_tiers;
use bitcoin::secp256k1::PublicKey;
use diesel::pg::sql_types::Timestamptz;
use diesel::prelude::*;
use diesel::sql_query;
use diesel::sql_types::Float;
use diesel::sql_types::Text;
use diesel::PgConnection;
use diesel::QueryResult;
use diesel::Queryable;
use rust_decimal::Decimal;
use time::OffsetDateTime;
pub struct Referral {
pub volume: Decimal,
}
#[derive(Queryable, Debug, Clone)]
#[diesel(table_name = bonus_tiers)]
// this is needed because some fields are unused but need to be here for diesel
#[allow(dead_code)]
pub(crate) struct BonusTier {
pub(crate) id: i32,
pub(crate) tier_level: i32,
pub(crate) min_users_to_refer: i32,
pub(crate) fee_rebate: f32,
pub(crate) bonus_tier_type: BonusType,
pub(crate) active: bool,
}
/// Returns all active bonus tiers for given types
pub(crate) fn all_active_by_type(
conn: &mut PgConnection,
types: Vec<BonusType>,
) -> QueryResult<Vec<BonusTier>> {
bonus_tiers::table
.filter(bonus_tiers::active.eq(true))
.filter(bonus_tiers::bonus_tier_type.eq_any(types))
.load::<BonusTier>(conn)
}
pub(crate) fn tier_by_tier_level(
conn: &mut PgConnection,
tier_level: i32,
) -> QueryResult<BonusTier> {
bonus_tiers::table
.filter(bonus_tiers::tier_level.eq(tier_level))
.first(conn)
}
#[derive(Debug, QueryableByName, Clone)]
pub struct UserReferralSummaryView {
#[diesel(sql_type = Text)]
pub referring_user: String,
#[diesel(sql_type = Text)]
pub referring_user_referral_code: String,
#[diesel(sql_type = Text)]
pub referred_user: String,
#[diesel(sql_type = Text)]
pub referred_user_referral_code: String,
#[diesel(sql_type = Timestamptz)]
pub timestamp: OffsetDateTime,
#[diesel(sql_type = Float)]
pub referred_user_total_quantity: f32,
}
/// Returns all referred users for by referrer
pub(crate) fn all_referrals_by_referring_user(
conn: &mut PgConnection,
trader_pubkey: &PublicKey,
) -> QueryResult<Vec<UserReferralSummaryView>> {
// we have to do this manually because diesel does not support views. If you make a change to
// below, make sure to test this against a life db as errors will only be thrown at runtime
let query = "SELECT referring_user, referring_user_referral_code, \
referred_user, \
referred_user_referral_code, \
timestamp, \
referred_user_total_quantity \
FROM user_referral_summary_view where referring_user = $1";
let summaries: Vec<UserReferralSummaryView> = sql_query(query)
.bind::<Text, _>(trader_pubkey.to_string())
.load(conn)?;
Ok(summaries)
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/hodl_invoice.rs | coordinator/src/db/hodl_invoice.rs | use crate::schema::hodl_invoices;
use crate::schema::sql_types::InvoiceStateType;
use anyhow::ensure;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Amount;
use diesel::query_builder::QueryId;
use diesel::AsExpression;
use diesel::ExpressionMethods;
use diesel::FromSqlRow;
use diesel::PgConnection;
use diesel::QueryDsl;
use diesel::QueryResult;
use diesel::RunQueryDsl;
use std::any::TypeId;
use time::OffsetDateTime;
use uuid::Uuid;
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = InvoiceStateType)]
pub enum InvoiceState {
Open,
Accepted,
Settled,
Failed,
Canceled,
}
impl QueryId for InvoiceStateType {
type QueryId = InvoiceStateType;
const HAS_STATIC_QUERY_ID: bool = false;
fn query_id() -> Option<TypeId> {
None
}
}
pub fn cancel_pending_hodl_invoices(conn: &mut PgConnection) -> QueryResult<usize> {
diesel::update(hodl_invoices::table)
.filter(hodl_invoices::invoice_state.eq_any([InvoiceState::Open, InvoiceState::Accepted]))
.set(hodl_invoices::invoice_state.eq(InvoiceState::Canceled))
.execute(conn)
}
pub fn create_hodl_invoice(
conn: &mut PgConnection,
r_hash: &str,
trader_pubkey: PublicKey,
amount_sats: u64,
) -> Result<()> {
let affected_rows = diesel::insert_into(hodl_invoices::table)
.values((
hodl_invoices::r_hash.eq(r_hash),
hodl_invoices::trader_pubkey.eq(trader_pubkey.to_string()),
hodl_invoices::invoice_state.eq(InvoiceState::Open),
hodl_invoices::amount_sats.eq(amount_sats as i64),
))
.execute(conn)?;
ensure!(affected_rows > 0, "Could not insert hodl invoice");
Ok(())
}
pub fn get_r_hash_by_order_id(conn: &mut PgConnection, order_id: Uuid) -> QueryResult<String> {
hodl_invoices::table
.filter(hodl_invoices::order_id.eq(order_id))
.select(hodl_invoices::r_hash)
.get_result(conn)
}
/// Returns the pre image of the hodl invoice associated with the order id
/// If the hodl invoice can not be found a [`Not Found`] error is returned
/// If the hodl invoice is found the pre_image is optional, as it might have not yet been set.
pub fn get_pre_image_by_order_id(
conn: &mut PgConnection,
order_id: Uuid,
) -> QueryResult<Option<String>> {
hodl_invoices::table
.filter(hodl_invoices::order_id.eq(order_id))
.select(hodl_invoices::pre_image)
.get_result(conn)
}
pub fn update_hodl_invoice_to_accepted(
conn: &mut PgConnection,
hash: &str,
pre_image: &str,
order_id: Uuid,
) -> Result<Amount> {
let amount: i64 = diesel::update(hodl_invoices::table)
.filter(hodl_invoices::r_hash.eq(hash))
.set((
hodl_invoices::pre_image.eq(pre_image),
hodl_invoices::updated_at.eq(OffsetDateTime::now_utc()),
hodl_invoices::invoice_state.eq(InvoiceState::Accepted),
hodl_invoices::order_id.eq(order_id),
))
.returning(hodl_invoices::amount_sats)
.get_result(conn)?;
Ok(Amount::from_sat(amount as u64))
}
pub fn update_hodl_invoice_to_settled(
conn: &mut PgConnection,
r_hash: String,
) -> QueryResult<Option<String>> {
diesel::update(hodl_invoices::table)
.filter(hodl_invoices::r_hash.eq(r_hash))
.set((
hodl_invoices::updated_at.eq(OffsetDateTime::now_utc()),
hodl_invoices::invoice_state.eq(InvoiceState::Settled),
))
.returning(hodl_invoices::pre_image)
.get_result(conn)
}
pub fn update_hodl_invoice_to_canceled(
conn: &mut PgConnection,
r_hash: String,
) -> QueryResult<usize> {
diesel::update(hodl_invoices::table)
.filter(hodl_invoices::r_hash.eq(r_hash))
.set((
hodl_invoices::updated_at.eq(OffsetDateTime::now_utc()),
hodl_invoices::invoice_state.eq(InvoiceState::Canceled),
))
.execute(conn)
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/dlc_channels.rs | coordinator/src/db/dlc_channels.rs | use crate::node::channel;
use crate::schema::dlc_channels;
use crate::schema::sql_types::DlcChannelStateType;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Amount;
use bitcoin::Txid;
use bitcoin_old::hashes::hex::ToHex;
use diesel::query_builder::QueryId;
use diesel::AsChangeset;
use diesel::AsExpression;
use diesel::ExpressionMethods;
use diesel::FromSqlRow;
use diesel::OptionalExtension;
use diesel::PgConnection;
use diesel::QueryDsl;
use diesel::QueryResult;
use diesel::Queryable;
use diesel::QueryableByName;
use diesel::RunQueryDsl;
use dlc_manager::DlcChannelId;
use hex::FromHex;
use std::any::TypeId;
use std::str::FromStr;
use time::OffsetDateTime;
use uuid::Uuid;
use xxi_node::node::ProtocolId;
/// Lifecycle state of a DLC channel as persisted in the `dlc_channels` table.
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = DlcChannelStateType)]
pub(crate) enum DlcChannelState {
    Pending,
    Open,
    Closing,
    Closed,
    Failed,
    Cancelled,
}
/// Database row for the `dlc_channels` table.
///
/// NOTE: field order must match the column order declared in `schema.rs`, since
/// diesel's `Queryable` maps columns positionally — do not reorder fields.
#[derive(QueryableByName, Queryable, Debug, Clone, PartialEq, AsChangeset)]
#[diesel(table_name = dlc_channels)]
pub(crate) struct DlcChannel {
    id: i32,
    open_protocol_id: Uuid,
    // Hex-encoded DLC channel id.
    channel_id: String,
    trader_pubkey: String,
    channel_state: DlcChannelState,
    trader_reserve_sats: i64,
    coordinator_reserve_sats: i64,
    // Txids below are stored as hex strings; each is set by the corresponding
    // state-transition function in this module.
    funding_txid: Option<String>,
    close_txid: Option<String>,
    settle_txid: Option<String>,
    buffer_txid: Option<String>,
    claim_txid: Option<String>,
    punish_txid: Option<String>,
    created_at: OffsetDateTime,
    updated_at: OffsetDateTime,
    coordinator_funding_sats: i64,
    trader_funding_sats: i64,
}
// Diesel requires a `QueryId` impl for custom SQL types. Reporting no static
// query id opts queries using this type out of prepared-statement caching.
impl QueryId for DlcChannelStateType {
    type QueryId = DlcChannelStateType;
    const HAS_STATIC_QUERY_ID: bool = false;

    fn query_id() -> Option<TypeId> {
        None
    }
}
/// Create a new `dlc_channels` row in [`DlcChannelState::Pending`] for `trader`.
pub(crate) fn insert_pending_dlc_channel(
    conn: &mut PgConnection,
    open_protocol_id: &ProtocolId,
    channel_id: &DlcChannelId,
    trader: &PublicKey,
) -> QueryResult<usize> {
    // Reserves start at zero; they are recorded once the channel is marked open
    // (see `set_dlc_channel_open`).
    let new_row = (
        dlc_channels::open_protocol_id.eq(open_protocol_id.to_uuid()),
        dlc_channels::channel_id.eq(channel_id.to_hex()),
        dlc_channels::channel_state.eq(DlcChannelState::Pending),
        dlc_channels::coordinator_reserve_sats.eq(0),
        dlc_channels::trader_reserve_sats.eq(0),
        dlc_channels::trader_pubkey.eq(trader.to_string()),
        dlc_channels::updated_at.eq(OffsetDateTime::now_utc()),
        dlc_channels::created_at.eq(OffsetDateTime::now_utc()),
    );

    diesel::insert_into(dlc_channels::table)
        .values(new_row)
        .execute(conn)
}
/// Mark the channel created by `open_protocol_id` as open, recording the funding
/// transaction plus both parties' reserve and funding amounts.
#[allow(clippy::too_many_arguments)]
pub(crate) fn set_dlc_channel_open(
    conn: &mut PgConnection,
    open_protocol_id: &ProtocolId,
    channel_id: &DlcChannelId,
    funding_txid: Txid,
    coordinator_reserve: Amount,
    trader_reserve: Amount,
    coordinator_funding: Amount,
    trader_funding: Amount,
) -> QueryResult<usize> {
    let target =
        dlc_channels::table.filter(dlc_channels::open_protocol_id.eq(open_protocol_id.to_uuid()));

    diesel::update(target)
        .set((
            dlc_channels::funding_txid.eq(funding_txid.to_string()),
            dlc_channels::channel_id.eq(channel_id.to_hex()),
            dlc_channels::channel_state.eq(DlcChannelState::Open),
            dlc_channels::updated_at.eq(OffsetDateTime::now_utc()),
            dlc_channels::coordinator_reserve_sats.eq(coordinator_reserve.to_sat() as i64),
            dlc_channels::trader_reserve_sats.eq(trader_reserve.to_sat() as i64),
            dlc_channels::coordinator_funding_sats.eq(coordinator_funding.to_sat() as i64),
            dlc_channels::trader_funding_sats.eq(trader_funding.to_sat() as i64),
        ))
        .execute(conn)
}
/// Refresh both parties' reserve amounts for an existing channel.
pub(crate) fn update_channel(
    conn: &mut PgConnection,
    channel_id: &DlcChannelId,
    coordinator_reserve: Amount,
    trader_reserve: Amount,
) -> QueryResult<usize> {
    let target = dlc_channels::table.filter(dlc_channels::channel_id.eq(channel_id.to_hex()));

    diesel::update(target)
        .set((
            dlc_channels::updated_at.eq(OffsetDateTime::now_utc()),
            dlc_channels::coordinator_reserve_sats.eq(coordinator_reserve.to_sat() as i64),
            dlc_channels::trader_reserve_sats.eq(trader_reserve.to_sat() as i64),
        ))
        .execute(conn)
}
/// Record a force close from the settled state: stores the settle transaction
/// (and optional claim transaction) and moves the channel to `Closing`.
pub(crate) fn set_channel_force_closing_settled(
    conn: &mut PgConnection,
    channel_id: &DlcChannelId,
    settle_txid: Txid,
    claim_txid: Option<Txid>,
) -> QueryResult<usize> {
    let target = dlc_channels::table.filter(dlc_channels::channel_id.eq(channel_id.to_hex()));

    diesel::update(target)
        .set((
            dlc_channels::settle_txid.eq(settle_txid.to_string()),
            dlc_channels::claim_txid.eq(claim_txid.map(|txid| txid.to_string())),
            dlc_channels::channel_state.eq(DlcChannelState::Closing),
            dlc_channels::updated_at.eq(OffsetDateTime::now_utc()),
        ))
        .execute(conn)
}
/// Record a force close via buffer transaction and move the channel to `Closing`.
pub(crate) fn set_channel_force_closing(
    conn: &mut PgConnection,
    channel_id: &DlcChannelId,
    buffer_txid: Txid,
) -> QueryResult<usize> {
    let target = dlc_channels::table.filter(dlc_channels::channel_id.eq(channel_id.to_hex()));

    diesel::update(target)
        .set((
            dlc_channels::buffer_txid.eq(buffer_txid.to_string()),
            dlc_channels::channel_state.eq(DlcChannelState::Closing),
            dlc_channels::updated_at.eq(OffsetDateTime::now_utc()),
        ))
        .execute(conn)
}
/// Record a punishment transaction and move the channel to `Closing`.
pub(crate) fn set_channel_punished(
    conn: &mut PgConnection,
    channel_id: &DlcChannelId,
    punish_txid: Txid,
) -> QueryResult<usize> {
    let target = dlc_channels::table.filter(dlc_channels::channel_id.eq(channel_id.to_hex()));

    diesel::update(target)
        .set((
            dlc_channels::punish_txid.eq(punish_txid.to_string()),
            dlc_channels::channel_state.eq(DlcChannelState::Closing),
            dlc_channels::updated_at.eq(OffsetDateTime::now_utc()),
        ))
        .execute(conn)
}
/// Record a collaborative close transaction and move the channel to `Closing`.
pub(crate) fn set_channel_collab_closing(
    conn: &mut PgConnection,
    channel_id: &DlcChannelId,
    close_txid: Txid,
) -> QueryResult<usize> {
    let target = dlc_channels::table.filter(dlc_channels::channel_id.eq(channel_id.to_hex()));

    diesel::update(target)
        .set((
            dlc_channels::close_txid.eq(close_txid.to_string()),
            dlc_channels::channel_state.eq(DlcChannelState::Closing),
            dlc_channels::updated_at.eq(OffsetDateTime::now_utc()),
        ))
        .execute(conn)
}
/// Record the final close transaction and move the channel to `Closed`.
pub(crate) fn set_channel_closed(
    conn: &mut PgConnection,
    channel_id: &DlcChannelId,
    close_txid: Txid,
) -> QueryResult<usize> {
    let target = dlc_channels::table.filter(dlc_channels::channel_id.eq(channel_id.to_hex()));

    diesel::update(target)
        .set((
            dlc_channels::close_txid.eq(close_txid.to_string()),
            dlc_channels::channel_state.eq(DlcChannelState::Closed),
            dlc_channels::updated_at.eq(OffsetDateTime::now_utc()),
        ))
        .execute(conn)
}
/// Mark the channel created by `protocol_id` as `Failed`.
pub(crate) fn set_channel_failed(
    conn: &mut PgConnection,
    protocol_id: &ProtocolId,
) -> QueryResult<usize> {
    let target =
        dlc_channels::table.filter(dlc_channels::open_protocol_id.eq(protocol_id.to_uuid()));

    diesel::update(target)
        .set((
            dlc_channels::channel_state.eq(DlcChannelState::Failed),
            dlc_channels::updated_at.eq(OffsetDateTime::now_utc()),
        ))
        .execute(conn)
}
/// Mark the channel created by `protocol_id` as `Cancelled`.
pub(crate) fn set_channel_cancelled(
    conn: &mut PgConnection,
    protocol_id: &ProtocolId,
) -> QueryResult<usize> {
    let target =
        dlc_channels::table.filter(dlc_channels::open_protocol_id.eq(protocol_id.to_uuid()));

    diesel::update(target)
        .set((
            dlc_channels::channel_state.eq(DlcChannelState::Cancelled),
            dlc_channels::updated_at.eq(OffsetDateTime::now_utc()),
        ))
        .execute(conn)
}
/// Look up a channel by its id and convert it to the domain representation.
///
/// Returns `Ok(None)` if no row matches.
pub(crate) fn get_dlc_channel(
    conn: &mut PgConnection,
    channel_id: &DlcChannelId,
) -> QueryResult<Option<channel::DlcChannel>> {
    let row = dlc_channels::table
        .filter(dlc_channels::channel_id.eq(channel_id.to_hex()))
        .first::<DlcChannel>(conn)
        .optional()?;

    Ok(row.map(Into::into))
}
impl From<DlcChannel> for channel::DlcChannel {
    fn from(value: DlcChannel) -> Self {
        // Every txid column shares the same hex encoding, so decode them with
        // one shared closure; the `expect`s encode the invariant that the DB
        // only ever stores well-formed values.
        let parse_txid = |txid: String| Txid::from_str(&txid).expect("valid txid");

        Self {
            channel_id: DlcChannelId::from_hex(value.channel_id).expect("valid dlc channel id"),
            trader: PublicKey::from_str(&value.trader_pubkey).expect("valid pubkey"),
            channel_state: value.channel_state.into(),
            trader_reserve_sats: Amount::from_sat(value.trader_reserve_sats as u64),
            coordinator_reserve_sats: Amount::from_sat(value.coordinator_reserve_sats as u64),
            trader_funding_sats: Amount::from_sat(value.trader_funding_sats as u64),
            coordinator_funding_sats: Amount::from_sat(value.coordinator_funding_sats as u64),
            funding_txid: value.funding_txid.map(parse_txid),
            close_txid: value.close_txid.map(parse_txid),
            settle_txid: value.settle_txid.map(parse_txid),
            buffer_txid: value.buffer_txid.map(parse_txid),
            claim_txid: value.claim_txid.map(parse_txid),
            punish_txid: value.punish_txid.map(parse_txid),
            created_at: value.created_at,
            updated_at: value.updated_at,
        }
    }
}
impl From<DlcChannelState> for channel::DlcChannelState {
fn from(value: DlcChannelState) -> Self {
match value {
DlcChannelState::Pending => channel::DlcChannelState::Pending,
DlcChannelState::Open => channel::DlcChannelState::Open,
DlcChannelState::Closing => channel::DlcChannelState::Closing,
DlcChannelState::Closed => channel::DlcChannelState::Closed,
DlcChannelState::Failed => channel::DlcChannelState::Failed,
DlcChannelState::Cancelled => channel::DlcChannelState::Cancelled,
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/polls.rs | coordinator/src/db/polls.rs | use crate::schema::answers;
use crate::schema::choices;
use crate::schema::polls;
use crate::schema::polls_whitelist;
use crate::schema::sql_types::PollTypeType;
use anyhow::bail;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use diesel::dsl::exists;
use diesel::query_builder::QueryId;
use diesel::select;
use diesel::AsExpression;
use diesel::ExpressionMethods;
use diesel::FromSqlRow;
use diesel::Identifiable;
use diesel::Insertable;
use diesel::PgConnection;
use diesel::QueryDsl;
use diesel::QueryResult;
use diesel::Queryable;
use diesel::RunQueryDsl;
use diesel::Selectable;
use diesel::SelectableHelper;
use std::any::TypeId;
use std::collections::HashMap;
use time::OffsetDateTime;
use xxi_node::commons;
/// Kind of poll as stored in the `polls` table; currently only single-choice.
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression, Eq, Hash)]
#[diesel(sql_type = PollTypeType)]
pub enum PollType {
    SingleChoice,
}
// Diesel requires a `QueryId` impl for custom SQL types. Reporting no static
// query id opts queries using this type out of prepared-statement caching.
impl QueryId for PollTypeType {
    type QueryId = PollTypeType;
    const HAS_STATIC_QUERY_ID: bool = false;

    fn query_id() -> Option<TypeId> {
        None
    }
}
/// A poll as stored in the `polls` table.
///
/// NOTE: field order must match the column order in `schema.rs`, since diesel's
/// `Queryable` maps columns positionally.
#[derive(Insertable, Queryable, Identifiable, Selectable, Debug, Clone, Eq, PartialEq, Hash)]
#[diesel(table_name = polls)]
#[diesel(primary_key(id))]
pub struct Poll {
    pub id: i32,
    pub poll_type: PollType,
    pub question: String,
    // Only active polls are served to clients (see `active`).
    pub active: bool,
    pub creation_timestamp: OffsetDateTime,
    // When true, the poll is only shown to traders present in `polls_whitelist`.
    pub whitelisted: bool,
}
/// A selectable choice belonging to a poll (`choices` table).
#[derive(Insertable, Queryable, Identifiable, Selectable, Debug, Clone, Eq, PartialEq)]
#[diesel(belongs_to(Poll))]
#[diesel(table_name = choices)]
#[diesel(primary_key(id))]
pub struct Choice {
    pub id: i32,
    // Foreign key into `polls`.
    pub poll_id: i32,
    pub value: String,
    // Whether the client may enter free text for this choice.
    pub editable: bool,
}
/// A trader's answer to a poll choice (`answers` table).
#[derive(Insertable, Queryable, Identifiable, Debug, Clone)]
#[diesel(primary_key(id))]
pub struct Answer {
    // `None` on insert lets the database assign the primary key (see `add_answer`).
    pub id: Option<i32>,
    pub choice_id: i32,
    pub trader_pubkey: String,
    pub value: String,
    pub creation_timestamp: OffsetDateTime,
}
/// Return all active polls visible to `trader_id`, together with their choices.
///
/// Polls flagged as `whitelisted` are only returned if the trader appears in the
/// `polls_whitelist` table.
///
/// NOTE(review): the whitelist lookup filters only on the trader public key, not
/// on the poll id — i.e. whitelisting appears to be global per trader rather than
/// per poll. Confirm this is intended.
pub fn active(conn: &mut PgConnection, trader_id: &PublicKey) -> QueryResult<Vec<commons::Poll>> {
    let results = polls::table
        .filter(polls::active.eq(true))
        .left_join(choices::table)
        .select(<(Poll, Option<Choice>)>::as_select())
        .load::<(Poll, Option<Choice>)>(conn)?;

    // The whitelist check does not depend on the poll row, so run the query at
    // most once instead of once per whitelisted poll (previously an N+1 query).
    let mut trader_is_whitelisted: Option<bool> = None;

    let mut polls_with_choices = HashMap::new();
    for (poll, choice) in results {
        if poll.whitelisted {
            let whitelisted = match trader_is_whitelisted {
                Some(whitelisted) => whitelisted,
                None => {
                    let whitelisted: bool = select(exists(
                        polls_whitelist::table
                            .filter(polls_whitelist::trader_pubkey.eq(trader_id.to_string())),
                    ))
                    .get_result(conn)?;
                    trader_is_whitelisted = Some(whitelisted);
                    whitelisted
                }
            };

            if !whitelisted {
                // Skip polls which are not whitelisted for this user.
                continue;
            }
        }
        let entry = polls_with_choices.entry(poll).or_insert_with(Vec::new);
        if let Some(choice) = choice {
            entry.push(choice);
        }
    }

    let polls = polls_with_choices
        .into_iter()
        .map(|(poll, choice_vec)| commons::Poll {
            id: poll.id,
            poll_type: poll.poll_type.into(),
            question: poll.question,
            choices: choice_vec
                .into_iter()
                .map(|choice| commons::Choice {
                    id: choice.id,
                    value: choice.value,
                    editable: choice.editable,
                })
                .collect(),
        })
        .collect();

    Ok(polls)
}
impl From<PollType> for commons::PollType {
    // 1:1 mapping from the DB enum to the domain enum.
    fn from(value: PollType) -> Self {
        match value {
            PollType::SingleChoice => Self::SingleChoice,
        }
    }
}
/// Record a trader's answers to a poll, one row per answered choice.
///
/// # Errors
///
/// Fails if any single insert fails, or if no row was inserted at all (which
/// includes the case of an empty `answers.answers`).
pub fn add_answer(conn: &mut PgConnection, answers: commons::PollAnswers) -> Result<()> {
    // `PublicKey` is `Copy`; grab it before `answers.answers` is consumed below.
    let trader_pk = answers.trader_pk;

    let mut affected_rows = 0;
    for answer in answers.answers {
        affected_rows += diesel::insert_into(answers::table)
            .values(Answer {
                // Let the database assign the primary key.
                id: None,
                choice_id: answer.choice_id,
                trader_pubkey: trader_pk.to_string(),
                value: answer.value,
                creation_timestamp: OffsetDateTime::now_utc(),
            })
            .execute(conn)?;
    }

    if affected_rows == 0 {
        bail!("Could not insert answers by user {trader_pk}.");
    } else {
        // Fix: the tracing field was previously misspelled `trade_pk`.
        tracing::trace!(%affected_rows, trader_pk = trader_pk.to_string(),
            "Added new answers to a poll.");
    }

    Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/trades.rs | coordinator/src/db/trades.rs | use crate::db::positions::ContractSymbol;
use crate::orderbook::db::custom_types::Direction;
use crate::schema::trades;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Amount;
use diesel::prelude::*;
use std::str::FromStr;
use time::OffsetDateTime;
/// A trade row loaded from the `trades` table.
///
/// NOTE: field order must match the column order in `schema.rs`, since diesel's
/// `Queryable` maps columns positionally.
#[derive(Queryable, Debug, Clone)]
#[diesel(table_name = trades)]
struct Trade {
    id: i32,
    position_id: i32,
    contract_symbol: ContractSymbol,
    trader_pubkey: String,
    quantity: f32,
    trader_leverage: f32,
    direction: Direction,
    average_price: f32,
    timestamp: OffsetDateTime,
    order_matching_fee_sat: i64,
    // `None` while the trade has no realized PnL yet.
    trader_realized_pnl_sat: Option<i64>,
}
/// Insertable subset of [`Trade`]: `id` and `timestamp` are omitted so the
/// database can assign them.
#[derive(Insertable, Debug, Clone)]
#[diesel(table_name = trades)]
struct NewTrade {
    position_id: i32,
    contract_symbol: ContractSymbol,
    trader_pubkey: String,
    quantity: f32,
    trader_leverage: f32,
    direction: Direction,
    average_price: f32,
    order_matching_fee_sat: i64,
    trader_realized_pnl_sat: Option<i64>,
}
/// Persist a new trade and return the stored row as a domain model.
pub fn insert(
    conn: &mut PgConnection,
    trade: crate::trade::models::NewTrade,
) -> QueryResult<crate::trade::models::Trade> {
    let row = diesel::insert_into(trades::table)
        .values(NewTrade::from(trade))
        .get_result::<Trade>(conn)?;

    Ok(row.into())
}
/// Fetch the most recent trade (highest id) for the given position, if any.
pub fn get_latest_for_position(
    conn: &mut PgConnection,
    position_id: i32,
) -> Result<Option<crate::trade::models::Trade>> {
    let row: Option<Trade> = trades::table
        .filter(trades::position_id.eq(position_id))
        .order_by(trades::id.desc())
        .first(conn)
        .optional()?;

    Ok(row.map(Into::into))
}
/// Load all trades belonging to the given trader.
pub fn get_trades(
    connection: &mut PgConnection,
    trader_pubkey: PublicKey,
) -> Result<Vec<crate::trade::models::Trade>> {
    let rows = trades::table
        .filter(trades::trader_pubkey.eq(trader_pubkey.to_string()))
        .load::<Trade>(connection)?;

    Ok(rows
        .into_iter()
        .map(crate::trade::models::Trade::from)
        .collect())
}
impl From<crate::trade::models::NewTrade> for NewTrade {
    fn from(value: crate::trade::models::NewTrade) -> Self {
        // Fees are stored as sats in an i64 column.
        let order_matching_fee_sat = value.order_matching_fee.to_sat() as i64;

        Self {
            position_id: value.position_id,
            contract_symbol: value.contract_symbol.into(),
            trader_pubkey: value.trader_pubkey.to_string(),
            quantity: value.quantity,
            trader_leverage: value.trader_leverage,
            direction: value.trader_direction.into(),
            average_price: value.average_price,
            order_matching_fee_sat,
            trader_realized_pnl_sat: value.trader_realized_pnl_sat,
        }
    }
}
impl From<Trade> for crate::trade::models::Trade {
    fn from(value: Trade) -> Self {
        // The DB only ever stores well-formed public keys, hence the `expect`.
        let trader_pubkey =
            PublicKey::from_str(value.trader_pubkey.as_str()).expect("public key to decode");

        Self {
            id: value.id,
            position_id: value.position_id,
            contract_symbol: value.contract_symbol.into(),
            trader_pubkey,
            quantity: value.quantity,
            trader_leverage: value.trader_leverage,
            direction: value.direction.into(),
            average_price: value.average_price,
            timestamp: value.timestamp,
            order_matching_fee: Amount::from_sat(value.order_matching_fee_sat as u64),
            trader_realized_pnl_sat: value.trader_realized_pnl_sat,
        }
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/mod.rs | coordinator/src/db/mod.rs | pub mod bonus_status;
pub mod bonus_tiers;
pub mod channel_opening_params;
pub mod collaborative_reverts;
pub mod custom_types;
pub mod dlc_channels;
pub mod dlc_messages;
pub mod dlc_protocols;
pub mod hodl_invoice;
pub mod last_outbound_dlc_message;
pub mod liquidity_options;
pub mod metrics;
pub mod polls;
pub mod positions;
pub mod reported_errors;
pub mod rollover_params;
pub mod spendable_outputs;
pub mod trade_params;
pub mod trades;
pub mod transactions;
pub mod user;
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/dlc_protocols.rs | coordinator/src/db/dlc_protocols.rs | use crate::db;
use crate::dlc_protocol;
use crate::schema::dlc_protocols;
use crate::schema::sql_types::ProtocolStateType;
use crate::schema::sql_types::ProtocolTypeType;
use bitcoin::secp256k1::PublicKey;
use diesel::query_builder::QueryId;
use diesel::AsExpression;
use diesel::ExpressionMethods;
use diesel::FromSqlRow;
use diesel::PgConnection;
use diesel::QueryDsl;
use diesel::QueryResult;
use diesel::Queryable;
use diesel::RunQueryDsl;
use dlc_manager::ContractId;
use dlc_manager::DlcChannelId;
use hex::FromHex;
use std::any::TypeId;
use std::str::FromStr;
use time::OffsetDateTime;
use uuid::Uuid;
use xxi_node::node::ProtocolId;
/// Execution state of a DLC protocol row in the `dlc_protocols` table.
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression, Eq, Hash)]
#[diesel(sql_type = ProtocolStateType)]
pub(crate) enum DlcProtocolState {
    Pending,
    Success,
    Failed,
}
// Diesel requires a `QueryId` impl for custom SQL types. Reporting no static
// query id opts queries using this type out of prepared-statement caching.
impl QueryId for ProtocolStateType {
    type QueryId = ProtocolStateType;
    const HAS_STATIC_QUERY_ID: bool = false;

    fn query_id() -> Option<TypeId> {
        None
    }
}
/// Discriminant for the kind of DLC protocol recorded in `dlc_protocols`; the
/// associated payload lives in separate tables (see `get_dlc_protocol`).
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression, Eq, Hash)]
#[diesel(sql_type = ProtocolTypeType)]
pub(crate) enum DlcProtocolType {
    OpenChannel,
    OpenPosition,
    Settle,
    Close,
    ForceClose,
    Rollover,
    ResizePosition,
}
// Diesel requires a `QueryId` impl for custom SQL types. Reporting no static
// query id opts queries using this type out of prepared-statement caching.
impl QueryId for ProtocolTypeType {
    type QueryId = ProtocolTypeType;
    const HAS_STATIC_QUERY_ID: bool = false;

    fn query_id() -> Option<TypeId> {
        None
    }
}
// Row mapping for the `dlc_protocols` table.
//
// NOTE(review): the attribute says `table_name = protocols` while every query in
// this module uses `dlc_protocols::table`. `Queryable` maps columns positionally,
// so the attribute is likely inert — confirm it is not a stale leftover.
#[derive(Queryable, Debug)]
#[diesel(table_name = protocols)]
#[allow(dead_code)] // We have to allow dead code here because diesel needs the fields to be able to derive queryable.
pub(crate) struct DlcProtocol {
    pub id: i32,
    pub protocol_id: Uuid,
    pub previous_protocol_id: Option<Uuid>,
    // Hex-encoded DLC channel id.
    pub channel_id: String,
    // Hex-encoded contract id, if the protocol has one.
    pub contract_id: Option<String>,
    pub protocol_state: DlcProtocolState,
    pub trader_pubkey: String,
    pub timestamp: OffsetDateTime,
    pub protocol_type: DlcProtocolType,
}
pub(crate) fn get_dlc_protocol(
conn: &mut PgConnection,
protocol_id: ProtocolId,
) -> QueryResult<dlc_protocol::DlcProtocol> {
let dlc_protocol: DlcProtocol = dlc_protocols::table
.filter(dlc_protocols::protocol_id.eq(protocol_id.to_uuid()))
.first(conn)?;
let protocol_type = match dlc_protocol.protocol_type {
DlcProtocolType::OpenChannel => {
let trade_params = db::trade_params::get(conn, protocol_id)?;
dlc_protocol::DlcProtocolType::OpenChannel { trade_params }
}
DlcProtocolType::OpenPosition => {
let trade_params = db::trade_params::get(conn, protocol_id)?;
dlc_protocol::DlcProtocolType::OpenPosition { trade_params }
}
DlcProtocolType::Settle => {
let trade_params = db::trade_params::get(conn, protocol_id)?;
dlc_protocol::DlcProtocolType::Settle { trade_params }
}
DlcProtocolType::Close => dlc_protocol::DlcProtocolType::Close {
trader: PublicKey::from_str(&dlc_protocol.trader_pubkey).expect("valid public key"),
},
DlcProtocolType::ForceClose => dlc_protocol::DlcProtocolType::ForceClose {
trader: PublicKey::from_str(&dlc_protocol.trader_pubkey).expect("valid public key"),
},
DlcProtocolType::Rollover => {
let rollover_params = db::rollover_params::get(conn, protocol_id)?;
dlc_protocol::DlcProtocolType::Rollover { rollover_params }
}
DlcProtocolType::ResizePosition => {
let trade_params = db::trade_params::get(conn, protocol_id)?;
dlc_protocol::DlcProtocolType::ResizePosition { trade_params }
}
};
let protocol = dlc_protocol::DlcProtocol {
id: dlc_protocol.protocol_id.into(),
previous_id: dlc_protocol
.previous_protocol_id
.map(|previous_id| previous_id.into()),
timestamp: dlc_protocol.timestamp,
channel_id: DlcChannelId::from_hex(&dlc_protocol.channel_id).expect("valid dlc channel id"),
contract_id: dlc_protocol
.contract_id
.map(|cid| ContractId::from_hex(cid).expect("valid contract id")),
trader: PublicKey::from_str(&dlc_protocol.trader_pubkey).expect("valid public key"),
protocol_state: dlc_protocol.protocol_state.into(),
protocol_type,
};
Ok(protocol)
}
pub(crate) fn set_dlc_protocol_state_to_failed(
conn: &mut PgConnection,
protocol_id: ProtocolId,
) -> QueryResult<()> {
let affected_rows = diesel::update(dlc_protocols::table)
.filter(dlc_protocols::protocol_id.eq(protocol_id.to_uuid()))
.set(dlc_protocols::protocol_state.eq(DlcProtocolState::Failed))
.execute(conn)?;
if affected_rows == 0 {
return Err(diesel::result::Error::NotFound);
}
Ok(())
}
pub(crate) fn set_dlc_protocol_state_to_success(
conn: &mut PgConnection,
protocol_id: ProtocolId,
contract_id: Option<&ContractId>,
channel_id: &DlcChannelId,
) -> QueryResult<()> {
let affected_rows = diesel::update(dlc_protocols::table)
.filter(dlc_protocols::protocol_id.eq(protocol_id.to_uuid()))
.set((
dlc_protocols::protocol_state.eq(DlcProtocolState::Success),
dlc_protocols::contract_id.eq(contract_id.map(hex::encode)),
dlc_protocols::channel_id.eq(hex::encode(channel_id)),
))
.execute(conn)?;
if affected_rows == 0 {
return Err(diesel::result::Error::NotFound);
}
Ok(())
}
pub(crate) fn create(
conn: &mut PgConnection,
protocol_id: ProtocolId,
previous_protocol_id: Option<ProtocolId>,
contract_id: Option<&ContractId>,
channel_id: &DlcChannelId,
protocol_type: impl Into<DlcProtocolType>,
trader: &PublicKey,
) -> QueryResult<()> {
let affected_rows = diesel::insert_into(dlc_protocols::table)
.values(&(
dlc_protocols::protocol_id.eq(protocol_id.to_uuid()),
dlc_protocols::previous_protocol_id.eq(previous_protocol_id.map(|ppid| ppid.to_uuid())),
dlc_protocols::contract_id.eq(contract_id.map(hex::encode)),
dlc_protocols::channel_id.eq(hex::encode(channel_id)),
dlc_protocols::protocol_state.eq(DlcProtocolState::Pending),
dlc_protocols::trader_pubkey.eq(trader.to_string()),
dlc_protocols::timestamp.eq(OffsetDateTime::now_utc()),
dlc_protocols::protocol_type.eq(protocol_type.into()),
))
.execute(conn)?;
if affected_rows == 0 {
return Err(diesel::result::Error::NotFound);
}
Ok(())
}
impl From<dlc_protocol::DlcProtocolState> for DlcProtocolState {
fn from(value: dlc_protocol::DlcProtocolState) -> Self {
match value {
dlc_protocol::DlcProtocolState::Pending => DlcProtocolState::Pending,
dlc_protocol::DlcProtocolState::Success => DlcProtocolState::Success,
dlc_protocol::DlcProtocolState::Failed => DlcProtocolState::Failed,
}
}
}
impl From<DlcProtocolState> for dlc_protocol::DlcProtocolState {
fn from(value: DlcProtocolState) -> Self {
match value {
DlcProtocolState::Pending => dlc_protocol::DlcProtocolState::Pending,
DlcProtocolState::Success => dlc_protocol::DlcProtocolState::Success,
DlcProtocolState::Failed => dlc_protocol::DlcProtocolState::Failed,
}
}
}
impl From<&dlc_protocol::DlcProtocolType> for DlcProtocolType {
fn from(value: &dlc_protocol::DlcProtocolType) -> Self {
match value {
dlc_protocol::DlcProtocolType::OpenChannel { .. } => DlcProtocolType::OpenChannel,
dlc_protocol::DlcProtocolType::OpenPosition { .. } => DlcProtocolType::OpenPosition,
dlc_protocol::DlcProtocolType::Settle { .. } => DlcProtocolType::Settle,
dlc_protocol::DlcProtocolType::Close { .. } => DlcProtocolType::Close,
dlc_protocol::DlcProtocolType::ForceClose { .. } => DlcProtocolType::ForceClose,
dlc_protocol::DlcProtocolType::Rollover { .. } => DlcProtocolType::Rollover,
dlc_protocol::DlcProtocolType::ResizePosition { .. } => DlcProtocolType::ResizePosition,
}
}
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/last_outbound_dlc_message.rs | coordinator/src/db/last_outbound_dlc_message.rs | use crate::db::dlc_messages::MessageType;
use crate::schema;
use crate::schema::dlc_messages;
use crate::schema::last_outbound_dlc_messages;
use anyhow::ensure;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use diesel::AsChangeset;
use diesel::ExpressionMethods;
use diesel::Insertable;
use diesel::JoinOnDsl;
use diesel::OptionalExtension;
use diesel::PgConnection;
use diesel::QueryDsl;
use diesel::QueryResult;
use diesel::Queryable;
use diesel::QueryableByName;
use diesel::RunQueryDsl;
use time::OffsetDateTime;
use xxi_node::dlc_message::SerializedDlcMessage;
/// Row type for `last_outbound_dlc_messages`: at most one entry per peer
/// (enforced via the upsert on `peer_id` in `upsert`), holding the latest
/// serialized DLC message sent to that peer.
#[derive(Insertable, QueryableByName, Queryable, Debug, Clone, PartialEq, AsChangeset)]
#[diesel(table_name = last_outbound_dlc_messages)]
pub(crate) struct LastOutboundDlcMessage {
    pub peer_id: String,
    // Hash linking into `dlc_messages::message_hash` (see `get`).
    pub message_hash: String,
    pub message: String,
    pub timestamp: OffsetDateTime,
}
/// Remove the stored last outbound message for `peer_id`, returning the number
/// of deleted rows.
pub(crate) fn delete(conn: &mut PgConnection, peer_id: &PublicKey) -> QueryResult<usize> {
    let target = last_outbound_dlc_messages::table
        .filter(last_outbound_dlc_messages::peer_id.eq(peer_id.to_string()));

    diesel::delete(target).execute(conn)
}
/// Fetch the last outbound DLC message for `peer_id`, joined with its message
/// type from `dlc_messages`. Returns `Ok(None)` if nothing was recorded.
pub(crate) fn get(
    conn: &mut PgConnection,
    peer_id: &PublicKey,
) -> QueryResult<Option<SerializedDlcMessage>> {
    let row: Option<(MessageType, String)> = last_outbound_dlc_messages::table
        .inner_join(
            dlc_messages::table
                .on(dlc_messages::message_hash.eq(last_outbound_dlc_messages::message_hash)),
        )
        .filter(last_outbound_dlc_messages::peer_id.eq(peer_id.to_string()))
        .select((
            dlc_messages::message_type,
            last_outbound_dlc_messages::message,
        ))
        .first(conn)
        .optional()?;

    Ok(row.map(|(message_type, message)| SerializedDlcMessage {
        message,
        message_type: xxi_node::dlc_message::DlcMessageType::from(message_type),
    }))
}
/// Insert or replace the last outbound DLC message for `peer_id`.
///
/// # Errors
///
/// Fails if the statement affected no rows.
pub(crate) fn upsert(
    conn: &mut PgConnection,
    peer_id: &PublicKey,
    sdm: SerializedDlcMessage,
) -> Result<()> {
    let values = (
        last_outbound_dlc_messages::peer_id.eq(peer_id.to_string()),
        last_outbound_dlc_messages::message_hash.eq(sdm.generate_hash()),
        last_outbound_dlc_messages::message.eq(sdm.message),
    );

    // `values` is needed twice (insert + conflict update), hence one clone.
    // Fix: previously `&values.clone()` borrowed a cloned temporary for no
    // benefit; pass the clone by value instead.
    let affected_rows = diesel::insert_into(last_outbound_dlc_messages::table)
        .values(values.clone())
        .on_conflict(schema::last_outbound_dlc_messages::peer_id)
        .do_update()
        .set(values)
        .execute(conn)?;

    ensure!(
        affected_rows > 0,
        "Could not upsert last outbound dlc messages"
    );

    Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/metrics.rs | coordinator/src/db/metrics.rs | use crate::schema::metrics;
use anyhow::ensure;
use anyhow::Result;
use diesel::ExpressionMethods;
use diesel::PgConnection;
use diesel::RunQueryDsl;
/// Record a metrics snapshot of the coordinator's on-chain balance (in sats).
pub fn create_metrics_entry(conn: &mut PgConnection, on_chain_balance: u64) -> Result<()> {
    // The column is i64; assumed to fit — the cast mirrors the original behavior.
    let balance_sats = on_chain_balance as i64;

    let affected_rows = diesel::insert_into(metrics::table)
        .values(metrics::on_chain_balance_sats.eq(balance_sats))
        .execute(conn)?;

    ensure!(affected_rows > 0, "Could not insert metric entry");

    Ok(())
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/custom_types.rs | coordinator/src/db/custom_types.rs | use crate::db::bonus_status::BonusType;
use crate::db::dlc_channels::DlcChannelState;
use crate::db::dlc_messages::MessageType;
use crate::db::dlc_protocols::DlcProtocolState;
use crate::db::dlc_protocols::DlcProtocolType;
use crate::db::hodl_invoice::InvoiceState;
use crate::db::polls::PollType;
use crate::db::positions::ContractSymbol;
use crate::db::positions::PositionState;
use crate::schema::sql_types::BonusStatusType;
use crate::schema::sql_types::ContractSymbolType;
use crate::schema::sql_types::DirectionType;
use crate::schema::sql_types::DlcChannelStateType;
use crate::schema::sql_types::InvoiceStateType;
use crate::schema::sql_types::MessageTypeType;
use crate::schema::sql_types::PollTypeType;
use crate::schema::sql_types::PositionStateType;
use crate::schema::sql_types::ProtocolStateType;
use crate::schema::sql_types::ProtocolTypeType;
use diesel::deserialize;
use diesel::deserialize::FromSql;
use diesel::pg::Pg;
use diesel::pg::PgValue;
use diesel::serialize;
use diesel::serialize::IsNull;
use diesel::serialize::Output;
use diesel::serialize::ToSql;
use std::io::Write;
use xxi_node::commons::Direction;
impl ToSql<ContractSymbolType, Pg> for ContractSymbol {
    // Serialize the variant as its canonical database label.
    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
        let label: &'static [u8] = match *self {
            ContractSymbol::BtcUsd => b"BtcUsd",
        };
        out.write_all(label)?;
        Ok(IsNull::No)
    }
}
impl FromSql<ContractSymbolType, Pg> for ContractSymbol {
    // Decode the database label back into the enum variant.
    fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
        if bytes.as_bytes() == b"BtcUsd" {
            Ok(ContractSymbol::BtcUsd)
        } else {
            Err("Unrecognized enum variant".into())
        }
    }
}
impl ToSql<PositionStateType, Pg> for PositionState {
    // Serialize the variant as its canonical database label.
    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
        let label: &'static [u8] = match *self {
            PositionState::Open => b"Open",
            PositionState::Closing => b"Closing",
            PositionState::Closed => b"Closed",
            PositionState::Rollover => b"Rollover",
            PositionState::Resizing => b"Resizing",
            PositionState::Proposed => b"Proposed",
            PositionState::Failed => b"Failed",
        };
        out.write_all(label)?;
        Ok(IsNull::No)
    }
}
impl FromSql<PositionStateType, Pg> for PositionState {
    // Decode the database label back into the enum variant.
    fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
        let state = match bytes.as_bytes() {
            b"Open" => PositionState::Open,
            b"Closing" => PositionState::Closing,
            b"Closed" => PositionState::Closed,
            b"Rollover" => PositionState::Rollover,
            b"Resizing" => PositionState::Resizing,
            b"Proposed" => PositionState::Proposed,
            b"Failed" => PositionState::Failed,
            _ => return Err("Unrecognized enum variant".into()),
        };
        Ok(state)
    }
}
impl ToSql<DirectionType, Pg> for Direction {
    // Serialize the variant as its canonical database label.
    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
        let label: &'static [u8] = match *self {
            Direction::Long => b"Long",
            Direction::Short => b"Short",
        };
        out.write_all(label)?;
        Ok(IsNull::No)
    }
}
impl FromSql<DirectionType, Pg> for Direction {
    // Decode the database label back into the enum variant.
    fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
        let direction = match bytes.as_bytes() {
            b"Long" => Direction::Long,
            b"Short" => Direction::Short,
            _ => return Err("Unrecognized enum variant".into()),
        };
        Ok(direction)
    }
}
impl ToSql<MessageTypeType, Pg> for MessageType {
    // Serialize the variant as its canonical database label.
    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
        let label: &'static [u8] = match *self {
            MessageType::Offer => b"Offer",
            MessageType::Accept => b"Accept",
            MessageType::Sign => b"Sign",
            MessageType::SettleOffer => b"SettleOffer",
            MessageType::SettleAccept => b"SettleAccept",
            MessageType::SettleConfirm => b"SettleConfirm",
            MessageType::SettleFinalize => b"SettleFinalize",
            MessageType::RenewOffer => b"RenewOffer",
            MessageType::RenewAccept => b"RenewAccept",
            MessageType::RenewConfirm => b"RenewConfirm",
            MessageType::RenewFinalize => b"RenewFinalize",
            MessageType::RenewRevoke => b"RenewRevoke",
            MessageType::RolloverOffer => b"RolloverOffer",
            MessageType::RolloverAccept => b"RolloverAccept",
            MessageType::RolloverConfirm => b"RolloverConfirm",
            MessageType::RolloverFinalize => b"RolloverFinalize",
            MessageType::RolloverRevoke => b"RolloverRevoke",
            MessageType::CollaborativeCloseOffer => b"CollaborativeCloseOffer",
            MessageType::Reject => b"Reject",
        };
        out.write_all(label)?;
        Ok(IsNull::No)
    }
}
impl FromSql<MessageTypeType, Pg> for MessageType {
    // Decode the database label back into the enum variant.
    fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
        let message_type = match bytes.as_bytes() {
            b"Offer" => MessageType::Offer,
            b"Accept" => MessageType::Accept,
            b"Sign" => MessageType::Sign,
            b"SettleOffer" => MessageType::SettleOffer,
            b"SettleAccept" => MessageType::SettleAccept,
            b"SettleConfirm" => MessageType::SettleConfirm,
            b"SettleFinalize" => MessageType::SettleFinalize,
            b"RenewOffer" => MessageType::RenewOffer,
            b"RenewAccept" => MessageType::RenewAccept,
            b"RenewConfirm" => MessageType::RenewConfirm,
            b"RenewFinalize" => MessageType::RenewFinalize,
            b"RenewRevoke" => MessageType::RenewRevoke,
            b"RolloverOffer" => MessageType::RolloverOffer,
            b"RolloverAccept" => MessageType::RolloverAccept,
            b"RolloverConfirm" => MessageType::RolloverConfirm,
            b"RolloverFinalize" => MessageType::RolloverFinalize,
            b"RolloverRevoke" => MessageType::RolloverRevoke,
            b"CollaborativeCloseOffer" => MessageType::CollaborativeCloseOffer,
            b"Reject" => MessageType::Reject,
            _ => return Err("Unrecognized enum variant".into()),
        };
        Ok(message_type)
    }
}
/// Serialize a [`PollType`] into its Postgres enum label.
impl ToSql<PollTypeType, Pg> for PollType {
    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
        let label: &'static [u8] = match self {
            PollType::SingleChoice => b"SingleChoice",
        };
        out.write_all(label)?;
        Ok(IsNull::No)
    }
}
/// Deserialize a [`PollType`] from its Postgres enum label.
impl FromSql<PollTypeType, Pg> for PollType {
    fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
        let poll_type = match bytes.as_bytes() {
            b"SingleChoice" => PollType::SingleChoice,
            _ => return Err("Unrecognized enum variant for PollType".into()),
        };
        Ok(poll_type)
    }
}
/// Serialize a [`DlcProtocolState`] into its Postgres enum label.
impl ToSql<ProtocolStateType, Pg> for DlcProtocolState {
    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
        let label: &'static [u8] = match self {
            DlcProtocolState::Pending => b"Pending",
            DlcProtocolState::Success => b"Success",
            DlcProtocolState::Failed => b"Failed",
        };
        out.write_all(label)?;
        Ok(IsNull::No)
    }
}
/// Deserialize a [`DlcProtocolState`] from its Postgres enum label.
impl FromSql<ProtocolStateType, Pg> for DlcProtocolState {
    fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
        let state = match bytes.as_bytes() {
            b"Pending" => DlcProtocolState::Pending,
            b"Success" => DlcProtocolState::Success,
            b"Failed" => DlcProtocolState::Failed,
            _ => return Err("Unrecognized enum variant for ProtocolStateType".into()),
        };
        Ok(state)
    }
}
/// Serialize a [`DlcProtocolType`] into its kebab-case Postgres enum label.
impl ToSql<ProtocolTypeType, Pg> for DlcProtocolType {
    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
        // Note: unlike the other enums in this module, these labels are kebab-case.
        let label: &'static [u8] = match self {
            DlcProtocolType::OpenChannel => b"open-channel",
            DlcProtocolType::Settle => b"settle",
            DlcProtocolType::OpenPosition => b"open-position",
            DlcProtocolType::Rollover => b"rollover",
            DlcProtocolType::Close => b"close",
            DlcProtocolType::ForceClose => b"force-close",
            DlcProtocolType::ResizePosition => b"resize-position",
        };
        out.write_all(label)?;
        Ok(IsNull::No)
    }
}
/// Deserialize a [`DlcProtocolType`] from its kebab-case Postgres enum label.
impl FromSql<ProtocolTypeType, Pg> for DlcProtocolType {
    fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
        let protocol_type = match bytes.as_bytes() {
            b"open-channel" => DlcProtocolType::OpenChannel,
            b"settle" => DlcProtocolType::Settle,
            b"open-position" => DlcProtocolType::OpenPosition,
            b"rollover" => DlcProtocolType::Rollover,
            b"close" => DlcProtocolType::Close,
            b"force-close" => DlcProtocolType::ForceClose,
            b"resize-position" => DlcProtocolType::ResizePosition,
            _ => return Err("Unrecognized enum variant for ProtocolTypeType".into()),
        };
        Ok(protocol_type)
    }
}
/// Serialize a [`DlcChannelState`] into its Postgres enum label.
///
/// Consistency fix: this impl previously used the elided-lifetime signature
/// `fn to_sql(&self, out: &mut Output<Pg>)`, unlike every other `ToSql` impl
/// in this module. It is now aligned to the explicit `'b` form required by
/// diesel 2.x's `ToSql` trait definition.
impl ToSql<DlcChannelStateType, Pg> for DlcChannelState {
    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
        match *self {
            DlcChannelState::Pending => out.write_all(b"Pending")?,
            DlcChannelState::Open => out.write_all(b"Open")?,
            DlcChannelState::Closing => out.write_all(b"Closing")?,
            DlcChannelState::Closed => out.write_all(b"Closed")?,
            DlcChannelState::Failed => out.write_all(b"Failed")?,
            DlcChannelState::Cancelled => out.write_all(b"Cancelled")?,
        }
        Ok(IsNull::No)
    }
}
/// Deserialize a [`DlcChannelState`] from its Postgres enum label.
impl FromSql<DlcChannelStateType, Pg> for DlcChannelState {
    fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
        match bytes.as_bytes() {
            b"Pending" => Ok(DlcChannelState::Pending),
            b"Open" => Ok(DlcChannelState::Open),
            b"Closing" => Ok(DlcChannelState::Closing),
            b"Closed" => Ok(DlcChannelState::Closed),
            b"Failed" => Ok(DlcChannelState::Failed),
            b"Cancelled" => Ok(DlcChannelState::Cancelled),
            _ => Err("Unrecognized enum variant".into()),
        }
    }
}
/// Serialize a [`BonusType`] into its Postgres enum label.
impl ToSql<BonusStatusType, Pg> for BonusType {
    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
        let label: &'static [u8] = match self {
            BonusType::Referral => b"Referral",
            BonusType::Referent => b"Referent",
        };
        out.write_all(label)?;
        Ok(IsNull::No)
    }
}
/// Deserialize a [`BonusType`] from its Postgres enum label.
impl FromSql<BonusStatusType, Pg> for BonusType {
    fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
        let bonus_type = match bytes.as_bytes() {
            b"Referral" => BonusType::Referral,
            b"Referent" => BonusType::Referent,
            _ => return Err("Unrecognized enum variant".into()),
        };
        Ok(bonus_type)
    }
}
/// Serialize an [`InvoiceState`] into its Postgres enum label.
impl ToSql<InvoiceStateType, Pg> for InvoiceState {
    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
        let label: &'static [u8] = match self {
            InvoiceState::Open => b"Open",
            InvoiceState::Accepted => b"Accepted",
            InvoiceState::Settled => b"Settled",
            InvoiceState::Failed => b"Failed",
            InvoiceState::Canceled => b"Canceled",
        };
        out.write_all(label)?;
        Ok(IsNull::No)
    }
}
/// Deserialize an [`InvoiceState`] from its Postgres enum label.
impl FromSql<InvoiceStateType, Pg> for InvoiceState {
    fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
        let state = match bytes.as_bytes() {
            b"Open" => InvoiceState::Open,
            b"Accepted" => InvoiceState::Accepted,
            b"Settled" => InvoiceState::Settled,
            b"Failed" => InvoiceState::Failed,
            b"Canceled" => InvoiceState::Canceled,
            _ => return Err("Unrecognized enum variant".into()),
        };
        Ok(state)
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/liquidity_options.rs | coordinator/src/db/liquidity_options.rs | use crate::schema::liquidity_options;
use diesel::PgConnection;
use diesel::QueryResult;
use diesel::Queryable;
use diesel::RunQueryDsl;
use time::OffsetDateTime;
use xxi_node::commons;
/// Database row for a liquidity option offered by the coordinator.
#[derive(Queryable, Debug, Clone, PartialEq)]
#[diesel(table_name = liquidity_options)]
pub(crate) struct LiquidityOption {
    pub id: i32,
    /// Rank of this option -- presumably used for display ordering; confirm
    /// against the consumers of this field.
    pub rank: i16,
    pub title: String,
    /// amount the user can trade up to in sats
    pub trade_up_to_sats: i64,
    /// min deposit in sats
    pub min_deposit_sats: i64,
    /// max deposit in sats
    pub max_deposit_sats: i64,
    /// min fee in sats
    pub min_fee_sats: Option<i64>,
    /// Fee expressed as a percentage.
    pub fee_percentage: f64,
    /// Leverage the coordinator applies for this option.
    pub coordinator_leverage: f32,
    /// Whether this option is currently offered.
    pub active: bool,
    pub created_at: OffsetDateTime,
    pub updated_at: OffsetDateTime,
}
/// Load every liquidity option row from the database and convert each into the
/// shared `commons` representation.
pub(crate) fn get_all(conn: &mut PgConnection) -> QueryResult<Vec<commons::LiquidityOption>> {
    let rows: Vec<LiquidityOption> = liquidity_options::table.load(conn)?;
    let options = rows
        .into_iter()
        .map(commons::LiquidityOption::from)
        .collect();
    Ok(options)
}
/// Conversion from the database row into the shared `commons` model.
impl From<LiquidityOption> for commons::LiquidityOption {
    fn from(value: LiquidityOption) -> Self {
        commons::LiquidityOption {
            id: value.id,
            // NOTE(review): the `as` casts below assume the stored values are
            // non-negative; a negative i64 would wrap into a huge u64 --
            // presumably guaranteed by how rows are written; confirm.
            rank: value.rank as usize,
            title: value.title,
            trade_up_to_sats: value.trade_up_to_sats as u64,
            min_deposit_sats: value.min_deposit_sats as u64,
            max_deposit_sats: value.max_deposit_sats as u64,
            // A missing minimum fee is treated as zero.
            min_fee_sats: value.min_fee_sats.unwrap_or(0) as u64,
            fee_percentage: value.fee_percentage,
            coordinator_leverage: value.coordinator_leverage,
            created_at: value.created_at,
            updated_at: value.updated_at,
            active: value.active,
        }
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/collaborative_reverts.rs | coordinator/src/db/collaborative_reverts.rs | use crate::parse_dlc_channel_id;
use crate::position;
use crate::schema::collaborative_reverts;
use anyhow::ensure;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Address;
use bitcoin::Amount;
use bitcoin::Denomination;
use bitcoin::Network;
use diesel::prelude::*;
use diesel::AsChangeset;
use diesel::Insertable;
use diesel::OptionalExtension;
use diesel::PgConnection;
use diesel::Queryable;
use diesel::RunQueryDsl;
use dlc_manager::DlcChannelId;
use rust_decimal::prelude::FromPrimitive;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use std::str::FromStr;
use time::OffsetDateTime;
/// Database row describing a proposed collaborative revert of a DLC channel.
#[derive(Queryable, AsChangeset, Debug, Clone, PartialEq)]
#[diesel(table_name = collaborative_reverts)]
pub(crate) struct CollaborativeRevert {
    id: i32,
    /// Hex-encoded DLC channel id.
    channel_id: String,
    /// The trader's public key, stored as a string.
    trader_pubkey: String,
    /// Price associated with the revert proposal.
    price: f32,
    /// Address the coordinator's share is paid to, stored as a string.
    coordinator_address: String,
    coordinator_amount_sats: i64,
    trader_amount_sats: i64,
    timestamp: OffsetDateTime,
}
/// Insertable version of [`CollaborativeRevert`] (no `id`, which the database
/// generates).
#[derive(Insertable, Queryable, AsChangeset, Debug, Clone, PartialEq)]
#[diesel(table_name = collaborative_reverts)]
pub(crate) struct NewCollaborativeRevert {
    channel_id: String,
    trader_pubkey: String,
    price: f32,
    coordinator_address: String,
    coordinator_amount_sats: i64,
    trader_amount_sats: i64,
    timestamp: OffsetDateTime,
}
pub(crate) fn by_trader_pubkey(
trader_pubkey: &str,
network: Network,
conn: &mut PgConnection,
) -> Result<Option<position::models::CollaborativeRevert>> {
let result: Option<CollaborativeRevert> = collaborative_reverts::table
.filter(collaborative_reverts::trader_pubkey.eq(trader_pubkey))
.first(conn)
.optional()?;
if let Some(rev) = result {
let rev = (rev, network).try_into()?;
Ok(Some(rev))
} else {
Ok(None)
}
}
/// Look up the collaborative revert stored for the given DLC channel, if any.
///
/// Returns `Ok(None)` when no row exists; row-to-model conversion errors are
/// propagated.
pub(crate) fn get_by_channel_id(
    conn: &mut PgConnection,
    channel_id: &DlcChannelId,
    network: Network,
) -> Result<Option<position::models::CollaborativeRevert>> {
    // Channel ids are stored hex-encoded.
    let channel_id = hex::encode(channel_id);
    collaborative_reverts::table
        .filter(collaborative_reverts::channel_id.eq(channel_id))
        .first(conn)
        .optional()?
        .map(|rev: CollaborativeRevert| anyhow::Ok((rev, network).try_into()?))
        .transpose()
}
/// Persist a collaborative revert proposal.
///
/// Fails if the insert did not affect any rows.
pub(crate) fn insert(
    conn: &mut PgConnection,
    // Renamed from `collaborative_reverts`, which shadowed the schema module of
    // the same name (it compiled, since values and modules live in separate
    // namespaces, but read confusingly).
    collaborative_revert: position::models::CollaborativeRevert,
) -> Result<()> {
    let revert = NewCollaborativeRevert::from(collaborative_revert);
    let affected_rows = diesel::insert_into(collaborative_reverts::table)
        .values(revert)
        .execute(conn)?;
    ensure!(affected_rows > 0, "Could not insert collaborative revert");
    Ok(())
}
/// Delete any collaborative revert stored for the given DLC channel.
pub(crate) fn delete(conn: &mut PgConnection, channel_id: DlcChannelId) -> Result<()> {
    // Channel ids are stored hex-encoded.
    let encoded_channel_id = hex::encode(channel_id);
    diesel::delete(
        collaborative_reverts::table
            .filter(collaborative_reverts::channel_id.eq(encoded_channel_id)),
    )
    .execute(conn)?;
    Ok(())
}
/// Conversion from the domain model into an insertable database row.
impl From<position::models::CollaborativeRevert> for NewCollaborativeRevert {
    fn from(value: position::models::CollaborativeRevert) -> Self {
        NewCollaborativeRevert {
            // The channel id is stored hex-encoded.
            channel_id: hex::encode(value.channel_id),
            trader_pubkey: value.trader_pubkey.to_string(),
            price: value.price.to_f32().expect("to be valid f32"),
            coordinator_address: value.coordinator_address.to_string(),
            coordinator_amount_sats: value.coordinator_amount_sats.to_sat() as i64,
            trader_amount_sats: value.trader_amount_sats.to_sat() as i64,
            timestamp: value.timestamp,
        }
    }
}
/// Conversion from a database row back into the domain model.
///
/// Fails if any of the stored strings (channel id, public key, address) do not
/// parse, or if the address does not belong to the given `network`.
impl TryFrom<(CollaborativeRevert, Network)> for position::models::CollaborativeRevert {
    type Error = anyhow::Error;
    fn try_from(
        (rev, network): (CollaborativeRevert, Network),
    ) -> std::result::Result<Self, Self::Error> {
        Ok(position::models::CollaborativeRevert {
            channel_id: parse_dlc_channel_id(rev.channel_id.as_str())?,
            trader_pubkey: PublicKey::from_str(rev.trader_pubkey.as_str())?,
            // `Decimal::from_f32` only fails for NaN/infinite inputs.
            price: Decimal::from_f32(rev.price).expect("to be valid decimal"),
            coordinator_address: Address::from_str(rev.coordinator_address.as_str())?
                .require_network(network)?,
            // NOTE(review): parsing the amount via a string rejects negative
            // values (instead of wrapping them through an `as u64` cast) --
            // presumably intentional; confirm.
            coordinator_amount_sats: Amount::from_str_in(
                rev.coordinator_amount_sats.to_string().as_str(),
                Denomination::Satoshi,
            )?,
            trader_amount_sats: Amount::from_str_in(
                rev.trader_amount_sats.to_string().as_str(),
                Denomination::Satoshi,
            )?,
            timestamp: rev.timestamp,
        })
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/positions.rs | coordinator/src/db/positions.rs | use crate::orderbook::db::custom_types::Direction;
use crate::schema::positions;
use crate::schema::sql_types::ContractSymbolType;
use crate::schema::sql_types::PositionStateType;
use anyhow::bail;
use anyhow::ensure;
use anyhow::Result;
use bitcoin::secp256k1::PublicKey;
use bitcoin::Amount;
use bitcoin::SignedAmount;
use diesel::prelude::*;
use diesel::query_builder::QueryId;
use diesel::result::QueryResult;
use diesel::AsExpression;
use diesel::FromSqlRow;
use dlc_manager::ContractId;
use hex::FromHex;
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;
use std::any::TypeId;
use time::OffsetDateTime;
use xxi_node::commons;
/// Database representation of a trading position.
#[derive(Queryable, Debug, Clone)]
pub struct Position {
    pub id: i32,
    pub contract_symbol: ContractSymbol,
    pub trader_leverage: f32,
    pub quantity: f32,
    pub trader_direction: Direction,
    pub average_entry_price: f32,
    pub trader_liquidation_price: f32,
    pub position_state: PositionState,
    /// Coordinator margin in sats.
    pub coordinator_margin: i64,
    pub creation_timestamp: OffsetDateTime,
    pub expiry_timestamp: OffsetDateTime,
    pub update_timestamp: OffsetDateTime,
    /// The trader's public key, stored as a string.
    pub trader_pubkey: String,
    /// Hex-encoded temporary contract id, if known.
    pub temporary_contract_id: Option<String>,
    /// Realized PNL in sats; written when the position is closed or resized.
    pub trader_realized_pnl_sat: Option<i64>,
    pub trader_unrealized_pnl_sat: Option<i64>,
    /// Price at which the position was (or is being) closed.
    pub closing_price: Option<f32>,
    pub coordinator_leverage: f32,
    /// Trader margin in sats.
    pub trader_margin: i64,
    pub stable: bool,
    pub coordinator_liquidation_price: f32,
    /// Accumulated order-matching fees in sats.
    pub order_matching_fees: i64,
}
impl Position {
    /// Returns the position by trader pub key
    pub fn get_position_by_trader(
        conn: &mut PgConnection,
        trader_pubkey: PublicKey,
        states: Vec<crate::position::models::PositionState>,
    ) -> QueryResult<Option<crate::position::models::Position>> {
        let mut query = positions::table.into_boxed();
        query = query.filter(positions::trader_pubkey.eq(trader_pubkey.to_string()));
        // An empty `states` list means: do not filter by position state.
        if !states.is_empty() {
            query = query.filter(
                positions::position_state.eq_any(states.into_iter().map(PositionState::from)),
            )
        }
        // Return the most recently created matching position, if any.
        let x = query
            .order_by(positions::creation_timestamp.desc())
            .first::<Position>(conn)
            .optional()?;
        Ok(x.map(crate::position::models::Position::from))
    }
    /// Returns all open positions whose expiry lies strictly before `expiry`.
    pub fn get_all_open_positions_with_expiry_before(
        conn: &mut PgConnection,
        expiry: OffsetDateTime,
    ) -> QueryResult<Vec<crate::position::models::Position>> {
        let positions = positions::table
            .filter(positions::position_state.eq(PositionState::Open))
            .filter(positions::expiry_timestamp.lt(expiry))
            .load::<Position>(conn)?;
        let positions = positions
            .into_iter()
            .map(crate::position::models::Position::from)
            .collect();
        Ok(positions)
    }
    /// Get all active positions that were open before `open_before_timestamp`.
    ///
    /// Active positions are either [`PositionState::Open`], [`PositionState::Rollover`] or
    /// [`PositionState::Resizing`].
    pub fn get_all_active_positions_open_before(
        conn: &mut PgConnection,
        open_before_timestamp: OffsetDateTime,
    ) -> QueryResult<Vec<crate::position::models::Position>> {
        let positions = positions::table
            .filter(
                positions::position_state
                    .eq(PositionState::Open)
                    .or(positions::position_state.eq(PositionState::Rollover))
                    .or(positions::position_state.eq(PositionState::Resizing)),
            )
            .filter(positions::creation_timestamp.lt(open_before_timestamp))
            .load::<Position>(conn)?;
        let positions = positions
            .into_iter()
            .map(crate::position::models::Position::from)
            .collect();
        Ok(positions)
    }
    /// Returns all positions that are currently [`PositionState::Open`].
    pub fn get_all_open_positions(
        conn: &mut PgConnection,
    ) -> QueryResult<Vec<crate::position::models::Position>> {
        let positions = positions::table
            .filter(positions::position_state.eq(PositionState::Open))
            .load::<Position>(conn)?;
        let positions = positions
            .into_iter()
            .map(crate::position::models::Position::from)
            .collect();
        Ok(positions)
    }
    /// Returns all positions that are [`PositionState::Closed`].
    pub fn get_all_closed_positions(
        conn: &mut PgConnection,
    ) -> QueryResult<Vec<crate::position::models::Position>> {
        let positions = positions::table
            .filter(positions::position_state.eq(PositionState::Closed))
            .load::<Position>(conn)?;
        let positions = positions
            .into_iter()
            .map(crate::position::models::Position::from)
            .collect();
        Ok(positions)
    }
    /// Returns all positions that are either [`PositionState::Open`] or
    /// [`PositionState::Closing`].
    pub fn get_all_open_or_closing_positions(
        conn: &mut PgConnection,
    ) -> QueryResult<Vec<crate::position::models::Position>> {
        let positions = positions::table
            .filter(
                positions::position_state
                    .eq(PositionState::Open)
                    .or(positions::position_state.eq(PositionState::Closing)),
            )
            .load::<Position>(conn)?;
        let positions = positions
            .into_iter()
            .map(crate::position::models::Position::from)
            .collect();
        Ok(positions)
    }
    /// Set the `position_state` column to `updated`. This will only succeed if the current
    /// value matches one of the values contained in `original`.
    pub fn update_position_state(
        conn: &mut PgConnection,
        trader_pubkey: String,
        original: Vec<crate::position::models::PositionState>,
        updated: crate::position::models::PositionState,
    ) -> QueryResult<crate::position::models::Position> {
        if original.is_empty() {
            // It is not really a `NotFound` error, but `diesel` does not make it easy to build
            // other variants.
            return QueryResult::Err(diesel::result::Error::NotFound);
        }
        let updated = PositionState::from(updated);
        let position: Position = diesel::update(positions::table)
            .filter(positions::trader_pubkey.eq(trader_pubkey.clone()))
            .filter(positions::position_state.eq_any(original.into_iter().map(PositionState::from)))
            .set((
                positions::position_state.eq(updated),
                positions::update_timestamp.eq(OffsetDateTime::now_utc()),
            ))
            .get_result(conn)?;
        Ok(crate::position::models::Position::from(position))
    }
    /// Sets the status of all open positions of the trader to closing (note: we expect that
    /// number to always be exactly 1).
    pub fn set_open_position_to_closing(
        conn: &mut PgConnection,
        trader: &PublicKey,
        closing_price: Option<Decimal>,
    ) -> QueryResult<usize> {
        let closing_price = closing_price.map(|price| price.to_f32().expect("to fit into f32"));
        diesel::update(positions::table)
            .filter(positions::trader_pubkey.eq(trader.to_string()))
            .filter(positions::position_state.eq(PositionState::Open))
            .set((
                positions::position_state.eq(PositionState::Closing),
                positions::closing_price.eq(closing_price),
                positions::update_timestamp.eq(OffsetDateTime::now_utc()),
            ))
            .execute(conn)
    }
    /// Mark the position as [`PositionState::Closed`], recording the trader's realized PNL
    /// and the closing price.
    pub fn set_position_to_closed_with_pnl(
        conn: &mut PgConnection,
        id: i32,
        trader_realized_pnl_sat: i64,
        closing_price: Decimal,
    ) -> QueryResult<usize> {
        diesel::update(positions::table)
            .filter(positions::id.eq(id))
            .set((
                positions::position_state.eq(PositionState::Closed),
                positions::trader_realized_pnl_sat.eq(Some(trader_realized_pnl_sat)),
                positions::update_timestamp.eq(OffsetDateTime::now_utc()),
                positions::closing_price.eq(closing_price.to_f32().expect("to fit into f32")),
            ))
            .execute(conn)
    }
    /// Mark the position as [`PositionState::Closed`] without recording PNL or a closing
    /// price. Fails if no row was updated.
    pub fn set_position_to_closed(conn: &mut PgConnection, id: i32) -> Result<()> {
        let affected_rows = diesel::update(positions::table)
            .filter(positions::id.eq(id))
            .set((
                positions::position_state.eq(PositionState::Closed),
                positions::update_timestamp.eq(OffsetDateTime::now_utc()),
            ))
            .execute(conn)?;
        if affected_rows == 0 {
            bail!("Could not update position to Closed for position {id}")
        }
        Ok(())
    }
    /// Move the trader's open position into [`PositionState::Resizing`], writing the
    /// post-resize position parameters.
    #[allow(clippy::too_many_arguments)]
    pub fn set_position_to_resizing(
        conn: &mut PgConnection,
        trader_pubkey: PublicKey,
        temporary_contract_id: ContractId,
        quantity: Decimal,
        trader_direction: commons::Direction,
        trader_margin: Amount,
        coordinator_margin: Amount,
        average_entry_price: Decimal,
        expiry: OffsetDateTime,
        coordinator_liquidation_price: Decimal,
        trader_liquidation_price: Decimal,
        // Reducing or changing direction may generate PNL.
        realized_pnl: Option<SignedAmount>,
        order_matching_fee: Amount,
    ) -> QueryResult<usize> {
        let resize_trader_realized_pnl_sat = realized_pnl.unwrap_or_default().to_sat();
        diesel::update(positions::table)
            .filter(positions::trader_pubkey.eq(trader_pubkey.to_string()))
            .filter(positions::position_state.eq(PositionState::Open))
            .set((
                positions::position_state.eq(PositionState::Resizing),
                positions::temporary_contract_id.eq(hex::encode(temporary_contract_id)),
                positions::quantity.eq(quantity.to_f32().expect("to fit")),
                positions::trader_direction.eq(Direction::from(trader_direction)),
                positions::average_entry_price.eq(average_entry_price.to_f32().expect("to fit")),
                positions::trader_liquidation_price
                    .eq(trader_liquidation_price.to_f32().expect("to fit")),
                positions::coordinator_liquidation_price
                    .eq(coordinator_liquidation_price.to_f32().expect("to fit")),
                positions::coordinator_margin.eq(coordinator_margin.to_sat() as i64),
                positions::expiry_timestamp.eq(expiry),
                // NOTE(review): this addition happens in SQL; if the column is NULL the sum
                // stays NULL -- confirm the column is always set before a resize.
                positions::trader_realized_pnl_sat
                    .eq(positions::trader_realized_pnl_sat + resize_trader_realized_pnl_sat),
                // Unrealized PNL is reset to zero for the resized position.
                positions::trader_unrealized_pnl_sat.eq(0),
                positions::trader_margin.eq(trader_margin.to_sat() as i64),
                positions::update_timestamp.eq(OffsetDateTime::now_utc()),
                positions::order_matching_fees
                    .eq(positions::order_matching_fees + order_matching_fee.to_sat() as i64),
            ))
            .execute(conn)
    }
    /// Complete a rollover: move the position from [`PositionState::Rollover`] back to
    /// [`PositionState::Open`] with the renewed contract parameters.
    #[allow(clippy::too_many_arguments)]
    pub fn finish_rollover_protocol(
        conn: &mut PgConnection,
        trader_pubkey: String,
        temporary_contract_id: ContractId,
        leverage_coordinator: Decimal,
        margin_coordinator: Amount,
        liquidation_price_coordinator: Decimal,
        leverage_trader: Decimal,
        margin_trader: Amount,
        liquidation_price_trader: Decimal,
    ) -> QueryResult<usize> {
        diesel::update(positions::table)
            .filter(positions::trader_pubkey.eq(trader_pubkey))
            .filter(positions::position_state.eq(PositionState::Rollover))
            .set((
                positions::position_state.eq(PositionState::Open),
                positions::temporary_contract_id.eq(hex::encode(temporary_contract_id)),
                positions::update_timestamp.eq(OffsetDateTime::now_utc()),
                positions::coordinator_leverage.eq(leverage_coordinator.to_f32().expect("to fit")),
                positions::coordinator_margin.eq(margin_coordinator.to_sat() as i64),
                positions::coordinator_liquidation_price
                    .eq(liquidation_price_coordinator.to_f32().expect("to fit")),
                positions::trader_leverage.eq(leverage_trader.to_f32().expect("to fit")),
                positions::trader_margin.eq(margin_trader.to_sat() as i64),
                positions::trader_liquidation_price
                    .eq(liquidation_price_trader.to_f32().expect("to fit")),
            ))
            .execute(conn)
    }
    /// Move a position that is in [`PositionState::Rollover`] or [`PositionState::Resizing`]
    /// back to [`PositionState::Open`], updating the temporary contract id.
    pub fn set_position_to_open(
        conn: &mut PgConnection,
        trader_pubkey: String,
        temporary_contract_id: ContractId,
    ) -> QueryResult<usize> {
        diesel::update(positions::table)
            .filter(positions::trader_pubkey.eq(trader_pubkey))
            .filter(
                positions::position_state
                    .eq(PositionState::Rollover)
                    .or(positions::position_state.eq(PositionState::Resizing)),
            )
            .set((
                positions::position_state.eq(PositionState::Open),
                positions::temporary_contract_id.eq(hex::encode(temporary_contract_id)),
                positions::update_timestamp.eq(OffsetDateTime::now_utc()),
            ))
            .execute(conn)
    }
    /// Store the latest unrealized PNL (in sats) for the position. Fails if no row was
    /// updated.
    pub fn update_unrealized_pnl(conn: &mut PgConnection, id: i32, pnl: i64) -> Result<()> {
        let affected_rows = diesel::update(positions::table)
            .filter(positions::id.eq(id))
            .set((
                positions::trader_unrealized_pnl_sat.eq(Some(pnl)),
                positions::update_timestamp.eq(OffsetDateTime::now_utc()),
            ))
            .execute(conn)?;
        if affected_rows == 0 {
            bail!("Could not update unrealized pnl {pnl} for position {id}")
        }
        Ok(())
    }
    /// Move the trader's open position into [`PositionState::Rollover`] and set the new
    /// expiry timestamp.
    pub fn rollover_position(
        conn: &mut PgConnection,
        trader_pubkey: PublicKey,
        expiry_timestamp: &OffsetDateTime,
    ) -> Result<()> {
        let affected_rows = diesel::update(positions::table)
            .filter(positions::trader_pubkey.eq(trader_pubkey.to_string()))
            .filter(positions::position_state.eq(PositionState::Open))
            .set((
                positions::expiry_timestamp.eq(expiry_timestamp),
                positions::position_state.eq(PositionState::Rollover),
                positions::update_timestamp.eq(OffsetDateTime::now_utc()),
            ))
            .execute(conn)?;
        ensure!(affected_rows > 0, "Could not set position to rollover");
        Ok(())
    }
    /// Inserts the given position into the db. Returns the inserted position if successful.
    pub fn insert(
        conn: &mut PgConnection,
        new_position: crate::position::models::NewPosition,
    ) -> Result<crate::position::models::Position> {
        let position: Position = diesel::insert_into(positions::table)
            .values(NewPosition::from(new_position))
            .get_result(conn)?;
        Ok(position.into())
    }
}
/// Map the domain position state onto its flat database representation. Payload data carried
/// by some variants (PNL, closing price) is stored in separate columns, so it is dropped here.
impl From<crate::position::models::PositionState> for PositionState {
    fn from(value: crate::position::models::PositionState) -> Self {
        match value {
            crate::position::models::PositionState::Open => PositionState::Open,
            crate::position::models::PositionState::Closing { .. } => PositionState::Closing,
            crate::position::models::PositionState::Closed { .. } => PositionState::Closed,
            crate::position::models::PositionState::Rollover => PositionState::Rollover,
            crate::position::models::PositionState::Resizing { .. } => PositionState::Resizing,
            crate::position::models::PositionState::Proposed => PositionState::Proposed,
            crate::position::models::PositionState::Failed => PositionState::Failed,
        }
    }
}
/// Conversion from the database row into the domain model.
///
/// Panics if the stored public key or temporary contract id do not parse; both are written by
/// this module (`to_string`/`hex::encode`), so a failure indicates corrupted data.
impl From<Position> for crate::position::models::Position {
    fn from(value: Position) -> Self {
        crate::position::models::Position {
            id: value.id,
            contract_symbol: commons::ContractSymbol::from(value.contract_symbol),
            trader_leverage: value.trader_leverage,
            quantity: value.quantity,
            trader_direction: commons::Direction::from(value.trader_direction),
            average_entry_price: value.average_entry_price,
            trader_liquidation_price: value.trader_liquidation_price,
            coordinator_liquidation_price: value.coordinator_liquidation_price,
            // State plus its payload columns are recombined into the domain enum.
            position_state: crate::position::models::PositionState::from((
                value.position_state,
                value.trader_realized_pnl_sat,
                value.closing_price,
            )),
            coordinator_margin: Amount::from_sat(value.coordinator_margin as u64),
            creation_timestamp: value.creation_timestamp,
            expiry_timestamp: value.expiry_timestamp,
            update_timestamp: value.update_timestamp,
            trader: value.trader_pubkey.parse().expect("to be valid public key"),
            temporary_contract_id: value.temporary_contract_id.map(|contract_id| {
                ContractId::from_hex(contract_id.as_str()).expect("contract id to decode")
            }),
            closing_price: value.closing_price,
            coordinator_leverage: value.coordinator_leverage,
            trader_margin: Amount::from_sat(value.trader_margin as u64),
            stable: value.stable,
            trader_realized_pnl_sat: value.trader_realized_pnl_sat,
            order_matching_fees: Amount::from_sat(value.order_matching_fees as u64),
        }
    }
}
/// Insertable database row for a new position.
#[derive(Insertable, Debug, PartialEq)]
#[diesel(table_name = positions)]
struct NewPosition {
    pub contract_symbol: ContractSymbol,
    pub trader_leverage: f32,
    pub quantity: f32,
    pub trader_direction: Direction,
    pub average_entry_price: f32,
    pub trader_liquidation_price: f32,
    pub coordinator_liquidation_price: f32,
    pub position_state: PositionState,
    /// Coordinator margin in sats.
    pub coordinator_margin: i64,
    pub expiry_timestamp: OffsetDateTime,
    pub trader_pubkey: String,
    /// Hex-encoded temporary contract id.
    pub temporary_contract_id: String,
    pub coordinator_leverage: f32,
    /// Trader margin in sats.
    pub trader_margin: i64,
    pub stable: bool,
    /// Order-matching fees in sats.
    pub order_matching_fees: i64,
}
/// Conversion from the domain model into an insertable row.
///
/// New positions are always inserted as [`PositionState::Proposed`].
impl From<crate::position::models::NewPosition> for NewPosition {
    fn from(value: crate::position::models::NewPosition) -> Self {
        NewPosition {
            contract_symbol: ContractSymbol::from(value.contract_symbol),
            trader_leverage: value.trader_leverage,
            quantity: value.quantity,
            trader_direction: Direction::from(value.trader_direction),
            average_entry_price: value.average_entry_price,
            trader_liquidation_price: value
                .trader_liquidation_price
                .to_f32()
                .expect("to fit into f32"),
            coordinator_liquidation_price: value
                .coordinator_liquidation_price
                .to_f32()
                .expect("to fit into f32"),
            position_state: PositionState::Proposed,
            coordinator_margin: value.coordinator_margin.to_sat() as i64,
            expiry_timestamp: value.expiry_timestamp,
            trader_pubkey: value.trader.to_string(),
            temporary_contract_id: hex::encode(value.temporary_contract_id),
            coordinator_leverage: value.coordinator_leverage,
            trader_margin: value.trader_margin.to_sat() as i64,
            stable: value.stable,
            order_matching_fees: value.order_matching_fees.to_sat() as i64,
        }
    }
}
/// Lifecycle state of a position as stored in the database.
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = PositionStateType)]
pub enum PositionState {
    Proposed,
    Open,
    Closing,
    Rollover,
    Closed,
    Failed,
    Resizing,
}
/// No static query id for this custom SQL type: `query_id` returns `None`, which opts
/// queries containing it out of diesel's query-id based caching (see diesel's `QueryId`
/// documentation).
impl QueryId for PositionStateType {
    type QueryId = PositionStateType;
    const HAS_STATIC_QUERY_ID: bool = false;
    fn query_id() -> Option<TypeId> {
        None
    }
}
/// Reassemble the domain position state from the flat database columns: the state itself,
/// the realized PNL and the closing price.
impl From<(PositionState, Option<i64>, Option<f32>)> for crate::position::models::PositionState {
    fn from(
        (position_state, realized_pnl, closing_price): (PositionState, Option<i64>, Option<f32>),
    ) -> Self {
        match position_state {
            PositionState::Open => crate::position::models::PositionState::Open,
            PositionState::Closing => crate::position::models::PositionState::Closing {
                // For backwards compatibility we set the closing price to 0 if it was not set in
                // `Closing` state
                closing_price: closing_price.unwrap_or(0.0_f32),
            },
            PositionState::Closed => crate::position::models::PositionState::Closed {
                // For backwards compatibility we set the realized pnl to 0 if it was not set in
                // `Closed` state
                pnl: realized_pnl.unwrap_or(0),
            },
            PositionState::Rollover => crate::position::models::PositionState::Rollover,
            PositionState::Resizing => crate::position::models::PositionState::Resizing,
            PositionState::Proposed => crate::position::models::PositionState::Proposed,
            PositionState::Failed => crate::position::models::PositionState::Failed,
        }
    }
}
/// Contract symbols supported by the coordinator, as stored in the database.
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = ContractSymbolType)]
pub enum ContractSymbol {
    BtcUsd,
}
/// No static query id for this custom SQL type: `query_id` returns `None`, which opts
/// queries containing it out of diesel's query-id based caching (see diesel's `QueryId`
/// documentation).
impl QueryId for ContractSymbolType {
    type QueryId = ContractSymbolType;
    const HAS_STATIC_QUERY_ID: bool = false;
    fn query_id() -> Option<TypeId> {
        None
    }
}
/// Map the database contract symbol onto the shared `commons` representation.
impl From<ContractSymbol> for commons::ContractSymbol {
    fn from(value: ContractSymbol) -> Self {
        match value {
            ContractSymbol::BtcUsd => commons::ContractSymbol::BtcUsd,
        }
    }
}
/// Map the shared `commons` contract symbol onto the database representation.
impl From<commons::ContractSymbol> for ContractSymbol {
    fn from(value: commons::ContractSymbol) -> Self {
        match value {
            commons::ContractSymbol::BtcUsd => ContractSymbol::BtcUsd,
        }
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/bonus_status.rs | coordinator/src/db/bonus_status.rs | use crate::db::bonus_tiers;
use crate::schema::bonus_status;
use crate::schema::sql_types::BonusStatusType;
use bitcoin::secp256k1::PublicKey;
use diesel::AsExpression;
use diesel::ExpressionMethods;
use diesel::FromSqlRow;
use diesel::Insertable;
use diesel::PgConnection;
use diesel::QueryDsl;
use diesel::QueryResult;
use diesel::Queryable;
use diesel::RunQueryDsl;
use time::OffsetDateTime;
use xxi_node::commons;
/// A user's referral bonus status may be active for at most this many days.
const MAX_DAYS_FOR_ACTIVE_REFERRAL_STATUS: i64 = 30;
/// Kind of referral bonus a user can hold.
#[derive(Debug, Clone, Copy, PartialEq, FromSqlRow, AsExpression)]
#[diesel(sql_type = BonusStatusType)]
pub enum BonusType {
    /// Bonus granted for having referred other users.
    Referral,
    /// Bonus granted to a user who was referred by someone else.
    Referent,
}
#[allow(dead_code)]
// This is needed because the fields need to be here to satisfy diesel.
/// Database row describing a bonus status of a trader.
#[derive(Queryable, Debug, Clone)]
#[diesel(table_name = bonus_status)]
pub(crate) struct BonusStatus {
    pub(crate) id: i32,
    pub(crate) trader_pubkey: String,
    /// Bonus tier level this status corresponds to.
    pub(crate) tier_level: i32,
    /// Fee rebate granted while this status is active.
    pub(crate) fee_rebate: f32,
    pub(crate) bonus_type: BonusType,
    /// When the bonus became active.
    pub(crate) activation_timestamp: OffsetDateTime,
    /// When the bonus expires; a status counts as active while this lies in the future.
    pub(crate) deactivation_timestamp: OffsetDateTime,
}
/// Map the database bonus type onto the shared `commons` representation.
impl From<BonusType> for commons::BonusStatusType {
    fn from(value: BonusType) -> Self {
        match value {
            BonusType::Referral => commons::BonusStatusType::Referral,
            BonusType::Referent => commons::BonusStatusType::Referent,
        }
    }
}
/// Insertable version of [`BonusStatus`] (no `id`, which the database generates).
#[derive(Insertable, Debug, Clone)]
#[diesel(table_name = bonus_status)]
pub(crate) struct NewBonusStatus {
    pub(crate) trader_pubkey: String,
    pub(crate) tier_level: i32,
    pub(crate) fee_rebate: f32,
    pub(crate) bonus_type: BonusType,
    pub(crate) activation_timestamp: OffsetDateTime,
    pub(crate) deactivation_timestamp: OffsetDateTime,
}
/// Returns all bonus status rows of the user that have not yet expired.
///
/// Note that this may be more than one row: a user can move up to the next tier before the
/// previous tier's status expires. The caller is responsible for picking the most suitable
/// status.
pub(crate) fn active_status_for_user(
    conn: &mut PgConnection,
    trader_pubkey: &PublicKey,
) -> QueryResult<Vec<BonusStatus>> {
    let now = OffsetDateTime::now_utc();
    let trader_pubkey = trader_pubkey.to_string();
    bonus_status::table
        .filter(bonus_status::trader_pubkey.eq(trader_pubkey))
        .filter(bonus_status::deactivation_timestamp.gt(now))
        .load(conn)
}
/// Insert a new bonus status for `trader_pk` at `tier_level`.
///
/// Idempotent per (trader, tier level): if the trader already holds an active
/// (non-expired) status for this tier, the existing row is returned and no new
/// row is inserted. The fee rebate is taken from the configured bonus tier, and
/// the status is active from now for `MAX_DAYS_FOR_ACTIVE_REFERRAL_STATUS` days.
pub(crate) fn insert(
    conn: &mut PgConnection,
    trader_pk: &PublicKey,
    tier_level: i32,
    bonus_type: BonusType,
) -> QueryResult<BonusStatus> {
    let existing_status_for_user = active_status_for_user(conn, trader_pk)?;
    // Resolve the tier first; an unknown tier_level fails here before any insert.
    let bonus_tier = bonus_tiers::tier_by_tier_level(conn, tier_level)?;
    // Deduplicate: an active status for the same tier is returned as-is.
    if let Some(status) = existing_status_for_user
        .into_iter()
        .find(|status| status.tier_level == tier_level)
    {
        tracing::debug!(
            trader_pubkey = trader_pk.to_string(),
            tier_level,
            "User has already gained bonus status"
        );
        return Ok(status);
    }
    let bonus_status = diesel::insert_into(bonus_status::table)
        .values(NewBonusStatus {
            trader_pubkey: trader_pk.to_string(),
            tier_level,
            fee_rebate: bonus_tier.fee_rebate,
            bonus_type,
            activation_timestamp: OffsetDateTime::now_utc(),
            deactivation_timestamp: OffsetDateTime::now_utc()
                + time::Duration::days(MAX_DAYS_FOR_ACTIVE_REFERRAL_STATUS),
        })
        .get_result(conn)?;
    Ok(bonus_status)
} | rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
get10101/10101 | https://github.com/get10101/10101/blob/3ae135090528d64fbe2702aa03e1e3953cd57e2f/coordinator/src/db/channel_opening_params.rs | coordinator/src/db/channel_opening_params.rs | use crate::schema::channel_opening_params;
use bitcoin::Amount;
use diesel::ExpressionMethods;
use diesel::Insertable;
use diesel::OptionalExtension;
use diesel::PgConnection;
use diesel::QueryDsl;
use diesel::QueryResult;
use diesel::Queryable;
use diesel::QueryableByName;
use diesel::RunQueryDsl;
use time::OffsetDateTime;
use uuid::Uuid;
/// Row mapping for the `channel_opening_params` table.
///
/// Reserve and funding amounts are stored as `i64` sat values; conversion
/// to and from [`bitcoin::Amount`] happens in the `From` impls in this module.
#[derive(Queryable, QueryableByName, Insertable, Debug, Clone, PartialEq)]
#[diesel(table_name = channel_opening_params)]
pub struct ChannelOpeningParams {
    // Order id stored in its string (UUID) representation.
    order_id: String,
    coordinator_reserve: i64,
    trader_reserve: i64,
    // Unix timestamp (seconds) of when the row was created.
    created_at: i64,
    // Externally funded amount in sats, if any.
    external_funding: Option<i64>,
}
pub fn insert(
conn: &mut PgConnection,
order_id: Uuid,
channel_opening_params: crate::ChannelOpeningParams,
) -> QueryResult<()> {
let affected_rows = diesel::insert_into(channel_opening_params::table)
.values(ChannelOpeningParams::from((
order_id,
channel_opening_params,
)))
.execute(conn)?;
if affected_rows == 0 {
return diesel::result::QueryResult::Err(diesel::result::Error::NotFound);
}
diesel::result::QueryResult::Ok(())
}
/// Look up the channel opening parameters stored for `order_id`, if any.
pub fn get_by_order_id(
    conn: &mut PgConnection,
    order_id: Uuid,
) -> QueryResult<Option<crate::ChannelOpeningParams>> {
    let row: Option<ChannelOpeningParams> = channel_opening_params::table
        .filter(channel_opening_params::order_id.eq(order_id.to_string()))
        .first(conn)
        .optional()?;
    // Convert the database row into the domain type, passing `None` through.
    Ok(row.map(Into::into))
}
impl From<(Uuid, crate::ChannelOpeningParams)> for ChannelOpeningParams {
fn from((order_id, channel_opening_params): (Uuid, crate::ChannelOpeningParams)) -> Self {
Self {
order_id: order_id.to_string(),
trader_reserve: channel_opening_params.trader_reserve.to_sat() as i64,
coordinator_reserve: channel_opening_params.coordinator_reserve.to_sat() as i64,
external_funding: channel_opening_params
.external_funding
.map(|funding| funding.to_sat() as i64),
created_at: OffsetDateTime::now_utc().unix_timestamp(),
}
}
}
impl From<ChannelOpeningParams> for crate::ChannelOpeningParams {
    /// Convert a database row back into the domain type, turning the stored
    /// sat values into [`Amount`]s. The order id and creation time are not
    /// part of the domain type and are dropped.
    fn from(row: ChannelOpeningParams) -> Self {
        let to_amount = |sats: i64| Amount::from_sat(sats as u64);
        Self {
            coordinator_reserve: to_amount(row.coordinator_reserve),
            trader_reserve: to_amount(row.trader_reserve),
            external_funding: row.external_funding.map(to_amount),
        }
    }
}
| rust | MIT | 3ae135090528d64fbe2702aa03e1e3953cd57e2f | 2026-01-04T20:18:11.134572Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.