repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
mempool/electrs | https://github.com/mempool/electrs/blob/3000bd13e76e2f33d0844a30489108846954d0a3/src/bin/popular-scripts.rs | src/bin/popular-scripts.rs | extern crate electrs;
use std::{convert::TryInto, thread::ThreadId, time::Instant};
use electrs::{config::Config, new_index::db::open_raw_db};
use lazy_static::lazy_static;
/*
// How to run:
export ELECTRS_DATA=/path/to/electrs
cargo run \
-q --release --bin popular-scripts -- \
--db-dir $ELECTRS_DATA/db \
> ./contrib/popular-scripts.txt
*/
// Alias for the multi-threaded-mode RocksDB handle type.
type DB = rocksdb::DBWithThreadMode<rocksdb::MultiThreaded>;
lazy_static! {
    // Read-only handle to the txhistory DB, opened lazily on first access
    // (CLI args are parsed at that point) and shared by all worker threads.
    static ref HISTORY_DB: DB = {
        let config = Config::from_args();
        open_raw_db(
            &config.db_path.join("newindex").join("history"),
            electrs::new_index::db::OpenMode::ReadOnly,
        )
    };
}
// Dev note:
// Only use println for file output (lines for output)
// Use eprintln to print to stderr for dev notifications
/// Scans the history DB for scripthashes with many history rows and
/// prints them (hex-encoded, one per line) to stdout.
///
/// Env vars:
/// - `HIGH_USAGE_THRESHOLD`: minimum history rows to report (default 4000)
/// - `JOB_THREAD_COUNT`: number of scanner threads (default 4)
fn main() {
    let high_usage_threshold = std::env::var("HIGH_USAGE_THRESHOLD")
        .ok()
        .and_then(|s| s.parse::<u32>().ok())
        .unwrap_or(4000);
    let thread_count = std::env::var("JOB_THREAD_COUNT")
        .ok()
        .and_then(|s| s.parse::<usize>().ok())
        .filter(|&n| n > 0) // 0 threads would divide by zero below and make no progress
        .unwrap_or(4);
    eprintln!(
        "Searching for scripts with history rows of {} or more...",
        high_usage_threshold
    );
    let thread_pool = rayon::ThreadPoolBuilder::new()
        .num_threads(thread_count)
        .build()
        .expect("Built threadpool");
    let (sender, receiver) = crossbeam_channel::unbounded::<[u8; 32]>();

    // Split the 0..=255 first-byte keyspace into one section per thread.
    // Keep the modulo in usize: the previous `increment as u8` cast turned
    // 256 (JOB_THREAD_COUNT=1) into 0 and panicked with a remainder-by-zero;
    // `.max(1)` similarly guards JOB_THREAD_COUNT > 256.
    let increment = (256 / thread_count).max(1);
    let bytes: Vec<u8> = (0u8..=255u8)
        .filter(|n| (*n as usize) % increment == 0)
        .collect();

    let now = Instant::now();
    for i in 0..bytes.len() {
        let sender = sender.clone();
        let first_byte = bytes[i];
        // First byte of the NEXT section (None for the last section).
        let second_byte = bytes.get(i + 1).copied();
        thread_pool.spawn(move || {
            let id = std::thread::current().id();
            run_iterator(
                id,
                &HISTORY_DB,
                high_usage_threshold,
                first_byte,
                second_byte,
                sender,
                now,
            );
            eprintln!("{id:?} Finished its job!");
        })
    }

    // If we don't drop this sender
    // the receiver will hang forever
    drop(sender);

    // Print each popular scripthash as the workers find them.
    while let Ok(script) = receiver.recv() {
        println!("{}", hex::encode(script));
    }
    eprintln!("Finished!!!!");
}
/// Iterates one section of the TxHistory keyspace (keys starting with
/// b'H' + `first_byte` up to, but not including, `next_byte`), counting
/// consecutive rows per scripthash and reporting scripthashes whose row
/// count reaches `high_usage_threshold` through `sender`.
fn run_iterator(
    thread_id: ThreadId,
    db: &DB,
    high_usage_threshold: u32,
    first_byte: u8,
    next_byte: Option<u8>,
    sender: crossbeam_channel::Sender<[u8; 32]>,
    now: Instant,
) {
    let mut iter = db.raw_iterator();
    eprintln!(
        "Thread ({thread_id:?}) Seeking DB to beginning of tx histories for b'H' + {}",
        hex::encode([first_byte])
    );
    let mut compare_vec: Vec<u8> = vec![b'H', first_byte];
    iter.seek(&compare_vec); // Seek to beginning of our section

    // Build a closure that detects when the iterator has left this
    // thread's section of the keyspace.
    type Checker<'a> = Box<dyn Fn(&[u8]) -> bool + 'a>;
    let is_finished: Checker<'_> = if let Some(next) = next_byte {
        // Done as soon as we reach the next section's prefix.
        compare_vec[1] = next;
        Box::new(|key: &[u8]| -> bool { key.starts_with(&compare_vec) })
    } else {
        // Last section: done once we leave the b'H' prefix entirely.
        compare_vec.remove(1);
        Box::new(|key: &[u8]| -> bool { !key.starts_with(&compare_vec) })
    };
    eprintln!("Thread ({thread_id:?}) Seeking done");

    let mut curr_scripthash = [0u8; 32];
    let mut total_entries: usize = 0;
    let mut iter_index: usize = 1;

    while iter.valid() {
        let key = iter.key().unwrap();
        if key.is_empty() || key[0] != b'H' || is_finished(key) {
            // We have left the txhistory section.
            break;
        }
        // Periodic progress log for long-running scans.
        if iter_index % 10_000_000 == 0 {
            let duration = now.elapsed().as_secs();
            eprintln!(
                "Thread ({thread_id:?}) Processing row #{iter_index}... {duration} seconds elapsed"
            );
        }
        // We know that the TxHistory key is 1 byte "H" followed by
        // 32 byte scripthash
        let entry_hash: [u8; 32] = key[1..33].try_into().unwrap();
        if curr_scripthash != entry_hash {
            // Rolled on to a new scripthash: report the previous one if
            // it was popular, then reset the counters.
            send_if_popular(
                high_usage_threshold,
                total_entries,
                curr_scripthash,
                &sender,
            );
            curr_scripthash = entry_hash;
            total_entries = 0;
        }
        total_entries += 1;
        iter_index += 1;
        iter.next();
    }

    // Always flush the final scripthash. The original only did this on the
    // explicit break path, so the very last scripthash was dropped whenever
    // the iterator ran off the end of the DB instead of reaching the next
    // section's prefix.
    send_if_popular(
        high_usage_threshold,
        total_entries,
        curr_scripthash,
        &sender,
    );
}
#[inline]
/// Forwards `curr_scripthash` to the collector when its history row
/// count meets the configured threshold; otherwise does nothing.
fn send_if_popular(
    high_usage_threshold: u32,
    total_entries: usize,
    curr_scripthash: [u8; 32],
    sender: &crossbeam_channel::Sender<[u8; 32]>,
) {
    let is_popular = total_entries >= high_usage_threshold as usize;
    if !is_popular {
        return;
    }
    sender.send(curr_scripthash).unwrap();
}
| rust | MIT | 3000bd13e76e2f33d0844a30489108846954d0a3 | 2026-01-04T20:24:15.088141Z | false |
mempool/electrs | https://github.com/mempool/electrs/blob/3000bd13e76e2f33d0844a30489108846954d0a3/src/bin/electrs.rs | src/bin/electrs.rs | extern crate error_chain;
#[macro_use]
extern crate log;
extern crate electrs;
use error_chain::ChainedError;
use std::process;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use electrs::{
config::Config,
daemon::Daemon,
electrum::RPC as ElectrumRPC,
errors::*,
metrics::Metrics,
new_index::{precache, ChainQuery, FetchFrom, Indexer, Mempool, Query, Store},
rest,
signal::Waiter,
};
#[cfg(feature = "liquid")]
use electrs::elements::AssetRegistry;
/// Picks the block source: JSONRPC when explicitly configured or once
/// the initial bulk sync has completed; otherwise read blk*.dat files
/// directly, which is faster for the initial indexing.
fn fetch_from(config: &Config, store: &Store) -> FetchFrom {
    // Short-circuits exactly like the original: done_initial_sync() is
    // only consulted when jsonrpc_import wasn't requested explicitly.
    let use_jsonrpc = config.jsonrpc_import || store.done_initial_sync();
    match use_jsonrpc {
        // slower, uses JSONRPC (good for incremental updates)
        true => FetchFrom::Bitcoind,
        // faster, uses blk*.dat files (good for initial indexing)
        false => FetchFrom::BlkFiles,
    }
}
/// Starts all server components (daemon connection, indexer, REST and
/// Electrum servers) and runs the main loop until a shutdown signal.
/// Each loop iteration re-indexes new blocks, refreshes the mempool,
/// and notifies subscribed Electrum clients.
fn run_server(config: Arc<Config>) -> Result<()> {
    let signal = Waiter::start();
    let metrics = Metrics::new(config.monitoring_addr);
    metrics.start();

    let daemon = Arc::new(Daemon::new(
        config.daemon_dir.clone(),
        config.blocks_dir.clone(),
        config.daemon_rpc_addr,
        config.cookie_getter(),
        config.network_type,
        config.magic,
        signal.clone(),
        &metrics,
    )?);
    let store = Arc::new(Store::open(&config.db_path.join("newindex"), &config));
    let mut indexer = Indexer::open(
        Arc::clone(&store),
        fetch_from(&config, &store),
        &config,
        &metrics,
    );
    // Initial sync up to the daemon's current tip.
    let mut tip = indexer.update(&daemon)?;

    let chain = Arc::new(ChainQuery::new(
        Arc::clone(&store),
        Arc::clone(&daemon),
        &config,
        &metrics,
    ));
    let mempool = Arc::new(RwLock::new(Mempool::new(
        Arc::clone(&chain),
        &metrics,
        Arc::clone(&config),
    )));
    // Retry the initial mempool load until it succeeds (the daemon may
    // still be warming up); signal.wait() lets shutdown abort the retry.
    loop {
        match Mempool::update(&mempool, &daemon) {
            Ok(_) => break,
            Err(e) => {
                warn!(
                    "Error performing initial mempool update, trying again in 5 seconds: {}",
                    e.display_chain()
                );
                signal.wait(Duration::from_secs(5), false)?;
            }
        }
    }

    #[cfg(feature = "liquid")]
    let asset_db = config.asset_db_path.as_ref().map(|db_dir| {
        let asset_db = Arc::new(RwLock::new(AssetRegistry::new(db_dir.clone())));
        AssetRegistry::spawn_sync(asset_db.clone());
        asset_db
    });

    let query = Arc::new(Query::new(
        Arc::clone(&chain),
        Arc::clone(&mempool),
        Arc::clone(&daemon),
        Arc::clone(&config),
        #[cfg(feature = "liquid")]
        asset_db,
    ));

    // TODO: configuration for which servers to start
    let rest_server = rest::start(Arc::clone(&config), Arc::clone(&query), &metrics);
    let electrum_server = ElectrumRPC::start(Arc::clone(&config), Arc::clone(&query), &metrics);

    // Optionally pre-warm caches for a configured list of scripts.
    if let Some(ref precache_file) = config.precache_scripts {
        let precache_scripthashes = precache::scripthashes_from_file(precache_file.to_string())
            .expect("cannot load scripts to precache");
        precache::precache(
            Arc::clone(&chain),
            precache_scripthashes,
            config.precache_threads,
        );
    }

    loop {
        // wait() returns Err once a shutdown signal has been received.
        if let Err(err) = signal.wait(Duration::from_millis(config.main_loop_delay), true) {
            info!("stopping server: {}", err);
            // Log which threads are still alive while shutdown is in
            // progress (40 samples at 500ms = ~20 seconds).
            electrs::util::spawn_thread("shutdown-thread-checker", || {
                let mut counter = 40;
                let interval_ms = 500;
                while counter > 0 {
                    electrs::util::with_spawned_threads(|threads| {
                        debug!("Threads during shutdown: {:?}", threads);
                    });
                    std::thread::sleep(std::time::Duration::from_millis(interval_ms));
                    counter -= 1;
                }
            });
            rest_server.stop();
            // the electrum server is stopped when dropped
            break;
        }

        // Index new blocks
        let current_tip = daemon.getbestblockhash()?;
        if current_tip != tip {
            indexer.update(&daemon)?;
            tip = current_tip;
        };

        // Update mempool
        if let Err(e) = Mempool::update(&mempool, &daemon) {
            // Log the error if the result is an Err
            warn!(
                "Error updating mempool, skipping mempool update: {}",
                e.display_chain()
            );
        }

        // Update subscribed clients
        electrum_server.notify();
    }
    info!("server stopped");
    Ok(())
}
/// Entry point: parses CLI args, runs the server, and exits non-zero
/// (after logging the full error chain) on fatal failure.
fn main() {
    let config = Arc::new(Config::from_args());
    match run_server(config) {
        Ok(()) => {}
        Err(e) => {
            error!("server failed: {}", e.display_chain());
            process::exit(1);
        }
    }
    // On clean shutdown, report any threads that are still alive.
    electrs::util::with_spawned_threads(|threads| {
        debug!("Threads before closing: {:?}", threads);
    });
}
| rust | MIT | 3000bd13e76e2f33d0844a30489108846954d0a3 | 2026-01-04T20:24:15.088141Z | false |
mempool/electrs | https://github.com/mempool/electrs/blob/3000bd13e76e2f33d0844a30489108846954d0a3/src/bin/tx-fingerprint-stats.rs | src/bin/tx-fingerprint-stats.rs | extern crate electrs;
#[cfg(not(feature = "liquid"))]
#[macro_use]
extern crate log;
#[cfg(not(feature = "liquid"))]
fn main() {
    // Walks every indexed transaction and emits one CSV line per
    // confirmed, non-coinbase, exactly-two-output transaction:
    //   txid,height,locktime,uih,is_multi_spend,has_reuse
    use std::collections::HashSet;
    use std::sync::Arc;

    use bitcoin::blockdata::script::Script;
    use bitcoin::consensus::encode::deserialize;
    use electrs::{
        chain::Transaction,
        config::Config,
        daemon::Daemon,
        metrics::Metrics,
        new_index::{ChainQuery, FetchFrom, Indexer, Store},
        signal::Waiter,
        util::has_prevout,
    };

    let signal = Waiter::start();
    let config = Config::from_args();
    let store = Arc::new(Store::open(&config.db_path.join("newindex"), &config));

    let metrics = Metrics::new(config.monitoring_addr);
    metrics.start();

    let daemon = Arc::new(
        Daemon::new(
            config.daemon_dir.clone(),
            config.blocks_dir.clone(),
            config.daemon_rpc_addr,
            config.cookie_getter(),
            config.network_type,
            config.magic,
            signal,
            &metrics,
        )
        .unwrap(),
    );
    let chain = ChainQuery::new(Arc::clone(&store), Arc::clone(&daemon), &config, &metrics);

    // Bring the index up to date before scanning.
    let mut indexer = Indexer::open(Arc::clone(&store), FetchFrom::Bitcoind, &config, &metrics);
    indexer.update(&daemon).unwrap();

    // Iterate over all "T"-prefixed (transaction) rows of the txstore DB.
    let mut iter = store.txstore_db().raw_iterator();
    iter.seek(b"T");

    let mut total = 0;
    let mut uih_totals = vec![0, 0, 0];

    while iter.valid() {
        let key = iter.key().unwrap();
        let value = iter.value().unwrap();

        if !key.starts_with(b"T") {
            break;
        }
        let tx: Transaction = deserialize(value).expect("failed to parse Transaction");
        let txid = tx.txid();
        iter.next();

        // only consider transactions of exactly two outputs
        if tx.output.len() != 2 {
            continue;
        }
        // skip coinbase txs
        if tx.is_coin_base() {
            continue;
        }

        // skip orphaned transactions
        let blockid = match chain.tx_confirming_block(&txid) {
            Some(blockid) => blockid,
            None => continue,
        };

        //info!("{:?},{:?}", txid, blockid);

        let prevouts = chain.lookup_txos(
            &tx.input
                .iter()
                .filter(|txin| has_prevout(txin))
                .map(|txin| txin.previous_output)
                .collect(),
        );

        let total_out: u64 = tx.output.iter().map(|out| out.value).sum();
        let small_out = tx.output.iter().map(|out| out.value).min().unwrap();
        let large_out = tx.output.iter().map(|out| out.value).max().unwrap();

        let total_in: u64 = prevouts.values().map(|out| out.value).sum();
        let smallest_in = prevouts.values().map(|out| out.value).min().unwrap();
        // NOTE(review): assumes lookup_txos returned every requested prevout;
        // otherwise this subtraction could underflow — confirm.
        let fee = total_in - total_out;

        // test for UIH
        // NOTE(review): "UIH" presumably refers to the "unnecessary input
        // heuristic" (0 = none, 1/2 = increasing levels) — confirm.
        let uih = if total_in - smallest_in > large_out + fee {
            2
        } else if total_in - smallest_in > small_out + fee {
            1
        } else {
            0
        };

        // test for spending multiple coins owned by the same spk
        let is_multi_spend = {
            let mut seen_spks = HashSet::new();
            prevouts
                .values()
                .any(|out| !seen_spks.insert(&out.script_pubkey))
        };

        // test for sending back to one of the spent spks
        let has_reuse = {
            let prev_spks: HashSet<Script> = prevouts
                .values()
                .map(|out| out.script_pubkey.clone())
                .collect();
            tx.output
                .iter()
                .any(|out| prev_spks.contains(&out.script_pubkey))
        };

        println!(
            "{},{},{},{},{},{}",
            txid, blockid.height, tx.lock_time, uih, is_multi_spend as u8, has_reuse as u8
        );

        total += 1;
        uih_totals[uih] += 1;
    }
    info!(
        "processed {} total txs, UIH counts: {:?}",
        total, uih_totals
    );
}
// No-op entry point for `liquid` builds: the stats logic above is only
// compiled for non-liquid builds.
#[cfg(feature = "liquid")]
fn main() {}
| rust | MIT | 3000bd13e76e2f33d0844a30489108846954d0a3 | 2026-01-04T20:24:15.088141Z | false |
mempool/electrs | https://github.com/mempool/electrs/blob/3000bd13e76e2f33d0844a30489108846954d0a3/src/new_index/db.rs | src/new_index/db.rs | use rocksdb;
use std::path::Path;
use crate::config::Config;
use crate::util::{bincode_util, Bytes};
/// Each version will break any running instance with a DB that has a differing version.
/// It will also break if light mode is enabled or disabled.
///
/// Stored under key "V" and checked in `DB::verify_compatibility`.
// 1 = Original DB (since fork from Blockstream)
// 2 = Add tx position to TxHistory rows and place Spending before Funding
static DB_VERSION: u32 = 2;
/// A single key/value row, used for both reads and batched writes.
#[derive(Debug, Eq, PartialEq)]
pub struct DBRow {
    pub key: Vec<u8>,
    pub value: Vec<u8>,
}
/// Forward iterator over rows whose keys start with `prefix`.
pub struct ScanIterator<'a> {
    prefix: Vec<u8>,
    iter: rocksdb::DBIterator<'a>,
    done: bool, // set once a key outside `prefix` is seen
}
impl Iterator for ScanIterator<'_> {
    type Item = DBRow;

    /// Yields rows until the underlying iterator is exhausted, hits an
    /// error, or leaves the configured key prefix.
    fn next(&mut self) -> Option<DBRow> {
        if self.done {
            return None;
        }
        // Double `?`: first for iterator exhaustion, then for a row error.
        let entry = self.iter.next().map(Result::ok)??;
        let (key, value) = entry;
        if key.starts_with(&self.prefix) {
            Some(DBRow {
                key: key.to_vec(),
                value: value.to_vec(),
            })
        } else {
            self.done = true;
            None
        }
    }
}
/// Backward iterator over rows whose keys start with `prefix`
/// (the raw iterator is positioned via `seek_for_prev` by the caller).
pub struct ReverseScanIterator<'a> {
    prefix: Vec<u8>,
    iter: rocksdb::DBRawIterator<'a>,
    done: bool, // set once a key outside `prefix` is seen
}
impl Iterator for ReverseScanIterator<'_> {
    type Item = DBRow;

    /// Yields the current row and steps the raw iterator backwards,
    /// stopping once the iterator is exhausted or the key leaves the prefix.
    fn next(&mut self) -> Option<DBRow> {
        if self.done {
            return None;
        }
        if !self.iter.valid() {
            return None;
        }

        let key = self.iter.key().unwrap();
        if !key.starts_with(&self.prefix) {
            self.done = true;
            return None;
        }

        let value = self.iter.value().unwrap();
        let row = DBRow {
            key: key.to_vec(),
            value: value.to_vec(),
        };
        self.iter.prev();
        Some(row)
    }
}
/// Merges several `ReverseScanIterator`s into one descending stream,
/// ordered by the key bytes starting at `value_offset`.
pub struct ReverseScanGroupIterator<'a> {
    iters: Vec<ReverseScanIterator<'a>>,
    next_rows: Vec<Option<DBRow>>, // pre-fetched head row of each iterator
    value_offset: usize,
    done: bool,
}
impl<'a> ReverseScanGroupIterator<'a> {
    /// Builds a group iterator by pre-fetching the head row of each
    /// underlying reverse iterator; `done` is set when all are empty.
    pub fn new(
        mut iters: Vec<ReverseScanIterator<'a>>,
        value_offset: usize,
    ) -> ReverseScanGroupIterator<'a> {
        let next_rows: Vec<Option<DBRow>> = iters.iter_mut().map(|it| it.next()).collect();
        let done = next_rows.iter().all(Option::is_none);
        ReverseScanGroupIterator {
            iters,
            next_rows,
            value_offset,
            done,
        }
    }
}
impl Iterator for ReverseScanGroupIterator<'_> {
    type Item = DBRow;

    /// K-way merge step: among the pre-fetched head rows, yield the one
    /// whose key suffix (bytes from `value_offset` on) compares greatest,
    /// then refill that slot from its source iterator.
    fn next(&mut self) -> Option<DBRow> {
        if self.done {
            return None;
        }
        // Pick the slot holding the greatest key suffix; None rows sort
        // below Some rows, so exhausted iterators are never selected.
        let best_index = self
            .next_rows
            .iter()
            .enumerate()
            .max_by(|(a_index, a_opt), (b_index, b_opt)| match (a_opt, b_opt) {
                (None, None) => a_index.cmp(b_index),
                (Some(_), None) => std::cmp::Ordering::Greater,
                (None, Some(_)) => std::cmp::Ordering::Less,
                (Some(a), Some(b)) => a.key[self.value_offset..].cmp(&(b.key[self.value_offset..])),
            })
            .map(|(index, _)| index)
            .unwrap_or(0);

        // Take the winning row and advance its source iterator.
        let best = self.next_rows[best_index].take();
        self.next_rows[best_index] = self.iters.get_mut(best_index)?.next();

        if self.next_rows.iter().all(|row| row.is_none()) {
            self.done = true;
        }

        best
    }
}
/// Thin wrapper around a RocksDB handle with electrs' access helpers.
#[derive(Debug)]
pub struct DB {
    db: rocksdb::DB,
}

/// Whether a batched write should be durable (sync + WAL) or fast.
#[derive(Copy, Clone, Debug)]
pub enum DBFlush {
    Disable,
    Enable,
}
impl DB {
    /// Opens (or creates) a read-write DB at `path` and verifies that its
    /// on-disk version marker matches `DB_VERSION`/light-mode.
    pub fn open(path: &Path, config: &Config) -> DB {
        let db = DB {
            db: open_raw_db(path, OpenMode::ReadWrite),
        };
        db.verify_compatibility(config);
        db
    }

    /// Runs a full manual compaction over the entire key range.
    pub fn full_compaction(&self) {
        // TODO: make sure this doesn't fail silently
        debug!("starting full compaction on {:?}", self.db);
        self.db.compact_range(None::<&[u8]>, None::<&[u8]>);
        debug!("finished full compaction on {:?}", self.db);
    }

    /// Re-enables background auto-compaction (disabled at open for bulk load).
    pub fn enable_auto_compaction(&self) {
        let opts = [("disable_auto_compactions", "false")];
        self.db.set_options(&opts).unwrap();
    }

    /// Returns a raw (unfiltered) RocksDB iterator.
    pub fn raw_iterator(&self) -> rocksdb::DBRawIterator<'_> {
        self.db.raw_iterator()
    }

    /// Forward-scans all rows whose key starts with `prefix`.
    pub fn iter_scan(&self, prefix: &[u8]) -> ScanIterator<'_> {
        ScanIterator {
            prefix: prefix.to_vec(),
            iter: self.db.prefix_iterator(prefix),
            done: false,
        }
    }

    /// Forward-scans rows with key prefix `prefix`, starting at `start_at`.
    pub fn iter_scan_from(&self, prefix: &[u8], start_at: &[u8]) -> ScanIterator<'_> {
        let iter = self.db.iterator(rocksdb::IteratorMode::From(
            start_at,
            rocksdb::Direction::Forward,
        ));
        ScanIterator {
            prefix: prefix.to_vec(),
            iter,
            done: false,
        }
    }

    /// Scans rows with key prefix `prefix` in reverse, starting from
    /// (at most) `prefix_max`.
    pub fn iter_scan_reverse(&self, prefix: &[u8], prefix_max: &[u8]) -> ReverseScanIterator<'_> {
        let mut iter = self.db.raw_iterator();
        iter.seek_for_prev(prefix_max);
        ReverseScanIterator {
            prefix: prefix.to_vec(),
            iter,
            done: false,
        }
    }

    /// Merges several reverse prefix scans into one iterator ordered by
    /// the key bytes after `value_offset` (see `ReverseScanGroupIterator`).
    pub fn iter_scan_group_reverse(
        &self,
        prefixes: impl Iterator<Item = (Vec<u8>, Vec<u8>)>,
        value_offset: usize,
    ) -> ReverseScanGroupIterator<'_> {
        let iters = prefixes
            .map(|(prefix, prefix_max)| {
                let mut iter = self.db.raw_iterator();
                iter.seek_for_prev(prefix_max);
                ReverseScanIterator {
                    prefix: prefix.to_vec(),
                    iter,
                    done: false,
                }
            })
            .collect();

        ReverseScanGroupIterator::new(iters, value_offset)
    }

    /// Writes `rows` as one batch, sorted by key first. `flush` controls
    /// whether the write is synced and WAL-backed.
    pub fn write(&self, mut rows: Vec<DBRow>, flush: DBFlush) {
        debug!(
            "writing {} rows to {:?}, flush={:?}",
            rows.len(),
            self.db,
            flush
        );
        rows.sort_unstable_by(|a, b| a.key.cmp(&b.key));
        let mut batch = rocksdb::WriteBatch::default();
        for row in rows {
            batch.put(&row.key, &row.value);
        }
        let do_flush = match flush {
            DBFlush::Enable => true,
            DBFlush::Disable => false,
        };
        let mut opts = rocksdb::WriteOptions::new();
        opts.set_sync(do_flush);
        opts.disable_wal(!do_flush);
        self.db.write_opt(batch, &opts).unwrap();
    }

    /// Best-effort deletion: per-key failures are logged, not propagated.
    pub fn delete(&self, keys: Vec<Vec<u8>>) {
        debug!("deleting {} rows from {:?}", keys.len(), self.db);
        for key in keys {
            let _ = self.db.delete(key).inspect_err(|err| {
                warn!("Error while deleting DB row: {err}");
            });
        }
    }

    /// Flushes memtables to disk.
    pub fn flush(&self) {
        self.db.flush().unwrap();
    }

    /// Writes a single key/value pair with default write options
    /// (contrast `put_sync`).
    pub fn put(&self, key: &[u8], value: &[u8]) {
        self.db.put(key, value).unwrap();
    }

    /// Writes a single key/value pair with fsync enabled.
    pub fn put_sync(&self, key: &[u8], value: &[u8]) {
        let mut opts = rocksdb::WriteOptions::new();
        opts.set_sync(true);
        self.db.put_opt(key, value, &opts).unwrap();
    }

    /// Reads a single value; None when the key is absent.
    pub fn get(&self, key: &[u8]) -> Option<Bytes> {
        self.db.get(key).unwrap().map(|v| v.to_vec())
    }

    /// Compares the stored compatibility marker (key "V") against the
    /// expected DB_VERSION (+ light-mode byte): stores it on first run,
    /// panics on mismatch so the operator reindexes.
    fn verify_compatibility(&self, config: &Config) {
        let mut compatibility_bytes = bincode_util::serialize_little(&DB_VERSION).unwrap();

        if config.light_mode {
            // append a byte to indicate light_mode is enabled.
            // we're not letting bincode serialize this so that the compatibility bytes won't change
            // (and require a reindex) when light_mode is disabled. this should be changed the next
            // time we bump DB_VERSION and require a re-index anyway.
            compatibility_bytes.push(1);
        }

        match self.get(b"V") {
            None => self.put(b"V", &compatibility_bytes),
            Some(ref x) if x != &compatibility_bytes => {
                panic!("Incompatible database found. Please reindex.")
            }
            Some(_) => (),
        }
    }
}
/// Access mode for `open_raw_db`.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(u8)]
pub enum OpenMode {
    ReadOnly,
    ReadWrite,
}
/// Opens a RocksDB instance at `path` with electrs' tuning options,
/// creating it if missing (read-write mode only).
pub fn open_raw_db<T: rocksdb::ThreadMode>(
    path: &Path,
    read_mode: OpenMode,
) -> rocksdb::DBWithThreadMode<T> {
    debug!("opening DB at {:?}", path);
    let mut opts = rocksdb::Options::default();
    opts.create_if_missing(true);
    opts.set_max_open_files(100_000); // TODO: make sure to `ulimit -n` this process correctly
    opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
    opts.set_compression_type(rocksdb::DBCompressionType::None);
    opts.set_target_file_size_base(1_073_741_824);
    opts.set_write_buffer_size(256 << 20);
    opts.set_disable_auto_compactions(true); // for initial bulk load
    // opts.set_advise_random_on_open(???);
    opts.set_compaction_readahead_size(1 << 20);
    opts.increase_parallelism(2);

    // let mut block_opts = rocksdb::BlockBasedOptions::default();
    // block_opts.set_block_size(???);

    if let OpenMode::ReadOnly = read_mode {
        rocksdb::DBWithThreadMode::<T>::open_for_read_only(&opts, path, false)
            .expect("failed to open RocksDB (READ ONLY)")
    } else {
        rocksdb::DBWithThreadMode::<T>::open(&opts, path).expect("failed to open RocksDB")
    }
}
| rust | MIT | 3000bd13e76e2f33d0844a30489108846954d0a3 | 2026-01-04T20:24:15.088141Z | false |
mempool/electrs | https://github.com/mempool/electrs/blob/3000bd13e76e2f33d0844a30489108846954d0a3/src/new_index/fetch.rs | src/new_index/fetch.rs | use rayon::prelude::*;
#[cfg(not(feature = "liquid"))]
use bitcoin::consensus::encode::{deserialize, Decodable};
#[cfg(feature = "liquid")]
use elements::encode::{deserialize, Decodable};
use std::collections::HashMap;
use std::fs;
use std::io::Cursor;
use std::path::PathBuf;
use std::thread;
use crate::chain::{Block, BlockHash};
use crate::daemon::Daemon;
use crate::errors::*;
use crate::util::{spawn_thread, HeaderEntry, SyncChannel};
/// Source to fetch raw blocks from during indexing.
#[derive(Clone, Copy, Debug)]
pub enum FetchFrom {
    Bitcoind, // via JSON-RPC (slower; good for incremental updates)
    BlkFiles, // directly from blk*.dat files (faster; initial indexing)
}
pub fn start_fetcher(
from: FetchFrom,
daemon: &Daemon,
new_headers: Vec<HeaderEntry>,
) -> Result<Fetcher<Vec<BlockEntry>>> {
let fetcher = match from {
FetchFrom::Bitcoind => bitcoind_fetcher,
FetchFrom::BlkFiles => blkfiles_fetcher,
};
fetcher(daemon, new_headers)
}
/// A fetched block together with its header entry and serialized size.
pub struct BlockEntry {
    pub block: Block,
    pub entry: HeaderEntry,
    pub size: u32, // serialized size in bytes
}

/// A deserialized block paired with its serialized size in bytes.
type SizedBlock = (Block, u32);
/// A fetcher that produces all of its batches on the calling thread
/// when consumed — no background thread, no channel.
pub struct SequentialFetcher<T> {
    fetcher: Box<dyn FnOnce() -> Vec<Vec<T>>>,
}

impl<T> SequentialFetcher<T> {
    /// Wraps a deferred producer of batches.
    fn from<F: FnOnce() -> Vec<Vec<T>> + 'static>(pre_func: F) -> Self {
        Self {
            fetcher: Box::new(pre_func),
        }
    }

    /// Invokes the producer and feeds every batch, in order, to `func`.
    pub fn map<FN>(self, mut func: FN)
    where
        FN: FnMut(Vec<T>),
    {
        let batches = (self.fetcher)();
        for batch in batches {
            func(batch);
        }
    }
}
/// Builds a sequential (same-thread) fetcher that pulls blocks from
/// bitcoind over JSON-RPC in chunks of 100 headers.
pub fn bitcoind_sequential_fetcher(
    daemon: &Daemon,
    new_headers: Vec<HeaderEntry>,
) -> Result<SequentialFetcher<BlockEntry>> {
    // Dedicated connection for this fetcher.
    let daemon = daemon.reconnect()?;
    Ok(SequentialFetcher::from(move || {
        let mut batches = Vec::new();
        for chunk in new_headers.chunks(100) {
            let blockhashes: Vec<BlockHash> = chunk.iter().map(|e| *e.hash()).collect();
            let blocks = daemon
                .getblocks(&blockhashes)
                .expect("failed to get blocks from bitcoind");
            assert_eq!(blocks.len(), chunk.len());
            let block_entries: Vec<BlockEntry> = blocks
                .into_iter()
                .zip(chunk)
                .map(|(block, entry)| BlockEntry {
                    entry: entry.clone(), // TODO: remove this clone()
                    size: block.size() as u32,
                    block,
                })
                .collect();
            assert_eq!(block_entries.len(), chunk.len());
            batches.push(block_entries);
        }
        batches
    }))
}
/// A stream of items produced by a background thread and consumed via
/// a channel; `map` drains the channel and then joins the thread.
pub struct Fetcher<T> {
    receiver: crossbeam_channel::Receiver<T>,
    thread: thread::JoinHandle<()>,
}
impl<T> Fetcher<T> {
    /// Pairs a receiving channel with the producer thread feeding it.
    fn from(receiver: crossbeam_channel::Receiver<T>, thread: thread::JoinHandle<()>) -> Self {
        Self { receiver, thread }
    }

    /// Consumes every fetched item with `func`, then joins the producer
    /// thread (propagating its panic, if any).
    pub fn map<F>(self, mut func: F)
    where
        F: FnMut(T),
    {
        for item in self.receiver.into_iter() {
            func(item);
        }
        self.thread.join().expect("fetcher thread panicked")
    }
}
/// Spawns a background thread that downloads blocks from bitcoind over
/// JSON-RPC in 100-header chunks and forwards them through a channel.
fn bitcoind_fetcher(
    daemon: &Daemon,
    new_headers: Vec<HeaderEntry>,
) -> Result<Fetcher<Vec<BlockEntry>>> {
    if let Some(tip) = new_headers.last() {
        debug!("{:?} ({} left to index)", tip, new_headers.len());
    };
    // Dedicated connection for the fetcher thread.
    let daemon = daemon.reconnect()?;
    let chan = SyncChannel::new(1);
    let sender = chan.sender();
    let receiver = chan.into_receiver();
    let fetcher_thread = spawn_thread("bitcoind_fetcher", move || {
        for chunk in new_headers.chunks(100) {
            let blockhashes: Vec<BlockHash> = chunk.iter().map(|e| *e.hash()).collect();
            let blocks = daemon
                .getblocks(&blockhashes)
                .expect("failed to get blocks from bitcoind");
            assert_eq!(blocks.len(), chunk.len());
            let block_entries: Vec<BlockEntry> = blocks
                .into_iter()
                .zip(chunk)
                .map(|(block, entry)| BlockEntry {
                    entry: entry.clone(), // TODO: remove this clone()
                    size: block.size() as u32,
                    block,
                })
                .collect();
            assert_eq!(block_entries.len(), chunk.len());
            sender
                .send(block_entries)
                .expect("failed to send fetched blocks");
        }
    });
    Ok(Fetcher::from(receiver, fetcher_thread))
}
/// Spawns a pipeline (reader -> parser -> matcher) that extracts blocks
/// from blk*.dat files and pairs them with their entries in `new_headers`.
/// Blocks present in the files but absent from `new_headers` are skipped;
/// panics if any requested header is never found.
fn blkfiles_fetcher(
    daemon: &Daemon,
    new_headers: Vec<HeaderEntry>,
) -> Result<Fetcher<Vec<BlockEntry>>> {
    let magic = daemon.magic();
    let blk_files = daemon.list_blk_files()?;

    let chan = SyncChannel::new(1);
    let sender = chan.sender();

    // blockhash -> header; entries are removed as their blocks are found.
    let mut entry_map: HashMap<BlockHash, HeaderEntry> =
        new_headers.into_iter().map(|h| (*h.hash(), h)).collect();

    let parser = blkfiles_parser(blkfiles_reader(blk_files), magic);
    Ok(Fetcher::from(
        chan.into_receiver(),
        spawn_thread("blkfiles_fetcher", move || {
            parser.map(|sizedblocks| {
                let block_entries: Vec<BlockEntry> = sizedblocks
                    .into_iter()
                    .filter_map(|(block, size)| {
                        let blockhash = block.block_hash();
                        entry_map
                            .remove(&blockhash)
                            .map(|entry| BlockEntry { block, entry, size })
                            .or_else(|| {
                                trace!("skipping block {}", blockhash);
                                None
                            })
                    })
                    .collect();
                trace!("fetched {} blocks", block_entries.len());
                sender
                    .send(block_entries)
                    .expect("failed to send blocks entries from blk*.dat files");
            });
            // Every requested header should have been matched by now.
            if !entry_map.is_empty() {
                panic!(
                    "failed to index {} blocks from blk*.dat files",
                    entry_map.len()
                )
            }
        }),
    ))
}
/// Spawns a thread that reads each blk*.dat file into memory and sends
/// the raw bytes downstream. If an `xor.dat` obfuscation key exists next
/// to the blk files, each file's contents are de-obfuscated by XORing
/// with the key repeated over the whole file.
fn blkfiles_reader(blk_files: Vec<PathBuf>) -> Fetcher<Vec<u8>> {
    let chan = SyncChannel::new(1);
    let sender = chan.sender();

    // Optional obfuscation key stored alongside the blk*.dat files.
    let xor_key = blk_files.first().and_then(|p| {
        let xor_file = p
            .parent()
            .expect("blk.dat files must exist in a directory")
            .join("xor.dat");
        if xor_file.exists() {
            Some(fs::read(xor_file).expect("xor.dat exists"))
        } else {
            None
        }
    });

    Fetcher::from(
        chan.into_receiver(),
        spawn_thread("blkfiles_reader", move || {
            for path in blk_files {
                trace!("reading {:?}", path);
                let mut blob = fs::read(&path)
                    .unwrap_or_else(|e| panic!("failed to read {:?}: {:?}", path, e));
                // If the xor.dat exists. Use it to decrypt the block files.
                if let Some(xor_key) = &xor_key {
                    for (&key, byte) in xor_key.iter().cycle().zip(blob.iter_mut()) {
                        *byte ^= key;
                    }
                }
                sender
                    .send(blob)
                    .unwrap_or_else(|_| panic!("failed to send {:?} contents", path));
            }
        }),
    )
}
/// Spawns a thread that parses raw blk-file blobs into sized blocks and
/// forwards them downstream.
fn blkfiles_parser(blobs: Fetcher<Vec<u8>>, magic: u32) -> Fetcher<Vec<SizedBlock>> {
    let chan = SyncChannel::new(1);
    let sender = chan.sender();
    let receiver = chan.into_receiver();
    let parser_thread = spawn_thread("blkfiles_parser", move || {
        blobs.map(|blob| {
            trace!("parsing {} bytes", blob.len());
            let blocks = parse_blocks(blob, magic).expect("failed to parse blk*.dat file");
            sender
                .send(blocks)
                .expect("failed to send blocks from blk*.dat file");
        });
    });
    Fetcher::from(receiver, parser_thread)
}
/// Extracts all blocks from a raw blk*.dat blob.
///
/// Scans for the network `magic` marker, reads the 4-byte block size,
/// slices out each block body, and deserializes the bodies in parallel.
/// Returns each block paired with its serialized size.
fn parse_blocks(blob: Vec<u8>, magic: u32) -> Result<Vec<SizedBlock>> {
    let mut cursor = Cursor::new(&blob);
    let mut slices = vec![];
    let max_pos = blob.len() as u64;

    while cursor.position() < max_pos {
        let offset = cursor.position();
        match u32::consensus_decode(&mut cursor) {
            Ok(value) => {
                if magic != value {
                    // Not a magic marker here; resync one byte forward.
                    cursor.set_position(offset + 1);
                    continue;
                }
            }
            Err(_) => break, // EOF
        };
        let block_size = u32::consensus_decode(&mut cursor).chain_err(|| "no block size")?;
        let start = cursor.position();
        let end = start + block_size as u64;

        // A truncated file may declare a block size extending past the end
        // of the blob; the slice below would panic. Stop scanning instead.
        if end > max_pos {
            break;
        }

        // If Core's WriteBlockToDisk ftell fails, only the magic bytes and size will be written
        // and the block body won't be written to the blk*.dat file.
        // Since the first 4 bytes should contain the block's version, we can skip such blocks
        // by peeking the cursor (and skipping previous `magic` and `block_size`).
        match u32::consensus_decode(&mut cursor) {
            Ok(value) => {
                if magic == value {
                    cursor.set_position(start);
                    continue;
                }
            }
            Err(_) => break, // EOF
        }

        slices.push((&blob[start as usize..end as usize], block_size));
        cursor.set_position(end);
    }

    // Deserialization is CPU-bound; run it on a dedicated rayon pool.
    let pool = rayon::ThreadPoolBuilder::new()
        .num_threads(0) // CPU-bound
        .thread_name(|i| format!("parse-blocks-{}", i))
        .build()
        .unwrap();
    Ok(pool.install(|| {
        slices
            .into_par_iter()
            .map(|(slice, size)| (deserialize(slice).expect("failed to parse Block"), size))
            .collect()
    }))
}
| rust | MIT | 3000bd13e76e2f33d0844a30489108846954d0a3 | 2026-01-04T20:24:15.088141Z | false |
mempool/electrs | https://github.com/mempool/electrs/blob/3000bd13e76e2f33d0844a30489108846954d0a3/src/new_index/schema.rs | src/new_index/schema.rs | use bitcoin::hashes::sha256d::Hash as Sha256dHash;
#[cfg(not(feature = "liquid"))]
use bitcoin::util::merkleblock::MerkleBlock;
use bitcoin::VarInt;
use itertools::Itertools;
use rayon::prelude::*;
use sha2::{Digest, Sha256};
#[cfg(not(feature = "liquid"))]
use bitcoin::consensus::encode::{deserialize, serialize};
#[cfg(feature = "liquid")]
use elements::{
encode::{deserialize, serialize},
AssetId,
};
use std::collections::{BTreeSet, HashMap, HashSet};
use std::convert::TryInto;
use std::path::Path;
use std::sync::{Arc, RwLock};
use crate::chain::{
BlockHash, BlockHeader, Network, OutPoint, Script, Transaction, TxOut, Txid, Value,
};
use crate::config::Config;
use crate::daemon::Daemon;
use crate::errors::*;
use crate::metrics::{Gauge, HistogramOpts, HistogramTimer, HistogramVec, MetricOpts, Metrics};
use crate::util::{
bincode_util, full_hash, has_prevout, is_spendable, BlockHeaderMeta, BlockId, BlockMeta,
BlockStatus, Bytes, HeaderEntry, HeaderList, ScriptToAddr,
};
use crate::new_index::db::{DBFlush, DBRow, ReverseScanIterator, ScanIterator, DB};
use crate::new_index::fetch::{start_fetcher, BlockEntry, FetchFrom};
#[cfg(feature = "liquid")]
use crate::elements::{asset, peg};
use super::{db::ReverseScanGroupIterator, fetch::bitcoind_sequential_fetcher};
// Minimum number of history items before results are cached.
// NOTE(review): the consuming code is outside this chunk — confirm exact semantics.
const MIN_HISTORY_ITEMS_TO_CACHE: usize = 100;

/// Owns the three RocksDB instances plus in-memory indexing state.
pub struct Store {
    // TODO: should be column families
    txstore_db: DB,  // raw txs/blocks; also holds the chain tip under key "t"
    history_db: DB,  // history index rows
    cache_db: DB,    // cache DB (consumers are outside this chunk)
    added_blockhashes: RwLock<HashSet<BlockHash>>,   // blocks already added to txstore_db
    indexed_blockhashes: RwLock<HashSet<BlockHash>>, // blocks already indexed in history_db
    indexed_headers: RwLock<HeaderList>,             // in-memory header chain
}
impl Store {
    /// Opens the three per-purpose DBs under `path` (`txstore`, `history`,
    /// `cache`) and loads the sets of already-added / already-indexed
    /// blockhashes plus the in-memory header chain.
    pub fn open(path: &Path, config: &Config) -> Self {
        let txstore_db = DB::open(&path.join("txstore"), config);
        let added_blockhashes = load_blockhashes(&txstore_db, &BlockRow::done_filter());
        debug!("{} blocks were added", added_blockhashes.len());

        let history_db = DB::open(&path.join("history"), config);
        let indexed_blockhashes = load_blockhashes(&history_db, &BlockRow::done_filter());
        debug!("{} blocks were indexed", indexed_blockhashes.len());

        let cache_db = DB::open(&path.join("cache"), config);

        // Key "t" stores the chain tip; its presence means an initial sync
        // completed at some point (see done_initial_sync).
        let headers = if let Some(tip_hash) = txstore_db.get(b"t") {
            let tip_hash = deserialize(&tip_hash).expect("invalid chain tip in `t`");
            let headers_map = load_blockheaders(&txstore_db);
            debug!(
                "{} headers were loaded, tip at {:?}",
                headers_map.len(),
                tip_hash
            );
            HeaderList::new(headers_map, tip_hash)
        } else {
            HeaderList::empty()
        };

        Store {
            txstore_db,
            history_db,
            cache_db,
            added_blockhashes: RwLock::new(added_blockhashes),
            indexed_blockhashes: RwLock::new(indexed_blockhashes),
            indexed_headers: RwLock::new(headers),
        }
    }

    /// The DB holding raw transactions/blocks.
    pub fn txstore_db(&self) -> &DB {
        &self.txstore_db
    }

    /// The DB holding history index rows.
    pub fn history_db(&self) -> &DB {
        &self.history_db
    }

    /// The cache DB.
    pub fn cache_db(&self) -> &DB {
        &self.cache_db
    }

    /// True once a chain tip (key "t") has been persisted.
    pub fn done_initial_sync(&self) -> bool {
        self.txstore_db.get(b"t").is_some()
    }
}
/// Maps an outpoint to its (confirming block, value).
type UtxoMap = HashMap<OutPoint, (BlockId, Value)>;

/// An unspent transaction output. `confirmed` is None when there is no
/// confirming block (presumably unconfirmed — matches the field name).
/// Liquid builds carry extra confidential-transaction fields.
#[derive(Debug)]
pub struct Utxo {
    pub txid: Txid,
    pub vout: u32,
    pub confirmed: Option<BlockId>,
    pub value: Value,

    #[cfg(feature = "liquid")]
    pub asset: elements::confidential::Asset,
    #[cfg(feature = "liquid")]
    pub nonce: elements::confidential::Nonce,
    #[cfg(feature = "liquid")]
    pub witness: elements::TxOutWitness,
}
impl From<&Utxo> for OutPoint {
fn from(utxo: &Utxo) -> Self {
OutPoint {
txid: utxo.txid,
vout: utxo.vout,
}
}
}
/// The input (txid, vin) that spends some txo, with its confirming
/// block when available.
#[derive(Debug)]
pub struct SpendingInput {
    pub txid: Txid,
    pub vin: u32,
    pub confirmed: Option<BlockId>,
}
/// Aggregate statistics for a script: transaction and txo counts, plus
/// amount sums on non-Liquid builds only (the sum fields are compiled
/// out under the `liquid` feature).
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct ScriptStats {
    pub tx_count: usize,
    pub funded_txo_count: usize,
    pub spent_txo_count: usize,
    #[cfg(not(feature = "liquid"))]
    pub funded_txo_sum: u64,
    #[cfg(not(feature = "liquid"))]
    pub spent_txo_sum: u64,
}
impl ScriptStats {
    #[cfg(feature = "liquid")]
    fn is_sane(&self) -> bool {
        // See the non-liquid variant for the rationale; the amount-sum
        // checks are omitted because the sum fields don't exist here.
        self.spent_txo_count <= self.funded_txo_count
            && self.tx_count <= self.spent_txo_count + self.funded_txo_count
    }

    /// Sanity-checks the internal consistency of the counters.
    #[cfg(not(feature = "liquid"))]
    fn is_sane(&self) -> bool {
        // A txo must be funded before it can be spent.
        if self.spent_txo_count > self.funded_txo_count {
            return false;
        }
        // Most spread-out case: every funding and every spend in its own
        // tx gives at most funded + spent transactions in total.
        if self.tx_count > self.spent_txo_count + self.funded_txo_count {
            return false;
        }
        // Can't spend more coins than were funded.
        if self.spent_txo_sum > self.funded_txo_sum {
            return false;
        }
        // Zero remaining txos implies zero remaining balance, and vice versa.
        (self.funded_txo_count == self.spent_txo_count)
            == (self.funded_txo_sum == self.spent_txo_sum)
    }
}
pub struct Indexer {
store: Arc<Store>,
flush: DBFlush,
from: FetchFrom,
iconfig: IndexerConfig,
duration: HistogramVec,
tip_metric: Gauge,
}
struct IndexerConfig {
light_mode: bool,
address_search: bool,
index_unspendables: bool,
network: Network,
#[cfg(feature = "liquid")]
parent_network: crate::chain::BNetwork,
}
impl From<&Config> for IndexerConfig {
fn from(config: &Config) -> Self {
IndexerConfig {
light_mode: config.light_mode,
address_search: config.address_search,
index_unspendables: config.index_unspendables,
network: config.network_type,
#[cfg(feature = "liquid")]
parent_network: config.parent_network,
}
}
}
pub struct ChainQuery {
store: Arc<Store>, // TODO: should be used as read-only
daemon: Arc<Daemon>,
light_mode: bool,
duration: HistogramVec,
network: Network,
}
#[derive(Debug, Clone)]
pub enum Operation {
AddBlocks,
DeleteBlocks,
DeleteBlocksWithHistory(crossbeam_channel::Sender<[u8; 32]>),
}
impl std::fmt::Display for Operation {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(match self {
Operation::AddBlocks => "Adding",
Operation::DeleteBlocks | Operation::DeleteBlocksWithHistory(_) => "Deleting",
})
}
}
// TODO: &[Block] should be an iterator / a queue.
impl Indexer {
pub fn open(store: Arc<Store>, from: FetchFrom, config: &Config, metrics: &Metrics) -> Self {
Indexer {
store,
flush: DBFlush::Disable,
from,
iconfig: IndexerConfig::from(config),
duration: metrics.histogram_vec(
HistogramOpts::new("index_duration", "Index update duration (in seconds)"),
&["step"],
),
tip_metric: metrics.gauge(MetricOpts::new("tip_height", "Current chain tip height")),
}
}
fn start_timer(&self, name: &str) -> HistogramTimer {
self.duration.with_label_values(&[name]).start_timer()
}
fn headers_to_add(&self, new_headers: &[HeaderEntry]) -> Vec<HeaderEntry> {
let added_blockhashes = self.store.added_blockhashes.read().unwrap();
new_headers
.iter()
.filter(|e| !added_blockhashes.contains(e.hash()))
.cloned()
.collect()
}
fn headers_to_index(&self, new_headers: &[HeaderEntry]) -> Vec<HeaderEntry> {
let indexed_blockhashes = self.store.indexed_blockhashes.read().unwrap();
new_headers
.iter()
.filter(|e| !indexed_blockhashes.contains(e.hash()))
.cloned()
.collect()
}
fn start_auto_compactions(&self, db: &DB) {
let key = b"F".to_vec();
if db.get(&key).is_none() {
db.full_compaction();
db.put_sync(&key, b"");
assert!(db.get(&key).is_some());
}
db.enable_auto_compaction();
}
fn get_new_headers(&self, daemon: &Daemon, tip: &BlockHash) -> Result<Vec<HeaderEntry>> {
let headers = self.store.indexed_headers.read().unwrap();
let new_headers = daemon.get_new_headers(&headers, tip)?;
let result = headers.order(new_headers);
if let Some(tip) = result.last() {
info!("{:?} ({} left to index)", tip, result.len());
};
Ok(result)
}
fn reorg(&self, reorged: Vec<HeaderEntry>, daemon: &Daemon) -> Result<()> {
if reorged.len() > 10 {
warn!(
"reorg of over 10 blocks ({}) detected! Wonky stuff might happen!",
reorged.len()
);
}
// This channel holds a Vec of [u8; 32] scripts found in the blocks (with duplicates)
// if we reorg the whole mainnet chain it should come out to about 145 GB of memory.
let (tx, rx) = crossbeam_channel::unbounded();
// Delete history_db
bitcoind_sequential_fetcher(daemon, reorged.clone())?
.map(|blocks| self.index(&blocks, Operation::DeleteBlocksWithHistory(tx.clone())));
// Delete txstore
bitcoind_sequential_fetcher(daemon, reorged)?
.map(|blocks| self.add(&blocks, Operation::DeleteBlocks));
// All senders must be dropped for receiver iterator to finish
drop(tx);
// All senders are dropped by now, so the receiver will iterate until the
// end of the unbounded queue.
let scripts = rx.into_iter().collect::<HashSet<_>>();
for script in scripts {
// cancel the script cache DB for these scripts. They might get incorrect data mixed in.
self.store.cache_db.delete(vec![
StatsCacheRow::key(&script),
UtxoCacheRow::key(&script),
#[cfg(feature = "liquid")]
[b"z", &script[..]].concat(), // asset cache key
]);
}
Ok(())
}
pub fn update(&mut self, daemon: &Daemon) -> Result<BlockHash> {
let daemon = daemon.reconnect()?;
let tip = daemon.getbestblockhash()?;
let new_headers = self.get_new_headers(&daemon, &tip)?;
// Must rollback blocks before rolling forward
let headers_len = {
let mut headers = self.store.indexed_headers.write().unwrap();
let (reorged, rollback_tip) = headers.apply(new_headers.clone());
assert_eq!(tip, *headers.tip());
let headers_len = headers.len();
drop(headers);
if !reorged.is_empty() {
// We should rollback the tip blockhash first in case something crashes during rollback
// or before the next block appears and sets the new tip.
match rollback_tip {
Some(rb_tip) => {
debug!("updating reorged tip to {:?}", rb_tip);
self.store.txstore_db.put_sync(b"t", &serialize(&rb_tip));
}
None => {
// This should only happen on regtest or some weird networks.
error!("Rollback to genesis block detected!!! (rollback to height 0)");
// There is no tip anymore.
self.store.txstore_db.delete(vec![b"t".into()]);
}
}
self.reorg(reorged, &daemon)?;
}
headers_len
};
let to_add = self.headers_to_add(&new_headers);
debug!(
"adding transactions from {} blocks using {:?}",
to_add.len(),
self.from
);
start_fetcher(self.from, &daemon, to_add)?
.map(|blocks| self.add(&blocks, Operation::AddBlocks));
self.start_auto_compactions(&self.store.txstore_db);
let to_index = self.headers_to_index(&new_headers);
debug!(
"indexing history from {} blocks using {:?}",
to_index.len(),
self.from
);
start_fetcher(self.from, &daemon, to_index)?
.map(|blocks| self.index(&blocks, Operation::AddBlocks));
self.start_auto_compactions(&self.store.history_db);
if let DBFlush::Disable = self.flush {
debug!("flushing to disk");
self.store.txstore_db.flush();
self.store.history_db.flush();
self.flush = DBFlush::Enable;
}
// update the synced tip *after* the new data is flushed to disk
debug!("updating synced tip to {:?}", tip);
self.store.txstore_db.put_sync(b"t", &serialize(&tip));
if let FetchFrom::BlkFiles = self.from {
self.from = FetchFrom::Bitcoind;
}
self.tip_metric.set(headers_len as i64 - 1);
Ok(tip)
}
fn add(&self, blocks: &[BlockEntry], op: Operation) {
debug!("{} {} blocks to Indexer", op, blocks.len());
let write_label = match &op {
Operation::AddBlocks => "add_write",
_ => "delete_write",
};
// TODO: skip orphaned blocks?
let rows = {
let _timer = self.start_timer("add_process");
add_blocks(blocks, &self.iconfig)
};
{
let _timer = self.start_timer(write_label);
if let Operation::AddBlocks = op {
self.store.txstore_db.write(rows, self.flush);
} else {
self.store
.txstore_db
.delete(rows.into_iter().map(|r| r.key).collect());
}
}
if let Operation::AddBlocks = op {
self.store
.added_blockhashes
.write()
.unwrap()
.extend(blocks.iter().map(|b| {
if b.entry.height() % 10_000 == 0 {
info!("Tx indexing is up to height={}", b.entry.height());
}
b.entry.hash()
}));
} else {
let mut added_blockhashes = self.store.added_blockhashes.write().unwrap();
for b in blocks {
added_blockhashes.remove(b.entry.hash());
}
}
}
fn index(&self, blocks: &[BlockEntry], op: Operation) {
debug!("Indexing ({}) {} blocks with Indexer", op, blocks.len());
let previous_txos_map = {
let _timer = self.start_timer("index_lookup");
if matches!(op, Operation::AddBlocks) {
lookup_txos(&self.store.txstore_db, &get_previous_txos(blocks), false)
} else {
lookup_txos_sequential(&self.store.txstore_db, &get_previous_txos(blocks), false)
}
};
let rows = {
let _timer = self.start_timer("index_process");
if let Operation::AddBlocks = op {
let added_blockhashes = self.store.added_blockhashes.read().unwrap();
for b in blocks {
if b.entry.height() % 10_000 == 0 {
info!("History indexing is up to height={}", b.entry.height());
}
let blockhash = b.entry.hash();
// TODO: replace by lookup into txstore_db?
if !added_blockhashes.contains(blockhash) {
panic!("cannot index block {} (missing from store)", blockhash);
}
}
}
index_blocks(blocks, &previous_txos_map, &self.iconfig, &op)
};
if let Operation::AddBlocks = op {
self.store.history_db.write(rows, self.flush);
} else {
self.store
.history_db
.delete(rows.into_iter().map(|r| r.key).collect());
}
}
}
impl ChainQuery {
pub fn new(store: Arc<Store>, daemon: Arc<Daemon>, config: &Config, metrics: &Metrics) -> Self {
ChainQuery {
store,
daemon,
light_mode: config.light_mode,
network: config.network_type,
duration: metrics.histogram_vec(
HistogramOpts::new("query_duration", "Index query duration (in seconds)"),
&["name"],
),
}
}
pub fn network(&self) -> Network {
self.network
}
pub fn store(&self) -> &Store {
&self.store
}
fn start_timer(&self, name: &str) -> HistogramTimer {
self.duration.with_label_values(&[name]).start_timer()
}
pub fn get_block_txids(&self, hash: &BlockHash) -> Option<Vec<Txid>> {
let _timer = self.start_timer("get_block_txids");
if self.light_mode {
// TODO fetch block as binary from REST API instead of as hex
let mut blockinfo = self.daemon.getblock_raw(hash, 1).ok()?;
Some(serde_json::from_value(blockinfo["tx"].take()).unwrap())
} else {
self.store
.txstore_db
.get(&BlockRow::txids_key(full_hash(&hash[..])))
.map(|val| {
bincode_util::deserialize_little(&val).expect("failed to parse block txids")
})
}
}
pub fn get_block_txs(&self, hash: &BlockHash) -> Option<Vec<Transaction>> {
let _timer = self.start_timer("get_block_txs");
let txids: Option<Vec<Txid>> = if self.light_mode {
// TODO fetch block as binary from REST API instead of as hex
let mut blockinfo = self.daemon.getblock_raw(hash, 1).ok()?;
Some(serde_json::from_value(blockinfo["tx"].take()).unwrap())
} else {
self.store
.txstore_db
.get(&BlockRow::txids_key(full_hash(&hash[..])))
.map(|val| {
bincode_util::deserialize_little(&val).expect("failed to parse block txids")
})
};
txids.and_then(|txid_vec| {
let mut transactions = Vec::with_capacity(txid_vec.len());
for txid in txid_vec {
match self.lookup_txn(&txid, Some(hash)) {
Some(transaction) => transactions.push(transaction),
None => return None,
}
}
Some(transactions)
})
}
pub fn get_block_meta(&self, hash: &BlockHash) -> Option<BlockMeta> {
let _timer = self.start_timer("get_block_meta");
if self.light_mode {
let blockinfo = self.daemon.getblock_raw(hash, 1).ok()?;
Some(serde_json::from_value(blockinfo).unwrap())
} else {
self.store
.txstore_db
.get(&BlockRow::meta_key(full_hash(&hash[..])))
.map(|val| {
bincode_util::deserialize_little(&val).expect("failed to parse BlockMeta")
})
}
}
pub fn get_block_raw(&self, hash: &BlockHash) -> Option<Vec<u8>> {
let _timer = self.start_timer("get_block_raw");
if self.light_mode {
let blockhex = self.daemon.getblock_raw(hash, 0).ok()?;
Some(hex::decode(blockhex.as_str().unwrap()).unwrap())
} else {
let entry = self.header_by_hash(hash)?;
let meta = self.get_block_meta(hash)?;
let txids = self.get_block_txids(hash)?;
// Reconstruct the raw block using the header and txids,
// as <raw header><tx count varint><raw txs>
let mut raw = Vec::with_capacity(meta.size as usize);
raw.append(&mut serialize(entry.header()));
raw.append(&mut serialize(&VarInt(txids.len() as u64)));
for txid in txids {
// we don't need to provide the blockhash because we know we're not in light mode
raw.append(&mut self.lookup_raw_txn(&txid, None)?);
}
Some(raw)
}
}
pub fn get_block_header(&self, hash: &BlockHash) -> Option<BlockHeader> {
let _timer = self.start_timer("get_block_header");
#[allow(clippy::clone_on_copy)]
Some(self.header_by_hash(hash)?.header().clone())
}
pub fn get_mtp(&self, height: usize) -> u32 {
let _timer = self.start_timer("get_block_mtp");
self.store.indexed_headers.read().unwrap().get_mtp(height)
}
pub fn get_block_with_meta(&self, hash: &BlockHash) -> Option<BlockHeaderMeta> {
let _timer = self.start_timer("get_block_with_meta");
let header_entry = self.header_by_hash(hash)?;
Some(BlockHeaderMeta {
meta: self.get_block_meta(hash)?,
mtp: self.get_mtp(header_entry.height()),
header_entry,
})
}
pub fn history_iter_scan(
&self,
code: u8,
hash: &[u8],
start_height: usize,
) -> ScanIterator<'_> {
self.store.history_db.iter_scan_from(
&TxHistoryRow::filter(code, hash),
&TxHistoryRow::prefix_height(code, hash, start_height as u32),
)
}
fn history_iter_scan_reverse(
&self,
code: u8,
hash: &[u8],
start_height: Option<u32>,
) -> ReverseScanIterator<'_> {
self.store.history_db.iter_scan_reverse(
&TxHistoryRow::filter(code, hash),
&start_height.map_or(TxHistoryRow::prefix_end(code, hash), |start_height| {
TxHistoryRow::prefix_height_end(code, hash, start_height)
}),
)
}
fn history_iter_scan_group_reverse(
&self,
code: u8,
hashes: &[[u8; 32]],
start_height: Option<u32>,
) -> ReverseScanGroupIterator<'_> {
self.store.history_db.iter_scan_group_reverse(
hashes.iter().map(|hash| {
let prefix = TxHistoryRow::filter(code, &hash[..]);
let prefix_max = start_height
.map_or(TxHistoryRow::prefix_end(code, &hash[..]), |start_height| {
TxHistoryRow::prefix_height_end(code, &hash[..], start_height)
});
(prefix, prefix_max)
}),
33,
)
}
fn collate_summaries(
&self,
iter: impl Iterator<Item = TxHistoryRow>,
last_seen_txid: Option<&Txid>,
limit: usize,
) -> Vec<TxHistorySummary> {
// collate utxo funding/spending events by transaction
let rows = iter
.map(|row| (row.get_txid(), row.key.txinfo, row.key.tx_position))
// We should make sure that each history entry should be unique
// We check uniqueness against equality of these 3 things:
// ("current txid", "the position in the inputs or outputs", "whether it's an input or output")
.unique_by(|(txid, info, _)| (*txid, info.get_vin_or_vout(), info.has_vin()))
.skip_while(|(txid, _, _)| {
// skip until we reach the last_seen_txid
last_seen_txid.is_some_and(|last_seen_txid| last_seen_txid != txid)
})
.skip_while(|(txid, _, _)| {
// skip the last_seen_txid itself
last_seen_txid == Some(txid)
})
.filter_map(|(txid, info, tx_position)| {
self.tx_confirming_block(&txid)
.map(|b| (txid, info, b.height, b.time, tx_position))
});
let mut map: HashMap<Txid, TxHistorySummary> = HashMap::new();
for (txid, info, height, time, tx_position) in rows {
if !map.contains_key(&txid) && map.len() >= limit {
break;
}
match info {
#[cfg(not(feature = "liquid"))]
TxHistoryInfo::Funding(info) => {
map.entry(txid)
.and_modify(|tx| {
tx.value = tx.value.saturating_add(info.value.try_into().unwrap_or(0))
})
.or_insert(TxHistorySummary {
txid,
value: info.value.try_into().unwrap_or(0),
height,
time,
tx_position,
});
}
#[cfg(not(feature = "liquid"))]
TxHistoryInfo::Spending(info) => {
map.entry(txid)
.and_modify(|tx| {
tx.value = tx.value.saturating_sub(info.value.try_into().unwrap_or(0))
})
.or_insert(TxHistorySummary {
txid,
value: 0_i64.saturating_sub(info.value.try_into().unwrap_or(0)),
height,
time,
tx_position,
});
}
#[cfg(feature = "liquid")]
TxHistoryInfo::Funding(_info) => {
map.entry(txid).or_insert(TxHistorySummary {
txid,
value: 0,
height,
time,
tx_position,
});
}
#[cfg(feature = "liquid")]
TxHistoryInfo::Spending(_info) => {
map.entry(txid).or_insert(TxHistorySummary {
txid,
value: 0,
height,
time,
tx_position,
});
}
#[cfg(feature = "liquid")]
_ => {}
}
}
let mut tx_summaries = map.into_values().collect::<Vec<TxHistorySummary>>();
tx_summaries.sort_by(|a, b| {
if a.height == b.height {
if a.tx_position == b.tx_position {
a.value.cmp(&b.value)
} else {
b.tx_position.cmp(&a.tx_position)
}
} else {
b.height.cmp(&a.height)
}
});
tx_summaries
}
pub fn summary(
&self,
scripthash: &[u8],
last_seen_txid: Option<&Txid>,
start_height: Option<u32>,
limit: usize,
) -> Vec<TxHistorySummary> {
// scripthash lookup
self._summary(b'H', scripthash, last_seen_txid, start_height, limit)
}
fn _summary(
&self,
code: u8,
hash: &[u8],
last_seen_txid: Option<&Txid>,
start_height: Option<u32>,
limit: usize,
) -> Vec<TxHistorySummary> {
let _timer_scan = self.start_timer("address_summary");
let rows = self
.history_iter_scan_reverse(code, hash, start_height)
.map(TxHistoryRow::from_row);
self.collate_summaries(rows, last_seen_txid, limit)
}
pub fn summary_group(
&self,
scripthashes: &[[u8; 32]],
last_seen_txid: Option<&Txid>,
start_height: Option<u32>,
limit: usize,
) -> Vec<TxHistorySummary> {
// scripthash lookup
let _timer_scan = self.start_timer("address_group_summary");
let rows = self
.history_iter_scan_group_reverse(b'H', scripthashes, start_height)
.map(TxHistoryRow::from_row);
self.collate_summaries(rows, last_seen_txid, limit)
}
pub fn history<'a>(
&'a self,
scripthash: &[u8],
last_seen_txid: Option<&'a Txid>,
start_height: Option<u32>,
limit: usize,
) -> impl rayon::iter::ParallelIterator<Item = Result<(Transaction, BlockId, u16)>> + 'a {
// scripthash lookup
self._history(b'H', scripthash, last_seen_txid, start_height, limit)
}
pub fn history_txids_iter<'a>(&'a self, scripthash: &[u8]) -> impl Iterator<Item = Txid> + 'a {
self.history_iter_scan_reverse(b'H', scripthash, None)
.map(|row| TxHistoryRow::from_row(row).get_txid())
.unique()
}
fn _history<'a>(
&'a self,
code: u8,
hash: &[u8],
last_seen_txid: Option<&'a Txid>,
start_height: Option<u32>,
limit: usize,
) -> impl rayon::iter::ParallelIterator<Item = Result<(Transaction, BlockId, u16)>> + 'a {
let _timer_scan = self.start_timer("history");
self.lookup_txns(
self.history_iter_scan_reverse(code, hash, start_height)
.map(TxHistoryRow::from_row)
// XXX: unique_by() requires keeping an in-memory list of all txids, can we avoid that?
.unique_by(|row| row.get_txid())
// TODO seek directly to last seen tx without reading earlier rows
.skip_while(move |row| {
// skip until we reach the last_seen_txid
last_seen_txid.is_some_and(|last_seen_txid| last_seen_txid != &row.get_txid())
})
.skip(match last_seen_txid {
Some(_) => 1, // skip the last_seen_txid itself
None => 0,
})
.filter_map(move |row| {
self.tx_confirming_block(&row.get_txid())
.map(|b| (row.get_txid(), b, row.get_tx_position()))
}),
limit,
)
}
pub fn history_txids(&self, scripthash: &[u8], limit: usize) -> Vec<(Txid, BlockId)> {
// scripthash lookup
self._history_txids(b'H', scripthash, limit)
}
fn _history_txids(&self, code: u8, hash: &[u8], limit: usize) -> Vec<(Txid, BlockId)> {
let _timer = self.start_timer("history_txids");
self.history_iter_scan(code, hash, 0)
.map(|row| TxHistoryRow::from_row(row).get_txid())
.unique()
.filter_map(|txid| self.tx_confirming_block(&txid).map(|b| (txid, b)))
.take(limit)
.collect()
}
pub fn history_group<'a>(
&'a self,
scripthashes: &[[u8; 32]],
last_seen_txid: Option<&'a Txid>,
start_height: Option<u32>,
limit: usize,
) -> impl rayon::iter::ParallelIterator<Item = Result<(Transaction, BlockId, u16)>> + 'a {
// scripthash lookup
self._history_group(b'H', scripthashes, last_seen_txid, start_height, limit)
}
pub fn history_txids_iter_group(
&self,
scripthashes: &[[u8; 32]],
start_height: Option<u32>,
) -> impl Iterator<Item = Txid> + '_ {
self.history_iter_scan_group_reverse(b'H', scripthashes, start_height)
.map(|row| TxHistoryRow::from_row(row).get_txid())
.unique()
}
fn _history_group<'a>(
&'a self,
code: u8,
hashes: &[[u8; 32]],
last_seen_txid: Option<&'a Txid>,
start_height: Option<u32>,
limit: usize,
) -> impl rayon::iter::ParallelIterator<Item = Result<(Transaction, BlockId, u16)>> + 'a {
debug!("limit {} | last_seen {:?}", limit, last_seen_txid);
let _timer_scan = self.start_timer("history_group");
self.lookup_txns(
self.history_iter_scan_group_reverse(code, hashes, start_height)
.map(TxHistoryRow::from_row)
// XXX: unique_by() requires keeping an in-memory list of all txids, can we avoid that?
.unique_by(|row| row.get_txid())
.skip_while(move |row| {
// we already seeked to the last txid at this height
// now skip just past the last_seen_txid itself
last_seen_txid.is_some_and(|last_seen_txid| last_seen_txid != &row.get_txid())
})
.skip(match last_seen_txid {
Some(_) => 1, // skip the last_seen_txid itself
None => 0,
})
.filter_map(move |row| {
self.tx_confirming_block(&row.get_txid())
.map(|b| (row.get_txid(), b, row.get_tx_position()))
}),
limit,
)
}
// TODO: avoid duplication with stats/stats_delta?
pub fn utxo(&self, scripthash: &[u8], limit: usize, flush: DBFlush) -> Result<Vec<Utxo>> {
| rust | MIT | 3000bd13e76e2f33d0844a30489108846954d0a3 | 2026-01-04T20:24:15.088141Z | true |
mempool/electrs | https://github.com/mempool/electrs/blob/3000bd13e76e2f33d0844a30489108846954d0a3/src/new_index/mod.rs | src/new_index/mod.rs | pub mod db;
mod fetch;
mod mempool;
pub mod precache;
mod query;
pub mod schema;
pub use self::db::{DBRow, DB};
pub use self::fetch::{BlockEntry, FetchFrom};
pub use self::mempool::Mempool;
pub use self::query::Query;
pub use self::schema::{
compute_script_hash, parse_hash, ChainQuery, FundingInfo, Indexer, ScriptStats, SpendingInfo,
SpendingInput, Store, TxHistoryInfo, TxHistoryKey, TxHistoryRow, Utxo,
};
| rust | MIT | 3000bd13e76e2f33d0844a30489108846954d0a3 | 2026-01-04T20:24:15.088141Z | false |
mempool/electrs | https://github.com/mempool/electrs/blob/3000bd13e76e2f33d0844a30489108846954d0a3/src/new_index/mempool.rs | src/new_index/mempool.rs | use bounded_vec_deque::BoundedVecDeque;
use itertools::Itertools;
#[cfg(not(feature = "liquid"))]
use bitcoin::consensus::encode::serialize;
#[cfg(feature = "liquid")]
use elements::{encode::serialize, AssetId};
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use std::iter::FromIterator;
use std::ops::Bound::{Excluded, Unbounded};
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};
use crate::chain::{deserialize, Network, OutPoint, Transaction, TxOut, Txid};
use crate::config::Config;
use crate::daemon::Daemon;
use crate::errors::*;
use crate::metrics::{GaugeVec, HistogramOpts, HistogramVec, MetricOpts, Metrics};
use crate::new_index::{
compute_script_hash, schema::FullHash, ChainQuery, FundingInfo, ScriptStats, SpendingInfo,
SpendingInput, TxHistoryInfo, Utxo,
};
use crate::util::fees::{make_fee_histogram, TxFeeInfo};
use crate::util::{extract_tx_prevouts, full_hash, has_prevout, is_spendable, Bytes};
#[cfg(feature = "liquid")]
use crate::elements::asset;
pub struct Mempool {
chain: Arc<ChainQuery>,
config: Arc<Config>,
txstore: BTreeMap<Txid, Transaction>,
feeinfo: HashMap<Txid, TxFeeInfo>,
history: HashMap<FullHash, Vec<TxHistoryInfo>>, // ScriptHash -> {history_entries}
edges: HashMap<OutPoint, (Txid, u32)>, // OutPoint -> (spending_txid, spending_vin)
recent: BoundedVecDeque<TxOverview>, // The N most recent txs to enter the mempool
backlog_stats: (BacklogStats, Instant),
// monitoring
latency: HistogramVec, // mempool requests latency
delta: HistogramVec, // # of added/removed txs
count: GaugeVec, // current state of the mempool
// elements only
#[cfg(feature = "liquid")]
pub asset_history: HashMap<AssetId, Vec<TxHistoryInfo>>,
#[cfg(feature = "liquid")]
pub asset_issuance: HashMap<AssetId, asset::AssetRow>,
}
// A simplified transaction view used for the list of most recent transactions
#[derive(Serialize)]
pub struct TxOverview {
txid: Txid,
fee: u64,
vsize: u32,
#[cfg(not(feature = "liquid"))]
value: u64,
}
impl Mempool {
pub fn new(chain: Arc<ChainQuery>, metrics: &Metrics, config: Arc<Config>) -> Self {
Mempool {
chain,
txstore: BTreeMap::new(),
feeinfo: HashMap::new(),
history: HashMap::new(),
edges: HashMap::new(),
recent: BoundedVecDeque::new(config.mempool_recent_txs_size),
backlog_stats: (
BacklogStats::default(),
Instant::now() - Duration::from_secs(config.mempool_backlog_stats_ttl),
),
latency: metrics.histogram_vec(
HistogramOpts::new("mempool_latency", "Mempool requests latency (in seconds)"),
&["part"],
),
delta: metrics.histogram_vec(
HistogramOpts::new("mempool_delta", "# of transactions added/removed"),
&["type"],
),
count: metrics.gauge_vec(
MetricOpts::new("mempool_count", "# of elements currently at the mempool"),
&["type"],
),
#[cfg(feature = "liquid")]
asset_history: HashMap::new(),
#[cfg(feature = "liquid")]
asset_issuance: HashMap::new(),
config,
}
}
pub fn network(&self) -> Network {
self.config.network_type
}
pub fn lookup_txn(&self, txid: &Txid) -> Option<Transaction> {
self.txstore.get(txid).cloned()
}
pub fn lookup_raw_txn(&self, txid: &Txid) -> Option<Bytes> {
self.txstore.get(txid).map(serialize)
}
pub fn lookup_spend(&self, outpoint: &OutPoint) -> Option<SpendingInput> {
self.edges.get(outpoint).map(|(txid, vin)| SpendingInput {
txid: *txid,
vin: *vin,
confirmed: None,
})
}
pub fn has_spend(&self, outpoint: &OutPoint) -> bool {
self.edges.contains_key(outpoint)
}
pub fn get_tx_fee(&self, txid: &Txid) -> Option<u64> {
Some(self.feeinfo.get(txid)?.fee)
}
pub fn has_unconfirmed_parents(&self, txid: &Txid) -> bool {
let tx = match self.txstore.get(txid) {
Some(tx) => tx,
None => return false,
};
tx.input
.iter()
.any(|txin| self.txstore.contains_key(&txin.previous_output.txid))
}
pub fn history(
&self,
scripthash: &[u8],
last_seen_txid: Option<&Txid>,
limit: usize,
) -> Vec<Transaction> {
let _timer = self.latency.with_label_values(&["history"]).start_timer();
self.history
.get(scripthash)
.map_or_else(std::vec::Vec::new, |entries| {
self._history(entries, last_seen_txid, limit)
})
}
pub fn history_txids_iter<'a>(&'a self, scripthash: &[u8]) -> impl Iterator<Item = Txid> + 'a {
self.history
.get(scripthash)
.into_iter()
.flat_map(|v| v.iter().map(|e| e.get_txid()).unique())
}
fn _history(
&self,
entries: &[TxHistoryInfo],
last_seen_txid: Option<&Txid>,
limit: usize,
) -> Vec<Transaction> {
entries
.iter()
.map(|e| e.get_txid())
.unique()
// TODO seek directly to last seen tx without reading earlier rows
.skip_while(|txid| {
// skip until we reach the last_seen_txid
last_seen_txid.is_some_and(|last_seen_txid| last_seen_txid != txid)
})
.skip(match last_seen_txid {
Some(_) => 1, // skip the last_seen_txid itself
None => 0,
})
.take(limit)
.map(|txid| self.txstore.get(&txid).expect("missing mempool tx"))
.cloned()
.collect()
}
pub fn history_group(
&self,
scripthashes: &[[u8; 32]],
last_seen_txid: Option<&Txid>,
limit: usize,
) -> Vec<Transaction> {
let _timer = self
.latency
.with_label_values(&["history_group"])
.start_timer();
scripthashes
.iter()
.filter_map(|scripthash| self.history.get(&scripthash[..]))
.flat_map(|entries| entries.iter())
.map(|e| e.get_txid())
.unique()
// TODO seek directly to last seen tx without reading earlier rows
.skip_while(|txid| {
// skip until we reach the last_seen_txid
last_seen_txid.is_some_and(|last_seen_txid| last_seen_txid != txid)
})
.skip(match last_seen_txid {
Some(_) => 1, // skip the last_seen_txid itself
None => 0,
})
.take(limit)
.map(|txid| self.txstore.get(&txid).expect("missing mempool tx"))
.cloned()
.collect()
}
pub fn history_txids_iter_group<'a>(
&'a self,
scripthashes: &'a [[u8; 32]],
) -> impl Iterator<Item = Txid> + 'a {
scripthashes
.iter()
.filter_map(move |scripthash| self.history.get(&scripthash[..]))
.flat_map(|entries| entries.iter())
.map(|entry| entry.get_txid())
.unique()
}
pub fn history_txids(&self, scripthash: &[u8], limit: usize) -> Vec<Txid> {
let _timer = self
.latency
.with_label_values(&["history_txids"])
.start_timer();
match self.history.get(scripthash) {
None => vec![],
Some(entries) => entries
.iter()
.map(|e| e.get_txid())
.unique()
.take(limit)
.collect(),
}
}
pub fn utxo(&self, scripthash: &[u8]) -> Vec<Utxo> {
let _timer = self.latency.with_label_values(&["utxo"]).start_timer();
let entries = match self.history.get(scripthash) {
None => return vec![],
Some(entries) => entries,
};
entries
.iter()
.filter_map(|entry| match entry {
TxHistoryInfo::Funding(info) => {
// Liquid requires some additional information from the txo that's not available in the TxHistoryInfo index.
#[cfg(feature = "liquid")]
let txo = self.lookup_txo(&entry.get_funded_outpoint())?;
Some(Utxo {
txid: deserialize(&info.txid).expect("invalid txid"),
vout: info.vout,
value: info.value,
confirmed: None,
#[cfg(feature = "liquid")]
asset: txo.asset,
#[cfg(feature = "liquid")]
nonce: txo.nonce,
#[cfg(feature = "liquid")]
witness: txo.witness,
})
}
TxHistoryInfo::Spending(_) => None,
#[cfg(feature = "liquid")]
TxHistoryInfo::Issuing(_)
| TxHistoryInfo::Burning(_)
| TxHistoryInfo::Pegin(_)
| TxHistoryInfo::Pegout(_) => unreachable!(),
})
.filter(|utxo| !self.has_spend(&OutPoint::from(utxo)))
.collect()
}
// @XXX avoid code duplication with ChainQuery::stats()?
pub fn stats(&self, scripthash: &[u8]) -> ScriptStats {
let _timer = self.latency.with_label_values(&["stats"]).start_timer();
let mut stats = ScriptStats::default();
let mut seen_txids = HashSet::new();
let entries = match self.history.get(scripthash) {
None => return stats,
Some(entries) => entries,
};
for entry in entries {
if seen_txids.insert(entry.get_txid()) {
stats.tx_count += 1;
}
match entry {
#[cfg(not(feature = "liquid"))]
TxHistoryInfo::Funding(info) => {
stats.funded_txo_count += 1;
stats.funded_txo_sum += info.value;
}
#[cfg(not(feature = "liquid"))]
TxHistoryInfo::Spending(info) => {
stats.spent_txo_count += 1;
stats.spent_txo_sum += info.value;
}
// Elements
#[cfg(feature = "liquid")]
TxHistoryInfo::Funding(_) => {
stats.funded_txo_count += 1;
}
#[cfg(feature = "liquid")]
TxHistoryInfo::Spending(_) => {
stats.spent_txo_count += 1;
}
#[cfg(feature = "liquid")]
TxHistoryInfo::Issuing(_)
| TxHistoryInfo::Burning(_)
| TxHistoryInfo::Pegin(_)
| TxHistoryInfo::Pegout(_) => unreachable!(),
};
}
stats
}
// Get all txids in the mempool
pub fn txids(&self) -> Vec<&Txid> {
let _timer = self.latency.with_label_values(&["txids"]).start_timer();
self.txstore.keys().collect()
}
// Get n txids after the given txid in the mempool
pub fn txids_page(&self, n: usize, start: Option<Txid>) -> Vec<&Txid> {
let _timer = self
.latency
.with_label_values(&["txids_page"])
.start_timer();
let start_bound = match start {
Some(txid) => Excluded(txid),
None => Unbounded,
};
self.txstore
.range((start_bound, Unbounded))
.take(n)
.map(|(k, _v)| k)
.collect()
}
// Get all txs in the mempool
pub fn txs(&self) -> Vec<Transaction> {
let _timer = self.latency.with_label_values(&["txs"]).start_timer();
self.txstore.values().cloned().collect()
}
// Get n txs after the given txid in the mempool
pub fn txs_page(&self, n: usize, start: Option<Txid>) -> Vec<Transaction> {
let _timer = self.latency.with_label_values(&["txs_page"]).start_timer();
let mut page = Vec::with_capacity(n);
let start_bound = match start {
Some(txid) => Excluded(txid),
None => Unbounded,
};
self.txstore
.range((start_bound, Unbounded))
.take(n)
.for_each(|(_, value)| {
page.push(value.clone());
});
page
}
// Get an overview of the most recent transactions
pub fn recent_txs_overview(&self) -> Vec<&TxOverview> {
// We don't bother ever deleting elements from the recent list.
// It may contain outdated txs that are no longer in the mempool,
// until they get pushed out by newer transactions.
self.recent.iter().collect()
}
pub fn backlog_stats(&self) -> &BacklogStats {
&self.backlog_stats.0
}
pub fn unique_txids(&self) -> HashSet<Txid> {
HashSet::from_iter(self.txstore.keys().cloned())
}
/// Synchronize the shared `Mempool` with the daemon's current mempool.
///
/// Locking is deliberately fine-grained: no lock is held across an RPC
/// round-trip, so readers stay responsive while the update is in flight.
pub fn update(mempool: &RwLock<Mempool>, daemon: &Daemon) -> Result<()> {
    // 1. Start the metrics timer and get the current mempool txids
    // [LOCK] Takes read lock for whole scope.
    let (_timer, old_txids) = {
        let mempool = mempool.read().unwrap();
        (
            mempool.latency.with_label_values(&["update"]).start_timer(),
            mempool.unique_txids(),
        )
    };
    // 2. Get all the mempool txids from the RPC.
    // [LOCK] No lock taken. Wait for RPC request. Get lists of remove/add txes.
    let all_txids = daemon
        .getmempooltxids()
        .chain_err(|| "failed to update mempool from daemon")?;
    let txids_to_remove: HashSet<&Txid> = old_txids.difference(&all_txids).collect();
    let txids_to_add: Vec<&Txid> = all_txids.difference(&old_txids).collect();
    // 3. Remove missing transactions. Even if we are unable to download new transactions from
    // the daemon, we still want to remove the transactions that are no longer in the mempool.
    // [LOCK] Write lock is released at the end of the call to remove().
    mempool.write().unwrap().remove(txids_to_remove);
    // 4. Download the new transactions from the daemon's mempool
    // [LOCK] No lock taken, waiting for RPC response.
    let txs_to_add = daemon
        .gettransactions(&txids_to_add)
        .chain_err(|| format!("failed to get {} transactions", txids_to_add.len()))?;
    // 5. Update local mempool to match daemon's state
    // [LOCK] Takes Write lock for whole scope.
    {
        let mut mempool = mempool.write().unwrap();
        // Add new transactions; add() may process fewer than requested
        // when parents went missing between the RPC calls.
        if txs_to_add.len() > mempool.add(txs_to_add) {
            debug!("Mempool update added less transactions than expected");
        }
        mempool
            .count
            .with_label_values(&["txs"])
            .set(mempool.txstore.len() as f64);
        // Update cached backlog stats (if expired)
        if mempool.backlog_stats.1.elapsed()
            > Duration::from_secs(mempool.config.mempool_backlog_stats_ttl)
        {
            let _timer = mempool
                .latency
                .with_label_values(&["update_backlog_stats"])
                .start_timer();
            mempool.backlog_stats = (BacklogStats::new(&mempool.feeinfo), Instant::now());
        }
        Ok(())
    }
}
/// Fetch `txid` from the daemon's mempool and add it to the local cache.
///
/// A daemon lookup failure is silently ignored (the tx may already be
/// gone); an error is returned only when the tx was fetched but could
/// not be indexed (e.g. its parents are missing).
pub fn add_by_txid(&mut self, daemon: &Daemon, txid: &Txid) -> Result<()> {
    // Nothing to do when the tx is already cached.
    if self.txstore.contains_key(txid) {
        return Ok(());
    }
    let tx = match daemon.getmempooltx(txid) {
        Ok(tx) => tx,
        Err(_) => return Ok(()),
    };
    if self.add(vec![tx]) == 0 {
        return Err(format!(
            "Unable to add {txid} to mempool likely due to missing parents."
        )
        .into());
    }
    Ok(())
}
/// Add transactions to the mempool.
///
/// The return value is the number of transactions processed.
/// Transactions whose parent outputs cannot be found are still stored in
/// `txstore` but are skipped for indexing, so the result can be smaller
/// than the input length.
#[must_use = "Must deal with [[input vec's length]] > [[result]]."]
fn add(&mut self, txs: Vec<Transaction>) -> usize {
    self.delta
        .with_label_values(&["add"])
        .observe(txs.len() as f64);
    let _timer = self.latency.with_label_values(&["add"]).start_timer();
    let txlen = txs.len();
    if txlen == 0 {
        return 0;
    }
    debug!("Adding {} transactions to Mempool", txlen);
    let mut txids = Vec::with_capacity(txs.len());
    // Phase 1: add to txstore
    for tx in txs {
        let txid = tx.txid();
        // Only push if it doesn't already exist.
        // This is important now that update doesn't lock during
        // the entire function body.
        if self.txstore.insert(txid, tx).is_none() {
            txids.push(txid);
        }
    }
    // Phase 2: index history and spend edges (some txos can be missing)
    let txos = self.lookup_txos(&self.get_prevouts(&txids));
    // Count how many transactions were actually processed.
    let mut processed_count = 0;
    // Phase 3: Iterate over the transactions and do the following:
    // 1. Find all of the TxOuts of each input parent using `txos`
    // 2. If any parent wasn't found, skip parsing this transaction
    // 3. Insert TxFeeInfo into info.
    // 4. Push TxOverview into recent tx queue.
    // 5. Create the Spend and Fund TxHistory structs for inputs + outputs
    // 6. Insert all TxHistory into history.
    // 7. Insert the tx edges into edges (HashMap of (Outpoint, (Txid, vin)))
    // 8. (Liquid only) Parse assets of tx.
    for txid in txids {
        let tx = self.txstore.get(&txid).expect("missing tx from txstore");
        let prevouts = match extract_tx_prevouts(tx, &txos) {
            Ok(v) => v,
            Err(e) => {
                // Parent missing: leave the tx in txstore but don't index it.
                warn!("Skipping tx {txid} missing parent error: {e}");
                continue;
            }
        };
        let txid_bytes = full_hash(&txid[..]);
        // Get feeinfo for caching and recent tx overview
        let feeinfo = TxFeeInfo::new(tx, &prevouts, self.config.network_type);
        // recent is an BoundedVecDeque that automatically evicts the oldest elements
        self.recent.push_front(TxOverview {
            txid,
            fee: feeinfo.fee,
            vsize: feeinfo.vsize,
            #[cfg(not(feature = "liquid"))]
            value: prevouts.values().map(|prevout| prevout.value).sum(),
        });
        self.feeinfo.insert(txid, feeinfo);
        // An iterator over (ScriptHash, TxHistoryInfo)
        let spending = prevouts.into_iter().map(|(input_index, prevout)| {
            let txi = tx.input.get(input_index as usize).unwrap();
            (
                compute_script_hash(&prevout.script_pubkey),
                TxHistoryInfo::Spending(SpendingInfo {
                    txid: txid_bytes,
                    vin: input_index,
                    prev_txid: full_hash(&txi.previous_output.txid[..]),
                    prev_vout: txi.previous_output.vout,
                    value: prevout.value,
                }),
            )
        });
        let config = &self.config;
        // An iterator over (ScriptHash, TxHistoryInfo)
        let funding = tx
            .output
            .iter()
            .enumerate()
            .filter(|(_, txo)| is_spendable(txo) || config.index_unspendables)
            .map(|(index, txo)| {
                (
                    compute_script_hash(&txo.script_pubkey),
                    TxHistoryInfo::Funding(FundingInfo {
                        txid: txid_bytes,
                        vout: index as u32,
                        value: txo.value,
                    }),
                )
            });
        // Index funding/spending history entries and spend edges
        for (scripthash, entry) in funding.chain(spending) {
            self.history.entry(scripthash).or_default().push(entry);
        }
        for (i, txi) in tx.input.iter().enumerate() {
            self.edges.insert(txi.previous_output, (txid, i as u32));
        }
        // Index issued assets & native asset pegins/pegouts/burns
        #[cfg(feature = "liquid")]
        asset::index_mempool_tx_assets(
            tx,
            self.config.network_type,
            self.config.parent_network,
            &mut self.asset_history,
            &mut self.asset_issuance,
        );
        processed_count += 1;
    }
    processed_count
}
/// Returns None if the lookup fails (mempool transaction RBF-ed etc.)
pub fn lookup_txo(&self, outpoint: &OutPoint) -> Option<TxOut> {
    // Reuse the batch lookup with a single-element set.
    let outpoints: BTreeSet<OutPoint> = std::iter::once(*outpoint).collect();
    // This can possibly be None now
    self.lookup_txos(&outpoints).remove(outpoint)
}
/// For a given set of OutPoints, return a HashMap<OutPoint, TxOut>
///
/// Not all OutPoints from mempool transactions are guaranteed to be there.
/// Ensure you deal with the None case in your logic.
pub fn lookup_txos(&self, outpoints: &BTreeSet<OutPoint>) -> HashMap<OutPoint, TxOut> {
    let _timer = self
        .latency
        .with_label_values(&["lookup_txos"])
        .start_timer();
    // Start from the confirmed txos, then fill in the gaps from the
    // mempool's own txstore.
    let mut txos = self.chain.lookup_avail_txos(outpoints);
    for outpoint in outpoints {
        if txos.contains_key(outpoint) {
            continue;
        }
        let mempool_txo = self
            .txstore
            .get(&outpoint.txid)
            .and_then(|tx| tx.output.get(outpoint.vout as usize));
        match mempool_txo {
            Some(txout) => {
                txos.insert(*outpoint, txout.clone());
            }
            // Missing outpoints are logged but not fatal (see doc above).
            None => warn!("missing outpoint {:?}", outpoint),
        }
    }
    txos
}
/// Collects the previous outpoints referenced by the inputs of the given
/// mempool transactions (skipping inputs without a prevout, e.g. coinbase).
///
/// Panics if any txid is not present in `txstore` (caller invariant).
fn get_prevouts(&self, txids: &[Txid]) -> BTreeSet<OutPoint> {
    let _timer = self
        .latency
        .with_label_values(&["get_prevouts"])
        .start_timer();
    let mut prevouts = BTreeSet::new();
    for txid in txids {
        let tx = self.txstore.get(txid).expect("missing mempool tx");
        for txin in &tx.input {
            if has_prevout(txin) {
                prevouts.insert(txin.previous_output);
            }
        }
    }
    prevouts
}
/// Remove the given transactions from the mempool along with every
/// derived index: feeinfo, per-script history, spend edges and (on
/// Liquid) asset indexes.
fn remove(&mut self, to_remove: HashSet<&Txid>) {
    self.delta
        .with_label_values(&["remove"])
        .observe(to_remove.len() as f64);
    let _timer = self.latency.with_label_values(&["remove"]).start_timer();
    for txid in &to_remove {
        // A missing txstore entry is an invariant violation: callers only
        // pass txids previously observed in the store.
        self.txstore
            .remove(*txid)
            .unwrap_or_else(|| panic!("missing mempool tx {}", txid));
        // feeinfo may legitimately be absent (txs skipped by add() due to
        // missing parents), so warn instead of panicking. Using is_none()
        // instead of the or_else-for-side-effect idiom.
        if self.feeinfo.remove(*txid).is_none() {
            warn!("missing mempool tx feeinfo {}", txid);
        }
    }
    // TODO: make it more efficient (currently it takes O(|mempool|) time)
    self.history.retain(|_scripthash, entries| {
        entries.retain(|entry| !to_remove.contains(&entry.get_txid()));
        !entries.is_empty()
    });
    #[cfg(feature = "liquid")]
    asset::remove_mempool_tx_assets(
        &to_remove,
        &mut self.asset_history,
        &mut self.asset_issuance,
    );
    self.edges
        .retain(|_outpoint, (txid, _vin)| !to_remove.contains(txid));
}
#[cfg(feature = "liquid")]
/// Returns up to `limit` mempool transactions touching the given asset,
/// or an empty vec when the asset has no mempool history.
pub fn asset_history(&self, asset_id: &AssetId, limit: usize) -> Vec<Transaction> {
    let _timer = self
        .latency
        .with_label_values(&["asset_history"])
        .start_timer();
    self.asset_history
        .get(asset_id)
        .map_or_else(std::vec::Vec::new, |entries| {
            self._history(entries, None, limit)
        })
}
}
/// Aggregate statistics over the current mempool backlog; cached by the
/// mempool and refreshed on a TTL (see `Mempool::update`).
#[derive(Serialize)]
pub struct BacklogStats {
    pub count: u32,
    pub vsize: u32,     // in virtual bytes (= weight/4)
    pub total_fee: u64, // in satoshis
    // (feerate, vsize) buckets — NOTE(review): produced by
    // make_fee_histogram; confirm exact bucket semantics there.
    pub fee_histogram: Vec<(f32, u32)>,
}
impl Default for BacklogStats {
    /// An empty backlog: zero counts and a single empty histogram bucket.
    /// Implemented as the `Default` trait (rather than an inherent
    /// `default()`, flagged by clippy) — `BacklogStats::default()` call
    /// sites resolve identically.
    fn default() -> Self {
        BacklogStats {
            count: 0,
            vsize: 0,
            total_fee: 0,
            fee_histogram: vec![(0.0, 0)],
        }
    }
}

impl BacklogStats {
    /// Aggregates count/vsize/total_fee over all cached fee infos and
    /// builds the fee histogram.
    fn new(feeinfo: &HashMap<Txid, TxFeeInfo>) -> Self {
        let (count, vsize, total_fee) = feeinfo
            .values()
            .fold((0, 0, 0), |(count, vsize, fee), feeinfo| {
                (count + 1, vsize + feeinfo.vsize, fee + feeinfo.fee)
            });
        BacklogStats {
            count,
            vsize,
            total_fee,
            fee_histogram: make_fee_histogram(feeinfo.values().collect()),
        }
    }
}
| rust | MIT | 3000bd13e76e2f33d0844a30489108846954d0a3 | 2026-01-04T20:24:15.088141Z | false |
mempool/electrs | https://github.com/mempool/electrs/blob/3000bd13e76e2f33d0844a30489108846954d0a3/src/new_index/query.rs | src/new_index/query.rs | use rayon::prelude::*;
use std::collections::{BTreeSet, HashMap};
use std::sync::{Arc, RwLock, RwLockReadGuard};
use std::time::{Duration, Instant};
use crate::chain::{Network, OutPoint, Transaction, TxOut, Txid};
use crate::config::Config;
use crate::daemon::{Daemon, MempoolAcceptResult, SubmitPackageResult};
use crate::errors::*;
use crate::new_index::{ChainQuery, Mempool, ScriptStats, SpendingInput, Utxo};
use crate::util::{is_spendable, BlockId, Bytes, TransactionStatus};
#[cfg(feature = "liquid")]
use crate::{
chain::{asset::AssetRegistryLock, AssetId},
elements::{lookup_asset, AssetRegistry, AssetSorting, LiquidAsset},
};
/// How long daemon fee estimates are considered fresh.
const FEE_ESTIMATES_TTL: u64 = 60; // seconds

/// Confirmation targets requested from the daemon: every block count from
/// 1 to 25, plus 144 (one day), 504 and 1008 blocks. The element type is
/// fixed by the array annotation, so per-literal suffixes are redundant.
const CONF_TARGETS: [u16; 28] = [
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
    144, 504, 1008,
];
/// Read-side query facade combining the confirmed-chain index, the
/// mempool cache and the daemon RPC, with small TTL caches for fee data.
pub struct Query {
    chain: Arc<ChainQuery>, // TODO: should be used as read-only
    mempool: Arc<RwLock<Mempool>>,
    daemon: Arc<Daemon>,
    config: Arc<Config>,
    // Fee estimates per confirmation target plus the time they were fetched.
    cached_estimates: RwLock<(HashMap<u16, f64>, Option<Instant>)>,
    // Relay fee: fetched once on first use, never expires.
    cached_relayfee: RwLock<Option<f64>>,
    #[cfg(feature = "liquid")]
    asset_db: Option<Arc<RwLock<AssetRegistry>>>,
}
impl Query {
#[cfg(not(feature = "liquid"))]
/// Creates a new query facade over the given chain index, mempool,
/// daemon handle and configuration (non-Liquid variant).
pub fn new(
    chain: Arc<ChainQuery>,
    mempool: Arc<RwLock<Mempool>>,
    daemon: Arc<Daemon>,
    config: Arc<Config>,
) -> Self {
    Query {
        chain,
        mempool,
        daemon,
        config,
        cached_estimates: RwLock::new((HashMap::new(), None)),
        cached_relayfee: RwLock::new(None),
    }
}

/// The confirmed-chain query index.
pub fn chain(&self) -> &ChainQuery {
    &self.chain
}

/// The active configuration.
pub fn config(&self) -> &Config {
    &self.config
}

/// The configured network type.
pub fn network(&self) -> Network {
    self.config.network_type
}

/// Read access to the shared mempool cache.
pub fn mempool(&self) -> RwLockReadGuard<'_, Mempool> {
    self.mempool.read().unwrap()
}
/// Broadcasts a raw transaction (hex) via the daemon; returns its txid.
pub fn broadcast_raw(&self, txhex: &str) -> Result<Txid> {
    let txid = self.daemon.broadcast_raw(txhex)?;
    // The important part is whether we succeeded in broadcasting.
    // Ignore errors in adding to the cache and show an internal warning.
    if let Err(e) = self
        .mempool
        .write()
        .unwrap()
        .add_by_txid(&self.daemon, &txid)
    {
        warn!(
            "broadcast_raw of {txid} succeeded to broadcast \
            but failed to add to mempool-electrs Mempool cache: {e}"
        );
    }
    Ok(txid)
}
/// Proxies a `testmempoolaccept` RPC for the given raw transactions.
pub fn test_mempool_accept(
    &self,
    txhex: Vec<String>,
    maxfeerate: Option<f64>,
) -> Result<Vec<MempoolAcceptResult>> {
    self.daemon.test_mempool_accept(txhex, maxfeerate)
}

/// Proxies a `submitpackage` RPC for the given raw transactions.
pub fn submit_package(
    &self,
    txhex: Vec<String>,
    maxfeerate: Option<f64>,
    maxburnamount: Option<f64>,
) -> Result<SubmitPackageResult> {
    self.daemon.submit_package(txhex, maxfeerate, maxburnamount)
}
/// UTXOs for a scripthash: confirmed outputs (capped by the configured
/// limit), minus those spent by mempool txs, plus mempool-created ones.
pub fn utxo(&self, scripthash: &[u8]) -> Result<Vec<Utxo>> {
    let mut utxos = self.chain.utxo(
        scripthash,
        self.config.utxos_limit,
        super::db::DBFlush::Enable,
    )?;
    let mempool = self.mempool();
    // Drop confirmed utxos that the mempool already spends.
    utxos.retain(|utxo| !mempool.has_spend(&OutPoint::from(utxo)));
    utxos.extend(mempool.utxo(scripthash));
    Ok(utxos)
}
/// Returns up to `limit` history txids for a scripthash: confirmed ones
/// (tagged with their `BlockId`) first, then mempool ones (tagged `None`).
pub fn history_txids(&self, scripthash: &[u8], limit: usize) -> Vec<(Txid, Option<BlockId>)> {
    let confirmed_txids = self.chain.history_txids(scripthash, limit);
    let confirmed_len = confirmed_txids.len();
    let confirmed_txids = confirmed_txids.into_iter().map(|(tx, b)| (tx, Some(b)));
    let mempool_txids = self
        .mempool()
        // saturating_sub guards against an underflow panic in case the
        // chain query ever returns more than `limit` entries.
        .history_txids(scripthash, limit.saturating_sub(confirmed_len))
        .into_iter()
        .map(|tx| (tx, None));
    confirmed_txids.chain(mempool_txids).collect()
}
/// Script stats for both the confirmed chain and the mempool.
pub fn stats(&self, scripthash: &[u8]) -> (ScriptStats, ScriptStats) {
    (
        self.chain.stats(scripthash, super::db::DBFlush::Enable),
        self.mempool().stats(scripthash),
    )
}

/// Looks up a transaction in the chain first, then in the mempool.
pub fn lookup_txn(&self, txid: &Txid) -> Option<Transaction> {
    self.chain
        .lookup_txn(txid, None)
        .or_else(|| self.mempool().lookup_txn(txid))
}

/// Raw transaction bytes from the chain first, then the mempool.
pub fn lookup_raw_txn(&self, txid: &Txid) -> Option<Bytes> {
    self.chain
        .lookup_raw_txn(txid, None)
        .or_else(|| self.mempool().lookup_raw_txn(txid))
}

/// Not all OutPoints from mempool transactions are guaranteed to be included in the result
pub fn lookup_txos(&self, outpoints: &BTreeSet<OutPoint>) -> HashMap<OutPoint, TxOut> {
    // the mempool lookup_txos() internally looks up confirmed txos as well
    self.mempool().lookup_txos(outpoints)
}

/// Finds the input spending `outpoint`, checking chain then mempool.
pub fn lookup_spend(&self, outpoint: &OutPoint) -> Option<SpendingInput> {
    self.chain
        .lookup_spend(outpoint)
        .or_else(|| self.mempool().lookup_spend(outpoint))
}
/// For each output of `tx`, finds its spending input (chain first, then
/// mempool), evaluated in parallel via rayon. Provably unspendable
/// outputs yield `None` without a lookup.
pub fn lookup_tx_spends(&self, tx: Transaction) -> Vec<Option<SpendingInput>> {
    let txid = tx.txid();
    tx.output
        .par_iter()
        .enumerate()
        .map(|(vout, txout)| {
            if is_spendable(txout) {
                self.lookup_spend(&OutPoint {
                    txid,
                    vout: vout as u32,
                })
            } else {
                None
            }
        })
        .collect()
}
/// Confirmation status of a txid, derived from the confirming block (if any).
pub fn get_tx_status(&self, txid: &Txid) -> TransactionStatus {
    TransactionStatus::from(self.chain.tx_confirming_block(txid))
}

/// The cached fee of a mempool transaction, if known.
pub fn get_mempool_tx_fee(&self, txid: &Txid) -> Option<u64> {
    self.mempool().get_tx_fee(txid)
}

/// Whether the mempool reports unconfirmed parents for this txid.
pub fn has_unconfirmed_parents(&self, txid: &Txid) -> bool {
    self.mempool().has_unconfirmed_parents(txid)
}
/// Fee-rate estimate for confirming within `conf_target` blocks.
///
/// Regtest short-circuits to the relay fee; otherwise estimates are
/// served from a cache refreshed at most every `FEE_ESTIMATES_TTL` seconds.
pub fn estimate_fee(&self, conf_target: u16) -> Option<f64> {
    if self.config.network_type.is_regtest() {
        return self.get_relayfee().ok();
    }
    // Fast path: serve from the cache while it is still fresh.
    if let (ref cache, Some(cache_time)) = *self.cached_estimates.read().unwrap() {
        if cache_time.elapsed() < Duration::from_secs(FEE_ESTIMATES_TTL) {
            return cache.get(&conf_target).copied();
        }
    }
    // Cache is stale or empty: refresh (best-effort) and re-read.
    self.update_fee_estimates();
    self.cached_estimates
        .read()
        .unwrap()
        .0
        .get(&conf_target)
        .copied()
}
/// All cached fee estimates keyed by confirmation target, refreshing the
/// cache first when it is stale (same TTL as `estimate_fee`).
pub fn estimate_fee_map(&self) -> HashMap<u16, f64> {
    if let (ref cache, Some(cache_time)) = *self.cached_estimates.read().unwrap() {
        if cache_time.elapsed() < Duration::from_secs(FEE_ESTIMATES_TTL) {
            return cache.clone();
        }
    }
    self.update_fee_estimates();
    self.cached_estimates.read().unwrap().0.clone()
}
/// Refreshes the fee-estimate cache from the daemon; on RPC failure the
/// previous cache contents (and timestamp) are left untouched.
fn update_fee_estimates(&self) {
    let estimates = match self.daemon.estimatesmartfee_batch(&CONF_TARGETS) {
        Ok(estimates) => estimates,
        Err(err) => {
            warn!("failed estimating feerates: {:?}", err);
            return;
        }
    };
    *self.cached_estimates.write().unwrap() = (estimates, Some(Instant::now()));
}
/// The daemon's minimum relay fee, fetched once and cached forever.
// NOTE(review): two concurrent first calls may both hit the RPC and both
// write the cache; appears harmless since they store the same value.
pub fn get_relayfee(&self) -> Result<f64> {
    if let Some(cached) = *self.cached_relayfee.read().unwrap() {
        return Ok(cached);
    }
    let relayfee = self.daemon.get_relayfee()?;
    self.cached_relayfee.write().unwrap().replace(relayfee);
    Ok(relayfee)
}
#[cfg(feature = "liquid")]
/// Creates a new query facade (Liquid variant, with an optional asset registry).
pub fn new(
    chain: Arc<ChainQuery>,
    mempool: Arc<RwLock<Mempool>>,
    daemon: Arc<Daemon>,
    config: Arc<Config>,
    asset_db: Option<Arc<RwLock<AssetRegistry>>>,
) -> Self {
    Query {
        chain,
        mempool,
        daemon,
        config,
        asset_db,
        cached_estimates: RwLock::new((HashMap::new(), None)),
        cached_relayfee: RwLock::new(None),
    }
}

#[cfg(feature = "liquid")]
/// Looks up a Liquid asset by id, consulting the registry when configured.
pub fn lookup_asset(&self, asset_id: &AssetId) -> Result<Option<LiquidAsset>> {
    lookup_asset(
        self,
        self.asset_db.as_ref().map(AssetRegistryLock::RwLock),
        asset_id,
        None,
    )
}
#[cfg(feature = "liquid")]
/// Lists registered assets with pagination and sorting, attaching
/// on-chain information to each registry entry. Returns the total number
/// of registered assets alongside the requested page; an empty result
/// when no registry is configured.
pub fn list_registry_assets(
    &self,
    start_index: usize,
    limit: usize,
    sorting: AssetSorting,
) -> Result<(usize, Vec<LiquidAsset>)> {
    let asset_db = match &self.asset_db {
        None => return Ok((0, vec![])),
        Some(db) => db.read().unwrap(),
    };
    let (total_num, results) = asset_db.list(start_index, limit, sorting);
    // Attach on-chain information alongside the registry metadata
    let results = results
        .into_iter()
        .map(|(asset_id, metadata)| {
            lookup_asset(
                self,
                Some(AssetRegistryLock::RwLockReadGuard(&asset_db)),
                asset_id,
                Some(metadata),
            )?
            .chain_err(|| "missing registered asset")
        })
        .collect::<Result<Vec<_>>>()?;
    Ok((total_num, results))
}
}
| rust | MIT | 3000bd13e76e2f33d0844a30489108846954d0a3 | 2026-01-04T20:24:15.088141Z | false |
mempool/electrs | https://github.com/mempool/electrs/blob/3000bd13e76e2f33d0844a30489108846954d0a3/src/new_index/precache.rs | src/new_index/precache.rs | use crate::errors::*;
use crate::new_index::ChainQuery;
use crate::util::{full_hash, FullHash};
use rayon::prelude::*;
use hex;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::sync::{atomic::AtomicUsize, Arc};
use std::time::Instant;
/// Warms the stats/utxo cache for `scripthashes` on a dedicated rayon pool.
///
/// Returns immediately: the work runs on a detached background thread.
/// Progress is logged every 500 scripthashes, and the cache DB is flushed
/// every 10_000 processed entries plus once at the end.
pub fn precache(chain: Arc<ChainQuery>, scripthashes: Vec<FullHash>, threads: usize) {
    let total = scripthashes.len();
    info!(
        "Pre-caching stats and utxo set on {} threads for {} scripthashes",
        threads, total
    );
    let pool = rayon::ThreadPoolBuilder::new()
        .num_threads(threads)
        .thread_name(|i| format!("precache-{}", i))
        .build()
        .unwrap();
    let now = Instant::now();
    let counter = AtomicUsize::new(0);
    std::thread::spawn(move || {
        pool.install(|| {
            scripthashes
                .par_iter()
                .for_each(|scripthash| {
                    // First, cache (DB flushing is disabled here and batched below)
                    chain.stats(&scripthash[..], crate::new_index::db::DBFlush::Disable);
                    let _ = chain.utxo(&scripthash[..], usize::MAX, crate::new_index::db::DBFlush::Disable);
                    // Then, increment the counter
                    let pre_increment = counter.fetch_add(1, std::sync::atomic::Ordering::AcqRel);
                    let post_increment_counter = pre_increment + 1;
                    // Then, log
                    if post_increment_counter % 500 == 0 {
                        let now_millis = now.elapsed().as_millis();
                        info!("{post_increment_counter}/{total} Processed in {now_millis} ms running pre-cache for scripthash");
                    }
                    // Every 10k counts, flush the DB to disk
                    if post_increment_counter % 10000 == 0 {
                        info!("Flushing cache_db... {post_increment_counter}");
                        chain.store().cache_db().flush();
                        info!("Done Flushing cache_db!!! {post_increment_counter}");
                    }
                })
        });
        // After everything is done, flush the cache
        chain.store().cache_db().flush();
    });
}
/// Reads newline-separated hex-encoded scripthashes from the file at `path`.
///
/// Fails if the file cannot be opened, a line cannot be read, or an
/// entry is not valid hex.
pub fn scripthashes_from_file(path: String) -> Result<Vec<FullHash>> {
    let reader =
        io::BufReader::new(File::open(path).chain_err(|| "cannot open precache scripthash file")?);
    reader
        .lines()
        .map(|line| {
            let line = line.chain_err(|| "cannot read scripthash line")?;
            Ok(full_hash(&hex::decode(line).chain_err(|| "invalid hex")?))
        })
        .collect()
}
| rust | MIT | 3000bd13e76e2f33d0844a30489108846954d0a3 | 2026-01-04T20:24:15.088141Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot-actuator/src/lib.rs | summer-boot-actuator/src/lib.rs | mod configuration_properties; | rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot-actuator/src/configuration_properties.rs | summer-boot-actuator/src/configuration_properties.rs | //!
//! Configuration properties
//!
/// Holds the sanitize-key configuration for actuator output:
/// a base list of keys plus an additional, user-supplied list.
pub struct ConfigurationProperties {
    /// Base set of keys whose values should be sanitized.
    pub keys_sanitize: Vec<String>,
    /// Extra keys to sanitize on top of `keys_sanitize`.
    pub additional_keys_sanitize: Vec<String>,
}

/// `Default` delegates to `new()` so the type satisfies the standard
/// trait (clippy: `new_without_default`) without changing `new()` callers.
impl Default for ConfigurationProperties {
    fn default() -> Self {
        Self::new()
    }
}

impl ConfigurationProperties {
    /// Creates a configuration with both key lists empty.
    pub fn new() -> ConfigurationProperties {
        ConfigurationProperties {
            keys_sanitize: Vec::new(),
            additional_keys_sanitize: Vec::new(),
        }
    }

    /// Returns the base sanitize-key list.
    pub fn get_keys_sanitize(&self) -> &Vec<String> {
        &self.keys_sanitize
    }

    /// Replaces the base sanitize-key list.
    pub fn set_keys_sanitize(&mut self, keys_sanitize: Vec<String>) {
        self.keys_sanitize = keys_sanitize;
    }

    /// Returns the additional sanitize-key list.
    pub fn get_additional_keys_sanitize(&self) -> &Vec<String> {
        &self.additional_keys_sanitize
    }

    /// Replaces the additional sanitize-key list.
    pub fn set_additional_keys_sanitize(&mut self, additional_keys_sanitize: Vec<String>) {
        self.additional_keys_sanitize = additional_keys_sanitize;
    }
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/lib.rs | summer-boot/src/lib.rs | pub mod common;
pub mod log;
mod context;
mod gateway;
mod http1;
mod server;
mod tcp;
pub mod utils;
pub use http1::http;
pub use utils::middleware::{Middleware, Next};
pub use utils::request::Request;
pub use utils::response::Response;
pub use utils::response_builder::ResponseBuilder;
pub use utils::util;
pub use gateway::route::Route;
pub use http_types::{self, Body, Error, Status, StatusCode};
pub use server::endpoint::Endpoint;
use server::server::Server;
/// Creates a new `Server` with unit (`()`) state.
#[must_use]
pub fn new() -> Server<()> {
    Server::new()
}

/// Auto-scan routes, enable logging and read the YAML configuration,
/// then return the configured server. (Translated from the original
/// Chinese doc comment.)
#[must_use]
pub fn run() -> Server<()> {
    Server::run()
}

/// Creates a new `Server` carrying the given shared application state.
pub fn with_state<State>(state: State) -> Server<State>
where
    State: Clone + Send + Sync + 'static,
{
    Server::with_state(state)
}
/// Crate-wide result type; the success type defaults to `Response`.
pub type Result<T = Response> = std::result::Result<T, Error>;
pub mod rt;
/// Re-exports a proc-macro from `summer_boot_macro` behind the `macros`
/// feature, linking the procedural macros to summer-boot.
macro_rules! macro_reexport {
    ($name:ident) => {
        #[cfg(feature = "macros")]
        #[cfg_attr(docsrs, doc(cfg(feature = "macros")))]
        pub use summer_boot_macro::$name;
    };
}

// Re-export every routing/entry-point macro.
macro_reexport!(auto_scan);
macro_reexport!(main);
macro_reexport!(post);
macro_reexport!(get);
macro_reexport!(delete);
macro_reexport!(put);
macro_reexport!(head);
macro_reexport!(options);
macro_reexport!(connect);
macro_reexport!(patch);
macro_reexport!(trace);
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/tcp/unix.rs | summer-boot/src/tcp/unix.rs | use super::{is_transient_error, ListenInfo};
use super::Listener;
use crate::{http1, Server};
use std::fmt::{self, Display, Formatter};
use async_std::os::unix::net::{self, SocketAddr, UnixStream};
use async_std::path::PathBuf;
use async_std::prelude::*;
use async_std::{io, task};
use kv_log_macro::error;
/// A Unix domain socket listener, created either from a filesystem path
/// (bound lazily in `bind`) or from an already-bound `net::UnixListener`.
pub struct UnixListener<State> {
    // Socket path to bind; consumed on bind. Mutually exclusive with `listener`.
    path: Option<PathBuf>,
    // The bound socket, once available.
    listener: Option<net::UnixListener>,
    // The server handed to `bind`; consumed by `accept`.
    server: Option<Server<State>>,
    // Populated at bind time, for reporting via `info()`.
    info: Option<ListenInfo>,
}

impl<State> UnixListener<State> {
    /// Creates an unbound listener from a socket path.
    pub fn from_path(path: impl Into<PathBuf>) -> Self {
        Self {
            path: Some(path.into()),
            listener: None,
            server: None,
            info: None,
        }
    }

    /// Wraps an existing, already-bound Unix listener.
    pub fn from_listener(unix_listener: impl Into<net::UnixListener>) -> Self {
        Self {
            path: None,
            listener: Some(unix_listener.into()),
            server: None,
            info: None,
        }
    }
}
/// Serves one accepted Unix-socket connection on a spawned task, driving
/// HTTP/1.x over the stream and routing each request to `app`.
fn handle_unix<State: Clone + Send + Sync + 'static>(app: Server<State>, stream: UnixStream) {
    task::spawn(async move {
        let local_addr = unix_socket_addr_to_string(stream.local_addr());
        let peer_addr = unix_socket_addr_to_string(stream.peer_addr());
        let fut = http1::http::accept(stream, |mut req| async {
            req.set_local_addr(local_addr.as_ref());
            req.set_peer_addr(peer_addr.as_ref());
            app.respond(req).await
        });
        // Connection-level failures are logged, not propagated.
        if let Err(error) = fut.await {
            error!("async-h1 error", { error: error.to_string() });
        }
    });
}
#[async_trait::async_trait]
impl<State> Listener<State> for UnixListener<State>
where
    State: Clone + Send + Sync + 'static,
{
    /// Binds the socket (when constructed from a path), stores the server
    /// and records the listen info. Must be called exactly once.
    async fn bind(&mut self, server: Server<State>) -> io::Result<()> {
        assert!(self.server.is_none(), "`bind` should only be called once");
        self.server = Some(server);
        if self.listener.is_none() {
            let path = self.path.take().expect("`bind` should only be called once");
            let listener = net::UnixListener::bind(path).await?;
            self.listener = Some(listener);
        }
        // Format the listen information.
        let conn_string = format!("{}", self);
        let transport = "uds".to_owned();
        let tls = false;
        self.info = Some(ListenInfo::new(conn_string, transport, tls));
        Ok(())
    }

    /// Accept loop: spawns a task per connection. Transient connection
    /// errors are skipped; other errors pause the loop for 500ms.
    async fn accept(&mut self) -> io::Result<()> {
        let server = self
            .server
            .take()
            .expect("`Listener::bind` must be called before `Listener::accept`");
        let listener = self
            .listener
            .take()
            .expect("`Listener::bind` must be called before `Listener::accept`");
        let mut incoming = listener.incoming();
        while let Some(stream) = incoming.next().await {
            match stream {
                Err(ref e) if is_transient_error(e) => continue,
                Err(error) => {
                    let delay = std::time::Duration::from_millis(500);
                    error!("Error: {}. Pausing for {:?}.", error, delay);
                    task::sleep(delay).await;
                    continue;
                }
                Ok(stream) => {
                    handle_unix(server.clone(), stream);
                }
            };
        }
        Ok(())
    }

    /// Listen info recorded by `bind`, or empty before binding.
    fn info(&self) -> Vec<ListenInfo> {
        match &self.info {
            Some(info) => vec![info.clone()],
            None => vec![],
        }
    }
}
impl<State> fmt::Debug for UnixListener<State> {
    /// Debug output; the server is shown only by presence so that
    /// `State: Debug` is not required.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.debug_struct("UnixListener")
            .field("listener", &self.listener)
            .field("path", &self.path)
            .field(
                "server",
                if self.server.is_some() {
                    &"Some(Server<State>)"
                } else {
                    &"None"
                },
            )
            .finish()
    }
}
impl<State> Display for UnixListener<State> {
    /// Shows the bound socket as an `http+unix://` URL (canonicalized
    /// path) once bound, the configured path before binding, or a hint
    /// when neither is set.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match &self.listener {
            Some(listener) => {
                let path = listener.local_addr().expect("Could not get local path dir");
                let pathname = path
                    .as_pathname()
                    .and_then(|p| p.canonicalize().ok())
                    .expect("Could not canonicalize path dir");
                write!(f, "http+unix://{}", pathname.display())
            }
            None => match &self.path {
                Some(path) => write!(f, "http+unix://{}", path.display()),
                None => write!(f, "Not listening. Did you forget to call `Listener::bind`?"),
            },
        }
    }
}
/// Formats a Unix socket address as an `http+unix://` URL, canonicalizing
/// the socket path. Returns `None` on error or for unnamed/abstract
/// sockets (no pathname).
fn unix_socket_addr_to_string(result: io::Result<SocketAddr>) -> Option<String> {
    let addr = result.ok()?;
    let pathname = addr.as_pathname()?.canonicalize().ok()?;
    Some(format!("http+unix://{}", pathname.display()))
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/tcp/to_listener.rs | summer-boot/src/tcp/to_listener.rs | use super::Listener;
use async_std::io;
/// ToListener 可以转换为
/// [`Listener`](crate::listener::Listener),实现的任何类型。
/// 现实可以看to_listener_impls
///
pub trait ToListener<State: Clone + Send + Sync + 'static> {
/// 转换具体哪一种类型的Listener
type Listener: Listener<State>;
/// 将self进行转换为
/// [`Listener`](crate::listener::Listener)。
/// 除非self是已绑定/连接到io,转换为侦听器不启动连接。
/// 错误返回表示转换为侦听器失败,而不是绑定尝试失败。
fn to_listener(self) -> io::Result<Self::Listener>;
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/tcp/tcp_listener.rs | summer-boot/src/tcp/tcp_listener.rs | use super::{is_transient_error, ListenInfo};
use super::Listener;
use crate::{http, log, Server};
use std::fmt::{self, Display, Formatter};
use async_std::net::{self, SocketAddr, TcpStream};
use async_std::prelude::*;
use async_std::{io, task};
/// A TCP listener, created either from a list of socket addresses
/// (bound lazily in `bind`) or from an already-bound `net::TcpListener`.
pub struct TcpListener<State> {
    // Addresses to bind; consumed on bind. Mutually exclusive with `listener`.
    addrs: Option<Vec<SocketAddr>>,
    // The bound socket, once available.
    listener: Option<net::TcpListener>,
    // The server handed to `bind`; consumed by `accept`.
    server: Option<Server<State>>,
    // Populated at bind time, for reporting via `info()`.
    info: Option<ListenInfo>,
}

impl<State> TcpListener<State> {
    /// Creates an unbound listener from candidate addresses.
    pub fn from_addrs(addrs: Vec<SocketAddr>) -> Self {
        Self {
            addrs: Some(addrs),
            listener: None,
            server: None,
            info: None,
        }
    }

    /// Wraps an existing, already-bound TCP listener.
    pub fn from_listener(tcp_listener: impl Into<net::TcpListener>) -> Self {
        Self {
            addrs: None,
            listener: Some(tcp_listener.into()),
            server: None,
            info: None,
        }
    }
}
/// Serves one accepted TCP connection on a spawned task, driving
/// HTTP/1.x over the stream and routing each request to `app`.
fn handle_tcp<State: Clone + Send + Sync + 'static>(app: Server<State>, stream: TcpStream) {
    task::spawn(async move {
        let local_addr = stream.local_addr().ok();
        let peer_addr = stream.peer_addr().ok();
        let fut = http::accept(stream, |mut req| async {
            req.set_local_addr(local_addr);
            req.set_peer_addr(peer_addr);
            app.respond(req).await
        });
        // Connection-level failures are logged, not propagated.
        if let Err(error) = fut.await {
            log::error!("http1 error", { error: error.to_string() });
        }
    });
}
#[async_trait::async_trait]
impl<State> Listener<State> for TcpListener<State>
where
    State: Clone + Send + Sync + 'static,
{
    /// Binds the socket (when constructed from addresses), stores the
    /// server and records the listen info. Must be called exactly once
    /// (the assert/expect messages below state this, in Chinese).
    async fn bind(&mut self, server: Server<State>) -> io::Result<()> {
        assert!(self.server.is_none(), "`bind`只能调用一次");
        self.server = Some(server);
        if self.listener.is_none() {
            let addrs = self.addrs.take().expect("`bind` 只能调用一次");
            let listener = net::TcpListener::bind(addrs.as_slice()).await?;
            self.listener = Some(listener);
        }
        // Format the listen information.
        let conn_string = format!("{}", self);
        let transport = "tcp".to_owned();
        let tls = false;
        self.info = Some(ListenInfo::new(conn_string, transport, tls));
        Ok(())
    }

    /// Accept loop: spawns a task per connection. Transient connection
    /// errors are skipped; other errors pause the loop for 500ms.
    async fn accept(&mut self) -> io::Result<()> {
        let server = self
            .server
            .take()
            .expect("`Listener::bind` 必须在之前调用 `Listener::accept`");
        let listener = self
            .listener
            .take()
            .expect("`Listener::bind` 必须在之前调用 `Listener::accept`");
        let mut incoming = listener.incoming();
        while let Some(stream) = incoming.next().await {
            match stream {
                Err(ref e) if is_transient_error(e) => continue,
                Err(error) => {
                    let delay = std::time::Duration::from_millis(500);
                    crate::log::error!("Error: {}. for {:?}.", error, delay);
                    task::sleep(delay).await;
                    continue;
                }
                Ok(stream) => {
                    handle_tcp(server.clone(), stream);
                }
            };
        }
        Ok(())
    }

    /// Listen info recorded by `bind`, or empty before binding.
    fn info(&self) -> Vec<ListenInfo> {
        match &self.info {
            Some(info) => vec![info.clone()],
            None => vec![],
        }
    }
}
impl<State> fmt::Debug for TcpListener<State> {
    /// Debug output; the server is shown only by presence so that
    /// `State: Debug` is not required.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.debug_struct("TcpListener")
            .field("listener", &self.listener)
            .field("addrs", &self.addrs)
            .field(
                "server",
                if self.server.is_some() {
                    &"Some(Server<State>)"
                } else {
                    &"None"
                },
            )
            .finish()
    }
}
impl<State> Display for TcpListener<State> {
    /// Shows the bound address as an `http://` URL once bound, the
    /// configured address list before binding, or a (Chinese) hint when
    /// neither is set.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let http_fmt = |a| format!("http://{}", a);
        match &self.listener {
            Some(listener) => {
                let addr = listener.local_addr().expect("无法获取本地地址");
                write!(f, "{}", http_fmt(&addr))
            }
            None => match &self.addrs {
                Some(addrs) => {
                    let addrs = addrs.iter().map(http_fmt).collect::<Vec<_>>().join(", ");
                    write!(f, "{}", addrs)
                }
                None => write!(f, "没有监听,请检查是否成功调用了 `Listener::bind`?"),
            },
        }
    }
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/tcp/parsed.rs | summer-boot/src/tcp/parsed.rs | #[cfg(unix)]
use super::UnixListener;
use super::{ListenInfo, Listener, TcpListener};
use crate::Server;
use async_std::io;
use std::fmt::{self, Debug, Display, Formatter};
/// Either a Unix-socket listener (unix targets only) or a TCP listener.
// NOTE(review): constructed by the address-parsing code in
// to_listener_impls (outside this view) — confirm.
pub enum ParsedListener<State> {
    #[cfg(unix)]
    Unix(UnixListener<State>),
    Tcp(TcpListener<State>),
}

/// Delegates to the wrapped listener's `Debug` impl.
impl<State> Debug for ParsedListener<State> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            #[cfg(unix)]
            ParsedListener::Unix(unix) => Debug::fmt(unix, f),
            ParsedListener::Tcp(tcp) => Debug::fmt(tcp, f),
        }
    }
}
/// Delegates to the wrapped listener's `Display` impl.
impl<State> Display for ParsedListener<State> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            #[cfg(unix)]
            Self::Unix(u) => write!(f, "{}", u),
            Self::Tcp(t) => write!(f, "{}", t),
        }
    }
}
/// Delegates every `Listener` operation to the wrapped variant.
#[async_trait::async_trait]
impl<State> Listener<State> for ParsedListener<State>
where
    State: Clone + Send + Sync + 'static,
{
    async fn bind(&mut self, server: Server<State>) -> io::Result<()> {
        match self {
            #[cfg(unix)]
            Self::Unix(u) => u.bind(server).await,
            Self::Tcp(t) => t.bind(server).await,
        }
    }

    async fn accept(&mut self) -> io::Result<()> {
        match self {
            #[cfg(unix)]
            Self::Unix(u) => u.accept().await,
            Self::Tcp(t) => t.accept().await,
        }
    }

    fn info(&self) -> Vec<ListenInfo> {
        match self {
            #[cfg(unix)]
            ParsedListener::Unix(unix) => unix.info(),
            ParsedListener::Tcp(tcp) => tcp.info(),
        }
    }
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/tcp/mod.rs | summer-boot/src/tcp/mod.rs | //! 表示HTTP传输和绑定的类型
use crate::Server;
mod concurrent;
mod failover;
mod parsed;
mod tcp_listener;
mod to_listener;
mod to_listener_impls;
#[cfg(unix)]
mod unix;
use std::fmt::{Debug, Display};
use async_std::io;
use async_trait::async_trait;
pub use concurrent::ConcurrentListener;
pub use failover::FailoverListener;
pub use to_listener::ToListener;
pub(crate) use parsed::ParsedListener;
pub(crate) use tcp_listener::TcpListener;
#[cfg(unix)]
pub(crate) use unix::UnixListener;
/// Poll helper: when the expression polls to `Ready(Ok(0))` (clean EOF)
/// fall through so the caller can handle it; any other poll result
/// (pending, error, or bytes read) is returned from the enclosing
/// function immediately.
#[macro_export]
macro_rules! read_to_end {
    ($expr:expr) => {
        match $expr {
            Poll::Ready(Ok(0)) => (),
            other => return other,
        }
    };
}
/// An HTTP transport that can be bound to an address and then accept
/// connections for a `Server`.
#[async_trait]
pub trait Listener<State>: Debug + Display + Send + Sync + 'static
where
    State: Send + Sync + 'static,
{
    /// Bind to the underlying transport, storing `app` for later accepts.
    async fn bind(&mut self, app: Server<State>) -> io::Result<()>;
    /// Run the accept loop, serving connections with the bound server.
    async fn accept(&mut self) -> io::Result<()>;
    /// Information about each bound transport (empty before `bind`).
    fn info(&self) -> Vec<ListenInfo>;
}

/// Boxed listeners delegate every operation to the inner listener.
#[async_trait]
impl<L, State> Listener<State> for Box<L>
where
    L: Listener<State>,
    State: Send + Sync + 'static,
{
    async fn bind(&mut self, app: Server<State>) -> io::Result<()> {
        self.as_mut().bind(app).await
    }

    async fn accept(&mut self) -> io::Result<()> {
        self.as_mut().accept().await
    }

    fn info(&self) -> Vec<ListenInfo> {
        self.as_ref().info()
    }
}
/// Crate-internal logic shared by the TCP and Unix listeners: decide
/// whether an `io::Error` from the accept loop is transient, i.e. the
/// loop should back off and retry instead of bailing out.
pub(crate) fn is_transient_error(e: &io::Error) -> bool {
    use io::ErrorKind::{ConnectionAborted, ConnectionRefused, ConnectionReset};
    match e.kind() {
        ConnectionRefused | ConnectionAborted | ConnectionReset => true,
        _ => false,
    }
}
/// Information about a single bound listener: the connection string users
/// can reach it at, the transport name, and whether the link is encrypted.
#[derive(Debug, Clone)]
#[allow(dead_code)]
pub struct ListenInfo {
    // e.g. "http://127.0.0.1:8080" or a unix socket path.
    conn_string: String,
    // Transport name, e.g. "tcp" or "unix".
    transport: String,
    // Whether the connection is TLS-encrypted.
    tls: bool,
}
impl ListenInfo {
    /// Create a new `ListenInfo` record.
    pub fn new(conn_string: String, transport: String, tls: bool) -> Self {
        Self {
            conn_string,
            transport,
            tls,
        }
    }
    /// The connection string for this listener.
    #[allow(dead_code)]
    pub fn connection(&self) -> &str {
        self.conn_string.as_str()
    }
    /// The transport name for this listener.
    #[allow(dead_code)]
    pub fn transport(&self) -> &str {
        self.transport.as_str()
    }
    /// Whether the connection is encrypted (TLS).
    #[allow(dead_code)]
    pub fn is_encrypted(&self) -> bool {
        self.tls
    }
}
impl Display for ListenInfo {
    /// Human-readable form: just the connection string.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(&self.conn_string)
    }
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/tcp/failover.rs | summer-boot/src/tcp/failover.rs | use crate::tcp::{Listener, ToListener};
use crate::Server;
use std::fmt::{self, Debug, Display, Formatter};
use async_std::io;
use crate::tcp::ListenInfo;
/// A listener that tries its candidates in order and serves from the first
/// one that binds successfully.
#[derive(Default)]
pub struct FailoverListener<State> {
    // Candidates in priority order; a slot becomes `None` once `accept`
    // takes ownership of the bound listener.
    listeners: Vec<Option<Box<dyn Listener<State>>>>,
    // Index of the listener that `bind` succeeded on, if any.
    index: Option<usize>,
}
#[allow(dead_code)]
impl<State> FailoverListener<State>
where
    State: Clone + Send + Sync + 'static,
{
    /// Create an empty failover listener.
    pub fn new() -> Self {
        Self {
            listeners: vec![],
            index: None,
        }
    }
    /// Append a candidate listener; lower-indexed candidates are tried first.
    pub fn add<L>(&mut self, listener: L) -> io::Result<()>
    where
        L: ToListener<State>,
    {
        self.listeners.push(Some(Box::new(listener.to_listener()?)));
        Ok(())
    }
    /// Builder-style `add`; panics if the listener cannot be converted.
    pub fn with_listener<L>(mut self, listener: L) -> Self
    where
        L: ToListener<State>,
    {
        self.add(listener).expect("无法添加侦听器");
        self
    }
}
#[async_trait::async_trait]
impl<State> Listener<State> for FailoverListener<State>
where
    State: Clone + Send + Sync + 'static,
{
    /// Try each candidate in order; the first successful bind wins and its
    /// position is recorded in `self.index` for `accept`/`info`.
    async fn bind(&mut self, app: Server<State>) -> io::Result<()> {
        for (index, listener) in self.listeners.iter_mut().enumerate() {
            // A `None` slot means `accept` already took it, i.e. bind ran twice.
            let listener = listener.as_deref_mut().expect("bind调用了两次");
            match listener.bind(app.clone()).await {
                Ok(_) => {
                    self.index = Some(index);
                    return Ok(());
                }
                Err(e) => {
                    // Log the failure and fall through to the next candidate.
                    crate::log::info!("无法绑定", {
                        listener: listener.to_string(),
                        error: e.to_string()
                    });
                }
            }
        }
        Err(io::Error::new(
            io::ErrorKind::AddrNotAvailable,
            "无法绑定到任何提供的侦听器",
        ))
    }
    /// Accept connections on the listener that `bind` selected.
    async fn accept(&mut self) -> io::Result<()> {
        match self.index {
            Some(index) => {
                // Take ownership of the slot; a second `accept` call panics.
                let mut listener = self.listeners[index].take().expect("accept调用了两次");
                listener.accept().await?;
                Ok(())
            }
            None => Err(io::Error::new(
                io::ErrorKind::AddrNotAvailable,
                "无法侦听任何提供的侦听器",
            )),
        }
    }
    /// Info of the bound listener; empty before bind or after accept took it.
    fn info(&self) -> Vec<ListenInfo> {
        match self.index {
            Some(index) => match self.listeners.get(index) {
                Some(Some(listener)) => listener.info(),
                _ => vec![],
            },
            None => vec![],
        }
    }
}
impl<State> Debug for FailoverListener<State> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", self.listeners)
    }
}
impl<State> Display for FailoverListener<State> {
    /// Comma-separated display of all candidates; taken slots render empty.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let string = self
            .listeners
            .iter()
            .map(|l| match l {
                Some(l) => l.to_string(),
                None => String::new(),
            })
            .collect::<Vec<_>>()
            // keep the space after the comma in this separator
            .join(", ");
        writeln!(f, "{}", string)
    }
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/tcp/concurrent.rs | summer-boot/src/tcp/concurrent.rs | use crate::tcp::{ListenInfo, Listener, ToListener};
use crate::Server;
use std::fmt::{self, Debug, Display, Formatter};
use async_std::io;
use futures_util::stream::{futures_unordered::FuturesUnordered, StreamExt};
/// A listener that binds and accepts on all of its child listeners at once,
/// serving the same app on every one of them.
#[derive(Default)]
pub struct ConcurrentListener<State> {
    listeners: Vec<Box<dyn Listener<State>>>,
}
impl<State: Clone + Send + Sync + 'static> ConcurrentListener<State> {
    /// Create an empty concurrent listener.
    pub fn new() -> Self {
        Self { listeners: vec![] }
    }
    /// Add another listener to serve on.
    pub fn add<L>(&mut self, listener: L) -> io::Result<()>
    where
        L: ToListener<State>,
    {
        self.listeners.push(Box::new(listener.to_listener()?));
        Ok(())
    }
    /// Builder-style `add`; panics if the listener cannot be converted.
    pub fn with_listener<L>(mut self, listener: L) -> Self
    where
        L: ToListener<State>,
    {
        self.add(listener).expect("无法添加侦听器");
        self
    }
}
#[async_trait::async_trait]
impl<State> Listener<State> for ConcurrentListener<State>
where
    State: Clone + Send + Sync + 'static,
{
    /// Bind every child listener; fails fast on the first error.
    async fn bind(&mut self, app: Server<State>) -> io::Result<()> {
        for listener in self.listeners.iter_mut() {
            listener.bind(app.clone()).await?;
        }
        Ok(())
    }
    /// Drive all child accept loops concurrently; the first error aborts.
    async fn accept(&mut self) -> io::Result<()> {
        let mut futures_unordered = FuturesUnordered::new();
        for listener in self.listeners.iter_mut() {
            futures_unordered.push(listener.accept());
        }
        while let Some(result) = futures_unordered.next().await {
            result?;
        }
        Ok(())
    }
    /// Flattened info of every child listener.
    fn info(&self) -> Vec<ListenInfo> {
        self.listeners
            .iter()
            .flat_map(|listener| listener.info().into_iter())
            .collect()
    }
}
impl<State> Debug for ConcurrentListener<State> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self.listeners)
}
}
impl<State> Display for ConcurrentListener<State> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
let string = self
.listeners
.iter()
.map(|l| l.to_string())
.collect::<Vec<_>>()
.join(", ");
writeln!(f, "{}", string)
}
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/tcp/to_listener_impls.rs | summer-boot/src/tcp/to_listener_impls.rs | #[cfg(unix)]
use super::UnixListener;
use super::{ConcurrentListener, FailoverListener, ParsedListener, TcpListener, ToListener};
use async_std::io;
use http_types::url::Url;
use std::net::ToSocketAddrs;
impl<State> ToListener<State> for Url
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = ParsedListener<State>;
    /// Parse a listener from a URL: `http+unix://` maps to a Unix-socket
    /// path (on unix platforms only), `tcp://` / `http://` resolve to TCP
    /// socket addresses (default port 80); TLS schemes are recognized but
    /// rejected as unsupported.
    fn to_listener(self) -> io::Result<Self::Listener> {
        match self.scheme() {
            "http+unix" => {
                #[cfg(unix)]
                {
                    // Rebuild the filesystem path from the URL host + path parts.
                    let path = std::path::PathBuf::from(format!(
                        "{}{}",
                        self.domain().unwrap_or_default(),
                        self.path()
                    ));
                    Ok(ParsedListener::Unix(UnixListener::from_path(path)))
                }
                #[cfg(not(unix))]
                {
                    Err(io::Error::new(
                        io::ErrorKind::Other,
                        "此平台上不支持Unix套接字",
                    ))
                }
            }
            "tcp" | "http" => Ok(ParsedListener::Tcp(TcpListener::from_addrs(
                self.socket_addrs(|| Some(80))?,
            ))),
            // SSL/TLS support is planned but not implemented yet.
            "tls" | "ssl" | "https" => Err(io::Error::new(
                io::ErrorKind::Other,
                "尚不支持解析TLS侦听器",
            )),
            _ => Err(io::Error::new(io::ErrorKind::InvalidInput, "无法识别的url")),
        }
    }
}
// Owned and borrowed strings all funnel into the `&str` implementation below.
impl<State> ToListener<State> for String
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = ParsedListener<State>;
    fn to_listener(self) -> io::Result<Self::Listener> {
        ToListener::<State>::to_listener(self.as_str())
    }
}
impl<State> ToListener<State> for &String
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = ParsedListener<State>;
    fn to_listener(self) -> io::Result<Self::Listener> {
        ToListener::<State>::to_listener(self.as_str())
    }
}
impl<State> ToListener<State> for &str
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = ParsedListener<State>;
    /// Interpret the string first as a socket address ("127.0.0.1:8080"),
    /// then as a URL; otherwise fail with `InvalidInput`.
    fn to_listener(self) -> io::Result<Self::Listener> {
        if let Ok(socket_addrs) = self.to_socket_addrs() {
            Ok(ParsedListener::Tcp(TcpListener::from_addrs(
                socket_addrs.collect(),
            )))
        } else if let Ok(url) = Url::parse(self) {
            ToListener::<State>::to_listener(url)
        } else {
            Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                format!("无法解析侦听器 `{}`", self),
            ))
        }
    }
}
// Filesystem paths become Unix-socket listeners (unix platforms only).
#[cfg(unix)]
impl<State> ToListener<State> for async_std::path::PathBuf
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = UnixListener<State>;
    fn to_listener(self) -> io::Result<Self::Listener> {
        Ok(UnixListener::from_path(self))
    }
}
#[cfg(unix)]
impl<State> ToListener<State> for std::path::PathBuf
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = UnixListener<State>;
    fn to_listener(self) -> io::Result<Self::Listener> {
        Ok(UnixListener::from_path(self))
    }
}
// Already-bound TCP listeners (async-std or std) are wrapped as-is.
impl<State> ToListener<State> for async_std::net::TcpListener
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = TcpListener<State>;
    fn to_listener(self) -> io::Result<Self::Listener> {
        Ok(TcpListener::from_listener(self))
    }
}
impl<State> ToListener<State> for std::net::TcpListener
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = TcpListener<State>;
    fn to_listener(self) -> io::Result<Self::Listener> {
        Ok(TcpListener::from_listener(self))
    }
}
// (host, port) tuples resolve through `ToSocketAddrs` in the `&str` impl.
impl<State> ToListener<State> for (String, u16)
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = TcpListener<State>;
    fn to_listener(self) -> io::Result<Self::Listener> {
        ToListener::<State>::to_listener((self.0.as_str(), self.1))
    }
}
impl<State> ToListener<State> for (&String, u16)
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = TcpListener<State>;
    fn to_listener(self) -> io::Result<Self::Listener> {
        ToListener::<State>::to_listener((self.0.as_str(), self.1))
    }
}
impl<State> ToListener<State> for (&str, u16)
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = TcpListener<State>;
    fn to_listener(self) -> io::Result<Self::Listener> {
        Ok(TcpListener::from_addrs(self.to_socket_addrs()?.collect()))
    }
}
// Already-bound Unix-socket listeners (async-std or std) are wrapped as-is.
#[cfg(unix)]
impl<State> ToListener<State> for async_std::os::unix::net::UnixListener
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = UnixListener<State>;
    fn to_listener(self) -> io::Result<Self::Listener> {
        Ok(UnixListener::from_listener(self))
    }
}
#[cfg(unix)]
impl<State> ToListener<State> for std::os::unix::net::UnixListener
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = UnixListener<State>;
    fn to_listener(self) -> io::Result<Self::Listener> {
        Ok(UnixListener::from_listener(self))
    }
}
// Identity conversions: crate listener types are already listeners.
impl<State> ToListener<State> for TcpListener<State>
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = Self;
    fn to_listener(self) -> io::Result<Self::Listener> {
        Ok(self)
    }
}
#[cfg(unix)]
impl<State> ToListener<State> for UnixListener<State>
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = Self;
    fn to_listener(self) -> io::Result<Self::Listener> {
        Ok(self)
    }
}
impl<State> ToListener<State> for ConcurrentListener<State>
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = Self;
    fn to_listener(self) -> io::Result<Self::Listener> {
        Ok(self)
    }
}
impl<State> ToListener<State> for ParsedListener<State>
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = Self;
    fn to_listener(self) -> io::Result<Self::Listener> {
        Ok(self)
    }
}
impl<State> ToListener<State> for FailoverListener<State>
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = Self;
    fn to_listener(self) -> io::Result<Self::Listener> {
        Ok(self)
    }
}
// A bare socket address is a one-entry TCP listener.
impl<State> ToListener<State> for std::net::SocketAddr
where
    State: Clone + Send + Sync + 'static,
{
    type Listener = TcpListener<State>;
    fn to_listener(self) -> io::Result<Self::Listener> {
        Ok(TcpListener::from_addrs(vec![self]))
    }
}
// A Vec of convertibles becomes a ConcurrentListener over all of them.
impl<L, State> ToListener<State> for Vec<L>
where
    L: ToListener<State>,
    State: Clone + Send + Sync + 'static,
{
    type Listener = ConcurrentListener<State>;
    fn to_listener(self) -> io::Result<Self::Listener> {
        let mut concurrent_listener = ConcurrentListener::new();
        for listener in self {
            concurrent_listener.add(listener)?;
        }
        Ok(concurrent_listener)
    }
}
// Tests for the `ToListener` conversions above; these only parse and
// construct listeners — nothing is actually bound to a socket.
#[cfg(test)]
mod parse_tests {
    use super::*;
    // Convert with a unit app state so the impls can be exercised generically.
    fn listen<L: ToListener<()>>(listener: L) -> io::Result<L::Listener> {
        listener.to_listener()
    }
    #[test]
    fn url_to_tcp_listener() {
        let listener = listen(Url::parse("http://localhost:8000").unwrap()).unwrap();
        assert!(listener.to_string().contains("http://127.0.0.1:8000"));
        let listener = listen(Url::parse("tcp://localhost:8000").unwrap()).unwrap();
        assert!(listener.to_string().contains("http://127.0.0.1:8000"));
        let listener = listen(Url::parse("http://127.0.0.1").unwrap()).unwrap();
        assert_eq!(listener.to_string(), "http://127.0.0.1:80");
    }
    #[test]
    fn str_url_to_tcp_listener() {
        let listener = listen("tcp://localhost:8000").unwrap();
        assert!(listener.to_string().contains("http://127.0.0.1:8000"));
        let listener = listen("tcp://localhost:8000").unwrap();
        assert!(listener.to_string().contains("http://127.0.0.1:8000"));
        let listener = listen("tcp://127.0.0.1").unwrap();
        assert_eq!(listener.to_string(), "http://127.0.0.1:80");
    }
    #[cfg(unix)]
    mod unix {
        use super::*;
        #[test]
        fn str_url_to_unix_listener() {
            let listener = listen("http+unix:///var/run/socket").unwrap();
            assert_eq!("http+unix:///var/run/socket", listener.to_string());
            let listener = listen("http+unix://./socket").unwrap();
            assert_eq!("http+unix://./socket", listener.to_string());
            let listener = listen("http+unix://socket").unwrap();
            assert_eq!("http+unix://socket", listener.to_string());
        }
        #[test]
        fn colon_port_does_not_work() {
            // ":3000" is neither a socket addr nor a URL on unix.
            let err = listen(":3000").unwrap_err().to_string();
            println!("{}", err);
        }
    }
    #[cfg(not(unix))]
    mod not_unix {
        use super::*;
        #[test]
        fn str_url_to_unix_listener() {
            let err = listen("http+unix:///var/run/socket").unwrap_err();
            println!("{}", err);
        }
        #[test]
        fn colon_port_works() {
            let listener = listen(":3000").unwrap();
            assert!(listener.to_string().ends_with(":3000"));
            assert!(listener.to_string().starts_with("http://"));
        }
    }
    #[test]
    fn str_tls_parse_and_url() {
        // TLS schemes parse but are rejected as unsupported.
        let err = listen("tls://localhost:443").unwrap_err();
        println!("{}", err);
        let err = listen(Url::parse("https://localhost:443").unwrap()).unwrap_err();
        println!("{}", err);
    }
    #[test]
    fn str_unknown_scheme() {
        let err = listen("pigeon://localhost:443").unwrap_err();
        println!("{}", err);
        let err = listen(Url::parse("pigeon:///localhost:443").unwrap()).unwrap_err();
        println!("{}", err);
    }
    #[test]
    fn str_to_socket_addr() {
        let listener = listen("127.0.0.1:1312").unwrap();
        assert_eq!("http://127.0.0.1:1312", listener.to_string());
        let listener = listen("[::1]:1312").unwrap();
        assert_eq!("http://[::1]:1312", listener.to_string());
        let listener = listen("localhost:3000").unwrap();
        assert!(listener.to_string().contains(":3000"));
    }
    #[test]
    fn invalid_str_input() {
        let err = listen("hello world").unwrap_err();
        println!("{}", err);
    }
    #[test]
    fn to_listener_impls_compile() {
        listen("127.0.0.1:80").unwrap();
        listen(String::from("127.0.0.1:80")).unwrap();
        listen(&String::from("127.0.0.1:80")).unwrap();
        listen(("127.0.0.1", 80)).unwrap();
        listen((String::from("127.0.0.1"), 80)).unwrap();
        listen((&String::from("127.0.0.1"), 80)).unwrap();
    }
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/http1/body_encoder.rs | summer-boot/src/http1/body_encoder.rs | use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use async_std::io::Read;
use http_types::Body;
use pin_project::pin_project;
use super::encode::ChunkedEncoder;
/// Encoding strategy for a response body: chunked transfer encoding when the
/// body length is unknown, plain streaming when it is fixed.
#[pin_project(project=BodyEncoderProjection)]
#[derive(Debug)]
pub(crate) enum BodyEncoder {
    /// Body of unknown length, wrapped in a chunked encoder.
    Chunked(#[pin] ChunkedEncoder<Body>),
    /// Body with a known length, streamed as-is.
    Fixed(#[pin] Body),
}
impl BodyEncoder {
    /// Pick the encoding strategy for `body`: a known length lets us stream
    /// it directly, an unknown length forces chunked transfer encoding.
    pub(crate) fn new(body: Body) -> Self {
        if body.len().is_some() {
            Self::Fixed(body)
        } else {
            Self::Chunked(ChunkedEncoder::new(body))
        }
    }
}
impl Read for BodyEncoder {
    /// Forward the read to whichever encoder variant is active, using pin
    /// projection to keep the inner reader pinned.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<io::Result<usize>> {
        match self.project() {
            BodyEncoderProjection::Chunked(encoder) => encoder.poll_read(cx, buf),
            BodyEncoderProjection::Fixed(body) => body.poll_read(cx, buf),
        }
    }
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/http1/date.rs | summer-boot/src/http1/date.rs | use std::fmt::{self, Display, Formatter};
use std::str::{from_utf8, FromStr};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use http_types::{bail, ensure, format_err};
// Exact byte length of an IMF-fixdate string, e.g. "Sun, 06 Nov 1994 08:49:37 GMT".
const IMF_FIXDATE_LENGTH: usize = 29;
// Minimum overall length accepted for an RFC 850 date. NOTE(review): the name
// says MAX but `parse_rfc850_date` uses it as a lower bound (`len() >= ...`).
const RFC850_MAX_LENGTH: usize = 23;
// Exact byte length of an asctime string, e.g. "Sun Nov  6 08:49:37 1994".
const ASCTIME_LENGTH: usize = 24;
// Seconds since the Unix epoch at which the supported range ends; timestamps
// at or beyond this are rejected ("date must be before year 9999").
const YEAR_9999_SECONDS: u64 = 253402300800;
const SECONDS_IN_DAY: u64 = 86400;
const SECONDS_IN_HOUR: u64 = 3600;
/// An HTTP date, formatted via the `Display` trait.
/// Converts to and from `SystemTime`, and supports
/// comparison and ordering.
#[derive(Copy, Clone, Debug, Eq)]
pub struct HttpDate {
    /// 0...59
    second: u8,
    /// 0...59
    minute: u8,
    /// 0...23
    hour: u8,
    /// 1...31
    day: u8,
    /// 1...12
    month: u8,
    /// 1970...9999
    year: u16,
    /// 1...7 (Monday = 1)
    week_day: u8,
}
/// Parse a date from an HTTP header field.
///
/// Supports the preferred IMF-fixdate as well as the legacy RFC 850 and
/// asctime formats. Two-digit years are mapped to 1970 through 2069.
#[allow(dead_code)]
pub(crate) fn parse_http_date(s: &str) -> http_types::Result<SystemTime> {
    let date: HttpDate = s.parse()?;
    Ok(date.into())
}
/// Format a date for use in an HTTP header field.
///
/// Dates are formatted as IMF-fixdate: `Fri, 15 May 2015 15:34:21 GMT`.
pub(crate) fn fmt_http_date(d: SystemTime) -> String {
    HttpDate::from(d).to_string()
}
impl HttpDate {
    /// Check that every field lies in its documented range (see the struct
    /// field comments): times within a day, day 1-31, month 1-12,
    /// year 1970-9999, weekday 1-7.
    fn is_valid(self) -> bool {
        let time_ok = self.second < 60 && self.minute < 60 && self.hour < 24;
        let date_ok = (1..32).contains(&self.day)
            && (1..=12).contains(&self.month)
            && (1970..=9999).contains(&self.year);
        let weekday_ok = (1..8).contains(&self.week_day);
        time_ok && date_ok && weekday_ok
    }
}
/// Parse an IMF-fixdate, the preferred HTTP date format.
fn parse_imf_fixdate(s: &[u8]) -> http_types::Result<HttpDate> {
    // e.g. `Sun, 06 Nov 1994 08:49:37 GMT`
    // Validate the fixed-width layout (separators at known offsets) before
    // slicing out the individual fields.
    if s.len() != IMF_FIXDATE_LENGTH
        || &s[25..] != b" GMT"
        || s[16] != b' '
        || s[19] != b':'
        || s[22] != b':'
    {
        bail!("Date time not in imf fixdate format");
    }
    Ok(HttpDate {
        second: from_utf8(&s[23..25])?.parse()?,
        minute: from_utf8(&s[20..22])?.parse()?,
        hour: from_utf8(&s[17..19])?.parse()?,
        day: from_utf8(&s[5..7])?.parse()?,
        month: match &s[7..12] {
            b" Jan " => 1,
            b" Feb " => 2,
            b" Mar " => 3,
            b" Apr " => 4,
            b" May " => 5,
            b" Jun " => 6,
            b" Jul " => 7,
            b" Aug " => 8,
            b" Sep " => 9,
            b" Oct " => 10,
            b" Nov " => 11,
            b" Dec " => 12,
            _ => bail!("Invalid Month"),
        },
        year: from_utf8(&s[12..16])?.parse()?,
        week_day: match &s[..5] {
            b"Mon, " => 1,
            b"Tue, " => 2,
            b"Wed, " => 3,
            b"Thu, " => 4,
            b"Fri, " => 5,
            b"Sat, " => 6,
            b"Sun, " => 7,
            _ => bail!("Invalid Day"),
        },
    })
}
/// Parse a legacy RFC 850 date (full weekday name, two-digit year).
fn parse_rfc850_date(s: &[u8]) -> http_types::Result<HttpDate> {
    // e.g. `Sunday, 06-Nov-94 08:49:37 GMT`
    ensure!(
        s.len() >= RFC850_MAX_LENGTH,
        "Date time not in rfc850 format"
    );
    // Strip a known weekday prefix, returning the weekday number and the rest.
    fn week_day<'a>(s: &'a [u8], week_day: u8, name: &'static [u8]) -> Option<(u8, &'a [u8])> {
        if &s[0..name.len()] == name {
            return Some((week_day, &s[name.len()..]));
        }
        None
    }
    let (week_day, s) = week_day(s, 1, b"Monday, ")
        .or_else(|| week_day(s, 2, b"Tuesday, "))
        .or_else(|| week_day(s, 3, b"Wednesday, "))
        .or_else(|| week_day(s, 4, b"Thursday, "))
        .or_else(|| week_day(s, 5, b"Friday, "))
        .or_else(|| week_day(s, 6, b"Saturday, "))
        .or_else(|| week_day(s, 7, b"Sunday, "))
        .ok_or_else(|| format_err!("Invalid day"))?;
    // After the weekday prefix the remainder has a fixed-width layout.
    if s.len() != 22 || s[12] != b':' || s[15] != b':' || &s[18..22] != b" GMT" {
        bail!("Date time not in rfc950 fmt");
    }
    // Two-digit years: 00-69 map to 2000-2069, 70-99 map to 1970-1999.
    let mut year = from_utf8(&s[7..9])?.parse::<u16>()?;
    if year < 70 {
        year += 2000;
    } else {
        year += 1900;
    }
    Ok(HttpDate {
        second: from_utf8(&s[16..18])?.parse()?,
        minute: from_utf8(&s[13..15])?.parse()?,
        hour: from_utf8(&s[10..12])?.parse()?,
        day: from_utf8(&s[0..2])?.parse()?,
        month: match &s[2..7] {
            b"-Jan-" => 1,
            b"-Feb-" => 2,
            b"-Mar-" => 3,
            b"-Apr-" => 4,
            b"-May-" => 5,
            b"-Jun-" => 6,
            b"-Jul-" => 7,
            b"-Aug-" => 8,
            b"-Sep-" => 9,
            b"-Oct-" => 10,
            b"-Nov-" => 11,
            b"-Dec-" => 12,
            _ => bail!("Invalid month"),
        },
        year,
        week_day,
    })
}
/// Parse a legacy asctime date (space-padded day of month, no timezone).
fn parse_asctime(s: &[u8]) -> http_types::Result<HttpDate> {
    // e.g. `Sun Nov  6 08:49:37 1994`
    if s.len() != ASCTIME_LENGTH || s[10] != b' ' || s[13] != b':' || s[16] != b':' || s[19] != b' '
    {
        bail!("Date time not in asctime format");
    }
    Ok(HttpDate {
        second: from_utf8(&s[17..19])?.parse()?,
        minute: from_utf8(&s[14..16])?.parse()?,
        hour: from_utf8(&s[11..13])?.parse()?,
        day: {
            // Single-digit days are space-padded ("Nov  6"), so skip the pad byte.
            let x = &s[8..10];
            from_utf8(if x[0] == b' ' { &x[1..2] } else { x })?.parse()?
        },
        month: match &s[4..8] {
            b"Jan " => 1,
            b"Feb " => 2,
            b"Mar " => 3,
            b"Apr " => 4,
            b"May " => 5,
            b"Jun " => 6,
            b"Jul " => 7,
            b"Aug " => 8,
            b"Sep " => 9,
            b"Oct " => 10,
            b"Nov " => 11,
            b"Dec " => 12,
            _ => bail!("Invalid month"),
        },
        year: from_utf8(&s[20..24])?.parse()?,
        week_day: match &s[0..4] {
            b"Mon " => 1,
            b"Tue " => 2,
            b"Wed " => 3,
            b"Thu " => 4,
            b"Fri " => 5,
            b"Sat " => 6,
            b"Sun " => 7,
            _ => bail!("Invalid day"),
        },
    })
}
impl From<SystemTime> for HttpDate {
    /// Convert a `SystemTime` to calendar fields.
    ///
    /// # Panics
    /// Panics if the time is before the Unix epoch or at/after year 9999.
    fn from(system_time: SystemTime) -> Self {
        let dur = system_time
            .duration_since(UNIX_EPOCH)
            .expect("all times should be after the epoch");
        let secs_since_epoch = dur.as_secs();
        if secs_since_epoch >= YEAR_9999_SECONDS {
            // year 9999
            panic!("date must be before year 9999");
        }
        /* 2000-03-01 (mod 400 year, immediately after Feb 29) */
        // Days-to-civil conversion anchored at 2000-03-01 so leap days fall
        // at the end of each cycle; presumably adapted from musl's
        // __secs_to_tm — TODO confirm provenance.
        const LEAPOCH: i64 = 11017;
        const DAYS_PER_400Y: i64 = 365 * 400 + 97;
        const DAYS_PER_100Y: i64 = 365 * 100 + 24;
        const DAYS_PER_4Y: i64 = 365 * 4 + 1;
        let days = (secs_since_epoch / SECONDS_IN_DAY) as i64 - LEAPOCH;
        let secs_of_day = secs_since_epoch % SECONDS_IN_DAY;
        // Split the day count into 400/100/4/1-year cycles.
        let mut qc_cycles = days / DAYS_PER_400Y;
        let mut remdays = days % DAYS_PER_400Y;
        if remdays < 0 {
            remdays += DAYS_PER_400Y;
            qc_cycles -= 1;
        }
        let mut c_cycles = remdays / DAYS_PER_100Y;
        if c_cycles == 4 {
            c_cycles -= 1;
        }
        remdays -= c_cycles * DAYS_PER_100Y;
        let mut q_cycles = remdays / DAYS_PER_4Y;
        if q_cycles == 25 {
            q_cycles -= 1;
        }
        remdays -= q_cycles * DAYS_PER_4Y;
        let mut remyears = remdays / 365;
        if remyears == 4 {
            remyears -= 1;
        }
        remdays -= remyears * 365;
        let mut year = 2000 + remyears + 4 * q_cycles + 100 * c_cycles + 400 * qc_cycles;
        // Month lengths starting from March (the anchor month); Feb is last.
        let months = [31, 30, 31, 30, 31, 31, 30, 31, 30, 31, 31, 29];
        let mut month = 0;
        for month_len in months.iter() {
            month += 1;
            if remdays < *month_len {
                break;
            }
            remdays -= *month_len;
        }
        let mday = remdays + 1;
        // Shift from the March-based month index back to a calendar month.
        let month = if month + 2 > 12 {
            year += 1;
            month - 10
        } else {
            month + 2
        };
        // LEAPOCH day is a Wednesday; derive ISO weekday (Mon=1..Sun=7).
        let mut week_day = (3 + days) % 7;
        if week_day <= 0 {
            week_day += 7
        };
        HttpDate {
            second: (secs_of_day % 60) as u8,
            minute: ((secs_of_day % SECONDS_IN_HOUR) / 60) as u8,
            hour: (secs_of_day / SECONDS_IN_HOUR) as u8,
            day: mday as u8,
            month: month as u8,
            year: year as u16,
            week_day: week_day as u8,
        }
    }
}
impl From<HttpDate> for SystemTime {
    /// Convert calendar fields back into a `SystemTime`, counting complete
    /// leap years before this year plus the day-of-year offset.
    fn from(http_date: HttpDate) -> Self {
        // Number of leap days strictly before `year` (Gregorian rules).
        let leap_years = ((http_date.year - 1) - 1968) / 4 - ((http_date.year - 1) - 1900) / 100
            + ((http_date.year - 1) - 1600) / 400;
        // Cumulative days before each month (non-leap year) + day of month.
        let mut ydays = match http_date.month {
            1 => 0,
            2 => 31,
            3 => 59,
            4 => 90,
            5 => 120,
            6 => 151,
            7 => 181,
            8 => 212,
            9 => 243,
            10 => 273,
            11 => 304,
            12 => 334,
            _ => unreachable!(),
        } + http_date.day as u64
            - 1;
        // Account for this year's own Feb 29 once we're past February.
        if is_leap_year(http_date.year) && http_date.month > 2 {
            ydays += 1;
        }
        let days = (http_date.year as u64 - 1970) * 365 + leap_years as u64 + ydays;
        UNIX_EPOCH
            + Duration::from_secs(
                http_date.second as u64
                    + http_date.minute as u64 * 60
                    + http_date.hour as u64 * SECONDS_IN_HOUR
                    + days * SECONDS_IN_DAY,
            )
    }
}
impl FromStr for HttpDate {
    type Err = http_types::Error;
    /// Try the three supported formats in order of preference
    /// (IMF-fixdate, RFC 850, asctime), then range-check the result.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        ensure!(s.is_ascii(), "String slice is not valid ASCII");
        let x = s.trim().as_bytes();
        let date = parse_imf_fixdate(x)
            .or_else(|_| parse_rfc850_date(x))
            .or_else(|_| parse_asctime(x))?;
        ensure!(date.is_valid(), "Invalid date time");
        Ok(date)
    }
}
impl Display for HttpDate {
    /// Format as IMF-fixdate by patching digits and names into a fixed
    /// 29-byte template, avoiding any heap allocation.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let week_day = match self.week_day {
            1 => b"Mon",
            2 => b"Tue",
            3 => b"Wed",
            4 => b"Thu",
            5 => b"Fri",
            6 => b"Sat",
            7 => b"Sun",
            _ => unreachable!(),
        };
        let month = match self.month {
            1 => b"Jan",
            2 => b"Feb",
            3 => b"Mar",
            4 => b"Apr",
            5 => b"May",
            6 => b"Jun",
            7 => b"Jul",
            8 => b"Aug",
            9 => b"Sep",
            10 => b"Oct",
            11 => b"Nov",
            12 => b"Dec",
            _ => unreachable!(),
        };
        let mut buf: [u8; 29] = [
            // template (too long to write as one byte string): b"Thu, 01 Jan 1970 00:00:00 GMT"
            b' ', b' ', b' ', b',', b' ', b'0', b'0', b' ', b' ', b' ', b' ', b' ', b'0', b'0',
            b'0', b'0', b' ', b'0', b'0', b':', b'0', b'0', b':', b'0', b'0', b' ', b'G', b'M',
            b'T',
        ];
        buf[0] = week_day[0];
        buf[1] = week_day[1];
        buf[2] = week_day[2];
        buf[5] = b'0' + (self.day / 10) as u8;
        buf[6] = b'0' + (self.day % 10) as u8;
        buf[8] = month[0];
        buf[9] = month[1];
        buf[10] = month[2];
        buf[12] = b'0' + (self.year / 1000) as u8;
        buf[13] = b'0' + (self.year / 100 % 10) as u8;
        buf[14] = b'0' + (self.year / 10 % 10) as u8;
        buf[15] = b'0' + (self.year % 10) as u8;
        buf[17] = b'0' + (self.hour / 10) as u8;
        buf[18] = b'0' + (self.hour % 10) as u8;
        buf[20] = b'0' + (self.minute / 10) as u8;
        buf[21] = b'0' + (self.minute % 10) as u8;
        buf[23] = b'0' + (self.second / 10) as u8;
        buf[24] = b'0' + (self.second % 10) as u8;
        // Safe: the buffer only ever holds ASCII bytes.
        f.write_str(from_utf8(&buf[..]).unwrap())
    }
}
impl PartialEq for HttpDate {
    /// Two dates are equal iff they denote the same instant.
    fn eq(&self, other: &HttpDate) -> bool {
        let (lhs, rhs) = (SystemTime::from(*self), SystemTime::from(*other));
        lhs == rhs
    }
}
impl PartialOrd for HttpDate {
    /// Order dates by the instant they denote.
    fn partial_cmp(&self, other: &HttpDate) -> Option<std::cmp::Ordering> {
        let (lhs, rhs) = (SystemTime::from(*self), SystemTime::from(*other));
        lhs.partial_cmp(&rhs)
    }
}
/// Gregorian leap-year rule: divisible by 4, except centuries, except
/// every fourth century.
fn is_leap_year(year: u16) -> bool {
    if year % 400 == 0 {
        true
    } else if year % 100 == 0 {
        false
    } else {
        year % 4 == 0
    }
}
// Round-trip and parsing tests for the three supported HTTP date formats.
#[cfg(test)]
mod tests {
    use std::time::{Duration, UNIX_EPOCH};
    use super::{fmt_http_date, parse_http_date, HttpDate, SECONDS_IN_DAY, SECONDS_IN_HOUR};
    #[test]
    fn test_rfc_example() {
        // The canonical example date from the HTTP RFCs, in all three formats.
        let d = UNIX_EPOCH + Duration::from_secs(784111777);
        assert_eq!(
            d,
            parse_http_date("Sun, 06 Nov 1994 08:49:37 GMT").expect("#1")
        );
        assert_eq!(
            d,
            parse_http_date("Sunday, 06-Nov-94 08:49:37 GMT").expect("#2")
        );
        assert_eq!(d, parse_http_date("Sun Nov  6 08:49:37 1994").expect("#3"));
    }
    #[test]
    fn test2() {
        let d = UNIX_EPOCH + Duration::from_secs(1475419451);
        assert_eq!(
            d,
            parse_http_date("Sun, 02 Oct 2016 14:44:11 GMT").expect("#1")
        );
        // Out-of-range years and malformed separators must be rejected.
        assert!(parse_http_date("Sun Nov 10 08:00:00 1000").is_err());
        assert!(parse_http_date("Sun Nov 10 08*00:00 2000").is_err());
        assert!(parse_http_date("Sunday, 06-Nov-94 08+49:37 GMT").is_err());
    }
    #[test]
    fn test3() {
        // Walk forward from the epoch across hour/day/month/year boundaries.
        let mut d = UNIX_EPOCH;
        assert_eq!(d, parse_http_date("Thu, 01 Jan 1970 00:00:00 GMT").unwrap());
        d += Duration::from_secs(SECONDS_IN_HOUR);
        assert_eq!(d, parse_http_date("Thu, 01 Jan 1970 01:00:00 GMT").unwrap());
        d += Duration::from_secs(SECONDS_IN_DAY);
        assert_eq!(d, parse_http_date("Fri, 02 Jan 1970 01:00:00 GMT").unwrap());
        d += Duration::from_secs(2592000);
        assert_eq!(d, parse_http_date("Sun, 01 Feb 1970 01:00:00 GMT").unwrap());
        d += Duration::from_secs(2592000);
        assert_eq!(d, parse_http_date("Tue, 03 Mar 1970 01:00:00 GMT").unwrap());
        d += Duration::from_secs(31536005);
        assert_eq!(d, parse_http_date("Wed, 03 Mar 1971 01:00:05 GMT").unwrap());
        d += Duration::from_secs(15552000);
        assert_eq!(d, parse_http_date("Mon, 30 Aug 1971 01:00:05 GMT").unwrap());
        d += Duration::from_secs(6048000);
        assert_eq!(d, parse_http_date("Mon, 08 Nov 1971 01:00:05 GMT").unwrap());
        d += Duration::from_secs(864000000);
        assert_eq!(d, parse_http_date("Fri, 26 Mar 1999 01:00:05 GMT").unwrap());
    }
    #[test]
    fn test_fmt() {
        let d = UNIX_EPOCH;
        assert_eq!(fmt_http_date(d), "Thu, 01 Jan 1970 00:00:00 GMT");
        let d = UNIX_EPOCH + Duration::from_secs(1475419451);
        assert_eq!(fmt_http_date(d), "Sun, 02 Oct 2016 14:44:11 GMT");
    }
    #[test]
    fn size_of() {
        // All seven fields pack into 8 bytes.
        assert_eq!(::std::mem::size_of::<HttpDate>(), 8);
    }
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/http1/http.rs | summer-boot/src/http1/http.rs | //! HTTP1 connections on the server.
use std::str::FromStr;
use std::task::{Context, Poll};
use std::{fmt, marker::PhantomData, pin::Pin, time::Duration};
use async_std::future::{timeout, Future, TimeoutError};
use async_std::io::{self, BufRead, BufReader, Read, Take, Write};
use async_std::{prelude::*, task};
use http_types::content::ContentLength;
use http_types::headers::{CONNECTION, EXPECT, TRANSFER_ENCODING, UPGRADE};
use http_types::upgrade::Connection;
use http_types::{ensure, ensure_eq, format_err};
use http_types::{Body, Method, Request, Response, StatusCode, Url};
use async_channel::Sender;
use async_dup::{Arc, Mutex};
use super::decode::ChunkedDecoder;
use super::encode::Encoder;
// Maximum number of headers httparse will be asked to parse.
const MAX_HEADERS: usize = 128;
// Upper bound on the size of the request head we will buffer.
const MAX_HEAD_LENGTH: usize = 8 * 1024;
const LF: u8 = b'\n';
/// Version number httparse reports when the request is HTTP/1.1.
const HTTP_1_1_VERSION: u8 = 1;
const CONTINUE_HEADER_VALUE: &str = "100-continue";
const CONTINUE_RESPONSE: &[u8] = b"HTTP/1.1 100 Continue\r\n\r\n";
// Per-connection server configuration for HTTP/1.
#[derive(Debug, Clone)]
pub struct ServerOptions {
    /// Timeout for reading the request headers; `None` disables it.
    /// Defaults to 60 seconds.
    headers_timeout: Option<Duration>,
}
impl Default for ServerOptions {
    fn default() -> Self {
        Self {
            headers_timeout: Some(Duration::from_secs(60)),
        }
    }
}
/// Accept a new incoming HTTP/1.1 connection with default options.
/// Keep-alive requests are supported by default.
pub async fn accept<RW, F, Fut>(io: RW, endpoint: F) -> http_types::Result<()>
where
    RW: Read + Write + Clone + Send + Sync + Unpin + 'static,
    F: Fn(Request) -> Fut,
    Fut: Future<Output = http_types::Result<Response>>,
{
    Server::new(io, endpoint).accept().await
}
/// Accept a new incoming HTTP/1.1 connection with the given options.
/// Keep-alive requests are supported by default.
pub async fn accept_with_opts<RW, F, Fut>(
    io: RW,
    endpoint: F,
    opts: ServerOptions,
) -> http_types::Result<()>
where
    RW: Read + Write + Clone + Send + Sync + Unpin + 'static,
    F: Fn(Request) -> Fut,
    Fut: Future<Output = http_types::Result<Response>>,
{
    Server::new(io, endpoint).with_opts(opts).accept().await
}
/// An HTTP/1 server for a single connection: the I/O handle, the endpoint
/// that produces responses, and the connection options.
#[derive(Debug)]
pub struct Server<RW, F, Fut> {
    io: RW,
    endpoint: F,
    opts: ServerOptions,
    // Marker tying the endpoint's future type into the struct's generics.
    _phantom: PhantomData<Fut>,
}
/// Whether the server should accept a subsequent request on this connection.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum ConnectionStatus {
    /// The server will not accept another request.
    Close,
    /// The server may accept another request.
    KeepAlive,
}
impl<RW, F, Fut> Server<RW, F, Fut>
where
    RW: Read + Write + Clone + Send + Sync + Unpin + 'static,
    F: Fn(Request) -> Fut,
    Fut: Future<Output = http_types::Result<Response>>,
{
    /// Build a new server for a single connection.
    pub fn new(io: RW, endpoint: F) -> Self {
        Self {
            io,
            endpoint,
            opts: Default::default(),
            _phantom: PhantomData,
        }
    }
    /// Replace the connection options (builder style).
    pub fn with_opts(mut self, opts: ServerOptions) -> Self {
        self.opts = opts;
        self
    }
    /// Accept requests in a loop until the connection should close.
    pub async fn accept(&mut self) -> http_types::Result<()> {
        while ConnectionStatus::KeepAlive == self.accept_one().await? {}
        Ok(())
    }
    /// Accept and handle a single request, returning whether the connection
    /// can be reused for another one.
    pub async fn accept_one(&mut self) -> http_types::Result<ConnectionStatus>
    where
        RW: Read + Write + Clone + Send + Sync + Unpin + 'static,
        F: Fn(Request) -> Fut,
        Fut: Future<Output = http_types::Result<Response>>,
    {
        // Decode the next request, timing out if decoding exceeds the
        // configured headers timeout.
        let fut = decode(self.io.clone());
        let (req, mut body) = if let Some(timeout_duration) = self.opts.headers_timeout {
            match timeout(timeout_duration, fut).await {
                Ok(Ok(Some(r))) => r,
                Ok(Ok(None)) | Err(TimeoutError { .. }) => return Ok(ConnectionStatus::Close), /* EOF or timeout */
                Ok(Err(e)) => return Err(e),
            }
        } else {
            match fut.await? {
                Some(r) => r,
                None => return Ok(ConnectionStatus::Close), /* EOF */
            }
        };
        // An upgrade requires both an `Upgrade` header and a `Connection`
        // header that lists "upgrade" (case-insensitive, comma-separated).
        let has_upgrade_header = req.header(UPGRADE).is_some();
        let connection_header_as_str = req
            .header(CONNECTION)
            .map(|connection| connection.as_str())
            .unwrap_or("");
        let connection_header_is_upgrade = connection_header_as_str
            .split(',')
            .any(|s| s.trim().eq_ignore_ascii_case("upgrade"));
        let mut close_connection = connection_header_as_str.eq_ignore_ascii_case("close");
        let upgrade_requested = has_upgrade_header && connection_header_is_upgrade;
        let method = req.method();
        // Pass the request to the endpoint and encode the response.
        let mut res = (self.endpoint)(req).await?;
        // Either side saying `Connection: close` ends keep-alive.
        close_connection |= res
            .header(CONNECTION)
            .map(|c| c.as_str().eq_ignore_ascii_case("close"))
            .unwrap_or(false);
        let upgrade_provided = res.status() == StatusCode::SwitchingProtocols && res.has_upgrade();
        let upgrade_sender = if upgrade_requested && upgrade_provided {
            Some(res.send_upgrade())
        } else {
            None
        };
        let mut encoder = Encoder::new(res, method);
        let bytes_written = io::copy(&mut encoder, &mut self.io).await?;
        log::trace!("wrote {} response bytes", bytes_written);
        // Drain any unread request body so the next request starts cleanly.
        let body_bytes_discarded = io::copy(&mut body, &mut io::sink()).await?;
        log::trace!(
            "discarded {} unread request body bytes",
            body_bytes_discarded
        );
        if let Some(upgrade_sender) = upgrade_sender {
            // Hand the raw connection to the upgrade handler; the HTTP/1
            // loop is done with it. NOTE(review): the result of `send` is
            // discarded — a dropped receiver is silently ignored; confirm
            // that is intended.
            upgrade_sender.send(Connection::new(self.io.clone())).await;
            Ok(ConnectionStatus::Close)
        } else if close_connection {
            Ok(ConnectionStatus::Close)
        } else {
            Ok(ConnectionStatus::KeepAlive)
        }
    }
}
/// body_reader
pub enum BodyReader<IO: Read + Unpin> {
Chunked(Arc<Mutex<ChunkedDecoder<BufReader<IO>>>>),
Fixed(Arc<Mutex<Take<BufReader<IO>>>>),
None,
}
impl<IO: Read + Unpin> fmt::Debug for BodyReader<IO> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
BodyReader::Chunked(_) => f.write_str("BodyReader::Chunked"),
BodyReader::Fixed(_) => f.write_str("BodyReader::Fixed"),
BodyReader::None => f.write_str("BodyReader::None"),
}
}
}
impl<IO: Read + Unpin> Read for BodyReader<IO> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
match &*self {
BodyReader::Chunked(r) => Pin::new(&mut *r.lock()).poll_read(cx, buf),
BodyReader::Fixed(r) => Pin::new(&mut *r.lock()).poll_read(cx, buf),
BodyReader::None => Poll::Ready(Ok(0)),
}
}
fn poll_read_vectored(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &mut [io::IoSliceMut<'_>],
) -> Poll<io::Result<usize>> {
for b in bufs {
if !b.is_empty() {
return self.poll_read(cx, b);
}
}
self.poll_read(cx, &mut [])
}
}
/// read_notifier
#[pin_project::pin_project]
pub struct ReadNotifier<B> {
#[pin]
reader: B,
sender: Sender<()>,
has_been_read: bool,
}
impl<B> fmt::Debug for ReadNotifier<B> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ReadNotifier")
.field("read", &self.has_been_read)
.finish()
}
}
impl<B: Read> ReadNotifier<B> {
pub(crate) fn new(reader: B, sender: Sender<()>) -> Self {
Self {
reader,
sender,
has_been_read: false,
}
}
}
impl<B: BufRead> BufRead for ReadNotifier<B> {
fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
self.project().reader.poll_fill_buf(cx)
}
fn consume(self: Pin<&mut Self>, amt: usize) {
self.project().reader.consume(amt)
}
}
impl<B: Read> Read for ReadNotifier<B> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
let this = self.project();
if !*this.has_been_read {
if let Ok(()) = this.sender.try_send(()) {
*this.has_been_read = true;
};
}
this.reader.poll_read(cx, buf)
}
}
/// 解码服务器上的HTTP请求
pub async fn decode<IO>(mut io: IO) -> http_types::Result<Option<(Request, BodyReader<IO>)>>
where
IO: Read + Write + Clone + Send + Sync + Unpin + 'static,
{
let mut reader = BufReader::new(io.clone());
let mut buf = Vec::new();
let mut headers = [httparse::EMPTY_HEADER; MAX_HEADERS];
let mut httparse_req = httparse::Request::new(&mut headers);
// 一直从流中读取字节,直到到达流快结束的时候
loop {
let bytes_read = reader.read_until(LF, &mut buf).await?;
// 不再从流中生成更多字节
if bytes_read == 0 {
return Ok(None);
}
// 防止DDOS
ensure!(
buf.len() < MAX_HEAD_LENGTH,
"Head byte length should be less than 8kb"
);
// 找到了流的结束分割符
let idx = buf.len() - 1;
if idx >= 3 && &buf[idx - 3..=idx] == b"\r\n\r\n" {
break;
}
}
// 将header buf转换为httparse实例,并进行验证
let status = httparse_req.parse(&buf)?;
ensure!(!status.is_partial(), "Malformed HTTP head");
// 将httparse headers + body 转换为 `http_types::Request` 类型。
let method = httparse_req.method;
let method = method.ok_or_else(|| format_err!("No method found"))?;
let version = httparse_req.version;
let version = version.ok_or_else(|| format_err!("No version found"))?;
ensure_eq!(
version,
HTTP_1_1_VERSION,
"Unsupported HTTP version 1.{}",
version
);
let url = url_from_httparse_req(&httparse_req)?;
let mut req = Request::new(Method::from_str(method)?, url);
req.set_version(Some(http_types::Version::Http1_1));
for header in httparse_req.headers.iter() {
req.append_header(header.name, std::str::from_utf8(header.value)?);
}
let content_length = ContentLength::from_headers(&req)?;
let transfer_encoding = req.header(TRANSFER_ENCODING);
// 如果内容长度和传输编码头都是,则返回400状态
// 设置为防止请求攻击。
//
// https://tools.ietf.org/html/rfc7230#section-3.3.3
http_types::ensure_status!(
content_length.is_none() || transfer_encoding.is_none(),
400,
"Unexpected Content-Length header"
);
// 建立一个通道以等待读取body, 允许我们避免在以下情况下发送100-continue
// 无需读取body即可响应,避免客户端上传body
let (body_read_sender, body_read_receiver) = async_channel::bounded(1);
if Some(CONTINUE_HEADER_VALUE) == req.header(EXPECT).map(|h| h.as_str()) {
task::spawn(async move {
// /如果客户端需要100 continue标头,则生成任务等待正文上的第一次读取尝试。
if let Ok(()) = body_read_receiver.recv().await {
io.write_all(CONTINUE_RESPONSE).await.ok();
};
// 由于发件方已移动到body中,因此此任务将 在客户端断开连接时完成,无论发送了100-continue
});
}
// 检查传输编码
if transfer_encoding
.map(|te| te.as_str().eq_ignore_ascii_case("chunked"))
.unwrap_or(false)
{
let trailer_sender = req.send_trailers();
let reader = ChunkedDecoder::new(reader, trailer_sender);
let reader = Arc::new(Mutex::new(reader));
let reader_clone = reader.clone();
let reader = ReadNotifier::new(reader, body_read_sender);
let reader = BufReader::new(reader);
req.set_body(Body::from_reader(reader, None));
Ok(Some((req, BodyReader::Chunked(reader_clone))))
} else if let Some(len) = content_length {
let len = len.len();
let reader = Arc::new(Mutex::new(reader.take(len)));
req.set_body(Body::from_reader(
BufReader::new(ReadNotifier::new(reader.clone(), body_read_sender)),
Some(len as usize),
));
Ok(Some((req, BodyReader::Fixed(reader))))
} else {
Ok(Some((req, BodyReader::None)))
}
}
fn url_from_httparse_req(req: &httparse::Request<'_, '_>) -> http_types::Result<Url> {
let path = req.path.ok_or_else(|| format_err!("No uri found"))?;
let host = req
.headers
.iter()
.find(|x| x.name.eq_ignore_ascii_case("host"))
.ok_or_else(|| format_err!("Mandatory Host header missing"))?
.value;
let host = std::str::from_utf8(host)?;
if path.starts_with("http://") || path.starts_with("https://") {
Ok(Url::parse(path)?)
} else if path.starts_with('/') {
Ok(Url::parse(&format!("http://{}{}", host, path))?)
} else if req.method.unwrap().eq_ignore_ascii_case("connect") {
Ok(Url::parse(&format!("http://{}/", path))?)
} else {
Err(format_err!("unexpected uri format"))
}
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/http1/encode.rs | summer-boot/src/http1/encode.rs | use std::io::Write;
use std::pin::Pin;
use std::time::SystemTime;
use crate::read_to_end;
use async_std::io::{self, Cursor, Read};
use async_std::task::{Context, Poll};
use futures_util::ready;
use http_types::headers::{CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
use http_types::{Method, Response};
use super::body_encoder::BodyEncoder;
use super::date::fmt_http_date;
#[derive(Debug)]
pub(crate) enum EncoderState {
Start,
Head(Cursor<Vec<u8>>),
Body(BodyEncoder),
End,
}
/// streaming HTTP 编码
#[derive(Debug)]
pub struct Encoder {
response: Response,
state: EncoderState,
method: Method,
}
impl Read for Encoder {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
loop {
self.state = match self.state {
EncoderState::Start => EncoderState::Head(self.compute_head()?),
EncoderState::Head(ref mut cursor) => {
read_to_end!(Pin::new(cursor).poll_read(cx, buf));
if self.method == Method::Head {
EncoderState::End
} else {
EncoderState::Body(BodyEncoder::new(self.response.take_body()))
}
}
EncoderState::Body(ref mut encoder) => {
read_to_end!(Pin::new(encoder).poll_read(cx, buf));
EncoderState::End
}
EncoderState::End => return Poll::Ready(Ok(0)),
}
}
}
}
impl Encoder {
/// 创建编码的新实例。
pub fn new(response: Response, method: Method) -> Self {
Self {
method,
response,
state: EncoderState::Start,
}
}
fn finalize_headers(&mut self) {
// 如果正文没有流传输,可以提前设置内容长度。否则需要分块发送所有
if let Some(len) = self.response.len() {
self.response.insert_header(CONTENT_LENGTH, len.to_string());
} else {
self.response.insert_header(TRANSFER_ENCODING, "chunked");
}
if self.response.header(DATE).is_none() {
let date = fmt_http_date(SystemTime::now());
self.response.insert_header(DATE, date);
}
}
/// 第一次轮询时,将header编码到缓冲区。
fn compute_head(&mut self) -> io::Result<Cursor<Vec<u8>>> {
let mut head = Vec::with_capacity(128);
let reason = self.response.status().canonical_reason();
let status = self.response.status();
write!(head, "HTTP/1.1 {} {}\r\n", status, reason)?;
self.finalize_headers();
let mut headers = self.response.iter().collect::<Vec<_>>();
headers.sort_unstable_by_key(|(h, _)| h.as_str());
for (header, values) in headers {
for value in values.iter() {
write!(head, "{}: {}\r\n", header, value)?;
}
}
write!(head, "\r\n")?;
Ok(Cursor::new(head))
}
}
/// 用于分块编码的编码struct
#[derive(Debug)]
pub(crate) struct ChunkedEncoder<R> {
reader: R,
done: bool,
}
impl<R: Read + Unpin> ChunkedEncoder<R> {
/// 创建一个新的实例
pub(crate) fn new(reader: R) -> Self {
Self {
reader,
done: false,
}
}
}
impl<R: Read + Unpin> Read for ChunkedEncoder<R> {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
if self.done {
return Poll::Ready(Ok(0));
}
let reader = &mut self.reader;
let max_bytes_to_read = max_bytes_to_read(buf.len());
let bytes = ready!(Pin::new(reader).poll_read(cx, &mut buf[..max_bytes_to_read]))?;
if bytes == 0 {
self.done = true;
}
let start = format!("{:X}\r\n", bytes);
let start_length = start.as_bytes().len();
let total = bytes + start_length + 2;
buf.copy_within(..bytes, start_length);
buf[..start_length].copy_from_slice(start.as_bytes());
buf[total - 2..total].copy_from_slice(b"\r\n");
Poll::Ready(Ok(total))
}
}
fn max_bytes_to_read(buf_len: usize) -> usize {
if buf_len < 6 {
// 最小读取大小为6表示正文中的内容。其他五个字节是 1\r\n\r\n
//其中 _ 是实际内容
panic!("buffers of length {} are too small for this implementation. if this is a problem for you, please open an issue", buf_len);
}
let bytes_remaining_after_two_cr_lns = (buf_len - 4) as f64;
// the maximum number of bytes that the hex representation of remaining bytes might take
let max_bytes_of_hex_framing = bytes_remaining_after_two_cr_lns.log2() / 4f64;
(bytes_remaining_after_two_cr_lns - max_bytes_of_hex_framing.ceil()) as usize
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/http1/decode.rs | summer-boot/src/http1/decode.rs | use std::fmt;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use async_std::io::{self, Read};
use futures_util::ready;
use http_types::trailers::{Sender, Trailers};
/// 解码
#[derive(Debug)]
pub struct ChunkedDecoder<R: Read> {
/// 底层流
inner: R,
/// 当前状态
state: State,
/// 当前区块大小(分析大小时增大,读取区块时减小)
chunk_size: u64,
/// 通道发送
trailer_sender: Option<Sender>,
}
impl<R: Read> ChunkedDecoder<R> {
pub(crate) fn new(inner: R, trailer_sender: Sender) -> Self {
ChunkedDecoder {
inner,
state: State::ChunkSize,
chunk_size: 0,
trailer_sender: Some(trailer_sender),
}
}
}
/// 解码状态.
enum State {
ChunkSize,
ChunkSizeExpectLf,
ChunkBody,
ChunkBodyExpectCr,
ChunkBodyExpectLf,
Trailers(usize, Box<[u8; 8192]>),
TrailerSending(Pin<Box<dyn Future<Output = ()> + 'static + Send + Sync>>),
Done,
}
impl fmt::Debug for State {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
State::ChunkSize => write!(f, "State::ChunkSize"),
State::ChunkSizeExpectLf => write!(f, "State::ChunkSizeExpectLf"),
State::ChunkBody => write!(f, "State::ChunkBody"),
State::ChunkBodyExpectCr => write!(f, "State::ChunkBodyExpectCr"),
State::ChunkBodyExpectLf => write!(f, "State::ChunkBodyExpectLf"),
State::Trailers(len, _) => write!(f, "State::Trailers({}, _)", len),
State::TrailerSending(_) => write!(f, "State::TrailerSending"),
State::Done => write!(f, "State::Done"),
}
}
}
impl<R: Read + Unpin> ChunkedDecoder<R> {
fn poll_read_byte(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<u8>> {
let mut byte = [0u8];
if ready!(Pin::new(&mut self.inner).poll_read(cx, &mut byte))? == 1 {
Poll::Ready(Ok(byte[0]))
} else {
eof()
}
}
fn expect_byte(
&mut self,
cx: &mut Context<'_>,
expected_byte: u8,
expected: &'static str,
) -> Poll<io::Result<()>> {
let byte = ready!(self.poll_read_byte(cx))?;
if byte == expected_byte {
Poll::Ready(Ok(()))
} else {
unexpected(byte, expected)
}
}
fn send_trailers(&mut self, trailers: Trailers) {
let sender = self
.trailer_sender
.take()
.expect("invalid chunked state, tried sending multiple trailers");
let fut = Box::pin(sender.send(trailers));
self.state = State::TrailerSending(fut);
}
}
fn eof<T>() -> Poll<io::Result<T>> {
Poll::Ready(Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"Unexpected EOF when decoding chunked data",
)))
}
fn unexpected<T>(byte: u8, expected: &'static str) -> Poll<io::Result<T>> {
Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Unexpected byte {}; expected {}", byte, expected),
)))
}
fn overflow() -> io::Error {
io::Error::new(io::ErrorKind::InvalidData, "Chunk size overflowed 64 bits")
}
///
/// 实现了Read trait 的结构体ChunkedDecoder方法的poll_read实现
/// 它用于从一个Read类型的输入流中读取数据,并解析出分块编码(chunked encoding)的数据。
///
/// State::ChunkSize:在这个状态下,代码读取一个字节,并根据字节的值计算出当前块的大小。
/// State::ChunkSizeExpectLf:在这个状态下,代码期望读取到一个换行符(LF),如果当前块的大小为0,则进入State::Trailers状态,否则进入State::ChunkBody状态。
/// State::ChunkBody:在这个状态下,代码读取当前块的数据,并将读取的字节数返回。
/// State::ChunkBodyExpectCr:在这个状态下,代码期望读取到一个回车符(CR)。
/// State::ChunkBodyExpectLf:在这个状态下,代码期望读取到一个换行符(LF),并进入State::ChunkSize状态。
/// State::Trailers:在这个状态下,代码读取剩余的数据作为 trailers,并解析出 trailers 的头部字段。
/// State::TrailerSending:在这个状态下,代码等待 trailers 发送完成。
/// State::Done:在这个状态下,代码表示读取操作已完成。
///
impl<R: Read + Unpin> Read for ChunkedDecoder<R> {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
let this = &mut *self;
loop {
match this.state {
State::ChunkSize => {
let byte = ready!(this.poll_read_byte(cx))?;
let digit = match byte {
b'0'..=b'9' => byte - b'0',
b'a'..=b'f' => 10 + byte - b'a',
b'A'..=b'F' => 10 + byte - b'A',
b'\r' => {
this.state = State::ChunkSizeExpectLf;
continue;
}
_ => {
return unexpected(byte, "hex digit or CR");
}
};
this.chunk_size = this
.chunk_size
.checked_mul(16)
.ok_or_else(overflow)?
.checked_add(digit as u64)
.ok_or_else(overflow)?;
}
State::ChunkSizeExpectLf => {
ready!(this.expect_byte(cx, b'\n', "LF"))?;
if this.chunk_size == 0 {
this.state = State::Trailers(0, Box::new([0u8; 8192]));
} else {
this.state = State::ChunkBody;
}
}
State::ChunkBody => {
let max_bytes = std::cmp::min(
buf.len(),
std::cmp::min(this.chunk_size, usize::MAX as u64) as usize,
);
let bytes_read =
ready!(Pin::new(&mut this.inner).poll_read(cx, &mut buf[..max_bytes]))?;
this.chunk_size -= bytes_read as u64;
if bytes_read == 0 {
return eof();
} else if this.chunk_size == 0 {
this.state = State::ChunkBodyExpectCr;
}
return Poll::Ready(Ok(bytes_read));
}
State::ChunkBodyExpectCr => {
ready!(this.expect_byte(cx, b'\r', "CR"))?;
this.state = State::ChunkBodyExpectLf;
}
State::ChunkBodyExpectLf => {
ready!(this.expect_byte(cx, b'\n', "LF"))?;
this.state = State::ChunkSize;
}
State::Trailers(ref mut len, ref mut buf) => {
let bytes_read =
ready!(Pin::new(&mut this.inner).poll_read(cx, &mut buf[*len..]))?;
*len += bytes_read;
let len = *len;
if len == 0 {
this.send_trailers(Trailers::new());
continue;
}
if bytes_read == 0 {
return eof();
}
let mut headers = [httparse::EMPTY_HEADER; 16];
let parse_result = httparse::parse_headers(&buf[..len], &mut headers)
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
use httparse::Status;
match parse_result {
Status::Partial => {
if len == buf.len() {
return eof();
} else {
return Poll::Pending;
}
}
Status::Complete((offset, headers)) => {
if offset != len {
return unexpected(buf[offset], "end of trailers");
}
let mut trailers = Trailers::new();
for header in headers {
trailers.insert(
header.name,
String::from_utf8_lossy(header.value).as_ref(),
);
}
this.send_trailers(trailers);
}
}
}
State::TrailerSending(ref mut fut) => {
let _ = ready!(Pin::new(fut).poll(cx));
this.state = State::Done;
}
State::Done => return Poll::Ready(Ok(0)),
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use async_std::prelude::*;
#[test]
fn test_chunked_wiki() {
async_std::task::block_on(async move {
let input = async_std::io::Cursor::new(
"4\r\n\
Wiki\r\n\
5\r\n\
pedia\r\n\
E\r\n in\r\n\
\r\n\
chunks.\r\n\
0\r\n\
\r\n"
.as_bytes(),
);
let (s, _r) = async_channel::bounded(1);
let sender = Sender::new(s);
let mut decoder = ChunkedDecoder::new(input, sender);
let mut output = String::new();
decoder.read_to_string(&mut output).await.unwrap();
assert_eq!(
output,
"Wikipedia in\r\n\
\r\n\
chunks."
);
});
}
#[test]
fn test_chunked_big() {
async_std::task::block_on(async move {
let mut input: Vec<u8> = b"800\r\n".to_vec();
input.extend(vec![b'X'; 2048]);
input.extend(b"\r\n1800\r\n");
input.extend(vec![b'Y'; 6144]);
input.extend(b"\r\n800\r\n");
input.extend(vec![b'Z'; 2048]);
input.extend(b"\r\n0\r\n\r\n");
let (s, _r) = async_channel::bounded(1);
let sender = Sender::new(s);
let mut decoder = ChunkedDecoder::new(async_std::io::Cursor::new(input), sender);
let mut output = String::new();
decoder.read_to_string(&mut output).await.unwrap();
let mut expected = vec![b'X'; 2048];
expected.extend(vec![b'Y'; 6144]);
expected.extend(vec![b'Z'; 2048]);
assert_eq!(output.len(), 10240);
assert_eq!(output.as_bytes(), expected.as_slice());
});
}
#[test]
fn test_chunked_mdn() {
async_std::task::block_on(async move {
let input = async_std::io::Cursor::new(
"7\r\n\
Mozilla\r\n\
9\r\n\
Developer\r\n\
7\r\n\
Network\r\n\
0\r\n\
Expires: Wed, 21 Oct 2015 07:28:00 GMT\r\n\
\r\n"
.as_bytes(),
);
let (s, r) = async_channel::bounded(1);
let sender = Sender::new(s);
let mut decoder = ChunkedDecoder::new(input, sender);
let mut output = String::new();
decoder.read_to_string(&mut output).await.unwrap();
assert_eq!(output, "MozillaDeveloperNetwork");
let trailers = r.recv().await.unwrap();
assert_eq!(trailers.iter().count(), 1);
assert_eq!(trailers["Expires"], "Wed, 21 Oct 2015 07:28:00 GMT");
});
}
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/http1/body_reader.rs | summer-boot/src/http1/body_reader.rs | use super::decode::ChunkedDecoder;
use async_dup::{Arc, Mutex};
use async_std::io::{BufReader, Read, Take};
use async_std::task::{Context, Poll};
use std::{fmt::Debug, io, pin::Pin};
#[allow(dead_code)]
pub enum BodyReader<IO: Read + Unpin> {
Chunked(Arc<Mutex<ChunkedDecoder<BufReader<IO>>>>),
Fixed(Arc<Mutex<Take<BufReader<IO>>>>),
None,
}
impl<IO: Read + Unpin> Debug for BodyReader<IO> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
BodyReader::Chunked(_) => f.write_str("BodyReader::Chunked"),
BodyReader::Fixed(_) => f.write_str("BodyReader::Fixed"),
BodyReader::None => f.write_str("BodyReader::None"),
}
}
}
impl<IO: Read + Unpin> Read for BodyReader<IO> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
match &*self {
BodyReader::Chunked(r) => Pin::new(&mut *r.lock()).poll_read(cx, buf),
BodyReader::Fixed(r) => Pin::new(&mut *r.lock()).poll_read(cx, buf),
BodyReader::None => Poll::Ready(Ok(0)),
}
}
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/http1/mod.rs | summer-boot/src/http1/mod.rs | pub mod http;
// 其他为hhtp 私有处理
mod body_encoder;
mod body_reader;
mod date;
mod decode;
mod encode;
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/gateway/router.rs | summer-boot/src/gateway/router.rs | use crate::server;
use crate::{Request, Response, StatusCode};
use routefinder::{Captures, Router as MethodRouter};
use std::collections::HashMap;
use server::endpoint::DynEndpoint;
/// `Server` 使用的路由
///
/// 底层, 每个HTTP方法都有一个单独的状态;索引
/// 通过该方法,可以提高效率
#[allow(missing_debug_implementations)]
pub(crate) struct Router<State> {
method_map: HashMap<http_types::Method, MethodRouter<Box<DynEndpoint<State>>>>,
all_method_router: MethodRouter<Box<DynEndpoint<State>>>,
}
impl<State> std::fmt::Debug for Router<State> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Router")
.field("method_map", &self.method_map)
.field("all_method_router", &self.all_method_router)
.finish()
}
}
/// 路由URL的结果
pub(crate) struct Selection<'a, State> {
pub(crate) endpoint: &'a DynEndpoint<State>,
pub(crate) params: Captures<'static, 'static>,
}
impl<State: Clone + Send + Sync + 'static> Router<State> {
pub(crate) fn new() -> Self {
Router {
method_map: HashMap::default(),
all_method_router: MethodRouter::new(),
}
}
pub(crate) fn add(
&mut self,
path: &str,
method: http_types::Method,
ep: Box<DynEndpoint<State>>,
) {
self.method_map
.entry(method)
.or_insert_with(MethodRouter::new)
.add(path, ep)
.unwrap()
}
pub(crate) fn add_all(&mut self, path: &str, ep: Box<DynEndpoint<State>>) {
self.all_method_router.add(path, ep).unwrap()
}
pub(crate) fn route(&self, path: &str, method: http_types::Method) -> Selection<'_, State> {
if let Some(m) = self
.method_map
.get(&method)
.and_then(|r| r.best_match(path))
{
Selection {
endpoint: m.handler(),
params: m.captures().into_owned(),
}
} else if let Some(m) = self.all_method_router.best_match(path) {
Selection {
endpoint: m.handler(),
params: m.captures().into_owned(),
}
} else if method == http_types::Method::Head {
// 如果是HTTP头请求,则检查endpoints映射中是否有回调
// 如果没有,则返回到HTTP GET的逻辑,否则照常进行
self.route(path, http_types::Method::Get)
} else if self
.method_map
.iter()
.filter(|(k, _)| **k != method)
.any(|(_, r)| r.best_match(path).is_some())
{
// 如果此 `path` 可以由使用其他HTTP方法注册的回调处理
// 应返回405 Method Not Allowed
Selection {
endpoint: &method_not_allowed,
params: Captures::default(),
}
} else {
Selection {
endpoint: ¬_found_endpoint,
params: Captures::default(),
}
}
}
}
async fn not_found_endpoint<State: Clone + Send + Sync + 'static>(
_req: Request<State>,
) -> crate::Result {
Ok(Response::new(StatusCode::NotFound))
}
async fn method_not_allowed<State: Clone + Send + Sync + 'static>(
_req: Request<State>,
) -> crate::Result {
Ok(Response::new(StatusCode::MethodNotAllowed))
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/gateway/mod.rs | summer-boot/src/gateway/mod.rs | pub mod route;
pub mod router;
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/gateway/route.rs | summer-boot/src/gateway/route.rs | use crate::context;
use crate::gateway;
use crate::log;
use crate::server;
use crate::utils;
use std::fmt::Debug;
use std::io;
use std::path::Path;
use std::sync::Arc;
use context::serve_dir::ServeDir;
use context::serve_file::ServeFile;
use server::endpoint::{Endpoint, MiddlewareEndpoint};
use utils::middleware::Middleware;
use gateway::router::Router;
/// A handle to route
///
/// 所有HTTP请求都是针对资源请求的。
/// 使用`Server::at` 或者 `Route::at` 创建路由,可以使用 `Route` 类型
/// 为路径的一些HTTP方法创建endpoints
///
#[allow(missing_debug_implementations)]
pub struct Route<'a, State> {
router: &'a mut Router<State>,
path: String,
middleware: Vec<Arc<dyn Middleware<State>>>,
/// 是否将当前路由的路径作为前缀
/// [`strip_prefix`].
///
/// [`strip_prefix`]: #method.strip_prefix
prefix: bool,
}
impl<'a, State: Clone + Send + Sync + 'static> Route<'a, State> {
pub(crate) fn new(router: &'a mut Router<State>, path: String) -> Route<'a, State> {
Route {
router,
path,
middleware: Vec::new(),
prefix: false,
}
}
/// 使用指定 `path` 添加路由。
pub fn at<'b>(&'b mut self, path: &str) -> Route<'b, State> {
let mut p = self.path.clone();
if !p.ends_with('/') && !path.starts_with('/') {
p.push('/');
}
if path != "/" {
p.push_str(path);
}
Route {
router: self.router,
path: p,
middleware: self.middleware.clone(),
prefix: false,
}
}
/// 获取当前路径
#[must_use]
pub fn path(&self) -> &str {
&self.path
}
/// 将当前路径视为前缀,并从请求中去除前缀。
/// 这个方法标记为不稳定 unstable,后面需要summer boot 宏增强。
/// 给endpoints提供前缀已经删除的路径。
#[cfg(any(feature = "unstable", feature = "docs"))]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
pub fn strip_prefix(&mut self) -> &mut Self {
self.prefix = true;
self
}
/// 将给定中间件作为当前路由。
pub fn with<M>(&mut self, middleware: M) -> &mut Self
where
M: Middleware<State>,
{
log::trace!(
"Adding middleware {} to route {:?}",
middleware.name(),
self.path
);
self.middleware.push(Arc::new(middleware));
self
}
/// 重置当前路由的中间件
pub fn reset_middleware(&mut self) -> &mut Self {
self.middleware.clear();
self
}
/// 在当前路径上嵌套 [`Server`]。
///
/// # Note
///
/// 其他服务 *始终* 具有优先权
/// 重叠路径,这个例子输入 `/hello` 将
/// 返回 "Unexpected" 给客户端
///
/// ```no_run
/// #[async_std::main]
/// async fn main() -> Result<(), std::io::Error> {
/// let mut app = summer_boot::new();
/// app.at("/hello").nest({
/// let mut example = summer_boot::with_state("world");
/// example
/// .at("/")
/// .get(|req: summer_boot::Request<&'static str>| async move {
/// Ok(format!("Hello {state}!", state = req.state()))
/// });
/// example
/// });
/// app.at("/*").get(|_| async { Ok("Unexpected") });
/// app.listen("127.0.0.1:8080").await?;
/// Ok(())
/// }
/// ```
///
/// [`Server`]: struct.Server.html
pub fn nest<InnerState>(&mut self, service: crate::Server<InnerState>) -> &mut Self
where
State: Clone + Send + Sync + 'static,
InnerState: Clone + Send + Sync + 'static,
{
let prefix = self.prefix;
self.prefix = true;
self.all(service);
self.prefix = prefix;
self
}
/// 静态目录服务。
///
/// 每一个文件都将从磁盘io流传输,并确定了mime类型
///
/// # Security
///
/// 这个方法确保了除了指定文件夹下之外的文件的路径
/// 无论是否存在都会返回StatusCode::Forbidden
///
/// # Examples
///
/// 本地服务提供目录 `./public/images/*` 来自路径
/// `localhost:8080/images/*`.
///
/// ```no_run
/// #[async_std::main]
/// async fn main() -> Result<(), std::io::Error> {
/// let mut app = summer_boot::new();
/// // app.at("/images/*").serve_dir("public/images/")?;
/// app.listen("127.0.0.1:8080").await.unwrap();
/// Ok(())
/// }
/// ```
pub fn serve_dir(&mut self, dir: impl AsRef<Path>) -> io::Result<()> {
// 验证路径是否存在,如果不存在,则返回错误。
let dir = dir.as_ref().to_owned().canonicalize()?;
let prefix = self.path().to_string();
self.get(ServeDir::new(prefix, dir));
Ok(())
}
/// 提供静态文件。
///
/// 每一个文件都将从磁盘io流传输,并确定了mime类型
/// 基于magic bytes。类似serve_dir
pub fn serve_file(&mut self, file: impl AsRef<Path>) -> io::Result<()> {
self.get(ServeFile::init(file)?);
Ok(())
}
/// 给定HTTP方法添加endpoint
pub fn method(&mut self, method: http_types::Method, ep: impl Endpoint<State>) -> &mut Self {
if self.prefix {
let ep = StripPrefixEndpoint::new(ep);
let wildcard = self.at("*");
wildcard.router.add(
&wildcard.path,
method,
MiddlewareEndpoint::wrap_with_middleware(ep, &wildcard.middleware),
);
} else {
self.router.add(
&self.path,
method,
MiddlewareEndpoint::wrap_with_middleware(ep, &self.middleware),
);
}
self
}
/// 为所有HTTP方法添加一个endpoin,作为回调。
///
/// 尝试使用特定HTTP方法的路由。
pub fn all(&mut self, ep: impl Endpoint<State>) -> &mut Self {
if self.prefix {
let ep = StripPrefixEndpoint::new(ep);
let wildcard = self.at("*");
wildcard.router.add_all(
&wildcard.path,
MiddlewareEndpoint::wrap_with_middleware(ep, &wildcard.middleware),
);
} else {
self.router.add_all(
&self.path,
MiddlewareEndpoint::wrap_with_middleware(ep, &self.middleware),
);
}
self
}
/// 为 `GET` 请求添加endpoint
pub fn get(&mut self, ep: impl Endpoint<State>) -> &mut Self {
self.method(http_types::Method::Get, ep);
self
}
/// 为 `HEAD` 请求添加endpoint
pub fn head(&mut self, ep: impl Endpoint<State>) -> &mut Self {
self.method(http_types::Method::Head, ep);
self
}
/// 为 `PUT` 请求添加endpoint
pub fn put(&mut self, ep: impl Endpoint<State>) -> &mut Self {
self.method(http_types::Method::Put, ep);
self
}
/// 为 `POST` 请求添加endpoint
pub fn post(&mut self, ep: impl Endpoint<State>) -> &mut Self {
self.method(http_types::Method::Post, ep);
self
}
/// 为 `DELETE 请求添加endpoint
pub fn delete(&mut self, ep: impl Endpoint<State>) -> &mut Self {
self.method(http_types::Method::Delete, ep);
self
}
/// 为 `OPTIONS` 请求添加endpoint
pub fn options(&mut self, ep: impl Endpoint<State>) -> &mut Self {
self.method(http_types::Method::Options, ep);
self
}
/// 为 `CONNECT` 请求添加endpoint
pub fn connect(&mut self, ep: impl Endpoint<State>) -> &mut Self {
self.method(http_types::Method::Connect, ep);
self
}
/// 为 `PATCH` 请求添加endpoint
pub fn patch(&mut self, ep: impl Endpoint<State>) -> &mut Self {
self.method(http_types::Method::Patch, ep);
self
}
/// 为 `TRACE` 请求添加endpoint
pub fn trace(&mut self, ep: impl Endpoint<State>) -> &mut Self {
self.method(http_types::Method::Trace, ep);
self
}
}
#[derive(Debug)]
struct StripPrefixEndpoint<E>(std::sync::Arc<E>);
impl<E> StripPrefixEndpoint<E> {
fn new(ep: E) -> Self {
Self(std::sync::Arc::new(ep))
}
}
impl<E> Clone for StripPrefixEndpoint<E> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
#[async_trait::async_trait]
impl<State, E> Endpoint<State> for StripPrefixEndpoint<E>
where
State: Clone + Send + Sync + 'static,
E: Endpoint<State>,
{
async fn call(&self, req: crate::Request<State>) -> crate::Result {
let crate::Request {
state,
mut req,
route_params,
} = req;
let rest = route_params
.iter()
.rev()
.find_map(|captures| captures.wildcard())
.unwrap_or_default();
req.url_mut().set_path(rest);
self.0
.call(crate::Request {
state,
req,
route_params,
})
.await
}
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/utils/response.rs | summer-boot/src/utils/response.rs | use std::convert::TryInto;
use std::fmt::{Debug, Display};
use std::ops::Index;
use serde::Serialize;
use crate::http_types::headers::{self, HeaderName, HeaderValues, ToHeaderValues};
use crate::http_types::{self, Body, Error, Mime, StatusCode};
use crate::ResponseBuilder;
/// HTTP response
#[derive(Debug)]
pub struct Response {
pub(crate) res: http_types::Response,
pub(crate) error: Option<Error>,
}
impl Response {
/// 创建一个新的实例
#[must_use]
pub fn new<S>(status: S) -> Self
where
S: TryInto<StatusCode>,
S::Error: Debug,
{
let res = http_types::Response::new(status);
Self { res, error: None }
}
#[must_use]
pub fn builder<S>(status: S) -> ResponseBuilder
where
S: TryInto<StatusCode>,
S::Error: Debug,
{
ResponseBuilder::new(status)
}
#[must_use]
pub fn status(&self) -> crate::StatusCode {
self.res.status()
}
pub fn set_status<S>(&mut self, status: S)
where
S: TryInto<StatusCode>,
S::Error: Debug,
{
let status = status.try_into().expect("无法转换为有效的 `StatusCode`");
self.res.set_status(status);
}
#[must_use]
pub fn len(&self) -> Option<usize> {
self.res.len()
}
#[must_use]
pub fn is_empty(&self) -> Option<bool> {
Some(self.res.len()? == 0)
}
#[must_use]
pub fn header(&self, name: impl Into<HeaderName>) -> Option<&HeaderValues> {
self.res.header(name)
}
#[must_use]
pub fn header_mut(&mut self, name: impl Into<HeaderName>) -> Option<&mut HeaderValues> {
self.res.header_mut(name)
}
pub fn remove_header(&mut self, name: impl Into<HeaderName>) -> Option<HeaderValues> {
self.res.remove_header(name)
}
pub fn insert_header(&mut self, key: impl Into<HeaderName>, value: impl ToHeaderValues) {
self.res.insert_header(key, value);
}
pub fn append_header(&mut self, key: impl Into<HeaderName>, value: impl ToHeaderValues) {
self.res.append_header(key, value);
}
#[must_use]
pub fn iter(&self) -> headers::Iter<'_> {
self.res.iter()
}
#[must_use]
pub fn iter_mut(&mut self) -> headers::IterMut<'_> {
self.res.iter_mut()
}
#[must_use]
pub fn header_names(&self) -> headers::Names<'_> {
self.res.header_names()
}
#[must_use]
pub fn header_values(&self) -> headers::Values<'_> {
self.res.header_values()
}
#[must_use]
pub fn content_type(&self) -> Option<Mime> {
self.res.content_type()
}
pub fn set_content_type(&mut self, mime: impl Into<Mime>) {
self.res.set_content_type(mime.into());
}
/// 设置body读取.
pub fn set_body(&mut self, body: impl Into<Body>) {
self.res.set_body(body);
}
pub fn take_body(&mut self) -> Body {
self.res.take_body()
}
pub fn swap_body(&mut self, body: &mut Body) {
self.res.swap_body(body)
}
pub fn body_json(&mut self, json: &impl Serialize) -> crate::Result<()> {
self.res.set_body(Body::from_json(json)?);
Ok(())
}
pub fn body_string(&mut self, string: String) {
self.res.set_body(Body::from_string(string));
}
pub fn body_bytes(&mut self, bytes: impl AsRef<[u8]>) {
self.set_body(Body::from(bytes.as_ref()));
}
pub async fn body_file(&mut self, path: impl AsRef<std::path::Path>) -> std::io::Result<()> {
self.set_body(Body::from_file(path).await?);
Ok(())
}
#[cfg(feature = "cookies")]
pub fn insert_cookie(&mut self, cookie: Cookie<'static>) {
self.cookie_events.push(CookieEvent::Added(cookie));
}
#[cfg(feature = "cookies")]
pub fn remove_cookie(&mut self, cookie: Cookie<'static>) {
self.cookie_events.push(CookieEvent::Removed(cookie));
}
pub fn error(&self) -> Option<&Error> {
self.error.as_ref()
}
pub fn downcast_error<E>(&self) -> Option<&E>
where
E: Display + Debug + Send + Sync + 'static,
{
self.error.as_ref()?.downcast_ref()
}
pub fn take_error(&mut self) -> Option<Error> {
self.error.take()
}
pub fn set_error(&mut self, error: impl Into<Error>) {
self.error = Some(error.into());
}
#[must_use]
pub fn ext<T: Send + Sync + 'static>(&self) -> Option<&T> {
self.res.ext().get()
}
pub fn insert_ext<T: Send + Sync + 'static>(&mut self, val: T) {
self.res.ext_mut().insert(val);
}
pub fn from_res<T>(value: T) -> Self
where
T: Into<http_types::Response>,
{
let res: http_types::Response = value.into();
Self {
res,
error: None,
#[cfg(feature = "cookies")]
cookie_events: vec![],
}
}
}
impl AsRef<http_types::Response> for Response {
fn as_ref(&self) -> &http_types::Response {
&self.res
}
}
impl AsMut<http_types::Response> for Response {
fn as_mut(&mut self) -> &mut http_types::Response {
&mut self.res
}
}
impl AsRef<http_types::Headers> for Response {
fn as_ref(&self) -> &http_types::Headers {
self.res.as_ref()
}
}
impl AsMut<http_types::Headers> for Response {
fn as_mut(&mut self) -> &mut http_types::Headers {
self.res.as_mut()
}
}
impl From<Response> for http_types::Response {
fn from(response: Response) -> http_types::Response {
response.res
}
}
impl From<http_types::Body> for Response {
fn from(body: http_types::Body) -> Self {
let mut res = Response::new(200);
res.set_body(body);
res
}
}
impl From<serde_json::Value> for Response {
fn from(json_value: serde_json::Value) -> Self {
Body::from_json(&json_value)
.map(|body| body.into())
.unwrap_or_else(|_| Response::new(StatusCode::InternalServerError))
}
}
impl From<Error> for Response {
fn from(err: Error) -> Self {
Self {
res: http_types::Response::new(err.status()),
error: Some(err),
#[cfg(feature = "cookies")]
cookie_events: vec![],
}
}
}
impl From<http_types::Response> for Response {
fn from(res: http_types::Response) -> Self {
Self {
res,
error: None,
#[cfg(feature = "cookies")]
cookie_events: vec![],
}
}
}
impl From<StatusCode> for Response {
fn from(status: StatusCode) -> Self {
let res: http_types::Response = status.into();
res.into()
}
}
impl From<String> for Response {
fn from(s: String) -> Self {
Body::from_string(s).into()
}
}
impl<'a> From<&'a str> for Response {
fn from(s: &'a str) -> Self {
Body::from_string(String::from(s)).into()
}
}
impl IntoIterator for Response {
type Item = (HeaderName, HeaderValues);
type IntoIter = http_types::headers::IntoIter;
#[inline]
fn into_iter(self) -> Self::IntoIter {
self.res.into_iter()
}
}
impl<'a> IntoIterator for &'a Response {
type Item = (&'a HeaderName, &'a HeaderValues);
type IntoIter = http_types::headers::Iter<'a>;
#[inline]
fn into_iter(self) -> Self::IntoIter {
self.res.iter()
}
}
impl<'a> IntoIterator for &'a mut Response {
type Item = (&'a HeaderName, &'a mut HeaderValues);
type IntoIter = http_types::headers::IterMut<'a>;
#[inline]
fn into_iter(self) -> Self::IntoIter {
self.res.iter_mut()
}
}
impl Index<HeaderName> for Response {
type Output = HeaderValues;
#[inline]
fn index(&self, name: HeaderName) -> &HeaderValues {
&self.res[name]
}
}
impl Index<&str> for Response {
type Output = HeaderValues;
#[inline]
fn index(&self, name: &str) -> &HeaderValues {
&self.res[name]
}
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/utils/middleware.rs | summer-boot/src/utils/middleware.rs | use crate::server;
use crate::{Request, Response};
use async_trait::async_trait;
use server::endpoint::DynEndpoint;
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;
/// 异步中间件trait
#[async_trait]
pub trait Middleware<State>: Send + Sync + 'static {
/// 异步处理请求并返回响应。
async fn handle(&self, request: Request<State>, next: Next<'_, State>) -> crate::Result;
/// 设置中间件的名称。默认情况下,使用类型名字.
fn name(&self) -> &str {
std::any::type_name::<Self>()
}
}
#[async_trait]
impl<State, F> Middleware<State> for F
where
State: Clone + Send + Sync + 'static,
F: Send
+ Sync
+ 'static
+ for<'a> Fn(
Request<State>,
Next<'a, State>,
) -> Pin<Box<dyn Future<Output = crate::Result> + 'a + Send>>,
{
async fn handle(&self, req: Request<State>, next: Next<'_, State>) -> crate::Result {
(self)(req, next).await
}
}
/// 中间件链系列其余部分,包括endpoints。
#[allow(missing_debug_implementations)]
pub struct Next<'a, State> {
pub(crate) endpoint: &'a DynEndpoint<State>,
pub(crate) next_middleware: &'a [Arc<dyn Middleware<State>>],
}
impl<State: Clone + Send + Sync + 'static> Next<'_, State> {
/// 异步执行其余的中间件。
pub async fn run(mut self, req: Request<State>) -> Response {
if let Some((current, next)) = self.next_middleware.split_first() {
self.next_middleware = next;
match current.handle(req, self).await {
Ok(request) => request,
Err(err) => err.into(),
}
} else {
match self.endpoint.call(req).await {
Ok(request) => request,
Err(err) => err.into(),
}
}
}
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/utils/response_builder.rs | summer-boot/src/utils/response_builder.rs | use serde::Serialize;
use crate::http_types::headers::{HeaderName, ToHeaderValues};
use crate::http_types::{Body, Mime, StatusCode};
use crate::Response;
use std::convert::TryInto;
#[derive(Debug)]
pub struct ResponseBuilder(Response);
impl ResponseBuilder {
pub(crate) fn new<S>(status: S) -> Self
where
S: TryInto<StatusCode>,
S::Error: std::fmt::Debug,
{
Self(Response::new(status))
}
pub fn build(self) -> Response {
self.0
}
pub fn header(mut self, key: impl Into<HeaderName>, value: impl ToHeaderValues) -> Self {
self.0.insert_header(key, value);
self
}
pub fn content_type(mut self, content_type: impl Into<Mime>) -> Self {
self.0.set_content_type(content_type);
self
}
pub fn body(mut self, body: impl Into<Body>) -> Self {
self.0.set_body(body);
self
}
pub fn body_json(self, json: &impl Serialize) -> crate::Result<Self> {
Ok(self.body(Body::from_json(json)?))
}
pub fn body_string(self, string: String) -> Self {
self.body(Body::from_string(string))
}
pub fn body_bytes(self, bytes: impl AsRef<[u8]>) -> Self {
self.body(Body::from(bytes.as_ref()))
}
pub async fn body_file(self, path: impl AsRef<std::path::Path>) -> std::io::Result<Self> {
Ok(self.body(Body::from_file(path).await?))
}
}
impl From<ResponseBuilder> for Response {
fn from(response_builder: ResponseBuilder) -> Response {
response_builder.build()
}
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/utils/util.rs | summer-boot/src/utils/util.rs | //! 其他util
use crate::{Middleware, Next, Request, Response};
pub use async_trait::async_trait;
use std::future::Future;
/// 定义对传入请求进行操作的中间件。
///
/// 用于定义内联中间件的闭包。
///
/// # Examples
///
/// ```rust
/// use summer_boot::utils::util;
/// use summer_boot::Request;
/// use std::time::Instant;
///
/// let mut app = summer_boot::new();
/// app.with(util::Before(|mut request: Request<()>| async move {
/// request.set_ext(Instant::now());
/// request
/// }));
/// ```
#[derive(Debug)]
pub struct Before<F>(pub F);
#[async_trait]
impl<State, F, Fut> Middleware<State> for Before<F>
where
State: Clone + Send + Sync + 'static,
F: Fn(Request<State>) -> Fut + Send + Sync + 'static,
Fut: Future<Output = Request<State>> + Send + Sync + 'static,
{
async fn handle(&self, request: Request<State>, next: Next<'_, State>) -> crate::Result {
let request = (self.0)(request).await;
Ok(next.run(request).await)
}
}
/// 定义对传出响应进行操作的中间件。
///
/// 用于定义内联中间件的闭包。
///
#[derive(Debug)]
pub struct After<F>(pub F);
#[async_trait]
impl<State, F, Fut> Middleware<State> for After<F>
where
State: Clone + Send + Sync + 'static,
F: Fn(Response) -> Fut + Send + Sync + 'static,
Fut: Future<Output = crate::Result> + Send + Sync + 'static,
{
async fn handle(&self, request: Request<State>, next: Next<'_, State>) -> crate::Result {
let response = next.run(request).await;
(self.0)(response).await
}
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/utils/mod.rs | summer-boot/src/utils/mod.rs | pub mod middleware;
pub mod request;
pub mod response;
pub mod response_builder;
pub mod util;
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/utils/request.rs | summer-boot/src/utils/request.rs | use async_std::io::{self, prelude::*};
use async_std::task::{Context, Poll};
use routefinder::Captures;
use std::ops::Index;
use std::pin::Pin;
use crate::http_types::format_err;
use crate::http_types::headers::{self, HeaderName, HeaderValues, ToHeaderValues};
use crate::http_types::{self, Body, Method, Mime, StatusCode, Url, Version};
use crate::Response;
pin_project_lite::pin_project! {
/// HTTP request.
///
/// 请求、路由参数以及访问请求的各种方式。
/// 中间件和endpoints之间的通信
#[derive(Debug)]
pub struct Request<State> {
pub(crate) state: State,
#[pin]
pub(crate) req: http_types::Request,
pub(crate) route_params: Vec<Captures<'static, 'static>>,
}
}
impl<State> Request<State> {
/// 创建一个新的 `Request`.
pub(crate) fn new(
state: State,
req: http_types::Request,
route_params: Vec<Captures<'static, 'static>>,
) -> Self {
Self {
state,
req,
route_params,
}
}
/// 访问请求的HTTP方法。
///
/// # Examples
///
/// ```no_run
/// # use async_std::task::block_on;
/// # fn main() -> Result<(), std::io::Error> { block_on(async {
/// #
/// use summer_boot::Request;
///
/// let mut app = summer_boot::new();
/// app.at("/").get(|req: Request<()>| async move {
/// assert_eq!(req.method(), http_types::Method::Get);
/// Ok("")
/// });
/// app.listen("127.0.0.1:8080").await?;
/// #
/// # Ok(()) })}
/// ```
#[must_use]
pub fn method(&self) -> Method {
self.req.method()
}
/// 访问请求的完整URI方法。
#[must_use]
pub fn url(&self) -> &Url {
self.req.url()
}
/// 访问请求的HTTP版本。
///
/// # Examples
///
/// ```no_run
/// # use async_std::task::block_on;
/// # fn main() -> Result<(), std::io::Error> { block_on(async {
/// #
/// use summer_boot::Request;
///
/// let mut app = summer_boot::new();
/// app.at("/").get(|req: Request<()>| async move {
/// assert_eq!(req.version(), Some(http_types::Version::Http1_1));
/// Ok("")
/// });
/// app.listen("127.0.0.1:8080").await?;
/// #
/// # Ok(()) })}
/// ```
#[must_use]
pub fn version(&self) -> Option<Version> {
self.req.version()
}
/// 获取基础传输的socket地址
#[must_use]
pub fn peer_addr(&self) -> Option<&str> {
self.req.peer_addr()
}
/// 获取基础传输的本地地址
#[must_use]
pub fn local_addr(&self) -> Option<&str> {
self.req.local_addr()
}
/// 获取此请求的远程地址。
///
/// 按以下优先级确定:
/// 1. `Forwarded` head `for` key
/// 2. 第一个 `X-Forwarded-For` header
/// 3. 传输的对等地址
#[must_use]
pub fn remote(&self) -> Option<&str> {
self.req.remote()
}
/// 获取此请求的目标主机。
///
/// 按以下优先级确定:
/// 1. `Forwarded` header `host` key
/// 2. 第一个 `X-Forwarded-Host` header
/// 3. `Host` header
/// 4. URL域
#[must_use]
pub fn host(&self) -> Option<&str> {
self.req.host()
}
/// 以“Mime”形式获取请求内容类型。
///
/// 这将获取请求 `Content-Type` header。
///
#[must_use]
pub fn content_type(&self) -> Option<Mime> {
self.req.content_type()
}
/// 获取HTTP header.
///
/// # Examples
///
/// ```no_run
/// # use async_std::task::block_on;
/// # fn main() -> Result<(), std::io::Error> { block_on(async {
/// #
/// use summer_boot::Request;
///
/// let mut app = summer_boot::new();
/// app.at("/").get(|req: Request<()>| async move {
/// assert_eq!(req.header("X-Forwarded-For").unwrap(), "127.0.0.1");
/// Ok("")
/// });
/// app.listen("127.0.0.1:8080").await?;
/// #
/// # Ok(()) })}
/// ```
#[must_use]
pub fn header(
&self,
key: impl Into<http_types::headers::HeaderName>,
) -> Option<&http_types::headers::HeaderValues> {
self.req.header(key)
}
/// 获取标题的可变引用。
pub fn header_mut(&mut self, name: impl Into<HeaderName>) -> Option<&mut HeaderValues> {
self.req.header_mut(name)
}
/// 设置一个 HTTP header.
pub fn insert_header(
&mut self,
name: impl Into<HeaderName>,
values: impl ToHeaderValues,
) -> Option<HeaderValues> {
self.req.insert_header(name, values)
}
/// 将header添加到headers。
///
/// 与 `insert` 不同,此函数不会重写标头的内容,而是插入
/// 如果没有header。添加到现有的headers列表中。
pub fn append_header(&mut self, name: impl Into<HeaderName>, values: impl ToHeaderValues) {
self.req.append_header(name, values)
}
/// 移除一个 header.
pub fn remove_header(&mut self, name: impl Into<HeaderName>) -> Option<HeaderValues> {
self.req.remove_header(name)
}
/// 以任意顺序访问所有header的迭代。
#[must_use]
pub fn iter(&self) -> headers::Iter<'_> {
self.req.iter()
}
/// 迭代器以任意顺序访问所有header,并对值进行可变引用。
#[must_use]
pub fn iter_mut(&mut self) -> headers::IterMut<'_> {
self.req.iter_mut()
}
/// 以任意顺序访问所有header名称的迭代。
#[must_use]
pub fn header_names(&self) -> headers::Names<'_> {
self.req.header_names()
}
/// 以任意顺序访问所有header值的迭代。
#[must_use]
pub fn header_values(&self) -> headers::Values<'_> {
self.req.header_values()
}
/// 获取请求扩展值。
#[must_use]
pub fn ext<T: Send + Sync + 'static>(&self) -> Option<&T> {
self.req.ext().get()
}
/// 获取对存储在请求扩展中的值的可变引用。
#[must_use]
pub fn ext_mut<T: Send + Sync + 'static>(&mut self) -> Option<&mut T> {
self.req.ext_mut().get_mut()
}
/// 设置请求扩展值。
pub fn set_ext<T: Send + Sync + 'static>(&mut self, val: T) -> Option<T> {
self.req.ext_mut().insert(val)
}
#[must_use]
/// 访问应用程序范围的状态。
pub fn state(&self) -> &State {
&self.state
}
/// 按名称提取和解析路由参数。
///
/// 以 `&str` 形式返回参数,该参数是从此 `Request` 借用的。
///
/// 名称应不包括引用 `:`。
///
/// # Errors
///
/// 如果 `key` 不是路由的有效参数,则返回错误。
///
/// # Examples
///
/// ```no_run
/// # use async_std::task::block_on;
/// # fn main() -> Result<(), std::io::Error> { block_on(async {
/// #
/// use summer_boot::{Request, Result};
///
/// async fn greet(req: Request<()>) -> Result<String> {
/// let name = req.param("name").unwrap_or("world");
/// Ok(format!("Hello, {}!", name))
/// }
///
/// let mut app = summer_boot::new();
/// app.at("/hello").get(greet);
/// app.at("/hello/:name").get(greet);
/// app.listen("127.0.0.1:8080").await?;
/// #
/// # Ok(()) })}
/// ```
pub fn param(&self, key: &str) -> crate::Result<&str> {
self.route_params
.iter()
.rev()
.find_map(|captures| captures.get(key))
.ok_or_else(|| format_err!("Param \"{}\" not found", key.to_string()))
}
/// 从路由中提取通配符(如果存在)
///
/// 以 `&str` 形式返回参数,该参数是从此 `Request` 借用的。
///
/// # Examples
///
/// ```no_run
/// # use async_std::task::block_on;
/// # fn main() -> Result<(), std::io::Error> { block_on(async {
/// #
/// use summer_boot::{Request, Result};
///
/// async fn greet(req: Request<()>) -> Result<String> {
/// let name = req.wildcard().unwrap_or("world");
/// Ok(format!("Hello, {}!", name))
/// }
///
/// let mut app = summer_boot::new();
/// app.at("/hello/*").get(greet);
/// app.listen("127.0.0.1:8080").await?;
/// #
/// # Ok(()) })}
/// ```
pub fn wildcard(&self) -> Option<&str> {
self.route_params
.iter()
.rev()
.find_map(|captures| captures.wildcard())
}
///
/// 使用[serde_qs](https://docs.rs/serde_qs)将URL查询组件解析为结构
/// 将整个查询作为未解析的字符串获取,使用 `request.url().query()`。
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
/// use summer_boot::http_types::{self, convert::Deserialize};
/// use summer_boot::Request;
///
/// // 所有权结构:
///
/// #[derive(Deserialize)]
/// struct Index {
/// page: u32,
/// selections: HashMap<String, String>,
/// }
///
/// let req: Request<()> = http_types::Request::get("https://baidu.com/get?page=2&selections[width]=narrow&selections[height]=tall").into();
/// let Index { page, selections } = req.query().unwrap();
/// assert_eq!(page, 2);
/// assert_eq!(selections["width"], "narrow");
/// assert_eq!(selections["height"], "tall");
///
/// // 使用借用s:
///
/// #[derive(Deserialize)]
/// struct Query<'q> {
/// format: &'q str,
/// }
///
/// let req: Request<()> = http_types::Request::get("https://httpbin.org/get?format=bananna").into();
/// let Query { format } = req.query().unwrap();
/// assert_eq!(format, "bananna");
/// ```
pub fn query<'de, T: serde::de::Deserialize<'de>>(&'de self) -> crate::Result<T> {
self.req.query()
}
/// 设置body读取
pub fn set_body(&mut self, body: impl Into<Body>) {
self.req.set_body(body)
}
/// 处理请求 `Body`
///
/// 可以在获取或读取body后调用此方法,
/// 但是将返回一个空的`Body`.
///
/// 这对于通过AsyncReader或AsyncBufReader有用。
pub fn take_body(&mut self) -> Body {
self.req.take_body()
}
/// 将整个请求body读取字节缓冲区。
///
/// 可以在读取body后调用此方法,但生成空缓冲区。
///
/// # Errors
///
/// 读取body时遇到的任何I/O错误都会立即返回错误 `Err`
///
/// # Examples
///
/// ```no_run
/// # use async_std::task::block_on;
/// # fn main() -> Result<(), std::io::Error> { block_on(async {
/// #
/// use summer_boot::Request;
///
/// let mut app = summer_boot::new();
/// app.at("/").get(|mut req: Request<()>| async move {
/// let _body: Vec<u8> = req.body_bytes().await.unwrap();
/// Ok("")
/// });
/// app.listen("127.0.0.1:8080").await?;
/// #
/// # Ok(()) })}
/// ```
pub async fn body_bytes(&mut self) -> crate::Result<Vec<u8>> {
let res = self.req.body_bytes().await?;
Ok(res)
}
/// 将整个请求body读取字符串。
///
/// 可以在读取body后调用此方法,但生成空缓冲区。
///
/// # Errors
///
/// 读取body时遇到的任何I/O错误都会立即返回错误 `Err`
///
/// 如果body不能解释有效的UTF-8,则返回 `Err`
///
/// # Examples
///
/// ```no_run
/// # use async_std::task::block_on;
/// # fn main() -> Result<(), std::io::Error> { block_on(async {
/// #
/// use summer_boot::Request;
///
/// let mut app = summer_boot::new();
/// app.at("/").get(|mut req: Request<()>| async move {
/// let _body: String = req.body_string().await.unwrap();
/// Ok("")
/// });
/// app.listen("127.0.0.1:8080").await?;
/// #
/// # Ok(()) })}
/// ```
pub async fn body_string(&mut self) -> crate::Result<String> {
let res = self.req.body_string().await?;
Ok(res)
}
/// 通过json读取并反序列化整个请求body。
///
/// # Errors
///
/// 读取body时遇到的任何I/O错误都会立即返回错误 `Err`
///
/// 如果无法将body解释为目标类型 `T` 的有效json,则返回 `Err`
pub async fn body_json<T: serde::de::DeserializeOwned>(&mut self) -> crate::Result<T> {
let res = self.req.body_json().await?;
Ok(res)
}
/// 将请求主体解析为表单
///
/// ```rust
/// use serde::Deserialize;
/// # fn main() -> Result<(), std::io::Error> { async_std::task::block_on(async {
/// let mut app = summer_boot::new();
///
/// #[derive(Deserialize)]
/// struct Animal {
/// name: String,
/// legs: u8
/// }
///
/// app.at("/").post(|mut req: summer_boot::Request<()>| async move {
/// let animal: Animal = req.body_form().await?;
/// Ok(format!(
/// "hello, {}! i've put in an order for {} shoes",
/// animal.name, animal.legs
/// ))
/// });
///
/// # if false {
/// app.listen("localhost:8000").await?;
/// # }
///
/// // $ curl localhost:8000/test/api -d "name=chashu&legs=4"
/// // hello, chashu! i've put in an order for 4 shoes
///
/// // $ curl localhost:8000/test/api -d "name=mary%20millipede&legs=750"
/// // number too large to fit in target type
/// # Ok(()) })}
/// ```
pub async fn body_form<T: serde::de::DeserializeOwned>(&mut self) -> crate::Result<T> {
let res = self.req.body_form().await?;
Ok(res)
}
/// 按Cookie的名称返回 `Cookie`
#[cfg(feature = "cookies")]
#[must_use]
pub fn cookie(&self, name: &str) -> Option<Cookie<'static>> {
self.ext::<CookieData>()
.and_then(|cookie_data| cookie_data.content.read().unwrap().get(name).cloned())
}
/// 检索对当前session的引用。
///
/// # Panics
///
/// 如果summer_boot::sessions:SessionMiddleware 没有在运行。
#[cfg(feature = "sessions")]
pub fn session(&self) -> &crate::sessions::Session {
self.ext::<crate::sessions::Session>()
.expect("请求会话未初始化, 是否启用了summer_boot::sessions::SessionMiddleware?")
}
/// 检索对当前会话的可变引用。
///
/// # Panics
///
/// 如果summer_boot::sessions:SessionMiddleware 没有在运行。
#[cfg(feature = "sessions")]
pub fn session_mut(&mut self) -> &mut crate::sessions::Session {
self.ext_mut()
.expect("请求会话未初始化, 是否启用了summer_boot::sessions::SessionMiddleware?")
}
/// 获取body流的长度(如果已设置)。
///
/// 将固定大小的对象传递到作为body时,会设置此值(比如字符串)。 或者缓冲区。
/// 此API的使用应检查此值,决定是否使用 `Chunked`
/// 设置响应长度
#[must_use]
pub fn len(&self) -> Option<usize> {
self.req.len()
}
/// 如果请求的设置body流长度为零,则返回 `true`,否则返回 `false`。
#[must_use]
pub fn is_empty(&self) -> Option<bool> {
Some(self.req.len()? == 0)
}
}
impl<State> AsRef<http_types::Request> for Request<State> {
fn as_ref(&self) -> &http_types::Request {
&self.req
}
}
impl<State> AsMut<http_types::Request> for Request<State> {
fn as_mut(&mut self) -> &mut http_types::Request {
&mut self.req
}
}
impl<State> AsRef<http_types::Headers> for Request<State> {
fn as_ref(&self) -> &http_types::Headers {
self.req.as_ref()
}
}
impl<State> AsMut<http_types::Headers> for Request<State> {
fn as_mut(&mut self) -> &mut http_types::Headers {
self.req.as_mut()
}
}
impl<State> Read for Request<State> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
self.project().req.poll_read(cx, buf)
}
}
impl<State> From<Request<State>> for http_types::Request {
fn from(request: Request<State>) -> http_types::Request {
request.req
}
}
impl<State: Default> From<http_types::Request> for Request<State> {
fn from(request: http_types::Request) -> Request<State> {
Request::new(State::default(), request, vec![])
}
}
impl<State: Clone + Send + Sync + 'static> From<Request<State>> for Response {
fn from(mut request: Request<State>) -> Response {
let mut res = Response::new(StatusCode::Ok);
res.set_body(request.take_body());
res
}
}
impl<State> IntoIterator for Request<State> {
type Item = (HeaderName, HeaderValues);
type IntoIter = http_types::headers::IntoIter;
/// 返回对其余项的引用的迭代.
#[inline]
fn into_iter(self) -> Self::IntoIter {
self.req.into_iter()
}
}
impl<'a, State> IntoIterator for &'a Request<State> {
type Item = (&'a HeaderName, &'a HeaderValues);
type IntoIter = http_types::headers::Iter<'a>;
#[inline]
fn into_iter(self) -> Self::IntoIter {
self.req.iter()
}
}
impl<'a, State> IntoIterator for &'a mut Request<State> {
type Item = (&'a HeaderName, &'a mut HeaderValues);
type IntoIter = http_types::headers::IterMut<'a>;
#[inline]
fn into_iter(self) -> Self::IntoIter {
self.req.iter_mut()
}
}
impl<State> Index<HeaderName> for Request<State> {
type Output = HeaderValues;
/// 返回对与提供的名称相对应的值的引用。
///
/// # Panics
///
/// 如果 `Request` 中没有该名称,则会panic
#[inline]
fn index(&self, name: HeaderName) -> &HeaderValues {
&self.req[name]
}
}
impl<State> Index<&str> for Request<State> {
type Output = HeaderValues;
/// 返回对与提供的名称相对应的值的引用。
///
/// # Panics
///
/// 如果 `Request` 中没有该名称,则会panic
#[inline]
fn index(&self, name: &str) -> &HeaderValues {
&self.req[name]
}
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/common/mod.rs | summer-boot/src/common/mod.rs | //!
//! 这里在service module使用
//! 可以考虑添加消除警告的属性宏
//!
pub(crate) mod task;
pub(crate) use std::pin::Pin;
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/common/task.rs | summer-boot/src/common/task.rs | #[cfg(feature = "http1")]
use super::Never;
pub(crate) use std::task::{Context, Poll};
///
/// 重新安装feature
/// 这里用的是标准库Poll
///
#[cfg(feature = "http1")]
pub(crate) fn yield_now(cx: &mut Context<'_>) -> Poll<Never> {
cx.waker().wake_by_ref();
Poll::Pending
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/rt/mod.rs | summer-boot/src/rt/mod.rs | //! 提供了summer boot的运行时环境
//! 当前提供环境主要是 tokio 下的 Runtime
//!
use tokio::runtime::Runtime;
/// 运行时简单代理对象
#[derive(Debug)]
pub struct SummerRuntime;
impl SummerRuntime {
/// 新建 tokio runtime 运行时对象
pub fn new() -> Runtime {
tokio::runtime::Runtime::new().unwrap()
}
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/ssl/ssl.rs | summer-boot/src/ssl/ssl.rs | use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize)]
enum ClientAuth {
/**
* Client authentication is not wanted
*/
NONE,
/**
* Client authentication is wanted but not mandatory.
*/
WANT,
/**
* Client authentication is needed and mandatory.
*/
NEED,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Ssl {
enabled: Option<bool>,
ciphers: Vec<String>,
client_auth: ClientAuth,
enabled_protocols: Vec<String>,
key_alias: Option<String>,
key_passowrd: Option<String>,
key_store: Option<String>,
key_store_password: Option<String>,
key_store_type: Option<String>,
trust_store: Option<String>,
trust_store_password: Option<String>,
trust_store_type: Option<String>,
trust_store_provider: Option<String>,
certificate: Option<String>,
certificate_private_key: Option<String>,
trust_certificate: Option<String>,
trust_certificate_private_key: Option<String>,
protocol: Option<String>,
}
impl Ssl {
pub(crate) fn new(ssl_config: Ssl) -> Self {
Ssl {
enabled: Some(true),
protocol: Some(String::from("TLS")),
ciphers: ssl_config.ciphers,
client_auth: ssl_config.client_auth,
enabled_protocols: ssl_config.enabled_protocols,
key_alias: ssl_config.key_alias,
key_passowrd: ssl_config.key_passowrd,
key_store: ssl_config.key_store,
key_store_password: ssl_config.key_store_password,
key_store_type: ssl_config.key_store_type,
trust_store: ssl_config.trust_store,
trust_store_password: ssl_config.trust_store_password,
trust_store_type: ssl_config.trust_store_type,
trust_store_provider: ssl_config.trust_store_provider,
certificate: ssl_config.certificate,
certificate_private_key: ssl_config.certificate_private_key,
trust_certificate: ssl_config.trust_certificate,
trust_certificate_private_key: ssl_config.trust_certificate_private_key,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_ssl_config() {
let ssl_config = Ssl {
enabled: Some(false),
protocol: Some(String::from("TLS")),
ciphers: Vec::new(),
client_auth: ClientAuth::NONE,
enabled_protocols: Vec::new(),
key_alias: None,
key_passowrd: None,
key_store: None,
key_store_password: None,
key_store_type: None,
trust_store: None,
trust_store_password: None,
trust_store_type: None,
trust_store_provider: None,
certificate: None,
certificate_private_key: None,
trust_certificate: None,
trust_certificate_private_key: None,
};
println!("ssl config : {:?}", Ssl::new(ssl_config));
}
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/ssl/mod.rs | summer-boot/src/ssl/mod.rs | pub mod ssl; | rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/server/accept.rs | summer-boot/src/server/accept.rs | //! `Accept` trait 和支持的类型。
//!
//! 这个模块包含:
//!
//! - 用于异步接受传入数据的 [`Accept`](Accept) feture。
//! 链接.
//! - 像 `poll_fn` 这样的程序可以创建自定义的 `Accept`.
use crate::common::{
task::{self, Poll},
Pin,
};
/// 异步接受传入连接。
pub trait Accept {
/// 可以接受的连接类型。
type Conn;
/// 接受连接时可能发生的错误类型。
type Error;
/// 轮询接受下一个连接。
fn poll_accept(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<Self::Conn, Self::Error>>>;
}
/// 使用轮询函数创建一个 `Accept` 。
/// # Example
///
#[allow(dead_code)]
pub fn poll_fn<F, IO, E>(func: F) -> impl Accept<Conn = IO, Error = E>
where
F: FnMut(&mut task::Context<'_>) -> Poll<Option<Result<IO, E>>>,
{
struct PollFn<F>(F);
// 闭包 `F` 是不固定的
impl<F> Unpin for PollFn<F> {}
impl<F, IO, E> Accept for PollFn<F>
where
F: FnMut(&mut task::Context<'_>) -> Poll<Option<Result<IO, E>>>,
{
type Conn = IO;
type Error = E;
fn poll_accept(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
(self.get_mut().0)(cx)
}
}
PollFn(func)
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/server/configurable.rs | summer-boot/src/server/configurable.rs | pub struct ConfigurableWebServer {
pub port: Option<i32>,
pub server_header: Option<String>,
} | rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/server/mod.rs | summer-boot/src/server/mod.rs | mod accept;
pub mod endpoint;
pub mod server;
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/server/server.rs | summer-boot/src/server/server.rs | //! HTTP server
use super::endpoint::Endpoint;
use crate::gateway;
use crate::log;
use crate::tcp;
use crate::utils;
use crate::{Request, Route};
use async_std::io;
use async_std::sync::Arc;
use gateway::router::{Router, Selection};
use tcp::{Listener, ToListener};
use utils::middleware::{Middleware, Next};
// use summer_boot_autoconfigure;
/// HTTP服务器。
///
/// 服务器由 *state*, *endpoints* 和 *middleware* 组成。
///
/// - 服务器状态是用户定义的,通过 [`summer_boot::Server::with_state`] 函数使用. 这个
/// 状态可以用于所有应用 endpoints 共享引用.
///
/// - Endpoints 提供与指定URL [`summer_boot::Server::at`] 创建一个 *路由*
/// 然后可以用于绑定注册到 endpoints
/// 对于指定HTTP请求类型进行使用
///
/// - Middleware 通过附加request或
/// response 处理, 例如压缩、默认请求头或日志记录。到
/// 中间件添加到应用程序中,使用 [`summer_boot::Server::middleware`] 方法.
pub struct Server<State> {
router: Arc<Router<State>>,
state: State,
/// 保存 middleware 堆栈 这里用了多线程引用计数.
///
/// Vec允许我们在运行时添加中间件。
/// 内部 Arc-s 允许在内部克隆 MiddlewareEndpoint-s 。
/// 在这里不在Vec使用互斥体,因为在执行期间添加中间件应该是一个错误。
#[allow(clippy::rc_buffer)]
middleware: Arc<Vec<Arc<dyn Middleware<State>>>>,
}
impl Server<()> {
/// 创建一个summer boot web2 server.
///
/// # Examples
///
/// ```no_run
/// # use async_std::task::block_on;
/// # fn main() -> Result<(), std::io::Error> { block_on(async {
/// #
/// let mut app = summer_boot::new();
/// app.at("/").get(|_| async { Ok("Hello, world!") });
/// app.listen("127.0.0.1:8080").await?;
/// #
/// # Ok(()) }) }
/// ```
#[must_use]
pub fn new() -> Self {
Self::with_state(())
}
/// 创建一个summer boot web2 server.
///
/// 默认开启日志记录
/// 读取yml然后绑定监听
///
/// 目前把他移动到macro扫描里面去了
pub fn run() -> Self {
log::start();
let server = Self::with_state(());
// let mut listener_addr = String::from("0.0.0.0:");
// let config = summer_boot_autoconfigure::load_conf();
// if let Some(config) = config {
// let read_server = serde_json::to_string(&config.server).unwrap();
// let v: Value = serde_json::from_str(&read_server).unwrap();
// let port = v["port"].to_string();
// listener_addr.push_str(&port);
// }
// server.listen(listener_addr).await.unwrap();
server
}
}
impl Default for Server<()> {
fn default() -> Self {
Self::new()
}
}
impl<State> Server<State>
where
State: Clone + Send + Sync + 'static,
{
/// 创建一个可以共享应用程序作用域状态到新服务.
///
// /应用程序范围的状态对于存储有用
///
/// # Examples
///
/// ```no_run
/// # use async_std::task::block_on;
/// # fn main() -> Result<(), std::io::Error> { block_on(async {
/// #
/// use summer_boot::Request;
///
/// /// 共享应用程序状态
/// #[derive(Clone)]
/// struct State {
/// name: String,
/// }
///
/// // 定义状态新的一个实例
/// let state = State {
/// name: "James".to_string()
/// };
///
/// // 使用状态初始化应用程序
/// let mut app = summer_boot::with_state(state);
/// app.at("/").get(|req: Request<State>| async move {
/// Ok(format!("Hello, {}!", &req.state().name))
/// });
/// app.listen("127.0.0.1:8080").await?;
/// #
/// # Ok(()) }) }
/// ```
pub fn with_state(state: State) -> Self {
Self {
router: Arc::new(Router::new()),
middleware: Arc::new(vec![
// 暂时没有使用到cookies
Arc::new(log::LoggingSystem::new()),
]),
state,
}
}
/// 在给定的 `path`(相对于根)处添加新路由。
///
/// 路由意味着将HTTP请求映射到endpoints。
/// 一种“目录”方法,可以方便地查看总体
/// 应用程序结构。Endpoints仅由path和HTTP方法选择
/// 请求:路径决定资源和HTTP请求所选资源的各个endpoints。
///
/// #Example:
///
/// ```rust,no_run
/// # let mut app = summer_boot::new();
/// app.at("/").get(|_| async { Ok("Hello, world!") });
/// ```
///
/// 路径由零个或多个段组成,即非空字符串,由 '/' 分隔。
///
/// 或者可以使用通配符
/// `*path` 代表使用通配符配置路由
/// 以下是一些基于HTTP的endpoints 路由选择的示例:
///
/// ```rust,no_run
/// # let mut app = summer_boot::new();
/// app.at("/");
/// app.at("/hello");
/// app.at("add_two/:num");
/// app.at("files/:user/*");
/// app.at("static/*path");
/// app.at("static/:context/:");
/// ```
///
/// 没有备用路由匹配,即资源已满
/// 匹配和没有匹配,意味着添加资源的顺序没有
pub fn at<'a>(&'a mut self, path: &str) -> Route<'a, State> {
let router = Arc::get_mut(&mut self.router).expect("服务器启动后无法注册路由");
Route::new(router, path.to_owned())
}
/// 向应用程序添加中间件。
///
/// 中间件提供请求/响应
/// 日志记录或标题修改。中间件在处理请求时被调用,并且可以
/// 继续处理(可能修改响应)或立即返回响应
/// 响应。有关详细信息,请参考 [`Middleware`] trait
///
/// 中间件只能在应用程序的 `顶层` 添加,并使用应用顺序
pub fn with<M>(&mut self, middleware: M) -> &mut Self
where
M: Middleware<State>,
{
log::trace!("正在添加中间件 {}", middleware.name());
let m = Arc::get_mut(&mut self.middleware).expect("服务器启动后无法注册中间件");
m.push(Arc::new(middleware));
self
}
/// 使用提供的侦听器异步为应用程序提供服务。
///
/// 这是调用 `summer_boot::Server::bind`, 记录`ListenInfo` 实例
/// 通过实例 `Listener::info`, 然后调用 `Listener::accept`.
///
/// # Examples
///
/// ```no_run
/// # use async_std::task::block_on;
/// # fn main() -> Result<(), std::io::Error> { block_on(async {
/// #
/// let mut app = summer_boot::new();
/// app.at("/").get(|_| async { Ok("Hello, world!") });
/// app.listen("127.0.0.1:8080").await?;
/// #
/// # Ok(()) }) }
/// ```
pub async fn listen<L: ToListener<State>>(self, listener: L) -> io::Result<()> {
let mut listener = listener.to_listener()?;
listener.bind(self).await?;
for info in listener.info().iter() {
log::info!("Server listening on {}", info);
}
listener.accept().await?;
Ok(())
}
/// 开发中 todo
///
/// 异步绑定侦听器。
///
/// 绑定侦听器。这将打开网络端口,但没有接受传入的连接。
/// 应调用 `Listener::listen` 开始连接
///
/// 调用 `Listener::info` 的时候可能出现多个 `ListenInfo` 实例返回
/// 这在使用例如 `ConcurrentListener` 时很有用
/// 因为它可以让单个服务器能够侦听多个端口。
///
/// # Examples
///
pub async fn bind<L: ToListener<State>>(
self,
listener: L,
) -> io::Result<<L as ToListener<State>>::Listener> {
let mut listener = listener.to_listener()?;
listener.bind(self).await?;
Ok(listener)
}
/// 响应 `Request`
///
/// 此方法对于直接测试endpoints
/// 或者通过自定义传输创建服务器。
///
/// # Examples
///
/// ```no_run
/// # #[async_std::main]
/// # async fn main() -> http_types::Result<()> {
/// #
/// use http_types::{Url, Method, Request, Response};
///
/// let mut app = summer_boot::new();
/// app.at("/").get(|_| async { Ok("hello world") });
///
/// let req = Request::new(Method::Get, Url::parse("https://example.com")?);
/// let res: Response = app.respond(req).await?;
///
/// assert_eq!(res.status(), 200);
/// #
/// # Ok(()) }
/// ```
pub async fn respond<Req, Res>(&self, req: Req) -> http_types::Result<Res>
where
Req: Into<http_types::Request>,
Res: From<http_types::Response>,
{
let req = req.into();
let Self {
router,
state,
middleware,
} = self.clone();
let method = req.method().to_owned();
let Selection { endpoint, params } = router.route(&req.url().path(), method);
let route_params = vec![params];
let req = Request::new(state, req, route_params);
let next = Next {
endpoint,
next_middleware: &middleware,
};
let res = next.run(req).await;
let res: http_types::Response = res.into();
Ok(res.into())
}
/// 获取对服务器状态的引用。用于测试和嵌套:
///
/// # Example
///
/// ```rust
/// # #[derive(Clone)] struct SomeAppState;
/// let mut app = summer_boot::with_state(SomeAppState);
/// let mut admin = summer_boot::with_state(app.state().clone());
/// admin.at("/").get(|_| async { Ok("nested app with cloned state") });
/// app.at("/").nest(admin);
/// ```
pub fn state(&self) -> &State {
&self.state
}
}
impl<State: Send + Sync + 'static> std::fmt::Debug for Server<State> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Server").finish()
}
}
impl<State: Clone> Clone for Server<State> {
fn clone(&self) -> Self {
Self {
router: self.router.clone(),
state: self.state.clone(),
middleware: self.middleware.clone(),
}
}
}
#[async_trait::async_trait]
impl<State: Clone + Sync + Send + 'static, InnerState: Clone + Sync + Send + 'static>
Endpoint<State> for Server<InnerState>
{
async fn call(&self, req: Request<State>) -> crate::Result {
let Request {
req,
mut route_params,
..
} = req;
let path = req.url().path().to_owned();
let method = req.method().to_owned();
let router = self.router.clone();
let middleware = self.middleware.clone();
let state = self.state.clone();
let Selection { endpoint, params } = router.route(&path, method);
route_params.push(params);
let req = Request::new(state, req, route_params);
let next = Next {
endpoint,
next_middleware: &middleware,
};
Ok(next.run(req).await)
}
}
#[cfg(test)]
mod test {
use crate as summer_boot;
#[test]
fn allow_nested_server_with_same_state() {
let inner = summer_boot::new();
let mut outer = summer_boot::new();
outer.at("/foo").get(inner);
}
#[test]
fn allow_nested_server_with_different_state() {
let inner = summer_boot::with_state(1);
let mut outer = summer_boot::new();
outer.at("/foo").get(inner);
}
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/server/endpoint.rs | summer-boot/src/server/endpoint.rs | use crate::utils;
use crate::{Middleware, Request, Response};
use async_std::future::Future;
use async_std::sync::Arc;
use async_trait::async_trait;
use http_types::Result;
use utils::middleware::Next;
/// HTTP请求处理。
///
/// 这个特效是为了 `Fn` 类型自动实现的,所以很少实现,由开发者提供
///
/// 实际上 endpoint是用`Request<State>`作为参数的函数,然后将实现的类型`T`(泛型)返回`Into<Response>`
///
/// # Examples
///
/// 这里利用的异步函数,但是只有Nightly版本才可以使用,如果要使用的话就需要启用Nightly版本
/// 这个例子对`GET`请求调用返回`String`
///
/// ```no_run
/// async fn hello(_req: summer_boot::Request<()>) -> summer_boot::Result<String> {
/// Ok(String::from("hello"))
/// }
///
/// let mut app = summer_boot::new();
/// app.at("/hello").get(hello);
/// ```
///
/// 如果不使用async异步的话,例子如下:
///
/// ```no_run
/// use core::future::Future;
/// fn hello(_req: summer_boot::Request<()>) -> impl Future<Output = summer_boot::Result<String>> {
/// async_std::future::ready(Ok(String::from("hello")))
/// }
///
/// let mut app = summer_boot::new();
/// app.at("/hello").get(hello);
/// ```
///
/// summer_boot也可以使用带有`Fn`的endpoint,但是一般建议用async异步
#[async_trait]
pub trait Endpoint<State: Clone + Send + Sync + 'static>: Send + Sync + 'static {
/// 上下文中调用endpoint
async fn call(&self, req: Request<State>) -> crate::Result;
}
pub(crate) type DynEndpoint<State> = dyn Endpoint<State>;
#[async_trait]
impl<State, F, Fut, Res> Endpoint<State> for F
where
State: Clone + Send + Sync + 'static,
F: Send + Sync + 'static + Fn(Request<State>) -> Fut,
Fut: Future<Output = Result<Res>> + Send + 'static,
Res: Into<Response> + 'static,
{
async fn call(&self, req: Request<State>) -> crate::Result {
let fut = (self)(req);
let res = fut.await?;
Ok(res.into())
}
}
pub(crate) struct MiddlewareEndpoint<E, State> {
endpoint: E,
middleware: Vec<Arc<dyn Middleware<State>>>,
}
impl<E: Clone, State> Clone for MiddlewareEndpoint<E, State> {
fn clone(&self) -> Self {
Self {
endpoint: self.endpoint.clone(),
middleware: self.middleware.clone(),
}
}
}
impl<E, State> std::fmt::Debug for MiddlewareEndpoint<E, State> {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
fmt,
"MiddlewareEndpoint (length: {})",
self.middleware.len(),
)
}
}
impl<E, State> MiddlewareEndpoint<E, State>
where
State: Clone + Send + Sync + 'static,
E: Endpoint<State>,
{
pub(crate) fn wrap_with_middleware(
ep: E,
middleware: &[Arc<dyn Middleware<State>>],
) -> Box<dyn Endpoint<State> + Send + Sync + 'static> {
if middleware.is_empty() {
Box::new(ep)
} else {
Box::new(Self {
endpoint: ep,
middleware: middleware.to_vec(),
})
}
}
}
#[async_trait]
impl<E, State> Endpoint<State> for MiddlewareEndpoint<E, State>
where
State: Clone + Send + Sync + 'static,
E: Endpoint<State>,
{
async fn call(&self, req: Request<State>) -> crate::Result {
let next = Next {
endpoint: &self.endpoint,
next_middleware: &self.middleware,
};
Ok(next.run(req).await)
}
}
#[async_trait]
impl<State: Clone + Send + Sync + 'static> Endpoint<State> for Box<dyn Endpoint<State>> {
async fn call(&self, request: Request<State>) -> crate::Result {
self.as_ref().call(request).await
}
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/log/logging_system.rs | summer-boot/src/log/logging_system.rs | use crate::log;
use crate::{Middleware, Next, Request};
/// 记录所有传入的请求和响应
///
/// 此中间件在Summer Boot中默认启用
///
/// # Examples
///
/// ```
/// let mut app = summer_boot::new();
/// app.with(summer_boot::log::LoggingSystem::new());
/// ```
#[derive(Debug, Default, Clone)]
pub struct LoggingSystem {
_priv: (),
}
struct LoggingSystemHasBeenRun;
impl LoggingSystem {
/// Create a new instance of `LogMiddleware`.
#[must_use]
pub fn new() -> Self {
Self { _priv: () }
}
/// Log a request and a response.
async fn log<'a, State: Clone + Send + Sync + 'static>(
&'a self,
mut req: Request<State>,
next: Next<'a, State>,
) -> crate::Result {
if req.ext::<LoggingSystemHasBeenRun>().is_some() {
return Ok(next.run(req).await);
}
req.set_ext(LoggingSystemHasBeenRun);
let path = req.url().path().to_owned();
let method = req.method().to_string();
log::info!("<-- Request received", {
method: method,
path: path,
});
let start = std::time::Instant::now();
let response = next.run(req).await;
let status = response.status();
if status.is_server_error() {
if let Some(error) = response.error() {
log::error!("Internal error --> Response sent", {
message: format!("{:?}", error),
error_type: error.type_name(),
method: method,
path: path,
status: format!("{} - {}", status as u16, status.canonical_reason()),
duration: format!("{:?}", start.elapsed()),
});
} else {
log::error!("Internal error --> Response sent", {
method: method,
path: path,
status: format!("{} - {}", status as u16, status.canonical_reason()),
duration: format!("{:?}", start.elapsed()),
});
}
} else if status.is_client_error() {
if let Some(error) = response.error() {
log::warn!("Client error --> Response sent", {
message: format!("{:?}", error),
error_type: error.type_name(),
method: method,
path: path,
status: format!("{} - {}", status as u16, status.canonical_reason()),
duration: format!("{:?}", start.elapsed()),
});
} else {
log::warn!("Client error --> Response sent", {
method: method,
path: path,
status: format!("{} - {}", status as u16, status.canonical_reason()),
duration: format!("{:?}", start.elapsed()),
});
}
} else {
log::info!("--> Response sent", {
method: method,
path: path,
status: format!("{} - {}", status as u16, status.canonical_reason()),
duration: format!("{:?}", start.elapsed()),
});
}
Ok(response)
}
}
#[async_trait::async_trait]
impl<State: Clone + Send + Sync + 'static> Middleware<State> for LoggingSystem {
async fn handle(&self, req: Request<State>, next: Next<'_, State>) -> crate::Result {
self.log(req, next).await
}
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/log/mod.rs | summer-boot/src/log/mod.rs | //! 事件日志记录类型
//!
//!
//! # Examples
//!
//! ```
//! use summer_boot::log;
//!
//! log::start();
//!
//! log::info!("Hello James");
//! log::debug!("{} eat rice", "James");
//! log::error!("this is an error!");
//! log::info!("{} are win", "test", {
//! key_1: "value1",
//! key_2: "value2",
//! });
//! ```
pub use kv_log_macro::{debug, error, info, log, trace, warn};
pub use kv_log_macro::{max_level, Level};
mod logging_system;
pub use femme::LevelFilter;
pub use logging_system::LoggingSystem;
/// 开启日志记录
pub fn start() {
femme::start();
crate::log::info!("Logger started");
crate::log::info!("
_____ ____ _
/ ____| | _ \\ | |
| (___ _ _ _ __ ___ _ __ ___ ___ _ __ | |_) | ___ ___ | |_
\\___ \\| | | | '_ ` _ \\| '_ ` _ \\ / _ \\ '__| | _ < / _ \\ / _ \\| __|
____) | |_| | | | | | | | | | | | __/ | | |_) | (_) | (_) | |_
|_____/ \\__,_|_| |_| |_|_| |_| |_|\\___|_| |____/ \\___/ \\___/ \\__|
:: Summer Boot Version:: (1.4.0)
");
}
/// 使用日志级别开启日志记录
pub fn with_level(level: LevelFilter) {
femme::with_level(level);
crate::log::info!("Logger started", { level: format!("{}", level) });
crate::log::info!("
_____ ____ _
/ ____| | _ \\ | |
| (___ _ _ _ __ ___ _ __ ___ ___ _ __ | |_) | ___ ___ | |_
\\___ \\| | | | '_ ` _ \\| '_ ` _ \\ / _ \\ '__| | _ < / _ \\ / _ \\| __|
____) | |_| | | | | | | | | | | | __/ | | |_) | (_) | (_) | |_
|_____/ \\__,_|_| |_| |_|_| |_| |_|\\___|_| |____/ \\___/ \\___/ \\__|
:: Summer Boot Version:: (1.4.0)
");
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/context/mod.rs | summer-boot/src/context/mod.rs | pub mod serve_dir;
pub mod serve_file;
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/context/serve_dir.rs | summer-boot/src/context/serve_dir.rs | use crate::log;
use crate::{Body, Endpoint, Request, Response, Result, StatusCode};
use async_std::path::PathBuf as AsyncPathBuf;
use std::path::{Path, PathBuf};
use std::{ffi::OsStr, io};
pub(crate) struct ServeDir {
prefix: String,
dir: PathBuf,
}
impl ServeDir {
/// 创建一个 `ServeDir` 新的实例。
pub(crate) fn new(prefix: String, dir: PathBuf) -> Self {
Self { prefix, dir }
}
}
#[async_trait::async_trait]
impl<State> Endpoint<State> for ServeDir
where
State: Clone + Send + Sync + 'static,
{
async fn call(&self, req: Request<State>) -> Result {
let path = req.url().path();
let path = path
.strip_prefix(&self.prefix.trim_end_matches('*'))
.unwrap();
let path = path.trim_start_matches('/');
let mut file_path = self.dir.clone();
for p in Path::new(path) {
if p == OsStr::new(".") {
continue;
} else if p == OsStr::new("..") {
file_path.pop();
} else {
file_path.push(&p);
}
}
log::info!("请求的文件 {:?}", file_path);
let file_path = AsyncPathBuf::from(file_path);
if !file_path.starts_with(&self.dir) {
log::warn!("没有权限尝试读取: {:?}", file_path);
Ok(Response::new(StatusCode::Forbidden))
} else {
match Body::from_file(&file_path).await {
Ok(body) => Ok(Response::builder(StatusCode::Ok).body(body).build()),
Err(e) if e.kind() == io::ErrorKind::NotFound => {
log::warn!("文件未找到: {:?}", &file_path);
Ok(Response::new(StatusCode::NotFound))
}
Err(e) => Err(e.into()),
}
}
}
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot/src/context/serve_file.rs | summer-boot/src/context/serve_file.rs | use crate::log;
use crate::{Body, Endpoint, Request, Response, Result, StatusCode};
use std::io;
use std::path::Path;
use async_std::path::PathBuf as AsyncPathBuf;
use async_trait::async_trait;
pub(crate) struct ServeFile {
path: AsyncPathBuf,
}
impl ServeFile {
/// 创建一个 `ServeFile` 新的实例。
pub(crate) fn init(path: impl AsRef<Path>) -> io::Result<Self> {
let file = path.as_ref().to_owned().canonicalize()?;
Ok(Self {
path: AsyncPathBuf::from(file),
})
}
}
#[async_trait]
impl<State: Clone + Send + Sync + 'static> Endpoint<State> for ServeFile {
async fn call(&self, _: Request<State>) -> Result {
match Body::from_file(&self.path).await {
Ok(body) => Ok(Response::builder(StatusCode::Ok).body(body).build()),
Err(e) if e.kind() == io::ErrorKind::NotFound => {
log::warn!("文件未找到: {:?}", &self.path);
Ok(Response::new(StatusCode::NotFound))
}
Err(e) => Err(e.into()),
}
}
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot-macro/src/lib.rs | summer-boot-macro/src/lib.rs | //! 运行时宏处理
//!
//! # main
//! 使用运行时宏来设置summerboot async运行时。参见[main]宏文档。
//!
//! # auto_scan
//! 提供了基础的`auto_scan`功能用于发现并自动注册路由。
//!
//! # post、get、delete、put、patch、head、options、connect、trace
//! 提供了简单的路由宏标注。
//!
//!
use proc_macro::TokenStream;
use proc_macro2::{Ident, Span};
use quote::{quote, ToTokens};
use serde::Deserialize;
use serde_json::Value;
use std::io::Read;
use std::fs;
use syn::{
parse_file, parse_macro_input, parse_quote, punctuated::Punctuated, AttributeArgs, Item,
ItemFn, Lit, Meta, NestedMeta, Pat, Stmt, Token,
};
/// 用于匹配项目根目录下的 `Cargo.toml` 文件。
/// 匹配规则为:
/// 1. workspace下的member的数组格式
/// 2. 在package下的name字段
#[derive(Debug, Deserialize)]
struct ConfWorkSpace {
workspace: Option<Member>,
package: Option<Name>,
}
/// 匹配workspace下的member数组格式
#[derive(Debug, Deserialize)]
struct Member {
members: Option<Vec<String>>,
}
/// 匹配package下的name字段
#[derive(Debug, Deserialize)]
struct Name {
#[allow(dead_code)]
name: String,
}
/// 用于标记 summer_boot web 的入口点
/// # Examples
/// ```
/// #[summer_boot::main]
/// async fn main() {
/// async { println!("Hello world"); }.await
/// }
/// ```
#[proc_macro_attribute]
pub fn main(_: TokenStream, item: TokenStream) -> TokenStream {
let mut input = parse_macro_input!(item as ItemFn);
let attrs = &input.attrs;
let vis = &input.vis;
let sig = &mut input.sig;
let body = &input.block;
let _name = &sig.ident;
if sig.asyncness.is_none() {
return syn::Error::new_spanned(sig.fn_token, "仅支持 async fn")
.to_compile_error()
.into();
}
sig.asyncness = None;
(quote! {
#(#attrs)*
#vis #sig {
summer_boot::rt::SummerRuntime::new()
.block_on(async move { #body });
}
})
.into()
}
/// 完成 summer_boot 项目下的自动扫描功能,会先扫描找到`summer_boot::run();`
/// 函数,然后在此处进行装配活动。也可以手动增加过滤路径或过滤文件。
/// 如果增加过滤路径,需要在末尾添加 `/`,如果增加过滤文件,需要在末尾添加 `.rs`。
///
/// 注意:如果需要在此处添加运行时,必须在当前宏的后面配置,否则无法完成装配
/// # Examples
/// ```rust
/// // #[summer_boot::auto_scan]
/// // #[summer_boot::auto_scan("summer-boot-tests/src/lib.rs")]
/// fn main() {
/// summer_boot::run();
/// }
/// ```
#[proc_macro_attribute]
pub fn auto_scan(args: TokenStream, input: TokenStream) -> TokenStream {
let mut project = Vec::<String>::new();
let mut filter_paths = Vec::<String>::new();
// 找到需要扫描的路径
let mut cargo_toml = fs::File::open("Cargo.toml").expect("Cargo Toml文件找不到");
let mut content = String::new();
cargo_toml
.read_to_string(&mut content)
.expect("Cargo内容为空");
// 根据包类型分别处理
if let Ok(conf_work_space) = toml::from_str::<ConfWorkSpace>(&content) {
if let Some(workspace) = conf_work_space.workspace {
if let Some(members) = workspace.members {
for member in members {
project.push(format!("{}/{}", member, "src"));
}
}
} else if project.len() == 0 {
if let Some(_) = conf_work_space.package {
project.push("src".to_string());
}
}
}
// 解析宏信息
let args = parse_macro_input!(args as AttributeArgs);
for arg in args {
if let NestedMeta::Lit(Lit::Str(project)) = arg {
filter_paths.push(project.value());
}
}
// 解析函数体
let mut input = parse_macro_input!(input as ItemFn);
// 查找主函数的位置和是否存在变量名
// 未找到则直接退出宏处理
// 变量名不存在则默认添加app
// 如果存在则会返回出来,供后续使用
if let Some((master_index, master_name)) = scan_master_fn(&mut input) {
// 解析yaml文件
let mut listener_addr = String::from("0.0.0.0:");
let mut app_context_path = String::from("");
let config = summer_boot_autoconfigure::load_conf();
if let Some(config) = config {
let read_server = serde_json::to_string(&config.server).expect("读取服务配置文件失败");
let v: Value = serde_json::from_str(&read_server).expect("读取服务配置文件失败");
let port = v["port"].to_string();
let context_path = v["context_path"].to_string();
listener_addr.push_str(&port);
app_context_path.push_str(&context_path);
}
// 开始扫描
for path in project {
scan_method(
&path,
&filter_paths,
&mut input,
&app_context_path,
(master_index, &master_name),
);
}
// 配置listen
input.block.stmts.push(parse_quote! {
#master_name.listen(#listener_addr).await.expect("配置listen失败");
});
}
// 构建新的函数结构,增加函数行
TokenStream::from(input.into_token_stream())
}
// 扫描函数,找到主函数
// 返回主函数所在的位置索引,并判断是否存在变量名
// 如果存在,则找到并返回
// 如果不存在,则删除默认主函数,添加新的主函数
fn scan_master_fn(input: &mut ItemFn) -> Option<(i32, Ident)> {
let mut master_index: i32 = -1;
let mut master_name = Ident::new("app", Span::call_site());
for (index, stmt) in (&mut input.block.stmts).iter_mut().enumerate() {
let master = stmt.to_token_stream().to_string();
if let Some(_) = master.find("summer_boot :: run()") {
master_index = index as i32;
}
}
if master_index < 0 {
None
} else {
if let Stmt::Local(local) = &input.block.stmts[master_index as usize] {
// 函数存在变量,需要获取变量名称
let pat = &local.pat;
if let Pat::Ident(pat_ident) = pat {
let name = pat_ident.ident.to_string();
master_name = Ident::new(&name, Span::call_site());
}
} else {
// 函数不存在变量,需要手动添加
// TODO 目前相对简单,删除当前函数,并添加指定的函数即可,后续建议修改
input.block.stmts.remove(master_index as usize);
input.block.stmts.insert(
master_index as usize,
parse_quote! {
let mut app = summer_boot::run();
},
)
}
Some((master_index, master_name))
}
}
// 判断是否是目录,如果是路径则需要循环递归处理,
// 如果是文件则直接处理
// 处理过程中会将函数调用函数拼接,然后插入到指定的位置 下标+1 的位置
fn scan_method(
path: &str,
filter_paths: &Vec<String>,
input_token_stream: &mut ItemFn,
context_path: &str,
(mut master_index, master_name): (i32, &Ident),
) {
if let Ok(entries) = fs::read_dir(path) {
for entry in entries {
if let Ok(entry) = entry {
let file_path = entry.path();
if file_path.is_file() {
if let Some(extension) = file_path.extension() {
if extension == "rs" {
if filter_paths.iter().any(|p| path.contains(p)) {
return;
}
// 如果是文件,则处理内部细节
let content = fs::read_to_string(entry.path()).expect("处理内部细节");
// 解析文件
let ast = parse_file(&content).expect("解析文件失败");
let items = ast.items;
for item in items {
if let Item::Fn(item) = item {
// 处理函数中的函数名,指定宏信息
for attr in item.attrs {
// 遍历所有宏信息
if let Meta::List(meta) =
attr.parse_meta().expect("所有所有宏信息")
{
// 判断宏是否为指定的宏
let attr_path = meta.path.to_token_stream().to_string();
let method = config_req_type(&attr_path);
if method.is_none() {
continue;
}
let method =
method.expect("是否为指定的宏").to_token_stream();
// 获取函数全路径名
let fn_name: &String = &item.sig.ident.to_string();
let fn_path_token_stream = config_function_path(
&file_path.to_str().unwrap_or("文件为空"),
fn_name,
);
// 如果是 summer_boot 的宏信息,则处理
let attr_url = meta
.nested
.into_iter()
.next()
.expect("summer_boot 的宏信息");
if let NestedMeta::Lit(Lit::Str(url)) = attr_url {
let url = url.value();
let url = format!("{}{}", context_path, url)
.replace("\"", "")
.replace("//", "/");
if input_token_stream.block.stmts.len() < 1 {
// 如果注入的方法中没有任何代码,则不操作
break;
} else {
// 添加,注意下标加 1
master_index += 1;
input_token_stream.block.stmts.insert(
master_index as usize,
parse_quote! {
#master_name.at(#url).#method(#fn_path_token_stream);
},
);
}
}
}
}
}
}
}
}
}
}
}
}
}
// 配置函数全路径
// 根据相对项目的绝对路径找到函数调用的全路径链
// 注意:目前无法完成文件中mod下的函数调用,无法找到
fn config_function_path(path: &str, fu_name: &str) -> proc_macro2::TokenStream {
let mut fn_path_idents = Punctuated::<Ident, Token![::]>::new();
fn_path_idents.push(Ident::new("crate", Span::call_site()));
// 配置函数路径
let names: Vec<&str> = path
[path.find("src").expect("转换src") + 4..path.rfind(".rs").expect("转换rs后缀")]
.split("/")
.collect();
let len = names.len();
for (index, name) in names.into_iter().enumerate() {
if (index + 1) == len {
// 最后一个文件名称如果是main、lib、test则不需要加入路径
match name {
"main" | "mod" | "lib" => {
break;
}
_ => {}
}
}
if !name.is_empty() {
// 配置文件包名
fn_path_idents.push(Ident::new(name, Span::call_site()));
}
}
// 配置函数名称
fn_path_idents.push(Ident::new(fu_name, Span::call_site()));
fn_path_idents.to_token_stream()
}
// 配置请求类型
fn config_req_type(attr_path: &str) -> Option<Ident> {
if attr_path == "summer_boot_macro :: get"
|| attr_path == "summer_boot :: get"
|| attr_path == "get"
|| attr_path == "summer_boot_macro :: head"
|| attr_path == "summer_boot :: head"
|| attr_path == "head"
|| attr_path == "summer_boot_macro :: put"
|| attr_path == "summer_boot :: put"
|| attr_path == "put"
|| attr_path == "summer_boot_macro :: post"
|| attr_path == "summer_boot :: post"
|| attr_path == "post"
|| attr_path == "summer_boot_macro :: delete"
|| attr_path == "summer_boot :: delete"
|| attr_path == "delete"
|| attr_path == "summer_boot_macro :: head"
|| attr_path == "summer_boot :: head"
|| attr_path == "head"
|| attr_path == "summer_boot_macro :: options"
|| attr_path == "summer_boot :: options"
|| attr_path == "options"
|| attr_path == "summer_boot_macro :: connect"
|| attr_path == "summer_boot :: connect"
|| attr_path == "connect"
|| attr_path == "summer_boot_macro :: patch"
|| attr_path == "summer_boot :: patch"
|| attr_path == "patch"
|| attr_path == "summer_boot_macro :: trace"
|| attr_path == "summer_boot :: trace"
|| attr_path == "trace"
{
if attr_path.starts_with("summer_boot_macro ::") {
return Some(Ident::new(
&attr_path["summer_boot_macro :: ".len()..],
Span::call_site(),
));
} else if attr_path.starts_with("summer_boot ::") {
return Some(Ident::new(
&attr_path["summer_boot :: ".len()..],
Span::call_site(),
));
} else {
return Some(Ident::new(attr_path, Span::call_site()));
}
} else {
return None;
}
}
macro_rules! doc_comment {
($x:expr; $($tt:tt)*) => {
#[doc = $x]
$($tt)*
};
}
macro_rules! method_macro {
(
$($method:ident,)+
) => {
$(doc_comment! {
concat!("
# 功能
创建路由接口,用于`summer_boot.new()`的返回值使用,
该函数提供了对应方法`summer_boot/src/web2/gateway/routes.rs`文件下的所有路由方法,
# 支持的路由如下:
- get
- head
- put
- post
- delete
- options
- connect
- patch
- trace
# 例子:
```rust
# use summer_boot::{Request, Result};
#[summer_boot_macro::", stringify!($method), r#"("/")]
async fn example(mut req: Request<()>) -> Result {
Ok(format!("Hello World").into())
}
```
"#);
#[proc_macro_attribute]
pub fn $method(_args: TokenStream, input: TokenStream) -> TokenStream {
let mut input = parse_macro_input!(input as ItemFn);
let attrs = &input.attrs;
let vis = &input.vis;
let sig = &mut input.sig;
let body = &input.block;
let _name = &sig.ident;
if sig.asyncness.is_none() {
return syn::Error::new_spanned(sig.fn_token, "仅支持 async fn")
.to_compile_error()
.into();
}
(quote! {
#(#attrs)*
#vis #sig
#body
}).into()
}
})+
};
}
method_macro!(get, head, put, post, delete, patch, trace, options, connect,);
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot-autoconfigure/src/lib.rs | summer-boot-autoconfigure/src/lib.rs | mod read_yml;
pub use read_yml::*;
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/summer-boot-autoconfigure/src/read_yml.rs | summer-boot-autoconfigure/src/read_yml.rs | use schemars::schema::RootSchema;
use serde::{Deserialize, Serialize};
use serde_json::{from_str as json_from_str, to_string_pretty};
use serde_yaml::from_str as yaml_from_str;
use std::{
fs::{self, read_to_string},
io::Read
};
#[derive(Serialize, Deserialize, Debug)]
pub struct GlobalConfig {
pub mysql: Option<Mysql>,
pub server: Option<Server>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Mysql {
pub host: String,
pub port: u32,
pub user: String,
pub password: String,
pub db: String,
pub pool_min_idle: u64,
pub pool_max_open: u64,
pub timeout_seconds: u64,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Server {
pub port: u32,
pub context_path: String,
}
#[derive(Serialize, Deserialize)]
pub struct Profiles {
pub active: String,
}
#[derive(Serialize, Deserialize)]
pub struct EnvConfig {
pub profiles: Profiles,
}
#[derive(Debug, Deserialize)]
struct ConfWorkSpace {
workspace: Option<Member>,
package: Option<Name>,
}
/// 匹配workspace下的member数组格式
#[derive(Debug, Deserialize)]
struct Member {
members: Option<Vec<String>>,
}
/// 匹配package下的name字段
#[derive(Debug, Deserialize)]
struct Name {
name: String,
}
///
/// 判断是workspace还是project
///
fn check_project_workspace() -> String {
let mut types: String = String::new();
// 找到需要扫描的路径
let mut cargo_toml = fs::File::open("Cargo.toml").expect("2222");
let mut content = String::new();
cargo_toml.read_to_string(&mut content).expect("3333");
// 根据包类型分别处理
if let Ok(conf_work_space) = toml::from_str::<ConfWorkSpace>(&content) {
if let Some(_) = conf_work_space.workspace {
types.push_str("workspace");
} else {
types.push_str("project");
}
}
types
}
///
/// 获取toml package_name
///
fn get_package_name() -> String {
let mut cargo_toml = fs::File::open("Cargo.toml").expect("4444");
let mut content = String::new();
cargo_toml.read_to_string(&mut content).expect("5555");
let mut projects = Vec::<String>::new();
if let Ok(conf_work_space) = toml::from_str::<ConfWorkSpace>(&content) {
if let Some(workspace) = conf_work_space.workspace {
if let Some(members) = workspace.members {
for member in members {
projects.push(format!("{}/src/resources/application.yml", member));
for project in &projects {
let check = fs::metadata(project).is_ok();
if check == true {
return member;
}
}
}
}
} else if projects.len() == 0 {
if let Some(package) = conf_work_space.package {
return package.name;
}
}
}
// report error
String::from("_")
}
///
/// 加载环境配置
///
pub fn load_env_conf() -> Option<EnvConfig> {
let mut path = String::new();
let types = check_project_workspace();
if types.eq("workspace") {
let package_name = get_package_name();
path = format!("{}/src/resources/application.yml", package_name);
} else if types.eq("project") {
path = format!("src/resources/application.yml");
}
let schema = yaml_from_str::<RootSchema>(&read_to_string(&path).unwrap_or_else(|_| {
panic!(
"Error loading configuration file {}, please check the configuration!",
&path
)
}));
return match schema {
Ok(json) => {
let data =
to_string_pretty(&json).expect("resources/application.yml file data error!");
let p: EnvConfig =
json_from_str(&*data).expect("Failed to transfer JSON data to EnvConfig object!");
return Some(p);
}
Err(err) => {
println!("{}", err);
None
}
};
}
///
/// Load the global configuration for the selected environment.
///
/// action: dev = development, test = testing, prod = production
///
pub fn load_global_config(action: String) -> Option<GlobalConfig> {
    let kind = check_project_workspace();
    // Same path resolution as load_env_conf, but targeting the
    // environment-specific application-<action>.yml file.
    let path = match kind.as_str() {
        "workspace" => format!(
            "{}/src/resources/application-{}.yml",
            get_package_name(),
            &action
        ),
        "project" => format!("src/resources/application-{}.yml", &action),
        _ => String::new(),
    };
    let content = read_to_string(&path).unwrap_or_else(|_| {
        panic!(
            "Error loading configuration file {}, please check the configuration!",
            &path
        )
    });
    match yaml_from_str::<RootSchema>(&content) {
        Ok(schema) => {
            let json_text = to_string_pretty(&schema).unwrap_or_else(|_| {
                panic!(
                    "{} file data error!, please check the configuration!",
                    path
                )
            });
            let conf = json_from_str(&json_text)
                .expect("Failed to transfer JSON data to BriefProConfig object!");
            Some(conf)
        }
        Err(err) => {
            println!("{}", err);
            None
        }
    }
}
///
/// Load the base environment configuration first, then load the global
/// configuration for whichever environment it marks as active.
///
pub fn load_conf() -> Option<GlobalConfig> {
    load_env_conf().and_then(|init| load_global_config(init.profiles.active))
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/example/src/lib.rs | example/src/lib.rs | mod read_yml;
mod log; | rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/example/src/http_method.rs | example/src/http_method.rs | use serde::Deserialize;
use summer_boot::{Request, Result};
/// JSON request body expected by `test_api` (`/user/getUserInfo`).
#[derive(Debug, Deserialize)]
struct User {
    name: String,
    age: u16,
}
/// GET /hello — responds with a static greeting.
#[summer_boot::get("/hello")]
pub async fn hello(_req: Request<()>) -> Result {
    Ok("Hello, Summer Boot".to_string().into())
}
/// POST /user/getUserInfo — deserializes a `User` from the JSON body
/// and echoes the fields back; body-parse failures propagate via `?`.
#[summer_boot::post("/user/getUserInfo")]
pub async fn test_api(mut req: Request<()>) -> Result {
    let User { name, age } = req.body_json().await?;
    Ok(format!("Hello, {}! {} years old", name, age).into())
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/example/src/log.rs | example/src/log.rs | #[cfg(test)]
mod test {
    use summer_boot::log;
    /// Smoke test for the logging facade: start the logger, then emit
    /// one entry at each supported level.
    #[test]
    fn log_print() {
        // log::start() must be called once to enable log recording
        // before any of the log macros produce output.
        log::start();
        log::info!("Hello Summer Boot");
        // Entry recorded at debug level.
        log::debug!("debug apps");
        log::error!("process error");
        log::warn!("warning apps");
    }
}
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/example/src/read_yml.rs | example/src/read_yml.rs | use schemars::schema::RootSchema;
use serde::{Deserialize, Serialize};
use serde_json::{from_str as json_from_str, to_string_pretty};
use serde_yaml::from_str as yaml_from_str;
use std::fs::read_to_string;
/// Root of the environment-specific configuration file
/// (application-<env>.yml); currently holds only MySQL settings.
#[derive(Serialize, Deserialize, Debug)]
pub struct GlobalConfig {
    pub mysql: Mysql,
}
/// MySQL connection and pool settings as declared in the YAML file.
#[derive(Debug, Serialize, Deserialize)]
pub struct Mysql {
    pub host: String,
    pub port: u32,
    pub user: String,
    pub password: String,
    pub db: String,
    // Connection-pool sizing/timeout knobs; exact semantics follow the
    // consuming pool implementation (not visible in this file).
    pub pool_min_idle: u64,
    pub pool_max_open: u64,
    pub timeout_seconds: u64,
}
/// `profiles` section of application.yml; `active` names the
/// environment whose application-<active>.yml should be loaded.
#[derive(Serialize, Deserialize)]
pub struct Profiles {
    pub active: String,
}
/// Root of the base configuration file (application.yml).
#[derive(Serialize, Deserialize)]
pub struct EnvConfig {
    pub profiles: Profiles,
}
/*
加载环境配置
*/
pub fn load_env_conf() -> Option<EnvConfig> {
let path = "src/resources/application.yml".to_string();
let schema = yaml_from_str::<RootSchema>(&read_to_string(&path).unwrap_or_else(|_| {
panic!(
"Error loading configuration file {}, please check the configuration!",
&path
)
}));
return match schema {
Ok(json) => {
let data = to_string_pretty(&json).expect("resources/app.yml file data error!");
let p: EnvConfig =
json_from_str(&*data).expect("Failed to transfer JSON data to EnvConfig object!");
return Some(p);
}
Err(err) => {
println!("{}", err);
None
}
};
}
/*
   Load the environment-specific global configuration.
   action: dev = development, test = testing, prod = production
*/
pub fn load_global_config(action: String) -> Option<GlobalConfig> {
    let path = format!("src/resources/application-{}.yml", &action);
    let schema = yaml_from_str::<RootSchema>(&read_to_string(&path).unwrap_or_else(|_| {
        panic!(
            "Error loading configuration file {}, please check the configuration!",
            &path
        )
    }));
    match schema {
        Ok(json) => {
            let data = to_string_pretty(&json).unwrap_or_else(|_| {
                panic!(
                    "{} file data error!, please check the configuration!",
                    path
                )
            });
            // Message fixed: the target type here is GlobalConfig; the
            // old text referenced a nonexistent "BriefProConfig".
            let p = json_from_str(&*data)
                .expect("Failed to transfer JSON data to GlobalConfig object!");
            Some(p)
        }
        Err(err) => {
            println!("{}", err);
            None
        }
    }
}
/*
   Load the base configuration first, then load the settings for the
   environment it selects as active.
*/
pub fn load_conf() -> Option<GlobalConfig> {
    load_env_conf().and_then(|init| load_global_config(init.profiles.active))
}
#[cfg(test)]
mod tests {
    use super::*;
    /// End-to-end: load application.yml plus the active environment's
    /// file and print the resulting MySQL settings.
    #[test]
    fn test_load_env_conf_mysql() {
        let pro = load_conf();
        println!("{:?}", pro);
        pro.as_ref().map(|a| {
            println!("mysqlConfig:{}", serde_json::to_string(&a.mysql).unwrap());
        });
    }
    /// Probe a few candidate directories for application.yml and print
    /// which of them contain it.
    #[test]
    fn load_env_file_test() {
        // let file = File::open("example/src/resources").expect("file not found");
        let file_name = "application.yml";
        let directories = vec!["src/resources", "spring-boot/src/resources", "dir3"];
        for directory in directories {
            let path = format!("{}/{}", directory, file_name);
            let file_exists = std::fs::metadata(&path).is_ok();
            if file_exists {
                println!("文件 {} 存在于目录 {}", file_name, directory);
            } else {
                println!("文件 {} 不存在于目录 {}", file_name, directory);
            }
        }
    }
    /// Load the dev environment's configuration directly.
    #[test]
    fn test_load_global_config() {
        let pro = load_global_config("dev".to_string());
        println!("{:?}", pro);
        pro.as_ref().map(|a| {
            println!("mysqlConfig:{}", serde_json::to_string(&a.mysql).unwrap());
        });
    }
    /// Same flow as test_load_env_conf_mysql, via load_conf() only.
    #[test]
    fn test_load_conf() {
        let pro = load_conf();
        println!("{:?}", pro);
        pro.as_ref().map(|a| {
            println!("mysqlConfig:{}", serde_json::to_string(&a.mysql).unwrap());
        });
    }
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
eairp/summer-boot | https://github.com/eairp/summer-boot/blob/3bacf5b9996a015c527fe5b7941d6f5d7a1e6335/example/src/main.rs | example/src/main.rs | use serde::Deserialize;
use summer_boot::{Request, Result};
mod http_method;
/// JSON request body expected by `test_api` (`/test/api`).
#[derive(Debug, Deserialize)]
struct User {
    name: String,
    age: u16,
}
// auto_scan registers the route handlers discovered in this crate;
// summer_boot::run() then starts the server with them.
#[summer_boot::auto_scan]
#[summer_boot::main]
async fn main() {
    summer_boot::run();
}
/// POST /test/api — deserializes a `User` from the JSON body and
/// echoes the fields back; body-parse failures propagate via `?`.
#[summer_boot::post("/test/api")]
async fn test_api(mut req: Request<()>) -> Result {
    let User { name, age } = req.body_json().await?;
    Ok(format!("Hello, {}! {} years old", name, age).into())
}
| rust | Apache-2.0 | 3bacf5b9996a015c527fe5b7941d6f5d7a1e6335 | 2026-01-04T20:24:35.228453Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/lib.rs | rust/experiments/src/lib.rs | use ::neural_network as nn;
extern crate num_cpus;
extern crate rayon;
use algebra::{fields::near_mersenne_64::F, FixedPoint, FixedPointParameters, Polynomial};
use bench_utils::*;
use io_utils::{counting::CountingIO, imux::IMuxSync};
use nn::{
layers::{
average_pooling::AvgPoolParams,
convolution::{Conv2dParams, Padding},
fully_connected::FullyConnectedParams,
Layer, LayerDims, LinearLayer, NonLinearLayer,
},
tensors::*,
NeuralArchitecture, NeuralNetwork,
};
use protocols::{neural_network::NNProtocol, AdditiveShare};
use rand::{CryptoRng, Rng, RngCore};
use std::{
io::{BufReader, BufWriter},
net::{TcpListener, TcpStream},
};
pub mod inference;
pub mod latency;
pub mod linear_only;
pub mod minionn;
pub mod mnist;
pub mod resnet32;
pub mod throughput;
pub mod validation;
/// Fixed-point parameter set used throughout the experiments; values
/// are encoded over the near-Mersenne 64-bit field `F`.
pub struct TenBitExpParams {}
impl FixedPointParameters for TenBitExpParams {
    type Field = F;
    // 3 mantissa + 8 exponent bits; the exact encoding is defined by
    // FixedPointParameters (declared outside this file).
    const MANTISSA_CAPACITY: u8 = 3;
    const EXPONENT_CAPACITY: u8 = 8;
}
// Shorthand: a plaintext fixed-point value and its additive secret share.
type TenBitExpFP = FixedPoint<TenBitExpParams>;
type TenBitAS = AdditiveShare<TenBitExpParams>;
/// Open 16 TCP connections to `addr` and multiplex them into a single
/// byte-counting reader/writer pair.
///
/// # Panics
/// Panics if any connection attempt or stream clone fails.
pub fn client_connect(
    addr: &str,
) -> (
    IMuxSync<CountingIO<BufReader<TcpStream>>>,
    IMuxSync<CountingIO<BufWriter<TcpStream>>>,
) {
    // TODO: Maybe change to rayon_num_threads
    let (readers, writers): (Vec<_>, Vec<_>) = (0..16)
        .map(|_| {
            let stream = TcpStream::connect(addr).unwrap();
            let reader = CountingIO::new(BufReader::new(stream.try_clone().unwrap()));
            let writer = CountingIO::new(BufWriter::new(stream));
            (reader, writer)
        })
        .unzip();
    (IMuxSync::new(readers), IMuxSync::new(writers))
}
/// Accept 16 TCP connections on `addr` and multiplex them into a
/// single byte-counting reader/writer pair (server-side counterpart of
/// `client_connect`).
///
/// # Panics
/// Panics on bind, accept, or stream-clone failure.
pub fn server_connect(
    addr: &str,
) -> (
    IMuxSync<CountingIO<BufReader<TcpStream>>>,
    IMuxSync<CountingIO<BufWriter<TcpStream>>>,
) {
    let listener = TcpListener::bind(addr).unwrap();
    let (readers, writers): (Vec<_>, Vec<_>) = listener
        .incoming()
        .take(16)
        .map(|stream| {
            let stream = stream.unwrap();
            let reader = CountingIO::new(BufReader::new(stream.try_clone().unwrap()));
            let writer = CountingIO::new(BufWriter::new(stream));
            (reader, writer)
        })
        .unzip();
    (IMuxSync::new(readers), IMuxSync::new(writers))
}
/// Run the client half of the secure NN inference protocol against the
/// server at `server_addr`, returning the client's inference output.
///
/// The offline (input-independent preprocessing) and online phases each
/// use a fresh multiplexed connection; the bytes read/written in each
/// phase are reported via `add_to_trace!`.
pub fn nn_client<R: RngCore + CryptoRng>(
    server_addr: &str,
    architecture: &NeuralArchitecture<TenBitAS, TenBitExpFP>,
    input: Input<TenBitExpFP>,
    rng: &mut R,
) -> Input<TenBitExpFP> {
    // Offline phase: produces the preprocessed client state.
    let (client_state, offline_read, offline_write) = {
        let (mut reader, mut writer) = client_connect(server_addr);
        (
            NNProtocol::offline_client_protocol(&mut reader, &mut writer, &architecture, rng)
                .unwrap(),
            reader.count(),
            writer.count(),
        )
    };
    // Online phase: consumes the preprocessed state plus the real input.
    let (client_output, online_read, online_write) = {
        let (mut reader, mut writer) = client_connect(server_addr);
        (
            NNProtocol::online_client_protocol(
                &mut reader,
                &mut writer,
                &input,
                &architecture,
                &client_state,
            )
            .unwrap(),
            reader.count(),
            writer.count(),
        )
    };
    add_to_trace!(|| "Offline Communication", || format!(
        "Read {} bytes\nWrote {} bytes",
        offline_read, offline_write
    ));
    add_to_trace!(|| "Online Communication", || format!(
        "Read {} bytes\nWrote {} bytes",
        online_read, online_write
    ));
    client_output
}
/// Run the server half of the secure NN inference protocol, mirroring
/// `nn_client`: an offline (preprocessing) phase and an online phase,
/// each on a fresh multiplexed connection, with byte counts reported
/// via `add_to_trace!`.
pub fn nn_server<R: RngCore + CryptoRng>(
    server_addr: &str,
    nn: &NeuralNetwork<TenBitAS, TenBitExpFP>,
    rng: &mut R,
) {
    let (server_state, offline_read, offline_write) = {
        let (mut reader, mut writer) = server_connect(server_addr);
        (
            NNProtocol::offline_server_protocol(&mut reader, &mut writer, &nn, rng).unwrap(),
            reader.count(),
            writer.count(),
        )
    };
    // The online phase's protocol result is discarded here; the client
    // side holds the inference output.
    let (_, online_read, online_write) = {
        let (mut reader, mut writer) = server_connect(server_addr);
        (
            NNProtocol::online_server_protocol(&mut reader, &mut writer, &nn, &server_state)
                .unwrap(),
            reader.count(),
            writer.count(),
        )
    };
    add_to_trace!(|| "Offline Communication", || format!(
        "Read {} bytes\nWrote {} bytes",
        offline_read, offline_write
    ));
    add_to_trace!(|| "Online Communication", || format!(
        "Read {} bytes\nWrote {} bytes",
        online_read, online_write
    ));
}
/// Draw a random value in (-1, 1), truncated to the fixed-point grid,
/// returning it both as an f64 and as its fixed-point encoding.
pub fn generate_random_number<R: Rng>(rng: &mut R) -> (f64, TenBitExpFP) {
    // Draw order matters for reproducibility with seeded RNGs:
    // sign first, then magnitude (matching the original).
    let sign = if rng.gen::<bool>() { -1.0 } else { 1.0 };
    let magnitude: f64 = rng.gen();
    let truncated = TenBitExpFP::truncate_float(magnitude * sign);
    (truncated, TenBitExpFP::from(truncated))
}
/// Sample a random 2-D convolution layer.
///
/// Fills the kernel and per-output-channel bias with random fixed-point
/// values, then builds the layer twice: once over additive shares (for
/// the protocol) and once over plaintext fixed point (reference copy).
/// When `vs` is given, the secret-shared layer's parameters are backed
/// by that torch variable store.
fn sample_conv_layer<R: RngCore + CryptoRng>(
    vs: Option<&tch::nn::Path>,
    input_dims: (usize, usize, usize, usize),
    kernel_dims: (usize, usize, usize, usize),
    stride: usize,
    padding: Padding,
    rng: &mut R,
) -> (
    LinearLayer<TenBitAS, TenBitExpFP>,
    LinearLayer<TenBitExpFP, TenBitExpFP>,
) {
    let mut kernel = Kernel::zeros(kernel_dims);
    // One bias value per output channel (kernel_dims.0 channels).
    let mut bias = Kernel::zeros((kernel_dims.0, 1, 1, 1));
    kernel
        .iter_mut()
        .for_each(|ker_i| *ker_i = generate_random_number(rng).1);
    bias.iter_mut()
        .for_each(|bias_i| *bias_i = generate_random_number(rng).1);
    let layer_params = match vs {
        Some(vs) => Conv2dParams::<TenBitAS, _>::new_with_gpu(
            vs,
            padding,
            stride,
            kernel.clone(),
            bias.clone(),
        ),
        None => Conv2dParams::<TenBitAS, _>::new(padding, stride, kernel.clone(), bias.clone()),
    };
    let output_dims = layer_params.calculate_output_size(input_dims);
    let layer_dims = LayerDims {
        input_dims,
        output_dims,
    };
    let layer = LinearLayer::Conv2d {
        dims: layer_dims,
        params: layer_params,
    };
    // Plaintext twin: same kernel/bias and dimensions, CPU-only params.
    let pt_layer_params =
        Conv2dParams::<TenBitExpFP, _>::new(padding, stride, kernel.clone(), bias.clone());
    let pt_layer = LinearLayer::Conv2d {
        dims: layer_dims,
        params: pt_layer_params,
    };
    (layer, pt_layer)
}
/// Sample a random fully-connected layer with `out_chn` outputs.
///
/// Weights are shaped (out_chn, C, H, W) over the input dimensions and
/// filled, together with per-output biases, with random fixed-point
/// values. Returns the layer over additive shares and a plaintext twin;
/// `vs` selects GPU-backed parameters when present.
fn sample_fc_layer<R: RngCore + CryptoRng>(
    vs: Option<&tch::nn::Path>,
    input_dims: (usize, usize, usize, usize),
    out_chn: usize,
    rng: &mut R,
) -> (
    LinearLayer<TenBitAS, TenBitExpFP>,
    LinearLayer<TenBitExpFP, TenBitExpFP>,
) {
    let weight_dims = (out_chn, input_dims.1, input_dims.2, input_dims.3);
    let mut weights = Kernel::zeros(weight_dims);
    weights
        .iter_mut()
        .for_each(|w_i| *w_i = generate_random_number(rng).1);
    // One bias per output neuron.
    let bias_dims = (out_chn, 1, 1, 1);
    let mut bias = Kernel::zeros(bias_dims);
    bias.iter_mut()
        .for_each(|w_i| *w_i = generate_random_number(rng).1);
    // Keep plaintext copies before the (possibly GPU-backed) params
    // take ownership of weights/bias.
    let pt_weights = weights.clone();
    let pt_bias = bias.clone();
    let params = match vs {
        Some(vs) => FullyConnectedParams::new_with_gpu(vs, weights, bias),
        None => FullyConnectedParams::new(weights, bias),
    };
    let output_dims = params.calculate_output_size(input_dims);
    let dims = LayerDims {
        input_dims,
        output_dims,
    };
    let pt_params = FullyConnectedParams::new(pt_weights, pt_bias);
    let layer = LinearLayer::FullyConnected { dims, params };
    let pt_layer = LinearLayer::FullyConnected {
        dims,
        params: pt_params,
    };
    (layer, pt_layer)
}
/// Build an identity linear layer (output dimensions equal input
/// dimensions) in both the secret-shared and plaintext representations.
#[allow(dead_code)]
fn sample_iden_layer(
    input_dims: (usize, usize, usize, usize),
) -> (
    LinearLayer<TenBitAS, TenBitExpFP>,
    LinearLayer<TenBitExpFP, TenBitExpFP>,
) {
    let dims = LayerDims {
        input_dims,
        output_dims: input_dims,
    };
    (
        LinearLayer::Identity { dims },
        LinearLayer::Identity { dims },
    )
}
/// Build an average-pooling layer over `pool_h` x `pool_w` windows with
/// the given stride; the averaging factor 1/(h*w) is baked into the
/// parameters as a fixed-point constant.
#[allow(dead_code)]
fn sample_avg_pool_layer(
    input_dims: (usize, usize, usize, usize),
    (pool_h, pool_w): (usize, usize),
    stride: usize,
) -> LinearLayer<TenBitAS, TenBitExpFP> {
    let normalizer = TenBitExpFP::from(1.0 / (pool_h * pool_w) as f64);
    let params = AvgPoolParams::new(pool_h, pool_w, stride, normalizer);
    let dims = LayerDims {
        input_dims,
        output_dims: params.calculate_output_size(input_dims),
    };
    LinearLayer::AvgPool { dims, params }
}
/// Append an activation layer to `nn`: a ReLU when the index of the
/// layer being added (== current layer count) appears in `relu_layers`,
/// otherwise the degree-2 polynomial activation with coefficients
/// (0.2, 0.5, 0.2) for x^0, x^1, x^2.
///
/// # Panics
/// Panics if `nn` has no layers yet.
fn add_activation_layer(nn: &mut NeuralNetwork<TenBitAS, TenBitExpFP>, relu_layers: &[usize]) {
    // `last()` already yields Option<&Layer>; the original's extra
    // `.as_ref()` was a redundant no-op and has been removed.
    let cur_input_dims = nn.layers.last().unwrap().output_dimensions();
    // Activations preserve dimensions.
    let layer_dims = LayerDims {
        input_dims: cur_input_dims,
        output_dims: cur_input_dims,
    };
    let num_layers_so_far = nn.layers.len();
    let layer = if relu_layers.contains(&num_layers_so_far) {
        Layer::NLL(NonLinearLayer::ReLU(layer_dims))
    } else {
        let activation_poly_coefficients = vec![
            TenBitExpFP::from(0.2),
            TenBitExpFP::from(0.5),
            TenBitExpFP::from(0.2),
        ];
        let poly = Polynomial::new(activation_poly_coefficients);
        let poly_layer = NonLinearLayer::PolyApprox {
            dims: layer_dims,
            poly,
            _v: std::marker::PhantomData,
        };
        Layer::NLL(poly_layer)
    };
    nn.layers.push(layer);
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/linear_only.rs | rust/experiments/src/linear_only.rs | use ::neural_network as nn;
use nn::{
layers::{convolution::Padding, Layer},
NeuralNetwork,
};
use rand::{CryptoRng, RngCore};
// (batch, channels, height, width)
type InputDims = (usize, usize, usize, usize);
use super::*;
/// Build four single-convolution networks (one per input/kernel
/// configuration) for linear-only benchmarks; each entry pairs the
/// input dimensions with the resulting network.
pub fn construct_networks<R: RngCore + CryptoRng>(
    vs: Option<&tch::nn::Path>,
    batch_size: usize,
    rng: &mut R,
) -> Vec<(InputDims, NeuralNetwork<TenBitAS, TenBitExpFP>)> {
    let input_dims = [
        (batch_size, 3, 32, 32),
        (batch_size, 16, 32, 32),
        (batch_size, 32, 16, 16),
        (batch_size, 64, 8, 8),
    ];
    let kernel_dims = [
        (16, 3, 3, 3),
        (16, 16, 3, 3),
        (32, 32, 3, 3),
        (64, 64, 3, 3),
    ];
    let mut networks = Vec::with_capacity(input_dims.len());
    // Walk the two parallel arrays together instead of indexing by `i`.
    for (&input_dims, &kernel_dims) in input_dims.iter().zip(kernel_dims.iter()) {
        let conv = sample_conv_layer(vs, input_dims, kernel_dims, 1, Padding::Same, rng).0;
        let network = match &vs {
            // With a variable store, evaluate on its torch device.
            Some(vs) => NeuralNetwork {
                layers: vec![Layer::LL(conv)],
                eval_method: ::neural_network::EvalMethod::TorchDevice(vs.device()),
            },
            None => NeuralNetwork {
                layers: vec![Layer::LL(conv)],
                ..Default::default()
            },
        };
        networks.push((input_dims, network));
    }
    networks
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/mnist.rs | rust/experiments/src/mnist.rs | use ::neural_network as nn;
use nn::{
layers::{convolution::Padding, Layer},
NeuralNetwork,
};
use rand::{CryptoRng, RngCore};
use super::*;
/// Build a LeNet-style MNIST network: conv -> pool -> conv -> pool ->
/// FC(100) -> FC(10), with an activation after each conv and after the
/// first FC. `num_poly` (0..=3) is how many of those activations use
/// the polynomial approximation instead of ReLU.
///
/// # Panics
/// Hits `unreachable!()` for num_poly > 3; also panics if the built
/// network fails dimension validation.
pub fn construct_mnist<R: RngCore + CryptoRng>(
    vs: Option<&tch::nn::Path>,
    batch_size: usize,
    num_poly: usize,
    rng: &mut R,
) -> NeuralNetwork<TenBitAS, TenBitExpFP> {
    // Layer indices (at insertion time) whose activation stays ReLU.
    let relu_layers = match num_poly {
        0 => vec![1, 4, 7],
        1 => vec![1, 4],
        2 => vec![1],
        3 => vec![],
        _ => unreachable!(),
    };
    let mut network = match &vs {
        Some(vs) => NeuralNetwork {
            layers: vec![],
            eval_method: ::neural_network::EvalMethod::TorchDevice(vs.device()),
        },
        None => NeuralNetwork {
            layers: vec![],
            ..Default::default()
        },
    };
    // Dimensions of input image.
    let input_dims = (batch_size, 1, 28, 28);
    // conv1: 16 output channels of 5x5 filters, no padding.
    let kernel_dims = (16, 1, 5, 5);
    let conv = sample_conv_layer(vs, input_dims, kernel_dims, 1, Padding::Valid, rng).0;
    network.layers.push(Layer::LL(conv));
    add_activation_layer(&mut network, &relu_layers);
    // 2x2 average pooling, stride 2.
    let input_dims = network.layers.last().unwrap().output_dimensions();
    let pool = sample_avg_pool_layer(input_dims, (2, 2), 2);
    network.layers.push(Layer::LL(pool));
    // conv2: 16 -> 16 channels, 5x5 filters.
    let input_dims = network.layers.last().unwrap().output_dimensions();
    let kernel_dims = (16, 16, 5, 5);
    let conv = sample_conv_layer(vs, input_dims, kernel_dims, 1, Padding::Valid, rng).0;
    network.layers.push(Layer::LL(conv));
    add_activation_layer(&mut network, &relu_layers);
    let input_dims = network.layers.last().unwrap().output_dimensions();
    let pool = sample_avg_pool_layer(input_dims, (2, 2), 2);
    network.layers.push(Layer::LL(pool));
    let fc_input_dims = network.layers.last().unwrap().output_dimensions();
    println!("Fc input dims: {:?}", fc_input_dims);
    let (fc, _) = sample_fc_layer(vs, fc_input_dims, 100, rng);
    network.layers.push(Layer::LL(fc));
    add_activation_layer(&mut network, &relu_layers);
    let fc_input_dims = network.layers.last().unwrap().output_dimensions();
    println!("Fc input dims: {:?}", fc_input_dims);
    let (fc, _) = sample_fc_layer(vs, fc_input_dims, 10, rng);
    network.layers.push(Layer::LL(fc));
    for layer in &network.layers {
        println!("Layer dim: {:?}", layer.input_dimensions());
    }
    // Sanity-check that consecutive layer dimensions line up.
    assert!(network.validate());
    network
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/minionn.rs | rust/experiments/src/minionn.rs | use ::neural_network as nn;
use nn::{
layers::{convolution::Padding, Layer},
NeuralNetwork,
};
use rand::{CryptoRng, RngCore};
use super::*;
/// Build the MiniONN CIFAR-10 network: seven 3x3/1x1 convolutions with
/// two average-pooling stages and a final 10-way FC. `num_poly` selects
/// how many activations use the polynomial approximation; each match
/// arm lists the activation sites that REMAIN ReLU.
///
/// NOTE(review): num_poly == 4 has no match arm and hits
/// `unreachable!()` — confirm whether that budget is intentionally
/// unsupported.
pub fn construct_minionn<R: RngCore + CryptoRng>(
    vs: Option<&tch::nn::Path>,
    batch_size: usize,
    num_poly: usize,
    rng: &mut R,
) -> NeuralNetwork<TenBitAS, TenBitExpFP> {
    let relu_layers = match num_poly {
        0 => vec![1, 3, 6, 8, 11, 13, 15],
        1 => vec![1, 3, 6, 8, 11, 13],
        2 => vec![1, 3, 6, 8, 11],
        3 => vec![3, 11, 13, 15],
        5 => vec![6, 11],
        6 => vec![11],
        7 => vec![],
        _ => unreachable!(),
    };
    let mut network = match &vs {
        Some(vs) => NeuralNetwork {
            layers: vec![],
            eval_method: ::neural_network::EvalMethod::TorchDevice(vs.device()),
        },
        None => NeuralNetwork {
            layers: vec![],
            ..Default::default()
        },
    };
    // Dimensions of input image.
    let input_dims = (batch_size, 3, 32, 32);
    // 1
    let kernel_dims = (64, 3, 3, 3);
    let conv = sample_conv_layer(vs, input_dims, kernel_dims, 1, Padding::Same, rng).0;
    network.layers.push(Layer::LL(conv));
    add_activation_layer(&mut network, &relu_layers);
    // 2
    let input_dims = network.layers.last().unwrap().output_dimensions();
    let kernel_dims = (64, 64, 3, 3);
    let conv = sample_conv_layer(vs, input_dims, kernel_dims, 1, Padding::Same, rng).0;
    network.layers.push(Layer::LL(conv));
    add_activation_layer(&mut network, &relu_layers);
    // 3
    let input_dims = network.layers.last().unwrap().output_dimensions();
    let pool = sample_avg_pool_layer(input_dims, (2, 2), 2);
    network.layers.push(Layer::LL(pool));
    // 4
    let input_dims = network.layers.last().unwrap().output_dimensions();
    let kernel_dims = (64, 64, 3, 3);
    let conv = sample_conv_layer(vs, input_dims, kernel_dims, 1, Padding::Same, rng).0;
    network.layers.push(Layer::LL(conv));
    add_activation_layer(&mut network, &relu_layers);
    // 5
    let input_dims = network.layers.last().unwrap().output_dimensions();
    let kernel_dims = (64, 64, 3, 3);
    let conv = sample_conv_layer(vs, input_dims, kernel_dims, 1, Padding::Same, rng).0;
    network.layers.push(Layer::LL(conv));
    add_activation_layer(&mut network, &relu_layers);
    // 6
    let input_dims = network.layers.last().unwrap().output_dimensions();
    let pool = sample_avg_pool_layer(input_dims, (2, 2), 2);
    network.layers.push(Layer::LL(pool));
    // 7
    let input_dims = network.layers.last().unwrap().output_dimensions();
    let kernel_dims = (64, 64, 3, 3);
    let conv = sample_conv_layer(vs, input_dims, kernel_dims, 1, Padding::Same, rng).0;
    network.layers.push(Layer::LL(conv));
    add_activation_layer(&mut network, &relu_layers);
    // 8 — 1x1 convolution, no padding needed.
    let input_dims = network.layers.last().unwrap().output_dimensions();
    let kernel_dims = (64, 64, 1, 1);
    let conv = sample_conv_layer(vs, input_dims, kernel_dims, 1, Padding::Valid, rng).0;
    network.layers.push(Layer::LL(conv));
    add_activation_layer(&mut network, &relu_layers);
    // 9 — 1x1 convolution reducing 64 -> 16 channels.
    let input_dims = network.layers.last().unwrap().output_dimensions();
    let kernel_dims = (16, 64, 1, 1);
    let conv = sample_conv_layer(vs, input_dims, kernel_dims, 1, Padding::Valid, rng).0;
    network.layers.push(Layer::LL(conv));
    add_activation_layer(&mut network, &relu_layers);
    // 10 — final 10-way classifier.
    let fc_input_dims = network.layers.last().unwrap().output_dimensions();
    let (fc, _) = sample_fc_layer(vs, fc_input_dims, 10, rng);
    network.layers.push(Layer::LL(fc));
    assert!(network.validate());
    network
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/resnet32.rs | rust/experiments/src/resnet32.rs | use ::neural_network as nn;
use nn::{
layers::{convolution::Padding, Layer},
NeuralNetwork,
};
use rand::{CryptoRng, RngCore};
use super::*;
// It may be the case that down-sampling happens here.
/// Append a two-convolution block: the first conv applies `stride` and
/// changes the channel count to `num_output_channels`; the second is a
/// stride-1, Same-padding conv that preserves dimensions. An activation
/// follows each conv.
fn conv_block<R: RngCore + CryptoRng>(
    nn: &mut NeuralNetwork<TenBitAS, TenBitExpFP>,
    vs: Option<&tch::nn::Path>,
    (k_h, k_w): (usize, usize),
    num_output_channels: usize,
    stride: usize,
    relu_layers: &[usize],
    rng: &mut R,
) {
    let cur_input_dims = nn.layers.last().as_ref().unwrap().output_dimensions();
    let c_in = cur_input_dims.1;
    let (conv_1, _) = sample_conv_layer(
        vs,
        cur_input_dims,
        (num_output_channels, c_in, k_h, k_w),
        stride,
        Padding::Same,
        rng,
    );
    nn.layers.push(Layer::LL(conv_1));
    add_activation_layer(nn, relu_layers);
    let cur_input_dims = nn.layers.last().as_ref().unwrap().output_dimensions();
    let c_in = cur_input_dims.1;
    let (conv_2, _) = sample_conv_layer(
        vs,
        cur_input_dims,
        (c_in, c_in, k_h, k_w), // Kernel dims
        1,                      // Stride = 1
        Padding::Same,
        rng,
    );
    nn.layers.push(Layer::LL(conv_2));
    add_activation_layer(nn, relu_layers);
}
// There's no down-sampling happening here, strides are always (1, 1).
/// Append two stride-1, Same-padding convolutions (each followed by an
/// activation) that preserve the current layer dimensions.
fn iden_block<R: RngCore + CryptoRng>(
    nn: &mut NeuralNetwork<TenBitAS, TenBitExpFP>,
    vs: Option<&tch::nn::Path>,
    (k_h, k_w): (usize, usize),
    relu_layers: &[usize],
    rng: &mut R,
) {
    let cur_input_dims = nn.layers.last().as_ref().unwrap().output_dimensions();
    let c_in = cur_input_dims.1;
    let (conv_1, _) = sample_conv_layer(
        vs,
        cur_input_dims,
        (c_in, c_in, k_h, k_w), // Kernel dims
        1,                      // stride
        Padding::Same,
        rng,
    );
    nn.layers.push(Layer::LL(conv_1));
    add_activation_layer(nn, relu_layers);
    // NOTE(review): conv_2 reuses cur_input_dims rather than re-reading
    // the post-conv_1 dimensions; that is only valid because a stride-1
    // Same-padding conv with c_in == c_out preserves dimensions.
    let (conv_2, _) = sample_conv_layer(
        vs,
        cur_input_dims,
        (c_in, c_in, k_h, k_w), // Kernel dims
        1,                      // stride
        Padding::Same,
        rng,
    );
    nn.layers.push(Layer::LL(conv_2));
    add_activation_layer(nn, relu_layers);
}
/// Append one ResNet stage: a conv block (which may down-sample and
/// changes the channel count to `c_out`) followed by `layer_size - 1`
/// dimension-preserving identity blocks.
fn resnet_block<R: RngCore + CryptoRng>(
    nn: &mut NeuralNetwork<TenBitAS, TenBitExpFP>,
    vs: Option<&tch::nn::Path>,
    layer_size: usize,
    c_out: usize,
    kernel_size: (usize, usize),
    stride: usize,
    relu_layers: &[usize],
    rng: &mut R,
) {
    conv_block(nn, vs, kernel_size, c_out, stride, relu_layers, rng);
    for _ in 0..(layer_size - 1) {
        iden_block(nn, vs, kernel_size, relu_layers, rng)
    }
}
/// Build a ResNet-32-shaped CIFAR-10 network: an initial 3x3 conv,
/// three 5-block stages at 16/32/64 channels (the latter two
/// down-sampling with stride 2), average pooling, and a 10-way FC.
///
/// `num_poly` chooses which of the 32 activation sites use the
/// polynomial approximation: the hard-coded lists are the poly sites
/// for specific budgets; any other value approximates the LAST
/// `num_poly` sites.
pub fn construct_resnet_32<R: RngCore + CryptoRng>(
    vs: Option<&tch::nn::Path>,
    batch_size: usize,
    num_poly: usize,
    rng: &mut R,
) -> NeuralNetwork<TenBitAS, TenBitExpFP> {
    use std::collections::HashSet;
    let mut relu_layers = Vec::new();
    // Activation sites (0..32) to replace with the polynomial.
    let poly_layers = match num_poly {
        6 => vec![3, 5, 18, 19, 26, 27],
        12 => vec![1, 2, 7, 10, 11, 12, 14, 16, 20, 21, 24, 28],
        14 => vec![1, 2, 4, 5, 8, 9, 14, 16, 19, 20, 21, 24, 26, 29],
        16 => vec![1, 3, 5, 6, 7, 8, 11, 15, 16, 17, 18, 19, 23, 24, 26, 29],
        18 => vec![
            1, 2, 4, 5, 8, 9, 10, 11, 14, 15, 17, 18, 20, 23, 24, 26, 27, 29,
        ],
        20 => vec![
            1, 2, 3, 5, 6, 8, 9, 10, 11, 13, 14, 16, 17, 18, 19, 21, 23, 25, 27, 28,
        ],
        22 => vec![
            1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 19, 20, 23, 25, 27, 29,
        ],
        24 => vec![
            1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 14, 15, 16, 17, 19, 21, 23, 24, 25, 26, 27, 28, 29,
        ],
        26 => vec![
            1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27,
            28, 29,
        ],
        _ => ((32 - num_poly)..32).collect::<Vec<_>>(),
    };
    let poly_layers: HashSet<_> = poly_layers.into_iter().collect();
    // Activation layers land at odd nn indices (2*l + 1); every site
    // not chosen as polynomial stays ReLU.
    for l in 0..32 {
        if !poly_layers.contains(&l) {
            relu_layers.push(2 * l + 1);
        }
    }
    let mut network = match &vs {
        Some(vs) => NeuralNetwork {
            layers: vec![],
            eval_method: ::neural_network::EvalMethod::TorchDevice(vs.device()),
        },
        None => NeuralNetwork {
            layers: vec![],
            ..Default::default()
        },
    };
    // Dimensions of input image.
    let input_dims = (batch_size, 3, 32, 32);
    // Dimensions of first kernel
    let kernel_dims = (16, 3, 3, 3);
    // Sample a random kernel.
    let (conv_1, _) = sample_conv_layer(
        vs,
        input_dims,
        kernel_dims,
        1, // Stride
        Padding::Same,
        rng,
    );
    network.layers.push(Layer::LL(conv_1));
    add_activation_layer(&mut network, &relu_layers);
    resnet_block(
        &mut network,
        vs,
        5,      // layer_size,
        16,     // c_out
        (3, 3), // kernel_size
        1,      // stride
        &relu_layers,
        rng,
    );
    resnet_block(
        &mut network,
        vs,
        5,      // layer_size,
        32,     // c_out
        (3, 3), // kernel_size
        2,      // stride
        &relu_layers,
        rng,
    );
    resnet_block(
        &mut network,
        vs,
        5,      // layer_size,
        64,     // c_out
        (3, 3), // kernel_size
        2,      // stride
        &relu_layers,
        rng,
    );
    let avg_pool_input_dims = network.layers.last().unwrap().output_dimensions();
    network.layers.push(Layer::LL(sample_avg_pool_layer(
        avg_pool_input_dims,
        (2, 2),
        2,
    )));
    let fc_input_dims = network.layers.last().unwrap().output_dimensions();
    let (fc, _) = sample_fc_layer(vs, fc_input_dims, 10, rng);
    network.layers.push(Layer::LL(fc));
    assert!(network.validate());
    network
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/validation/mod.rs | rust/experiments/src/validation/mod.rs | pub mod validate;
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/validation/validate.rs | rust/experiments/src/validation/validate.rs | use crate::*;
use neural_network::{ndarray::Array4, tensors::Input, NeuralArchitecture};
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use std::{
cmp,
sync::atomic::{AtomicUsize, Ordering},
};
// Fixed RNG seed: client and server must derive identical randomness,
// and validation runs stay reproducible.
const RANDOMNESS: [u8; 32] = [
    0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
    0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
/// Numerically-stabilized softmax over every element of `x`: subtracts
/// the running maximum, exponentiates in f64, then rescales by the
/// reciprocal of the fixed-point sum.
pub fn softmax(x: &Input<TenBitExpFP>) -> Input<TenBitExpFP> {
    // Running maximum, seeded with the first element.
    let mut max: TenBitExpFP = x[[0, 0, 0, 0]];
    for e in x.iter() {
        if let cmp::Ordering::Less = max.cmp(e) {
            max = *e;
        }
    }
    let mut e_x: Input<TenBitExpFP> = x.clone();
    for e in e_x.iter_mut() {
        *e = f64::from(*e - max).exp().into();
    }
    let total = e_x.iter().fold(TenBitExpFP::zero(), |sum, val| sum + *val);
    let inv_total = 1.0 / f64::from(total);
    for e in e_x.iter_mut() {
        *e *= inv_total.into();
    }
    e_x
}
/// Validate secure inference end to end over a batch of images.
///
/// Spawns client/server pairs on loopback ports (8001 + offset, one
/// per in-flight image), compares each protocol result's argmax to the
/// ground-truth class and to the recorded plaintext-model verdict, and
/// tallies catastrophic (huge l1 norm) vs. non-catastrophic mismatches.
pub fn run(
    network: NeuralNetwork<TenBitAS, TenBitExpFP>,
    architecture: NeuralArchitecture<TenBitAS, TenBitExpFP>,
    images: Vec<Array4<f64>>,
    classes: Vec<i64>,
    plaintext: Vec<i64>,
) {
    let base_port = 8001;
    // Shared state: next image index, next port offset, result tallies.
    let image_idx = AtomicUsize::new(0);
    let port_idx = AtomicUsize::new(0);
    let correct = AtomicUsize::new(0);
    let correct_pt = AtomicUsize::new(0);
    let cat_failures = AtomicUsize::new(0);
    let non_cat_failures = AtomicUsize::new(0);
    let thread_fn = || {
        let i = image_idx.fetch_add(1, Ordering::SeqCst);
        if i >= images.len() {
            return;
        }
        // Each concurrent image gets its own loopback port.
        let port_off = port_idx.fetch_add(1, Ordering::SeqCst);
        let server_addr = format!("127.0.0.1:{}", base_port + port_off);
        // Both sides seed from the same fixed RANDOMNESS constant.
        let mut server_rng = ChaChaRng::from_seed(RANDOMNESS);
        let mut client_rng = ChaChaRng::from_seed(RANDOMNESS);
        let mut client_output = Output::zeros((1, 10, 0, 0));
        crossbeam::thread::scope(|s| {
            let server_output = s.spawn(|_| nn_server(&server_addr, &network, &mut server_rng));
            client_output = nn_client(
                &server_addr,
                &architecture,
                (images[i].clone()).into(),
                &mut client_rng,
            );
            server_output.join().unwrap();
        })
        .unwrap();
        let sm = softmax(&client_output);
        // 0./0. seeds the fold with NaN; f64::max then returns the
        // non-NaN operand, so the first real value takes over.
        let max = sm.iter().map(|e| f64::from(*e)).fold(0. / 0., f64::max);
        let index = sm.iter().position(|e| f64::from(*e) == max).unwrap() as i64;
        // Check the l1 norm of the resulting vector. If it's above 5000
        // we probably had a catastrophic failure so tally that.
        let mut big_fail = false;
        if client_output
            .iter()
            .fold(0.0, |acc, x| acc + f64::from(*x).abs())
            > 5000.0
        {
            big_fail = true;
            cat_failures.fetch_add(1, Ordering::SeqCst);
        }
        if index == classes[i] {
            correct.fetch_add(1, Ordering::SeqCst);
        }
        if index == classes[i] && (plaintext[i] == 1) {
            correct_pt.fetch_add(1, Ordering::SeqCst);
        }
        // Disagreement with the plaintext model in either direction.
        if (index == classes[i] && plaintext[i] == 0) || (index != classes[i] && plaintext[i] == 1)
        {
            println!(
                "DIFFERED ON IMAGE {} - Correct is {}, and plaintext is {}",
                i, classes[i], plaintext[i]
            );
            if !big_fail {
                non_cat_failures.fetch_add(1, Ordering::SeqCst);
                println!("Protocol result: [");
                for result in &client_output {
                    println!("  {:?}, {}", result, result);
                }
                println!("Softmax:");
                for result in &sm {
                    println!("  {:?}, {}", result, result);
                }
                println!("]");
                println!("Out: {}", index);
            }
        } else {
            println!("IMAGE {} CORRECT!", i);
        }
    };
    // Only spawn as many threads as will fit on the cores
    let num_threads = num_cpus::get() / 2;
    for _ in (0..images.len()).step_by(num_threads) {
        crossbeam::thread::scope(|s| {
            for _ in 0..num_threads {
                s.spawn(|_| thread_fn()).join().unwrap();
            }
        })
        .unwrap();
        // Release the port offsets for the next wave.
        port_idx.fetch_sub(num_threads, Ordering::SeqCst);
    }
    let correct = correct.into_inner();
    let correct_pt = correct_pt.into_inner();
    let cat_failures = cat_failures.into_inner();
    let non_cat_failures = non_cat_failures.into_inner();
    println!("Overall Correct: {}", correct);
    println!("Plaintext Correct: {}", correct_pt);
    println!("Catastrophic Failures: {}", cat_failures);
    println!("Non-Catastrophic Failures: {}", non_cat_failures);
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/validation/minionn/validate.rs | rust/experiments/src/validation/minionn/validate.rs | use clap::{App, Arg, ArgMatches};
use experiments::minionn::construct_minionn;
use neural_network::{ndarray::Array4, npy::NpyData};
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use std::{io::Read, path::Path};
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
/// Parse the validation CLI: the weights file, the test-image directory,
/// and the number of polynomial-approximated layers.
fn get_args() -> ArgMatches<'static> {
    let weights = Arg::with_name("weights")
        .short("w")
        .long("weights")
        .takes_value(true)
        .help("Path to weights")
        .required(true);
    let images = Arg::with_name("images")
        .short("i")
        .long("images")
        .takes_value(true)
        .help("Path to test images")
        .required(true);
    let layers = Arg::with_name("layers")
        .short("l")
        .long("layers")
        .takes_value(true)
        .help("Number of polynomial layers (0-7)")
        .required(true);
    App::new("minionn-accuracy")
        .arg(weights)
        .arg(images)
        .arg(layers)
        .get_matches()
}
/// Build the MiniONN network, load its trained weights, read the labelled
/// validation images from disk, and hand everything to the validation runner.
fn main() {
    let args = get_args();
    let mut rng = ChaChaRng::from_seed(RANDOMNESS);
    let weights = args.value_of("weights").unwrap();
    let images_dir = args.value_of("images").unwrap();
    let layers = clap::value_t!(args.value_of("layers"), usize).unwrap();
    // Build network, capture its architecture, then overwrite the random
    // initialization with the trained weights.
    let mut network = construct_minionn(None, 1, layers, &mut rng);
    let architecture = (&network).into();
    network.from_numpy(&weights).unwrap();
    // Helper: slurp one .npy file under the data directory into raw bytes.
    let data_dir = Path::new(&images_dir);
    let read_npy = |name: String| -> Vec<u8> {
        let mut bytes = vec![];
        std::fs::File::open(data_dir.join(Path::new(&name)))
            .unwrap()
            .read_to_end(&mut bytes)
            .unwrap();
        bytes
    };
    // Ground-truth classes and the plaintext model's correctness flags.
    let class_bytes = read_npy("classes.npy".to_string());
    let classes: Vec<i64> = NpyData::from_bytes(&class_bytes).unwrap().to_vec();
    let pt_bytes = read_npy("plaintext.npy".to_string());
    let plaintext: Vec<i64> = NpyData::from_bytes(&pt_bytes).unwrap().to_vec();
    // One 1x3x32x32 image tensor per labelled class.
    let images: Vec<Array4<f64>> = (0..classes.len())
        .map(|i| {
            let image_bytes = read_npy(format!("image_{}.npy", i));
            let pixels: Vec<f64> = NpyData::from_bytes(&image_bytes).unwrap().to_vec();
            Array4::from_shape_vec((1, 3, 32, 32), pixels).unwrap()
        })
        .collect();
    experiments::validation::validate::run(network, architecture, images, classes, plaintext);
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/throughput/client.rs | rust/experiments/src/throughput/client.rs | use crate::*;
use ::neural_network::{tensors::Input, NeuralArchitecture};
use protocols::neural_network::NNProtocol;
use rand::thread_rng;
use std::{io::BufReader, net::TcpStream};
/// Throughput benchmark, client side.
///
/// Runs the offline (preprocessing) protocol once per architecture, each over
/// a fresh TCP connection, then spawns `num_clients` scoped threads that each
/// open a new connection and time one online inference.
///
/// * `num_clients`   - number of concurrent online-phase client threads.
/// * `server_addr`   - "host:port" of the benchmark server.
/// * `architectures` - (input-dimensions, architecture) pairs; the offline
///                     phase runs for all of them, but the online phase only
///                     ever exercises index 0 (see NOTE below).
/// * `rng`           - randomness source for the offline phase and inputs.
pub fn nn_client<R: RngCore + CryptoRng>(
    num_clients: usize,
    server_addr: &str,
    architectures: &[(
        (usize, usize, usize, usize),
        NeuralArchitecture<TenBitAS, TenBitExpFP>,
    )],
    rng: &mut R,
) {
    // Sample a random input.
    let input_dims = architectures[0]
        .1
        .layers
        .first()
        .unwrap()
        .input_dimensions();
    // First dimension of the input is the batch size.
    let batch_size = input_dims.0;
    // Offline phase: one connection and one client state per architecture.
    let mut client_states = Vec::new();
    for (_, architecture) in architectures {
        let client_state = {
            // client's connection to server.
            let stream = TcpStream::connect(server_addr).expect("connecting to server failed");
            let mut read_stream = IMuxSync::new(vec![BufReader::new(stream.try_clone().unwrap())]);
            let mut write_stream = IMuxSync::new(vec![stream]);
            NNProtocol::offline_client_protocol(
                &mut read_stream,
                &mut write_stream,
                &architecture,
                rng,
            )
            .unwrap()
        };
        client_states.push(client_state);
    }
    // One random input tensor per architecture.
    let mut inputs = Vec::new();
    for (input_dims, _) in architectures {
        let mut input = Input::zeros(*input_dims);
        input
            .iter_mut()
            .for_each(|in_i| *in_i = generate_random_number(rng).1);
        inputs.push(input);
    }
    let start = std::time::Instant::now();
    // Online phase: each scoped thread opens a fresh connection and times a
    // single inference.
    let _ = crossbeam::thread::scope(|s| {
        let mut results = Vec::new();
        for _ in 0..num_clients {
            let result = s.spawn(|_| {
                let mut rng = thread_rng();
                // NOTE(review): a random index is drawn here but never used —
                // every thread runs architecture/input/state 0.
                let _index: usize = rng.next_u64() as usize % 4;
                let architecture = &architectures[0].1;
                let input = &inputs[0];
                let client_state = &client_states[0];
                let start = std::time::Instant::now();
                let stream = TcpStream::connect(server_addr).expect("connecting to server failed");
                let mut read_stream =
                    IMuxSync::new(vec![BufReader::new(stream.try_clone().unwrap())]);
                let mut write_stream = IMuxSync::new(vec![stream]);
                let _ = NNProtocol::online_client_protocol(
                    &mut read_stream,
                    &mut write_stream,
                    &input,
                    architecture,
                    &client_state,
                )
                .unwrap();
                // Each thread reports its own elapsed online time.
                start.elapsed()
            });
            results.push(result);
        }
        // Per-thread latency (labelled with the batch size, i.e. the number
        // of images served by that single inference).
        for result in results {
            println!(
                "Served {} clients in {}s",
                batch_size,
                result.join().unwrap().as_millis() as f64 / 1000.0
            );
        }
    })
    .unwrap();
    // Aggregate wall-clock time over all concurrent clients.
    let end = start.elapsed();
    println!(
        "Served {} clients in {}s",
        batch_size * num_clients,
        end.as_millis() as f64 / 1000.0
    );
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/throughput/mod.rs | rust/experiments/src/throughput/mod.rs | pub mod client;
pub mod server;
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/throughput/server.rs | rust/experiments/src/throughput/server.rs | use crate::*;
use protocols::neural_network::NNProtocol;
use rand::{CryptoRng, RngCore};
use std::{io::BufReader, net::TcpListener};
/// Throughput benchmark, server side.
///
/// Accepts one connection per network for the offline phase, then serves
/// online-phase requests: every subsequent incoming connection is handled on
/// its own scoped thread. `TcpListener::incoming` never returns `None`, so
/// the accept loop runs until the process is killed.
///
/// * `server_addr` - "host:port" to bind.
/// * `nns`         - (input-dimensions, network) pairs; the online phase only
///                   ever serves `nns[0]` (mirrors the client side).
/// * `rng`         - randomness source for the offline protocol.
pub fn nn_server<R: RngCore + CryptoRng>(
    server_addr: &str,
    nns: &[(
        (usize, usize, usize, usize),
        NeuralNetwork<TenBitAS, TenBitExpFP>,
    )],
    rng: &mut R,
) {
    let server_listener = TcpListener::bind(server_addr).unwrap();
    // Offline phase: one accepted connection and one server state per network.
    let mut server_states = Vec::new();
    for (_, nn) in nns {
        let server_state = {
            // client's connection to server.
            let stream = server_listener
                .incoming()
                .next()
                .unwrap()
                .expect("server connection failed!");
            let mut read_stream = IMuxSync::new(vec![BufReader::new(stream.try_clone().unwrap())]);
            let mut write_stream = IMuxSync::new(vec![stream]);
            NNProtocol::offline_server_protocol(&mut read_stream, &mut write_stream, &nn, rng)
                .unwrap()
        };
        server_states.push(server_state);
    }
    // Online phase: spawn a scoped thread per accepted connection.
    let _ = crossbeam::thread::scope(|s| {
        let mut results = Vec::new();
        for stream in server_listener.incoming() {
            let result = s.spawn(|_| {
                let stream = stream.expect("server connection failed!");
                let mut read_stream =
                    IMuxSync::new(vec![BufReader::new(stream.try_clone().unwrap())]);
                let mut write_stream = IMuxSync::new(vec![stream]);
                NNProtocol::online_server_protocol(
                    &mut read_stream,
                    &mut write_stream,
                    &nns[0].1,
                    &server_states[0],
                )
                .unwrap()
            });
            results.push(result);
        }
        // Unreachable in practice (the accept loop above never ends), kept
        // for completeness of the scope.
        for result in results {
            let _ = result.join().unwrap();
        }
    })
    .unwrap();
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/throughput/linear_only/client.rs | rust/experiments/src/throughput/linear_only/client.rs | use clap::{App, Arg, ArgMatches};
use experiments::linear_only::construct_networks;
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
/// Parse the throughput-client CLI: server address, client count, batch
/// size, optional GPU flag, and optional port.
fn get_args() -> ArgMatches<'static> {
    let ip = Arg::with_name("ip")
        .short("i")
        .long("ip")
        .takes_value(true)
        .help("Server IP address")
        .required(true);
    let clients = Arg::with_name("clients")
        .short("c")
        .long("clients")
        .takes_value(true)
        .help("Number of clients")
        .required(true);
    let batch = Arg::with_name("batch")
        .short("b")
        .long("batch")
        .takes_value(true)
        .help("Batch size")
        .required(true);
    // Presence-only flag (no value).
    let gpu = Arg::with_name("gpu")
        .short("g")
        .long("gpu")
        .help("Whether to use a GPU (0/1)");
    let port = Arg::with_name("port")
        .short("p")
        .long("port")
        .takes_value(true)
        .help("Server port (default 8000)")
        .required(false);
    App::new("tp-client")
        .arg(ip)
        .arg(clients)
        .arg(batch)
        .arg(gpu)
        .arg(port)
        .get_matches()
}
/// Entry point for the throughput benchmark client: build the networks,
/// convert them to architectures, and run the client driver.
fn main() {
    let matches = get_args();
    let mut rng = ChaChaRng::from_seed(RANDOMNESS);
    // Assemble "<ip>:<port>", defaulting the port to 8000.
    let server_addr = format!(
        "{}:{}",
        matches.value_of("ip").unwrap(),
        matches.value_of("port").unwrap_or("8000")
    );
    let num_clients = clap::value_t!(matches.value_of("clients"), usize).unwrap();
    let batch_size = clap::value_t!(matches.value_of("batch"), usize).unwrap();
    // Put the torch variable store on the GPU only when the flag is present.
    let device = if matches.is_present("gpu") {
        tch::Device::cuda_if_available()
    } else {
        tch::Device::Cpu
    };
    let vs = tch::nn::VarStore::new(device);
    let architectures = construct_networks(Some(&vs.root()), batch_size, &mut rng)
        .into_iter()
        .map(|(dims, net)| (dims, (&net).into()))
        .collect::<Vec<_>>();
    experiments::throughput::client::nn_client(num_clients, &server_addr, &architectures, &mut rng);
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/throughput/linear_only/server.rs | rust/experiments/src/throughput/linear_only/server.rs | use clap::{App, Arg, ArgMatches};
use experiments::linear_only::construct_networks;
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
/// Parse the throughput-*server* CLI: batch size, optional GPU flag, and
/// optional port.
///
/// Fix: the app previously announced itself as "tp-client" in help/usage
/// output — an apparent copy-paste from the client binary's CLI. The name is
/// only used for clap's generated help text, so changing it does not affect
/// argument parsing.
fn get_args() -> ArgMatches<'static> {
    App::new("tp-server")
        .arg(
            Arg::with_name("batch")
                .short("b")
                .long("batch")
                .takes_value(true)
                .help("Batch size")
                .required(true),
        )
        .arg(
            // Presence-only flag (no value).
            Arg::with_name("gpu")
                .short("g")
                .long("gpu")
                .help("Whether to use a GPU (0/1)"),
        )
        .arg(
            Arg::with_name("port")
                .short("p")
                .long("port")
                .takes_value(true)
                .help("Server port (default 8000)")
                .required(false),
        )
        .get_matches()
}
/// Entry point for the throughput benchmark server: build the networks and
/// serve them on all interfaces.
fn main() {
    let matches = get_args();
    let mut rng = ChaChaRng::from_seed(RANDOMNESS);
    let batch_size = clap::value_t!(matches.value_of("batch"), usize).unwrap();
    // Listen on all interfaces; the port defaults to 8000.
    let server_addr = format!("0.0.0.0:{}", matches.value_of("port").unwrap_or("8000"));
    // Put the torch variable store on the GPU only when the flag is present.
    let device = if matches.is_present("gpu") {
        tch::Device::cuda_if_available()
    } else {
        tch::Device::Cpu
    };
    let vs = tch::nn::VarStore::new(device);
    let networks = construct_networks(Some(&vs.root()), batch_size, &mut rng);
    experiments::throughput::server::nn_server(&server_addr, &networks, &mut rng);
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/latency/client.rs | rust/experiments/src/latency/client.rs | use crate::*;
use algebra::{fields::PrimeField, FpParameters};
use crypto_primitives::gc::{fancy_garbling, fancy_garbling::Wire};
use io_utils::{counting::CountingIO, imux::IMuxSync};
use neural_network::{
layers::{LayerInfo, LinearLayerInfo, NonLinearLayerInfo},
NeuralArchitecture,
};
use num_traits::identities::Zero;
use ocelot::ot::{AlszReceiver as OTReceiver, Receiver};
use protocols::{
gc::ClientGcMsgRcv,
linear_layer::{LinearProtocol, OfflineClientKeySend},
};
use protocols_sys::*;
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use scuttlebutt::Channel;
use std::{
collections::BTreeMap,
io::{BufReader, BufWriter},
net::TcpStream,
};
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
/// Run the client side of the full latency benchmark against `server_addr`.
///
/// Generates a uniformly random input matching the network's first-layer
/// input dimensions, then delegates to the shared `crate::nn_client` driver.
pub fn nn_client<R: RngCore + CryptoRng>(
    server_addr: &str,
    architecture: NeuralArchitecture<TenBitAS, TenBitExpFP>,
    rng: &mut R,
) {
    // The network's input shape is dictated by its first layer.
    let dims = architecture.layers.first().unwrap().input_dimensions();
    // Fill a fresh input tensor with random field elements.
    let mut random_input = Input::zeros(dims);
    for entry in random_input.iter_mut() {
        *entry = generate_random_number(rng).1;
    }
    crate::nn_client(server_addr, &architecture, random_input, rng);
}
/// Client half of the linear-layer offline (ciphertext-generation) phase for
/// the subset of layers in `layers`.
///
/// For every Conv2d/FullyConnected layer this runs the homomorphic offline
/// protocol over `reader`/`writer`; AvgPool/Identity layers are evaluated
/// locally on the previous layer's output share. Non-linear layers must not
/// appear in `layers` (they panic).
///
/// NOTE(review): `in_shares`/`out_shares` are accumulated but dropped when
/// the function returns — this helper exists only to benchmark the offline
/// phase, not to produce usable state.
fn cg_helper<R: RngCore + CryptoRng>(
    layers: &[usize],
    architecture: &NeuralArchitecture<TenBitAS, TenBitExpFP>,
    cfhe: ClientFHE,
    reader: &mut IMuxSync<CountingIO<BufReader<TcpStream>>>,
    writer: &mut IMuxSync<CountingIO<BufWriter<TcpStream>>>,
    rng: &mut R,
) {
    let mut in_shares = BTreeMap::new();
    let mut out_shares = BTreeMap::new();
    for i in layers.iter() {
        match &architecture.layers[*i] {
            // Only linear layers belong in a CG partition.
            LayerInfo::NLL(_dims, NonLinearLayerInfo::ReLU) => panic!(),
            LayerInfo::NLL(_dims, NonLinearLayerInfo::PolyApprox { .. }) => panic!(),
            LayerInfo::LL(dims, linear_layer_info) => {
                let input_dims = dims.input_dimensions();
                let output_dims = dims.output_dimensions();
                let (in_share, mut out_share) = match &linear_layer_info {
                    LinearLayerInfo::Conv2d { .. } | LinearLayerInfo::FullyConnected => {
                        // Pick the SEAL ciphertext generator matching the layer kind.
                        let mut cg_handler = match &linear_layer_info {
                            LinearLayerInfo::Conv2d { .. } => {
                                SealClientCG::Conv2D(client_cg::Conv2D::new(
                                    &cfhe,
                                    linear_layer_info,
                                    input_dims,
                                    output_dims,
                                ))
                            }
                            LinearLayerInfo::FullyConnected => {
                                SealClientCG::FullyConnected(client_cg::FullyConnected::new(
                                    &cfhe,
                                    linear_layer_info,
                                    input_dims,
                                    output_dims,
                                ))
                            }
                            _ => unreachable!(),
                        };
                        LinearProtocol::offline_client_protocol(
                            reader,
                            writer,
                            input_dims,
                            output_dims,
                            &mut cg_handler,
                            rng,
                        )
                        .unwrap()
                    }
                    _ => {
                        // AvgPool and Identity don't require an offline communication
                        if out_shares.keys().any(|k| *k == &(i - 1)) {
                            // If the layer comes after a linear layer, apply the function to
                            // the last layer's output share
                            let prev_output_share = out_shares.get(&(i - 1)).unwrap();
                            let mut output_share = Output::zeros(dims.output_dimensions());
                            linear_layer_info.evaluate_naive(prev_output_share, &mut output_share);
                            (Input::zeros(dims.input_dimensions()), output_share)
                        } else {
                            // Otherwise, just return randomizers of 0
                            (
                                Input::zeros(dims.input_dimensions()),
                                Output::zeros(dims.output_dimensions()),
                            )
                        }
                    }
                };
                // We reduce here because the input to future layers requires
                // shares to already be reduced correctly; for example,
                // `online_server_protocol` reduces at the end of each layer.
                for share in &mut out_share {
                    share.inner.signed_reduce_in_place();
                }
                // r
                in_shares.insert(i, in_share);
                // -(Lr + s)
                out_shares.insert(i, out_share);
            }
        }
    }
}
pub fn cg(server_addr: &str, architecture: NeuralArchitecture<TenBitAS, TenBitExpFP>) {
let (mut r1, mut w1) = client_connect(server_addr);
// Give server time to start async listener
std::thread::sleep_ms(1000);
let (mut r2, mut w2) = client_connect(server_addr);
let key_time = timer_start!(|| "Keygen");
let mut key_share = KeyShare::new();
let (cfhe, keys_vec) = key_share.generate();
let sent_message = OfflineClientKeySend::new(&keys_vec);
protocols::bytes::serialize(&mut w1, &sent_message).unwrap();
timer_end!(key_time);
let key_time = timer_start!(|| "Keygen");
let mut key_share = KeyShare::new();
let (cfhe_2, keys_vec) = key_share.generate();
let sent_message = OfflineClientKeySend::new(&keys_vec);
protocols::bytes::serialize(&mut w1, &sent_message).unwrap();
timer_end!(key_time);
w1.reset();
let (t1_layers, t2_layers) = match architecture.layers.len() {
9 => (vec![0, 5, 6], vec![2, 3, 8]),
17 => (vec![0, 4, 5, 12, 14], vec![2, 7, 9, 10, 16]),
_ => panic!(),
};
let linear_time = timer_start!(|| "Linear layers offline phase");
crossbeam::scope(|s| {
let r1 = &mut r1;
let r2 = &mut r2;
let w1 = &mut w1;
let w2 = &mut w2;
let architecture_1 = &architecture;
let architecture_2 = &architecture;
s.spawn(move |_| {
let mut rng = &mut ChaChaRng::from_seed(RANDOMNESS);
cg_helper(&t1_layers, architecture_1, cfhe, r1, w1, &mut rng);
});
s.spawn(move |_| {
let mut rng = &mut ChaChaRng::from_seed(RANDOMNESS);
cg_helper(&t2_layers, architecture_2, cfhe_2, r2, w2, &mut rng);
});
})
.unwrap();
timer_end!(linear_time);
add_to_trace!(|| "Communication", || format!(
"Read {} bytes\nWrote {} bytes",
r1.count() + r2.count(),
w1.count() + w2.count()
));
}
/// Garbled-circuit (ReLU) offline benchmark, client side.
///
/// Sends a freshly generated key, receives `number_of_relus` garbled circuits
/// from the server in chunks of 8192, then runs an OT to obtain the wire
/// labels for dummy (all-zero) input shares. Results are discarded — this
/// only measures time and communication.
pub fn gc<R: RngCore + CryptoRng>(server_addr: &str, number_of_relus: usize, rng: &mut R) {
    let (mut reader, mut writer) = client_connect(server_addr);
    // Keygen
    let key_time = timer_start!(|| "Keygen");
    let mut key_share = KeyShare::new();
    let (_, keys_vec) = key_share.generate();
    let sent_message = OfflineClientKeySend::new(&keys_vec);
    protocols::bytes::serialize(&mut writer, &sent_message).unwrap();
    timer_end!(key_time);
    writer.reset();
    // Generate dummy labels/layer for CDS
    let shares = vec![AdditiveShare::<TenBitExpParams>::zero(); number_of_relus];
    use fancy_garbling::util::*;
    let start_time = timer_start!(|| "ReLU offline protocol");
    // Number of bits needed to represent a field element.
    let p = u128::from(<<F as PrimeField>::Params>::MODULUS.0);
    let field_size = crypto_primitives::gc::num_bits(p);
    let rcv_gc_time = timer_start!(|| "Receiving GCs");
    let mut gc_s = Vec::with_capacity(number_of_relus);
    let mut r_wires = Vec::with_capacity(number_of_relus);
    // GCs arrive in chunks of 8192; only the last chunk may be short.
    let num_chunks = (number_of_relus as f64 / 8192.0).ceil() as usize;
    for i in 0..num_chunks {
        let in_msg: ClientGcMsgRcv = protocols::bytes::deserialize(&mut reader).unwrap();
        let (gc_chunks, r_wire_chunks) = in_msg.msg();
        if i < (num_chunks - 1) {
            assert_eq!(gc_chunks.len(), 8192);
        }
        gc_s.extend(gc_chunks);
        r_wires.extend(r_wire_chunks);
    }
    timer_end!(rcv_gc_time);
    add_to_trace!(|| "GC Communication", || format!(
        "Read {} bytes\nWrote {} bytes",
        reader.count(),
        writer.count()
    ));
    reader.reset();
    writer.reset();
    assert_eq!(gc_s.len(), number_of_relus);
    // Bit-decompose the (all-zero) shares into OT choice bits.
    let bs = shares
        .iter()
        .flat_map(|s| u128_to_bits(protocols::gc::u128_from_share(*s), field_size))
        .map(|b| b == 1)
        .collect::<Vec<_>>();
    // Run the OT only when there is at least one ReLU to evaluate.
    let _ = if number_of_relus != 0 {
        // The OT library needs the raw channel, not the IMux wrapper.
        let r = reader.get_mut_ref().remove(0);
        let w = writer.get_mut_ref().remove(0);
        let ot_time = timer_start!(|| "OTs");
        let mut channel = Channel::new(r, w);
        let mut ot = OTReceiver::init(&mut channel, rng).expect("should work");
        let labels = ot
            .receive(&mut channel, bs.as_slice(), rng)
            .expect("should work");
        let labels = labels
            .into_iter()
            .map(|l| Wire::from_block(l, 2))
            .collect::<Vec<_>>();
        timer_end!(ot_time);
        labels
    } else {
        Vec::new()
    };
    timer_end!(start_time);
    add_to_trace!(|| "OT Communication", || format!(
        "Read {} bytes\nWrote {} bytes",
        reader.count(),
        writer.count()
    ));
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/latency/mod.rs | rust/experiments/src/latency/mod.rs | pub mod client;
pub mod server;
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/latency/server.rs | rust/experiments/src/latency/server.rs | use crate::*;
use algebra::{fields::PrimeField, FpParameters, UniformRandom};
use crypto_primitives::gc::fancy_garbling;
use io_utils::{counting::CountingIO, imux::IMuxSync};
use neural_network::{
layers::{Layer, NonLinearLayer},
NeuralNetwork,
};
use ocelot::ot::{AlszSender as OTSender, Sender};
use protocols::{
gc::ServerGcMsgSend,
linear_layer::{LinearProtocol, OfflineServerKeyRcv},
};
use protocols_sys::*;
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use rayon::prelude::*;
use scuttlebutt::Channel;
use std::{
collections::BTreeMap,
io::{BufReader, BufWriter},
net::TcpStream,
};
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
/// Server half of the linear-layer offline (ciphertext-generation) phase for
/// the subset of layers in `layers`.
///
/// Conv2d/FullyConnected layers run the homomorphic offline protocol over
/// `reader`/`writer`; AvgPool/Identity need no communication and get a zero
/// randomizer. Non-linear layers are skipped.
///
/// NOTE(review): `linear_state` is accumulated but dropped on return — this
/// helper exists only to benchmark the offline phase.
fn cg_helper<R: RngCore + CryptoRng>(
    layers: &[usize],
    nn: &NeuralNetwork<TenBitAS, TenBitExpFP>,
    sfhe: ServerFHE,
    reader: &mut IMuxSync<CountingIO<BufReader<TcpStream>>>,
    writer: &mut IMuxSync<CountingIO<BufWriter<TcpStream>>>,
    rng: &mut R,
) {
    let mut linear_state = BTreeMap::new();
    for i in layers.iter() {
        match &nn.layers[*i] {
            Layer::NLL(NonLinearLayer::ReLU(..)) => {}
            Layer::NLL(NonLinearLayer::PolyApprox { .. }) => {}
            Layer::LL(layer) => {
                let randomizer = match &layer {
                    LinearLayer::Conv2d { .. } | LinearLayer::FullyConnected { .. } => {
                        // Pick the SEAL ciphertext generator matching the layer kind.
                        let mut cg_handler = match &layer {
                            LinearLayer::Conv2d { .. } => SealServerCG::Conv2D(
                                server_cg::Conv2D::new(&sfhe, layer, &layer.kernel_to_repr()),
                            ),
                            LinearLayer::FullyConnected { .. } => {
                                SealServerCG::FullyConnected(server_cg::FullyConnected::new(
                                    &sfhe,
                                    layer,
                                    &layer.kernel_to_repr(),
                                ))
                            }
                            _ => unreachable!(),
                        };
                        LinearProtocol::<TenBitExpParams>::offline_server_protocol(
                            reader,
                            writer,
                            layer.input_dimensions(),
                            layer.output_dimensions(),
                            &mut cg_handler,
                            rng,
                        )
                        .unwrap()
                    }
                    // AvgPool and Identity don't require an offline phase
                    LinearLayer::AvgPool { dims, .. } => Output::zeros(dims.output_dimensions()),
                    LinearLayer::Identity { dims } => Output::zeros(dims.output_dimensions()),
                };
                linear_state.insert(i, randomizer);
            }
        }
    }
}
/// Offline ciphertext-generation (CG) latency benchmark, server side.
///
/// Accepts two connections, receives two client FHE keys (both over the
/// first connection — this mirrors the client, which sends both on its first
/// stream), and runs `cg_helper` on two threads over a hard-coded partition
/// of the linear layers. Only 9- and 17-layer networks are supported.
pub fn cg(server_addr: &str, nn: NeuralNetwork<TenBitAS, TenBitExpFP>) {
    let (mut r1, mut w1) = server_connect(server_addr);
    let (mut r2, mut w2) = server_connect(server_addr);
    // Key for worker thread 1 (read from connection 1).
    let key_time = timer_start!(|| "Keygen");
    let keys: OfflineServerKeyRcv = protocols::bytes::deserialize(&mut r1).unwrap();
    let mut key_share = KeyShare::new();
    let sfhe = key_share.receive(keys.msg());
    timer_end!(key_time);
    // Key for worker thread 2 (also read from connection 1, by design).
    let key_time = timer_start!(|| "Keygen");
    let keys: OfflineServerKeyRcv = protocols::bytes::deserialize(&mut r1).unwrap();
    let mut key_share = KeyShare::new();
    let sfhe_2 = key_share.receive(keys.msg());
    timer_end!(key_time);
    // Exclude the key exchange from the byte counts reported below.
    r1.reset();
    // Static partition of linear-layer indices between the two threads;
    // must match the client-side partition exactly.
    let (t1_layers, t2_layers) = match nn.layers.len() {
        9 => (vec![0, 5, 6], vec![2, 3, 8]),
        17 => (vec![0, 4, 5, 12, 14], vec![2, 7, 9, 10, 16]),
        _ => panic!(),
    };
    let linear_time = timer_start!(|| "Linear layers offline phase");
    crossbeam::scope(|s| {
        let r1 = &mut r1;
        let r2 = &mut r2;
        let w1 = &mut w1;
        let w2 = &mut w2;
        let nn_1 = &nn;
        let nn_2 = &nn;
        s.spawn(move |_| {
            let mut rng = &mut ChaChaRng::from_seed(RANDOMNESS);
            cg_helper(&t1_layers, nn_1, sfhe, r1, w1, &mut rng);
        });
        s.spawn(move |_| {
            let mut rng = &mut ChaChaRng::from_seed(RANDOMNESS);
            cg_helper(&t2_layers, nn_2, sfhe_2, r2, w2, &mut rng);
        });
    })
    .unwrap();
    timer_end!(linear_time);
    add_to_trace!(|| "Communication", || format!(
        "Read {} bytes\nWrote {} bytes",
        r1.count() + r2.count(),
        w1.count() + w2.count()
    ));
}
/// Garbled-circuit (ReLU) offline benchmark, server side.
///
/// Receives the client key (discarded), garbles `number_of_relus` ReLU
/// circuits in parallel with rayon, encodes garbler/evaluator input labels,
/// streams the circuits to the client in chunks of 8192, and finally runs
/// the OT sender for the evaluator's labels.
pub fn gc<R: RngCore + CryptoRng>(server_addr: &str, number_of_relus: usize, rng: &mut R) {
    let (mut reader, mut writer) = server_connect(server_addr);
    let key_time = timer_start!(|| "Keygen");
    let keys: OfflineServerKeyRcv = protocols::bytes::deserialize(&mut reader).unwrap();
    let mut key_share = KeyShare::new();
    // Key material is received only to complete the handshake; it is unused.
    let _ = Some(key_share.receive(keys.msg()));
    timer_end!(key_time);
    reader.reset();
    let start_time = timer_start!(|| "ReLU offline protocol");
    let mut gc_s = Vec::with_capacity(number_of_relus);
    let mut encoders = Vec::with_capacity(number_of_relus);
    let p = (<<F as PrimeField>::Params>::MODULUS.0).into();
    // Circuit template for a single ReLU; cloned per garbling below.
    let c = protocols::gc::make_relu::<TenBitExpParams>();
    let garble_time = timer_start!(|| "Garbling");
    // Garble all circuits in parallel.
    (0..number_of_relus)
        .into_par_iter()
        .map(|_| {
            let mut c = c.clone();
            let (en, gc) = fancy_garbling::garble(&mut c).unwrap();
            (en, gc)
        })
        .unzip_into_vecs(&mut encoders, &mut gc_s);
    timer_end!(garble_time);
    let encode_time = timer_start!(|| "Encoding inputs");
    let num_garbler_inputs = c.num_garbler_inputs();
    let num_evaluator_inputs = c.num_evaluator_inputs();
    // Both possible evaluator inputs (0 and 1) are encoded so the OT can
    // offer the matching label pair for every choice bit.
    let zero_inputs = vec![0u16; num_evaluator_inputs];
    let one_inputs = vec![1u16; num_evaluator_inputs];
    let mut labels = Vec::with_capacity(number_of_relus * num_evaluator_inputs);
    let mut randomizer_labels = Vec::with_capacity(number_of_relus);
    let mut output_randomizers = Vec::with_capacity(number_of_relus);
    for enc in encoders.iter() {
        // Fresh output randomizer r; its negation is bit-decomposed and
        // encoded into the second half of the garbler inputs.
        let r = F::uniform(rng);
        output_randomizers.push(r);
        let r_bits: u64 = ((-r).into_repr()).into();
        let r_bits =
            fancy_garbling::util::u128_to_bits(r_bits.into(), crypto_primitives::gc::num_bits(p));
        for w in ((num_garbler_inputs / 2)..num_garbler_inputs)
            .zip(r_bits)
            .map(|(i, r_i)| enc.encode_garbler_input(r_i, i))
        {
            randomizer_labels.push(w);
        }
        let all_zeros = enc.encode_evaluator_inputs(&zero_inputs);
        let all_ones = enc.encode_evaluator_inputs(&one_inputs);
        all_zeros
            .into_iter()
            .zip(all_ones)
            .for_each(|(label_0, label_1)| labels.push((label_0.as_block(), label_1.as_block())));
    }
    timer_end!(encode_time);
    let send_gc_time = timer_start!(|| "Sending GCs");
    // Guard against division by zero when there are no ReLUs; the value is
    // irrelevant in that case since there is nothing to chunk.
    let randomizer_label_per_relu = if number_of_relus == 0 {
        8192
    } else {
        randomizer_labels.len() / number_of_relus
    };
    // Ship circuits and their randomizer labels in chunks of 8192 circuits.
    for msg_contents in gc_s
        .chunks(8192)
        .zip(randomizer_labels.chunks(randomizer_label_per_relu * 8192))
    {
        let sent_message = ServerGcMsgSend::new(&msg_contents);
        protocols::bytes::serialize(&mut writer, &sent_message).unwrap();
        writer.flush().unwrap();
    }
    timer_end!(send_gc_time);
    add_to_trace!(|| "GC Communication", || format!(
        "Read {} bytes\nWrote {} bytes",
        reader.count(),
        writer.count()
    ));
    reader.reset();
    writer.reset();
    // Run the OT sender only when there are labels to transfer.
    if number_of_relus != 0 {
        // The OT library needs the raw channel, not the IMux wrapper.
        let r = reader.get_mut_ref().remove(0);
        let w = writer.get_mut_ref().remove(0);
        let ot_time = timer_start!(|| "OTs");
        let mut channel = Channel::new(r, w);
        let mut ot = OTSender::init(&mut channel, rng).unwrap();
        ot.send(&mut channel, labels.as_slice(), rng).unwrap();
        timer_end!(ot_time);
    }
    timer_end!(start_time);
    add_to_trace!(|| "OT Communication", || format!(
        "Read {} bytes\nWrote {} bytes",
        reader.count(),
        writer.count()
    ));
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/latency/minionn/client.rs | rust/experiments/src/latency/minionn/client.rs | use clap::{App, Arg, ArgMatches};
use experiments::minionn::construct_minionn;
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
/// Parse the MiniONN latency-client CLI: server address, number of
/// polynomial layers, and optional port.
fn get_args() -> ArgMatches<'static> {
    let ip = Arg::with_name("ip")
        .short("i")
        .long("ip")
        .takes_value(true)
        .help("Server IP address")
        .required(true);
    let layers = Arg::with_name("layers")
        .short("l")
        .long("layers")
        .takes_value(true)
        .help("Number of polynomial layers (0-7)")
        .required(true);
    let port = Arg::with_name("port")
        .short("p")
        .long("port")
        .takes_value(true)
        .help("Server port (default 8000)")
        .required(false);
    App::new("minionn-client")
        .arg(ip)
        .arg(layers)
        .arg(port)
        .get_matches()
}
/// Entry point for the MiniONN latency client.
fn main() {
    let matches = get_args();
    // Torch variable store on GPU when one is available.
    let vs = tch::nn::VarStore::new(tch::Device::cuda_if_available());
    let mut rng = ChaChaRng::from_seed(RANDOMNESS);
    let layers = clap::value_t!(matches.value_of("layers"), usize).unwrap();
    // Assemble "<ip>:<port>", defaulting the port to 8000.
    let server_addr = format!(
        "{}:{}",
        matches.value_of("ip").unwrap(),
        matches.value_of("port").unwrap_or("8000")
    );
    let network = construct_minionn(Some(&vs.root()), 1, layers, &mut rng);
    let architecture = (&network).into();
    experiments::latency::client::nn_client(&server_addr, architecture, &mut rng);
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/latency/minionn/server.rs | rust/experiments/src/latency/minionn/server.rs | use clap::{App, Arg, ArgMatches};
use experiments::minionn::construct_minionn;
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
/// Parse the MiniONN latency-server CLI: number of polynomial layers and
/// optional listen port.
fn get_args() -> ArgMatches<'static> {
    let layers = Arg::with_name("layers")
        .short("l")
        .long("layers")
        .takes_value(true)
        .help("Number of polynomial layers (0-7)")
        .required(true);
    let port = Arg::with_name("port")
        .short("p")
        .long("port")
        .takes_value(true)
        .help("Port to listen on (default 8000)")
        .required(false);
    App::new("minionn-server").arg(layers).arg(port).get_matches()
}
/// Entry point for the MiniONN latency server.
fn main() {
    let matches = get_args();
    // Torch variable store on GPU when one is available.
    let vs = tch::nn::VarStore::new(tch::Device::cuda_if_available());
    let mut rng = ChaChaRng::from_seed(RANDOMNESS);
    let layers = clap::value_t!(matches.value_of("layers"), usize).unwrap();
    // Listen on all interfaces; the port defaults to 8000.
    let server_addr = format!("0.0.0.0:{}", matches.value_of("port").unwrap_or("8000"));
    let network = construct_minionn(Some(&vs.root()), 1, layers, &mut rng);
    experiments::nn_server(&server_addr, &network, &mut rng);
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/latency/cg/client.rs | rust/experiments/src/latency/cg/client.rs | use clap::{App, Arg, ArgMatches};
use experiments::{minionn::construct_minionn, mnist::construct_mnist};
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
/// Parse the CG-benchmark client CLI: model selector, server address, and
/// optional port.
fn get_args() -> ArgMatches<'static> {
    let model = Arg::with_name("model")
        .short("m")
        .long("model")
        .takes_value(true)
        .help("MNIST (0), MiniONN(1)")
        .required(true);
    let ip = Arg::with_name("ip")
        .short("i")
        .long("ip")
        .takes_value(true)
        .help("Server IP address")
        .required(true);
    let port = Arg::with_name("port")
        .short("p")
        .long("port")
        .takes_value(true)
        .help("Server port (default 8000)")
        .required(false);
    App::new("cg-client").arg(model).arg(ip).arg(port).get_matches()
}
/// Entry point for the CG (offline linear-layer) benchmark client.
fn main() {
    let matches = get_args();
    // Torch variable store on GPU when one is available.
    let vs = tch::nn::VarStore::new(tch::Device::cuda_if_available());
    let mut rng = ChaChaRng::from_seed(RANDOMNESS);
    // Assemble "<ip>:<port>", defaulting the port to 8000.
    let server_addr = format!(
        "{}:{}",
        matches.value_of("ip").unwrap(),
        matches.value_of("port").unwrap_or("8000")
    );
    // 0 => MNIST, 1 => MiniONN; anything else is a usage error.
    let network = match clap::value_t!(matches.value_of("model"), usize).unwrap() {
        0 => construct_mnist(Some(&vs.root()), 1, 0, &mut rng),
        1 => construct_minionn(Some(&vs.root()), 1, 0, &mut rng),
        _ => panic!(),
    };
    let architecture = (&network).into();
    experiments::latency::client::cg(&server_addr, architecture);
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/latency/cg/server.rs | rust/experiments/src/latency/cg/server.rs | use clap::{App, Arg, ArgMatches};
use experiments::{minionn::construct_minionn, mnist::construct_mnist};
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
/// Parse the CG-benchmark server CLI: model selector and optional listen
/// port.
fn get_args() -> ArgMatches<'static> {
    let model = Arg::with_name("model")
        .short("m")
        .long("model")
        .takes_value(true)
        .help("MNIST (0), MiniONN(1)")
        .required(true);
    let port = Arg::with_name("port")
        .short("p")
        .long("port")
        .takes_value(true)
        .help("Port to listen on (default 8000)")
        .required(false);
    App::new("cg-server").arg(model).arg(port).get_matches()
}
/// Entry point for the CG (offline linear-layer) benchmark server.
fn main() {
    let matches = get_args();
    // Torch variable store on GPU when one is available.
    let vs = tch::nn::VarStore::new(tch::Device::cuda_if_available());
    let mut rng = ChaChaRng::from_seed(RANDOMNESS);
    // Listen on all interfaces; the port defaults to 8000.
    let server_addr = format!("0.0.0.0:{}", matches.value_of("port").unwrap_or("8000"));
    // 0 => MNIST, 1 => MiniONN; anything else is a usage error.
    let network = match clap::value_t!(matches.value_of("model"), usize).unwrap() {
        0 => construct_mnist(Some(&vs.root()), 1, 0, &mut rng),
        1 => construct_minionn(Some(&vs.root()), 1, 0, &mut rng),
        _ => panic!(),
    };
    experiments::latency::server::cg(&server_addr, network);
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/latency/resnet32/client.rs | rust/experiments/src/latency/resnet32/client.rs | use clap::{App, Arg, ArgMatches};
use experiments::resnet32::construct_resnet_32;
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
fn get_args() -> ArgMatches<'static> {
App::new("resnet32-client")
.arg(
Arg::with_name("ip")
.short("i")
.long("ip")
.takes_value(true)
.help("Server IP address")
.required(true),
)
.arg(
Arg::with_name("layers")
.short("l")
.long("layers")
.takes_value(true)
.help("Number of polynomial layers (6/12/14/16/18/20/22/24/26)")
.required(true),
)
.arg(
Arg::with_name("port")
.short("p")
.long("port")
.takes_value(true)
.help("Server port (default 8000)")
.required(false),
)
.get_matches()
}
fn main() {
let vs = tch::nn::VarStore::new(tch::Device::cuda_if_available());
let mut rng = ChaChaRng::from_seed(RANDOMNESS);
let args = get_args();
let ip = args.value_of("ip").unwrap();
let layers = clap::value_t!(args.value_of("layers"), usize).unwrap();
let port = args.value_of("port").unwrap_or("8000");
let server_addr = format!("{}:{}", ip, port);
let network = construct_resnet_32(Some(&vs.root()), 1, layers, &mut rng);
let architecture = (&network).into();
experiments::latency::client::nn_client(&server_addr, architecture, &mut rng);
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/latency/resnet32/server.rs | rust/experiments/src/latency/resnet32/server.rs | use clap::{App, Arg, ArgMatches};
use experiments::resnet32::construct_resnet_32;
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
fn get_args() -> ArgMatches<'static> {
App::new("resnet32-server")
.arg(
Arg::with_name("layers")
.short("l")
.long("layers")
.takes_value(true)
.help("Number of polynomial layers (6/12/14/16/18/20/22/24/26)")
.required(true),
)
.arg(
Arg::with_name("port")
.short("p")
.long("port")
.takes_value(true)
.help("Server port (default 8000)")
.required(false),
)
.get_matches()
}
fn main() {
let vs = tch::nn::VarStore::new(tch::Device::cuda_if_available());
let mut rng = ChaChaRng::from_seed(RANDOMNESS);
let args = get_args();
let layers = clap::value_t!(args.value_of("layers"), usize).unwrap();
let port = args.value_of("port").unwrap_or("8000");
let server_addr = format!("0.0.0.0:{}", port);
let network = construct_resnet_32(Some(&vs.root()), 1, layers, &mut rng);
experiments::nn_server(&server_addr, &network, &mut rng);
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/latency/mnist/client.rs | rust/experiments/src/latency/mnist/client.rs | use clap::{App, Arg, ArgMatches};
use experiments::mnist::construct_mnist;
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
fn get_args() -> ArgMatches<'static> {
App::new("mnist-client")
.arg(
Arg::with_name("ip")
.short("i")
.long("ip")
.takes_value(true)
.help("Server IP address")
.required(true),
)
.arg(
Arg::with_name("layers")
.short("l")
.long("layers")
.takes_value(true)
.help("Number of polynomial layers (0-3)")
.required(true),
)
.arg(
Arg::with_name("port")
.short("p")
.long("port")
.takes_value(true)
.help("Server port (default 8000)")
.required(false),
)
.get_matches()
}
fn main() {
let vs = tch::nn::VarStore::new(tch::Device::cuda_if_available());
let mut rng = ChaChaRng::from_seed(RANDOMNESS);
let args = get_args();
let ip = args.value_of("ip").unwrap();
let layers = clap::value_t!(args.value_of("layers"), usize).unwrap();
let port = args.value_of("port").unwrap_or("8000");
let server_addr = format!("{}:{}", ip, port);
let network = construct_mnist(Some(&vs.root()), 1, layers, &mut rng);
let architecture = (&network).into();
experiments::latency::client::nn_client(&server_addr, architecture, &mut rng);
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/latency/mnist/server.rs | rust/experiments/src/latency/mnist/server.rs | use clap::{App, Arg, ArgMatches};
use experiments::mnist::construct_mnist;
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
fn get_args() -> ArgMatches<'static> {
App::new("mnist-server")
.arg(
Arg::with_name("layers")
.short("l")
.long("layers")
.takes_value(true)
.help("Number of polynomial layers (0-3)")
.required(true),
)
.arg(
Arg::with_name("port")
.short("p")
.long("port")
.takes_value(true)
.help("Port to listen on (default 8000)")
.required(false),
)
.get_matches()
}
fn main() {
let vs = tch::nn::VarStore::new(tch::Device::cuda_if_available());
let mut rng = ChaChaRng::from_seed(RANDOMNESS);
let args = get_args();
let layers = clap::value_t!(args.value_of("layers"), usize).unwrap();
let port = args.value_of("port").unwrap_or("8000");
let server_addr = format!("0.0.0.0:{}", port);
let network = construct_mnist(Some(&vs.root()), 1, layers, &mut rng);
experiments::nn_server(&server_addr, &network, &mut rng);
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/latency/gc/client.rs | rust/experiments/src/latency/gc/client.rs | use clap::{App, Arg, ArgMatches};
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
fn get_args() -> ArgMatches<'static> {
App::new("gc-client")
.arg(
Arg::with_name("model")
.short("m")
.long("model")
.takes_value(true)
.help("MNIST (0), MiniONN(1)")
.required(true),
)
.arg(
Arg::with_name("ip")
.short("i")
.long("ip")
.takes_value(true)
.help("Server IP address")
.required(true),
)
.arg(
Arg::with_name("port")
.short("p")
.long("port")
.takes_value(true)
.help("Server port (default 8000)")
.required(false),
)
.get_matches()
}
fn main() {
let mut rng = ChaChaRng::from_seed(RANDOMNESS);
let args = get_args();
let ip = args.value_of("ip").unwrap();
let port = args.value_of("port").unwrap_or("8000");
let server_addr = format!("{}:{}", ip, port);
let model = clap::value_t!(args.value_of("model"), usize).unwrap();
assert!(model == 0 || model == 1);
let mnist = [9216, 1024, 100].iter().sum();
let minionn = [65536, 65536, 16384, 16384, 4096, 4096, 1024].iter().sum();
let activations: usize = if model == 0 { mnist } else { minionn };
experiments::latency::client::gc(&server_addr, activations, &mut rng);
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/latency/gc/server.rs | rust/experiments/src/latency/gc/server.rs | use clap::{App, Arg, ArgMatches};
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
fn get_args() -> ArgMatches<'static> {
App::new("gc-server")
.arg(
Arg::with_name("model")
.short("m")
.long("model")
.takes_value(true)
.help("MNIST (0), MiniONN(1)")
.required(true),
)
.arg(
Arg::with_name("port")
.short("p")
.long("port")
.takes_value(true)
.help("Port to listen on (default 8000)")
.required(false),
)
.get_matches()
}
fn main() {
let mut rng = ChaChaRng::from_seed(RANDOMNESS);
let args = get_args();
let port = args.value_of("port").unwrap_or("8000");
let server_addr = format!("0.0.0.0:{}", port);
let model = clap::value_t!(args.value_of("model"), usize).unwrap();
assert!(model == 0 || model == 1);
let mnist = [9216, 1024, 100].iter().sum();
let minionn = [65536, 65536, 16384, 16384, 4096, 4096, 1024].iter().sum();
let activations: usize = if model == 0 { mnist } else { minionn };
experiments::latency::server::gc(&server_addr, activations, &mut rng);
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/inference/mod.rs | rust/experiments/src/inference/mod.rs | pub mod inference;
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/inference/inference.rs | rust/experiments/src/inference/inference.rs | use crate::*;
use neural_network::{ndarray::Array4, tensors::Input, NeuralArchitecture};
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use std::cmp;
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
pub fn softmax(x: &Input<TenBitExpFP>) -> Input<TenBitExpFP> {
let mut max: TenBitExpFP = x[[0, 0, 0, 0]];
x.iter().for_each(|e| {
max = match max.cmp(e) {
cmp::Ordering::Less => *e,
_ => max,
};
});
let mut e_x: Input<TenBitExpFP> = x.clone();
e_x.iter_mut().for_each(|e| {
*e = f64::from(*e - max).exp().into();
});
let e_x_sum = 1.0 / f64::from(e_x.iter().fold(TenBitExpFP::zero(), |sum, val| sum + *val));
e_x.iter_mut().for_each(|e| *e *= e_x_sum.into());
return e_x;
}
pub fn run(
network: NeuralNetwork<TenBitAS, TenBitExpFP>,
architecture: NeuralArchitecture<TenBitAS, TenBitExpFP>,
image: Array4<f64>,
class: i64,
) {
let mut server_rng = ChaChaRng::from_seed(RANDOMNESS);
let mut client_rng = ChaChaRng::from_seed(RANDOMNESS);
let server_addr = "127.0.0.1:8001";
let mut client_output = Output::zeros((1, 10, 0, 0));
crossbeam::thread::scope(|s| {
let server_output = s.spawn(|_| nn_server(&server_addr, &network, &mut server_rng));
client_output = s
.spawn(|_| {
nn_client(
&server_addr,
&architecture,
(image.clone()).into(),
&mut client_rng,
)
})
.join()
.unwrap();
server_output.join().unwrap();
})
.unwrap();
let sm = softmax(&client_output);
let max = sm.iter().map(|e| f64::from(*e)).fold(0. / 0., f64::max);
let index = sm.iter().position(|e| f64::from(*e) == max).unwrap() as i64;
println!("Correct class is {}, inference result is {}", class, index);
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/experiments/src/inference/minionn/inference.rs | rust/experiments/src/inference/minionn/inference.rs | use clap::{App, Arg, ArgMatches};
use experiments::minionn::construct_minionn;
use neural_network::{ndarray::Array4, npy::NpyData};
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use std::{io::Read, path::Path};
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
fn get_args() -> ArgMatches<'static> {
App::new("minionn-inference")
.arg(
Arg::with_name("weights")
.short("w")
.long("weights")
.takes_value(true)
.help("Path to weights")
.required(true),
)
.arg(
Arg::with_name("layers")
.short("l")
.long("layers")
.takes_value(true)
.help("Number of polynomial layers (0-7)")
.required(true),
)
.get_matches()
}
fn main() {
let mut rng = ChaChaRng::from_seed(RANDOMNESS);
let args = get_args();
let weights = args.value_of("weights").unwrap();
let layers = clap::value_t!(args.value_of("layers"), usize).unwrap();
// Build network
let mut network = construct_minionn(None, 1, layers, &mut rng);
let architecture = (&network).into();
// Load network weights
network.from_numpy(&weights).unwrap();
// Open image and class
let mut buf = vec![];
std::fs::File::open(Path::new("class.npy"))
.unwrap()
.read_to_end(&mut buf)
.unwrap();
let class: i64 = NpyData::from_bytes(&buf).unwrap().to_vec()[0];
buf = vec![];
std::fs::File::open(Path::new("image.npy"))
.unwrap()
.read_to_end(&mut buf)
.unwrap();
let image_vec: Vec<f64> = NpyData::from_bytes(&buf).unwrap().to_vec();
let image = Array4::from_shape_vec((1, 3, 32, 32), image_vec).unwrap();
experiments::inference::inference::run(network, architecture, image, class);
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/crypto-primitives/src/beavers_mul.rs | rust/crypto-primitives/src/beavers_mul.rs | use rand_chacha::ChaChaRng;
use rand_core::SeedableRng;
use std::{marker::PhantomData, ops::Neg};
use crate::additive_share::{AdditiveShare, Share};
use algebra::{
fixed_point::{FixedPoint, FixedPointParameters},
PrimeField, UniformRandom,
};
use serde::{Deserialize, Serialize};
/// Shares of a triple `[[a]]`, `[[b]]`, `[[c]]` such that `ab = c`.
#[derive(Serialize, Deserialize, Copy, Clone)]
#[serde(bound = "P: PrimeField")]
pub struct Triple<P: PrimeField> {
/// A share of the `a` part of the triple.
pub a: P,
/// A share of the `b` part of the triple.
pub b: P,
/// A share of the `c` part of the triple.
pub c: P,
}
/// Shares of the intermediate step.
#[derive(Clone, Copy, Serialize, Deserialize)]
#[serde(bound = "T: Share")]
pub struct BlindedSharedInputs<T: Share> {
/// A share of the `x-a`.
pub blinded_x: AdditiveShare<T>,
/// A share of the `y-b`.
pub blinded_y: AdditiveShare<T>,
}
/// Result of combining shares in `BlindedSharedInput`.
#[derive(Serialize, Deserialize)]
#[serde(bound = "T: Share")]
pub struct BlindedInputs<T: Share> {
/// `x-a`.
pub blinded_x: T,
/// `y-b`.
pub blinded_y: T,
}
/// Objects that can be multiplied via Beaver's triples protocols must implement
/// this trait.
pub trait BeaversMul<T>
where
T: Share,
<T as Share>::Ring: PrimeField,
{
/// Share inputs by consuming a triple.
fn share_and_blind_inputs(
x: &AdditiveShare<T>,
y: &AdditiveShare<T>,
triple: &Triple<T::Ring>,
) -> BlindedSharedInputs<T> {
let blinded_x = T::randomize_local_share(x, &triple.a.neg());
let blinded_y = T::randomize_local_share(y, &triple.b.neg());
BlindedSharedInputs {
blinded_x,
blinded_y,
}
}
/// Reconstruct inputs that have been blinded in the previous step.
fn reconstruct_blinded_inputs(
b1: BlindedSharedInputs<T>,
b2: BlindedSharedInputs<T>,
) -> BlindedInputs<T> {
BlindedInputs {
blinded_x: b1.blinded_x.combine(&b2.blinded_x),
blinded_y: b1.blinded_y.combine(&b2.blinded_y),
}
}
/// Multiply blinded inputs.
fn multiply_blinded_inputs(
party_index: usize,
bl: BlindedInputs<T>,
t: &Triple<T::Ring>,
) -> AdditiveShare<T>;
}
/// An implementation of Beaver's multiplication algorithm for shares of
/// `FixedPoint<P>`.
pub struct FPBeaversMul<P: FixedPointParameters>(PhantomData<P>);
impl<P: FixedPointParameters> BeaversMul<FixedPoint<P>> for FPBeaversMul<P> {
fn multiply_blinded_inputs(
party_index: usize,
bl: BlindedInputs<FixedPoint<P>>,
t: &Triple<P::Field>,
) -> AdditiveShare<FixedPoint<P>> {
let alpha = bl.blinded_x.inner;
let beta = bl.blinded_y.inner;
let res = if party_index == 1 {
t.c + (alpha * t.b) + (beta * t.a) + (alpha * beta)
} else {
t.c + (alpha * t.b) + (beta * t.a)
};
AdditiveShare::new(FixedPoint::with_num_muls(res, 1))
}
}
/// An **insecure** method of generating triples. This is intended *purely* for
/// testing purposes.
pub struct InsecureTripleGen<T: Share>(ChaChaRng, PhantomData<T>);
impl<T: Share> InsecureTripleGen<T> {
/// Create a new `Self` from a random seed.
pub fn new(seed: [u8; 32]) -> Self {
Self(ChaChaRng::from_seed(seed), PhantomData)
}
}
impl<T: Share> InsecureTripleGen<T>
where
<T as Share>::Ring: PrimeField,
{
/// Sample a triple for both parties.
pub fn generate_triple_shares(&mut self) -> (Triple<T::Ring>, Triple<T::Ring>) {
let a = T::Ring::uniform(&mut self.0);
let b = T::Ring::uniform(&mut self.0);
let c = a * b;
let a_randomizer = T::Ring::uniform(&mut self.0);
let b_randomizer = T::Ring::uniform(&mut self.0);
let c_randomizer = T::Ring::uniform(&mut self.0);
let party_1_triple = Triple {
a: a - a_randomizer,
b: b - b_randomizer,
c: c - c_randomizer,
};
let party_2_triple = Triple {
a: a_randomizer,
b: b_randomizer,
c: c_randomizer,
};
(party_1_triple, party_2_triple)
}
}
#[cfg(test)]
mod tests {
use super::*;
use algebra::fields::near_mersenne_64::F;
use rand::Rng;
struct TenBitExpParams {}
impl FixedPointParameters for TenBitExpParams {
type Field = F;
const MANTISSA_CAPACITY: u8 = 5;
const EXPONENT_CAPACITY: u8 = 5;
}
type TenBitExpFP = FixedPoint<TenBitExpParams>;
const RANDOMNESS: [u8; 32] = [
0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4,
0x76, 0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a,
0x52, 0xd2,
];
fn generate_random_number<R: Rng>(rng: &mut R) -> (f64, TenBitExpFP) {
let is_neg: bool = rng.gen();
let mul = if is_neg { -10.0 } else { 10.0 };
let float: f64 = rng.gen();
let f = TenBitExpFP::truncate_float(float * mul);
let n = TenBitExpFP::from(f);
(f, n)
}
#[test]
fn test_triple_gen() {
let mut gen = InsecureTripleGen::<TenBitExpFP>::new(RANDOMNESS);
for _ in 0..1000 {
let (t1, t2) = gen.generate_triple_shares();
assert_eq!((t1.a + &t2.a) * (t1.b + &t2.b), (t1.c + &t2.c));
}
}
#[test]
fn test_share_and_blind() {
let mut rng = ChaChaRng::from_seed(RANDOMNESS);
let seed = RANDOMNESS;
let mut gen = InsecureTripleGen::<TenBitExpFP>::new(seed);
for _ in 0..1000 {
let (t1, t2) = gen.generate_triple_shares();
let (_, n1) = generate_random_number(&mut rng);
let (_, n2) = generate_random_number(&mut rng);
let (s11, s12) = n1.share(&mut rng);
let (s21, s22) = n2.share(&mut rng);
let p1_bl_input = FPBeaversMul::share_and_blind_inputs(&s11, &s21, &t1);
let p2_bl_input = FPBeaversMul::share_and_blind_inputs(&s12, &s22, &t2);
let a = t1.a + &t2.a;
let b = t1.b + &t2.b;
assert_eq!(
p1_bl_input.blinded_x.combine(&p2_bl_input.blinded_x),
n1 - TenBitExpFP::new(a)
);
assert_eq!(
p1_bl_input.blinded_y.combine(&p2_bl_input.blinded_y),
n2 - TenBitExpFP::new(b)
);
}
}
#[test]
fn test_reconstruct_blinded_inputs() {
let mut rng = ChaChaRng::from_seed(RANDOMNESS);
let seed = RANDOMNESS;
let mut gen = InsecureTripleGen::<TenBitExpFP>::new(seed);
for _ in 0..1000 {
let (t1, t2) = gen.generate_triple_shares();
let (_, n1) = generate_random_number(&mut rng);
let (_, n2) = generate_random_number(&mut rng);
let (s11, s12) = n1.share(&mut rng);
let (s21, s22) = n2.share(&mut rng);
let p1_bl_input = FPBeaversMul::share_and_blind_inputs(&s11, &s21, &t1);
let p2_bl_input = FPBeaversMul::share_and_blind_inputs(&s12, &s22, &t2);
let (p1_bl_input, p2_bl_input) = (
FPBeaversMul::reconstruct_blinded_inputs(p1_bl_input, p2_bl_input),
FPBeaversMul::reconstruct_blinded_inputs(p2_bl_input, p1_bl_input),
);
let a = t1.a + &t2.a;
let b = t1.b + &t2.b;
assert_eq!(p1_bl_input.blinded_x, p2_bl_input.blinded_x);
assert_eq!(p1_bl_input.blinded_x, n1 - TenBitExpFP::new(a));
assert_eq!(p1_bl_input.blinded_y, p2_bl_input.blinded_y);
assert_eq!(p1_bl_input.blinded_y, n2 - TenBitExpFP::new(b));
}
}
#[test]
fn test_beavers_mul() {
let mut rng = ChaChaRng::from_seed(RANDOMNESS);
let seed = RANDOMNESS;
let mut gen = InsecureTripleGen::<TenBitExpFP>::new(seed);
for _ in 0..1000 {
let (t1, t2) = gen.generate_triple_shares();
let (f1, n1) = generate_random_number(&mut rng);
let (f2, n2) = generate_random_number(&mut rng);
let f3 = f1 * f2;
let n3 = TenBitExpFP::from(f3);
let (s11, s12) = n1.share(&mut rng);
let (s21, s22) = n2.share(&mut rng);
let p1_bl_input = FPBeaversMul::share_and_blind_inputs(&s11, &s21, &t1);
let p2_bl_input = FPBeaversMul::share_and_blind_inputs(&s12, &s22, &t2);
let (p1_bl_input, p2_bl_input) = (
FPBeaversMul::reconstruct_blinded_inputs(p1_bl_input, p2_bl_input),
FPBeaversMul::reconstruct_blinded_inputs(p2_bl_input, p1_bl_input),
);
let s31 = FPBeaversMul::multiply_blinded_inputs(1, p1_bl_input, &t1);
let s32 = FPBeaversMul::multiply_blinded_inputs(2, p2_bl_input, &t2);
let n4 = s31.combine(&s32);
assert_eq!(
n4, n3,
"test failed with f1 = {:?}, f2 = {:?}, f3 = {:?}",
f1, f2, f3
);
}
}
#[test]
fn test_beavers_mul_with_trunc() {
let mut rng = ChaChaRng::from_seed(RANDOMNESS);
let seed = RANDOMNESS;
let mut gen = InsecureTripleGen::<TenBitExpFP>::new(seed);
for _ in 0..1000 {
let (t1, t2) = gen.generate_triple_shares();
let (f1, n1) = generate_random_number(&mut rng);
let (f2, n2) = generate_random_number(&mut rng);
let f3 = f1 * f2;
let n3 = TenBitExpFP::from(f3);
let (s11, s12) = n1.share(&mut rng);
let (s21, s22) = n2.share(&mut rng);
let p1_bl_input = FPBeaversMul::share_and_blind_inputs(&s11, &s21, &t1);
let p2_bl_input = FPBeaversMul::share_and_blind_inputs(&s12, &s22, &t2);
let (p1_bl_input, p2_bl_input) = (
FPBeaversMul::reconstruct_blinded_inputs(p1_bl_input, p2_bl_input),
FPBeaversMul::reconstruct_blinded_inputs(p2_bl_input, p1_bl_input),
);
let mut s31 = FPBeaversMul::multiply_blinded_inputs(1, p1_bl_input, &t1);
let mut s32 = FPBeaversMul::multiply_blinded_inputs(2, p2_bl_input, &t2);
s31.inner.signed_reduce_in_place();
s32.inner.signed_reduce_in_place();
let n4 = s31.combine(&s32);
assert_eq!(
n4, n3,
"test failed with f1 = {:?}, f2 = {:?}, f3 = {:?}",
f1, f2, f3
);
}
}
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/crypto-primitives/src/lib.rs | rust/crypto-primitives/src/lib.rs | //! Some core cryptographic primitives used by Delphi.
#![deny(unused_import_braces, unused_qualifications, trivial_casts)]
#![deny(trivial_numeric_casts, private_in_public, variant_size_differences)]
#![deny(stable_features, unreachable_pub, non_shorthand_field_patterns)]
#![deny(unused_attributes, unused_imports, unused_mut, missing_docs)]
#![deny(renamed_and_removed_lints, stable_features, unused_allocation)]
#![deny(unused_comparisons, bare_trait_objects, unused_must_use)]
#![forbid(unsafe_code)]
/// Defines `struct`s and `trait`s for constructing additively-shared ring
/// elements.
pub mod additive_share;
/// Defines `struct`s and `trait`s for multiplying additive shares.
pub mod beavers_mul;
/// Generates a circuit for computing the ReLU of an additively shared value.
pub mod gc;
pub use additive_share::{AdditiveShare, Share};
pub use beavers_mul::*;
pub use gc::relu;
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/crypto-primitives/src/gc.rs | rust/crypto-primitives/src/gc.rs | #![allow(non_snake_case)]
use algebra::{BitIterator, FixedPointParameters, Fp64Parameters, FpParameters, PrimeField};
pub use fancy_garbling;
use fancy_garbling::{
circuit::CircuitBuilder, error::CircuitBuilderError, util, BinaryBundle, BinaryGadgets,
BundleGadgets, Fancy,
};
#[inline(always)]
fn mux_single_bit<F: Fancy>(
f: &mut F,
b: &F::Item,
x: &F::Item,
y: &F::Item,
) -> Result<F::Item, F::Error> {
let y_plus_x = f.add(x, y)?;
let res = f.mul(b, &y_plus_x)?;
f.add(&x, &res)
}
/// If `b = 0` returns `x` else `y`.
///
/// `b` must be mod 2 but `x` and `y` can be have any modulus.
fn mux<F: Fancy>(
f: &mut F,
b: &F::Item,
x: &BinaryBundle<F::Item>,
y: &BinaryBundle<F::Item>,
) -> Result<Vec<F::Item>, F::Error> {
x.wires()
.iter()
.zip(y.wires())
.map(|(x, y)| mux_single_bit(f, b, x, y))
.collect()
}
#[inline]
fn mod_p_helper<F: Fancy>(
b: &mut F,
neg_p: &BinaryBundle<F::Item>,
bits: &BinaryBundle<F::Item>,
) -> Result<BinaryBundle<F::Item>, F::Error> {
let (result, borrow) = b.bin_addition(&bits, &neg_p)?;
// If p underflowed, then we want the result, otherwise we're fine with the
// original.
mux(b, &borrow, &bits, &result).map(BinaryBundle::new)
}
/// Binary adder. Returns the result and the carry.
fn adder_const<F: Fancy>(
f: &mut F,
x: &F::Item,
y: &F::Item,
b: bool,
carry_in: Option<&F::Item>,
) -> Result<(F::Item, Option<F::Item>), F::Error> {
if let Some(c) = carry_in {
let z1 = f.xor(x, y)?;
let z2 = f.xor(&z1, c)?;
let z3 = f.xor(x, c)?;
let z4 = f.and(&z1, &z3)?;
let carry = f.xor(&z4, x)?;
Ok((z2, Some(carry)))
} else {
let z = f.xor(x, y)?;
let carry = if !b { None } else { Some(f.and(x, y)?) };
Ok((z, carry))
}
}
fn neg_p_over_2_helper<F: Fancy>(
f: &mut F,
neg_p_over_2: u128,
neg_p_over_2_bits: &BinaryBundle<F::Item>,
bits: &BinaryBundle<F::Item>,
) -> Result<BinaryBundle<F::Item>, F::Error> {
let xwires = bits.wires();
let ywires = neg_p_over_2_bits.wires();
let mut neg_p_over_2 = BitIterator::new([neg_p_over_2 as u64]).collect::<Vec<_>>();
neg_p_over_2.reverse();
let mut neg_p_over_2 = neg_p_over_2.into_iter();
let mut seen_one = neg_p_over_2.next().unwrap();
let (mut z, mut c) = adder_const(f, &xwires[0], &ywires[0], seen_one, None)?;
let mut bs = vec![z];
for ((x, y), b) in xwires[1..(xwires.len() - 1)]
.iter()
.zip(&ywires[1..])
.zip(neg_p_over_2)
{
seen_one |= b;
let res = adder_const(f, x, y, seen_one, c.as_ref())?;
z = res.0;
c = res.1;
bs.push(z);
}
z = f.add_many(&[
xwires.last().unwrap().clone(),
ywires.last().unwrap().clone(),
c.unwrap(),
])?;
bs.push(z);
Ok(BinaryBundle::new(bs))
}
/// Compute the number of bits needed to represent `p`, plus one.
#[inline]
pub fn num_bits(p: u128) -> usize {
(p.next_power_of_two() * 2).trailing_zeros() as usize
}
/// Compute the `ReLU` of `n` over the field `P::Field`.
pub fn relu<P: FixedPointParameters>(
b: &mut CircuitBuilder,
n: usize,
) -> Result<(), CircuitBuilderError>
where
<P::Field as PrimeField>::Params: Fp64Parameters,
P::Field: PrimeField<BigInt = <<P::Field as PrimeField>::Params as FpParameters>::BigInt>,
{
let p = u128::from(<<P::Field as PrimeField>::Params>::MODULUS.0);
let exponent_size = P::EXPONENT_CAPACITY as usize;
let p_over_2 = p / 2;
// Convert to two's complement
let neg_p_over_2 = !p_over_2 + 1;
// Convert to two's complement. Equivalent to `let neg_p = -(p as i128) as u128;
let neg_p = !p + 1;
let q = 2;
let num_bits = num_bits(p);
let moduli = vec![q; num_bits];
// Construct constant for addition with neg p
let neg_p = b.bin_constant_bundle(neg_p, num_bits)?;
let neg_p_over_2_bits = b
.constant_bundle(&util::u128_to_bits(neg_p_over_2, num_bits), &moduli)?
.into();
let zero = b.constant(0, 2)?;
let one = b.constant(1, 2)?;
for _ in 0..n {
let s1 = BinaryBundle::new(b.evaluator_inputs(&moduli));
let s2 = BinaryBundle::new(b.garbler_inputs(&moduli));
let s2_next = BinaryBundle::new(b.garbler_inputs(&moduli));
// Add secret shares as integers
let res = b.bin_addition_no_carry(&s1, &s2)?;
// Take the result mod p;
let layer_input = mod_p_helper(b, &neg_p, &res).unwrap();
// Compare with p/2
// Since we take > p/2 as negative, if the number is less than p/2, it is
// positive.
let res = neg_p_over_2_helper(b, neg_p_over_2, &neg_p_over_2_bits, &layer_input)?;
// Take the sign bit
let zs_is_positive = res.wires().last().unwrap();
// Compute the relu
let mut relu_res = Vec::with_capacity(num_bits);
let relu_6_size = exponent_size + 3;
// We choose 5 arbitrarily here; the idea is that we won't see values of
// greater than 2^8.
// We then drop the larger bits
for wire in layer_input.wires().iter().take(relu_6_size + 5) {
relu_res.push(b.and(&zs_is_positive, wire)?);
}
let is_seven = b.and_many(&relu_res[(exponent_size + 1)..relu_6_size])?;
let some_higher_bit_is_set = b.or_many(&relu_res[relu_6_size..])?;
let should_be_six = b.or(&some_higher_bit_is_set, &is_seven)?;
for wire in &mut relu_res[relu_6_size..] {
*wire = zero;
}
let lsb = &mut relu_res[exponent_size];
*lsb = mux_single_bit(b, &should_be_six, lsb, &zero)?;
let middle_bit = &mut relu_res[exponent_size + 1];
*middle_bit = mux_single_bit(b, &should_be_six, middle_bit, &one)?;
let msb = &mut relu_res[exponent_size + 2];
*msb = mux_single_bit(b, &should_be_six, msb, &one)?;
for wire in &mut relu_res[..exponent_size] {
*wire = mux_single_bit(b, &should_be_six, wire, &zero)?;
}
relu_res.extend(std::iter::repeat(zero).take(num_bits - relu_6_size - 5));
let relu_res = BinaryBundle::new(relu_res);
let res = b.bin_addition_no_carry(&relu_res, &s2_next)?;
let next_share = mod_p_helper(b, &neg_p, &res)?;
b.output_bundle(&next_share)?;
}
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use crate::Share;
use algebra::{fields::near_mersenne_64::F, *};
use fancy_garbling::circuit::CircuitBuilder;
use rand::{thread_rng, Rng};
struct TenBitExpParams {}
impl FixedPointParameters for TenBitExpParams {
type Field = F;
const MANTISSA_CAPACITY: u8 = 3;
const EXPONENT_CAPACITY: u8 = 10;
}
type TenBitExpFP = FixedPoint<TenBitExpParams>;
fn generate_random_number<R: Rng>(rng: &mut R) -> (f64, TenBitExpFP) {
let is_neg: bool = rng.gen();
let mul = if is_neg { -10.0 } else { 10.0 };
let float: f64 = rng.gen();
let f = TenBitExpFP::truncate_float(float * mul);
let n = TenBitExpFP::from(f);
(f, n)
}
/// Compute the product of some u16s as a u128.
#[inline]
pub(crate) fn product(xs: &[u16]) -> u128 {
xs.iter().fold(1, |acc, &x| acc * x as u128)
}
#[test]
pub(crate) fn test_relu() {
// TODO: There is currently an off-by-one in this test that causes it
// to fail occasionally
let mut rng = thread_rng();
let n = 42;
let q = 2;
let p = <F as PrimeField>::Params::MODULUS.0 as u128;
let Q = product(&vec![q; n]);
println!("n={} q={} Q={}", n, q, Q);
let mut b = CircuitBuilder::new();
relu::<TenBitExpParams>(&mut b, 1).unwrap();
let mut c = b.finish();
let _ = c.print_info();
let zero = TenBitExpFP::zero();
let six = TenBitExpFP::from(6.0);
for i in 0..10000 {
let (_, n1) = generate_random_number(&mut rng);
let (s1, s2) = n1.share(&mut rng);
let res_should_be_fp = if n1 <= zero {
zero
} else if n1 > six {
six
} else {
n1
};
let res_should_be = res_should_be_fp.inner.into_repr().0 as u128;
let z1 = F::uniform(&mut rng).into_repr().0 as u128;
let res_should_be = (res_should_be + z1) % p;
let s1 = s1.inner.inner.into_repr().0 as u128;
let mut garbler_inputs = util::u128_to_bits(s1, n);
garbler_inputs.extend_from_slice(&util::u128_to_bits(z1, n));
let s2 = s2.inner.inner.into_repr().0 as u128;
let evaluator_inputs = util::u128_to_bits(s2, n);
let (en, ev) = fancy_garbling::garble(&mut c).unwrap();
let xs = en.encode_garbler_inputs(&garbler_inputs);
let ys = en.encode_evaluator_inputs(&evaluator_inputs);
let garbled_eval_results = ev.eval(&mut c, &xs, &ys).unwrap();
let evaluated_results = c.eval_plain(&garbler_inputs, &evaluator_inputs).unwrap();
assert!(
util::u128_from_bits(&evaluated_results).abs_diff(res_should_be) <= 1,
"Iteration {}, Pre-ReLU value is {}, value should be {}, {:?}",
i,
n1,
res_should_be_fp,
res_should_be_fp
);
assert!(
util::u128_from_bits(&garbled_eval_results).abs_diff(res_should_be) <= 1,
"Iteration {}, Pre-ReLU value is {}, value should be {}, {:?}",
i,
n1,
res_should_be_fp,
res_should_be_fp
);
}
}
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/crypto-primitives/src/additive_share.rs | rust/crypto-primitives/src/additive_share.rs | use rand_core::{CryptoRng, RngCore};
use std::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign};
use algebra::{
fixed_point::{FixedPoint, FixedPointParameters},
UniformRandom,
};
use num_traits::Zero;
use serde::{Deserialize, Serialize};
/// Represents a type that can be additively shared.
/// Represents a type that can be additively shared.
///
/// A value `x` is split into two shares `(s1, s2)` such that `s1 + s2 == x`;
/// neither share alone reveals anything about `x` (given uniform randomness).
pub trait Share:
    Sized
    + Clone
    + Copy
    + std::fmt::Debug
    + Eq
    + Serialize
    + for<'de> Deserialize<'de>
    + Add<Self, Output = Self>
    + Sub<Self, Output = Self>
    + Mul<<Self as Share>::Constant, Output = Self>
    + Add<<Self as Share>::Constant, Output = Self>
    + Neg<Output = Self>
    + AddAssign<Self>
    + SubAssign<Self>
    + MulAssign<<Self as Share>::Constant>
    + AddAssign<<Self as Share>::Constant>
{
    /// The underlying ring that the shares are created over.
    type Ring: for<'a> Add<&'a Self::Ring, Output = Self::Ring>
        + for<'a> Sub<&'a Self::Ring, Output = Self::Ring>
        + Copy
        + Zero
        + Neg<Output = Self::Ring>
        + UniformRandom;
    /// The type of public constants that shares can be multiplied by or
    /// added to (see the `Mul`/`Add` supertrait bounds above).
    type Constant: Into<Self>;
    /// Create shares for `self` using fresh randomness sampled from `rng`.
    fn share<R: RngCore + CryptoRng>(
        &self,
        rng: &mut R,
    ) -> (AdditiveShare<Self>, AdditiveShare<Self>) {
        let r = Self::Ring::uniform(rng);
        self.share_with_randomness(&r)
    }
    /// Create shares for `self` using randomness `r`.
    fn share_with_randomness(&self, r: &Self::Ring) -> (AdditiveShare<Self>, AdditiveShare<Self>);
    /// Randomize a share `s` with randomness `r`.
    fn randomize_local_share(s: &AdditiveShare<Self>, r: &Self::Ring) -> AdditiveShare<Self>;
}
#[derive(Default, Hash, Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)]
#[serde(bound = "T: Share")]
#[must_use]
/// Represents an additive share of `T`.
pub struct AdditiveShare<T: Share> {
    /// The secret share.
    pub inner: T,
}
impl<T: Share> AdditiveShare<T> {
    /// Construct a new share from `inner`.
    #[inline]
    pub fn new(inner: T) -> Self {
        Self { inner }
    }
    /// Combine two additive shares to obtain the shared value
    /// (reconstruction is simply `self.inner + other.inner`).
    pub fn combine(&self, other: &Self) -> T {
        self.inner + other.inner
    }
    /// Add a constant to the share, consuming and returning it.
    #[inline]
    pub fn add_constant(mut self, other: T::Constant) -> Self {
        self.inner += other;
        self
    }
    /// Add a constant to the share in place.
    #[inline]
    pub fn add_constant_in_place(&mut self, other: T::Constant) {
        self.inner += other;
    }
}
impl<T: Share + Zero> Zero for AdditiveShare<T> {
    /// The additive identity share (wraps `T::zero()`).
    fn zero() -> Self {
        Self::new(T::zero())
    }
    fn is_zero(&self) -> bool {
        self.inner.is_zero()
    }
}
impl<P: FixedPointParameters> AdditiveShare<FixedPoint<P>> {
    /// Double the share; doubling each local share doubles the
    /// reconstructed value.
    #[inline]
    pub fn double(&self) -> Self {
        let mut result = *self;
        result.inner.double_in_place();
        result
    }
    /// Double the share in place.
    #[inline]
    pub fn double_in_place(&mut self) -> &mut Self {
        self.inner.double_in_place();
        self
    }
}
/// Iterate over `self.inner` as `u64`s.
pub struct ShareIterator<T: Share + IntoIterator<Item = u64>> {
    inner: <T as IntoIterator>::IntoIter,
}
impl<T: Share + IntoIterator<Item = u64>> Iterator for ShareIterator<T> {
    type Item = u64;
    fn next(&mut self) -> Option<Self::Item> {
        self.inner.next()
    }
    // Forward the size hint so adaptors (and `ExactSizeIterator`) stay accurate.
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
impl<T: Share + IntoIterator<Item = u64>> ExactSizeIterator for ShareIterator<T> {}
impl<T: Share + IntoIterator<Item = u64>> IntoIterator for AdditiveShare<T> {
    type Item = u64;
    type IntoIter = ShareIterator<T>;
    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        Self::IntoIter {
            inner: self.inner.into_iter(),
        }
    }
}
impl<T: Share + std::iter::FromIterator<u64>> std::iter::FromIterator<u64> for AdditiveShare<T> {
    /// Creates a share from an iterator over limbs in little-endian order.
    #[inline]
    fn from_iter<I: IntoIterator<Item = u64>>(iter: I) -> Self {
        Self::new(T::from_iter(iter))
    }
}
// Share arithmetic is component-wise: adding/subtracting/negating shares of
// `x` and `y` yields valid shares of `x + y` / `x - y` / `-x`, and
// multiplying BOTH shares by a public constant `c` yields shares of `c * x`.
impl<T: Share> Add<Self> for AdditiveShare<T> {
    type Output = Self;
    #[inline]
    fn add(mut self, other: Self) -> Self {
        self.inner = self.inner + other.inner;
        self
    }
}
impl<T: Share> AddAssign<Self> for AdditiveShare<T> {
    #[inline]
    fn add_assign(&mut self, other: Self) {
        self.inner += other.inner;
    }
}
impl<T: Share> Sub for AdditiveShare<T> {
    type Output = Self;
    #[inline]
    fn sub(mut self, other: Self) -> Self {
        self.inner -= other.inner;
        self
    }
}
impl<T: Share> SubAssign for AdditiveShare<T> {
    #[inline]
    fn sub_assign(&mut self, other: Self) {
        self.inner -= other.inner;
    }
}
impl<T: Share> Neg for AdditiveShare<T> {
    type Output = Self;
    #[inline]
    fn neg(mut self) -> Self {
        self.inner = -self.inner;
        self
    }
}
impl<T: Share> Mul<T::Constant> for AdditiveShare<T> {
    type Output = Self;
    #[inline]
    fn mul(mut self, other: T::Constant) -> Self {
        self *= other;
        self
    }
}
impl<T: Share> MulAssign<T::Constant> for AdditiveShare<T> {
    #[inline]
    fn mul_assign(&mut self, other: T::Constant) {
        self.inner *= other;
    }
}
// Wrap/unwrap conversions between a value and its (trivial) share.
impl<T: Share> From<T> for AdditiveShare<T> {
    #[inline]
    fn from(other: T) -> Self {
        Self { inner: other }
    }
}
impl<P: FixedPointParameters> From<AdditiveShare<FixedPoint<P>>> for FixedPoint<P> {
    #[inline]
    fn from(other: AdditiveShare<FixedPoint<P>>) -> Self {
        other.inner
    }
}
/// Operations on shares mimic those of `FixedPoint<P>` itself.
/// This means that
/// * Multiplication by a constant does not automatically truncate the result;
/// * Addition, subtraction, and addition by a constant automatically
/// promote the result to have the correct number of multiplications (max(in1,
/// in2));
/// * `signed_reduce` behaves the same on `FixedPoint<P>` and
/// `AdditiveShare<FixedPoint<P>>`.
impl<P: FixedPointParameters> Share for FixedPoint<P> {
    type Ring = P::Field;
    type Constant = Self;
    /// Split `self` into shares `(self + r, -r)`, which reconstruct to `self`.
    #[inline]
    fn share_with_randomness(&self, r: &Self::Ring) -> (AdditiveShare<Self>, AdditiveShare<Self>) {
        let mut cur = *self;
        cur.inner += r;
        (AdditiveShare::new(cur), AdditiveShare::new(Self::new(-*r)))
    }
    /// Re-randomize a local share by adding `r` to its underlying field element.
    #[inline]
    fn randomize_local_share(cur: &AdditiveShare<Self>, r: &Self::Ring) -> AdditiveShare<Self> {
        let mut cur = *cur;
        cur.inner.inner += r;
        cur
    }
}
// Unit tests: each test shares random fixed-point values, performs an
// operation on the shares locally, and checks that reconstruction
// (`combine`) matches the same operation on the plain values.
#[cfg(test)]
mod tests {
    use super::*;
    use algebra::fields::near_mersenne_64::F;
    use rand::{Rng, SeedableRng};
    use rand_chacha::ChaChaRng;
    struct TenBitExpParams {}
    impl FixedPointParameters for TenBitExpParams {
        type Field = F;
        const MANTISSA_CAPACITY: u8 = 5;
        const EXPONENT_CAPACITY: u8 = 5;
    }
    type TenBitExpFP = FixedPoint<TenBitExpParams>;
    // type FPShare = AdditiveShare<TenBitExpFP>;
    // Fixed seed so test failures are reproducible.
    const RANDOMNESS: [u8; 32] = [
        0x99, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4,
        0x76, 0x5d, 0xc9, 0x8d, 0x62, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a,
        0x52, 0xd2,
    ];
    // Samples a float in (-10, 10), truncated to the fixed-point grid, and
    // returns it together with its fixed-point encoding.
    fn generate_random_number<R: Rng>(rng: &mut R) -> (f64, TenBitExpFP) {
        let is_neg: bool = rng.gen();
        let mul = if is_neg { -10.0 } else { 10.0 };
        let float: f64 = rng.gen();
        let f = TenBitExpFP::truncate_float(float * mul);
        let n = TenBitExpFP::from(f);
        (f, n)
    }
    // Sharing then combining must round-trip the value.
    #[test]
    fn test_share_combine() {
        let mut rng = ChaChaRng::from_seed(RANDOMNESS);
        for _ in 0..1000 {
            let (_, n) = generate_random_number(&mut rng);
            let (s1, s2) = n.share(&mut rng);
            assert_eq!(s1.combine(&s2), n);
        }
    }
    // Doubling both shares doubles the reconstructed value.
    #[test]
    fn test_double() {
        let mut rng = ChaChaRng::from_seed(RANDOMNESS);
        for _ in 0..1000 {
            let (_, n) = generate_random_number(&mut rng);
            let (mut s1, mut s2) = n.share(&mut rng);
            s1.double_in_place();
            s2.double_in_place();
            assert_eq!(s1.combine(&s2), n.double());
        }
    }
    // Negating both shares negates the reconstructed value.
    #[test]
    fn test_neg() {
        let mut rng = ChaChaRng::from_seed(RANDOMNESS);
        for _ in 0..1000 {
            let (_, n) = generate_random_number(&mut rng);
            let (mut s1, mut s2) = n.share(&mut rng);
            s1 = -s1;
            s2 = -s2;
            assert_eq!(s1.combine(&s2), -n);
        }
    }
    // Multiplying both shares by a public constant scales the value.
    #[test]
    fn test_mul_by_const() {
        let mut rng = ChaChaRng::from_seed(RANDOMNESS);
        for _ in 0..1000 {
            let (_, n1) = generate_random_number(&mut rng);
            let (_, n2) = generate_random_number(&mut rng);
            let (mut s1, mut s2) = n1.share(&mut rng);
            s1 = s1 * n2;
            s2 = s2 * n2;
            assert_eq!(s1.combine(&s2), n1 * n2);
        }
    }
    // Same as above, but also checks that `signed_reduce` on each share
    // agrees with the (automatically reduced) plain product.
    #[test]
    fn test_mul_by_const_with_trunc() {
        let mut rng = ChaChaRng::from_seed(RANDOMNESS);
        for _ in 0..1000 {
            let (_, n1) = generate_random_number(&mut rng);
            let (_, n2) = generate_random_number(&mut rng);
            let (mut s1, mut s2) = n1.share(&mut rng);
            s1 = s1 * n2;
            s2 = s2 * n2;
            s1.inner.signed_reduce_in_place();
            s2.inner.signed_reduce_in_place();
            assert_eq!(s1.combine(&s2), n1 * n2);
        }
    }
    // Share-wise addition reconstructs to the sum.
    #[test]
    fn test_add() {
        let mut rng = ChaChaRng::from_seed(RANDOMNESS);
        for _ in 0..1000 {
            let (f1, n1) = generate_random_number(&mut rng);
            let (f2, n2) = generate_random_number(&mut rng);
            let f3 = f1 + f2;
            let n3 = TenBitExpFP::from(f3);
            let (s11, s12) = n1.share(&mut rng);
            let (s21, s22) = n2.share(&mut rng);
            let s31 = s11 + s21;
            let s32 = s12 + s22;
            assert_eq!(
                s31.combine(&s32),
                n3,
                "test failed with f1 = {:?}, f2 = {:?}, f3 = {:?}",
                f1,
                f2,
                f3
            );
        }
    }
    // Share-wise subtraction reconstructs to the difference.
    #[test]
    fn test_sub() {
        let mut rng = ChaChaRng::from_seed(RANDOMNESS);
        for _ in 0..1000 {
            let (f1, n1) = generate_random_number(&mut rng);
            let (f2, n2) = generate_random_number(&mut rng);
            let f3 = f1 - f2;
            let n3 = TenBitExpFP::from(f3);
            let (s11, s12) = n1.share(&mut rng);
            let (s21, s22) = n2.share(&mut rng);
            let s31 = s11 - s21;
            let s32 = s12 - s22;
            assert_eq!(
                s31.combine(&s32),
                n3,
                "test failed with f1 = {:?}, f2 = {:?}, f3 = {:?}",
                f1,
                f2,
                f3
            );
        }
    }
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/crypto-primitives/benches/beavers_mul.rs | rust/crypto-primitives/benches/beavers_mul.rs | use criterion::{criterion_group, criterion_main, Criterion};
// use itertools::Itertools;
use std::time::Duration;
use algebra::{fields::near_mersenne_64::F, fixed_point::*};
use crypto_primitives::{additive_share::Share, beavers_mul::*};
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;
/// Samples a random value in (-10, 10), truncated to the fixed-point grid,
/// returning both the float and its fixed-point encoding.
fn generate_random_number<R: Rng>(rng: &mut R) -> (f64, TenBitExpFP) {
    // Draw a sign/scale first, then a uniform float in [0, 1),
    // preserving the original order of RNG calls.
    let scale = if rng.gen::<bool>() { -10.0 } else { 10.0 };
    let sample: f64 = rng.gen();
    // Snap to the representable fixed-point grid so the pair is exact.
    let truncated = TenBitExpFP::truncate_float(sample * scale);
    (truncated, TenBitExpFP::from(truncated))
}
// Fixed-point layout used by the benchmark: 5-bit mantissa, 5-bit exponent.
struct TenBitExpParams {}
impl FixedPointParameters for TenBitExpParams {
    type Field = F;
    const MANTISSA_CAPACITY: u8 = 5;
    const EXPONENT_CAPACITY: u8 = 5;
}
type TenBitExpFP = FixedPoint<TenBitExpParams>;
// Fixed seed so benchmark inputs are reproducible across runs.
const RANDOMNESS: [u8; 32] = [
    0x11, 0xe0, 0x8f, 0xbc, 0x89, 0xa7, 0x34, 0x01, 0x45, 0x86, 0x82, 0xb6, 0x51, 0xda, 0xf4, 0x76,
    0x5d, 0xc9, 0x8d, 0xea, 0x23, 0xf2, 0x90, 0x8f, 0x9d, 0x03, 0xf2, 0x77, 0xd3, 0x4a, 0x52, 0xd2,
];
/// Benchmarks one full Beaver-triple multiplication round (blind, exchange,
/// multiply, reduce, combine) over a pool of 1000 precomputed inputs,
/// repeated `num` times per iteration.
fn bench_beavers_mul(c: &mut Criterion) {
    c.bench_function_over_inputs(
        // The id is a plain string literal; `&format!(...)` allocated for nothing
        // (clippy: useless_format).
        "beavers_mul",
        move |bench, num| {
            let mut rng = ChaChaRng::from_seed(RANDOMNESS);
            let seed = RANDOMNESS;
            let mut gen = InsecureTripleGen::<TenBitExpFP>::new(seed);
            // Precompute triples and shared inputs outside the timed loop.
            let mut inputs = vec![];
            for _ in 0..1000 {
                let (t1, t2) = gen.generate_triple_shares();
                let (_, n1) = generate_random_number(&mut rng);
                let (_, n2) = generate_random_number(&mut rng);
                let (s11, s12) = n1.share(&mut rng);
                let (s21, s22) = n2.share(&mut rng);
                inputs.push([(s11, s12, t1), (s21, s22, t2)]);
            }
            let mut i = 0;
            bench.iter(|| {
                for _ in 0..*num {
                    // Cycle through the precomputed pool.
                    let [(s11, s12, t1), (s21, s22, t2)] = inputs[i % 1000].clone();
                    // Each party blinds its inputs with its triple share...
                    let p1_bl_input = FPBeaversMul::share_and_blind_inputs(&s11, &s21, &t1);
                    let p2_bl_input = FPBeaversMul::share_and_blind_inputs(&s12, &s22, &t2);
                    // ...the blinded values are exchanged and reconstructed...
                    let (p1_bl_input, p2_bl_input) = (
                        FPBeaversMul::reconstruct_blinded_inputs(p1_bl_input, p2_bl_input),
                        FPBeaversMul::reconstruct_blinded_inputs(p2_bl_input, p1_bl_input),
                    );
                    // ...and each party derives its share of the product.
                    let mut s31 = FPBeaversMul::multiply_blinded_inputs(1, p1_bl_input, &t1);
                    let mut s32 = FPBeaversMul::multiply_blinded_inputs(2, p2_bl_input, &t2);
                    s31.inner.signed_reduce_in_place();
                    s32.inner.signed_reduce_in_place();
                    let n4 = s31.combine(&s32);
                    i += 1;
                    // black_box prevents the optimizer from eliding the work.
                    let _ = criterion::black_box(n4);
                }
            });
        },
        vec![1, 10, 100, 1000, 10000],
    );
}
// Criterion harness: a short warm-up keeps the (expensive) MPC benchmark fast.
criterion_group! {
    name = beavers_mul;
    config = Criterion::default().warm_up_time(Duration::from_millis(100));
    targets = bench_beavers_mul
}
criterion_main!(beavers_mul);
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/crypto-primitives/benches/garbling.rs | rust/crypto-primitives/benches/garbling.rs | use criterion::{criterion_group, criterion_main, Bencher, Criterion};
use std::time::Duration;
use fancy_garbling::{
circuit::{Circuit, CircuitBuilder},
util::RngExt,
};
use algebra::{fields::near_mersenne_64::F, *};
// Fixed-point layout used by the garbling benchmarks.
// NOTE(review): the (3, 10) mantissa/exponent split differs from the (5, 5)
// split used in the beavers_mul benchmark and the crate's unit tests —
// confirm this is intentional.
struct TenBitExpParams {}
impl FixedPointParameters for TenBitExpParams {
    type Field = F;
    const MANTISSA_CAPACITY: u8 = 3;
    const EXPONENT_CAPACITY: u8 = 10;
}
/// Builds a circuit computing `n` parallel (six-)ReLU gadgets over the
/// `TenBitExpParams` fixed-point encoding.
fn make_relu(n: usize) -> Circuit {
    let mut builder = CircuitBuilder::new();
    crypto_primitives::gc::relu::<TenBitExpParams>(&mut builder, n)
        .unwrap();
    builder.finish()
}
/// Benchmarks garbling of a `num`-ReLU circuit (circuit construction is
/// excluded from the timed loop).
fn relu_gb(c: &mut Criterion) {
    c.bench_function_over_inputs(
        // The id parameter is `&str`; the extra `&` on the literal only
        // worked via `&&str -> &str` deref coercion (clippy: needless_borrow).
        "relu_gb",
        move |bench: &mut Bencher, &num: &&usize| {
            let mut c = make_relu(*num);
            bench.iter(|| {
                let gb = fancy_garbling::garble(&mut c).unwrap();
                // black_box prevents the optimizer from eliding the garbling.
                criterion::black_box(gb);
            });
        },
        &[1, 10, 100usize],
    );
}
/// Benchmarks evaluation of a garbled `num`-ReLU circuit on random inputs.
fn relu_ev(c: &mut Criterion) {
    c.bench_function_over_inputs(
        &"relu_ev",
        move |bench: &mut Bencher, &num: &&usize| {
            let mut rng = rand::thread_rng();
            let mut c = make_relu(*num);
            // Garbling and input encoding happen once, outside the timed
            // loop; only evaluation is measured.
            let (en, ev) = fancy_garbling::garble(&mut c).unwrap();
            let gb_inps: Vec<_> = (0..c.num_garbler_inputs())
                .map(|i| rng.gen_u16() % c.garbler_input_mod(i))
                .collect();
            let ev_inps: Vec<_> = (0..c.num_evaluator_inputs())
                .map(|i| rng.gen_u16() % c.evaluator_input_mod(i))
                .collect();
            let xs = en.encode_garbler_inputs(&gb_inps);
            let ys = en.encode_evaluator_inputs(&ev_inps);
            bench.iter(|| {
                let ys = ev.eval(&mut c, &xs, &ys).unwrap();
                criterion::black_box(ys);
            });
        },
        &[1, 10, 100usize],
    );
}
// Criterion harness: short warm-up and a small sample size keep the
// (expensive) garbling benchmarks fast.
criterion_group! {
    name = garbling;
    config = Criterion::default().warm_up_time(Duration::from_millis(100)).sample_size(10);
    targets = relu_gb, relu_ev,
}
criterion_main!(garbling);
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/protocols-sys/build.rs | rust/protocols-sys/build.rs | use cmake::Config;
use std::{env, path::PathBuf};
/// Build script: compiles the Delphi C++ library via CMake, emits the link
/// directives for cargo, and regenerates the FFI bindings with bindgen.
fn main() {
    // CARGO_MANIFEST_DIR is always set by cargo for build scripts.
    let project_dir = PathBuf::from(
        env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR is set by cargo"),
    );
    let cplus_header = project_dir.join("c++/src/lib/interface.h");
    // Build Delphi
    let delphi_install_prefix = Config::new("c++").define("UNITTESTS", "0").build();
    println!(
        "cargo:rustc-link-search={}",
        delphi_install_prefix.display()
    );
    println!(
        "cargo:rustc-link-search={}/lib",
        delphi_install_prefix.display()
    );
    // Link libraries
    println!("cargo:rustc-link-lib=static=seal-3.6");
    println!("cargo:rustc-link-lib=static=DelphiOffline");
    println!("cargo:rustc-link-lib=dylib=stdc++");
    // Tell cargo to invalidate the built crate whenever the wrapper changes
    println!("cargo:rerun-if-changed={}", cplus_header.to_str().unwrap());
    // Run bindgen on the C++ interface header. `no_copy` stops bindgen from
    // deriving Copy/Clone for types that own C-side resources.
    let bindings = bindgen::Builder::default()
        .no_copy("ServerFHE|ClientFHE|ServerTriples|ClientTriples")
        // `.header` takes anything Into<String>; the previous
        // `format!("{}", ...)` wrapper was a needless allocation
        // (clippy: useless_format).
        .header(cplus_header.to_str().unwrap())
        .parse_callbacks(Box::new(bindgen::CargoCallbacks))
        .generate()
        .expect("Unable to generate bindings");
    // Write the bindings to src/bindings.rs
    bindings
        .write_to_file(project_dir.join("src/bindings.rs"))
        .expect("Couldn't write bindings!");
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/protocols-sys/src/server_cg.rs | rust/protocols-sys/src/server_cg.rs | use crate::*;
use neural_network::{
layers::{convolution::Padding, LinearLayer},
tensors::{Kernel, Output},
};
use std::os::raw::c_char;
/// Server-side ciphertext generator for a 2-D convolution layer.
pub struct Conv2D<'a> {
    // Layer metadata computed by the C library's `conv_metadata`.
    data: Metadata,
    // Borrowed server FHE keys; must outlive this generator.
    sfhe: &'a ServerFHE,
    // C-allocated preprocessed kernel masks; freed in `Drop`.
    masks: *mut *mut *mut *mut c_char,
    // Set by `preprocess`, updated by `process`; None until preprocessing.
    shares: Option<ServerShares>,
}
/// Server-side ciphertext generator for a fully-connected layer.
pub struct FullyConnected<'a> {
    // Layer metadata computed by the C library's `fc_metadata`.
    data: Metadata,
    // Borrowed server FHE keys; must outlive this generator.
    sfhe: &'a ServerFHE,
    // C-allocated preprocessed weight masks; freed in `Drop`.
    masks: *mut *mut c_char,
    // Set by `preprocess`, updated by `process`; None until preprocessing.
    shares: Option<ServerShares>,
}
/// Layer-type dispatch wrapper over the two SEAL server generators.
pub enum SealServerCG<'a> {
    Conv2D(Conv2D<'a>),
    FullyConnected(FullyConnected<'a>),
}
/// Common interface for server-side linear-layer ciphertext generation.
pub trait ServerCG {
    /// The key material needed to construct the generator.
    type Keys;
    /// Builds a generator for `layer`, preprocessing the `kernel` masks.
    fn new<F, C>(sfhe: Self::Keys, layer: &LinearLayer<F, C>, kernel: &Kernel<u64>) -> Self;
    /// Preprocesses the server's additive share of the layer output.
    fn preprocess(&mut self, linear_share: &Output<u64>);
    /// Processes the client's serialized ciphertext and returns the
    /// serialized result ciphertext.
    fn process(&mut self, client_share: Vec<c_char>) -> Vec<c_char>;
}
impl<'a> SealServerCG<'a> {
pub fn preprocess(&mut self, linear_share: &Output<u64>) {
match self {
Self::Conv2D(s) => s.preprocess(linear_share),
Self::FullyConnected(s) => s.preprocess(linear_share),
}
}
pub fn process(&mut self, client_share: Vec<c_char>) -> Vec<c_char> {
match self {
Self::Conv2D(s) => s.process(client_share),
Self::FullyConnected(s) => s.process(client_share),
}
}
}
impl<'a> ServerCG for Conv2D<'a> {
    type Keys = &'a ServerFHE;
    /// Computes the convolution metadata from the layer dimensions and
    /// preprocesses the kernel into C-side masks.
    ///
    /// Panics if `layer` is not a `Conv2d` layer.
    fn new<F, C>(sfhe: &'a ServerFHE, layer: &LinearLayer<F, C>, kernel: &Kernel<u64>) -> Self {
        let (input_dims, _, kernel_dims) = layer.all_dimensions();
        let params = match layer {
            LinearLayer::Conv2d { params, .. } => params,
            _ => panic!("Incorrect Layer"),
        };
        let data = unsafe {
            conv_metadata(
                sfhe.encoder,
                input_dims.2 as i32,
                input_dims.3 as i32,
                kernel_dims.2 as i32,
                kernel_dims.3 as i32,
                kernel_dims.1 as i32,
                kernel_dims.0 as i32,
                params.stride as i32,
                params.stride as i32,
                params.padding == Padding::Valid,
            )
        };
        // Build a [out_chans][inp_chans] double pointer view of the kernel
        // for the C API, keeping the backing Vecs alive until the call.
        let mut tmp_images = Vec::new();
        let mut kernel_vec: Vec<_> = vec![std::ptr::null(); data.out_chans as usize];
        for out_c in 0..data.out_chans as usize {
            // No easy way to convert directly to double pointer so create a vector for
            // each double pointer, get a pointer to it, and push it to tmp_images
            // so that it doesn't get dropped.
            //
            // At the end of the outer scope, tmp_images will be dropped after
            // kernel_vec, so we won't have a use after free kind of situation.
            let mut tmp_image: Vec<*const u64> = vec![std::ptr::null(); data.inp_chans as usize];
            for (inp_c, tmp_i) in tmp_image.iter_mut().enumerate() {
                *tmp_i = kernel
                    .slice(s![out_c, inp_c, .., ..])
                    .to_slice()
                    .expect("Error converting kernel")
                    .as_ptr();
            }
            kernel_vec[out_c] = tmp_image.as_ptr();
            // This ensures that tmp_image lives on past the scope of the loop.
            tmp_images.push(tmp_image);
        }
        let masks = unsafe { server_conv_preprocess(sfhe, &data, kernel_vec.as_ptr()) };
        Self {
            data,
            sfhe,
            masks,
            shares: None,
        }
    }
    /// Hands the server's per-output-channel randomness to the C library,
    /// storing the resulting share state for `process`.
    fn preprocess(&mut self, linear_share: &Output<u64>) {
        let mut linear_vec: Vec<_> = vec![std::ptr::null(); self.data.out_chans as usize];
        for out_c in 0..self.data.out_chans as usize {
            linear_vec[out_c] = linear_share
                .slice(s![0, out_c, .., ..])
                .as_slice()
                .expect("Error converting server randomness")
                .as_ptr();
        }
        self.shares = Some(unsafe {
            server_conv_preprocess_shares(self.sfhe, &self.data, linear_vec.as_ptr())
        });
    }
    /// Runs the online convolution on the client's serialized ciphertext and
    /// returns the serialized result ciphertext.
    ///
    /// Panics if `preprocess` was not called first (the `unwrap` below).
    /// NOTE(review): `self.shares.unwrap()` copies the shares struct out of
    /// the Option — this relies on `ServerShares` being `Copy` in the
    /// generated bindings; confirm.
    fn process(&mut self, mut client_share: Vec<c_char>) -> Vec<c_char> {
        let mut shares = self.shares.unwrap();
        let client_share_ct = SerialCT {
            inner: client_share.as_mut_ptr(),
            size: client_share.len() as u64,
        };
        unsafe {
            server_conv_online(
                self.sfhe,
                &self.data,
                client_share_ct,
                self.masks,
                &mut shares,
            )
        };
        self.shares = Some(shares);
        // Return ciphertexts as vectors
        let linear_ct_vec = unsafe {
            std::slice::from_raw_parts(shares.linear_ct.inner, shares.linear_ct.size as usize)
                .to_vec()
        };
        linear_ct_vec
    }
}
impl<'a> ServerCG for FullyConnected<'a> {
    type Keys = &'a ServerFHE;
    /// Computes FC metadata (flattened input length × output length) and
    /// preprocesses the weight matrix into C-side masks, one row pointer
    /// per output neuron.
    fn new<F, C>(sfhe: &'a ServerFHE, layer: &LinearLayer<F, C>, kernel: &Kernel<u64>) -> Self {
        let (input_dims, output_dims, _) = layer.all_dimensions();
        let data = unsafe {
            fc_metadata(
                sfhe.encoder,
                (input_dims.1 * input_dims.2 * input_dims.3) as i32,
                output_dims.1 as i32,
            )
        };
        let mut kernel_vec: Vec<*const u64> = vec![std::ptr::null(); data.filter_h as usize];
        for row in 0..data.filter_h as usize {
            kernel_vec[row] = kernel
                .slice(s![row, .., .., ..])
                .to_slice()
                .expect("Error converting kernel")
                .as_ptr();
        }
        let masks = unsafe { server_fc_preprocess(sfhe, &data, kernel_vec.as_ptr()) };
        Self {
            data,
            sfhe,
            masks,
            shares: None,
        }
    }
    /// Hands the server's output randomness to the C library, storing the
    /// resulting share state for `process`.
    fn preprocess(&mut self, linear_share: &Output<u64>) {
        let linear: *const u64;
        linear = linear_share
            .slice(s![0, .., .., ..])
            .as_slice()
            .expect("Error converting server randomness")
            .as_ptr();
        self.shares = Some(unsafe { server_fc_preprocess_shares(self.sfhe, &self.data, linear) });
    }
    /// Runs the online FC computation on the client's serialized ciphertext
    /// and returns the serialized result ciphertext.
    ///
    /// Panics if `preprocess` was not called first (the `unwrap` below).
    fn process(&mut self, mut client_share: Vec<c_char>) -> Vec<c_char> {
        let mut shares = self.shares.unwrap();
        let client_share_ct = SerialCT {
            inner: client_share.as_mut_ptr(),
            size: client_share.len() as u64,
        };
        unsafe {
            server_fc_online(
                self.sfhe,
                &self.data,
                client_share_ct,
                self.masks,
                &mut shares,
            )
        };
        self.shares = Some(shares);
        // Return ciphertexts as vectors
        let linear_ct_vec = unsafe {
            std::slice::from_raw_parts(shares.linear_ct.inner, shares.linear_ct.size as usize)
                .to_vec()
        };
        linear_ct_vec
    }
}
impl<'a> Drop for Conv2D<'a> {
    /// Frees the C-allocated masks and share state.
    fn drop(&mut self) {
        // The previous `self.shares.unwrap()` would panic inside `drop`
        // (aborting the process) whenever `preprocess` was never called,
        // and silently relied on `ServerShares` being `Copy`. Only free
        // shares that actually exist.
        if let Some(shares) = self.shares.as_mut() {
            unsafe { server_conv_free(&self.data, self.masks, shares) };
        }
        // NOTE(review): if `shares` is None the masks are not freed here —
        // confirm whether the C API exposes a masks-only free.
    }
}
impl<'a> Drop for FullyConnected<'a> {
    /// Frees the C-allocated masks and share state.
    fn drop(&mut self) {
        // The previous `self.shares.unwrap()` would panic inside `drop`
        // (aborting the process) whenever `preprocess` was never called.
        // Only free shares that actually exist.
        if let Some(shares) = self.shares.as_mut() {
            unsafe { server_fc_free(&self.data, self.masks, shares) };
        }
        // NOTE(review): if `shares` is None the masks are not freed here —
        // confirm whether the C API exposes a masks-only free.
    }
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/protocols-sys/src/server_gen.rs | rust/protocols-sys/src/server_gen.rs | use crate::*;
use std::os::raw::c_char;
/// Server side of Beaver-triple generation over FHE ciphertexts.
pub trait ServerGen {
    /// The key material needed to construct the generator.
    type Keys;
    /// The type of messages passed between client and server
    type MsgType;
    /// Create new ServerGen object
    fn new(keys: Self::Keys) -> Self;
    /// Preprocess `a`, `b`, and `r` randomizers into server-side triple state.
    fn triples_preprocess(&self, a: &[u64], b: &[u64], r: &[u64]) -> ServerTriples;
    /// Process the client's input and return `c` shares for the client.
    fn triples_online(
        &self,
        shares: &mut ServerTriples,
        a: &mut [Self::MsgType],
        b: &mut [Self::MsgType],
    ) -> Vec<Self::MsgType>;
}
/// SEAL implementation of `ServerGen`.
pub struct SealServerGen<'a> {
    // Borrowed server FHE keys; must outlive this generator.
    sfhe: &'a ServerFHE,
}
impl<'a> ServerGen for SealServerGen<'a> {
    type Keys = &'a ServerFHE;
    /// Messages are SEAL ciphertexts which are passed as opaque C pointers
    type MsgType = c_char;
    fn new(sfhe: Self::Keys) -> Self {
        Self { sfhe }
    }
    /// Hands the server's `a`, `b`, `r` vectors (all of equal length `a.len()`)
    /// to the C library, which returns the server-side triple state.
    fn triples_preprocess(&self, a: &[u64], b: &[u64], r: &[u64]) -> ServerTriples {
        unsafe {
            server_triples_preprocess(
                self.sfhe,
                a.len() as u32,
                a.as_ptr(),
                b.as_ptr(),
                r.as_ptr(),
            )
        }
    }
    /// Runs the online phase on the client's serialized `a`/`b` ciphertexts
    /// and returns the serialized `c` ciphertext for the client.
    fn triples_online(
        &self,
        shares: &mut ServerTriples,
        a: &mut [Self::MsgType],
        b: &mut [Self::MsgType],
    ) -> Vec<Self::MsgType> {
        // Wrap the client's serialized ciphertext bytes for the C API.
        let a_ct = SerialCT {
            inner: a.as_mut_ptr(),
            size: a.len() as u64,
        };
        let b_ct = SerialCT {
            inner: b.as_mut_ptr(),
            size: b.len() as u64,
        };
        unsafe { server_triples_online(self.sfhe, a_ct, b_ct, shares) };
        // Copy the C-owned result ciphertext into a Rust-owned Vec.
        let result = unsafe {
            std::slice::from_raw_parts(shares.c_ct.inner, shares.c_ct.size as usize).to_vec()
        };
        result
    }
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/protocols-sys/src/lib.rs | rust/protocols-sys/src/lib.rs | #![allow(
non_snake_case,
non_camel_case_types,
non_upper_case_globals,
dead_code
)]
#[macro_use]
pub extern crate ndarray;
pub mod client_cg;
pub mod client_gen;
pub mod key_share;
pub mod server_cg;
pub mod server_gen;
pub use client_cg::*;
pub use client_gen::*;
pub use key_share::KeyShare;
pub use server_cg::*;
pub use server_gen::*;
use std::os::raw::c_char;
include!("bindings.rs");
/// Wrapper around a serialized SEAL ciphertext buffer owned by the C library.
pub struct SealCT {
    /// The underlying C buffer (pointer + length in bytes).
    pub inner: SerialCT,
}
impl SealCT {
    /// Creates an empty wrapper; the buffer is populated by `encrypt_vec`.
    pub fn new() -> Self {
        let inner = SerialCT {
            inner: std::ptr::null_mut(),
            size: 0,
        };
        Self { inner }
    }
    /// Encrypt a vector using SEAL, returning the serialized ciphertext bytes.
    pub fn encrypt_vec(&mut self, cfhe: &ClientFHE, input: Vec<u64>) -> Vec<c_char> {
        self.inner = unsafe { encrypt_vec(cfhe, input.as_ptr(), input.len() as u64) };
        unsafe { std::slice::from_raw_parts(self.inner.inner, self.inner.size as usize).to_vec() }
    }
    /// Decrypt serialized SEAL ciphertexts into `size` plaintext values.
    pub fn decrypt_vec(&mut self, cfhe: &ClientFHE, mut ct: Vec<c_char>, size: usize) -> Vec<u64> {
        // Don't replace the current inner CT, since the received ciphertext was
        // allocated by Rust
        let mut recv_ct = SerialCT {
            inner: ct.as_mut_ptr(),
            size: ct.len() as u64,
        };
        unsafe {
            let raw_vec = decrypt_vec(cfhe, &mut recv_ct, size as u64);
            // NOTE(review): the data behind `raw_vec` is copied but the
            // allocation itself is never visibly freed here — confirm whether
            // the C library reclaims it.
            std::slice::from_raw_parts(raw_vec, size as usize).to_vec()
        }
    }
}
// Each bindgen type below owns C-side resources, so `Drop` forwards to the
// matching C free function. The `Send`/`Sync` impls assert that the C
// library tolerates cross-thread use of these handles.
// NOTE(review): these unsafe Send/Sync claims cannot be verified from this
// file alone — confirm the C implementations are thread-safe.
impl Drop for ClientFHE {
    fn drop(&mut self) {
        unsafe {
            client_free_keys(self);
        }
    }
}
unsafe impl Send for ClientFHE {}
unsafe impl Sync for ClientFHE {}
impl Drop for ServerFHE {
    fn drop(&mut self) {
        unsafe {
            server_free_keys(self);
        }
    }
}
unsafe impl Send for ServerFHE {}
unsafe impl Sync for ServerFHE {}
impl Drop for ClientTriples {
    fn drop(&mut self) {
        unsafe {
            client_triples_free(self);
        }
    }
}
unsafe impl Send for ClientTriples {}
unsafe impl Sync for ClientTriples {}
impl Drop for ServerTriples {
    fn drop(&mut self) {
        unsafe {
            server_triples_free(self);
        }
    }
}
unsafe impl Send for ServerTriples {}
unsafe impl Sync for ServerTriples {}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
mc2-project/delphi | https://github.com/mc2-project/delphi/blob/92bc0071fa11570df6b048ae0f6937ced249bb5a/rust/protocols-sys/src/key_share.rs | rust/protocols-sys/src/key_share.rs | use crate::*;
use std::slice::from_raw_parts;
/// Owns the serialized FHE key material exchanged during key generation.
pub struct KeyShare(SerialCT);
impl KeyShare {
    /// Creates an empty key share; the underlying buffer is allocated by the
    /// C library during `generate` and freed in `Drop`.
    pub fn new() -> Self {
        Self(SerialCT {
            inner: ::std::ptr::null_mut(),
            size: 0,
        })
    }
    /// Generates client keys, returning the client key handle together with
    /// the serialized public key material to send to the server.
    pub fn generate(&mut self) -> (ClientFHE, Vec<std::os::raw::c_char>) {
        let cfhe = unsafe { client_keygen(&mut self.0) };
        (cfhe, unsafe {
            from_raw_parts(self.0.inner, self.0.size as usize).to_vec()
        })
    }
    /// Deserializes key material received from the client into server keys.
    pub fn receive(&mut self, mut keys_vec: Vec<std::os::raw::c_char>) -> ServerFHE {
        let serial_keys = SerialCT {
            inner: keys_vec.as_mut_ptr(),
            size: keys_vec.len() as u64,
        };
        unsafe { server_keygen(serial_keys) }
    }
}
// `Default` mirrors `new` so the type composes with `#[derive(Default)]`
// containers (clippy: new_without_default).
impl Default for KeyShare {
    fn default() -> Self {
        Self::new()
    }
}
impl Drop for KeyShare {
    /// Releases the C-allocated key buffer. `free_ct` is assumed to tolerate
    /// the null/empty buffer produced by `new` — TODO confirm in the C API.
    fn drop(&mut self) {
        unsafe {
            free_ct(&mut self.0);
        }
    }
}
| rust | Apache-2.0 | 92bc0071fa11570df6b048ae0f6937ced249bb5a | 2026-01-04T20:24:11.030795Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.