repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/bin/sequencer_bench.rs | fantoch_ps/src/bin/sequencer_bench.rs | use clap::{Arg, Command};
use fantoch::metrics::Histogram;
use fantoch::run::chan::{ChannelReceiver, ChannelSender};
use fantoch::run::task;
use fantoch::time::{RunTime, SysTime};
use fantoch::HashMap;
use parking_lot::Mutex;
use rand::Rng;
use std::cmp::max;
use std::collections::BTreeSet;
use std::error::Error;
use std::iter::FromIterator;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use tokio::sync::oneshot;
// default benchmark parameters (each overridable via a command-line flag)
const DEFAULT_KEYS: usize = 100;
const DEFAULT_KEYS_PER_COMMAND: usize = 1;
const DEFAULT_CLIENTS: usize = 10;
const DEFAULT_COMMANDS_PER_CLIENT: usize = 10000;
const DEFAULT_CHECK_VOTES: bool = true;
// buffer size of the channels connecting clients to workers
const CHANNEL_BUFFER_SIZE: usize = 10000;
// a key is simply an index into the sequencer's key vector
type Key = usize;
// set of keys accessed by a single command; `BTreeSet` keeps it sorted,
// which gives a deterministic lock-acquisition order in `LockSequencer`
type KeySet = BTreeSet<Key>;
// a vote range (key, start, end) represents votes start..=end on `key`
type VoteRange = (Key, u64, u64);
/// Entry point: reads the benchmark parameters, builds a multi-threaded
/// tokio runtime with one worker thread per cpu and runs `bench` on it.
fn main() -> Result<(), Box<dyn Error>> {
    // read benchmark parameters from the command line
    let (keys, clients, cmds_per_client, keys_per_cmd, check_votes) =
        parse_args();
    // get number of cpus
    let cpu_count = num_cpus::get();
    println!("cpus: {}", cpu_count);
    // maybe warn about number of keys
    if keys < cpu_count {
        println!(
            "warning: number of keys {} is lower than the number of cpus {}",
            keys, cpu_count
        );
    }
    // create tokio runtime
    let runtime = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(cpu_count)
        .thread_name("sequencer-bench")
        .build()
        .expect("tokio runtime build should work");
    // drive the benchmark to completion on that runtime
    runtime.block_on(bench(
        cpu_count,
        keys,
        clients,
        cmds_per_client,
        keys_per_cmd,
        check_votes,
    ))
}
/// Runs the benchmark: spawns one worker per cpu (all sharing the same
/// sequencer), then `client_number` clients that issue commands, and
/// finally aggregates latencies and (optionally) checks that the votes
/// generated per key form a gap-free sequence starting at 1.
async fn bench(
    cpus: usize,
    keys_number: usize,
    client_number: usize,
    commands_per_client: usize,
    keys_per_command: usize,
    check_votes: bool,
) -> Result<(), Box<dyn Error>> {
    // create sequencer
    let sequencer = Arc::new(AtomicSequencer::new(keys_number));
    // create as many workers as cpus
    let to_workers: Vec<_> = (0..cpus)
        .map(|id| {
            task::spawn_consumer(CHANNEL_BUFFER_SIZE, |rx| {
                worker(id, rx, sequencer.clone())
            })
        })
        .collect();
    // spawn clients
    let handles = (0..client_number).map(|id| {
        tokio::spawn(client(
            id,
            keys_number,
            commands_per_client,
            keys_per_command,
            check_votes,
            to_workers.clone(),
        ))
    });
    // wait for all clients to complete and aggregate values
    let mut latency = Histogram::new();
    let mut all_votes = HashMap::new();
    for join_result in futures::future::join_all(handles).await {
        let (client_histogram, votes) = join_result?;
        latency.merge(&client_histogram);
        // `votes` is empty unless `check_votes` was set (see `client`)
        for (key, vote_start, vote_end) in votes {
            let current_key_votes =
                all_votes.entry(key).or_insert_with(BTreeSet::new);
            for vote in vote_start..=vote_end {
                // insert vote and check it hasn't been added before
                assert!(current_key_votes.insert(vote));
            }
        }
    }
    // check that we have all votes (no gaps that would prevent
    // timestamp-stability)
    for (_key, key_votes) in all_votes {
        // get number of votes
        let key_votes_count = key_votes.len();
        // we should have all votes from 1 to `key_votes_count`
        assert_eq!(
            key_votes,
            BTreeSet::from_iter((1..=key_votes_count).map(|vote| vote as u64))
        );
    }
    println!("latency: {:?}", latency);
    Ok(())
}
/// Worker loop: receives `(proposal, command keys, reply channel)`
/// requests and replies with the vote ranges produced by the shared
/// sequencer. Terminates once every client-side sender is dropped.
async fn worker<S>(
    id: usize,
    mut requests: ChannelReceiver<(
        u64,
        KeySet,
        oneshot::Sender<Vec<VoteRange>>,
    )>,
    sequencer: Arc<S>,
) where
    S: Sequencer,
{
    println!("worker {} started...", id);
    while let Some((proposal, cmd, client)) = requests.recv().await {
        let result = sequencer.next(proposal, cmd);
        // a send error on the oneshot channel means the client dropped
        // its receiving end; log it and keep serving other requests
        if let Err(e) = client.send(result) {
            println!("error while sending next result to client: {:?}", e);
        }
    }
}
/// Client loop: issues `commands_per_client` randomly-generated commands,
/// each routed to the worker responsible for one of the command's keys,
/// measuring the end-to-end latency of each command and collecting every
/// vote received (only when `check_votes` is set).
async fn client(
    id: usize,
    keys_number: usize,
    commands_per_client: usize,
    keys_per_command: usize,
    check_votes: bool,
    mut to_workers: Vec<
        ChannelSender<(u64, KeySet, oneshot::Sender<Vec<VoteRange>>)>,
    >,
) -> (Histogram, Vec<VoteRange>) {
    println!("client {} started...", id);
    // create histogram and list with all votes received
    let mut histogram = Histogram::new();
    let mut all_votes = Vec::new();
    // create time and highest proposal seen
    let time = RunTime;
    let mut proposal: u64 = 0;
    for _ in 0..commands_per_client {
        // generate random command: a set of `keys_per_command` distinct
        // keys (the loop simply retries on duplicates)
        let mut command = BTreeSet::new();
        while command.len() < keys_per_command {
            // generate random key
            let key = rand::thread_rng().gen_range(0..keys_number);
            command.insert(key);
        }
        // increase highest proposal by 1
        proposal += 1;
        // create oneshot channel and send command
        let (tx, rx) = oneshot::channel();
        // get one key touched by the command
        let key = command
            .iter()
            .next()
            .expect("minimum keys per command should be 1");
        // select worker responsible for that key
        let worker_index = key % to_workers.len();
        // create request
        let request = (proposal, command, tx);
        // register start time
        let start_start = time.micros();
        // send request to worker
        if let Err(e) = to_workers[worker_index].send(request).await {
            println!(
                "error sending request to worker {}: {:?}",
                worker_index, e
            );
        }
        // wait for reply
        match rx.await {
            Ok(votes) => {
                // register end time
                let end_time = time.micros();
                // update highest proposal seen
                let highest_reply = votes
                    .iter()
                    .map(|(_, _, vote_end)| vote_end)
                    .max()
                    .expect("there should be at least one vote");
                proposal = max(*highest_reply, proposal);
                // update histogram
                let latency = end_time - start_start;
                histogram.increment(latency as u64);
                // update list with all votes
                if check_votes {
                    all_votes.extend(votes);
                }
            }
            Err(e) => {
                println!(
                    "error receiving reply from worker {}: {:?}",
                    worker_index, e
                );
            }
        }
    }
    (histogram, all_votes)
}
/// Timestamp-assignment interface: given the caller's current proposal
/// and the set of keys accessed by a command, `next` returns the vote
/// ranges generated for those keys.
trait Sequencer {
    /// Creates a sequencer tracking keys `0..keys_number`.
    fn new(keys_number: usize) -> Self;
    /// Generates votes for every key in `cmd`.
    fn next(&self, proposal: u64, cmd: KeySet) -> Vec<VoteRange>;
}
/// Sequencer that protects each key's highest assigned sequence with a
/// `parking_lot::Mutex`.
struct LockSequencer {
    keys: Vec<Mutex<u64>>,
}
impl Sequencer for LockSequencer {
    fn new(keys_number: usize) -> Self {
        // one zero-initialized mutex per key
        let mut keys = Vec::with_capacity(keys_number);
        keys.resize_with(keys_number, Default::default);
        Self { keys }
    }
    fn next(&self, proposal: u64, cmd: KeySet) -> Vec<VoteRange> {
        let vote_count = cmd.len();
        let mut votes = Vec::with_capacity(vote_count);
        let mut locks = Vec::with_capacity(vote_count);
        // the final sequence is the highest of the proposal and every
        // key's next available slot
        let mut max_sequence = proposal;
        // `cmd` is a `BTreeSet`, so keys are visited (and thus locked) in
        // ascending order, making concurrent `next` calls deadlock-free
        for key in cmd {
            let lock = self.keys[key].lock();
            max_sequence = max(max_sequence, *lock + 1);
            locks.push((key, lock))
        }
        for (key, mut lock) in locks {
            // compute vote start and vote end
            let vote_start = *lock + 1;
            // save vote range
            votes.push((key, vote_start, max_sequence));
            // set new value
            *lock = max_sequence;
        }
        // `Vec::with_capacity` only guarantees *at least* the requested
        // capacity, so asserting on `capacity()` could panic spuriously;
        // assert on the number of votes actually generated instead
        assert_eq!(votes.len(), vote_count);
        votes
    }
}
/// Lock-free sequencer: each key's highest assigned sequence lives in an
/// `AtomicU64` updated via compare-and-swap loops (`fetch_update`).
struct AtomicSequencer {
    keys: Vec<AtomicU64>,
}
impl Sequencer for AtomicSequencer {
    fn new(keys_number: usize) -> Self {
        // one zero-initialized atomic counter per key
        let mut keys = Vec::with_capacity(keys_number);
        keys.resize_with(keys_number, Default::default);
        Self { keys }
    }
    fn next(&self, proposal: u64, cmd: KeySet) -> Vec<VoteRange> {
        // each key votes once in the first pass and at most once more in
        // the second pass, except the key that already reached
        // `max_sequence`; note: `cmd` must be non-empty (callers
        // guarantee at least one key per command)
        let max_vote_count = cmd.len() * 2 - 1;
        let mut votes = Vec::with_capacity(max_vote_count);
        // first pass: bump every key to at least `max(proposal, current + 1)`
        // and track the highest sequence assigned across all keys
        let max_sequence = cmd
            .into_iter()
            .map(|key| {
                let previous_value = self.keys[key]
                    .fetch_update(
                        Ordering::Relaxed,
                        Ordering::Relaxed,
                        |value| Some(max(proposal, value + 1)),
                    )
                    .expect("updates always succeed");
                // compute vote start and vote end
                let vote_start = previous_value + 1;
                let vote_end = max(proposal, previous_value + 1);
                // save vote range
                votes.push((key, vote_start, vote_end));
                // return vote end
                vote_end
            })
            .max()
            .expect("there should be a maximum sequence");
        // second pass: keys that voted below `max_sequence` try to catch
        // up so that every key reaches the same sequence
        let new_votes: Vec<_> = votes
            .iter()
            .filter_map(|(key, _vote_start, vote_end)| {
                // check if we should vote more
                if *vote_end < max_sequence {
                    let result = self.keys[*key].fetch_update(
                        Ordering::Relaxed,
                        Ordering::Relaxed,
                        |value| {
                            if value < max_sequence {
                                Some(max_sequence)
                            } else {
                                None
                            }
                        },
                    );
                    // check if we generated more votes (maybe votes by other
                    // threads have been generated and it's
                    // no longer possible to generate votes)
                    if let Ok(previous_value) = result {
                        let vote_start = previous_value + 1;
                        let vote_end = max_sequence;
                        return Some((*key, vote_start, vote_end));
                    }
                }
                None
            })
            .collect();
        votes.extend(new_votes);
        // `Vec::with_capacity` only guarantees *at least* the requested
        // capacity, so asserting on `capacity()` could panic spuriously;
        // check the number of votes actually generated instead
        assert!(votes.len() <= max_vote_count);
        votes
    }
}
/// Parses the benchmark's command-line arguments and returns
/// `(keys, clients, commands_per_client, keys_per_command, check_votes)`
/// with defaults applied when an argument is absent.
fn parse_args() -> (usize, usize, usize, usize, bool) {
    let matches = Command::new("sequencer_bench")
        .version("0.1")
        .author("Vitor Enes <vitorenesduarte@gmail.com>")
        .about("Benchmark timestamp-assignment in tempo")
        .arg(
            Arg::new("keys")
                .long("keys")
                .value_name("KEYS")
                .help("total number of keys; default: 100")
                .takes_value(true),
        )
        .arg(
            Arg::new("clients")
                .long("clients")
                .value_name("CLIENTS")
                .help("total number of clients; default: 10")
                .takes_value(true),
        )
        .arg(
            Arg::new("commands_per_client")
                .long("commands_per_client")
                .value_name("COMMANDS_PER_CLIENT")
                .help("number of commands to be issued by each client; default: 10000")
                .takes_value(true),
        )
        .arg(
            Arg::new("keys_per_command")
                .long("keys_per_command")
                .value_name("KEYS_PER_COMMAND")
                .help("number of keys accessed by each command; default: 1")
                .takes_value(true),
        )
        .arg(
            Arg::new("check_votes")
                .long("check_votes")
                .value_name("CHECK_VOTES")
                .help("checks if votes generated are correct; default: true")
                .takes_value(true),
        )
        .get_matches();
    // parse arguments
    let keys = parse_keys(matches.value_of("keys"));
    let clients = parse_clients(matches.value_of("clients"));
    let commands_per_client =
        parse_commands_per_client(matches.value_of("commands_per_client"));
    let keys_per_command =
        parse_keys_per_command(matches.value_of("keys_per_command"));
    let check_votes = parse_check_votes(matches.value_of("check_votes"));
    // echo the chosen configuration
    println!("keys: {:?}", keys);
    println!("clients: {:?}", clients);
    println!("commands per client: {:?}", commands_per_client);
    println!("keys per command: {:?}", keys_per_command);
    println!("check votes: {:?}", check_votes);
    (
        keys,
        clients,
        commands_per_client,
        keys_per_command,
        check_votes,
    )
}
/// Total number of keys; defaults to `DEFAULT_KEYS` when absent.
fn parse_keys(keys: Option<&str>) -> usize {
    match parse_number(keys) {
        Some(value) => value,
        None => DEFAULT_KEYS,
    }
}
/// Total number of clients; defaults to `DEFAULT_CLIENTS` when absent.
fn parse_clients(clients: Option<&str>) -> usize {
    match parse_number(clients) {
        Some(value) => value,
        None => DEFAULT_CLIENTS,
    }
}
/// Commands issued by each client; defaults to
/// `DEFAULT_COMMANDS_PER_CLIENT` when absent.
fn parse_commands_per_client(commands_per_client: Option<&str>) -> usize {
    match parse_number(commands_per_client) {
        Some(value) => value,
        None => DEFAULT_COMMANDS_PER_CLIENT,
    }
}
/// Keys accessed by each command; defaults to
/// `DEFAULT_KEYS_PER_COMMAND` when absent.
fn parse_keys_per_command(keys_per_command: Option<&str>) -> usize {
    match parse_number(keys_per_command) {
        Some(value) => value,
        None => DEFAULT_KEYS_PER_COMMAND,
    }
}
/// Whether generated votes should be verified; defaults to
/// `DEFAULT_CHECK_VOTES` when absent. Panics on a non-boolean value.
fn parse_check_votes(check_votes: Option<&str>) -> bool {
    match check_votes {
        Some(flag) => {
            flag.parse::<bool>().expect("check votes should be a bool")
        }
        None => DEFAULT_CHECK_VOTES,
    }
}
/// Parses an optional decimal string into a `usize`; panics when the
/// string is present but malformed.
fn parse_number(number: Option<&str>) -> Option<usize> {
    let raw = number?;
    Some(raw.parse::<usize>().expect("should be a number"))
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/bin/atlas.rs | fantoch_ps/src/bin/atlas.rs | mod common;
use color_eyre::Report;
use fantoch_ps::protocol::AtlasSequential;
// TODO can we generate all the protocol binaries with a macro?
/// Entry point: runs an `AtlasSequential` protocol process via the
/// shared runner in `common::protocol`.
fn main() -> Result<(), Report> {
    common::protocol::run::<AtlasSequential>()
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/bin/tempo_locked.rs | fantoch_ps/src/bin/tempo_locked.rs | mod common;
use color_eyre::Report;
use fantoch_ps::protocol::TempoLocked;
/// Entry point: runs a `TempoLocked` protocol process via the shared
/// runner in `common::protocol`.
fn main() -> Result<(), Report> {
    common::protocol::run::<TempoLocked>()
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/bin/caesar_locked.rs | fantoch_ps/src/bin/caesar_locked.rs | mod common;
use color_eyre::Report;
use fantoch_ps::protocol::CaesarLocked;
/// Entry point: runs a `CaesarLocked` protocol process via the shared
/// runner in `common::protocol`.
fn main() -> Result<(), Report> {
    common::protocol::run::<CaesarLocked>()
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/bin/shard_distribution.rs | fantoch_ps/src/bin/shard_distribution.rs | use fantoch::client::{KeyGen, Workload};
use fantoch::id::{ClientId, RiflGen};
use fantoch::metrics::Histogram;
/// Computes, for several zipf coefficients and shard counts, the
/// percentage of commands that touch 2 (and, for 3-key commands, 3)
/// shards/keys, printing per-combination results and accumulating two
/// csv tables (one for shards, one for keys).
fn main() {
    let n = 3;
    let keys_per_command = 2;
    let commands_per_client = 500;
    let payload_size = 0;
    let clients_per_region = 1024;
    let shard_counts = vec![2, 3, 4, 5, 6];
    let coefficients = vec![
        0.1, 0.5, 0.6, 0.7, 1.0, 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 4.0, 6.0, 8.0,
        12.0, 16.0, 24.0, 32.0, 64.0, 128.0,
    ];
    // start csvs
    let header = format!(
        ",{}",
        shard_counts
            .iter()
            .map(|s| s.to_string())
            .collect::<Vec<_>>()
            .join(",")
    );
    let mut s_csv = header.clone();
    // last use of `header`: move it instead of cloning a second time
    let mut k_csv = header;
    for coefficient in coefficients {
        println!("zipf = {}", coefficient);
        let key_gen = KeyGen::Zipf {
            coefficient,
            total_keys_per_shard: 1_000_000,
        };
        // add row start to csvs
        s_csv = format!("{}\n{}", s_csv, coefficient);
        k_csv = format!("{}\n{}", k_csv, coefficient);
        // iterate by reference to avoid cloning the vector on every
        // coefficient (`shard_count` itself is `Copy`)
        for &shard_count in &shard_counts {
            let total_clients = clients_per_region * n;
            // create target shard histogram
            let mut shards_histogram = Histogram::new();
            let mut keys_histogram = Histogram::new();
            for client_id in 1..=total_clients {
                let client_id = client_id as ClientId;
                let mut workload = Workload::new(
                    shard_count,
                    key_gen,
                    keys_per_command,
                    commands_per_client,
                    payload_size,
                );
                let mut rifl_gen = RiflGen::new(client_id);
                let mut key_gen_state = workload
                    .key_gen()
                    .initial_state(workload.shard_count(), client_id);
                while let Some((_target_shard, cmd)) =
                    workload.next_cmd(&mut rifl_gen, &mut key_gen_state)
                {
                    // update histograms
                    shards_histogram.increment(cmd.shard_count() as u64);
                    keys_histogram.increment(cmd.total_key_count() as u64);
                }
            }
            let total_commands = total_clients * commands_per_client;
            // percentage of commands whose histogram bucket `key` was hit
            let percentage = |hist: &Histogram, key| {
                hist.inner().get(&key).unwrap_or(&0) * 100 / total_commands
            };
            let s2_percentage = percentage(&shards_histogram, 2);
            let k2_percentage = percentage(&keys_histogram, 2);
            match keys_per_command {
                2 => {
                    println!(
                        "  shards = {} | #s=2 -> %{:<3} | #k=2 -> %{:<3}",
                        shard_count, s2_percentage, k2_percentage
                    );
                    s_csv = format!("{},{}", s_csv, s2_percentage);
                    k_csv = format!("{},{}", k_csv, k2_percentage);
                }
                3 => {
                    let s3_percentage = percentage(&shards_histogram, 3);
                    let k3_percentage = percentage(&keys_histogram, 3);
                    println!(
                        "  shards = {} | #s=2 -> %{:<3} | #s=3 -> %{:<3} | #k=2 -> %{:<3} | #k=3 -> %{:<3}",
                        shard_count, s2_percentage, s3_percentage, k2_percentage, k3_percentage
                    );
                    s_csv = format!(
                        "{},{} | {}",
                        s_csv, s2_percentage, s3_percentage
                    );
                    k_csv = format!(
                        "{},{} | {}",
                        k_csv, k2_percentage, k3_percentage
                    );
                }
                _ => {
                    panic!(
                        "unsupported keys_per_command = {}",
                        keys_per_command
                    );
                }
            }
        }
        println!("{}", s_csv);
        println!("{}", k_csv);
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/bin/epaxos.rs | fantoch_ps/src/bin/epaxos.rs | mod common;
use color_eyre::Report;
use fantoch_ps::protocol::EPaxosSequential;
/// Entry point: runs an `EPaxosSequential` protocol process via the
/// shared runner in `common::protocol`.
fn main() -> Result<(), Report> {
    common::protocol::run::<EPaxosSequential>()
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/bin/client.rs | fantoch_ps/src/bin/client.rs | mod common;
use clap::{Arg, Command};
use color_eyre::Report;
use fantoch::client::{KeyGen, Workload};
use fantoch::id::ClientId;
use fantoch::info;
use std::time::Duration;
// separator used in the client id range argument ("START-END")
const RANGE_SEP: &str = "-";
// defaults for the optional client arguments
const DEFAULT_KEYS_PER_COMMAND: usize = 1;
const DEFAULT_SHARD_COUNT: usize = 1;
const DEFAULT_KEY_GEN: KeyGen = KeyGen::ConflictPool {
    conflict_rate: 100,
    pool_size: 1,
};
const DEFAULT_COMMANDS_PER_CLIENT: usize = 1000;
const DEFAULT_READ_ONLY_PERCENTAGE: usize = 0;
const DEFAULT_PAYLOAD_SIZE: usize = 100;
const DEFAULT_BATCH_MAX_SIZE: usize = 1;
const DEFAULT_BATCH_MAX_DELAY: Duration = Duration::from_millis(5);
// all parsed client arguments, in the order destructured in `main`:
// (ids, addresses, interval, workload, batch_max_size, batch_max_delay,
//  tcp_nodelay, channel_buffer_size, status_frequency, metrics_file,
//  stack_size, cpus)
type ClientArgs = (
    Vec<ClientId>,
    Vec<String>,
    Option<Duration>,
    Workload,
    usize,
    Duration,
    bool,
    usize,
    Option<usize>,
    Option<String>,
    usize,
    Option<usize>,
);
/// Entry point: parses the client arguments and drives
/// `fantoch::run::client` to completion on a fresh tokio runtime.
fn main() -> Result<(), Report> {
    // keep the tracing worker guard alive until `main` returns
    let (args, _guard) = parse_args();
    let (
        ids,
        addresses,
        interval,
        workload,
        batch_max_size,
        batch_max_delay,
        tcp_nodelay,
        channel_buffer_size,
        status_frequency,
        metrics_file,
        stack_size,
        cpus,
    ) = args;
    common::tokio_runtime(stack_size, cpus).block_on(fantoch::run::client(
        ids,
        addresses,
        interval,
        workload,
        batch_max_size,
        batch_max_delay,
        tcp_nodelay,
        channel_buffer_size,
        status_frequency,
        metrics_file,
    ))
}
/// Parses all client command-line arguments. Returns the parsed
/// `ClientArgs` plus the tracing worker guard, which the caller must
/// keep alive while the client runs.
fn parse_args() -> (ClientArgs, tracing_appender::non_blocking::WorkerGuard) {
    let matches = Command::new("client")
        .version("0.1")
        .author("Vitor Enes <vitorenesduarte@gmail.com>")
        .about("Runs a client that will connect to some instance of a protocol.")
        .arg(
            Arg::new("ids")
                .long("ids")
                .value_name("ID_RANGE")
                .help("a range of client identifiers represented as START-END; as many client as the number of identifers will be created")
                .required(true)
                .takes_value(true),
        )
        .arg(
            Arg::new("addresses")
                .long("addresses")
                .value_name("ADDRESSES")
                .help("comma-separated list of addresses to connect to (in the form IP:PORT e.g. 127.0.0.1:3000)")
                .required(true)
                .takes_value(true),
        )
        .arg(
            Arg::new("interval")
                .long("interval")
                .value_name("INTERVAL")
                .help("if this value is set, an open-loop client will be created (by default is closed-loop) and the value set is used as the interval (in milliseconds) between submitted commands")
                .takes_value(true),
        )
        .arg(
            Arg::new("shard_count")
                .long("shard_count")
                .value_name("SHARD_COUNT")
                .help("number of shards accessed in the system; default: 1")
                .takes_value(true),
        )
        .arg(
            Arg::new("key_gen")
                .long("key_gen")
                .value_name("KEY_GEN")
                .help("representation of a key generator; possible values 'conflict_pool,100,1' where 100 is the conflict rate and 1 the pool size, or 'zipf,1.3,10000' where 1.3 is the zipf coefficient (which should be non-zero) and 10000 the number of keys (per shard) in the distribution; default: 'conflict_rate,100,1'")
                .takes_value(true),
        )
        .arg(
            Arg::new("keys_per_command")
                .long("keys_per_command")
                .value_name("KEYS_PER_COMMAND")
                .help("number of keys accessed by each command to be issued by each client; default: 1")
                .takes_value(true),
        )
        .arg(
            Arg::new("commands_per_client")
                .long("commands_per_client")
                .value_name("COMMANDS_PER_CLIENT")
                .help("number of commands to be issued by each client; default: 1000")
                .takes_value(true),
        )
        .arg(
            Arg::new("read_only_percentage")
                .long("read_only_percentage")
                .value_name("READ_ONLY_PERCENTAGE")
                .help("percentage of read-only commands; default: 0")
                .takes_value(true),
        )
        .arg(
            Arg::new("payload_size")
                .long("payload_size")
                .value_name("PAYLOAD_SIZE")
                .help("size of the command payload; default: 100 (bytes)")
                .takes_value(true),
        )
        .arg(
            Arg::new("batch_max_size")
                .long("batch_max_size")
                .value_name("BATCH_MAX_SIZE")
                .help("max size of the batch; default: 1 (i.e., no batching)")
                .takes_value(true),
        )
        .arg(
            Arg::new("batch_max_delay")
                .long("batch_max_delay")
                .value_name("BATCH_MAX_DELAY")
                .help("max delay of a batch; default: 5 (milliseconds)")
                .takes_value(true),
        )
        .arg(
            Arg::new("tcp_nodelay")
                .long("tcp_nodelay")
                .value_name("TCP_NODELAY")
                .help("set TCP_NODELAY; default: true")
                .takes_value(true),
        )
        .arg(
            Arg::new("channel_buffer_size")
                .long("channel_buffer_size")
                .value_name("CHANNEL_BUFFER_SIZE")
                .help("set the size of the buffer in each channel used for task communication; default: 10000")
                .takes_value(true),
        )
        .arg(
            Arg::new("status_frequency")
                .long("status_frequency")
                .value_name("STATUS_FREQUENCY")
                .help("frequency of status messages; if set with 1, a status message will be shown for each completed command; default: no status messages are shown")
                .takes_value(true),
        )
        .arg(
            Arg::new("metrics_file")
                .long("metrics_file")
                .value_name("METRICS_FILE")
                .help("file in which metrics are written to; by default metrics are not logged")
                .takes_value(true),
        )
        .arg(
            Arg::new("stack_size")
                .long("stack_size")
                .value_name("STACK_SIZE")
                .help("stack size (in bytes) of each tokio thread; default: 2 * 1024 * 1024 (bytes)")
                .takes_value(true),
        )
        .arg(
            Arg::new("cpus")
                .long("cpus")
                .value_name("CPUS")
                .help("number of cpus to be used by tokio; by default all available cpus are used")
                .takes_value(true),
        )
        .arg(
            Arg::new("log_file")
                .long("log_file")
                .value_name("LOG_FILE")
                .help("file to which logs will be written to; if not set, logs will be redirect to the stdout")
                .takes_value(true),
        )
        .get_matches();
    // initialize tracing before anything is logged below
    let tracing_directives = None;
    let guard = fantoch::util::init_tracing_subscriber(
        matches.value_of("log_file"),
        tracing_directives,
    );
    // parse arguments
    let ids = parse_id_range(matches.value_of("ids"));
    let addresses = parse_addresses(matches.value_of("addresses"));
    let interval = parse_interval(matches.value_of("interval"));
    let workload = parse_workload(
        matches.value_of("shard_count"),
        matches.value_of("key_gen"),
        matches.value_of("keys_per_command"),
        matches.value_of("commands_per_client"),
        matches.value_of("read_only_percentage"),
        matches.value_of("payload_size"),
    );
    let batch_max_size =
        parse_batch_max_size(matches.value_of("batch_max_size"));
    let batch_max_delay =
        parse_batch_max_delay(matches.value_of("batch_max_delay"));
    let tcp_nodelay =
        common::parse_tcp_nodelay(matches.value_of("tcp_nodelay"));
    let channel_buffer_size = common::parse_channel_buffer_size(
        matches.value_of("channel_buffer_size"),
    );
    let status_frequency =
        parse_status_frequency(matches.value_of("status_frequency"));
    let metrics_file = parse_metrics_file(matches.value_of("metrics_file"));
    let stack_size = common::parse_stack_size(matches.value_of("stack_size"));
    let cpus = common::parse_cpus(matches.value_of("cpus"));
    // echo the parsed configuration
    info!("ids: {}-{}", ids.first().unwrap(), ids.last().unwrap());
    info!("client number: {}", ids.len());
    info!("addresses: {:?}", addresses);
    info!("workload: {:?}", workload);
    info!("batch_max_size: {:?}", batch_max_size);
    info!("batch_max_delay: {:?}", batch_max_delay);
    info!("tcp_nodelay: {:?}", tcp_nodelay);
    info!("channel buffer size: {:?}", channel_buffer_size);
    info!("status frequency: {:?}", status_frequency);
    info!("metrics file: {:?}", metrics_file);
    info!("stack size: {:?}", stack_size);
    let args = (
        ids,
        addresses,
        interval,
        workload,
        batch_max_size,
        batch_max_delay,
        tcp_nodelay,
        channel_buffer_size,
        status_frequency,
        metrics_file,
        stack_size,
        cpus,
    );
    (args, guard)
}
/// Parses a "START-END" range into the full list of client identifiers.
/// Panics when the argument is missing, a bound is not a number, or the
/// range does not have exactly two bounds.
fn parse_id_range(id_range: Option<&str>) -> Vec<ClientId> {
    let bounds: Vec<_> = id_range
        .expect("id range should be set")
        .split(RANGE_SEP)
        .map(|bound| {
            bound
                .parse::<ClientId>()
                .expect("range bound should be a number")
        })
        .collect();
    // check that we only have two bounds: start and end
    if bounds.len() == 2 {
        let start = bounds[0];
        let end = bounds[1];
        (start..=end).collect()
    } else {
        // fixed typo in the error message ("uppper" -> "upper")
        panic!("invalid id range (there should only be a lower bound and an upper bound)")
    }
}
/// Splits the comma-separated address list into owned strings; panics
/// when the argument is absent.
fn parse_addresses(addresses: Option<&str>) -> Vec<String> {
    let raw = addresses.expect("addresses should be set");
    raw.split(common::protocol::LIST_SEP)
        .map(String::from)
        .collect()
}
/// Parses an optional millisecond count into a `Duration`; panics when
/// the string is present but not a number.
fn parse_millis_duration(millis: Option<&str>) -> Option<Duration> {
    let raw = millis?;
    let millis = raw.parse::<u64>().expect("millis should be a number");
    Some(Duration::from_millis(millis))
}
/// Open-loop interval between submitted commands (in milliseconds);
/// `None` means clients run closed-loop.
fn parse_interval(interval: Option<&str>) -> Option<Duration> {
    parse_millis_duration(interval)
}
/// Builds a `Workload` from the raw command-line values, applying the
/// documented default for every argument that is absent.
fn parse_workload(
    shard_count: Option<&str>,
    key_gen: Option<&str>,
    keys_per_command: Option<&str>,
    commands_per_client: Option<&str>,
    read_only_percentage: Option<&str>,
    payload_size: Option<&str>,
) -> Workload {
    let shard_count = parse_shard_count(shard_count);
    let key_gen = parse_key_gen(key_gen);
    let keys_per_command = parse_keys_per_command(keys_per_command);
    let commands_per_client = parse_commands_per_client(commands_per_client);
    let read_only_percentage = parse_read_only_percentage(read_only_percentage);
    let payload_size = parse_payload_size(payload_size);
    let mut workload = Workload::new(
        shard_count,
        key_gen,
        keys_per_command,
        commands_per_client,
        payload_size,
    );
    // the read-only percentage is not a `Workload::new` parameter, so it
    // is set afterwards
    workload.set_read_only_percentage(read_only_percentage);
    workload
}
/// Keys accessed per command; defaults to `DEFAULT_KEYS_PER_COMMAND`.
fn parse_keys_per_command(number: Option<&str>) -> usize {
    match number {
        Some(raw) => raw
            .parse::<usize>()
            .expect("keys per command should be a number"),
        None => DEFAULT_KEYS_PER_COMMAND,
    }
}
/// Number of shards accessed in the system; defaults to
/// `DEFAULT_SHARD_COUNT`.
fn parse_shard_count(number: Option<&str>) -> usize {
    if let Some(raw) = number {
        raw.parse::<usize>().expect("shard count should be a number")
    } else {
        DEFAULT_SHARD_COUNT
    }
}
/// Parses a key-generator specification of the form
/// `conflict_pool,<conflict_rate>,<pool_size>` or
/// `zipf,<coefficient>,<keys_per_shard>`; defaults to `DEFAULT_KEY_GEN`
/// when absent. Panics on any malformed specification.
fn parse_key_gen(key_gen: Option<&str>) -> KeyGen {
    key_gen
        .map(|key_gen| {
            let parts: Vec<_>= key_gen.split(',').collect();
            // both supported generators take the name plus two values
            match parts.len() {
                2 | 3 => (),
                _ => panic!("invalid specification of key generator: {:?}", key_gen)
            };
            match parts[0] {
                "conflict_pool" => {
                    if parts.len() != 3 {
                        panic!("conflict_pool key generator takes two arguments");
                    }
                    let conflict_rate = parts[1]
                        .parse::<usize>()
                        .expect("conflict rate should be a number");
                    let pool_size = parts[2]
                        .parse::<usize>()
                        .expect("pool size should be a number");
                    KeyGen::ConflictPool { conflict_rate, pool_size }
                }
                "zipf" => {
                    if parts.len() != 3 {
                        panic!("zipf key generator takes two arguments");
                    }
                    let coefficient = parts[1]
                        .parse::<f64>()
                        .expect("zipf coefficient should be a float");
                    let keys_per_shard = parts[2]
                        .parse::<usize>()
                        .expect("number of keys (per shard) in the zipf distribution should be a number");
                    KeyGen::Zipf {
                        coefficient, total_keys_per_shard: keys_per_shard
                    }
                }
                kgen => panic!("invalid key generator type: {}", kgen),
            }
        })
        .unwrap_or(DEFAULT_KEY_GEN)
}
/// Commands issued by each client; defaults to
/// `DEFAULT_COMMANDS_PER_CLIENT`.
fn parse_commands_per_client(number: Option<&str>) -> usize {
    match number {
        Some(raw) => raw
            .parse::<usize>()
            .expect("commands per client should be a number"),
        None => DEFAULT_COMMANDS_PER_CLIENT,
    }
}
/// Percentage of read-only commands; defaults to
/// `DEFAULT_READ_ONLY_PERCENTAGE`.
fn parse_read_only_percentage(number: Option<&str>) -> usize {
    if let Some(raw) = number {
        raw.parse::<usize>()
            .expect("read only percentage should be a number")
    } else {
        DEFAULT_READ_ONLY_PERCENTAGE
    }
}
/// Command payload size in bytes; defaults to `DEFAULT_PAYLOAD_SIZE`.
fn parse_payload_size(number: Option<&str>) -> usize {
    match number {
        Some(raw) => {
            raw.parse::<usize>().expect("payload size should be a number")
        }
        None => DEFAULT_PAYLOAD_SIZE,
    }
}
/// Max number of commands per batch; defaults to
/// `DEFAULT_BATCH_MAX_SIZE` (no batching).
fn parse_batch_max_size(number: Option<&str>) -> usize {
    match number {
        Some(raw) => {
            raw.parse::<usize>().expect("batch max size should be a number")
        }
        None => DEFAULT_BATCH_MAX_SIZE,
    }
}
/// Max batch delay in milliseconds; defaults to
/// `DEFAULT_BATCH_MAX_DELAY`.
fn parse_batch_max_delay(duration: Option<&str>) -> Duration {
    parse_millis_duration(duration).unwrap_or(DEFAULT_BATCH_MAX_DELAY)
}
/// Frequency of status messages; `None` disables them. Panics when the
/// string is present but not a number.
fn parse_status_frequency(status_frequency: Option<&str>) -> Option<usize> {
    let raw = status_frequency?;
    Some(
        raw.parse::<usize>()
            .expect("status frequency should be a number"),
    )
}
/// File in which metrics are written; `None` disables metrics logging.
pub fn parse_metrics_file(metrics_file: Option<&str>) -> Option<String> {
    metrics_file.map(|file| file.to_owned())
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/bin/epaxos_locked.rs | fantoch_ps/src/bin/epaxos_locked.rs | mod common;
use color_eyre::Report;
use fantoch_ps::protocol::EPaxosLocked;
/// Entry point: runs an `EPaxosLocked` protocol process via the shared
/// runner in `common::protocol`.
fn main() -> Result<(), Report> {
    common::protocol::run::<EPaxosLocked>()
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/bin/tempo_atomic.rs | fantoch_ps/src/bin/tempo_atomic.rs | mod common;
use color_eyre::Report;
use fantoch_ps::protocol::TempoAtomic;
/// Entry point: runs a `TempoAtomic` protocol process via the shared
/// runner in `common::protocol`.
fn main() -> Result<(), Report> {
    common::protocol::run::<TempoAtomic>()
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/bin/fpaxos.rs | fantoch_ps/src/bin/fpaxos.rs | mod common;
use color_eyre::Report;
use fantoch_ps::protocol::FPaxos;
/// Entry point: runs an `FPaxos` protocol process via the shared runner
/// in `common::protocol`.
fn main() -> Result<(), Report> {
    common::protocol::run::<FPaxos>()
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/bin/basic.rs | fantoch_ps/src/bin/basic.rs | mod common;
use color_eyre::Report;
use fantoch::protocol::Basic;
/// Entry point: runs a `Basic` protocol process via the shared runner in
/// `common::protocol`.
fn main() -> Result<(), Report> {
    common::protocol::run::<Basic>()
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/bin/tempo.rs | fantoch_ps/src/bin/tempo.rs | mod common;
use color_eyre::Report;
use fantoch_ps::protocol::TempoSequential;
/// Entry point: runs a `TempoSequential` protocol process via the shared
/// runner in `common::protocol`.
fn main() -> Result<(), Report> {
    common::protocol::run::<TempoSequential>()
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/bin/common/mod.rs | fantoch_ps/src/bin/common/mod.rs | /// This modules contains common functionality to parse protocol arguments.
#[allow(dead_code)]
pub mod protocol;
use fantoch::info;
use std::time::Duration;
// defaults shared by the protocol and client binaries
const DEFAULT_TCP_NODELAY: bool = true;
const DEFAULT_TCP_BUFFER_SIZE: usize = 8 * 1024; // 8 KBs
const DEFAULT_CHANNEL_BUFFER_SIZE: usize = 10000;
const DEFAULT_STACK_SIZE: usize = 8 * 1024 * 1024; // 8MBs
#[allow(dead_code)]
/// Builds a multi-threaded tokio runtime with `cpus` worker threads (all
/// available cpus when `None`) and the given per-thread stack size.
pub fn tokio_runtime(
    stack_size: usize,
    cpus: Option<usize>,
) -> tokio::runtime::Runtime {
    // get number of cpus
    let available = num_cpus::get();
    let cpus = cpus.unwrap_or(available);
    info!("cpus: {} of {}", cpus, available);
    // create tokio runtime
    tokio::runtime::Builder::new_multi_thread()
        .worker_threads(cpus)
        .thread_stack_size(stack_size)
        .enable_io()
        .enable_time()
        .thread_name("runner")
        .build()
        .expect("tokio runtime build should work")
}
/// TCP_NODELAY flag; defaults to `DEFAULT_TCP_NODELAY`. Panics on a
/// non-boolean value.
pub fn parse_tcp_nodelay(tcp_nodelay: Option<&str>) -> bool {
    match tcp_nodelay {
        Some(flag) => flag
            .parse::<bool>()
            .expect("tcp_nodelay should be a boolean"),
        None => DEFAULT_TCP_NODELAY,
    }
}
/// TCP socket buffer size; defaults to `DEFAULT_TCP_BUFFER_SIZE`.
pub fn parse_tcp_buffer_size(buffer_size: Option<&str>) -> usize {
    parse_buffer_size(buffer_size, DEFAULT_TCP_BUFFER_SIZE)
}
/// TCP flush interval in milliseconds; `None` disables periodic flushes.
/// Panics when the string is present but not a number.
pub fn parse_tcp_flush_interval(interval: Option<&str>) -> Option<Duration> {
    let raw = interval?;
    let millis = raw
        .parse::<u64>()
        .expect("flush interval should be a number");
    Some(Duration::from_millis(millis))
}
/// Task-communication channel buffer size; defaults to
/// `DEFAULT_CHANNEL_BUFFER_SIZE`.
pub fn parse_channel_buffer_size(buffer_size: Option<&str>) -> usize {
    parse_buffer_size(buffer_size, DEFAULT_CHANNEL_BUFFER_SIZE)
}
/// Parses an optional buffer-size string, returning `default` when
/// absent; panics on a non-numeric value.
fn parse_buffer_size(buffer_size: Option<&str>, default: usize) -> usize {
    match buffer_size {
        Some(raw) => {
            raw.parse::<usize>().expect("buffer size should be a number")
        }
        None => default,
    }
}
/// Per-thread stack size in bytes; defaults to `DEFAULT_STACK_SIZE`.
pub fn parse_stack_size(stack_size: Option<&str>) -> usize {
    if let Some(raw) = stack_size {
        raw.parse::<usize>().expect("stack size should be a number")
    } else {
        DEFAULT_STACK_SIZE
    }
}
/// Number of cpus tokio may use; `None` means all available cpus.
/// Panics when the string is present but not a number.
pub fn parse_cpus(cpus: Option<&str>) -> Option<usize> {
    let raw = cpus?;
    Some(raw.parse::<usize>().expect("cpus should be a number"))
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/bin/common/protocol.rs | fantoch_ps/src/bin/common/protocol.rs | use clap::{Arg, Command};
use color_eyre::Report;
use fantoch::config::Config;
use fantoch::id::{ProcessId, ShardId};
use fantoch::info;
use fantoch::protocol::Protocol;
use std::net::IpAddr;
use std::time::Duration;
pub const LIST_SEP: &str = ",";
const DEFAULT_SHARDS: usize = 1;
const DEFAULT_SHARD_ID: ShardId = 0;
const DEFAULT_IP: &str = "127.0.0.1";
const DEFAULT_PORT: u16 = 3000;
const DEFAULT_CLIENT_PORT: u16 = 4000;
const DEFAULT_EXECUTE_AT_COMMIT: bool = false;
const DEFAULT_EXECUTOR_CLEANUP_INTERVAL: Duration = Duration::from_millis(5);
const DEFAULT_WORKERS: usize = 1;
const DEFAULT_EXECUTORS: usize = 1;
const DEFAULT_MULTIPLEXING: usize = 1;
const DEFAULT_NFR: bool = false;
// tempo's config
const DEFAULT_TEMPO_TINY_QUORUMS: bool = false;
const DEFAULT_TEMPO_DETACHED_SEND_INTERVAL: Duration = Duration::from_millis(5);
// protocol's config
const DEFAULT_SKIP_FAST_ACK: bool = false;
// Use jemalloc as the global allocator when the `jemalloc` feature is
// enabled; `#[cfg]` strips the whole item (including the
// `#[global_allocator]` attribute) when the feature is off.
#[global_allocator]
#[cfg(feature = "jemalloc")]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
/// All values parsed from the command line, in the order expected by
/// `fantoch::run::process`, plus the tokio runtime arguments at the end.
type ProtocolArgs = (
    ProcessId, // process id
    ShardId, // shard id
    Option<Vec<(ProcessId, ShardId)>>, // sorted processes
    IpAddr, // ip to bind to
    u16, // port
    u16, // client port
    Vec<(String, Option<Duration>)>, // addresses (with optional delay)
    Config, // protocol config
    bool, // tcp_nodelay
    usize, // tcp buffer size
    Option<Duration>, // tcp flush interval
    usize, // process channel buffer size
    usize, // client channel buffer size
    usize, // workers
    usize, // executors
    usize, // multiplexing
    Option<String>, // execution log
    Option<Duration>, // ping interval
    Option<String>, // metrics file
    usize, // stack size
    Option<usize>, // cpus
);
#[allow(dead_code)]
/// Parses command-line arguments, starts an instance of protocol `P` and
/// blocks (on a fresh tokio runtime) until it terminates. The `_guard`
/// returned by `parse_args` keeps the non-blocking tracing appender alive
/// for the whole run so buffered log lines are flushed.
pub fn run<P>() -> Result<(), Report>
where
    P: Protocol + Send + 'static,
{
    let (args, _guard) = parse_args();
    // unpack every parsed argument (same order as `ProtocolArgs`)
    let (
        process_id,
        shard_id,
        sorted_processes,
        ip,
        port,
        client_port,
        addresses,
        config,
        tcp_nodelay,
        tcp_buffer_size,
        tcp_flush_interval,
        process_channel_buffer_size,
        client_channel_buffer_size,
        workers,
        executors,
        multiplexing,
        execution_log,
        ping_interval,
        metrics_file,
        stack_size,
        cpus,
    ) = args;
    // build the process future (not yet running: futures are lazy)
    let process = fantoch::run::process::<P, String>(
        process_id,
        shard_id,
        sorted_processes,
        ip,
        port,
        client_port,
        addresses,
        config,
        tcp_nodelay,
        tcp_buffer_size,
        tcp_flush_interval,
        process_channel_buffer_size,
        client_channel_buffer_size,
        workers,
        executors,
        multiplexing,
        execution_log,
        ping_interval,
        metrics_file,
    );
    // drive the process future to completion on a dedicated runtime
    super::tokio_runtime(stack_size, cpus).block_on(process)
}
fn parse_args() -> (ProtocolArgs, tracing_appender::non_blocking::WorkerGuard) {
let matches = Command::new("process")
.version("0.1")
.author("Vitor Enes <vitorenesduarte@gmail.com>")
.about("Runs an instance of some protocol.")
.arg(
Arg::new("id")
.long("id")
.value_name("ID")
.help("process identifier")
.required(true)
.takes_value(true),
)
.arg(
Arg::new("shard_id")
.long("shard_id")
.value_name("SHARD_ID")
.help("shard identifier; default: 0")
.required(true)
.takes_value(true),
)
.arg(
Arg::new("sorted_processes")
.long("sorted")
.value_name("SORTED_PROCESSES")
.help("comma-separated list of 'ID-SHARD_ID', where ID is the process id and SHARD-ID the identifier of the shard it belongs to, sorted by distance; if not set, processes will ping each other and try to figure out this list from ping latency; for this, 'ping_interval' should be set")
.takes_value(true),
)
.arg(
Arg::new("ip")
.long("ip")
.value_name("IP")
.help("ip to bind to; default: 127.0.0.1")
.takes_value(true),
)
.arg(
Arg::new("port")
.long("port")
.value_name("PORT")
.help("port to bind to; default: 3000")
.takes_value(true),
)
.arg(
Arg::new("client_port")
.long("client_port")
.value_name("CLIENT_PORT")
.help("client port to bind to; default: 4000")
.takes_value(true),
)
.arg(
Arg::new("addresses")
.long("addresses")
.value_name("ADDRESSES")
.help("comma-separated list of addresses to connect to; if a delay (in milliseconds) is to be injected, the address should be of the form IP:PORT-DELAY; for example, 127.0.0.1:3000-120 injects a delay of 120 milliseconds before sending a message to the process at the 127.0.0.1:3000 address")
.required(true)
.takes_value(true),
)
.arg(
Arg::new("n")
.long("processes")
.value_name("PROCESS_NUMBER")
.help("number of processes")
.required(true)
.takes_value(true),
)
.arg(
Arg::new("f")
.long("faults")
.value_name("FAULT_NUMBER")
.help("number of allowed faults")
.required(true)
.takes_value(true),
)
.arg(
Arg::new("shard_count")
.long("shard_count")
.value_name("SHARDS_COUNT")
.help("number of shards; default: 1")
.required(true)
.takes_value(true),
)
.arg(
Arg::new("execute_at_commit")
.long("execute_at_commit")
.value_name("EXECUTE_AT_COMMIT")
.help("bool indicating whether execution should be skipped; default: false")
.takes_value(true),
)
.arg(
Arg::new("executor_cleanup_interval")
.long("executor_cleanup_interval")
.value_name("EXECUTOR_CLEANUP_INTERVAL")
.help("executor cleanup interval (in milliseconds); default: 5")
.takes_value(true),
)
.arg(
Arg::new("executor_monitor_pending_interval")
.long("executor_monitor_pending_interval")
.value_name("EXECUTOR_MONITOR_PENDING_INTERVAL")
.help("executor monitor pending interval (in milliseconds); if no value if set, pending commands are not monitored")
.takes_value(true),
)
.arg(
Arg::new("gc_interval")
.long("gc_interval")
.value_name("GC_INTERVAL")
.help("garbage collection interval (in milliseconds); if no value if set, stability doesn't run and commands are deleted at commit time")
.takes_value(true),
)
.arg(
Arg::new("leader")
.long("leader")
.value_name("LEADER")
.help("id of the starting leader process in leader-based protocols")
.takes_value(true),
)
.arg(
Arg::new("nfr")
.long("nfr")
.value_name("NFR")
.help("boolean indicating whether NFR is enabled; default: false")
.takes_value(true),
)
.arg(
Arg::new("tempo_tiny_quorums")
.long("tempo_tiny_quorums")
.value_name("TEMPO_TINY_QUORUMS")
.help("boolean indicating whether tempo's tiny quorums are enabled; default: false")
.takes_value(true),
)
.arg(
Arg::new("tempo_clock_bump_interval")
.long("tempo_clock_bump_interval")
.value_name("TEMPO_CLOCK_BUMP_INTERVAL")
.help("number indicating the interval (in milliseconds) between clock bumps; if this value is not set, then clocks are not bumped periodically")
.takes_value(true),
)
.arg(
Arg::new("tempo_detached_send_interval")
.long("tempo_detached_send_interval")
.value_name("TEMPO_DETACHED_SEND_INTERVAL")
.help("number indicating the interval (in milliseconds) between mdetached messages are sent; default: 5")
.takes_value(true),
)
.arg(
Arg::new("skip_fast_ack")
.long("skip_fast_ack")
.value_name("SKIP_FAST_ACK")
.help("boolean indicating whether protocols should try to enable the skip fast ack optimization; default: false")
.takes_value(true),
)
.arg(
Arg::new("tcp_nodelay")
.long("tcp_nodelay")
.value_name("TCP_NODELAY")
.help("TCP_NODELAY; default: true")
.takes_value(true),
)
.arg(
Arg::new("tcp_buffer_size")
.long("tcp_buffer_size")
.value_name("TCP_BUFFER_SIZE")
.help("size of the TCP buffer; default: 8192 (bytes)")
.takes_value(true),
)
.arg(
Arg::new("tcp_flush_interval")
.long("tcp_flush_interval")
.value_name("TCP_FLUSH_INTERVAL")
.help("TCP flush interval (in milliseconds); if 0, then flush occurs on every send; default: 0")
.takes_value(true),
)
.arg(
Arg::new("process_channel_buffer_size")
.long("process_channel_buffer_size")
.value_name("PROCESS_CHANNEL_BUFFER_SIZE")
.help(
"size of the buffer in each channel used for task communication related to the processes; default: 100",
)
.takes_value(true),
)
.arg(
Arg::new("client_channel_buffer_size")
.long("client_channel_buffer_size")
.value_name("CLIENT_CHANNEL_BUFFER_SIZE")
.help(
"size of the buffer in each channel used for task communication related to the clients; default: 100",
)
.takes_value(true),
)
.arg(
Arg::new("workers")
.long("workers")
.value_name("WORKERS")
.help("number of protocol workers; default: 1")
.takes_value(true),
)
.arg(
Arg::new("executors")
.long("executors")
.value_name("EXECUTORS")
.help("number of executors; default: 1")
.takes_value(true),
)
.arg(
Arg::new("multiplexing")
.long("multiplexing")
.value_name("MULTIPLEXING")
.help("number of connections between replicas; default: 1")
.takes_value(true),
)
.arg(
Arg::new("execution_log")
.long("execution_log")
.value_name("EXECUTION_LOG")
.help("log file in which execution info should be written to; by default this information is not logged")
.takes_value(true),
)
.arg(
Arg::new("ping_interval")
.long("ping_interval")
.value_name("PING_INTERVAL")
.help("number indicating the interval (in milliseconds) between pings between processes; by default there's no pinging; if set, this value should be > 0")
.takes_value(true),
)
.arg(
Arg::new("metrics_file")
.long("metrics_file")
.value_name("METRICS_FILE")
.help("file in which metrics are (periodically, every 5s) written to; by default metrics are not logged")
.takes_value(true),
)
.arg(
Arg::new("stack_size")
.long("stack_size")
.value_name("STACK_SIZE")
.help("stack size (in bytes) of each tokio thread; default: 2 * 1024 * 1024 (bytes)")
.takes_value(true),
)
.arg(
Arg::new("cpus")
.long("cpus")
.value_name("CPUS")
.help("number of cpus to be used by tokio; by default all available cpus are used")
.takes_value(true),
)
.arg(
Arg::new("log_file")
.long("log_file")
.value_name("LOG_FILE")
.help("file to which logs will be written to; if not set, logs will be redirect to the stdout")
.takes_value(true),
)
.get_matches();
let tracing_directives = None;
let guard = fantoch::util::init_tracing_subscriber(
matches.value_of("log_file"),
tracing_directives,
);
// parse arguments
let process_id = parse_process_id(matches.value_of("id"));
let shard_id = parse_shard_id(matches.value_of("shard_id"));
let sorted_processes =
parse_sorted_processes(matches.value_of("sorted_processes"));
let ip = parse_ip(matches.value_of("ip"));
let port = parse_port(matches.value_of("port"));
let client_port = parse_client_port(matches.value_of("client_port"));
let addresses = parse_addresses(matches.value_of("addresses"));
// parse config
let config = build_config(
parse_n(matches.value_of("n")),
parse_f(matches.value_of("f")),
parse_shard_count(matches.value_of("shard_count")),
parse_execute_at_commit(matches.value_of("execute_at_commit")),
parse_executor_cleanup_interval(
matches.value_of("executor_cleanup_interval"),
),
parse_executor_monitor_pending_interval(
matches.value_of("executor_monitor_pending_interval"),
),
parse_gc_interval(matches.value_of("gc_interval")),
parse_leader(matches.value_of("leader")),
parse_nfr(matches.value_of("nfr")),
parse_tempo_tiny_quorums(matches.value_of("tempo_tiny_quorums")),
parse_tempo_clock_bump_interval(
matches.value_of("tempo_clock_bump_interval"),
),
parse_tempo_detached_send_interval(
matches.value_of("tempo_detached_send_interval"),
),
parse_skip_fast_ack(matches.value_of("skip_fast_ack")),
);
let tcp_nodelay = super::parse_tcp_nodelay(matches.value_of("tcp_nodelay"));
let tcp_buffer_size =
super::parse_tcp_buffer_size(matches.value_of("tcp_buffer_size"));
let tcp_flush_interval =
super::parse_tcp_flush_interval(matches.value_of("tcp_flush_interval"));
let process_channel_buffer_size = super::parse_channel_buffer_size(
matches.value_of("process_channel_buffer_size"),
);
let client_channel_buffer_size = super::parse_channel_buffer_size(
matches.value_of("client_channel_buffer_size"),
);
let workers = parse_workers(matches.value_of("workers"));
let executors = parse_executors(matches.value_of("executors"));
let multiplexing = parse_multiplexing(matches.value_of("multiplexing"));
let execution_log = parse_execution_log(matches.value_of("execution_log"));
let ping_interval = parse_ping_interval(matches.value_of("ping_interval"));
let metrics_file = parse_metrics_file(matches.value_of("metrics_file"));
let stack_size = super::parse_stack_size(matches.value_of("stack_size"));
let cpus = super::parse_cpus(matches.value_of("cpus"));
info!("process id: {}", process_id);
info!("sorted processes: {:?}", sorted_processes);
info!("ip: {:?}", ip);
info!("port: {}", port);
info!("client port: {}", client_port);
info!("addresses: {:?}", addresses);
info!("config: {:?}", config);
info!("tcp_nodelay: {:?}", tcp_nodelay);
info!("tcp buffer size: {:?}", tcp_buffer_size);
info!("tcp flush interval: {:?}", tcp_flush_interval);
info!(
"process channel buffer size: {:?}",
process_channel_buffer_size
);
info!(
"client channel buffer size: {:?}",
client_channel_buffer_size
);
info!("workers: {:?}", workers);
info!("executors: {:?}", executors);
info!("multiplexing: {:?}", multiplexing);
info!("execution log: {:?}", execution_log);
info!("ping_interval: {:?}", ping_interval);
info!("metrics file: {:?}", metrics_file);
info!("stack size: {:?}", stack_size);
let args = (
process_id,
shard_id,
sorted_processes,
ip,
port,
client_port,
addresses,
config,
tcp_nodelay,
tcp_buffer_size,
tcp_flush_interval,
process_channel_buffer_size,
client_channel_buffer_size,
workers,
executors,
multiplexing,
execution_log,
ping_interval,
metrics_file,
stack_size,
cpus,
);
(args, guard)
}
/// Parses the (required) process identifier; panics if unset or not a
/// number.
fn parse_process_id(id: Option<&str>) -> ProcessId {
    parse_id::<ProcessId>(id.expect("process id should be set"))
}
/// Parses the shard identifier, defaulting to `DEFAULT_SHARD_ID` when no
/// value was provided.
fn parse_shard_id(shard_id: Option<&str>) -> ShardId {
    match shard_id {
        Some(id) => parse_id::<ShardId>(id),
        None => DEFAULT_SHARD_ID,
    }
}
/// Parses an identifier of any type implementing `FromStr`; panics when the
/// input is not a valid number.
fn parse_id<I>(id: &str) -> I
where
    I: std::str::FromStr,
    <I as std::str::FromStr>::Err: std::fmt::Debug,
{
    // target type is inferred from the call site
    id.parse().expect("id should be a number")
}
/// Parses the (optional) comma-separated list of sorted processes, where
/// each entry has the form 'ID-SHARD_ID'.
fn parse_sorted_processes(
    ids: Option<&str>,
) -> Option<Vec<(ProcessId, ShardId)>> {
    // nothing to parse when the flag is absent
    let ids = ids?;
    let processes = ids
        .split(LIST_SEP)
        .map(|entry| {
            let parts: Vec<_> = entry.split('-').collect();
            assert_eq!(parts.len(), 2, "each sorted process entry should have the form 'ID-SHARD_ID'");
            let id = parse_id::<ProcessId>(parts[0]);
            let shard_id = parse_id::<ShardId>(parts[1]);
            (id, shard_id)
        })
        .collect();
    Some(processes)
}
/// Parses the ip to bind to, defaulting to `DEFAULT_IP` when unset.
fn parse_ip(ip: Option<&str>) -> IpAddr {
    let ip = ip.unwrap_or(DEFAULT_IP);
    ip.parse::<IpAddr>()
        .expect("ip should be a valid ip address")
}
/// Parses the port to bind to, defaulting to `DEFAULT_PORT` when unset.
fn parse_port(port: Option<&str>) -> u16 {
    match port {
        Some(port) => port.parse::<u16>().expect("port should be a number"),
        None => DEFAULT_PORT,
    }
}
/// Parses the client port to bind to, defaulting to `DEFAULT_CLIENT_PORT`
/// when unset.
fn parse_client_port(port: Option<&str>) -> u16 {
    match port {
        Some(port) => {
            port.parse::<u16>().expect("client port should be a number")
        }
        None => DEFAULT_CLIENT_PORT,
    }
}
/// Parses the (required) comma-separated list of addresses to connect to.
/// Each entry is either `ADDRESS` or `ADDRESS-DELAY`, where `DELAY` is the
/// number of milliseconds to inject before sending messages to that address.
///
/// Fix: the "invalid address" panic previously printed only the part before
/// the first '-' (the shadowed `address`), hiding the malformed remainder;
/// it now reports the whole entry.
fn parse_addresses(addresses: Option<&str>) -> Vec<(String, Option<Duration>)> {
    addresses
        .expect("addresses should be set")
        .split(LIST_SEP)
        .map(|entry| {
            let parts: Vec<_> = entry.split('-').collect();
            let address = parts[0].to_string();
            match parts.len() {
                1 => {
                    // in this case, no delay was set
                    (address, None)
                }
                2 => {
                    let millis = parts[1]
                        .parse::<u64>()
                        .expect("address delay should be a number");
                    let delay = Duration::from_millis(millis);
                    (address, Some(delay))
                }
                _ => {
                    // report the full entry, not just `parts[0]`
                    panic!("invalid address: {:?}", entry);
                }
            }
        })
        .collect()
}
/// Builds a protocol `Config` from the parsed command-line values.
///
/// Optional values (`executor_monitor_pending_interval`, `gc_interval`,
/// `leader` and `tempo_clock_bump_interval`) are only set on the config when
/// provided; otherwise the config keeps whatever `Config::new` defaults to.
pub fn build_config(
    n: usize,
    f: usize,
    shard_count: usize,
    execute_at_commit: bool,
    executor_cleanup_interval: Duration,
    executor_monitor_pending_interval: Option<Duration>,
    gc_interval: Option<Duration>,
    leader: Option<ProcessId>,
    nfr: bool,
    tempo_tiny_quorums: bool,
    tempo_clock_bump_interval: Option<Duration>,
    tempo_detached_send_interval: Duration,
    skip_fast_ack: bool,
) -> Config {
    // create config
    let mut config = Config::new(n, f);
    config.set_shard_count(shard_count);
    config.set_execute_at_commit(execute_at_commit);
    config.set_executor_cleanup_interval(executor_cleanup_interval);
    if let Some(interval) = executor_monitor_pending_interval {
        config.set_executor_monitor_pending_interval(interval);
    }
    if let Some(interval) = gc_interval {
        config.set_gc_interval(interval);
    }
    // set leader if we have one
    if let Some(leader) = leader {
        config.set_leader(leader);
    }
    config.set_nfr(nfr);
    // set tempo's config
    config.set_tempo_tiny_quorums(tempo_tiny_quorums);
    if let Some(interval) = tempo_clock_bump_interval {
        config.set_tempo_clock_bump_interval(interval);
    }
    config.set_tempo_detached_send_interval(tempo_detached_send_interval);
    // set protocol's config
    config.set_skip_fast_ack(skip_fast_ack);
    config
}
/// Parses `n`, the (required) total number of processes.
pub fn parse_n(n: Option<&str>) -> usize {
    let n = n.expect("n should be set");
    n.parse::<usize>().expect("n should be a number")
}
/// Parses `f`, the (required) number of allowed faults.
pub fn parse_f(f: Option<&str>) -> usize {
    let f = f.expect("f should be set");
    f.parse::<usize>().expect("f should be a number")
}
/// Parses the number of shards, defaulting to `DEFAULT_SHARDS` when unset.
pub fn parse_shard_count(shards: Option<&str>) -> usize {
    match shards {
        Some(shards) => {
            shards.parse::<usize>().expect("shards should be a number")
        }
        None => DEFAULT_SHARDS,
    }
}
/// Parses the `execute_at_commit` flag, defaulting to
/// `DEFAULT_EXECUTE_AT_COMMIT` when unset.
pub fn parse_execute_at_commit(execute_at_commit: Option<&str>) -> bool {
    match execute_at_commit {
        Some(value) => value
            .parse::<bool>()
            .expect("execute_at_commit should be a bool"),
        None => DEFAULT_EXECUTE_AT_COMMIT,
    }
}
/// Parses the executor cleanup interval (ms), defaulting to
/// `DEFAULT_EXECUTOR_CLEANUP_INTERVAL` when unset.
pub fn parse_executor_cleanup_interval(interval: Option<&str>) -> Duration {
    match interval {
        Some(value) => {
            let ms = value
                .parse::<u64>()
                .expect("executor_cleanup_interval should be a number");
            Duration::from_millis(ms)
        }
        None => DEFAULT_EXECUTOR_CLEANUP_INTERVAL,
    }
}
/// Parses the executor monitor pending interval (ms); `None` means pending
/// commands are not monitored.
pub fn parse_executor_monitor_pending_interval(
    interval: Option<&str>,
) -> Option<Duration> {
    let interval = interval?;
    let ms = interval
        .parse::<u64>()
        .expect("executor_monitor_pending_interval should be a number");
    Some(Duration::from_millis(ms))
}
/// Parses the garbage collection interval (ms); `None` means stability
/// doesn't run.
pub fn parse_gc_interval(gc_interval: Option<&str>) -> Option<Duration> {
    let gc_interval = gc_interval?;
    let ms = gc_interval
        .parse::<u64>()
        .expect("gc_interval should be a number");
    Some(Duration::from_millis(ms))
}
/// Parses the (optional) id of the starting leader process.
fn parse_leader(leader: Option<&str>) -> Option<ProcessId> {
    match leader {
        Some(leader) => Some(parse_id(leader)),
        None => None,
    }
}
/// Parses the NFR flag, defaulting to `DEFAULT_NFR` when unset.
fn parse_nfr(nfr: Option<&str>) -> bool {
    match nfr {
        Some(nfr) => nfr.parse::<bool>().expect("nfr should be a bool"),
        None => DEFAULT_NFR,
    }
}
/// Parses tempo's tiny-quorums flag, defaulting to
/// `DEFAULT_TEMPO_TINY_QUORUMS` when unset.
fn parse_tempo_tiny_quorums(tempo_tiny_quorums: Option<&str>) -> bool {
    match tempo_tiny_quorums {
        Some(value) => value
            .parse::<bool>()
            .expect("tempo_tiny_quorums should be a bool"),
        None => DEFAULT_TEMPO_TINY_QUORUMS,
    }
}
/// Parses tempo's clock bump interval (ms); `None` means clocks are not
/// bumped periodically.
fn parse_tempo_clock_bump_interval(interval: Option<&str>) -> Option<Duration> {
    let interval = interval?;
    let ms = interval
        .parse::<u64>()
        .expect("tempo_clock_bump_interval should be a number");
    Some(Duration::from_millis(ms))
}
/// Parses tempo's detached send interval (ms), defaulting to
/// `DEFAULT_TEMPO_DETACHED_SEND_INTERVAL` when unset.
fn parse_tempo_detached_send_interval(interval: Option<&str>) -> Duration {
    match interval {
        Some(value) => {
            let ms = value
                .parse::<u64>()
                .expect("tempo_detached_send_interval should be a number");
            Duration::from_millis(ms)
        }
        None => DEFAULT_TEMPO_DETACHED_SEND_INTERVAL,
    }
}
/// Parses the skip-fast-ack flag, defaulting to `DEFAULT_SKIP_FAST_ACK`
/// when unset.
pub fn parse_skip_fast_ack(skip_fast_ack: Option<&str>) -> bool {
    match skip_fast_ack {
        Some(value) => value
            .parse::<bool>()
            .expect("skip_fast_ack should be a boolean"),
        None => DEFAULT_SKIP_FAST_ACK,
    }
}
/// Parses the number of protocol workers, defaulting to `DEFAULT_WORKERS`
/// when unset.
fn parse_workers(workers: Option<&str>) -> usize {
    match workers {
        Some(value) => value
            .parse::<usize>()
            .expect("workers should be a number"),
        None => DEFAULT_WORKERS,
    }
}
/// Parses the number of executors, defaulting to `DEFAULT_EXECUTORS` when
/// unset.
fn parse_executors(executors: Option<&str>) -> usize {
    match executors {
        Some(value) => value
            .parse::<usize>()
            .expect("executors should be a number"),
        None => DEFAULT_EXECUTORS,
    }
}
/// Parses the number of connections between replicas, defaulting to
/// `DEFAULT_MULTIPLEXING` when unset.
fn parse_multiplexing(multiplexing: Option<&str>) -> usize {
    match multiplexing {
        Some(value) => value
            .parse::<usize>()
            .expect("multiplexing should be a number"),
        None => DEFAULT_MULTIPLEXING,
    }
}
/// Parses the (optional) execution log file path.
pub fn parse_execution_log(execution_log: Option<&str>) -> Option<String> {
    execution_log.map(|path| path.to_string())
}
/// Parses the ping interval (ms); `None` means no pinging.
fn parse_ping_interval(interval: Option<&str>) -> Option<Duration> {
    let interval = interval?;
    let millis = interval
        .parse::<u64>()
        .expect("ping_interval should be a number");
    Some(Duration::from_millis(millis))
}
/// Parses the (optional) metrics file path.
pub fn parse_metrics_file(metrics_file: Option<&str>) -> Option<String> {
    metrics_file.map(|file| file.to_string())
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/executor/slot.rs | fantoch_ps/src/executor/slot.rs | use fantoch::command::Command;
use fantoch::config::Config;
use fantoch::executor::{
ExecutionOrderMonitor, Executor, ExecutorMetrics, ExecutorResult,
};
use fantoch::id::{ProcessId, ShardId};
use fantoch::kvs::KVStore;
use fantoch::protocol::MessageIndex;
use fantoch::time::SysTime;
use fantoch::HashMap;
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
// A slot is a (1-based) sequence number assigned to a committed command.
type Slot = u64;
/// Executor that executes commands in increasing slot order (1, 2, 3, ...).
#[derive(Clone)]
pub struct SlotExecutor {
    shard_id: ShardId,
    config: Config,
    store: KVStore,
    // the lowest slot that has not been executed yet
    next_slot: Slot,
    // TODO maybe BinaryHeap
    // commands received for slots higher than `next_slot`, buffered until
    // the gap is filled
    to_execute: HashMap<Slot, Command>,
    metrics: ExecutorMetrics,
    // results ready to be fetched by `to_clients`
    to_clients: VecDeque<ExecutorResult>,
}
impl Executor for SlotExecutor {
    type ExecutionInfo = SlotExecutionInfo;
    /// Creates a new slot executor; execution starts at slot 1 with an empty
    /// buffer of pending commands.
    fn new(_process_id: ProcessId, shard_id: ShardId, config: Config) -> Self {
        let store = KVStore::new(config.executor_monitor_execution_order());
        // the next slot to be executed is 1
        let next_slot = 1;
        // there's nothing to execute in the beginning
        let to_execute = HashMap::new();
        let metrics = ExecutorMetrics::new();
        let to_clients = Default::default();
        Self {
            shard_id,
            config,
            store,
            next_slot,
            to_execute,
            metrics,
            to_clients,
        }
    }
    /// Handles new execution info: either executes the command immediately
    /// (when `execute_at_commit` is set) or buffers it until every command in
    /// a lower slot has been executed.
    fn handle(&mut self, info: Self::ExecutionInfo, _time: &dyn SysTime) {
        let SlotExecutionInfo { slot, cmd } = info;
        // we shouldn't receive execution info about slots already executed
        // TODO actually, if recovery is involved, then this may not be
        // necessarily true
        assert!(slot >= self.next_slot);
        if self.config.execute_at_commit() {
            self.execute(cmd);
        } else {
            // add received command to the commands to be executed and try to
            // execute commands
            // TODO here we could optimize and only insert the command if it
            // isn't the command that will be executed in the next
            // slot
            let res = self.to_execute.insert(slot, cmd);
            // each slot carries at most one command, so there can't be a
            // previous entry
            assert!(res.is_none());
            self.try_next_slot();
        }
    }
    /// Pops the next result to be delivered to clients, if any.
    fn to_clients(&mut self) -> Option<ExecutorResult> {
        self.to_clients.pop_front()
    }
    /// This executor is sequential (a total order on slots cannot be
    /// partitioned across workers).
    fn parallel() -> bool {
        false
    }
    fn metrics(&self) -> &ExecutorMetrics {
        &self.metrics
    }
    fn monitor(&self) -> Option<ExecutionOrderMonitor> {
        self.store.monitor().cloned()
    }
}
impl SlotExecutor {
    /// Executes buffered commands in slot order for as long as the command
    /// for `next_slot` is available.
    fn try_next_slot(&mut self) {
        loop {
            match self.to_execute.remove(&self.next_slot) {
                Some(cmd) => {
                    self.execute(cmd);
                    // advance to the following slot
                    self.next_slot += 1;
                }
                None => break,
            }
        }
    }
    /// Executes a single command against the store and queues its results
    /// for the clients.
    fn execute(&mut self, cmd: Command) {
        let outputs = cmd.execute(self.shard_id, &mut self.store);
        self.to_clients.extend(outputs);
    }
}
/// Execution info consumed by `SlotExecutor`: a command together with the
/// slot it was ordered in.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct SlotExecutionInfo {
    slot: Slot,
    cmd: Command,
}
impl SlotExecutionInfo {
pub fn new(slot: Slot, cmd: Command) -> Self {
Self { slot, cmd }
}
}
impl MessageIndex for SlotExecutionInfo {
    // no index: this executor is not parallel, so execution info is not
    // partitioned across executor workers
    fn index(&self) -> Option<(usize, usize)> {
        None
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use fantoch::id::Rifl;
    use fantoch::kvs::KVOp;
    use permutator::Permutation;
    use std::collections::BTreeMap;
    /// Checks that, regardless of the order in which execution info arrives,
    /// commands are executed in slot order: every delivery permutation must
    /// produce the same per-rifl results.
    #[test]
    fn slot_executor_flow() {
        // create rifls
        let rifl_1 = Rifl::new(1, 1);
        let rifl_2 = Rifl::new(2, 1);
        let rifl_3 = Rifl::new(3, 1);
        let rifl_4 = Rifl::new(4, 1);
        let rifl_5 = Rifl::new(5, 1);
        let rifl_6 = Rifl::new(6, 1);
        // create commands
        let key = String::from("a");
        let cmd_1 = Command::from(
            rifl_1,
            vec![(key.clone(), KVOp::Put(String::from("1")))],
        );
        let cmd_2 = Command::from(rifl_2, vec![(key.clone(), KVOp::Get)]);
        let cmd_3 = Command::from(
            rifl_3,
            vec![(key.clone(), KVOp::Put(String::from("2")))],
        );
        let cmd_4 = Command::from(rifl_4, vec![(key.clone(), KVOp::Get)]);
        let cmd_5 = Command::from(
            rifl_5,
            vec![(key.clone(), KVOp::Put(String::from("3")))],
        );
        let cmd_6 = Command::from(rifl_6, vec![(key.clone(), KVOp::Get)]);
        // create expected results: each put returns `None` and each get
        // returns the value written by the put in the preceding slot
        let mut expected_results = BTreeMap::new();
        expected_results.insert(rifl_1, vec![None]);
        expected_results.insert(rifl_2, vec![Some(String::from("1"))]);
        expected_results.insert(rifl_3, vec![None]);
        expected_results.insert(rifl_4, vec![Some(String::from("2"))]);
        expected_results.insert(rifl_5, vec![None]);
        expected_results.insert(rifl_6, vec![Some(String::from("3"))]);
        // create execution info
        let ei_1 = SlotExecutionInfo::new(1, cmd_1);
        let ei_2 = SlotExecutionInfo::new(2, cmd_2);
        let ei_3 = SlotExecutionInfo::new(3, cmd_3);
        let ei_4 = SlotExecutionInfo::new(4, cmd_4);
        let ei_5 = SlotExecutionInfo::new(5, cmd_5);
        let ei_6 = SlotExecutionInfo::new(6, cmd_6);
        let mut infos = vec![ei_1, ei_2, ei_3, ei_4, ei_5, ei_6];
        // check the execution order for all possible permutations
        infos.permutation().for_each(|p| {
            // create config (that will not be used)
            let process_id = 1;
            let config = Config::new(0, 0);
            // there's a single shard
            let shard_id = 0;
            // create slot executor
            let mut executor = SlotExecutor::new(process_id, shard_id, config);
            let results: BTreeMap<_, _> = p
                .clone()
                .into_iter()
                .flat_map(|info| {
                    executor.handle(info, &fantoch::time::RunTime);
                    executor
                        .to_clients_iter()
                        .map(|executor_result| {
                            assert_eq!(
                                key, executor_result.key,
                                "expected key not in partial"
                            );
                            (
                                executor_result.rifl,
                                executor_result.partial_results,
                            )
                        })
                        .collect::<Vec<_>>()
                })
                .collect();
            assert_eq!(results, expected_results);
        });
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/executor/mod.rs | fantoch_ps/src/executor/mod.rs | // This module contains the implementation of a dependency graph executor.
mod graph;
// This module contains the implementation of a votes table executor.
mod table;
// This module contains the implementation of a predecessors executor.
mod pred;
// This module contains the implementation of a slot executor.
mod slot;
// Re-exports.
pub use graph::{GraphExecutionInfo, GraphExecutor};
pub use pred::{PredecessorsExecutionInfo, PredecessorsExecutor};
pub use slot::{SlotExecutionInfo, SlotExecutor};
pub use table::{TableExecutionInfo, TableExecutor};
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/executor/graph/tarjan.rs | fantoch_ps/src/executor/graph/tarjan.rs | use super::index::{VertexIndex, VertexRef};
use crate::protocol::common::graph::Dependency;
use fantoch::command::Command;
use fantoch::config::Config;
use fantoch::id::{Dot, ProcessId};
use fantoch::singleton;
use fantoch::time::SysTime;
use fantoch::HashSet;
use fantoch::{debug, trace};
use std::cmp;
use std::collections::BTreeSet;
use threshold::AEClock;
/// An SCC is a set of commands; commands are sorted inside an SCC given
/// their dot.
pub type SCC = BTreeSet<Dot>;
/// Outcome of an SCC search (see `TarjanSCCFinder::strong_connect`).
#[derive(PartialEq)]
pub enum FinderResult {
    // at least one SCC was closed during the search
    Found,
    // the search hit dependencies that are not locally known yet
    MissingDependencies(HashSet<Dependency>),
    // NOTE(review): not constructed in this file's visible code — presumably
    // produced by the finder's caller; confirm against the executor
    NotPending,
    // the search finished without closing an SCC
    NotFound,
}
/// Finds strongly connected components using Tarjan's algorithm.
#[derive(Clone)]
pub struct TarjanSCCFinder {
    // id of this process (used in log/panic messages)
    process_id: ProcessId,
    // protocol config; `shard_count` decides how missing deps are reported
    config: Config,
    // monotonically increasing visit counter (Tarjan's "index"); a vertex
    // with id 0 has not been visited yet
    id: usize,
    // depth-first-search stack of dots
    stack: Vec<Dot>,
    // SCCs found so far; taken by `sccs`
    sccs: Vec<SCC>,
    // missing dependencies gathered during a first find under partial
    // replication; taken by `finalize`
    missing_deps: HashSet<Dependency>,
}
impl TarjanSCCFinder {
    /// Creates a new SCC finder that employs Tarjan's algorithm.
    pub fn new(process_id: ProcessId, config: Config) -> Self {
        Self {
            process_id,
            config,
            id: 0,
            stack: Vec::new(),
            sccs: Vec::new(),
            missing_deps: HashSet::new(),
        }
    }
    /// Returns a list with the SCCs found, leaving the finder's list empty.
    #[must_use]
    pub fn sccs(&mut self) -> Vec<SCC> {
        std::mem::take(&mut self.sccs)
    }
    /// Returns a set with all dots visited.
    /// It also resets the ids of all vertices still on the stack.
    #[must_use]
    pub fn finalize(
        &mut self,
        vertex_index: &VertexIndex,
    ) -> (HashSet<Dot>, HashSet<Dependency>) {
        let _process_id = self.process_id;
        // reset id
        self.id = 0;
        // reset the id of each dot in the stack, while computing the set of
        // visited dots
        let mut visited = HashSet::new();
        while let Some(dot) = self.stack.pop() {
            trace!(
                "p{}: Finder::finalize removing {:?} from stack",
                _process_id,
                dot
            );
            // find vertex and reset its id
            let vertex = if let Some(vertex) = vertex_index.find(&dot) {
                vertex
            } else {
                panic!(
                    "p{}: Finder::finalize stack member {:?} should exist",
                    self.process_id, dot
                );
            };
            // id 0 marks the vertex as "not visited" for the next search
            vertex.write().id = 0;
            // add dot to set of visited
            visited.insert(dot);
        }
        // return visited dots and missing dependencies (if any)
        (visited, std::mem::take(&mut self.missing_deps))
    }
    /// Tries to find an SCC starting from root `dot`.
    ///
    /// `first_find` indicates whether this is the first search for the root
    /// dot; under partial replication a first find collects ALL missing
    /// dependencies (in `self.missing_deps`) instead of bailing on the first
    /// one. `scc_count` and `missing_deps_count` are incremented with the
    /// number of SCC members found and missing dependencies seen.
    pub fn strong_connect(
        &mut self,
        first_find: bool,
        dot: Dot,
        vertex_ref: &VertexRef<'_>,
        executed_clock: &mut AEClock<ProcessId>,
        added_to_executed_clock: &mut HashSet<Dot>,
        vertex_index: &VertexIndex,
        scc_count: &mut usize,
        missing_deps_count: &mut usize,
    ) -> FinderResult {
        // update id
        self.id += 1;
        // get vertex
        let mut vertex = vertex_ref.write();
        // set id and low for vertex
        vertex.id = self.id;
        vertex.low = self.id;
        // add to the stack
        vertex.on_stack = true;
        self.stack.push(dot);
        debug!(
            "p{}: Finder::strong_connect {:?} with id {}",
            self.process_id, dot, self.id
        );
        for i in 0..vertex.deps.len() {
            // TODO we should panic if we find a dependency highest than self
            let ignore = |dep_dot: Dot| {
                // ignore self or if already executed
                dep_dot == dot
                    || executed_clock
                        .contains(&dep_dot.source(), dep_dot.sequence())
            };
            // get dep dot
            let dep_dot = vertex.deps[i].dot;
            if ignore(dep_dot) {
                trace!(
                    "p{}: Finder::strong_connect ignoring dependency {:?}",
                    self.process_id,
                    dep_dot
                );
                continue;
            }
            match vertex_index.find(&dep_dot) {
                None => {
                    let dep = vertex.deps[i].clone();
                    debug!(
                        "p{}: Finder::strong_connect missing {:?}",
                        self.process_id, dep
                    );
                    if self.config.shard_count() == 1 || !first_find {
                        return FinderResult::MissingDependencies(singleton![
                            dep
                        ]);
                    } else {
                        // if partial replication *and* it's the first search
                        // we're doing for the root dot, simply save this `dep`
                        // as a missing dependency but keep going; this makes
                        // sure that we will request all missing dependencies in
                        // a single request
                        self.missing_deps.insert(dep);
                        *missing_deps_count += 1;
                    };
                }
                Some(dep_vertex_ref) => {
                    // get vertex
                    let mut dep_vertex = dep_vertex_ref.read();
                    // if not visited, visit
                    if dep_vertex.id == 0 {
                        trace!(
                            "p{}: Finder::strong_connect non-visited {:?}",
                            self.process_id,
                            dep_dot
                        );
                        // drop guards (both locks must be released before the
                        // recursive call re-acquires them)
                        drop(vertex);
                        drop(dep_vertex);
                        // OPTIMIZATION: passing the dep vertex ref as an
                        // argument to `strong_connect` avoids double look-up
                        let mut dep_missing_deps_count = 0;
                        let result = self.strong_connect(
                            first_find,
                            dep_dot,
                            &dep_vertex_ref,
                            executed_clock,
                            added_to_executed_clock,
                            vertex_index,
                            scc_count,
                            &mut dep_missing_deps_count,
                        );
                        // update missing deps count with the number of missing
                        // deps of our dep
                        *missing_deps_count += dep_missing_deps_count;
                        // if missing dependency, give up
                        if let FinderResult::MissingDependencies(_) = result {
                            return result;
                        }
                        // get guards again
                        vertex = vertex_ref.write();
                        dep_vertex = dep_vertex_ref.read();
                        // min low with dep low
                        vertex.low = cmp::min(vertex.low, dep_vertex.low);
                        // drop dep guard
                        drop(dep_vertex);
                    } else {
                        // if visited and on the stack
                        if dep_vertex.on_stack {
                            trace!("p{}: Finder::strong_connect dependency on stack {:?}", self.process_id, dep_dot);
                            // min low with dep id
                            vertex.low = cmp::min(vertex.low, dep_vertex.id);
                        }
                        // drop dep guard
                        drop(dep_vertex);
                    }
                }
            }
        }
        // if after visiting all neighbors, an SCC was found if vertex.id ==
        // vertex.low
        // - good news: the SCC members are on the stack
        if *missing_deps_count == 0 && vertex.id == vertex.low {
            let mut scc = SCC::new();
            // drop guards
            drop(vertex);
            drop(vertex_ref);
            loop {
                // pop an element from the stack
                let member_dot = self
                    .stack
                    .pop()
                    .expect("there should be an SCC member on the stack");
                debug!(
                    "p{}: Finder::strong_connect new SCC member {:?}",
                    self.process_id, member_dot
                );
                // get its vertex and change its `on_stack` value
                let member_vertex_ref = vertex_index
                    .find(&member_dot)
                    .expect("stack member should exist");
                // increment number of commands found
                *scc_count += 1;
                // get its vertex and change its `on_stack` value
                let mut member_vertex = member_vertex_ref.write();
                member_vertex.on_stack = false;
                // add it to the SCC and check it wasn't there before
                assert!(scc.insert(member_dot));
                // drop guards
                drop(member_vertex);
                drop(member_vertex_ref);
                // update executed clock:
                // - this is a nice optimization (that I think we missed in
                //   Atlas); instead of waiting for the root-level recursion to
                //   finish in order to update `executed_clock` (which is
                //   consulted to decide what are the dependencies of a
                //   command), we can update it right here, possibly reducing a
                //   few iterations
                // TODO add this check back:
                // check if the command is replicated by my shard
                // let is_mine =
                //     member_vertex.cmd.replicated_by(&self.shard_id);
                // if executed_clock.write("Finder::strong_connect", |clock| {
                //     clock.add(&member_dot.source(), member_dot.sequence())
                // })
                //     && is_mine
                // {
                //     panic!(
                //         "p{}: Finder::strong_connect dot {:?} already
                //     executed", self.process_id,
                //         member_dot );
                // }
                executed_clock.add(&member_dot.source(), member_dot.sequence());
                if self.config.shard_count() > 1 {
                    added_to_executed_clock.insert(member_dot);
                }
                trace!(
                    "p{}: Finder::strong_connect executed clock {:?}",
                    self.process_id,
                    executed_clock
                );
                // quit if root is found
                if member_dot == dot {
                    break;
                }
            }
            // add scc to to the set of sccs
            self.sccs.push(scc);
            FinderResult::Found
        } else {
            FinderResult::NotFound
        }
    }
}
/// A node in the dependency graph: a committed command together with its
/// dependencies and the bookkeeping fields used by Tarjan's SCC algorithm.
#[derive(Debug, Clone)]
pub struct Vertex {
    /// identifier of the command (source process + sequence number)
    pub dot: Dot,
    /// the committed command itself
    pub cmd: Command,
    /// dependencies that must be executed before (or together with) this one
    pub deps: Vec<Dependency>,
    /// when (in millis) this vertex was created; used to measure how long the
    /// command stayed pending before execution
    pub start_time_ms: u64,
    // specific to tarjan's algorithm; zeroed until the finder visits the node
    id: usize,
    low: usize,
    on_stack: bool,
}
impl Vertex {
    /// Creates a new vertex for a committed command, stamping it with the
    /// current time so the execution delay can be measured later.
    pub fn new(
        dot: Dot,
        cmd: Command,
        deps: Vec<Dependency>,
        time: &dyn SysTime,
    ) -> Self {
        let start_time_ms = time.millis();
        Self {
            dot,
            cmd,
            deps,
            start_time_ms,
            // tarjan-specific fields start zeroed; they are initialized when
            // the SCC finder first visits this vertex
            id: 0,
            low: 0,
            on_stack: false,
        }
    }

    /// Consumes the vertex, returning how long (in millis) it was pending
    /// together with its command.
    pub fn into_command(self, time: &dyn SysTime) -> (u64, Command) {
        let end_time_ms = time.millis();
        // use a saturating subtraction: if the time source is not strictly
        // monotonic, a plain `-` would underflow (panicking in debug builds
        // and wrapping to a huge duration in release builds)
        let duration_ms = end_time_ms.saturating_sub(self.start_time_ms);
        (duration_ms, self.cmd)
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/executor/graph/index.rs | fantoch_ps/src/executor/graph/index.rs | use super::tarjan::Vertex;
use crate::protocol::common::graph::Dependency;
use fantoch::config::Config;
use fantoch::hash_map::{Entry, HashMap};
use fantoch::id::{Dot, ProcessId, ShardId};
use fantoch::info;
use fantoch::shared::{SharedMap, SharedMapRef};
use fantoch::time::SysTime;
use fantoch::HashSet;
use parking_lot::{RwLock, RwLockReadGuard};
use std::collections::BTreeMap;
use std::sync::Arc;
use std::time::Duration;
use threshold::AEClock;
pub type VertexRef<'a> = SharedMapRef<'a, Dot, RwLock<Vertex>>;
/// Index of pending vertices, keyed by dot. The map itself is shared
/// (`Arc<SharedMap>`) and every vertex sits behind its own `RwLock`, so the
/// SCC finder can lock vertices individually while traversing the graph.
#[derive(Debug, Clone)]
pub struct VertexIndex {
    process_id: ProcessId,
    index: Arc<SharedMap<Dot, RwLock<Vertex>>>,
}
impl VertexIndex {
    /// Creates an empty vertex index for this process.
    pub fn new(process_id: ProcessId) -> Self {
        Self {
            process_id,
            index: Arc::new(SharedMap::new()),
        }
    }

    /// Indexes a new vertex, returning any previous vertex indexed.
    pub fn index(&mut self, vertex: Vertex) -> Option<Vertex> {
        let dot = vertex.dot;
        let cell = RwLock::new(vertex);
        self.index.insert(dot, cell).map(|cell| cell.into_inner())
    }

    /// Iterates over the dots of all currently-pending vertices.
    #[allow(dead_code)]
    pub fn dots(&self) -> impl Iterator<Item = Dot> + '_ {
        self.index.iter().map(|entry| *entry.key())
    }

    /// Looks up the vertex associated with `dot`, if it is still pending.
    pub fn find(&self, dot: &Dot) -> Option<VertexRef<'_>> {
        self.index.get(dot)
    }

    /// Removes a vertex from the index.
    pub fn remove(&mut self, dot: &Dot) -> Option<Vertex> {
        self.index.remove(dot).map(|(_, cell)| cell.into_inner())
    }

    /// Reports every vertex that has been pending for at least
    /// `monitor_pending_threshold`, together with its (transitively computed)
    /// missing dependencies. Panics if some command is pending *without* any
    /// missing dependency, since that indicates an executor bug.
    pub fn monitor_pending(
        &self,
        executed_clock: &AEClock<ProcessId>,
        monitor_pending_threshold: Duration,
        time: &dyn SysTime,
    ) {
        // collect pending commands
        let now_ms = time.millis();
        let threshold_ms = monitor_pending_threshold.as_millis() as u64;
        let mut pending = BTreeMap::new();
        let mut pending_without_missing_deps = HashSet::new();
        self.index.iter().for_each(|vertex_ref| {
            // check if we should show this pending command
            let vertex = vertex_ref.read();
            // NOTE(review): assumes `now_ms >= vertex.start_time_ms`, i.e. a
            // monotonic time source — otherwise this subtraction underflows
            let pending_for_ms = now_ms - vertex.start_time_ms;
            if pending_for_ms >= threshold_ms {
                // compute missing dependencies
                let mut visited = HashSet::new();
                let missing_deps = self.missing_dependencies(&vertex, executed_clock, &mut visited);
                if missing_deps.is_empty() {
                    pending_without_missing_deps.insert(vertex.dot);
                }
                // keyed by how long it's been pending so output can be sorted
                pending.entry(pending_for_ms).or_insert_with(Vec::new).push(
                    format!(
                        "p{}: {:?} is pending for {:?}ms with deps {:?} | missing {:?}",
                        self.process_id,
                        vertex.dot,
                        pending_for_ms,
                        vertex.deps,
                        missing_deps,
                    ),
                )
            }
        });
        // show pending commands: pending longest first
        for (_pending_for_ms, pending) in pending.into_iter().rev() {
            for fmt in pending {
                info!("{}", fmt);
            }
        }
        // panic if there's a pending command without missing dependencies
        if !pending_without_missing_deps.is_empty() {
            panic!("p{}: the following commands are pending without missing dependencies: {:?}", self.process_id, pending_without_missing_deps);
        }
    }

    /// Computes the set of dots that `vertex` is (transitively) waiting for
    /// and that are neither executed nor pending locally. `visited` guards
    /// against cycles among pending commands.
    fn missing_dependencies(
        &self,
        vertex: &RwLockReadGuard<'_, Vertex>,
        executed_clock: &AEClock<ProcessId>,
        visited: &mut HashSet<Dot>,
    ) -> HashSet<Dot> {
        let mut missing_dependencies = HashSet::new();
        // add self to the set of visited pending commands
        if !visited.insert(vertex.dot) {
            // if already visited, return
            return missing_dependencies;
        }
        for dep in &vertex.deps {
            let dep_dot = dep.dot;
            if executed_clock.contains(&dep_dot.source(), dep_dot.sequence()) {
                // ignore executed dep
                continue;
            }
            // if it's not executed, check if it's also pending
            if let Some(dep_vertex_ref) = self.index.get(&dep_dot) {
                let dep_vertex = dep_vertex_ref.read();
                // if it is, then we're pending for the same reason our
                // dependency is pending
                missing_dependencies.extend(self.missing_dependencies(
                    &dep_vertex,
                    executed_clock,
                    visited,
                ));
            } else {
                // if it's not pending, then there's a missing dependency
                missing_dependencies.insert(dep_dot);
            }
        }
        missing_dependencies
    }
}
/// Maps a missing dependency (`Dot`) to the set of commands that are blocked
/// on it; when the dependency executes, those children are re-tried.
#[derive(Debug, Clone)]
pub struct PendingIndex {
    shard_id: ShardId,
    config: Config,
    index: HashMap<Dot, HashSet<Dot>>,
}
impl PendingIndex {
    /// Creates an empty pending index for this shard.
    pub fn new(shard_id: ShardId, config: Config) -> Self {
        Self {
            shard_id,
            config,
            index: HashMap::new(),
        }
    }

    /// Indexes a new `dot` as a child of `parent`:
    /// - when `parent.dot` is executed, we'll try to execute `dot` as
    ///   `parent.dot` was a dependency and maybe now `dot` can be executed
    ///
    /// Returns `Some((dep_dot, target_shard))` the first time `parent.dot`
    /// shows up as a missing dependency of a command our shard does not
    /// replicate, in which case the caller should ask `target_shard` for it.
    #[must_use]
    pub fn index(
        &mut self,
        parent: &Dependency,
        dot: Dot,
    ) -> Option<(Dot, ShardId)> {
        // record `dot` as a child of `parent.dot`, remembering whether this
        // is the first time `parent.dot` is seen as a missing dependency
        let first_occurrence = match self.index.entry(parent.dot) {
            Entry::Vacant(entry) => {
                let mut children = HashSet::new();
                children.insert(dot);
                entry.insert(children);
                true
            }
            Entry::Occupied(mut entry) => {
                // `parent.dot` already blocked some other command; just add
                // one more child
                entry.get_mut().insert(dot);
                false
            }
        };
        if first_occurrence {
            // on the first occurrence we may have to fetch the command's
            // info from another shard if we don't replicate it; whether we
            // do is recorded in `parent.shards`
            let replicated_here = parent
                .shards
                .as_ref()
                .expect("shards should be set if it's not a noop")
                .contains(&self.shard_id);
            if !replicated_here {
                let target = parent.dot.target_shard(self.config.n());
                return Some((parent.dot, target));
            }
        }
        None
    }

    /// Finds all pending dots for a given dependency dot.
    pub fn remove(&mut self, dep_dot: &Dot) -> Option<HashSet<Dot>> {
        self.index.remove(dep_dot)
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/executor/graph/executor.rs | fantoch_ps/src/executor/graph/executor.rs | use crate::executor::graph::DependencyGraph;
use crate::protocol::common::graph::Dependency;
use fantoch::command::Command;
use fantoch::config::Config;
use fantoch::executor::{
ExecutionOrderMonitor, Executor, ExecutorMetrics, ExecutorResult,
};
use fantoch::id::{Dot, ProcessId, ShardId};
use fantoch::kvs::KVStore;
use fantoch::protocol::MessageIndex;
use fantoch::time::SysTime;
use fantoch::HashSet;
use fantoch::{debug, trace};
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
use std::fmt;
use std::iter::FromIterator;
/// Executor backed by a dependency graph: commands are queued in the graph
/// until their dependencies allow execution against the local `KVStore`.
#[derive(Clone)]
pub struct GraphExecutor {
    // which executor worker this instance is (0 = main; overwritten on init)
    executor_index: usize,
    process_id: ProcessId,
    shard_id: ShardId,
    config: Config,
    // ordering logic lives here
    graph: DependencyGraph,
    // key-value store commands are executed against
    store: KVStore,
    // results waiting to be delivered to clients
    to_clients: VecDeque<ExecutorResult>,
    // cross-shard executor messages waiting to be sent
    to_executors: Vec<(ShardId, GraphExecutionInfo)>,
}
impl Executor for GraphExecutor {
    type ExecutionInfo = GraphExecutionInfo;

    /// Creates a graph executor with an empty dependency graph and store.
    fn new(process_id: ProcessId, shard_id: ShardId, config: Config) -> Self {
        // this value will be overwritten
        let executor_index = 0;
        let graph = DependencyGraph::new(process_id, shard_id, &config);
        let store = KVStore::new(config.executor_monitor_execution_order());
        let to_clients = Default::default();
        let to_executors = Default::default();
        Self {
            executor_index,
            process_id,
            shard_id,
            config,
            graph,
            store,
            to_clients,
            to_executors,
        }
    }

    /// Records which worker owns this executor (propagated to the graph).
    fn set_executor_index(&mut self, index: usize) {
        self.executor_index = index;
        self.graph.set_executor_index(index);
    }

    /// Periodic maintenance; only needed under partial replication.
    fn cleanup(&mut self, time: &dyn SysTime) {
        if self.config.shard_count() > 1 {
            self.graph.cleanup(time);
            self.fetch_actions(time);
        }
    }

    /// Periodically reports commands pending for too long.
    fn monitor_pending(&mut self, time: &dyn SysTime) {
        self.graph.monitor_pending(time);
    }

    /// Feeds one piece of execution info into the graph and drains any
    /// actions (executable commands, cross-shard messages) it produced.
    fn handle(&mut self, info: GraphExecutionInfo, time: &dyn SysTime) {
        match info {
            GraphExecutionInfo::Add { dot, cmd, deps } => {
                if self.config.execute_at_commit() {
                    // bypass ordering entirely (unsafe mode used for baselines)
                    self.execute(cmd);
                } else {
                    // handle new command
                    let deps = Vec::from_iter(deps);
                    self.graph.handle_add(dot, cmd, deps, time);
                    self.fetch_actions(time);
                }
            }
            GraphExecutionInfo::Request { from, dots } => {
                self.graph.handle_request(from, dots, time);
                self.fetch_actions(time);
            }
            GraphExecutionInfo::RequestReply { infos } => {
                self.graph.handle_request_reply(infos, time);
                self.fetch_actions(time);
            }
            GraphExecutionInfo::Executed { dots } => {
                self.graph.handle_executed(dots, time);
            }
        }
    }

    /// Next result to be sent to a client, if any.
    fn to_clients(&mut self) -> Option<ExecutorResult> {
        self.to_clients.pop_front()
    }

    /// Next message to be sent to another shard's executor, if any.
    fn to_executors(&mut self) -> Option<(ShardId, GraphExecutionInfo)> {
        self.to_executors.pop()
    }

    fn parallel() -> bool {
        true
    }

    fn metrics(&self) -> &ExecutorMetrics {
        // fix: `&self.graph.metrics()` created a needless `&&ExecutorMetrics`
        // that only compiled due to auto-deref (clippy::needless_borrow)
        self.graph.metrics()
    }

    /// Execution-order monitor, when enabled in the config.
    fn monitor(&self) -> Option<ExecutionOrderMonitor> {
        self.store.monitor().cloned()
    }
}
impl GraphExecutor {
    /// Drains the graph's outputs after handling a message: commands that
    /// became executable and, under partial replication, cross-shard traffic.
    fn fetch_actions(&mut self, time: &dyn SysTime) {
        self.fetch_commands_to_execute(time);
        if self.config.shard_count() > 1 {
            self.fetch_to_executors(time);
            self.fetch_requests(time);
            self.fetch_request_replies(time);
        }
    }

    /// Executes every command the graph has deemed ready.
    fn fetch_commands_to_execute(&mut self, _time: &dyn SysTime) {
        // get more commands that are ready to be executed
        while let Some(cmd) = self.graph.command_to_execute() {
            trace!(
                "p{}: @{} GraphExecutor::comands_to_execute {:?} | time = {}",
                self.process_id,
                self.executor_index,
                cmd.rifl(),
                _time.millis()
            );
            self.execute(cmd);
        }
    }

    /// Broadcasts newly-executed dots to this process's other executors.
    fn fetch_to_executors(&mut self, _time: &dyn SysTime) {
        if let Some(added) = self.graph.to_executors() {
            debug!(
                "p{}: @{} GraphExecutor::to_executors {:?} | time = {}",
                self.process_id,
                self.executor_index,
                added,
                _time.millis()
            );
            let executed = GraphExecutionInfo::executed(added);
            // note: sent to our own shard id, i.e. to sibling executors
            self.to_executors.push((self.shard_id, executed));
        }
    }

    /// Forwards the graph's requests for missing command info to the shards
    /// that replicate those commands.
    fn fetch_requests(&mut self, _time: &dyn SysTime) {
        for (to, dots) in self.graph.requests() {
            trace!(
                "p{}: @{} GraphExecutor::fetch_requests {:?} {:?} | time = {}",
                self.process_id,
                self.executor_index,
                to,
                dots,
                _time.millis()
            );
            let request = GraphExecutionInfo::request(self.shard_id, dots);
            self.to_executors.push((to, request));
        }
    }

    /// Forwards the graph's replies to other shards' requests.
    fn fetch_request_replies(&mut self, _time: &dyn SysTime) {
        for (to, infos) in self.graph.request_replies() {
            trace!(
                "p{}: @{} Graph::fetch_request_replies {:?} {:?} | time = {}",
                self.process_id,
                self.executor_index,
                to,
                infos,
                _time.millis()
            );
            let reply = GraphExecutionInfo::request_reply(infos);
            self.to_executors.push((to, reply));
        }
    }

    /// Runs `cmd` against the local store and queues results for clients.
    fn execute(&mut self, cmd: Command) {
        // execute the command
        let results = cmd.execute(self.shard_id, &mut self.store);
        self.to_clients.extend(results);
    }
}
impl fmt::Debug for GraphExecutor {
    /// Formats the executor by pretty-printing its dependency graph.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(formatter, "{:#?}", self.graph)
    }
}
/// Messages handled by the graph executor.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum GraphExecutionInfo {
    /// a newly committed command with its dependencies
    Add {
        dot: Dot,
        cmd: Command,
        deps: HashSet<Dependency>,
    },
    /// another shard asks for info on `dots` it does not replicate
    Request {
        from: ShardId,
        dots: HashSet<Dot>,
    },
    /// answers to a previous `Request`
    RequestReply {
        infos: Vec<super::RequestReply>,
    },
    /// dots executed by a sibling executor of the same process
    Executed {
        dots: HashSet<Dot>,
    },
}
impl GraphExecutionInfo {
    /// Builds an `Add` message for a newly committed command.
    pub fn add(dot: Dot, cmd: Command, deps: HashSet<Dependency>) -> Self {
        Self::Add { dot, cmd, deps }
    }

    /// Builds a `Request` message asking for info on `dots`.
    fn request(from: ShardId, dots: HashSet<Dot>) -> Self {
        Self::Request { from, dots }
    }

    /// Builds a `RequestReply` message carrying `infos`.
    fn request_reply(infos: Vec<super::RequestReply>) -> Self {
        Self::RequestReply { infos }
    }

    /// Builds an `Executed` notification for sibling executors.
    fn executed(dots: HashSet<Dot>) -> Self {
        Self::Executed { dots }
    }
}
impl MessageIndex for GraphExecutionInfo {
    /// Routes messages to executor workers: `Add`/`RequestReply` go to the
    /// main worker (index 0), `Request`/`Executed` to the secondary worker
    /// (index 1). The first tuple element (reserve slot) is always 0.
    fn index(&self) -> Option<(usize, usize)> {
        const MAIN_INDEX: usize = 0;
        const SECONDARY_INDEX: usize = 1;
        let worker = match self {
            Self::Add { .. } | Self::RequestReply { .. } => MAIN_INDEX,
            Self::Request { .. } | Self::Executed { .. } => SECONDARY_INDEX,
        };
        Some((0, worker))
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/executor/graph/mod.rs | fantoch_ps/src/executor/graph/mod.rs | // This module contains the definition of `TarjanSCCFinder` and `FinderResult`.
mod tarjan;
/// This module contains the definition of `VertexIndex` and `PendingIndex`.
mod index;
/// This modules contains the definition of `GraphExecutor` and
/// `GraphExecutionInfo`.
mod executor;
// Re-exports.
pub use executor::{GraphExecutionInfo, GraphExecutor};
use self::index::{PendingIndex, VertexIndex};
use self::tarjan::{FinderResult, TarjanSCCFinder, Vertex, SCC};
use crate::protocol::common::graph::Dependency;
use fantoch::command::Command;
use fantoch::config::Config;
use fantoch::executor::{ExecutorMetrics, ExecutorMetricsKind};
use fantoch::id::{Dot, ProcessId, ShardId};
use fantoch::time::SysTime;
use fantoch::util;
use fantoch::{debug, trace};
use fantoch::{HashMap, HashSet};
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
use std::fmt;
use std::time::Duration;
use threshold::AEClock;
const MONITOR_PENDING_THRESHOLD: Duration = Duration::from_secs(1);
/// Replies sent back to a shard that requested info on dots it doesn't
/// replicate.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum RequestReply {
    /// the requested command is still pending here: send its full info
    Info {
        dot: Dot,
        cmd: Command,
        deps: Vec<Dependency>,
    },
    /// the requested command has already been executed here
    Executed {
        dot: Dot,
    },
}
/// Graph of committed-but-not-yet-executed commands; Tarjan's SCC algorithm
/// decides when commands can be executed.
#[derive(Clone)]
pub struct DependencyGraph {
    // which executor worker owns this instance (0 = main)
    executor_index: usize,
    process_id: ProcessId,
    // clock summarizing every dot already executed
    executed_clock: AEClock<ProcessId>,
    vertex_index: VertexIndex,
    pending_index: PendingIndex,
    finder: TarjanSCCFinder,
    metrics: ExecutorMetrics,
    // worker 0 (handles commands):
    // - adds new commands `to_execute`
    // - `out_requests` dependencies to be able to order commands
    // - notifies remaining workers about what's been executed through
    //   `added_to_executed_clock`
    to_execute: VecDeque<Command>,
    out_requests: HashMap<ShardId, HashSet<Dot>>,
    added_to_executed_clock: HashSet<Dot>,
    // auxiliary workers (handles requests):
    // - may have `buffered_in_requests` when doesn't have the command yet
    // - produces `out_request_replies` when it has the command
    buffered_in_requests: HashMap<ShardId, HashSet<Dot>>,
    out_request_replies: HashMap<ShardId, Vec<RequestReply>>,
}
/// Outcome of a single SCC search, as surfaced to the graph.
enum FinderInfo {
    // set of dots in found SCCs
    Found(Vec<Dot>),
    // set of dots in found SCCs (it's possible to find SCCs even though the
    // search for another dot failed), set of dots visited while searching for
    // SCCs, and the missing dependencies that blocked the search
    MissingDependencies(Vec<Dot>, HashSet<Dot>, HashSet<Dependency>),
    // in case we try to find SCCs on dots that are no longer pending
    NotPending,
}
impl DependencyGraph {
    /// Create a new `Graph`.
    pub fn new(
        process_id: ProcessId,
        shard_id: ShardId,
        config: &Config,
    ) -> Self {
        // this value will be overwritten
        let executor_index = 0;
        // create executed clock and its snapshot
        let ids: Vec<_> =
            util::all_process_ids(config.shard_count(), config.n())
                .map(|(process_id, _)| process_id)
                .collect();
        let executed_clock = AEClock::with(ids.clone());
        // create indexes
        let vertex_index = VertexIndex::new(process_id);
        let pending_index = PendingIndex::new(shard_id, *config);
        // create finder
        let finder = TarjanSCCFinder::new(process_id, *config);
        let metrics = ExecutorMetrics::new();
        // create to execute
        let to_execute = Default::default();
        // create requests and request replies
        let out_requests = Default::default();
        // only track what's added to the executed clock if partial replication
        let added_to_executed_clock = HashSet::new();
        let buffered_in_requests = Default::default();
        let out_request_replies = Default::default();
        DependencyGraph {
            executor_index,
            process_id,
            executed_clock,
            vertex_index,
            pending_index,
            finder,
            metrics,
            to_execute,
            out_requests,
            added_to_executed_clock,
            buffered_in_requests,
            out_request_replies,
        }
    }

    /// Records which executor worker owns this graph.
    fn set_executor_index(&mut self, index: usize) {
        self.executor_index = index;
    }

    /// Returns a new command ready to be executed.
    #[must_use]
    pub fn command_to_execute(&mut self) -> Option<Command> {
        self.to_execute.pop_front()
    }

    /// Returns which dots have been added to the executed clock.
    #[must_use]
    pub fn to_executors(&mut self) -> Option<HashSet<Dot>> {
        if self.added_to_executed_clock.is_empty() {
            None
        } else {
            Some(std::mem::take(&mut self.added_to_executed_clock))
        }
    }

    /// Returns a request.
    #[must_use]
    pub fn requests(&mut self) -> HashMap<ShardId, HashSet<Dot>> {
        std::mem::take(&mut self.out_requests)
    }

    /// Returns a set of request replies.
    #[must_use]
    pub fn request_replies(&mut self) -> HashMap<ShardId, Vec<RequestReply>> {
        std::mem::take(&mut self.out_request_replies)
    }

    // test-only: drains every command currently ready to execute
    #[cfg(test)]
    fn commands_to_execute(&mut self) -> VecDeque<Command> {
        std::mem::take(&mut self.to_execute)
    }

    /// Read-only access to executor metrics.
    fn metrics(&self) -> &ExecutorMetrics {
        &self.metrics
    }

    /// Periodic maintenance: auxiliary workers retry buffered remote requests.
    fn cleanup(&mut self, time: &dyn SysTime) {
        trace!(
            "p{}: @{} Graph::cleanup | time = {}",
            self.process_id,
            self.executor_index,
            time.millis()
        );
        if self.executor_index > 0 {
            // if not main executor, check pending remote requests
            self.check_pending_requests(time);
        }
    }

    /// On worker 0, reports commands that have been pending for too long.
    fn monitor_pending(&self, time: &dyn SysTime) {
        debug!(
            "p{}: @{} Graph::monitor_pending | time = {}",
            self.process_id,
            self.executor_index,
            time.millis()
        );
        if self.executor_index == 0 {
            // check requests that have been committed at least 1 second ago
            self.vertex_index.monitor_pending(
                &self.executed_clock,
                MONITOR_PENDING_THRESHOLD,
                time,
            )
        }
    }

    /// Auxiliary workers mirror dots executed by worker 0 into their local
    /// executed clock (used when answering remote requests).
    fn handle_executed(&mut self, dots: HashSet<Dot>, _time: &dyn SysTime) {
        debug!(
            "p{}: @{} Graph::handle_executed {:?} | time = {}",
            self.process_id,
            self.executor_index,
            dots,
            _time.millis()
        );
        if self.executor_index > 0 {
            for dot in dots {
                self.executed_clock.add(&dot.source(), dot.sequence());
            }
        }
    }

    /// Add a new command with its clock to the queue.
    pub fn handle_add(
        &mut self,
        dot: Dot,
        cmd: Command,
        deps: Vec<Dependency>,
        time: &dyn SysTime,
    ) {
        assert_eq!(self.executor_index, 0);
        debug!(
            "p{}: @{} Graph::handle_add {:?} {:?} | time = {}",
            self.process_id,
            self.executor_index,
            dot,
            deps,
            time.millis()
        );
        // create new vertex for this command
        let vertex = Vertex::new(dot, cmd, deps, time);
        if self.vertex_index.index(vertex).is_some() {
            panic!(
                "p{}: @{} Graph::handle_add tried to index already indexed {:?}",
                self.process_id, self.executor_index, dot
            );
        }
        // get current command ready count and count newly ready commands
        let initial_ready = self.to_execute.len();
        let mut total_scc_count = 0;
        // try to find new SCCs
        let first_find = true;
        match self.find_scc(first_find, dot, &mut total_scc_count, time) {
            FinderInfo::Found(dots) => {
                // try to execute other commands if new SCCs were found
                self.check_pending(dots, &mut total_scc_count, time);
            }
            FinderInfo::MissingDependencies(dots, _visited, missing_deps) => {
                // update the pending
                self.index_pending(dot, missing_deps, time);
                // try to execute other commands if new SCCs were found
                self.check_pending(dots, &mut total_scc_count, time);
            }
            FinderInfo::NotPending => {
                panic!("just added dot must be pending");
            }
        }
        // check that all newly ready commands have been incorporated
        assert_eq!(self.to_execute.len(), initial_ready + total_scc_count);
        trace!(
            "p{}: @{} Graph::log executed {:?} | pending {:?} | time = {}",
            self.process_id,
            self.executor_index,
            self.executed_clock,
            self.vertex_index
                .dots()
                .collect::<std::collections::BTreeSet<_>>(),
            time.millis()
        );
    }

    /// Auxiliary workers answer another shard's request for info on `dots`.
    fn handle_request(
        &mut self,
        from: ShardId,
        dots: HashSet<Dot>,
        time: &dyn SysTime,
    ) {
        assert!(self.executor_index > 0);
        trace!(
            "p{}: @{} Graph::handle_request {:?} from {:?} | time = {}",
            self.process_id,
            self.executor_index,
            dots,
            from,
            time.millis()
        );
        // save in requests metric
        self.metrics.aggregate(ExecutorMetricsKind::InRequests, 1);
        // try to process requests
        self.process_requests(from, dots.into_iter(), time)
    }

    /// For each requested dot: reply with its info if pending here, reply
    /// `Executed` if already executed, or buffer the request otherwise.
    fn process_requests(
        &mut self,
        from: ShardId,
        dots: impl Iterator<Item = Dot>,
        time: &dyn SysTime,
    ) {
        assert!(self.executor_index > 0);
        for dot in dots {
            if let Some(vertex) = self.vertex_index.find(&dot) {
                let vertex = vertex.read();
                // panic if the shard that requested this vertex replicates it
                if vertex.cmd.replicated_by(&from) {
                    panic!(
                        "p{}: @{} Graph::process_requests {:?} is replicated by {:?} (WARN) | time = {}",
                        self.process_id,
                        self.executor_index,
                        dot,
                        from,
                        time.millis()
                    )
                } else {
                    debug!(
                        "p{}: @{} Graph::process_requests {:?} sending info to {:?} | time = {}",
                        self.process_id,
                        self.executor_index,
                        dot,
                        from,
                        time.millis()
                    );
                    self.out_request_replies.entry(from).or_default().push(
                        RequestReply::Info {
                            dot,
                            cmd: vertex.cmd.clone(),
                            deps: vertex.deps.clone(),
                        },
                    )
                }
            } else {
                // if we don't have it, then check if it's executed (in our
                // snapshot)
                if self.executed_clock.contains(&dot.source(), dot.sequence()) {
                    debug!(
                        "p{}: @{} Graph::process_requests {:?} sending executed to {:?} | time = {}",
                        self.process_id,
                        self.executor_index,
                        dot,
                        from,
                        time.millis()
                    );
                    // if it's executed, notify the shard that it has already
                    // been executed
                    // - TODO: the Janus paper says that in this case, we should
                    //   send the full SCC; this will require a GC mechanism
                    self.out_request_replies
                        .entry(from)
                        .or_default()
                        .push(RequestReply::Executed { dot });
                } else {
                    debug!(
                        "p{}: @{} Graph::process_requests {:?} buffered from {:?} | time = {}",
                        self.process_id,
                        self.executor_index,
                        dot,
                        from,
                        time.millis()
                    );
                    // buffer request again
                    self.buffered_in_requests
                        .entry(from)
                        .or_default()
                        .insert(dot);
                }
            }
        }
    }

    /// Worker 0 incorporates replies to our earlier requests: either adds
    /// the remote command to the graph or marks it as executed.
    pub fn handle_request_reply(
        &mut self,
        infos: Vec<RequestReply>,
        time: &dyn SysTime,
    ) {
        assert_eq!(self.executor_index, 0);
        for info in infos {
            debug!(
                "p{}: @{} Graph::handle_request_reply {:?} | time = {}",
                self.process_id,
                self.executor_index,
                info,
                time.millis()
            );
            match info {
                RequestReply::Info { dot, cmd, deps } => {
                    // add requested command to our graph
                    self.handle_add(dot, cmd, deps, time)
                }
                RequestReply::Executed { dot } => {
                    // update executed clock
                    self.executed_clock.add(&dot.source(), dot.sequence());
                    self.added_to_executed_clock.insert(dot);
                    // check pending
                    let dots = vec![dot];
                    let mut total_scc_count = 0;
                    self.check_pending(dots, &mut total_scc_count, time);
                }
            }
        }
    }

    /// Runs Tarjan's algorithm from `dot`, saving any SCCs found and
    /// reporting missing dependencies (if any) back to the caller.
    #[must_use]
    fn find_scc(
        &mut self,
        first_find: bool,
        dot: Dot,
        total_scc_count: &mut usize,
        time: &dyn SysTime,
    ) -> FinderInfo {
        assert_eq!(self.executor_index, 0);
        trace!(
            "p{}: @{} Graph::find_scc {:?} | time = {}",
            self.process_id,
            self.executor_index,
            dot,
            time.millis()
        );
        // execute tarjan's algorithm
        let mut scc_count = 0;
        let mut missing_deps_count = 0;
        let finder_result = self.strong_connect(
            first_find,
            dot,
            &mut scc_count,
            &mut missing_deps_count,
        );
        // update total scc's found
        *total_scc_count += scc_count;
        // get sccs
        let sccs = self.finder.sccs();
        // save new SCCs
        let mut dots = Vec::with_capacity(scc_count);
        sccs.into_iter().for_each(|scc| {
            self.save_scc(scc, &mut dots, time);
        });
        // reset finder state and get visited dots
        let (visited, missing_deps) = self.finder.finalize(&self.vertex_index);
        assert!(
            // we can have a count higher than the number of dependencies if
            // there are cycles
            missing_deps.len() <= missing_deps_count,
            "more missing deps than the ones counted"
        );
        // NOTE: what follows must be done even if
        // `FinderResult::MissingDependency` was returned - it's possible that
        // while running the finder for some dot `X` we actually found SCCs with
        // another dots, even though the find for this dot `X` failed!
        // save new SCCs if any were found
        match finder_result {
            FinderResult::Found => FinderInfo::Found(dots),
            FinderResult::MissingDependencies(result_missing_deps) => {
                // if `MissingDependencies` was returned, then `missing_deps`
                // must be empty since we give up on the first missing dep
                assert!(
                    missing_deps.is_empty(),
                    "if MissingDependencies is returned, missing_deps must be empty"
                );
                FinderInfo::MissingDependencies(
                    dots,
                    visited,
                    result_missing_deps,
                )
            }
            FinderResult::NotPending => FinderInfo::NotPending,
            FinderResult::NotFound => {
                // in this case, `missing_deps` must be non-empty
                assert!(
                    !missing_deps.is_empty(),
                    "either there's a missing dependency, or we should find an SCC"
                );
                FinderInfo::MissingDependencies(dots, visited, missing_deps)
            }
        }
    }

    /// Moves every member of a found SCC out of the vertex index and into the
    /// execution queue, recording metrics along the way.
    fn save_scc(&mut self, scc: SCC, dots: &mut Vec<Dot>, time: &dyn SysTime) {
        assert_eq!(self.executor_index, 0);
        // save chain size metric
        self.metrics
            .collect(ExecutorMetricsKind::ChainSize, scc.len() as u64);
        scc.into_iter().for_each(|dot| {
            trace!(
                "p{}: @{} Graph::save_scc removing {:?} from indexes | time = {}",
                self.process_id,
                self.executor_index,
                dot,
                time.millis()
            );
            // remove from vertex index
            let vertex = self
                .vertex_index
                .remove(&dot)
                .expect("dots from an SCC should exist");
            // update the set of ready dots
            dots.push(dot);
            // get command
            let (duration_ms, cmd) = vertex.into_command(time);
            // save execution delay metric
            self.metrics
                .collect(ExecutorMetricsKind::ExecutionDelay, duration_ms);
            // add command to commands to be executed
            self.to_execute.push_back(cmd);
        })
    }

    /// Records `dot` as blocked on each of its missing deps; requests the
    /// info of deps our shard doesn't replicate from their target shard.
    fn index_pending(
        &mut self,
        dot: Dot,
        missing_deps: HashSet<Dependency>,
        _time: &dyn SysTime,
    ) {
        let mut requests = 0;
        for dep in missing_deps {
            if let Some((dep_dot, target_shard)) =
                self.pending_index.index(&dep, dot)
            {
                debug!(
                    "p{}: @{} Graph::index_pending will ask {:?} to {:?} | time = {}",
                    self.process_id,
                    self.executor_index,
                    dep_dot,
                    target_shard,
                    _time.millis()
                );
                requests += 1;
                self.out_requests
                    .entry(target_shard)
                    .or_default()
                    .insert(dep_dot);
            }
        }
        // save out requests metric
        self.metrics
            .aggregate(ExecutorMetricsKind::OutRequests, requests);
    }

    /// For every newly-executed dot, retries the commands that were blocked
    /// on it (which may cascade into further executions).
    fn check_pending(
        &mut self,
        mut dots: Vec<Dot>,
        total_scc_count: &mut usize,
        time: &dyn SysTime,
    ) {
        assert_eq!(self.executor_index, 0);
        while let Some(dot) = dots.pop() {
            // get pending commands that depend on this dot
            if let Some(pending) = self.pending_index.remove(&dot) {
                debug!(
                    "p{}: @{} Graph::try_pending {:?} depended on {:?} | time = {}",
                    self.process_id,
                    self.executor_index,
                    pending,
                    dot,
                    time.millis()
                );
                self.try_pending(pending, &mut dots, total_scc_count, time);
            } else {
                debug!(
                    "p{}: @{} Graph::try_pending nothing depended on {:?} | time = {}",
                    self.process_id,
                    self.executor_index,
                    dot,
                    time.millis()
                );
            }
        }
        // once there are no more dots to try, no command in pending should be
        // possible to be executed, so we give up!
    }

    /// Re-runs the SCC search for each previously-blocked command, skipping
    /// commands already visited by a failed search in this round.
    fn try_pending(
        &mut self,
        pending: HashSet<Dot>,
        dots: &mut Vec<Dot>,
        total_scc_count: &mut usize,
        time: &dyn SysTime,
    ) {
        assert_eq!(self.executor_index, 0);
        // try to find new SCCs for each of those commands
        let mut visited = HashSet::new();
        let first_find = false;
        for dot in pending {
            // only try to find new SCCs from non-visited commands
            if !visited.contains(&dot) {
                match self.find_scc(first_find, dot, total_scc_count, time) {
                    FinderInfo::Found(new_dots) => {
                        // reset visited
                        visited.clear();
                        // if new SCCs were found, now there are more
                        // child dots to check
                        dots.extend(new_dots);
                    }
                    FinderInfo::MissingDependencies(
                        new_dots,
                        new_visited,
                        missing_deps,
                    ) => {
                        // update pending
                        self.index_pending(dot, missing_deps, time);
                        if !new_dots.is_empty() {
                            // if we found a new SCC, reset visited;
                            visited.clear();
                        } else {
                            // otherwise, try other pending commands,
                            // but don't try those that were visited in
                            // this search
                            visited.extend(new_visited);
                        }
                        // if new SCCs were found, now there are more
                        // child dots to check
                        dots.extend(new_dots);
                    }
                    FinderInfo::NotPending => {
                        // this happens if the pending dot is no longer
                        // pending
                    }
                }
            }
        }
    }

    /// Thin wrapper over the finder: looks up the vertex for `dot` and
    /// delegates to `TarjanSCCFinder::strong_connect`.
    fn strong_connect(
        &mut self,
        first_find: bool,
        dot: Dot,
        scc_count: &mut usize,
        missing_deps_count: &mut usize,
    ) -> FinderResult {
        assert_eq!(self.executor_index, 0);
        // get the vertex
        match self.vertex_index.find(&dot) {
            Some(vertex) => self.finder.strong_connect(
                first_find,
                dot,
                &vertex,
                &mut self.executed_clock,
                &mut self.added_to_executed_clock,
                &self.vertex_index,
                scc_count,
                missing_deps_count,
            ),
            None => {
                // in this case this `dot` is no longer pending
                FinderResult::NotPending
            }
        }
    }

    /// Retries every remote request that was buffered because we didn't yet
    /// have the requested command.
    fn check_pending_requests(&mut self, time: &dyn SysTime) {
        let buffered = std::mem::take(&mut self.buffered_in_requests);
        for (from, dots) in buffered {
            self.process_requests(from, dots.into_iter(), time);
        }
    }
}
impl fmt::Debug for DependencyGraph {
    // debug output concatenates both indexes and the executed clock
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "vertex index:")?;
        write!(f, "{:#?}", self.vertex_index)?;
        write!(f, "pending index:")?;
        write!(f, "{:#?}", self.pending_index)?;
        write!(f, "executed:")?;
        write!(f, "{:?}", self.executed_clock)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::util;
use fantoch::id::{ClientId, Rifl, ShardId};
use fantoch::kvs::{KVOp, Key};
use fantoch::time::RunTime;
use fantoch::HashMap;
use permutator::{Combination, Permutation};
use rand::seq::SliceRandom;
use rand::Rng;
use std::cell::RefCell;
use std::cmp::Ordering;
use std::collections::{BTreeMap, BTreeSet};
use std::iter::FromIterator;
use threshold::{AEClock, AboveExSet, EventSet};
// test helper: builds a `Dependency` on `dot` replicated by a single shard
fn dep(dot: Dot, shard_id: ShardId) -> Dependency {
    Dependency {
        dot,
        shards: Some(BTreeSet::from_iter(vec![shard_id])),
    }
}
// two commands that depend on each other form a 2-cycle: neither is
// executable alone, but both become executable once the second arrives
#[test]
fn simple() {
    // create queue
    let process_id = 1;
    let shard_id = 0;
    let n = 2;
    let f = 1;
    let config = Config::new(n, f);
    let mut queue = DependencyGraph::new(process_id, shard_id, &config);
    let time = RunTime;
    // create dots
    let dot_0 = Dot::new(1, 1);
    let dot_1 = Dot::new(2, 1);
    // cmd 0
    let cmd_0 = Command::from(
        Rifl::new(1, 1),
        vec![(String::from("A"), KVOp::Put(String::new()))],
    );
    let deps_0 = vec![dep(dot_1, shard_id)];
    // cmd 1
    let cmd_1 = Command::from(
        Rifl::new(2, 1),
        vec![(String::from("A"), KVOp::Put(String::new()))],
    );
    let deps_1 = vec![dep(dot_0, shard_id)];
    // add cmd 0
    queue.handle_add(dot_0, cmd_0.clone(), deps_0, &time);
    // check commands ready to be executed: nothing yet (dep on dot_1 missing)
    assert!(queue.commands_to_execute().is_empty());
    // add cmd 1
    queue.handle_add(dot_1, cmd_1.clone(), deps_1, &time);
    // check commands ready to be executed: the cycle is complete
    assert_eq!(queue.commands_to_execute(), vec![cmd_0, cmd_1]);
}
/// We have 5 commands by the same process (process A) that access the same
/// key. We have `n = 5` and `f = 1` and thus the fast quorum size of 3.
/// The fast quorum used by process A is `{A, B, C}`. We have the
/// following order of commands in the 3 processes:
/// - A: (1,1) (1,2) (1,3) (1,4) (1,5)
/// - B: (1,3) (1,4) (1,1) (1,2) (1,5)
/// - C: (1,1) (1,2) (1,4) (1,5) (1,3)
///
/// The above results in the following final dependencies:
/// - dep[(1,1)] = {(1,4)}
/// - dep[(1,2)] = {(1,4)}
/// - dep[(1,3)] = {(1,5)}
/// - dep[(1,4)] = {(1,3)}
/// - dep[(1,5)] = {(1,4)}
///
/// The executor then receives the commit notifications of (1,3) (1,4) and
/// (1,5) and, if transitive conflicts are assumed, these 3 commands
/// form an SCC. This is because with this assumption we only check the
/// highest conflicting command per replica, and thus (1,3) (1,4) and
/// (1,5) have "all the dependencies".
///
/// Then, the executor executes whichever missing command comes first, since
/// (1,1) and (1,2) "only depend on an SCC already formed". This means that
/// if two executors receive (1,3) (1,4) (1,5), and then one receives (1,1)
/// and (1,2) while the other receives (1,2) and (1,1), they will execute
/// (1,1) and (1,2) in different orders, leading to an inconsistency.
///
/// This example is impossible if commands from the same process are
/// processed (on the replicas computing dependencies) in their submission
/// order. With this, a command never depends on later commands from the
/// same process, which seems to be enough to prevent this issue. This means
/// that parallelizing the processing of messages needs to be on a
/// per-process basis, i.e. commands by the same process are always
/// processed by the same worker.
#[test]
fn transitive_conflicts_assumption_regression_test_1() {
// config
let n = 5;
// dots
// five commands, all submitted by process 1
let dot_1 = Dot::new(1, 1);
let dot_2 = Dot::new(1, 2);
let dot_3 = Dot::new(1, 3);
let dot_4 = Dot::new(1, 4);
let dot_5 = Dot::new(1, 5);
// deps
// final dependencies as computed in the scenario of the doc comment above
let deps_1 = HashSet::from_iter(vec![dot_4]);
let deps_2 = HashSet::from_iter(vec![dot_4]);
let deps_3 = HashSet::from_iter(vec![dot_5]);
let deps_4 = HashSet::from_iter(vec![dot_3]);
let deps_5 = HashSet::from_iter(vec![dot_4]);
// two delivery orders that only differ in the relative position of the
// trailing (1,1) and (1,2)
let order_a = vec![
(dot_3, None, deps_3.clone()),
(dot_4, None, deps_4.clone()),
(dot_5, None, deps_5.clone()),
(dot_1, None, deps_1.clone()),
(dot_2, None, deps_2.clone()),
];
let order_b = vec![
(dot_3, None, deps_3),
(dot_4, None, deps_4),
(dot_5, None, deps_5),
(dot_2, None, deps_2),
(dot_1, None, deps_1),
];
let order_a = check_termination(n, order_a);
let order_b = check_termination(n, order_b);
// the two replays produce different execution orders, demonstrating the
// inconsistency described in the doc comment above
assert!(order_a != order_b);
}
/// Simple example showing why encoding of dependencies matters for the
/// `transitive_conflicts` optimization to be correct (which makes the name
/// of the optimization not great):
/// - 3 replicas (A, B, C), and 3 commands
/// * command (A, 1), keys = {x}
/// * command (A, 2), keys = {y}
/// * command (B, 1), keys = {x, y}
///
/// First, (A, 1) is submitted and gets no dependencies:
/// - {A -> 0, B -> 0, C -> 0}
/// Then, (A, 2) is submitted and also gets no dependencies:
/// - {A -> 0, B -> 0, C -> 0}
/// Finally, (B, 1) is submitted and gets (A, 2) as a dependency:
/// - {A -> 2, B -> 0, C -> 0}
/// It only gets (A, 2) because we only return the highest conflicting
/// command from each replica.
///
/// With the optimization, the order in which commands are received by the
/// ordering component affects results:
/// - (A, 1), (A, 2), (B, 1): commands are executed in the order they're
/// received, producing correct results
/// - (A, 2), (B, 1), (A, 1): (B, 1) is executed before (A, 1) and shouldn't
///
/// Without the optimization, (B, 1) would be forced to wait for (A, 1) in
/// the last case, producing a correct result.
///
/// It looks like the optimization would be correct if, instead of returning
/// the highest conflicting command per replica, we would return the highest
/// conflict command per replica *per key*.
#[test]
fn transitive_conflicts_assumption_regression_test_2() {
// config
let n = 3;
// helper that builds a `BTreeSet` of owned keys from string literals
let keys = |keys: Vec<&str>| {
keys.into_iter()
.map(|key| key.to_string())
.collect::<BTreeSet<_>>()
};
// cmd 1,1
let dot_1_1 = Dot::new(1, 1);
let keys_1_1 = keys(vec!["A"]);
let deps_1_1 = HashSet::new();
// cmd 1,2
let dot_1_2 = Dot::new(1, 2);
let keys_1_2 = keys(vec!["B"]);
let deps_1_2 = HashSet::new();
// cmd 2,1
// conflicts with both previous commands but only records (1,2) as a dep
// (highest conflicting command per replica)
let dot_2_1 = Dot::new(2, 1);
let keys_2_1 = keys(vec!["A", "B"]);
let deps_2_1 = HashSet::from_iter(vec![dot_1_2]);
let order_a = vec![
(dot_1_1, Some(keys_1_1.clone()), deps_1_1.clone()),
(dot_1_2, Some(keys_1_2.clone()), deps_1_2.clone()),
(dot_2_1, Some(keys_2_1.clone()), deps_2_1.clone()),
];
let order_b = vec![
(dot_1_2, Some(keys_1_2), deps_1_2),
(dot_2_1, Some(keys_2_1), deps_2_1),
(dot_1_1, Some(keys_1_1), deps_1_1),
];
let order_a = check_termination(n, order_a);
let order_b = check_termination(n, order_b);
// the two replays disagree, demonstrating the encoding problem described
// in the doc comment above
assert!(order_a != order_b);
}
// Three commands in a 3-cycle: (1,1) <- (2,1) <- (3,1) <- (1,1).
// `shuffle_it` replays every permutation of the adds and checks that all
// permutations terminate with the same execution order.
#[test]
fn cycle() {
// config
let n = 3;
// dots
let dot_1 = Dot::new(1, 1);
let dot_2 = Dot::new(2, 1);
let dot_3 = Dot::new(3, 1);
// deps
let deps_1 = HashSet::from_iter(vec![dot_3]);
let deps_2 = HashSet::from_iter(vec![dot_1]);
let deps_3 = HashSet::from_iter(vec![dot_2]);
let args = vec![
(dot_1, None, deps_1),
(dot_2, None, deps_2),
(dot_3, None, deps_3),
];
shuffle_it(n, args);
}
// Randomized check: generates random conflicting commands and verifies (via
// `shuffle_it`) that every delivery permutation terminates and agrees on a
// single execution order.
#[test]
fn test_add_random() {
let shard_id = 0;
let n = 2;
let iterations = 10;
let events_per_process = 3;
(0..iterations).for_each(|_| {
let args = random_adds(shard_id, n, events_per_process);
shuffle_it(n, args);
});
}
fn random_adds(
shard_id: ShardId,
n: usize,
events_per_process: usize,
) -> Vec<(Dot, Option<BTreeSet<Key>>, HashSet<Dot>)> {
let mut possible_keys: Vec<_> =
('A'..='D').map(|key| key.to_string()).collect();
// create dots
let dots: Vec<_> = fantoch::util::process_ids(shard_id, n)
.flat_map(|process_id| {
(1..=events_per_process)
.map(move |event| Dot::new(process_id, event as u64))
})
.collect();
// compute keys and empty deps
let deps: HashMap<_, _> = dots
.clone()
.into_iter()
.map(|dot| {
// select two random keys from the set of possible keys:
// - this makes sure that the conflict relation is not
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | true |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/executor/pred/index.rs | fantoch_ps/src/executor/pred/index.rs | use crate::protocol::common::pred::{CaesarDeps, Clock};
use fantoch::command::Command;
use fantoch::hash_map::HashMap;
use fantoch::id::Dot;
use fantoch::time::SysTime;
use fantoch::HashSet;
use std::cell::RefCell;
use std::sync::Arc;
/// Vertex in the predecessors graph: a committed command together with its
/// clock and predecessor (dependency) set.
#[derive(Debug, Clone)]
pub struct Vertex {
    pub dot: Dot,
    pub cmd: Command,
    pub clock: Clock,
    pub deps: Arc<CaesarDeps>,
    // instant (in millis) at which the vertex was created; used to report the
    // execution-delay metric when the command is finally executed
    pub start_time_ms: u64,
    // number of dependencies the command is still waiting on in its current
    // phase (non-committed deps in phase one; non-executed deps in phase two)
    missing_deps: usize,
}

impl Vertex {
    /// Creates a new vertex, timestamping it with the current time.
    pub fn new(
        dot: Dot,
        cmd: Command,
        clock: Clock,
        deps: Arc<CaesarDeps>,
        time: &dyn SysTime,
    ) -> Self {
        let start_time_ms = time.millis();
        Self {
            dot,
            cmd,
            clock,
            deps,
            start_time_ms,
            missing_deps: 0,
        }
    }

    /// Returns the number of dependencies still missing.
    pub fn get_missing_deps(&self) -> usize {
        self.missing_deps
    }

    /// Sets the number of missing dependencies.
    pub fn set_missing_deps(&mut self, missing_deps: usize) {
        // this value can only be written if it's at zero
        assert_eq!(self.missing_deps, 0);
        self.missing_deps = missing_deps;
    }

    /// Decreases the number of missing deps by one.
    pub fn decrease_missing_deps(&mut self) {
        // this value can only be decreased if it's non zero
        assert!(self.missing_deps > 0);
        self.missing_deps -= 1;
    }

    /// Consumes the vertex, returning how long it was pending (in millis) and
    /// its command.
    pub fn into_command(self, time: &dyn SysTime) -> (u64, Command) {
        let end_time_ms = time.millis();
        // `saturating_sub` guards against a non-monotonic clock (end < start):
        // plain `-` would panic in debug builds and wrap around in release
        let duration_ms = end_time_ms.saturating_sub(self.start_time_ms);
        (duration_ms, self.cmd)
    }
}
// Maps each committed-but-not-executed dot to its vertex. Vertices are kept
// in `RefCell`s so a vertex can be mutated while the index itself is only
// borrowed immutably (presumably single-threaded use, since `RefCell` is not
// thread-safe — consistent with this executor not being parallel).
#[derive(Debug, Clone)]
pub struct VertexIndex {
index: HashMap<Dot, RefCell<Vertex>>,
}
impl VertexIndex {
pub fn new() -> Self {
Self {
index: HashMap::new(),
}
}
/// Indexes a new vertex, returning any previous vertex indexed.
pub fn index(&mut self, vertex: Vertex) -> Option<Vertex> {
let dot = vertex.dot;
self.index
.insert(dot, RefCell::new(vertex))
.map(|cell| cell.into_inner())
}
// Iterator over all indexed dots (only used for trace logging).
#[allow(dead_code)]
pub fn dots(&self) -> impl Iterator<Item = &Dot> + '_ {
self.index.keys()
}
/// Returns a reference to the vertex of `dot`, if indexed.
pub fn find(&self, dot: &Dot) -> Option<&RefCell<Vertex>> {
self.index.get(dot)
}
/// Removes a vertex from the index.
pub fn remove(&mut self, dot: &Dot) -> Option<Vertex> {
self.index.remove(dot).map(|cell| cell.into_inner())
}
}
// For each dependency dot, the set of commands (dots) currently waiting on it.
#[derive(Debug, Clone)]
pub struct PendingIndex {
index: HashMap<Dot, HashSet<Dot>>,
}
impl PendingIndex {
pub fn new() -> Self {
Self {
index: HashMap::new(),
}
}
/// Indexes a new `dot` with `dep_dot` as a missing dependency.
pub fn index(&mut self, dot: Dot, dep_dot: Dot) {
self.index.entry(dep_dot).or_default().insert(dot);
}
/// Removes and returns all dots that were pending on dependency `dep_dot`
/// (an empty set if none were).
pub fn remove(&mut self, dep_dot: &Dot) -> HashSet<Dot> {
self.index.remove(dep_dot).unwrap_or_default()
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/executor/pred/executor.rs | fantoch_ps/src/executor/pred/executor.rs | use crate::executor::pred::PredecessorsGraph;
use crate::protocol::common::pred::{CaesarDeps, Clock};
use fantoch::command::Command;
use fantoch::config::Config;
use fantoch::executor::{
ExecutionOrderMonitor, Executor, ExecutorMetrics, ExecutorResult,
};
use fantoch::id::{Dot, ProcessId, ShardId};
use fantoch::kvs::KVStore;
use fantoch::protocol::{CommittedAndExecuted, MessageIndex};
use fantoch::time::SysTime;
use fantoch::trace;
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
use std::sync::Arc;
// Executor that delivers commands in the order determined by the
// predecessors graph and applies them to a local key-value store.
#[derive(Clone)]
pub struct PredecessorsExecutor {
process_id: ProcessId,
shard_id: ShardId,
// graph tracking commit/execution state of commands
graph: PredecessorsGraph,
// local key-value store where ready commands are applied
store: KVStore,
// results buffered until drained via `Executor::to_clients`
to_clients: VecDeque<ExecutorResult>,
}
impl Executor for PredecessorsExecutor {
    type ExecutionInfo = PredecessorsExecutionInfo;

    fn new(process_id: ProcessId, shard_id: ShardId, config: Config) -> Self {
        let graph = PredecessorsGraph::new(process_id, &config);
        // monitor execution order only if the config requests it
        let store = KVStore::new(config.executor_monitor_execution_order());
        let to_clients = Default::default();
        Self {
            process_id,
            shard_id,
            graph,
            store,
            to_clients,
        }
    }

    /// Feeds a newly committed command into the predecessors graph and
    /// executes every command that becomes ready as a consequence.
    fn handle(&mut self, info: PredecessorsExecutionInfo, time: &dyn SysTime) {
        // handle new command
        self.graph
            .add(info.dot, info.cmd, info.clock, info.deps, time);
        // get more commands that are ready to be executed
        while let Some(cmd) = self.graph.command_to_execute() {
            trace!(
                "p{}: PredecessorsExecutor::commands_to_execute {:?} | time = {}",
                self.process_id,
                cmd.rifl(),
                time.millis()
            );
            self.execute(cmd);
        }
    }

    fn to_clients(&mut self) -> Option<ExecutorResult> {
        self.to_clients.pop_front()
    }

    fn executed(
        &mut self,
        _time: &dyn SysTime,
    ) -> Option<CommittedAndExecuted> {
        // drain (and reset) the graph's committed/executed trackers
        let committed_and_executed = self.graph.committed_and_executed();
        trace!(
            "p{}: PredecessorsExecutor::executed {:?} | time = {}",
            self.process_id,
            committed_and_executed,
            _time.millis()
        );
        Some(committed_and_executed)
    }

    // not parallel: a single executor instance handles all commands
    // NOTE(review): presumably because the predecessors graph is global and
    // cannot be partitioned by key — confirm against the `Executor` trait docs
    fn parallel() -> bool {
        false
    }

    fn metrics(&self) -> &ExecutorMetrics {
        // `graph.metrics()` already returns a reference; the previous
        // `&self.graph.metrics()` produced a needless `&&ExecutorMetrics`
        self.graph.metrics()
    }

    fn monitor(&self) -> Option<ExecutionOrderMonitor> {
        self.store.monitor().cloned()
    }
}
impl PredecessorsExecutor {
// Applies `cmd` to the local store and buffers the resulting outputs so
// they can later be drained via `Executor::to_clients`.
fn execute(&mut self, cmd: Command) {
// execute the command
let results = cmd.execute(self.shard_id, &mut self.store);
self.to_clients.extend(results);
}
}
// Everything the executor needs to know about one committed command: its
// dot, the command itself, its clock, and its predecessor (dependency) set.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PredecessorsExecutionInfo {
dot: Dot,
cmd: Command,
clock: Clock,
// shared via `Arc` so the (possibly large) dependency set is not copied
deps: Arc<CaesarDeps>,
}
impl PredecessorsExecutionInfo {
pub fn new(
dot: Dot,
cmd: Command,
clock: Clock,
deps: Arc<CaesarDeps>,
) -> Self {
Self {
dot,
cmd,
clock,
deps,
}
}
}
impl MessageIndex for PredecessorsExecutionInfo {
// `None`: no worker-routing index for this info — consistent with
// `PredecessorsExecutor::parallel()` returning false (a single executor).
// NOTE(review): confirm the `None` semantics against the `MessageIndex`
// trait documentation.
fn index(&self) -> Option<(usize, usize)> {
None
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/executor/pred/mod.rs | fantoch_ps/src/executor/pred/mod.rs | /// This module contains the definition of `Vertex`, `VertexIndex` and
/// `PendingIndex`.
mod index;
/// This modules contains the definition of `PredecessorsExecutor` and
/// `PredecessorsExecutionInfo`.
mod executor;
// Re-exports.
pub use executor::{PredecessorsExecutionInfo, PredecessorsExecutor};
use self::index::{PendingIndex, Vertex, VertexIndex};
use crate::protocol::common::pred::{CaesarDeps, Clock};
use fantoch::command::Command;
use fantoch::config::Config;
use fantoch::executor::{ExecutorMetrics, ExecutorMetricsKind};
use fantoch::id::{Dot, ProcessId};
use fantoch::protocol::CommittedAndExecuted;
use fantoch::time::SysTime;
use fantoch::util;
use fantoch::{debug, trace};
use std::collections::VecDeque;
use std::fmt;
use std::sync::Arc;
use threshold::AEClock;
// Graph that tracks committed commands and releases them for execution once
// all their (relevant) predecessors have been executed.
#[derive(Clone)]
pub struct PredecessorsGraph {
process_id: ProcessId,
// clock with all dots committed so far
committed_clock: AEClock<ProcessId>,
// clock with all dots executed so far
executed_clock: AEClock<ProcessId>,
// committed-but-not-executed commands, indexed by dot
vertex_index: VertexIndex,
// mapping from non committed dep to pending dot
phase_one_pending_index: PendingIndex,
// mapping from committed (but not executed) dep to pending dot
phase_two_pending_index: PendingIndex,
metrics: ExecutorMetrics,
// count of committed commands (since the last `committed_and_executed`)
new_committed_dots: u64,
// dots of new commands executed (since the last `committed_and_executed`)
new_executed_dots: Vec<Dot>,
// commands ready to be executed, in execution order
to_execute: VecDeque<Command>,
// if set, skip the two-phase wait and execute commands upon commit
execute_at_commit: bool,
}
impl PredecessorsGraph {
    /// Create a new `Graph`.
    pub fn new(process_id: ProcessId, config: &Config) -> Self {
        // create executed clock and committed clock, tracking events from all
        // processes across all shards
        let ids: Vec<_> =
            util::all_process_ids(config.shard_count(), config.n())
                .map(|(process_id, _)| process_id)
                .collect();
        let committed_clock = AEClock::with(ids.clone());
        // last use of `ids`: move it instead of cloning it again
        let executed_clock = AEClock::with(ids);
        // create indexes
        let vertex_index = VertexIndex::new();
        let phase_one_pending_index = PendingIndex::new();
        let phase_two_pending_index = PendingIndex::new();
        let metrics = ExecutorMetrics::new();
        let new_committed_dots = 0;
        let new_executed_dots = Vec::new();
        // create to execute
        let to_execute = VecDeque::new();
        let execute_at_commit = config.execute_at_commit();
        PredecessorsGraph {
            process_id,
            executed_clock,
            committed_clock,
            vertex_index,
            phase_one_pending_index,
            phase_two_pending_index,
            metrics,
            new_committed_dots,
            new_executed_dots,
            to_execute,
            execute_at_commit,
        }
    }

    /// Returns a new command ready to be executed.
    #[must_use]
    pub fn command_to_execute(&mut self) -> Option<Command> {
        self.to_execute.pop_front()
    }

    // Drains all commands currently ready to be executed (test-only helper).
    #[cfg(test)]
    fn commands_to_execute(&mut self) -> VecDeque<Command> {
        std::mem::take(&mut self.to_execute)
    }

    // Drains (and resets) the count of newly committed commands and the dots
    // of newly executed commands.
    fn committed_and_executed(&mut self) -> CommittedAndExecuted {
        (
            std::mem::take(&mut self.new_committed_dots),
            std::mem::take(&mut self.new_executed_dots),
        )
    }

    fn metrics(&self) -> &ExecutorMetrics {
        &self.metrics
    }

    /// Add a new command.
    pub fn add(
        &mut self,
        dot: Dot,
        cmd: Command,
        clock: Clock,
        deps: Arc<CaesarDeps>,
        time: &dyn SysTime,
    ) {
        debug!(
            "p{}: Predecessors::add {:?} {:?} {:?} | time = {}",
            self.process_id,
            dot,
            clock,
            deps,
            time.millis()
        );
        // mark dot as committed; the assert ensures each dot is added at most
        // once
        self.new_committed_dots += 1;
        assert!(self.committed_clock.add(&dot.source(), dot.sequence()));
        // we assume that commands do not depend on themselves
        assert!(!deps.contains(&dot));
        if self.execute_at_commit {
            self.execute(dot, cmd, time);
        } else {
            // index the command
            self.index_committed_command(dot, cmd, clock, deps, time);
            // try all commands that are pending on phase one due to this
            // command
            self.try_phase_one_pending(dot, time);
            // move command to phase 1
            self.move_to_phase_one(dot, time);
            trace!(
                "p{}: Predecessors::log committed {:?} | executed {:?} | index {:?} | time = {}",
                self.process_id,
                self.committed_clock,
                self.executed_clock,
                self.vertex_index
                    .dots()
                    .collect::<std::collections::BTreeSet<_>>(),
                time.millis()
            );
        }
    }

    /// Moves a command to phase one, i.e., where it waits for all its
    /// dependencies to become committed.
    fn move_to_phase_one(&mut self, dot: Dot, time: &dyn SysTime) {
        debug!(
            "p{}: Predecessors::move_1 {:?} | time = {}",
            self.process_id,
            dot,
            time.millis()
        );
        // get vertex
        let vertex_ref = self
            .vertex_index
            .find(&dot)
            .expect("command just indexed must exist");
        let mut vertex = vertex_ref.borrow_mut();
        // compute number of not yet committed dependencies
        let mut non_committed_deps_count = 0;
        for dep_dot in vertex.deps.iter() {
            let committed = self
                .committed_clock
                .contains(&dep_dot.source(), dep_dot.sequence());
            if !committed {
                trace!(
                    "p{}: Predecessors::move_1 non committed dep {:?} | time = {}",
                    self.process_id,
                    dep_dot,
                    time.millis()
                );
                non_committed_deps_count += 1;
                self.phase_one_pending_index.index(dot, *dep_dot);
            }
        }
        trace!(
            "p{}: Predecessors::move_1 {:?} missing deps for {:?} | time = {}",
            self.process_id,
            non_committed_deps_count,
            dot,
            time.millis()
        );
        if non_committed_deps_count > 0 {
            // if it has non committed deps, simply save that value
            vertex.set_missing_deps(non_committed_deps_count);
        } else {
            // move command to phase two
            // (drop the `RefCell` borrow first; phase two borrows it again)
            drop(vertex);
            self.move_to_phase_two(dot, time);
        }
    }

    /// Moves a command to phase two, i.e., where it waits for all its
    /// dependencies to become executed.
    fn move_to_phase_two(&mut self, dot: Dot, time: &dyn SysTime) {
        debug!(
            "p{}: Predecessors::move_2 {:?} | time = {}",
            self.process_id,
            dot,
            time.millis()
        );
        // get vertex
        let vertex_ref = self
            .vertex_index
            .find(&dot)
            .expect("command moved to phase two must exist");
        let mut vertex = vertex_ref.borrow_mut();
        // compute number of not yet executed dependencies
        let mut non_executed_deps_count = 0;
        for dep_dot in vertex.deps.iter() {
            // consider only non-executed dependencies with a lower clock
            let executed = self
                .executed_clock
                .contains(&dep_dot.source(), dep_dot.sequence());
            if !executed {
                trace!(
                    "p{}: Predecessors::move_2 non executed dep {:?} | time = {}",
                    self.process_id,
                    dep_dot,
                    time.millis()
                );
                // get the dependency and check its clock to see if it should
                // be considered
                let dep_ref = self
                    .vertex_index
                    .find(&dep_dot)
                    .expect("non-executed dependency must exist");
                let dep = dep_ref.borrow();
                // only consider this dep if it has a lower clock
                if dep.clock < vertex.clock {
                    trace!(
                        "p{}: Predecessors::move_2 non executed dep with lower clock {:?} | time = {}",
                        self.process_id,
                        dep_dot,
                        time.millis()
                    );
                    non_executed_deps_count += 1;
                    self.phase_two_pending_index.index(dot, *dep_dot);
                }
            }
        }
        trace!(
            "p{}: Predecessors::move_2 {:?} missing deps for {:?} | time = {}",
            self.process_id,
            non_executed_deps_count,
            dot,
            time.millis()
        );
        if non_executed_deps_count > 0 {
            // if it has committed but non executed deps, simply save that value
            vertex.set_missing_deps(non_executed_deps_count);
        } else {
            // save the command to be executed
            // (drop the `RefCell` borrow first; `save_to_execute` removes the
            // vertex from the index)
            drop(vertex);
            self.save_to_execute(dot, time);
        }
    }

    // Creates a vertex for the committed command and indexes it; a dot must
    // never be indexed twice.
    fn index_committed_command(
        &mut self,
        dot: Dot,
        cmd: Command,
        clock: Clock,
        deps: Arc<CaesarDeps>,
        time: &dyn SysTime,
    ) {
        // create new vertex for this command and index it
        let vertex = Vertex::new(dot, cmd, clock, deps, time);
        if self.vertex_index.index(vertex).is_some() {
            panic!(
                "p{}: Predecessors::index tried to index already indexed {:?}",
                self.process_id, dot
            );
        }
    }

    // `dot` was just committed: revisit every command that was waiting (in
    // phase one) for `dot` to commit.
    fn try_phase_one_pending(&mut self, dot: Dot, time: &dyn SysTime) {
        for pending_dot in self.phase_one_pending_index.remove(&dot) {
            // get vertex
            let vertex_ref = self
                .vertex_index
                .find(&pending_dot)
                .expect("command pending at phase one must exist");
            let mut vertex = vertex_ref.borrow_mut();
            // a non-committed dep became committed, so update the number of
            // missing deps at phase one
            vertex.decrease_missing_deps();
            // check if there are no more missing deps, and if so, move the
            // command to phase two
            if vertex.get_missing_deps() == 0 {
                // move command to phase two
                drop(vertex);
                self.move_to_phase_two(pending_dot, time);
            }
        }
    }

    // `dot` was just executed: revisit every command that was waiting (in
    // phase two) for `dot` to execute.
    fn try_phase_two_pending(&mut self, dot: Dot, time: &dyn SysTime) {
        for pending_dot in self.phase_two_pending_index.remove(&dot) {
            // get vertex
            let vertex_ref = self
                .vertex_index
                .find(&pending_dot)
                .expect("command pending at phase two must exist");
            let mut vertex = vertex_ref.borrow_mut();
            // a non-executed dep became executed, so update the number of
            // missing deps at phase two
            vertex.decrease_missing_deps();
            // check if there are no more missing deps, and if so, save the
            // command to be executed
            if vertex.get_missing_deps() == 0 {
                // save the command to be executed
                drop(vertex);
                self.save_to_execute(pending_dot, time);
            }
        }
    }

    // Removes the ready command from the index, records its execution-delay
    // metric, executes it, and unblocks phase-two waiters.
    fn save_to_execute(&mut self, dot: Dot, time: &dyn SysTime) {
        trace!(
            "p{}: Predecessors::save removing {:?} from indexes | time = {}",
            self.process_id,
            dot,
            time.millis()
        );
        // remove from vertex index
        let vertex = self
            .vertex_index
            .remove(&dot)
            .expect("ready-to-execute command should exist");
        // get command
        let (duration_ms, cmd) = vertex.into_command(time);
        // save execution delay metric
        self.metrics
            .collect(ExecutorMetricsKind::ExecutionDelay, duration_ms);
        // mark dot as executed and add command to commands to be executed
        self.execute(dot, cmd, time);
        // try commands pending at phase two due to this command
        self.try_phase_two_pending(dot, time);
    }

    // Marks `dot` as executed (the assert ensures at most once) and queues
    // the command for execution.
    fn execute(&mut self, dot: Dot, cmd: Command, _time: &dyn SysTime) {
        trace!(
            "p{}: Predecessors::update_executed {:?} | time = {}",
            self.process_id,
            dot,
            _time.millis()
        );
        // mark dot as executed
        self.new_executed_dots.push(dot);
        assert!(self.executed_clock.add(&dot.source(), dot.sequence()));
        // add command to commands to be executed
        self.to_execute.push_back(cmd);
    }
}
impl fmt::Debug for PredecessorsGraph {
// Debug output intended for tracing: dumps the three indexes and the
// executed clock (commands and committed clock are omitted).
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "vertex index:")?;
write!(f, "{:#?}", self.vertex_index)?;
write!(f, "phase one pending index:")?;
write!(f, "{:#?}", self.phase_one_pending_index)?;
write!(f, "phase two pending index:")?;
write!(f, "{:#?}", self.phase_two_pending_index)?;
write!(f, "executed:")?;
write!(f, "{:?}", self.executed_clock)
}
}
#[cfg(test)]
mod tests {
use super::*;
use fantoch::id::{ClientId, Rifl};
use fantoch::kvs::{KVOp, Key};
use fantoch::time::RunTime;
use fantoch::{HashMap, HashSet};
use permutator::{Combination, Permutation};
use rand::seq::SliceRandom;
use rand::Rng;
use std::cell::RefCell;
use std::cmp::Ordering;
use std::collections::{BTreeMap, BTreeSet};
use std::iter::FromIterator;
fn caesar_deps(deps: Vec<Dot>) -> Arc<CaesarDeps> {
Arc::new(CaesarDeps::from_iter(deps))
}
#[test]
fn simple() {
// create queue
let p1 = 1;
let p2 = 2;
let n = 2;
let f = 1;
let config = Config::new(n, f);
let mut queue = PredecessorsGraph::new(p1, &config);
let time = RunTime;
// create dots
let dot_0 = Dot::new(p1, 1);
let dot_1 = Dot::new(p2, 1);
// cmd 0
let cmd_0 = Command::from(
Rifl::new(1, 1),
vec![(String::from("A"), KVOp::Put(String::new()))],
);
let clock_0 = Clock::from(2, p1);
let deps_0 = caesar_deps(vec![dot_1]);
// cmd 1
let cmd_1 = Command::from(
Rifl::new(2, 1),
vec![(String::from("A"), KVOp::Put(String::new()))],
);
let clock_1 = Clock::from(1, p2);
let deps_1 = caesar_deps(vec![dot_0]);
// add cmd 0
queue.add(dot_0, cmd_0.clone(), clock_0, deps_0, &time);
// check commands ready to be executed
assert!(queue.commands_to_execute().is_empty());
// add cmd 1
queue.add(dot_1, cmd_1.clone(), clock_1, deps_1, &time);
// check commands ready to be executed
assert_eq!(queue.commands_to_execute(), vec![cmd_1, cmd_0]);
}
#[test]
fn test_add_random() {
let n = 2;
let iterations = 10;
let events_per_process = 3;
(0..iterations).for_each(|_| {
let args = random_adds(n, events_per_process);
shuffle_it(n, args);
});
}
fn random_adds(
n: usize,
events_per_process: usize,
) -> Vec<(Dot, Option<BTreeSet<Key>>, Clock, Arc<CaesarDeps>)> {
let mut rng = rand::thread_rng();
let mut possible_keys: Vec<_> =
('A'..='D').map(|key| key.to_string()).collect();
// create dots
let dots: Vec<_> =
fantoch::util::process_ids(fantoch::command::DEFAULT_SHARD_ID, n)
.flat_map(|process_id| {
(1..=events_per_process)
.map(move |event| Dot::new(process_id, event as u64))
})
.collect();
// compute all possible clocks
let mut all_clocks: Vec<_> = (1..=dots.len())
.map(|clock| {
let process_id = 1;
Clock::from(clock as u64, process_id)
})
.collect();
// shuffle the clocks
all_clocks.shuffle(&mut rng);
// compute keys, clock, and empty deps
let dot_to_data: HashMap<_, _> = dots
.clone()
.into_iter()
.map(|dot| {
// select two random keys from the set of possible keys:
// - this makes sure that the conflict relation is not
// transitive
possible_keys.shuffle(&mut rng);
let mut keys = BTreeSet::new();
assert!(keys.insert(possible_keys[0].clone()));
assert!(keys.insert(possible_keys[1].clone()));
// assign a random clock to this command
let clock = all_clocks
.pop()
.expect("there must be a clock for each command");
// create empty deps
let deps = CaesarDeps::new();
(dot, (Some(keys), clock, RefCell::new(deps)))
})
.collect();
// for each pair of dots
dots.combination(2).for_each(|dots| {
let left = dots[0];
let right = dots[1];
// find their data
let (left_keys, left_clock, left_deps) =
dot_to_data.get(left).expect("left dot data must exist");
let (right_keys, right_clock, right_deps) =
dot_to_data.get(right).expect("right dot data must exist");
// unwrap keys
let left_keys = left_keys.as_ref().expect("left keys should exist");
let right_keys =
right_keys.as_ref().expect("right keys should exist");
// check if the commands conflict (i.e. if the keys being accessed
// intersect)
let conflict = left_keys.intersection(&right_keys).next().is_some();
// if the commands conflict:
// - the one with the lower clock should be a dependency of the
// other
// - the one with the higher clock doesn't have to be a dependency
// of the other, but that can happen
if conflict {
// borrow their clocks mutably
let mut left_deps = left_deps.borrow_mut();
let mut right_deps = right_deps.borrow_mut();
// check to which sets of deps we should add the other command
let (add_left_to_right, add_right_to_left) =
match left_clock.cmp(&right_clock) {
Ordering::Less => (true, rng.gen_bool(0.5)),
Ordering::Greater => (rng.gen_bool(0.5), true),
_ => unreachable!("clocks must be different"),
};
if add_left_to_right {
right_deps.insert(*left);
}
if add_right_to_left {
left_deps.insert(*right);
}
}
});
dot_to_data
.into_iter()
.map(|(dot, (keys, clock, deps_cell))| {
let deps = deps_cell.into_inner();
(dot, keys, clock, Arc::new(deps))
})
.collect()
}
fn shuffle_it(
n: usize,
mut args: Vec<(Dot, Option<BTreeSet<Key>>, Clock, Arc<CaesarDeps>)>,
) {
let total_order = check_termination(n, args.clone());
args.permutation().for_each(|permutation| {
println!("permutation = {:?}", permutation);
let sorted = check_termination(n, permutation);
assert_eq!(total_order, sorted);
});
}
fn check_termination(
n: usize,
args: Vec<(Dot, Option<BTreeSet<Key>>, Clock, Arc<CaesarDeps>)>,
) -> BTreeMap<Key, Vec<Rifl>> {
// create queue
let process_id = 1;
let f = 1;
let config = Config::new(n, f);
let mut queue = PredecessorsGraph::new(process_id, &config);
let time = RunTime;
let mut all_rifls = HashSet::new();
let mut sorted = BTreeMap::new();
args.into_iter().for_each(|(dot, keys, clock, deps)| {
// create command rifl from its dot
let rifl = Rifl::new(dot.source() as ClientId, dot.sequence());
// create command:
// - set single CONF key if no keys were provided
let keys = keys.unwrap_or_else(|| {
BTreeSet::from_iter(vec![String::from("CONF")])
});
let ops = keys.into_iter().map(|key| {
let value = String::from("");
(key, KVOp::Put(value))
});
let cmd = Command::from(rifl, ops);
// add to the set of all rifls
assert!(all_rifls.insert(rifl));
// add it to the queue
queue.add(dot, cmd, clock, deps, &time);
// get ready to execute
let to_execute = queue.commands_to_execute();
// for each command ready to be executed
to_execute.iter().for_each(|cmd| {
// get its rifl
let rifl = cmd.rifl();
// remove it from the set of rifls
assert!(all_rifls.remove(&cmd.rifl()));
// and add it to the sorted results
cmd.keys(fantoch::command::DEFAULT_SHARD_ID)
.for_each(|key| {
sorted
.entry(key.clone())
.or_insert_with(Vec::new)
.push(rifl);
})
});
});
// the set of all rifls should be empty
if !all_rifls.is_empty() {
panic!("the set of all rifls should be empty");
}
// return sorted commands
sorted
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/executor/table/executor.rs | fantoch_ps/src/executor/table/executor.rs | use crate::executor::table::MultiVotesTable;
use crate::protocol::common::table::VoteRange;
use fantoch::config::Config;
use fantoch::executor::{
ExecutionOrderMonitor, Executor, ExecutorMetrics, ExecutorResult,
MessageKey,
};
use fantoch::id::{Dot, ProcessId, Rifl, ShardId};
use fantoch::kvs::{KVOp, KVStore, Key};
use fantoch::shared::SharedMap;
use fantoch::time::SysTime;
use fantoch::trace;
use fantoch::HashMap;
use parking_lot::Mutex;
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
use std::sync::Arc;
#[derive(Clone)]
pub struct TableExecutor {
process_id: ProcessId,
shard_id: ShardId,
execute_at_commit: bool,
table: MultiVotesTable,
store: KVStore,
metrics: ExecutorMetrics,
to_clients: VecDeque<ExecutorResult>,
to_executors: Vec<(ShardId, TableExecutionInfo)>,
pending: HashMap<Key, PendingPerKey>,
rifl_to_stable_count: Arc<SharedMap<Rifl, Mutex<u64>>>,
}
#[derive(Clone, Default)]
struct PendingPerKey {
pending: VecDeque<Pending>,
stable_shards_buffered: HashMap<Rifl, usize>,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Pending {
rifl: Rifl,
shard_to_keys: Arc<HashMap<ShardId, Vec<Key>>>,
// number of keys on being accessed on this shard
shard_key_count: u64,
// number of shards the key is not stable at yet
missing_stable_shards: usize,
ops: Arc<Vec<KVOp>>,
}
impl Pending {
pub fn new(
shard_id: ShardId,
rifl: Rifl,
shard_to_keys: Arc<HashMap<ShardId, Vec<Key>>>,
ops: Arc<Vec<KVOp>>,
) -> Self {
let shard_key_count = shard_to_keys
.get(&shard_id)
.expect("my shard should be accessed by this command")
.len() as u64;
let missing_stable_shards = shard_to_keys.len();
Self {
rifl,
shard_to_keys,
shard_key_count,
missing_stable_shards,
ops,
}
}
pub fn single_key_command(&self) -> bool {
// the command is single key if it accesses a single shard and the
// number of keys accessed in that shard is one
self.missing_stable_shards == 1 && self.shard_key_count == 1
}
}
impl Executor for TableExecutor {
type ExecutionInfo = TableExecutionInfo;
fn new(process_id: ProcessId, shard_id: ShardId, config: Config) -> Self {
let (_, _, stability_threshold) = config.tempo_quorum_sizes();
let table = MultiVotesTable::new(
process_id,
shard_id,
config.n(),
stability_threshold,
);
let store = KVStore::new(config.executor_monitor_execution_order());
let metrics = ExecutorMetrics::new();
let to_clients = Default::default();
let to_executors = Default::default();
let pending = Default::default();
let rifl_to_stable_count = Arc::new(SharedMap::new());
Self {
process_id,
shard_id,
execute_at_commit: config.execute_at_commit(),
table,
store,
metrics,
to_clients,
to_executors,
pending,
rifl_to_stable_count,
}
}
fn handle(&mut self, info: Self::ExecutionInfo, _time: &dyn SysTime) {
// handle each new info by updating the votes table and execute ready
// commands
match info {
TableExecutionInfo::AttachedVotes {
dot,
clock,
key,
rifl,
shard_to_keys,
ops,
votes,
} => {
let pending =
Pending::new(self.shard_id, rifl, shard_to_keys, ops);
if self.execute_at_commit {
self.execute(key, pending);
} else {
let to_execute = self
.table
.add_attached_votes(dot, clock, &key, pending, votes);
self.send_stable_or_execute(key, to_execute);
}
}
TableExecutionInfo::DetachedVotes { key, votes } => {
if !self.execute_at_commit {
let to_execute = self.table.add_detached_votes(&key, votes);
self.send_stable_or_execute(key, to_execute);
}
}
TableExecutionInfo::StableAtShard { key, rifl } => {
self.handle_stable_msg(key, rifl)
}
}
}
fn to_clients(&mut self) -> Option<ExecutorResult> {
self.to_clients.pop_front()
}
fn to_executors(&mut self) -> Option<(ShardId, TableExecutionInfo)> {
self.to_executors.pop()
}
fn parallel() -> bool {
true
}
fn metrics(&self) -> &ExecutorMetrics {
&self.metrics
}
fn monitor(&self) -> Option<ExecutionOrderMonitor> {
self.store.monitor().cloned()
}
}
impl TableExecutor {
    /// Handles a `StableAtShard` notification: some other key (possibly on
    /// another shard) reports that the command identified by `rifl` is stable
    /// there. If the notification refers to a command that is not yet the
    /// head of this key's pending queue, it is buffered.
    fn handle_stable_msg(&mut self, key: Key, rifl: Rifl) {
        // get pending commands on this key
        let pending_per_key = self.pending.entry(key.clone()).or_default();
        trace!("p{}: key={} StableAtShard {:?}", self.process_id, key, rifl);
        if let Some(pending) = pending_per_key.pending.get_mut(0) {
            // check if it's a message about the first command pending
            if pending.rifl == rifl {
                // decrease number of missing stable shards
                pending.missing_stable_shards -= 1;
                trace!(
                    "p{}: key={} StableAtShard {:?} | missing shards {:?}",
                    self.process_id,
                    key,
                    rifl,
                    pending.missing_stable_shards
                );
                if pending.missing_stable_shards == 0 {
                    // if the command is stable at all shards, remove command
                    // from pending and execute it
                    let pending = pending_per_key.pending.pop_front().unwrap();
                    Self::do_execute(
                        key.clone(),
                        pending,
                        &mut self.store,
                        &mut self.to_clients,
                    );
                    // try to execute the remaining pending commands
                    while let Some(pending) =
                        pending_per_key.pending.pop_front()
                    {
                        let try_result =
                            Self::execute_single_or_mark_it_as_stable(
                                &key,
                                pending,
                                &mut self.store,
                                &mut self.to_clients,
                                &mut self.to_executors,
                                &mut pending_per_key.stable_shards_buffered,
                                &self.rifl_to_stable_count,
                            );
                        if let Some(pending) = try_result {
                            // if this command cannot be executed, buffer it and
                            // give up trying to execute more commands
                            pending_per_key.pending.push_front(pending);
                            return;
                        }
                    }
                }
            } else {
                // in this case, the command on this message is not yet
                // stable locally; in this case, we buffer this message
                *pending_per_key
                    .stable_shards_buffered
                    .entry(rifl)
                    .or_default() += 1;
            }
        } else {
            // in this case, the command on this message is not yet stable
            // locally; in this case, we buffer this message
            *pending_per_key
                .stable_shards_buffered
                .entry(rifl)
                .or_default() += 1;
        }
    }
    /// Tries to execute, in order, the commands in `to_execute` (which just
    /// became locally stable); commands that cannot be executed yet (still
    /// waiting for other shards) are buffered as pending on this key, which
    /// blocks everything behind them.
    fn send_stable_or_execute<I>(&mut self, key: Key, mut to_execute: I)
    where
        I: Iterator<Item = Pending>,
    {
        let pending_per_key = self.pending.entry(key.clone()).or_default();
        if !pending_per_key.pending.is_empty() {
            // if there's already commands pending at this key, then no
            // command can be executed, and thus we add them all as pending
            pending_per_key.pending.extend(to_execute);
            return;
        }
        // execute commands while no command is added as pending
        while let Some(pending) = to_execute.next() {
            trace!(
                "p{}: key={} try_execute_single {:?} | missing shards {:?}",
                self.process_id,
                key,
                pending.rifl,
                pending.missing_stable_shards
            );
            let try_result = Self::execute_single_or_mark_it_as_stable(
                &key,
                pending,
                &mut self.store,
                &mut self.to_clients,
                &mut self.to_executors,
                &mut pending_per_key.stable_shards_buffered,
                &self.rifl_to_stable_count,
            );
            if let Some(pending) = try_result {
                // if this command cannot be executed, then add it (and all the
                // remaining commands as pending) and give up trying to execute
                // more commands
                assert!(pending_per_key.pending.is_empty());
                pending_per_key.pending.push_back(pending);
                pending_per_key.pending.extend(to_execute);
                return;
            }
        }
    }
    /// Tries to execute a single command:
    /// - single-key commands execute immediately;
    /// - multi-key commands first wait for every key of this shard to see the
    ///   command as stable (tracked in `rifl_to_stable_count`, shared across
    ///   the parallel executors), then notify the other shards' keys, and only
    ///   execute once `missing_stable_shards` reaches zero.
    /// Returns the command back if it cannot be executed yet.
    #[must_use]
    fn execute_single_or_mark_it_as_stable(
        key: &Key,
        mut pending: Pending,
        store: &mut KVStore,
        to_clients: &mut VecDeque<ExecutorResult>,
        to_executors: &mut Vec<(ShardId, TableExecutionInfo)>,
        stable_shards_buffered: &mut HashMap<Rifl, usize>,
        rifl_to_stable_count: &Arc<SharedMap<Rifl, Mutex<u64>>>,
    ) -> Option<Pending> {
        let rifl = pending.rifl;
        if pending.single_key_command() {
            // if the command is single-key, execute immediately
            Self::do_execute(key.clone(), pending, store, to_clients);
            None
        } else {
            // closure that sends the stable message to every other key the
            // command accesses, on every shard
            let mut send_stable_msg = || {
                for (shard_id, shard_keys) in pending.shard_to_keys.iter() {
                    for shard_key in shard_keys {
                        if shard_key != key {
                            let msg = TableExecutionInfo::stable_at_shard(
                                shard_key.clone(),
                                rifl,
                            );
                            to_executors.push((*shard_id, msg));
                        }
                    }
                }
                true
            };
            if pending.shard_key_count == 1 {
                // if this command accesses a single key on this shard, then
                // send the stable message right away
                assert!(send_stable_msg());
                // and update the number of shards the key is stable at
                pending.missing_stable_shards -= 1;
            } else {
                // otherwise, increase rifl count
                let count_ref =
                    rifl_to_stable_count.get_or(&rifl, || Mutex::new(0));
                let mut count = count_ref.lock();
                *count += 1;
                // if we're the last key at this shard increasing the rifl count
                // to the number of keys in this shard, then
                // notify all keys that the command is stable at
                // this shard
                if *count == pending.shard_key_count {
                    // the command is stable at this shard; so send stable
                    // message
                    assert!(send_stable_msg());
                    // and update the number of shards the key is stable at
                    pending.missing_stable_shards -= 1;
                    // cleanup: release the lock and the map reference before
                    // removing the entry
                    drop(count);
                    drop(count_ref);
                    rifl_to_stable_count
                        .remove(&rifl)
                        .expect("rifl must exist as a key");
                }
            }
            // check if there's any buffered stable messages
            if let Some(count) = stable_shards_buffered.remove(&rifl) {
                pending.missing_stable_shards -= count;
            }
            if pending.missing_stable_shards == 0 {
                // if the command is already stable at shards, then execute it
                Self::do_execute(key.clone(), pending, store, to_clients);
                None
            } else {
                // in this case, the command cannot be executed; so send it back
                // to be buffered
                Some(pending)
            }
        }
    }
    /// Executes `stable` right away (used on the `execute_at_commit` path).
    fn execute(&mut self, key: Key, stable: Pending) {
        Self::do_execute(key, stable, &mut self.store, &mut self.to_clients)
    }
    /// Executes the operations of a stable command in the `KVStore` and
    /// queues the result to be sent to the client.
    fn do_execute(
        key: Key,
        stable: Pending,
        store: &mut KVStore,
        to_clients: &mut VecDeque<ExecutorResult>,
    ) {
        // take the ops inside the arc if we're the last with a reference to it
        // (otherwise, clone them)
        let rifl = stable.rifl;
        let ops = stable.ops;
        let ops =
            Arc::try_unwrap(ops).unwrap_or_else(|ops| ops.as_ref().clone());
        // execute ops in the `KVStore`
        let partial_results = store.execute(&key, ops, rifl);
        to_clients.push_back(ExecutorResult::new(rifl, key, partial_results));
    }
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum TableExecutionInfo {
    /// A committed command (its `ops`) together with the votes attached to
    /// its commit, to be added to the votes table of `key`.
    AttachedVotes {
        dot: Dot,
        clock: u64,
        key: Key,
        rifl: Rifl,
        // mapping from shard to the keys the command accesses on that shard
        shard_to_keys: Arc<HashMap<ShardId, Vec<Key>>>,
        ops: Arc<Vec<KVOp>>,
        votes: Vec<VoteRange>,
    },
    /// Votes not attached to any command, to be added to the votes table of
    /// `key`.
    DetachedVotes {
        key: Key,
        votes: Vec<VoteRange>,
    },
    /// Notification that the command identified by `rifl` is stable at the
    /// sending shard.
    StableAtShard {
        key: Key,
        rifl: Rifl,
    },
}
impl TableExecutionInfo {
    /// Creates an `AttachedVotes` execution info.
    pub fn attached_votes(
        dot: Dot,
        clock: u64,
        key: Key,
        rifl: Rifl,
        shard_to_keys: Arc<HashMap<ShardId, Vec<Key>>>,
        ops: Arc<Vec<KVOp>>,
        votes: Vec<VoteRange>,
    ) -> Self {
        Self::AttachedVotes {
            dot,
            clock,
            key,
            rifl,
            shard_to_keys,
            ops,
            votes,
        }
    }
    /// Creates a `DetachedVotes` execution info.
    pub fn detached_votes(key: Key, votes: Vec<VoteRange>) -> Self {
        Self::DetachedVotes { key, votes }
    }
    /// Creates a `StableAtShard` execution info.
    pub fn stable_at_shard(key: Key, rifl: Rifl) -> Self {
        Self::StableAtShard { key, rifl }
    }
}
impl MessageKey for TableExecutionInfo {
    /// Returns the key this execution info refers to, so that it can be
    /// routed to the executor responsible for that key.
    fn key(&self) -> &Key {
        // every variant carries a `key` field, so a single or-pattern arm
        // extracts it regardless of the variant
        match self {
            Self::AttachedVotes { key, .. }
            | Self::DetachedVotes { key, .. }
            | Self::StableAtShard { key, .. } => key,
        }
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/executor/table/mod.rs | fantoch_ps/src/executor/table/mod.rs | /// This module contains the definition of `TableExecutor` and
/// `TableExecutionInfo`.
mod executor;
// Re-exports.
pub use executor::{TableExecutionInfo, TableExecutor};
use crate::protocol::common::table::VoteRange;
use executor::Pending;
use fantoch::id::{Dot, ProcessId, ShardId};
use fantoch::kvs::Key;
use fantoch::trace;
use fantoch::util;
use fantoch::HashMap;
use std::collections::BTreeMap;
use std::mem;
use threshold::{ARClock, EventSet};
// execution order of a command: commands with the same clock are tie-broken
// by their `Dot`
type SortId = (u64, Dot);
#[derive(Clone)]
pub struct MultiVotesTable {
    process_id: ProcessId,
    shard_id: ShardId,
    // number of processes
    n: usize,
    // how many process frontiers must have reached a clock for it to be
    // considered stable (see `VotesTable::stable_clock`)
    stability_threshold: usize,
    // one `VotesTable` per key, created lazily
    tables: HashMap<Key, VotesTable>,
}
impl MultiVotesTable {
    /// Creates a new `MultiVotesTable` instance given the stability threshold.
    pub fn new(
        process_id: ProcessId,
        shard_id: ShardId,
        n: usize,
        stability_threshold: usize,
    ) -> Self {
        Self {
            process_id,
            shard_id,
            n,
            stability_threshold,
            tables: HashMap::new(),
        }
    }
    /// Add a new command, its clock and votes to the votes table.
    pub fn add_attached_votes(
        &mut self,
        dot: Dot,
        clock: u64,
        key: &Key,
        pending: Pending,
        votes: Vec<VoteRange>,
    ) -> impl Iterator<Item = Pending> {
        // register the command and its votes in the table of `key`, and
        // return whichever commands became stable as a consequence
        self.update_table(key, move |table| {
            table.add_attached_votes(dot, clock, pending, votes);
            table.stable_ops()
        })
    }
    /// Adds detached votes to the votes table.
    pub fn add_detached_votes(
        &mut self,
        key: &Key,
        votes: Vec<VoteRange>,
    ) -> impl Iterator<Item = Pending> {
        // register the detached votes in the table of `key`, and return
        // whichever commands became stable as a consequence
        self.update_table(key, move |table| {
            table.add_detached_votes(votes);
            table.stable_ops()
        })
    }
    // Generic helper that runs `update` on the table of `key`, creating that
    // table first if it does not exist yet.
    #[must_use]
    fn update_table<F, I>(&mut self, key: &Key, update: F) -> I
    where
        F: FnOnce(&mut VotesTable) -> I,
        I: Iterator<Item = Pending>,
    {
        // copy the fields needed to build a fresh table so that the closure
        // below does not borrow `self`
        let process_id = self.process_id;
        let shard_id = self.shard_id;
        let n = self.n;
        let stability_threshold = self.stability_threshold;
        let table = self.tables.entry(key.clone()).or_insert_with(|| {
            VotesTable::new(
                key.clone(),
                process_id,
                shard_id,
                n,
                stability_threshold,
            )
        });
        update(table)
    }
}
#[derive(Clone)]
struct VotesTable {
    // the key this table is responsible for
    key: Key,
    process_id: ProcessId,
    // number of processes
    n: usize,
    // how many process frontiers must have reached a clock for it to be
    // considered stable (see `stable_clock`)
    stability_threshold: usize,
    // `votes_clock` collects all votes seen until now so that we can compute
    // which timestamp is stable
    votes_clock: ARClock<ProcessId>,
    // this buffer saves us always allocating a vector when computing the
    // stable clock (see `stable_clock`)
    frontiers_buffer: Vec<u64>,
    // commands pending execution, ordered by `(clock, dot)`
    ops: BTreeMap<SortId, Pending>,
}
impl VotesTable {
    /// Creates an empty votes table for `key`.
    fn new(
        key: Key,
        process_id: ProcessId,
        shard_id: ShardId,
        n: usize,
        stability_threshold: usize,
    ) -> Self {
        let ids = util::process_ids(shard_id, n);
        let votes_clock = ARClock::with(ids);
        let frontiers_buffer = Vec::with_capacity(n);
        Self {
            key,
            process_id,
            n,
            stability_threshold,
            votes_clock,
            frontiers_buffer,
            ops: BTreeMap::new(),
        }
    }
    /// Adds a command (and the votes attached to its commit) to the table.
    fn add_attached_votes(
        &mut self,
        dot: Dot,
        clock: u64,
        pending: Pending,
        votes: Vec<VoteRange>,
    ) {
        // create sort identifier:
        // - if two ops got assigned the same clock, they will be ordered by
        //   their dot
        let sort_id = (clock, dot);
        trace!(
            "p{}: key={} Table::add {:?} {:?} | sort id {:?}",
            self.process_id,
            self.key,
            dot,
            clock,
            sort_id
        );
        // add op to the sorted list of ops to be executed
        let res = self.ops.insert(sort_id, pending);
        // and check there was nothing there for this exact same position
        assert!(res.is_none());
        // update votes with the votes used on this command
        self.add_detached_votes(votes);
    }
    /// Merges `votes` into the votes clock.
    fn add_detached_votes(&mut self, votes: Vec<VoteRange>) {
        trace!(
            "p{}: key={} Table::add_votes votes: {:?}",
            self.process_id,
            self.key,
            votes
        );
        votes.into_iter().for_each(|vote_range| {
            // assert there's at least one new vote
            assert!(self.votes_clock.add_range(
                &vote_range.voter(),
                vote_range.start(),
                vote_range.end()
            ));
            // assert that the clock size didn't change
            assert_eq!(self.votes_clock.len(), self.n);
        });
        trace!(
            "p{}: key={} Table::add_votes votes_clock: {:?}",
            self.process_id,
            self.key,
            self.votes_clock
        );
    }
    /// Removes and returns, in execution order, the commands that are now
    /// stable.
    fn stable_ops(&mut self) -> impl Iterator<Item = Pending> {
        // compute *next* stable sort id:
        // - if clock 10 is stable, then we can execute all ops with an id
        //   smaller than `(11,0)`
        // - if id with `(11,0)` is also part of this local structure, we can
        //   also execute it without 11 being stable, because, once 11 is
        //   stable, it will be the first to be executed either way
        let stable_clock = self.stable_clock();
        trace!(
            "p{}: key={} Table::stable_ops stable_clock: {:?}",
            self.process_id,
            self.key,
            stable_clock
        );
        let first_dot = Dot::new(1, 1);
        let next_stable = (stable_clock + 1, first_dot);
        // in fact, in the above example, if `(11,0)` is executed, we can also
        // execute `(11,1)`, and with that, execute `(11,2)` and so on
        // TODO loop while the previous flow is true and also return those ops
        // ACTUALLY maybe we can't since now we need to use dots (and not
        // process ids) to break ties
        // compute the list of ops that can be executed now
        let stable = {
            // remove from `self.ops` ops higher than `next_stable`, including
            // `next_stable`
            let mut remaining = self.ops.split_off(&next_stable);
            // swap remaining with `self.ops`
            mem::swap(&mut remaining, &mut self.ops);
            // now remaining contains what's the stable
            remaining
        };
        trace!(
            "p{}: key={} Table::stable_ops stable dots: {:?}",
            self.process_id,
            self.key,
            stable.iter().map(|((_, dot), _)| *dot).collect::<Vec<_>>()
        );
        // return stable ops
        stable.into_iter().map(|(_, pending)| pending)
    }
    // Computes the (potentially) new stable clock in this table.
    fn stable_clock(&mut self) -> u64 {
        // NOTE: we don't use `self.votes_clocks.frontier_threshold` function in
        // order to save us an allocation
        let clock_size = self.votes_clock.len();
        if self.stability_threshold <= clock_size {
            // clear current frontiers
            self.frontiers_buffer.clear();
            // get frontiers and sort them
            for (_, eset) in self.votes_clock.iter() {
                self.frontiers_buffer.push(eset.frontier());
            }
            self.frontiers_buffer.sort_unstable();
            // get the frontier at the correct threshold; `get` instead of
            // `iter().nth(..)` (clippy::iter_nth): same O(1) slice access,
            // clearer intent
            *self
                .frontiers_buffer
                .get(clock_size - self.stability_threshold)
                .expect("there should be a stable clock")
        } else {
            panic!("stability threshold must always be smaller than the number of processes")
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use fantoch::command::DEFAULT_SHARD_ID;
use fantoch::id::{ClientId, Rifl};
use fantoch::kvs::KVOp;
use permutator::Permutation;
use std::sync::Arc;
#[test]
fn votes_table_majority_quorums() {
// process ids
let process_id_1 = 1;
let process_id_2 = 2;
let process_id_3 = 3;
let process_id_4 = 4;
let process_id_5 = 5;
// let's consider that n = 5 and q = 3
// so the threshold should be n - q + 1 = 3
let process_id = 1;
let shard_id = 0;
let n = 5;
let stability_threshold = 3;
let mut table = VotesTable::new(
String::from("KEY"),
process_id,
shard_id,
n,
stability_threshold,
);
// in this example we'll use the dot as rifl;
// also, all commands access a single key
let pending = |value: &'static str, rifl: Rifl| -> Pending {
let shard_to_keys = Arc::new(
vec![(DEFAULT_SHARD_ID, vec!["KEY".to_string()])]
.into_iter()
.collect(),
);
let ops = Arc::new(vec![KVOp::Put(String::from(value))]);
Pending::new(DEFAULT_SHARD_ID, rifl, shard_to_keys, ops)
};
// a1
let a1 = "A1";
// assumes a single client per process that has the same id as the
// process
// p1, final clock = 1
let a1_dot = Dot::new(process_id_1, 1);
let a1_clock = 1;
let a1_rifl = Rifl::new(process_id_1 as ClientId, 1);
// p1, p2 and p3 voted with 1
let a1_votes = vec![
VoteRange::new(process_id_1, 1, 1),
VoteRange::new(process_id_2, 1, 1),
VoteRange::new(process_id_3, 1, 1),
];
// c1
let c1 = "C1";
// p3, final clock = 3
let c1_dot = Dot::new(process_id_3, 1);
let c1_clock = 3;
let c1_rifl = Rifl::new(process_id_3 as ClientId, 1);
// p1 voted with 2, p2 voted with 3 and p3 voted with 2
let c1_votes = vec![
VoteRange::new(process_id_1, 2, 2),
VoteRange::new(process_id_2, 3, 3),
VoteRange::new(process_id_3, 2, 2),
];
// d1
let d1 = "D1";
// p4, final clock = 3
let d1_dot = Dot::new(process_id_4, 1);
let d1_clock = 3;
let d1_rifl = Rifl::new(process_id_4 as ClientId, 1);
// p2 voted with 2, p3 voted with 3 and p4 voted with 1-3
let d1_votes = vec![
VoteRange::new(process_id_2, 2, 2),
VoteRange::new(process_id_3, 3, 3),
VoteRange::new(process_id_4, 1, 3),
];
// e1
let e1 = "E1";
// p5, final clock = 4
let e1_dot = Dot::new(process_id_5, 1);
let e1_clock = 4;
let e1_rifl = Rifl::new(process_id_5 as ClientId, 1);
// p1 voted with 3, p4 voted with 4 and p5 voted with 1-4
let e1_votes = vec![
VoteRange::new(process_id_1, 3, 3),
VoteRange::new(process_id_4, 4, 4),
VoteRange::new(process_id_5, 1, 4),
];
// e2
let e2 = "E2";
// p5, final clock = 5
let e2_dot = Dot::new(process_id_5, 2);
let e2_clock = 5;
let e2_rifl = Rifl::new(process_id_5 as ClientId, 2);
// p1 voted with 4-5, p4 voted with 5 and p5 voted with 5
let e2_votes = vec![
VoteRange::new(process_id_1, 4, 5),
VoteRange::new(process_id_4, 5, 5),
VoteRange::new(process_id_5, 5, 5),
];
// add a1 to table
table.add_attached_votes(
a1_dot,
a1_clock,
pending(a1, a1_rifl),
a1_votes.clone(),
);
// get stable: a1
let stable = table.stable_ops().collect::<Vec<_>>();
assert_eq!(stable, vec![pending(a1, a1_rifl)]);
// add d1 to table
table.add_attached_votes(
d1_dot,
d1_clock,
pending(d1, d1_rifl),
d1_votes.clone(),
);
// get stable: none
let stable = table.stable_ops().collect::<Vec<_>>();
assert_eq!(stable, vec![]);
// add c1 to table
table.add_attached_votes(
c1_dot,
c1_clock,
pending(c1, c1_rifl),
c1_votes.clone(),
);
// get stable: c1 then d1
let stable = table.stable_ops().collect::<Vec<_>>();
assert_eq!(stable, vec![pending(c1, c1_rifl), pending(d1, d1_rifl)]);
// add e2 to table
table.add_attached_votes(
e2_dot,
e2_clock,
pending(e2, e2_rifl),
e2_votes.clone(),
);
// get stable: none
let stable = table.stable_ops().collect::<Vec<_>>();
assert_eq!(stable, vec![]);
// add e1 to table
table.add_attached_votes(
e1_dot,
e1_clock,
pending(e1, e1_rifl),
e1_votes.clone(),
);
// get stable: none
let stable = table.stable_ops().collect::<Vec<_>>();
assert_eq!(stable, vec![pending(e1, e1_rifl), pending(e2, e2_rifl)]);
// run all the permutations of the above and check that the final total
// order is the same
let total_order = vec![
pending(a1, a1_rifl),
pending(c1, c1_rifl),
pending(d1, d1_rifl),
pending(e1, e1_rifl),
pending(e2, e2_rifl),
];
let mut all_ops = vec![
(a1_dot, a1_clock, pending(a1, a1_rifl), a1_votes),
(c1_dot, c1_clock, pending(c1, c1_rifl), c1_votes),
(d1_dot, d1_clock, pending(d1, d1_rifl), d1_votes),
(e1_dot, e1_clock, pending(e1, e1_rifl), e1_votes),
(e2_dot, e2_clock, pending(e2, e2_rifl), e2_votes),
];
all_ops.permutation().for_each(|p| {
let mut table = VotesTable::new(
String::from("KEY"),
process_id_1,
shard_id,
n,
stability_threshold,
);
let permutation_total_order: Vec<_> = p
.clone()
.into_iter()
.flat_map(|(dot, clock, pending, votes)| {
table.add_attached_votes(dot, clock, pending, votes);
table.stable_ops()
})
.collect();
assert_eq!(total_order, permutation_total_order);
});
}
#[test]
fn votes_table_tiny_quorums() {
let shard_id = 0;
// process ids
let process_id_1 = 1;
let process_id_2 = 2;
let process_id_3 = 3;
let process_id_4 = 4;
let process_id_5 = 5;
// let's consider that n = 5 and f = 1 and we're using write quorums of
// size f + 1 so the threshold should be n - f = 4;
let n = 5;
let f = 1;
let stability_threshold = n - f;
let mut table = VotesTable::new(
String::from("KEY"),
process_id_1,
shard_id,
n,
stability_threshold,
);
// in this example we'll use the dot as rifl;
// also, all commands access a single key
let pending = |value: &'static str, rifl: Rifl| -> Pending {
let shard_to_keys = Arc::new(
vec![(DEFAULT_SHARD_ID, vec!["KEY".to_string()])]
.into_iter()
.collect(),
);
let ops = Arc::new(vec![KVOp::Put(String::from(value))]);
Pending::new(DEFAULT_SHARD_ID, rifl, shard_to_keys, ops)
};
// a1
let a1 = "A1";
// p1, final clock = 1
let a1_dot = Dot::new(process_id_1, 1);
let a1_clock = 1;
let a1_rifl = Rifl::new(process_id_1 as ClientId, 1);
// p1, p2 voted with 1
let a1_votes = vec![
VoteRange::new(process_id_1, 1, 1),
VoteRange::new(process_id_2, 1, 1),
];
// add a1 to table
table.add_attached_votes(
a1_dot,
a1_clock,
pending(a1, a1_rifl),
a1_votes.clone(),
);
// get stable: none
let stable = table.stable_ops().collect::<Vec<_>>();
assert_eq!(stable, vec![]);
// c1
let c1 = "C1";
// p3, final clock = 2
let c1_dot = Dot::new(process_id_3, 1);
let c1_clock = 2;
let c1_rifl = Rifl::new(process_id_3 as ClientId, 1);
// p2 voted with 2, p3 voted with 1-2
let c1_votes = vec![
VoteRange::new(process_id_3, 1, 1),
VoteRange::new(process_id_2, 2, 2),
VoteRange::new(process_id_3, 2, 2),
];
// add c1 to table
table.add_attached_votes(
c1_dot,
c1_clock,
pending(c1, c1_rifl),
c1_votes.clone(),
);
// get stable: none
let stable = table.stable_ops().collect::<Vec<_>>();
assert_eq!(stable, vec![]);
// e1
let e1 = "E1";
// p5, final clock = 1
let e1_dot = Dot::new(process_id_5, 1);
let e1_clock = 1;
let e1_rifl = Rifl::new(process_id_5 as ClientId, 1);
// p5 and p4 voted with 1
let e1_votes = vec![
VoteRange::new(process_id_5, 1, 1),
VoteRange::new(process_id_4, 1, 1),
];
// add e1 to table
table.add_attached_votes(
e1_dot,
e1_clock,
pending(e1, e1_rifl),
e1_votes.clone(),
);
// get stable: a1 and e1
let stable = table.stable_ops().collect::<Vec<_>>();
assert_eq!(stable, vec![pending(a1, a1_rifl), pending(e1, e1_rifl)]);
// a2
let a2 = "A2";
// p1, final clock = 3
let a2_dot = Dot::new(process_id_1, 2);
let a2_clock = 3;
let a2_rifl = Rifl::new(process_id_1 as ClientId, 2);
// p1 voted with 2-3 and p2 voted with 3
let a2_votes = vec![
VoteRange::new(process_id_1, 2, 2),
VoteRange::new(process_id_2, 3, 3),
VoteRange::new(process_id_1, 3, 3),
];
// add a2 to table
table.add_attached_votes(
a2_dot,
a2_clock,
pending(a2, a2_rifl),
a2_votes.clone(),
);
// get stable: none
let stable = table.stable_ops().collect::<Vec<_>>();
assert_eq!(stable, vec![]);
// d1
let d1 = "D1";
// p4, final clock = 3
let d1_dot = Dot::new(process_id_4, 1);
let d1_clock = 3;
let d1_rifl = Rifl::new(process_id_4 as ClientId, 1);
// p4 voted with 2-3 and p3 voted with 3
let d1_votes = vec![
VoteRange::new(process_id_4, 2, 2),
VoteRange::new(process_id_3, 3, 3),
VoteRange::new(process_id_4, 3, 3),
];
// add d1 to table
table.add_attached_votes(
d1_dot,
d1_clock,
pending(d1, d1_rifl),
d1_votes.clone(),
);
// get stable
let stable = table.stable_ops().collect::<Vec<_>>();
assert_eq!(
stable,
vec![
pending(c1, c1_rifl),
pending(a2, a2_rifl),
pending(d1, d1_rifl),
]
);
}
#[test]
fn detached_votes() {
let shard_id = 0;
// create table
let process_id = 1;
let n = 5;
let stability_threshold = 3;
let mut table =
MultiVotesTable::new(process_id, shard_id, n, stability_threshold);
// create keys
let key_a = String::from("A");
let key_b = String::from("B");
// closure to compute the stable clock for some key
let stable_clock = |table: &mut MultiVotesTable, key: &Key| {
table
.tables
.get_mut(key)
.expect("table for this key should exist")
.stable_clock()
};
// p1 votes on key A
let process_id = 1;
let stable = table
.add_detached_votes(&key_a, vec![VoteRange::new(process_id, 1, 1)])
.collect::<Vec<_>>();
assert!(stable.is_empty());
// check stable clocks
assert_eq!(stable_clock(&mut table, &key_a), 0);
// p1 votes on key b
let stable = table
.add_detached_votes(&key_b, vec![VoteRange::new(process_id, 1, 1)])
.collect::<Vec<_>>();
assert!(stable.is_empty());
// check stable clocks
assert_eq!(stable_clock(&mut table, &key_a), 0);
assert_eq!(stable_clock(&mut table, &key_b), 0);
// p2 votes on key A
let process_id = 2;
let stable = table
.add_detached_votes(&key_a, vec![VoteRange::new(process_id, 1, 1)])
.collect::<Vec<_>>();
assert!(stable.is_empty());
// check stable clocks
assert_eq!(stable_clock(&mut table, &key_a), 0);
assert_eq!(stable_clock(&mut table, &key_b), 0);
// p3 votes on key A
let process_id = 3;
let stable = table
.add_detached_votes(&key_a, vec![VoteRange::new(process_id, 1, 1)])
.collect::<Vec<_>>();
assert!(stable.is_empty());
// check stable clocks
assert_eq!(stable_clock(&mut table, &key_a), 1);
assert_eq!(stable_clock(&mut table, &key_b), 0);
// p3 votes on key B
let stable = table
.add_detached_votes(&key_b, vec![VoteRange::new(process_id, 1, 1)])
.collect::<Vec<_>>();
assert!(stable.is_empty());
// check stable clocks
assert_eq!(stable_clock(&mut table, &key_a), 1);
assert_eq!(stable_clock(&mut table, &key_b), 0);
// p4 votes on key B
let process_id = 4;
let stable = table
.add_detached_votes(&key_b, vec![VoteRange::new(process_id, 1, 1)])
.collect::<Vec<_>>();
assert!(stable.is_empty());
// check stable clocks
assert_eq!(stable_clock(&mut table, &key_a), 1);
assert_eq!(stable_clock(&mut table, &key_b), 1);
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/atlas.rs | fantoch_ps/src/protocol/atlas.rs | use crate::executor::{GraphExecutionInfo, GraphExecutor};
use crate::protocol::common::graph::{
Dependency, KeyDeps, LockedKeyDeps, QuorumDeps, SequentialKeyDeps,
};
use crate::protocol::common::synod::{Synod, SynodMessage};
use crate::protocol::partial::{self, ShardsCommits};
use fantoch::command::Command;
use fantoch::config::Config;
use fantoch::id::{Dot, ProcessId, ShardId};
use fantoch::protocol::{
Action, BaseProcess, Info, MessageIndex, Protocol, ProtocolMetrics,
SequentialCommandsInfo, VClockGCTrack,
};
use fantoch::time::SysTime;
use fantoch::{singleton, trace};
use fantoch::{HashMap, HashSet};
use serde::{Deserialize, Serialize};
use std::time::Duration;
use threshold::VClock;
/// `Atlas` parameterized with sequential key-dependency tracking.
pub type AtlasSequential = Atlas<SequentialKeyDeps>;
/// `Atlas` parameterized with lock-based key-dependency tracking.
pub type AtlasLocked = Atlas<LockedKeyDeps>;
#[derive(Debug, Clone)]
pub struct Atlas<KD: KeyDeps> {
    bp: BaseProcess,
    // tracks per-key dependencies of commands
    key_deps: KD,
    // per-dot command info (status, quorum, quorum deps, synod state)
    cmds: SequentialCommandsInfo<AtlasInfo>,
    gc_track: VClockGCTrack,
    // outgoing actions / execution info, drained via `to_processes` /
    // `to_executors`
    to_processes: Vec<Action<Self>>,
    to_executors: Vec<GraphExecutionInfo>,
    // set of processes in my shard
    shard_processes: HashSet<ProcessId>,
    // commit notifications that arrived before the initial `MCollect` message
    // (this may be possible even without network failures due to multiplexing)
    buffered_commits: HashMap<Dot, (ProcessId, ConsensusValue)>,
}
impl<KD: KeyDeps> Protocol for Atlas<KD> {
    type Message = Message;
    type PeriodicEvent = PeriodicEvent;
    type Executor = GraphExecutor;
    /// Creates a new `Atlas` process, together with the periodic events it
    /// wants to be notified about (currently only garbage collection, when a
    /// GC interval is configured).
    fn new(
        process_id: ProcessId,
        shard_id: ShardId,
        config: Config,
    ) -> (Self, Vec<(PeriodicEvent, Duration)>) {
        // compute fast and write quorum sizes
        let (fast_quorum_size, write_quorum_size) = config.atlas_quorum_sizes();
        // create protocol data-structures
        let bp = BaseProcess::new(
            process_id,
            shard_id,
            config,
            fast_quorum_size,
            write_quorum_size,
        );
        let key_deps = KD::new(shard_id, config.nfr());
        let cmds = SequentialCommandsInfo::new(
            process_id,
            shard_id,
            config.n(),
            config.f(),
            fast_quorum_size,
            write_quorum_size,
        );
        let gc_track = VClockGCTrack::new(process_id, shard_id, config.n());
        let to_processes = Vec::new();
        let to_executors = Vec::new();
        let shard_processes =
            fantoch::util::process_ids(shard_id, config.n()).collect();
        let buffered_commits = HashMap::new();
        // create `Atlas`
        let protocol = Self {
            bp,
            key_deps,
            cmds,
            gc_track,
            to_processes,
            to_executors,
            shard_processes,
            buffered_commits,
        };
        // create periodic events
        let events = if let Some(interval) = config.gc_interval() {
            vec![(PeriodicEvent::GarbageCollection, interval)]
        } else {
            vec![]
        };
        // return both
        (protocol, events)
    }
    /// Returns the process identifier.
    fn id(&self) -> ProcessId {
        self.bp.process_id
    }
    /// Returns the shard identifier.
    fn shard_id(&self) -> ShardId {
        self.bp.shard_id
    }
    /// Updates the processes known by this process.
    /// The set of processes provided is already sorted by distance.
    fn discover(
        &mut self,
        processes: Vec<(ProcessId, ShardId)>,
    ) -> (bool, HashMap<ShardId, ProcessId>) {
        let connect_ok = self.bp.discover(processes);
        (connect_ok, self.bp.closest_shard_process().clone())
    }
    /// Submits a command issued by some client.
    fn submit(&mut self, dot: Option<Dot>, cmd: Command, _time: &dyn SysTime) {
        self.handle_submit(dot, cmd, true)
    }
    /// Handles protocol messages, dispatching to the per-message handlers.
    fn handle(
        &mut self,
        from: ProcessId,
        from_shard_id: ShardId,
        msg: Self::Message,
        time: &dyn SysTime,
    ) {
        match msg {
            // Protocol messages
            Message::MCollect {
                dot,
                cmd,
                quorum,
                deps,
            } => self.handle_mcollect(from, dot, cmd, quorum, deps, time),
            Message::MCollectAck { dot, deps } => {
                self.handle_mcollectack(from, dot, deps, time)
            }
            Message::MCommit { dot, value } => {
                self.handle_mcommit(from, dot, value, time)
            }
            Message::MConsensus { dot, ballot, value } => {
                self.handle_mconsensus(from, dot, ballot, value, time)
            }
            Message::MConsensusAck { dot, ballot } => {
                self.handle_mconsensusack(from, dot, ballot, time)
            }
            // Partial replication
            Message::MForwardSubmit { dot, cmd } => {
                self.handle_submit(Some(dot), cmd, false)
            }
            Message::MShardCommit { dot, deps } => {
                self.handle_mshard_commit(from, from_shard_id, dot, deps, time)
            }
            Message::MShardAggregatedCommit { dot, deps } => {
                self.handle_mshard_aggregated_commit(dot, deps, time)
            }
            // GC messages
            Message::MCommitDot { dot } => {
                self.handle_mcommit_dot(from, dot, time)
            }
            Message::MGarbageCollection { committed } => {
                self.handle_mgc(from, committed, time)
            }
            Message::MStable { stable } => {
                self.handle_mstable(from, stable, time)
            }
        }
    }
    /// Handles periodic local events.
    fn handle_event(&mut self, event: Self::PeriodicEvent, time: &dyn SysTime) {
        match event {
            PeriodicEvent::GarbageCollection => {
                self.handle_event_garbage_collection(time)
            }
        }
    }
    /// Returns a new action to be sent to other processes.
    fn to_processes(&mut self) -> Option<Action<Self>> {
        self.to_processes.pop()
    }
    /// Returns new execution info for executors.
    fn to_executors(&mut self) -> Option<GraphExecutionInfo> {
        self.to_executors.pop()
    }
    /// Whether the protocol can run in parallel is decided by the key-deps
    /// implementation selected.
    fn parallel() -> bool {
        KD::parallel()
    }
    /// Atlas is a leaderless protocol.
    fn leaderless() -> bool {
        true
    }
    fn metrics(&self) -> &ProtocolMetrics {
        self.bp.metrics()
    }
}
impl<KD: KeyDeps> Atlas<KD> {
    /// Handles a submit operation by a client: assigns a dot (unless one was
    /// provided), forwards the command to other shards if needed, computes
    /// its dependencies and broadcasts an `MCollect`.
    fn handle_submit(
        &mut self,
        dot: Option<Dot>,
        cmd: Command,
        target_shard: bool,
    ) {
        // compute the command identifier
        let dot = dot.unwrap_or_else(|| self.bp.next_dot());
        // create submit actions
        let create_mforward_submit =
            |dot, cmd| Message::MForwardSubmit { dot, cmd };
        // NOTE(review): `target_shard` presumably distinguishes the shard the
        // client targeted from shards reached via `MForwardSubmit` — confirm
        // against `partial::submit_actions`
        partial::submit_actions(
            &self.bp,
            dot,
            &cmd,
            target_shard,
            create_mforward_submit,
            &mut self.to_processes,
        );
        // compute its deps
        let deps = self.key_deps.add_cmd(dot, &cmd, None);
        // create `MCollect` and target
        let quorum = self.bp.maybe_adjust_fast_quorum(&cmd);
        let mcollect = Message::MCollect {
            dot,
            cmd,
            deps,
            quorum,
        };
        let target = self.bp.all();
        // add `Mcollect` send as action
        self.to_processes.push(Action::ToSend {
            target,
            msg: mcollect,
        });
    }
    /// Handles an `MCollect`: fast-quorum members compute dependencies and
    /// reply with an `MCollectAck`; other processes just save the payload.
    fn handle_mcollect(
        &mut self,
        from: ProcessId,
        dot: Dot,
        cmd: Command,
        quorum: HashSet<ProcessId>,
        remote_deps: HashSet<Dependency>,
        time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MCollect({:?}, {:?}, {:?}) from {} | time={}",
            self.id(),
            dot,
            cmd,
            remote_deps,
            from,
            time.micros()
        );
        // get cmd info
        let info = self.cmds.get(dot);
        // discard message if no longer in START
        if info.status != Status::START {
            return;
        }
        // check if part of fast quorum
        if !quorum.contains(&self.bp.process_id) {
            // if not:
            // - simply save the payload and set status to `PAYLOAD`
            // - if we received the `MCommit` before the `MCollect`, handle the
            //   `MCommit` now
            info.status = Status::PAYLOAD;
            info.cmd = Some(cmd);
            // check if there's a buffered commit notification; if yes, handle
            // the commit again (since now we have the payload)
            if let Some((from, value)) = self.buffered_commits.remove(&dot) {
                self.handle_mcommit(from, dot, value, time);
            }
            return;
        }
        // check if it's a message from self
        let message_from_self = from == self.bp.process_id;
        let deps = if message_from_self {
            // if it is, do not recompute deps
            remote_deps
        } else {
            // otherwise, compute deps with the remote deps as past
            self.key_deps.add_cmd(dot, &cmd, Some(remote_deps))
        };
        // update command info
        info.status = Status::COLLECT;
        info.quorum_deps.maybe_adjust_fast_quorum_size(quorum.len());
        info.quorum = quorum;
        info.cmd = Some(cmd);
        // create and set consensus value
        let value = ConsensusValue::with(deps.clone());
        assert!(info.synod.set_if_not_accepted(|| value));
        // create `MCollectAck` and target: reply only to the coordinator
        let mcollectack = Message::MCollectAck { dot, deps };
        let target = singleton![from];
        // save new action
        self.to_processes.push(Action::ToSend {
            target,
            msg: mcollectack,
        });
    }
    /// Handles an `MCollectAck`: once all fast-quorum replies arrived, either
    /// takes the fast path (issuing `MCommit`) or falls back to consensus
    /// (issuing `MConsensus`).
    fn handle_mcollectack(
        &mut self,
        from: ProcessId,
        dot: Dot,
        deps: HashSet<Dependency>,
        _time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MCollectAck({:?}, {:?}) from {} | time={}",
            self.id(),
            dot,
            deps,
            from,
            _time.micros()
        );
        // get cmd info
        let info = self.cmds.get(dot);
        if info.status != Status::COLLECT {
            // do nothing if we're no longer COLLECT
            return;
        }
        // update quorum deps
        info.quorum_deps.add(from, deps);
        // check if we have all necessary replies
        if info.quorum_deps.all() {
            // compute threshold:
            // - if the fast quorum is n/2 + f, then the threshold is f
            // - if the fast quorum is a majority (for single-key reads with
            //   NFR), then the threshold is 1 (and thus the fast path is always
            //   taken)
            let minority = self.bp.config.majority_quorum_size() - 1;
            let threshold = info.quorum.len() - minority;
            debug_assert!(threshold <= self.bp.config.f());
            // check if threshold union if equal to union and get the union of
            // all dependencies reported
            let (all_deps, fast_path) =
                info.quorum_deps.check_threshold(threshold);
            // create consensus value
            let value = ConsensusValue::with(all_deps);
            // fast path metrics
            let cmd = info.cmd.as_ref().unwrap();
            self.bp.path(fast_path, cmd.read_only());
            // fast path condition:
            // - each dependency was reported by at least f processes
            if fast_path {
                // fast path: create `MCommit`
                let shard_count = cmd.shard_count();
                Self::mcommit_actions(
                    &self.bp,
                    info,
                    shard_count,
                    dot,
                    value,
                    &mut self.to_processes,
                )
            } else {
                // slow path: create `MConsensus`
                let ballot = info.synod.skip_prepare();
                let mconsensus = Message::MConsensus { dot, ballot, value };
                let target = self.bp.write_quorum();
                // save new action
                self.to_processes.push(Action::ToSend {
                    target,
                    msg: mconsensus,
                });
            }
        }
    }
/// Handles an `MCommit`: records the chosen consensus value, forwards the
/// command and its final dependencies to the executors, and either notifies
/// the GC worker or garbage-collects the dot's info right away.
fn handle_mcommit(
    &mut self,
    from: ProcessId,
    dot: Dot,
    value: ConsensusValue,
    _time: &dyn SysTime,
) {
    trace!(
        "p{}: MCommit({:?}, {:?}) | time={}",
        self.id(),
        dot,
        value.deps,
        _time.micros()
    );
    // get cmd info
    let info = self.cmds.get(dot);
    if info.status == Status::START {
        // TODO we missed the `MCollect` message and should try to recover
        // the payload:
        // - save this notification just in case we've received the
        //   `MCollect` and `MCommit` in opposite orders (due to
        //   multiplexing)
        self.buffered_commits.insert(dot, (from, value));
        return;
    }
    if info.status == Status::COMMIT {
        // do nothing if we're already COMMIT
        return;
    }
    // check it's not a noop (noops could only be chosen during recovery,
    // which is not supported yet)
    assert!(!value.is_noop, "handling noop's is not implemented yet");
    // get command
    let cmd = info
        .cmd
        .as_ref()
        .expect("there should be a command payload");
    // create execution info
    let execution_info =
        GraphExecutionInfo::add(dot, cmd.clone(), value.deps.clone());
    self.to_executors.push(execution_info);
    // update command info:
    info.status = Status::COMMIT;
    // handle commit in synod
    let msg = SynodMessage::MChosen(value);
    assert!(info.synod.handle(from, msg).is_none());
    // check if this dot is targeted to my shard
    let my_shard = self.shard_processes.contains(&dot.source());
    // TODO: fix this once we implement recovery for partial replication
    if self.gc_running() && my_shard {
        // if running gc and this dot belongs to my shard, then notify self
        // (i.e. the worker responsible for GC) with the committed dot
        self.to_processes.push(Action::ToForward {
            msg: Message::MCommitDot { dot },
        });
    } else {
        // not running gc, so remove the dot info now
        self.cmds.gc_single(dot);
    }
}
/// Handles an `MConsensus` (slow-path accept): feeds the accept into the
/// synod and replies to the sender with either an `MConsensusAck` (accept
/// succeeded) or an `MCommit` (a value was already chosen). Low-ballot
/// accepts are silently dropped.
fn handle_mconsensus(
    &mut self,
    from: ProcessId,
    dot: Dot,
    ballot: u64,
    value: ConsensusValue,
    _time: &dyn SysTime,
) {
    trace!(
        "p{}: MConsensus({:?}, {}, {:?}) | time={}",
        self.id(),
        dot,
        ballot,
        value.deps,
        _time.micros()
    );
    // get cmd info
    let info = self.cmds.get(dot);
    // compute message: that can either be nothing, an ack or an mcommit
    let msg = match info
        .synod
        .handle(from, SynodMessage::MAccept(ballot, value))
    {
        Some(SynodMessage::MAccepted(ballot)) => {
            // the accept message was accepted: create `MConsensusAck`
            Message::MConsensusAck { dot, ballot }
        }
        Some(SynodMessage::MChosen(value)) => {
            // the value has already been chosen: create `MCommit`
            Message::MCommit { dot, value }
        }
        None => {
            // ballot too low to be accepted: nothing to do
            return;
        }
        _ => panic!(
            "no other type of message should be output by Synod in the MConsensus handler"
        ),
    };
    // create target: reply only to the sender
    let target = singleton![from];
    // save new action
    self.to_processes.push(Action::ToSend { target, msg });
}
/// Handles an `MConsensusAck` (slow path): once the synod has gathered
/// enough accepts, the value is chosen and the corresponding `MCommit`
/// actions are generated.
fn handle_mconsensusack(
    &mut self,
    from: ProcessId,
    dot: Dot,
    ballot: u64,
    _time: &dyn SysTime,
) {
    trace!(
        "p{}: MConsensusAck({:?}, {}) | time={}",
        self.id(),
        dot,
        ballot,
        _time.micros()
    );
    // get cmd info
    let info = self.cmds.get(dot);
    // compute message: that can either be nothing or an mcommit
    match info.synod.handle(from, SynodMessage::MAccepted(ballot)) {
        Some(SynodMessage::MChosen(value)) => {
            // enough accepts were gathered and the value has been chosen; create `MCommit`
            let shard_count = info.cmd.as_ref().unwrap().shard_count();
            Self::mcommit_actions(&self.bp, info, shard_count, dot, value, &mut self.to_processes)
        }
        None => {
            // not enough accepts yet: nothing to do
        }
        _ => panic!(
            "no other type of message should be output by Synod in the MConsensusAck handler"
        ),
    }
}
/// Handles an `MShardCommit` from another shard: folds the sender's
/// dependencies into the per-command aggregation state and, via the shared
/// `partial` helper, emits an `MShardAggregatedCommit` once every shard has
/// reported.
fn handle_mshard_commit(
    &mut self,
    from: ProcessId,
    _from_shard_id: ShardId,
    dot: Dot,
    deps: HashSet<Dependency>,
    _time: &dyn SysTime,
) {
    trace!(
        "p{}: MShardCommit({:?}, {:?}) from shard {} | time={}",
        self.id(),
        dot,
        deps,
        _from_shard_id,
        _time.micros()
    );
    // fetch the command's bookkeeping entry
    let info = self.cmds.get(dot);
    let shard_count = info.cmd.as_ref().unwrap().shard_count();
    // aggregation step: union the incoming deps into the ones gathered so far
    let merge_deps =
        |acc: &mut HashSet<Dependency>, incoming| acc.extend(incoming);
    // once every shard reported, wrap the union in an aggregated-commit message
    let build_aggregated = |dot, acc: &HashSet<Dependency>| {
        Message::MShardAggregatedCommit {
            dot,
            deps: acc.clone(),
        }
    };
    partial::handle_mshard_commit(
        &self.bp,
        &mut info.shards_commits,
        shard_count,
        from,
        dot,
        deps,
        merge_deps,
        build_aggregated,
        &mut self.to_processes,
    )
}
/// Handles an `MShardAggregatedCommit`: via the shared `partial` helper,
/// turns the aggregated dependency set into a final `MCommit`.
fn handle_mshard_aggregated_commit(
    &mut self,
    dot: Dot,
    deps: HashSet<Dependency>,
    _time: &dyn SysTime,
) {
    trace!(
        "p{}: MShardAggregatedCommit({:?}, {:?}) | time={}",
        self.id(),
        dot,
        deps,
        _time.micros()
    );
    // fetch the command's bookkeeping entry
    let info = self.cmds.get(dot);
    // this protocol carries no extra data in the aggregated commit
    let no_extra_data = |_| ();
    // wrap the final dependency set in a consensus value and emit `MCommit`
    let build_mcommit = |dot, deps, ()| Message::MCommit {
        dot,
        value: ConsensusValue::with(deps),
    };
    partial::handle_mshard_aggregated_commit(
        &self.bp,
        &mut info.shards_commits,
        dot,
        deps,
        no_extra_data,
        build_mcommit,
        &mut self.to_processes,
    )
}
/// Handles an `MCommitDot`: records a committed dot in the GC tracker.
/// This message is always a self-forward from a protocol worker to the GC
/// worker (see `handle_mcommit`), hence the assert on `from`.
fn handle_mcommit_dot(
    &mut self,
    from: ProcessId,
    dot: Dot,
    _time: &dyn SysTime,
) {
    trace!(
        "p{}: MCommitDot({:?}) | time={}",
        self.id(),
        dot,
        _time.micros()
    );
    // `MCommitDot` must come from self
    assert_eq!(from, self.bp.process_id);
    self.gc_track.add_to_clock(&dot);
}
/// Handles an `MGarbageCollection`: records the sender's committed clock
/// and, if any dots became stable, forwards an `MStable` to self so the
/// corresponding command info can be garbage-collected.
fn handle_mgc(
    &mut self,
    from: ProcessId,
    committed: VClock<ProcessId>,
    _time: &dyn SysTime,
) {
    trace!(
        "p{}: MGarbageCollection({:?}) from {} | time={}",
        self.id(),
        committed,
        from,
        _time.micros()
    );
    // record the sender's committed clock
    self.gc_track.update_clock_of(from, committed);
    // compute newly stable dots; nothing to do when there are none
    let stable = self.gc_track.stable();
    if stable.is_empty() {
        return;
    }
    // create `ToForward` to self
    self.to_processes.push(Action::ToForward {
        msg: Message::MStable { stable },
    });
}
/// Handles an `MStable`: garbage-collects the info of every dot covered by
/// the stable ranges and reports the collected count to the metrics.
/// This message is always a self-forward (see `handle_mgc`), hence the
/// assert on `from`.
fn handle_mstable(
    &mut self,
    from: ProcessId,
    stable: Vec<(ProcessId, u64, u64)>,
    _time: &dyn SysTime,
) {
    trace!(
        "p{}: MStable({:?}) from {} | time={}",
        self.id(),
        stable,
        from,
        _time.micros()
    );
    // `MStable` must come from self
    assert_eq!(from, self.bp.process_id);
    let stable_count = self.cmds.gc(stable);
    self.bp.stable(stable_count);
}
/// Periodic GC event: broadcasts our committed frontier to every other
/// process so peers can advance their GC state.
fn handle_event_garbage_collection(&mut self, _time: &dyn SysTime) {
    trace!(
        "p{}: PeriodicEvent::GarbageCollection | time={}",
        self.id(),
        _time.micros()
    );
    // build the message from the committed clock's frontier
    let msg = Message::MGarbageCollection {
        committed: self.gc_track.clock().frontier(),
    };
    // send it to everyone but us
    self.to_processes.push(Action::ToSend {
        target: self.bp.all_but_me(),
        msg,
    });
}
/// Generates the commit actions for a chosen consensus value, delegating to
/// the shared `partial` helper which picks between a direct `MCommit`
/// (single shard) and an `MShardCommit` round (multiple shards).
fn mcommit_actions(
    bp: &BaseProcess,
    info: &mut AtlasInfo,
    shard_count: usize,
    dot: Dot,
    value: ConsensusValue,
    to_processes: &mut Vec<Action<Self>>,
) {
    // single-shard commit: the consensus value travels as-is
    let build_mcommit = |dot, value, ()| Message::MCommit { dot, value };
    // cross-shard commit: only the dependencies travel
    let build_mshard_commit =
        |dot, value: ConsensusValue| Message::MShardCommit {
            dot,
            deps: value.deps,
        };
    // atlas keeps no extra per-shard commit state, so the update is a no-op
    let no_update = |_: &mut HashSet<Dependency>, ()| {};
    partial::mcommit_actions(
        bp,
        &mut info.shards_commits,
        shard_count,
        dot,
        value,
        (),
        build_mcommit,
        build_mshard_commit,
        no_update,
        to_processes,
    )
}
/// Whether garbage collection is active: GC runs iff a collection interval
/// was configured.
fn gc_running(&self) -> bool {
    matches!(self.bp.config.gc_interval(), Some(_))
}
}
/// Consensus value agreed upon for a command: a flag indicating whether this
/// is a noop, and the command's set of dependencies.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct ConsensusValue {
    // whether this value represents a noop; always `false` here (noops would
    // only be proposed during recovery, which is not implemented yet)
    is_noop: bool,
    // identifiers of the commands this command depends on
    deps: HashSet<Dependency>,
}
impl ConsensusValue {
    /// Initial (bottom) value: not a noop, no dependencies.
    fn bottom() -> Self {
        Self::with(HashSet::new())
    }

    /// Non-noop value carrying the given dependency set.
    fn with(deps: HashSet<Dependency>) -> Self {
        Self {
            is_noop: false,
            deps,
        }
    }
}
// proposal generation is only needed during recovery, which is not supported
// yet; reaching this function is therefore a (deliberate) panic
fn proposal_gen(_values: HashMap<ProcessId, ConsensusValue>) -> ConsensusValue {
    todo!("recovery not implemented yet")
}
// `AtlasInfo` contains all information required in the life-cycle of a
// `Command`
#[derive(Debug, Clone)]
struct AtlasInfo {
    // current phase of the command (see `Status`)
    status: Status,
    // fast quorum chosen by the coordinator; empty until the `MCollect`
    quorum: HashSet<ProcessId>,
    // consensus (synod) state used by the slow path
    synod: Synod<ConsensusValue>,
    // `None` if not set yet
    cmd: Option<Command>,
    // `quorum_deps` is used by the coordinator to compute the threshold
    // deps when deciding whether to take the fast path
    quorum_deps: QuorumDeps,
    // `shard_commits` is only used when commands accessed more than one shard
    shards_commits: Option<ShardsCommits<HashSet<Dependency>>>,
}
impl Info for AtlasInfo {
    /// Creates the initial per-command info: `START` status, empty quorum,
    /// a synod seeded with the bottom consensus value, and no payload.
    fn new(
        process_id: ProcessId,
        _shard_id: ShardId,
        n: usize,
        f: usize,
        fast_quorum_size: usize,
        _write_quorum_size: usize,
    ) -> Self {
        Self {
            status: Status::START,
            quorum: HashSet::new(),
            // the synod starts from the bottom consensus value
            synod: Synod::new(
                process_id,
                n,
                f,
                proposal_gen,
                ConsensusValue::bottom(),
            ),
            cmd: None,
            quorum_deps: QuorumDeps::new(fast_quorum_size),
            shards_commits: None,
        }
    }
}
// `Atlas` protocol messages
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum Message {
    // Protocol messages
    /// Coordinator's initial broadcast: command payload, its dependencies and
    /// the chosen fast quorum.
    MCollect {
        dot: Dot,
        cmd: Command,
        deps: HashSet<Dependency>,
        quorum: HashSet<ProcessId>,
    },
    /// Fast-quorum member's reply with locally-computed dependencies.
    MCollectAck {
        dot: Dot,
        deps: HashSet<Dependency>,
    },
    /// Commit notification carrying the final consensus value.
    MCommit {
        dot: Dot,
        value: ConsensusValue,
    },
    /// Slow-path accept with a ballot and the value to be agreed on.
    MConsensus {
        dot: Dot,
        ballot: u64,
        value: ConsensusValue,
    },
    /// Reply accepting an `MConsensus` at the given ballot.
    MConsensusAck {
        dot: Dot,
        ballot: u64,
    },
    // Partial replication messages
    MForwardSubmit {
        dot: Dot,
        cmd: Command,
    },
    /// Per-shard commit (sent when a command spans multiple shards).
    MShardCommit {
        dot: Dot,
        deps: HashSet<Dependency>,
    },
    /// Union of all shards' dependencies after aggregation.
    MShardAggregatedCommit {
        dot: Dot,
        deps: HashSet<Dependency>,
    },
    // GC messages
    /// Self-forward notifying the GC worker of a committed dot.
    MCommitDot {
        dot: Dot,
    },
    /// Periodic exchange of committed clocks between processes.
    MGarbageCollection {
        committed: VClock<ProcessId>,
    },
    /// Self-forward with the ranges of dots that became stable.
    MStable {
        stable: Vec<(ProcessId, u64, u64)>,
    },
}
impl MessageIndex for Message {
    /// Routes each message to a worker: protocol and partial-replication
    /// messages are indexed by their dot (so all messages about the same
    /// command reach the same worker), GC messages go to the dedicated GC
    /// worker, and `MStable` carries no index.
    fn index(&self) -> Option<(usize, usize)> {
        use fantoch::load_balance::{
            worker_dot_index_shift, worker_index_no_shift, GC_WORKER_INDEX,
        };
        match self {
            // Protocol messages, indexed by dot
            // (note: `dot` is already a `&Dot` here, so it is passed directly
            // instead of the needlessly double-borrowed `&dot`)
            Self::MCollect { dot, .. }
            | Self::MCollectAck { dot, .. }
            | Self::MCommit { dot, .. }
            | Self::MConsensus { dot, .. }
            | Self::MConsensusAck { dot, .. }
            // Partial replication messages, same routing rule
            | Self::MForwardSubmit { dot, .. }
            | Self::MShardCommit { dot, .. }
            | Self::MShardAggregatedCommit { dot, .. } => {
                worker_dot_index_shift(dot)
            }
            // GC messages, handled by the GC worker
            Self::MCommitDot { .. } | Self::MGarbageCollection { .. } => {
                worker_index_no_shift(GC_WORKER_INDEX)
            }
            // `MStable` has no worker index
            Self::MStable { .. } => None,
        }
    }
}
/// Periodic events fired by the runtime.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PeriodicEvent {
    /// Triggers the periodic exchange of committed clocks
    /// (see `handle_event_garbage_collection`).
    GarbageCollection,
}
impl MessageIndex for PeriodicEvent {
    /// Every periodic event is handled by the GC worker.
    fn index(&self) -> Option<(usize, usize)> {
        use fantoch::load_balance::{worker_index_no_shift, GC_WORKER_INDEX};
        // single-variant enum: the pattern is irrefutable
        let Self::GarbageCollection = self;
        worker_index_no_shift(GC_WORKER_INDEX)
    }
}
/// `Status` of commands.
#[derive(Debug, Clone, PartialEq, Eq)]
enum Status {
    /// Initial state: nothing is known about the command yet.
    START,
    /// Payload saved by a process that is not part of the fast quorum.
    PAYLOAD,
    /// Collect phase in progress at a fast-quorum member.
    COLLECT,
    /// The command's consensus value has been chosen.
    COMMIT,
}
#[cfg(test)]
mod tests {
use super::*;
use fantoch::client::{Client, KeyGen, Workload};
use fantoch::executor::Executor;
use fantoch::planet::{Planet, Region};
use fantoch::sim::Simulation;
use fantoch::time::SimTime;
// runs the full protocol flow with the sequential key-deps implementation
#[test]
fn sequential_atlas_test() {
    atlas_flow::<SequentialKeyDeps>()
}
// runs the full protocol flow with the lock-based key-deps implementation
#[test]
fn locked_atlas_test() {
    atlas_flow::<LockedKeyDeps>()
}
fn atlas_flow<KD: KeyDeps>() {
// create simulation
let mut simulation = Simulation::new();
// processes ids
let process_id_1 = 1;
let process_id_2 = 2;
let process_id_3 = 3;
// regions
let europe_west2 = Region::new("europe-west2");
let europe_west3 = Region::new("europe-west2");
let us_west1 = Region::new("europe-west2");
// there's a single shard
let shard_id = 0;
// processes
let processes = vec![
(process_id_1, shard_id, europe_west2.clone()),
(process_id_2, shard_id, europe_west3.clone()),
(process_id_3, shard_id, us_west1.clone()),
];
// planet
let planet = Planet::new();
// create system time
let time = SimTime::new();
// n and f
let n = 3;
let f = 1;
let config = Config::new(n, f);
// executors
let executor_1 = GraphExecutor::new(process_id_1, shard_id, config);
let executor_2 = GraphExecutor::new(process_id_2, shard_id, config);
let executor_3 = GraphExecutor::new(process_id_3, shard_id, config);
// atlas
let (mut atlas_1, _) = Atlas::<KD>::new(process_id_1, shard_id, config);
let (mut atlas_2, _) = Atlas::<KD>::new(process_id_2, shard_id, config);
let (mut atlas_3, _) = Atlas::<KD>::new(process_id_3, shard_id, config);
// discover processes in all atlas
let sorted = fantoch::util::sort_processes_by_distance(
&europe_west2,
&planet,
processes.clone(),
);
atlas_1.discover(sorted);
let sorted = fantoch::util::sort_processes_by_distance(
&europe_west3,
&planet,
processes.clone(),
);
atlas_2.discover(sorted);
let sorted = fantoch::util::sort_processes_by_distance(
&us_west1,
&planet,
processes.clone(),
);
atlas_3.discover(sorted);
// register processes
simulation.register_process(atlas_1, executor_1);
simulation.register_process(atlas_2, executor_2);
simulation.register_process(atlas_3, executor_3);
// client workload
let shard_count = 1;
let key_gen = KeyGen::ConflictPool {
conflict_rate: 100,
pool_size: 1,
};
let keys_per_command = 1;
let commands_per_client = 10;
let payload_size = 100;
let workload = Workload::new(
shard_count,
key_gen,
keys_per_command,
commands_per_client,
payload_size,
);
// create client 1 that is connected to atlas 1
let client_id = 1;
let client_region = europe_west2.clone();
let status_frequency = None;
let mut client_1 = Client::new(client_id, workload, status_frequency);
// discover processes in client 1
let closest = fantoch::util::closest_process_per_shard(
&client_region,
&planet,
processes,
);
client_1.connect(closest);
// start client
let (target_shard, cmd) = client_1
.cmd_send(&time)
.expect("there should be a first operation");
let target = client_1.shard_process(&target_shard);
// check that `target` is atlas 1
assert_eq!(target, process_id_1);
// register client
simulation.register_client(client_1);
// register command in executor and submit it in atlas 1
let (process, _, pending, time) = simulation.get_process(target);
pending.wait_for(&cmd);
process.submit(None, cmd, time);
let mut actions: Vec<_> = process.to_processes_iter().collect();
// there's a single action
assert_eq!(actions.len(), 1);
let mcollect = actions.pop().unwrap();
// check that the mcollect is being sent to *all* processes
let check_target = |target: &HashSet<ProcessId>| target.len() == n;
assert!(
matches!(mcollect.clone(), Action::ToSend{target, ..} if check_target(&target))
);
// handle mcollects
let mut mcollectacks =
simulation.forward_to_processes((process_id_1, mcollect));
// check that there are 2 mcollectacks
assert_eq!(mcollectacks.len(), 2 * f);
// handle the first mcollectack
let mcommits = simulation.forward_to_processes(
mcollectacks.pop().expect("there should be an mcollect ack"),
);
// no mcommit yet
assert!(mcommits.is_empty());
// handle the second mcollectack
let mut mcommits = simulation.forward_to_processes(
mcollectacks.pop().expect("there should be an mcollect ack"),
);
// there's a commit now
assert_eq!(mcommits.len(), 1);
// check that the mcommit is sent to everyone
let mcommit = mcommits.pop().expect("there should be an mcommit");
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | true |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/caesar.rs | fantoch_ps/src/protocol/caesar.rs | use crate::executor::{PredecessorsExecutionInfo, PredecessorsExecutor};
use crate::protocol::common::pred::{
CaesarDeps, Clock, KeyClocks, LockedKeyClocks, QuorumClocks, QuorumRetries,
};
use fantoch::command::Command;
use fantoch::config::Config;
use fantoch::id::{Dot, ProcessId, ShardId};
use fantoch::protocol::{
Action, BaseProcess, BasicGCTrack, CommittedAndExecuted, Info,
LockedCommandsInfo, MessageIndex, Protocol, ProtocolMetrics,
ProtocolMetricsKind,
};
use fantoch::time::SysTime;
use fantoch::{singleton, trace};
use fantoch::{HashMap, HashSet};
use parking_lot::MutexGuard;
use serde::{Deserialize, Deserializer, Serialize};
use std::mem;
use std::sync::Arc;
use std::time::Duration;
/// `Caesar` instantiated with the lock-based key-clocks implementation.
pub type CaesarLocked = Caesar<LockedKeyClocks>;
/// State of a `Caesar` process, generic over the key-clocks implementation.
#[derive(Debug, Clone)]
pub struct Caesar<KC: KeyClocks> {
    // common process state (ids, config, quorums, metrics)
    bp: BaseProcess,
    // per-key clock bookkeeping used to compute timestamps and predecessors
    key_clocks: KC,
    // per-command info, behind locks so it can be shared across workers
    cmds: LockedCommandsInfo<CaesarInfo>,
    // garbage-collection tracker
    gc_track: BasicGCTrack,
    // running count of committed dots (reported by the executor)
    committed_dots: u64,
    // running count of executed dots (reported by the executor)
    executed_dots: u64,
    // dots of new commands executed
    new_executed_dots: Vec<Dot>,
    // outgoing actions destined to other processes
    to_processes: Vec<Action<Self>>,
    // outgoing execution info destined to the executors
    to_executors: Vec<PredecessorsExecutionInfo>,
    // retry requests that arrived before the initial `MPropose` message
    // (this may be possible even without network failures due to multiplexing)
    buffered_retries: HashMap<Dot, (ProcessId, Clock, CaesarDeps)>,
    // commit notifications that arrived before the initial `MPropose` message
    // (this may be possible even without network failures due to multiplexing)
    buffered_commits: HashMap<Dot, (ProcessId, Clock, CaesarDeps)>,
    // `try_to_unblock` calls to be repeated
    try_to_unblock_again: Vec<(Dot, Clock, Arc<CaesarDeps>, HashSet<Dot>)>,
    // whether the wait condition is enabled (read from the config)
    wait_condition: bool,
}
impl<KC: KeyClocks> Protocol for Caesar<KC> {
    type Message = Message;
    type PeriodicEvent = PeriodicEvent;
    type Executor = PredecessorsExecutor;

    /// Creates a new `Caesar` process, along with the periodic events it
    /// needs (currently only GC, and only when a GC interval is configured).
    fn new(
        process_id: ProcessId,
        shard_id: ShardId,
        config: Config,
    ) -> (Self, Vec<(Self::PeriodicEvent, Duration)>) {
        // compute fast and write quorum sizes
        let (fast_quorum_size, write_quorum_size) =
            config.caesar_quorum_sizes();
        // create protocol data-structures
        let bp = BaseProcess::new(
            process_id,
            shard_id,
            config,
            fast_quorum_size,
            write_quorum_size,
        );
        let key_clocks = KC::new(process_id, shard_id);
        let f = Self::allowed_faults(config.n());
        let cmds = LockedCommandsInfo::new(
            process_id,
            shard_id,
            config.n(),
            f,
            fast_quorum_size,
            write_quorum_size,
        );
        let gc_track = BasicGCTrack::new(config.n());
        let committed_dots = 0;
        let executed_dots = 0;
        let new_executed_dots = Vec::new();
        let to_processes = Vec::new();
        let to_executors = Vec::new();
        let buffered_retries = HashMap::new();
        let buffered_commits = HashMap::new();
        let try_to_unblock_again = Vec::new();
        let wait_condition = config.caesar_wait_condition();
        // create `Caesar`
        let protocol = Self {
            bp,
            key_clocks,
            cmds,
            gc_track,
            committed_dots,
            executed_dots,
            new_executed_dots,
            to_processes,
            to_executors,
            buffered_retries,
            buffered_commits,
            try_to_unblock_again,
            wait_condition,
        };
        // create periodic events
        let events = if let Some(interval) = config.gc_interval() {
            vec![(PeriodicEvent::GarbageCollection, interval)]
        } else {
            vec![]
        };
        // return both
        (protocol, events)
    }

    /// Returns the process identifier.
    fn id(&self) -> ProcessId {
        self.bp.process_id
    }

    /// Returns the shard identifier.
    fn shard_id(&self) -> ShardId {
        self.bp.shard_id
    }

    /// Updates the processes known by this process.
    /// The set of processes provided is already sorted by distance.
    fn discover(
        &mut self,
        processes: Vec<(ProcessId, ShardId)>,
    ) -> (bool, HashMap<ShardId, ProcessId>) {
        let connect_ok = self.bp.discover(processes);
        (connect_ok, self.bp.closest_shard_process().clone())
    }

    /// Submits a command issued by some client.
    fn submit(&mut self, dot: Option<Dot>, cmd: Command, _time: &dyn SysTime) {
        self.handle_submit(dot, cmd);
    }

    /// Handles protocol messages, dispatching by message variant. After every
    /// message, pending `try_to_unblock` calls are retried, since the message
    /// just handled may have made new commands unblockable.
    fn handle(
        &mut self,
        from: ProcessId,
        _from_shard_id: ShardId,
        msg: Self::Message,
        time: &dyn SysTime,
    ) {
        match msg {
            Message::MPropose { dot, cmd, clock } => {
                self.handle_mpropose(from, dot, cmd, clock, time)
            }
            Message::MProposeAck {
                dot,
                clock,
                deps,
                ok,
            } => self.handle_mproposeack(from, dot, clock, deps, ok, time),
            Message::MCommit { dot, clock, deps } => {
                self.handle_mcommit(from, dot, clock, deps, time)
            }
            Message::MRetry { dot, clock, deps } => {
                self.handle_mretry(from, dot, clock, deps, time)
            }
            Message::MRetryAck { dot, deps } => {
                self.handle_mretryack(from, dot, deps, time)
            }
            Message::MGarbageCollection { executed } => {
                self.handle_mgc(from, executed, time)
            }
            Message::MGCDot { dot } => self.handle_mgc_dot(dot, time),
        }
        // every time a new message is processed, try to unblock commands that
        // couldn't be unblocked in the previous attempt
        let try_to_unblock_again = mem::take(&mut self.try_to_unblock_again);
        for (dot, clock, deps, blocking) in try_to_unblock_again {
            self.try_to_unblock(dot, clock, deps, blocking, time)
        }
    }

    /// Handles periodic local events.
    fn handle_event(&mut self, event: Self::PeriodicEvent, time: &dyn SysTime) {
        match event {
            PeriodicEvent::GarbageCollection => {
                self.handle_event_garbage_collection(time)
            }
        }
    }

    /// Handles executor feedback: feeds each newly-executed dot into the GC
    /// tracker and updates the committed/executed counters.
    fn handle_executed(
        &mut self,
        committed_and_executed: CommittedAndExecuted,
        _time: &dyn SysTime,
    ) {
        trace!(
            "p{}: handle_committed_and_executed({:?}) | time={}",
            self.id(),
            committed_and_executed,
            _time.micros()
        );
        let (new_committed_dots, new_executed_dots) = committed_and_executed;
        // update committed and executed
        for dot in new_executed_dots.iter() {
            self.gc_track_add(*dot);
        }
        self.committed_dots += new_committed_dots;
        self.executed_dots += new_executed_dots.len() as u64;
        self.new_executed_dots.extend(new_executed_dots);
    }

    /// Returns a new action to be sent to other processes.
    fn to_processes(&mut self) -> Option<Action<Self>> {
        self.to_processes.pop()
    }

    /// Returns new execution info for executors.
    fn to_executors(&mut self) -> Option<PredecessorsExecutionInfo> {
        self.to_executors.pop()
    }

    /// Whether the protocol can run with parallel workers: delegated to the
    /// key-clocks implementation.
    fn parallel() -> bool {
        KC::parallel()
    }

    /// Caesar is leaderless.
    fn leaderless() -> bool {
        true
    }

    /// Returns the protocol metrics.
    fn metrics(&self) -> &ProtocolMetrics {
        self.bp.metrics()
    }
}
impl<KC: KeyClocks> Caesar<KC> {
/// Caesar always tolerates a minority of faults.
pub fn allowed_faults(n: usize) -> usize {
    // largest minority out of `n` processes: floor(n / 2)
    n.div_euclid(2)
}
/// Handles a submit operation by a client.
fn handle_submit(&mut self, dot: Option<Dot>, cmd: Command) {
    // assign an identifier to the command (unless one was provided)
    let dot = dot.unwrap_or_else(|| self.bp.next_dot());
    // take the next timestamp for it
    let clock = self.key_clocks.clock_next();
    // broadcast the proposal to everyone: due to the waiting condition, the
    // fastest fast quorum that replies with an ok may not be the closest
    // one, so we don't restrict the target to a fixed quorum
    self.to_processes.push(Action::ToSend {
        target: self.bp.all(),
        msg: Message::MPropose { dot, cmd, clock },
    });
}
/// Handles an `MPropose` from a coordinator: computes this process' view of
/// the command's predecessors and replies (via `accept_command` /
/// `reject_command`) or waits, depending on which commands are currently
/// blocking this one. The command's info lock is deliberately dropped and
/// re-acquired around the blocking check to avoid holding two command locks
/// at once.
fn handle_mpropose(
    &mut self,
    from: ProcessId,
    dot: Dot,
    cmd: Command,
    remote_clock: Clock,
    time: &dyn SysTime,
) {
    trace!(
        "p{}: MPropose({:?}, {:?}, {:?}) from {} | time={}",
        self.id(),
        dot,
        cmd,
        remote_clock,
        from,
        time.micros()
    );
    // we use the following assumption in `Self::send_mpropose_ack`
    assert_eq!(dot.source(), from);
    // merge clocks
    self.key_clocks.clock_join(&remote_clock);
    // get cmd info
    let info_ref = self.cmds.get_or_default(dot);
    let mut info = info_ref.lock();
    // discard message if no longer in START
    if info.status != Status::START {
        return;
    }
    // register start time if we're the coordinator
    // NOTE(review): given the `assert_eq!(dot.source(), from)` above, this
    // condition is always true, so the start time is recorded at *every*
    // process; presumably the intent was `dot.source() == self.bp.process_id`
    // — confirm (see the matching condition in `handle_mcommit`, which takes
    // this value back)
    if dot.source() == from {
        info.start_time_ms = Some(time.millis());
    }
    // if yes, compute set of predecessors
    let mut blocked_by = HashSet::new();
    let deps = self.key_clocks.predecessors(
        dot,
        &cmd,
        remote_clock,
        Some(&mut blocked_by),
    );
    // update command info
    info.status = Status::PROPOSE_BEGIN;
    info.cmd = Some(cmd);
    info.deps = Arc::new(deps);
    Self::update_clock(&mut self.key_clocks, dot, &mut info, remote_clock);
    // save command's clock and update `blocked_by` before unlocking it
    let clock = info.clock;
    info.blocked_by = blocked_by.clone();
    let blocked_by_len = blocked_by.len();
    drop(info);
    drop(info_ref);
    // decision tracks what we should do in the end, after iterating each
    // of the commands that is blocking us
    #[derive(PartialEq, Eq, Debug)]
    enum Reply {
        ACCEPT,
        REJECT,
        WAIT,
    }
    let mut reply = Reply::WAIT;
    let mut blocked_by_to_ignore = HashSet::new();
    // we send an ok if no command is blocking this command
    let ok = blocked_by.is_empty();
    if ok {
        reply = Reply::ACCEPT;
    } else if !self.wait_condition {
        // if the wait condition is not enabled, reject right away
        reply = Reply::REJECT;
    } else {
        // if there are commands blocking us, iterate each of them and check
        // if they are still blocking us (in the meantime, they may have
        // been moved to the `ACCEPT` or `COMMIT` phase, and in that case we
        // might be able to ignore them)
        trace!(
            "p{}: MPropose({:?}) blocked by {:?} | time={}",
            self.id(),
            dot,
            blocked_by,
            time.micros()
        );
        for blocked_by_dot in blocked_by {
            if let Some(blocked_by_dot_ref) = self.cmds.get(blocked_by_dot)
            {
                // in this case, the command hasn't been GCed since we
                // got it from the key clocks, so we need to consider it
                let mut blocked_by_info = blocked_by_dot_ref.lock();
                // check whether this command has already safe clock and dep
                // values (i.e. safe for us to make a decision based on
                // them)
                let has_safe_clock_and_dep = matches!(
                    blocked_by_info.status,
                    Status::ACCEPT | Status::COMMIT
                );
                if has_safe_clock_and_dep {
                    // if the clock and dep are "good enough", check if we
                    // can ignore the command
                    let safe_to_ignore = Self::safe_to_ignore(
                        self.bp.process_id,
                        dot,
                        clock,
                        blocked_by_info.clock,
                        &blocked_by_info.deps,
                        time,
                    );
                    trace!(
                        "p{}: MPropose({:?}) safe to ignore {:?}: {:?} | time={}",
                        self.bp.process_id,
                        dot,
                        blocked_by_dot,
                        safe_to_ignore,
                        time.micros()
                    );
                    if safe_to_ignore {
                        // the command can be ignored, and so we register
                        // that this command is in fact not blocking our
                        // command
                        blocked_by_to_ignore.insert(blocked_by_dot);
                    } else {
                        // if there's a single command that can't be
                        // ignored, our command must be rejected, and so we
                        // `break` as there's no point in checking all the
                        // other commands
                        reply = Reply::REJECT;
                        break;
                    }
                } else {
                    // if the clock and dep are not safe yet, we're blocked
                    // by this command until they are
                    trace!(
                        "p{}: MPropose({:?}) still blocked by {:?} | time={}",
                        self.bp.process_id,
                        dot,
                        blocked_by_dot,
                        time.micros()
                    );
                    // register that this command is blocking our command
                    blocked_by_info.blocking.insert(dot);
                }
            } else {
                trace!(
                    "p{}: MPropose({:?}) no longer blocked by {:?} | time={}",
                    self.bp.process_id,
                    dot,
                    blocked_by_dot,
                    time.micros()
                );
                // in this case, the command has been GCed, and for that
                // reason we simply record that it can be ignored
                // (as it has already been executed at all processes)
                blocked_by_to_ignore.insert(blocked_by_dot);
            }
        }
        if blocked_by_to_ignore.len() == blocked_by_len {
            // if in the end it turns out that we're not blocked by any
            // command, accept this command:
            // - in this case, we must still have `Reply::WAIT`, or in other
            //   words, it can't be `Reply::REJECT`
            assert_eq!(reply, Reply::WAIT);
            reply = Reply::ACCEPT;
        }
    };
    trace!(
        "p{}: MPropose({:?}) decision {:?} | time={}",
        self.bp.process_id,
        dot,
        reply,
        time.micros()
    );
    // it's not possible that the command was GCed; for that, we would need
    // to have executed it, but that's just not possible, as only this
    // worker handles messages about this command; for this reason, we can
    // have the `expect` below
    let info_ref = self
        .cmds
        .get(dot)
        .expect("the command must not have been GCed in the meantime");
    let mut info = info_ref.lock();
    // for the same reason as above, the command phase must still be
    // `Status::PROPOSE_BEGIN`
    assert_eq!(info.status, Status::PROPOSE_BEGIN);
    // update it to `Status::PROPOSE_END`
    info.status = Status::PROPOSE_END;
    match reply {
        Reply::ACCEPT => Self::accept_command(
            self.bp.process_id,
            dot,
            &mut info,
            &mut self.to_processes,
            time,
        ),
        Reply::REJECT => Self::reject_command(
            self.bp.process_id,
            dot,
            &mut info,
            &mut self.key_clocks,
            &mut self.to_processes,
            time,
        ),
        Reply::WAIT => {
            // in this case, we simply update the set of commands we need to
            // wait for (since we may have decided to ignore some above)
            for to_ignore_dot in blocked_by_to_ignore {
                info.blocked_by.remove(&to_ignore_dot);
            }
            // after this, we must still be blocked by some command
            assert!(!info.blocked_by.is_empty());
            // save the current time as the moment where we started waiting
            info.wait_start_time_ms = Some(time.millis());
        }
    }
    drop(info);
    drop(info_ref);
    // check if there's a buffered retry request; if yes, handle the retry
    // again (since now we have the payload)
    if let Some((from, clock, deps)) = self.buffered_retries.remove(&dot) {
        self.handle_mretry(from, dot, clock, deps, time);
    }
    // check if there's a buffered commit notification; if yes, handle the
    // commit again (since now we have the payload)
    if let Some((from, clock, deps)) = self.buffered_commits.remove(&dot) {
        self.handle_mcommit(from, dot, clock, deps, time);
    }
}
/// Handles an `MProposeAck` at the coordinator: records the replier's clock,
/// dependencies and ok/reject vote, and once all quorum replies arrived,
/// either commits on the fast path (everyone said ok) or starts a retry
/// round on the slow path.
fn handle_mproposeack(
    &mut self,
    from: ProcessId,
    dot: Dot,
    clock: Clock,
    deps: CaesarDeps,
    ok: bool,
    _time: &dyn SysTime,
) {
    trace!(
        "p{}: MProposeAck({:?}, {:?}, {:?}, {:?}) from {} | time={}",
        self.id(),
        dot,
        clock,
        deps,
        ok,
        from,
        _time.micros()
    );
    // get cmd info
    let info_ref = self.cmds.get_or_default(dot);
    let mut info = info_ref.lock();
    // do nothing if we're no longer PROPOSE_END or REJECT (yes, it seems
    // that the coordinator can reject its own command; this case
    // was only occurring in the simulator, but with concurrency I
    // think it can happen in the runner as well, as it will be
    // tricky to ensure a level of atomicity where the coordinator
    // never rejects its own command):
    // - this ensures that once an MCommit/MRetry is sent in this handler,
    //   further messages received are ignored
    // - we can check this by asserting that `info.quorum_clocks.all()` is
    //   false, before adding any new info, as we do below
    if !matches!(info.status, Status::PROPOSE_END | Status::REJECT) {
        return;
    }
    if info.quorum_clocks.all() {
        panic!(
            "p{}: {:?} already had all MProposeAck needed",
            self.bp.process_id, dot
        );
    }
    // update quorum deps
    info.quorum_clocks.add(from, clock, deps, ok);
    // check if we have all necessary replies
    if info.quorum_clocks.all() {
        // if yes, get the aggregated results
        let (aggregated_clock, aggregated_deps, fast_path) =
            info.quorum_clocks.aggregated();
        // fast path metrics
        let cmd = info.cmd.as_ref().unwrap();
        self.bp.path(fast_path, cmd.read_only());
        // fast path condition: all processes reported ok
        if fast_path {
            // in this case, all processes have accepted the proposal by the
            // coordinator; check that that's the case
            assert_eq!(aggregated_clock, info.clock);
            // fast path: create `MCommit`
            let mcommit = Message::MCommit {
                dot,
                clock: aggregated_clock,
                deps: aggregated_deps,
            };
            let target = self.bp.all();
            // save new action
            self.to_processes.push(Action::ToSend {
                target,
                msg: mcommit,
            });
        } else {
            // slow path: create `MRetry`
            let mconsensus = Message::MRetry {
                dot,
                clock: aggregated_clock,
                deps: aggregated_deps,
            };
            // here we send to everyone because this message may unblock
            // blocked commands; by only sending it to a majority, we would
            // potentially block commands unnecessarily
            let target = self.bp.all();
            // save new action
            self.to_processes.push(Action::ToSend {
                target,
                msg: mconsensus,
            });
        }
    }
}
/// Handles an `MCommit`: records the final clock and dependencies, forwards
/// the command to the executors, tries to unblock any commands waiting on
/// this one, and garbage-collects the dot's info when GC isn't running.
/// Commits arriving before the `MPropose` are buffered for later.
fn handle_mcommit(
    &mut self,
    from: ProcessId,
    dot: Dot,
    clock: Clock,
    mut deps: CaesarDeps,
    time: &dyn SysTime,
) {
    trace!(
        "p{}: MCommit({:?}, {:?}, {:?}) from {} | time={}",
        self.id(),
        dot,
        clock,
        deps,
        from,
        time.micros()
    );
    // merge clocks
    self.key_clocks.clock_join(&clock);
    // get cmd info
    let info_ref = self.cmds.get_or_default(dot);
    let mut info = info_ref.lock();
    if info.status == Status::START {
        // save this notification just in case we've received the `MPropose`
        // and `MCommit` in opposite orders (due to multiplexing)
        self.buffered_commits.insert(dot, (from, clock, deps));
        return;
    }
    if info.status == Status::COMMIT {
        // do nothing if we're already COMMIT
        return;
    }
    // register commit time if we're the coordinator
    // NOTE(review): `from` is the `MCommit` sender, so when the coordinator
    // broadcasts the commit this condition holds at *every* receiver, not
    // only at the coordinator; presumably the intent was
    // `dot.source() == self.bp.process_id` — confirm (and see the matching
    // condition in `handle_mpropose`, which sets `start_time_ms`)
    if dot.source() == from {
        let start_time_ms = info.start_time_ms.take().expect(
            "the command should have been started by its coordinator",
        );
        let end_time_ms = time.millis();
        // compute commit latency and collect this metric
        let commit_latency = end_time_ms - start_time_ms;
        self.bp.collect_metric(
            ProtocolMetricsKind::CommitLatency,
            commit_latency,
        );
    }
    // register deps len
    self.bp.collect_metric(
        ProtocolMetricsKind::CommittedDepsLen,
        deps.len() as u64,
    );
    // it's possible that a command ends up depending on itself;
    // the executor assumes that that is not the case, so we remove it right
    // away, before forwarding the command to the executor
    deps.remove(&dot);
    // update command info:
    info.status = Status::COMMIT;
    info.deps = Arc::new(deps);
    Self::update_clock(&mut self.key_clocks, dot, &mut info, clock);
    // create execution info
    let cmd = info.cmd.clone().expect("there should be a command payload");
    let execution_info =
        PredecessorsExecutionInfo::new(dot, cmd, clock, info.deps.clone());
    self.to_executors.push(execution_info);
    // take the set of commands that this command is blocking and try to
    // unblock them
    let blocking = std::mem::take(&mut info.blocking);
    let deps = info.deps.clone();
    drop(info);
    drop(info_ref);
    self.try_to_unblock(dot, clock, deps, blocking, time);
    // if we're not running gc, remove the dot info now
    if !self.gc_running() {
        self.gc_command(dot);
    }
}
/// Handles an `MRetry` (slow-path) message: moves the command to `ACCEPT`
/// with the coordinator-chosen clock, computes this replica's predecessors,
/// and replies to the coordinator with an `MRetryAck` carrying the merged
/// dependency set.
///
/// Like `handle_mcommit`, a retry that arrives before the `MPropose`
/// (status `START`) is buffered and replayed later; a retry for an
/// already-committed command is ignored.
fn handle_mretry(
    &mut self,
    from: ProcessId,
    dot: Dot,
    clock: Clock,
    deps: CaesarDeps,
    time: &dyn SysTime,
) {
    trace!(
        "p{}: MRetry({:?}, {:?}, {:?}) from {} | time={}",
        self.id(),
        dot,
        clock,
        deps,
        from,
        time.micros()
    );
    // merge clocks
    self.key_clocks.clock_join(&clock);
    // get cmd info
    let info_ref = self.cmds.get_or_default(dot);
    let mut info = info_ref.lock();
    if info.status == Status::START {
        // save this notification just in case we've received the `MPropose`
        // and `MRetry` in opposite orders (due to multiplexing)
        self.buffered_retries.insert(dot, (from, clock, deps));
        return;
    }
    if info.status == Status::COMMIT {
        // do nothing if we're already COMMIT
        return;
    }
    // update command info:
    info.status = Status::ACCEPT;
    info.deps = Arc::new(deps.clone());
    Self::update_clock(&mut self.key_clocks, dot, &mut info, clock);
    // compute new set of predecessors for the command
    let cmd = info.cmd.as_ref().expect("command has been set");
    // no blocking set here: the slow path does not wait on higher clocks
    let blocking = None;
    let mut new_deps =
        self.key_clocks.predecessors(dot, cmd, clock, blocking);
    // aggregate with incoming deps
    new_deps.merge(deps);
    // create message and target
    let msg = Message::MRetryAck {
        dot,
        deps: new_deps,
    };
    let target = singleton![from];
    // save new action
    self.to_processes.push(Action::ToSend { target, msg });
    // take the set of commands that this command is blocking and try to
    // unblock them
    let blocking = std::mem::take(&mut info.blocking);
    let deps = info.deps.clone();
    // release the per-command lock before touching other commands' info
    drop(info);
    drop(info_ref);
    self.try_to_unblock(dot, clock, deps, blocking, time);
}
/// Handles an `MRetryAck` from a replica: accumulates the reported
/// dependencies and, once all expected acks have arrived, broadcasts the
/// final `MCommit` with the aggregated dependency set.
fn handle_mretryack(
    &mut self,
    from: ProcessId,
    dot: Dot,
    deps: CaesarDeps,
    _time: &dyn SysTime,
) {
    trace!(
        "p{}: MRetryAck({:?}, {:?}) from {} | time={}",
        self.id(),
        dot,
        deps,
        from,
        _time.micros()
    );
    // get cmd info
    let info_ref = self.cmds.get_or_default(dot);
    let mut info = info_ref.lock();
    // do nothing if we're no longer ACCEPT:
    // - this ensures that once an MCommit is sent in this handler, further
    //   messages received are ignored
    // - we can check this by asserting that `info.quorum_retries.all()` is
    //   false, before adding any new info, as we do below
    if info.status != Status::ACCEPT {
        return;
    }
    if info.quorum_retries.all() {
        panic!(
            "p{}: {:?} already had all MRetryAck needed",
            self.bp.process_id, dot
        );
    }
    // update quorum retries
    info.quorum_retries.add(from, deps);
    // check if we have all necessary replies
    if info.quorum_retries.all() {
        // if yes, get the aggregated results
        let aggregated_deps = info.quorum_retries.aggregated();
        // create message and target
        let mcommit = Message::MCommit {
            dot,
            clock: info.clock,
            deps: aggregated_deps,
        };
        let target = self.bp.all();
        // save new action
        self.to_processes.push(Action::ToSend {
            target,
            msg: mcommit,
        });
    }
}
/// Handles an `MGarbageCollection` message carrying the dots executed at
/// some peer: each dot is fed into the gc tracker, which may mark dots as
/// stable (see `gc_track_add`).
fn handle_mgc(
    &mut self,
    _from: ProcessId,
    executed: Vec<Dot>,
    _time: &dyn SysTime,
) {
    trace!(
        "p{}: MGarbageCollection({:?}) from {} | time={}",
        self.id(),
        executed,
        _from,
        _time.micros()
    );
    // update gc track and compute newly stable dots
    for dot in executed {
        self.gc_track_add(dot);
    }
}
/// Feeds `dot` into the gc tracker; if the dot became stable as a result,
/// forwards an `MGCDot` to self so the responsible worker collects it.
fn gc_track_add(&mut self, dot: Dot) {
    if self.gc_track.add(dot) {
        let msg = Message::MGCDot { dot };
        self.to_processes.push(Action::ToForward { msg });
    }
}
/// Handles an `MGCDot` (self-forwarded for a stable dot): removes all info
/// kept for the dot and records one stable command in the metrics.
fn handle_mgc_dot(&mut self, dot: Dot, _time: &dyn SysTime) {
    trace!(
        "p{}: MGCDot({:?}) | time={}",
        self.id(),
        dot,
        _time.micros()
    );
    self.gc_command(dot);
    self.bp.stable(1);
}
/// Periodic gc event: broadcasts the dots executed locally since the last
/// event (draining `new_executed_dots`) to every other process.
fn handle_event_garbage_collection(&mut self, _time: &dyn SysTime) {
    trace!(
        "p{}: PeriodicEvent::GarbageCollection | time={}",
        self.id(),
        _time.micros()
    );
    trace!(
        "p{}: COMMITTED {:>20} EXECUTED {:>20} EXISTING {:>20}",
        self.id(),
        self.committed_dots,
        self.executed_dots,
        self.cmds.len()
    );
    // retrieve the executed dots
    // (take resets the buffer, so each dot is only broadcast once)
    let executed = std::mem::take(&mut self.new_executed_dots);
    // save new action
    self.to_processes.push(Action::ToSend {
        target: self.bp.all_but_me(),
        msg: Message::MGarbageCollection { executed },
    });
}
/// Whether garbage collection is enabled: a gc interval in the config means
/// the gc periodic event has been scheduled.
fn gc_running(&self) -> bool {
    self.bp.config.gc_interval().is_some()
}
/// Replaces the clock registered for `dot` in `key_clocks` with `new_clock`
/// and records the new clock in the command's info.
///
/// Takes `key_clocks` and the locked `info` separately (instead of `&mut
/// self`) so callers can hold the per-command lock while updating.
fn update_clock(
    key_clocks: &mut KC,
    dot: Dot,
    info: &mut MutexGuard<'_, CaesarInfo>,
    new_clock: Clock,
) {
    // get the command
    let cmd = info.cmd.as_ref().expect("command has been set");
    // remove previous clock (if any)
    Self::remove_clock(key_clocks, cmd, info.clock);
    // add new clock to key clocks
    key_clocks.add(dot, &cmd, new_clock);
    // finally update the clock
    info.clock = new_clock;
}
/// Removes `clock` (registered for `cmd`) from `key_clocks`, but only if it
/// was ever added: a zero clock means the command was never registered.
fn remove_clock(key_clocks: &mut KC, cmd: &Command, clock: Clock) {
    if !clock.is_zero() {
        key_clocks.remove(cmd, clock);
    }
}
/// Garbage-collects a single command: drops its stored info and removes its
/// clock from `key_clocks`. Panics if the dot is unknown, since the caller
/// is the only worker performing gc.
fn gc_command(&mut self, dot: Dot) {
    if let Some(info) = self.cmds.gc_single(&dot) {
        // get the command
        let cmd = info.cmd.expect("command has been set");
        // remove previous clock (if any)
        Self::remove_clock(&mut self.key_clocks, &cmd, info.clock);
    } else {
        panic!("we're the single worker performing gc, so all commands should exist");
    }
}
/// Decides whether a blocked command (`my_dot`, `my_clock`) may safely
/// ignore the command blocking it (`their_clock`, `their_deps`): it can,
/// iff it appears in the blocking command's dependencies.
fn safe_to_ignore(
    _id: ProcessId,
    my_dot: Dot,
    my_clock: Clock,
    their_clock: Clock,
    their_deps: &CaesarDeps,
    _time: &dyn SysTime,
) -> bool {
    trace!(
        "p{}: safe_to_ignore({:?}, {:?}, {:?}, {:?}) | time={}",
        _id,
        my_dot,
        my_clock,
        their_clock,
        their_deps,
        _time.micros()
    );
    // since clocks can only increase, the clock of the blocking command
    // must be higher than ours (otherwise it couldn't have been
    // reported as blocking in the first place)
    assert!(my_clock < their_clock);
    // since we (currently) have a lower clock than the command blocking us,
    // it is only safe to ignore it if we are included in its dependencies
    their_deps.contains(&my_dot)
}
fn try_to_unblock(
&mut self,
dot: Dot,
clock: Clock,
deps: Arc<CaesarDeps>,
blocking: HashSet<Dot>,
time: &dyn SysTime,
) {
trace!(
"p{}: try_to_unblock({:?}, {:?}, {:?}, {:?}) | time={}",
self.id(),
dot,
clock,
deps,
blocking,
time.micros()
);
// set of commands that are in the `PROPOSE_BEGIN` phase and can't be
// unblocked yet
let mut at_propose_begin = HashSet::new();
for blocked_dot in blocking {
trace!(
"p{}: try_to_unblock({:?}) checking {:?} | time={}",
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | true |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/epaxos.rs | fantoch_ps/src/protocol/epaxos.rs | use crate::executor::{GraphExecutionInfo, GraphExecutor};
use crate::protocol::common::graph::{
Dependency, KeyDeps, LockedKeyDeps, QuorumDeps, SequentialKeyDeps,
};
use crate::protocol::common::synod::{Synod, SynodMessage};
use fantoch::command::Command;
use fantoch::config::Config;
use fantoch::id::{Dot, ProcessId, ShardId};
use fantoch::protocol::{
Action, BaseProcess, Info, MessageIndex, Protocol, ProtocolMetrics,
SequentialCommandsInfo, VClockGCTrack,
};
use fantoch::time::SysTime;
use fantoch::{singleton, trace};
use fantoch::{HashMap, HashSet};
use serde::{Deserialize, Serialize};
use std::time::Duration;
use threshold::VClock;
/// `EPaxos` using the sequential (single-worker) key-deps implementation.
pub type EPaxosSequential = EPaxos<SequentialKeyDeps>;
/// `EPaxos` using the lock-based key-deps implementation (usable by
/// parallel workers).
pub type EPaxosLocked = EPaxos<LockedKeyDeps>;
/// EPaxos protocol process, generic over the key-dependency tracking
/// implementation `KD`.
#[derive(Debug, Clone)]
pub struct EPaxos<KD: KeyDeps> {
    // common protocol state (ids, config, quorums, metrics)
    bp: BaseProcess,
    // per-key dependency tracking used to compute a command's deps
    key_deps: KD,
    // per-dot command info (status, payload, synod instance, quorum deps)
    cmds: SequentialCommandsInfo<EPaxosInfo>,
    // tracks committed dots to detect stable (gc-able) ones
    gc_track: VClockGCTrack,
    // outgoing actions, drained via `to_processes`
    to_processes: Vec<Action<Self>>,
    // outgoing execution info, drained via `to_executors`
    to_executors: Vec<GraphExecutionInfo>,
    // commit notifications that arrived before the initial `MCollect` message
    // (this may be possible even without network failures due to multiplexing)
    buffered_commits: HashMap<Dot, (ProcessId, ConsensusValue)>,
}
impl<KD: KeyDeps> Protocol for EPaxos<KD> {
    type Message = Message;
    type PeriodicEvent = PeriodicEvent;
    type Executor = GraphExecutor;

    /// Creates a new `EPaxos` process.
    ///
    /// Also returns the periodic events to be scheduled: a single
    /// garbage-collection event, iff a gc interval is configured.
    fn new(
        process_id: ProcessId,
        shard_id: ShardId,
        config: Config,
    ) -> (Self, Vec<(Self::PeriodicEvent, Duration)>) {
        // compute fast and write quorum sizes
        let (fast_quorum_size, write_quorum_size) =
            config.epaxos_quorum_sizes();
        // create protocol data-structures
        let bp = BaseProcess::new(
            process_id,
            shard_id,
            config,
            fast_quorum_size,
            write_quorum_size,
        );
        let key_deps = KD::new(shard_id, config.nfr());
        // EPaxos always tolerates a minority of faults, regardless of
        // the configured `f`
        let f = Self::allowed_faults(config.n());
        let cmds = SequentialCommandsInfo::new(
            process_id,
            shard_id,
            config.n(),
            f,
            fast_quorum_size,
            write_quorum_size,
        );
        let gc_track = VClockGCTrack::new(process_id, shard_id, config.n());
        let to_processes = Vec::new();
        let to_executors = Vec::new();
        let buffered_commits = HashMap::new();
        // create `EPaxos`
        let protocol = Self {
            bp,
            key_deps,
            cmds,
            gc_track,
            to_processes,
            to_executors,
            buffered_commits,
        };
        // create periodic events
        let events = if let Some(interval) = config.gc_interval() {
            vec![(PeriodicEvent::GarbageCollection, interval)]
        } else {
            vec![]
        };
        // return both
        (protocol, events)
    }

    /// Returns the process identifier.
    fn id(&self) -> ProcessId {
        self.bp.process_id
    }

    /// Returns the shard identifier.
    fn shard_id(&self) -> ShardId {
        self.bp.shard_id
    }

    /// Updates the processes known by this process.
    /// The set of processes provided is already sorted by distance.
    fn discover(
        &mut self,
        processes: Vec<(ProcessId, ShardId)>,
    ) -> (bool, HashMap<ShardId, ProcessId>) {
        let connect_ok = self.bp.discover(processes);
        (connect_ok, self.bp.closest_shard_process().clone())
    }

    /// Submits a command issued by some client.
    fn submit(&mut self, dot: Option<Dot>, cmd: Command, _time: &dyn SysTime) {
        self.handle_submit(dot, cmd);
    }

    /// Handles protocol messages, dispatching to the per-message handlers.
    fn handle(
        &mut self,
        from: ProcessId,
        _from_shard_id: ShardId,
        msg: Self::Message,
        time: &dyn SysTime,
    ) {
        match msg {
            Message::MCollect {
                dot,
                cmd,
                quorum,
                deps,
            } => self.handle_mcollect(from, dot, cmd, quorum, deps, time),
            Message::MCollectAck { dot, deps } => {
                self.handle_mcollectack(from, dot, deps, time)
            }
            Message::MCommit { dot, value } => {
                self.handle_mcommit(from, dot, value, time)
            }
            Message::MConsensus { dot, ballot, value } => {
                self.handle_mconsensus(from, dot, ballot, value, time)
            }
            Message::MConsensusAck { dot, ballot } => {
                self.handle_mconsensusack(from, dot, ballot, time)
            }
            Message::MCommitDot { dot } => {
                self.handle_mcommit_dot(from, dot, time)
            }
            Message::MGarbageCollection { committed } => {
                self.handle_mgc(from, committed, time)
            }
            Message::MStable { stable } => {
                self.handle_mstable(from, stable, time)
            }
        }
    }

    /// Handles periodic local events.
    fn handle_event(&mut self, event: Self::PeriodicEvent, time: &dyn SysTime) {
        match event {
            PeriodicEvent::GarbageCollection => {
                self.handle_event_garbage_collection(time)
            }
        }
    }

    /// Returns a new action to be sent to other processes.
    fn to_processes(&mut self) -> Option<Action<Self>> {
        self.to_processes.pop()
    }

    /// Returns new execution info for executors.
    fn to_executors(&mut self) -> Option<GraphExecutionInfo> {
        self.to_executors.pop()
    }

    // parallelism support depends on the key-deps implementation
    fn parallel() -> bool {
        KD::parallel()
    }

    // EPaxos has no leader; any process can coordinate a command
    fn leaderless() -> bool {
        true
    }

    fn metrics(&self) -> &ProtocolMetrics {
        self.bp.metrics()
    }
}
impl<KD: KeyDeps> EPaxos<KD> {
    /// EPaxos always tolerates a minority of faults.
    pub fn allowed_faults(n: usize) -> usize {
        n / 2
    }

    /// Handles a submit operation by a client.
    ///
    /// Computes the command's dot and initial dependencies, then broadcasts
    /// an `MCollect` to all processes.
    fn handle_submit(&mut self, dot: Option<Dot>, cmd: Command) {
        // compute the command identifier
        let dot = dot.unwrap_or_else(|| self.bp.next_dot());
        // compute its deps
        let deps = self.key_deps.add_cmd(dot, &cmd, None);
        // create `MCollect` and target
        let quorum = self.bp.maybe_adjust_fast_quorum(&cmd);
        let mcollect = Message::MCollect {
            dot,
            cmd,
            deps,
            quorum,
        };
        let target = self.bp.all();
        // save new action
        self.to_processes.push(Action::ToSend {
            target,
            msg: mcollect,
        });
    }

    /// Handles an `MCollect`: saves the payload, and — if this process is in
    /// the fast quorum — computes its own dependencies and replies with an
    /// `MCollectAck` (except when the message came from self).
    fn handle_mcollect(
        &mut self,
        from: ProcessId,
        dot: Dot,
        cmd: Command,
        quorum: HashSet<ProcessId>,
        remote_deps: HashSet<Dependency>,
        time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MCollect({:?}, {:?}, {:?}) from {} | time={}",
            self.id(),
            dot,
            cmd,
            remote_deps,
            from,
            time.micros()
        );
        // get cmd info
        let info = self.cmds.get(dot);
        // discard message if no longer in START
        if info.status != Status::START {
            return;
        }
        // check if part of fast quorum
        if !quorum.contains(&self.bp.process_id) {
            // if not:
            // - simply save the payload and set status to `PAYLOAD`
            // - if we received the `MCommit` before the `MCollect`, handle the
            //   `MCommit` now
            info.status = Status::PAYLOAD;
            info.cmd = Some(cmd);
            // check if there's a buffered commit notification; if yes, handle
            // the commit again (since now we have the payload)
            if let Some((from, value)) = self.buffered_commits.remove(&dot) {
                self.handle_mcommit(from, dot, value, time);
            }
            return;
        }
        // check if it's a message from self
        let message_from_self = from == self.bp.process_id;
        let deps = if message_from_self {
            // if it is, do not recompute deps
            remote_deps
        } else {
            // otherwise, compute deps with the remote deps as past
            self.key_deps.add_cmd(dot, &cmd, Some(remote_deps))
        };
        // update command info
        info.status = Status::COLLECT;
        // See EPaxosInfo::new for the reason why the `-1` is needed
        info.quorum_deps
            .maybe_adjust_fast_quorum_size(quorum.len() - 1);
        info.quorum = quorum;
        info.cmd = Some(cmd);
        // create and set consensus value
        let value = ConsensusValue::with(deps.clone());
        assert!(info.synod.set_if_not_accepted(|| value));
        // create `MCollectAck` and target (only if not message from self)
        if !message_from_self {
            let mcollectack = Message::MCollectAck { dot, deps };
            let target = singleton![from];
            // save new action
            self.to_processes.push(Action::ToSend {
                target,
                msg: mcollectack,
            });
        }
    }

    /// Handles an `MCollectAck` at the coordinator: accumulates the deps
    /// reported by the fast quorum and, once all have replied, either commits
    /// on the fast path (all deps equal) or starts consensus on the slow path.
    fn handle_mcollectack(
        &mut self,
        from: ProcessId,
        dot: Dot,
        deps: HashSet<Dependency>,
        _time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MCollectAck({:?}, {:?}) from {} | time={}",
            self.id(),
            dot,
            deps,
            from,
            _time.micros()
        );
        // it can't be a ack from self (see the `MCollect` handler)
        assert_ne!(from, self.bp.process_id);
        // get cmd info
        let info = self.cmds.get(dot);
        // do nothing if we're no longer COLLECT
        if info.status != Status::COLLECT {
            return;
        }
        // update quorum deps
        info.quorum_deps.add(from, deps);
        // check if we have all necessary replies
        if info.quorum_deps.all() {
            // compute the union while checking whether all deps reported are
            // equal
            let (final_deps, fast_path) = info.quorum_deps.check_equal();
            // create consensus value
            let value = ConsensusValue::with(final_deps);
            // fast path metrics
            let cmd = info.cmd.as_ref().unwrap();
            self.bp.path(fast_path, cmd.read_only());
            if fast_path {
                // fast path: create `MCommit`
                let mcommit = Message::MCommit { dot, value };
                let target = self.bp.all();
                // save new action
                self.to_processes.push(Action::ToSend {
                    target,
                    msg: mcommit,
                });
            } else {
                // slow path: create `MConsensus`
                let ballot = info.synod.skip_prepare();
                let mconsensus = Message::MConsensus { dot, ballot, value };
                let target = self.bp.write_quorum();
                // save new action
                self.to_processes.push(Action::ToSend {
                    target,
                    msg: mconsensus,
                });
            }
        }
    }

    /// Handles an `MCommit`: records the chosen value in synod, forwards the
    /// command to the graph executor, and (if gc runs) notifies self with an
    /// `MCommitDot`. Commits arriving before the `MCollect` are buffered.
    fn handle_mcommit(
        &mut self,
        from: ProcessId,
        dot: Dot,
        value: ConsensusValue,
        _time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MCommit({:?}, {:?}) | time={}",
            self.id(),
            dot,
            value.deps,
            _time.micros()
        );
        // get cmd info
        let info = self.cmds.get(dot);
        if info.status == Status::START {
            // save this notification just in case we've received the `MCollect`
            // and `MCommit` in opposite orders (due to multiplexing)
            self.buffered_commits.insert(dot, (from, value));
            return;
        }
        if info.status == Status::COMMIT {
            // do nothing if we're already COMMIT
            return;
        }
        // check it's not a noop
        assert_eq!(
            value.is_noop, false,
            "handling noop's is not implemented yet"
        );
        // create execution info
        let cmd = info.cmd.clone().expect("there should be a command payload");
        let execution_info =
            GraphExecutionInfo::add(dot, cmd, value.deps.clone());
        self.to_executors.push(execution_info);
        // update command info:
        info.status = Status::COMMIT;
        // handle commit in synod
        let msg = SynodMessage::MChosen(value);
        assert!(info.synod.handle(from, msg).is_none());
        if self.gc_running() {
            // notify self with the committed dot
            self.to_processes.push(Action::ToForward {
                msg: Message::MCommitDot { dot },
            });
        } else {
            // if we're not running gc, remove the dot info now
            self.cmds.gc_single(dot);
        }
    }

    /// Handles an `MConsensus` (slow-path accept): lets the local synod
    /// instance decide and replies with an `MConsensusAck` (accepted), an
    /// `MCommit` (already chosen), or nothing (ballot too low).
    fn handle_mconsensus(
        &mut self,
        from: ProcessId,
        dot: Dot,
        ballot: u64,
        value: ConsensusValue,
        _time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MConsensus({:?}, {}, {:?}) | time={}",
            self.id(),
            dot,
            ballot,
            value.deps,
            _time.micros()
        );
        // get cmd info
        let info = self.cmds.get(dot);
        // compute message: that can either be nothing, an ack or an mcommit
        let msg = match info
            .synod
            .handle(from, SynodMessage::MAccept(ballot, value))
        {
            Some(SynodMessage::MAccepted(ballot)) => {
                // the accept message was accepted: create `MConsensusAck`
                Message::MConsensusAck { dot, ballot }
            }
            Some(SynodMessage::MChosen(value)) => {
                // the value has already been chosen: create `MCommit`
                Message::MCommit { dot, value }
            }
            None => {
                // ballot too low to be accepted: nothing to do
                return;
            }
            _ => panic!(
                "no other type of message should be output by Synod in the MConsensus handler"
            ),
        };
        // create target
        let target = singleton![from];
        // save new action
        self.to_processes.push(Action::ToSend { target, msg });
    }

    /// Handles an `MConsensusAck` at the coordinator: once synod gathers
    /// enough accepts and chooses the value, broadcasts the `MCommit`.
    fn handle_mconsensusack(
        &mut self,
        from: ProcessId,
        dot: Dot,
        ballot: u64,
        _time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MConsensusAck({:?}, {}) | time={}",
            self.id(),
            dot,
            ballot,
            _time.micros()
        );
        // get cmd info
        let info = self.cmds.get(dot);
        // compute message: that can either be nothing or an mcommit
        match info.synod.handle(from, SynodMessage::MAccepted(ballot)) {
            Some(SynodMessage::MChosen(value)) => {
                // enough accepts were gathered and the value has been chosen: create `MCommit` and target
                let target = self.bp.all();
                let mcommit = Message::MCommit { dot, value };
                // save new action
                self.to_processes.push(Action::ToSend {
                    target,
                    msg: mcommit,
                });
            }
            None => {
                // not enough accepts yet: nothing to do
            }
            _ => panic!(
                "no other type of message should be output by Synod in the MConsensusAck handler"
            ),
        }
    }

    /// Handles an `MCommitDot` (always self-forwarded): records the dot as
    /// committed in the gc tracker.
    fn handle_mcommit_dot(
        &mut self,
        from: ProcessId,
        dot: Dot,
        _time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MCommitDot({:?}) | time={}",
            self.id(),
            dot,
            _time.micros()
        );
        assert_eq!(from, self.bp.process_id);
        self.gc_track.add_to_clock(&dot);
    }

    /// Handles an `MGarbageCollection` carrying a peer's committed clock:
    /// updates the gc tracker and forwards any newly stable dots to self as
    /// an `MStable`.
    fn handle_mgc(
        &mut self,
        from: ProcessId,
        committed: VClock<ProcessId>,
        _time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MGarbageCollection({:?}) from {} | time={}",
            self.id(),
            committed,
            from,
            _time.micros()
        );
        self.gc_track.update_clock_of(from, committed);
        // compute newly stable dots
        let stable = self.gc_track.stable();
        // create `ToForward` to self
        if !stable.is_empty() {
            self.to_processes.push(Action::ToForward {
                msg: Message::MStable { stable },
            });
        }
    }

    /// Handles an `MStable` (always self-forwarded): garbage-collects the
    /// stable dot ranges and records them in the metrics.
    fn handle_mstable(
        &mut self,
        from: ProcessId,
        stable: Vec<(ProcessId, u64, u64)>,
        _time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MStable({:?}) from {} | time={}",
            self.id(),
            stable,
            from,
            _time.micros()
        );
        assert_eq!(from, self.bp.process_id);
        let stable_count = self.cmds.gc(stable);
        self.bp.stable(stable_count);
    }

    /// Periodic gc event: broadcasts this process's committed clock frontier
    /// to every other process.
    fn handle_event_garbage_collection(&mut self, _time: &dyn SysTime) {
        trace!(
            "p{}: PeriodicEvent::GarbageCollection | time={}",
            self.id(),
            _time.micros()
        );
        // retrieve the committed clock
        let committed = self.gc_track.clock().frontier();
        // save new action
        self.to_processes.push(Action::ToSend {
            target: self.bp.all_but_me(),
            msg: Message::MGarbageCollection { committed },
        });
    }

    /// Whether garbage collection is enabled (a gc interval is configured).
    fn gc_running(&self) -> bool {
        self.bp.config.gc_interval().is_some()
    }
}
// consensus value is a pair where the first component is a flag indicating
// whether this is a noop and the second component is the command's dependencies
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct ConsensusValue {
    // noop handling is not implemented yet (see the assert in
    // `handle_mcommit`); this is always `false` for now
    is_noop: bool,
    deps: HashSet<Dependency>,
}
impl ConsensusValue {
    /// Bottom value: not a noop, with an empty dependency set.
    fn bottom() -> Self {
        Self {
            is_noop: false,
            deps: HashSet::new(),
        }
    }

    /// Non-noop value carrying the given dependency set.
    fn with(deps: HashSet<Dependency>) -> Self {
        Self {
            is_noop: false,
            deps,
        }
    }
}
// proposal generator handed to `Synod`; only needed on recovery, which is
// not implemented yet
fn proposal_gen(_values: HashMap<ProcessId, ConsensusValue>) -> ConsensusValue {
    todo!("recovery not implemented yet")
}
// `EPaxosInfo` contains all information required in the life-cyle of a
// `Command`
#[derive(Debug, Clone)]
struct EPaxosInfo {
    // current phase of the command (see `Status`)
    status: Status,
    // fast quorum selected by the coordinator in the `MCollect`
    quorum: HashSet<ProcessId>,
    // per-command consensus instance used on the slow path
    synod: Synod<ConsensusValue>,
    // `None` if not set yet
    cmd: Option<Command>,
    // `quorum_clocks` is used by the coordinator to compute the threshold
    // clock when deciding whether to take the fast path
    quorum_deps: QuorumDeps,
}
impl Info for EPaxosInfo {
    /// Creates the initial (`START`) info for a command.
    fn new(
        process_id: ProcessId,
        _shard_id: ShardId,
        n: usize,
        f: usize,
        fast_quorum_size: usize,
        _write_quorum_size: usize,
    ) -> Self {
        // create bottom consensus value
        let initial_value = ConsensusValue::bottom();
        // although the fast quorum size is `fast_quorum_size`, we're going to
        // initialize `QuorumClocks` with `fast_quorum_size - 1` since
        // the clock reported by the coordinator shouldn't be considered
        // in the fast path condition, and this clock is not necessary for
        // correctness; for this to work, `MCollectAck`'s from self should be
        // ignored, or not even created.
        Self {
            status: Status::START,
            quorum: HashSet::new(),
            synod: Synod::new(process_id, n, f, proposal_gen, initial_value),
            cmd: None,
            quorum_deps: QuorumDeps::new(fast_quorum_size - 1),
        }
    }
}
// `EPaxos` protocol messages
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum Message {
    // coordinator -> all: propose a command with its initial deps and the
    // selected fast quorum
    MCollect {
        dot: Dot,
        cmd: Command,
        deps: HashSet<Dependency>,
        quorum: HashSet<ProcessId>,
    },
    // fast-quorum member -> coordinator: locally computed deps
    MCollectAck {
        dot: Dot,
        deps: HashSet<Dependency>,
    },
    // coordinator -> all: final chosen value
    MCommit {
        dot: Dot,
        value: ConsensusValue,
    },
    // slow path: synod accept round
    MConsensus {
        dot: Dot,
        ballot: u64,
        value: ConsensusValue,
    },
    MConsensusAck {
        dot: Dot,
        ballot: u64,
    },
    // gc: self-notification that a dot committed
    MCommitDot {
        dot: Dot,
    },
    // gc: committed-clock exchange between processes
    MGarbageCollection {
        committed: VClock<ProcessId>,
    },
    // gc: self-notification with newly stable dot ranges
    MStable {
        stable: Vec<(ProcessId, u64, u64)>,
    },
}
impl MessageIndex for Message {
    /// Maps each message to the worker that should handle it: protocol
    /// messages are indexed by their dot, gc messages go to the dedicated gc
    /// worker, and `MStable` has no index.
    fn index(&self) -> Option<(usize, usize)> {
        use fantoch::load_balance::{
            worker_dot_index_shift, worker_index_no_shift, GC_WORKER_INDEX,
        };
        match self {
            // Protocol messages: indexed by the dot they refer to
            Self::MCollect { dot, .. }
            | Self::MCollectAck { dot, .. }
            | Self::MCommit { dot, .. }
            | Self::MConsensus { dot, .. }
            | Self::MConsensusAck { dot, .. } => worker_dot_index_shift(&dot),
            // GC messages: handled by the gc worker
            Self::MCommitDot { .. } | Self::MGarbageCollection { .. } => {
                worker_index_no_shift(GC_WORKER_INDEX)
            }
            Self::MStable { .. } => None,
        }
    }
}
// periodic events scheduled for this protocol (currently only gc)
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PeriodicEvent {
    GarbageCollection,
}
impl MessageIndex for PeriodicEvent {
    /// Periodic gc events are always handled by the gc worker.
    fn index(&self) -> Option<(usize, usize)> {
        use fantoch::load_balance::{worker_index_no_shift, GC_WORKER_INDEX};
        match self {
            Self::GarbageCollection => worker_index_no_shift(GC_WORKER_INDEX),
        }
    }
}
/// `Status` of commands.
#[derive(Debug, Clone, PartialEq, Eq)]
enum Status {
    // nothing received for this dot yet
    START,
    // payload saved, but this process is not in the fast quorum
    PAYLOAD,
    // fast-quorum member / coordinator gathering `MCollectAck`s
    COLLECT,
    // final value committed
    COMMIT,
}
#[cfg(test)]
mod tests {
    use super::*;
    use fantoch::client::{Client, KeyGen, Workload};
    use fantoch::executor::Executor;
    use fantoch::planet::{Planet, Region};
    use fantoch::sim::Simulation;
    use fantoch::time::SimTime;
    use fantoch::util;

    #[test]
    fn sequential_epaxos_test() {
        epaxos_flow::<SequentialKeyDeps>();
    }

    #[test]
    fn locked_epaxos_test() {
        epaxos_flow::<LockedKeyDeps>();
    }

    /// Runs a full submit -> collect -> ack -> commit -> execute round trip
    /// with 3 processes (one per region) and a single client connected to
    /// process 1, then submits a second command and checks its dot.
    fn epaxos_flow<KD: KeyDeps>() {
        // create simulation
        let mut simulation = Simulation::new();
        // processes ids
        let process_id_1 = 1;
        let process_id_2 = 2;
        let process_id_3 = 3;
        // regions
        // (fixed copy-paste bug: these previously all used "europe-west2",
        // so the three processes were co-located instead of spread across
        // the regions the variable names describe)
        let europe_west2 = Region::new("europe-west2");
        let europe_west3 = Region::new("europe-west3");
        let us_west1 = Region::new("us-west1");
        // there's a single shard
        let shard_id = 0;
        // processes
        let processes = vec![
            (process_id_1, shard_id, europe_west2.clone()),
            (process_id_2, shard_id, europe_west3.clone()),
            (process_id_3, shard_id, us_west1.clone()),
        ];
        // planet
        let planet = Planet::new();
        // create system time
        let time = SimTime::new();
        // n and f
        let n = 3;
        let f = 1;
        let config = Config::new(n, f);
        // executors
        let executor_1 = GraphExecutor::new(process_id_1, shard_id, config);
        let executor_2 = GraphExecutor::new(process_id_2, shard_id, config);
        let executor_3 = GraphExecutor::new(process_id_3, shard_id, config);
        // epaxos
        let (mut epaxos_1, _) =
            EPaxos::<KD>::new(process_id_1, shard_id, config);
        let (mut epaxos_2, _) =
            EPaxos::<KD>::new(process_id_2, shard_id, config);
        let (mut epaxos_3, _) =
            EPaxos::<KD>::new(process_id_3, shard_id, config);
        // discover processes in all epaxos
        let sorted = util::sort_processes_by_distance(
            &europe_west2,
            &planet,
            processes.clone(),
        );
        epaxos_1.discover(sorted);
        let sorted = util::sort_processes_by_distance(
            &europe_west3,
            &planet,
            processes.clone(),
        );
        epaxos_2.discover(sorted);
        let sorted = util::sort_processes_by_distance(
            &us_west1,
            &planet,
            processes.clone(),
        );
        epaxos_3.discover(sorted);
        // register processes
        simulation.register_process(epaxos_1, executor_1);
        simulation.register_process(epaxos_2, executor_2);
        simulation.register_process(epaxos_3, executor_3);
        // client workload
        let shard_count = 1;
        let key_gen = KeyGen::ConflictPool {
            conflict_rate: 100,
            pool_size: 1,
        };
        let keys_per_command = 1;
        let commands_per_client = 10;
        let payload_size = 100;
        let workload = Workload::new(
            shard_count,
            key_gen,
            keys_per_command,
            commands_per_client,
            payload_size,
        );
        // create client 1 that is connected to epaxos 1
        let client_id = 1;
        let client_region = europe_west2.clone();
        let status_frequency = None;
        let mut client_1 = Client::new(client_id, workload, status_frequency);
        // discover processes in client 1
        let closest =
            util::closest_process_per_shard(&client_region, &planet, processes);
        client_1.connect(closest);
        // start client
        let (target_shard, cmd) = client_1
            .cmd_send(&time)
            .expect("there should be a first operation");
        let target = client_1.shard_process(&target_shard);
        // check that `target` is epaxos 1 (the process co-located with the
        // client's region)
        assert_eq!(target, process_id_1);
        // register client
        simulation.register_client(client_1);
        // register command in executor and submit it in epaxos 1
        let (process, _, pending, time) = simulation.get_process(target);
        pending.wait_for(&cmd);
        process.submit(None, cmd, time);
        let mut actions: Vec<_> = process.to_processes_iter().collect();
        // there's a single action
        assert_eq!(actions.len(), 1);
        let mcollect = actions.pop().unwrap();
        // check that the mcollect is being sent to *all* processes
        let check_target = |target: &HashSet<ProcessId>| target.len() == n;
        assert!(
            matches!(mcollect.clone(), Action::ToSend{target, ..} if check_target(&target))
        );
        // handle mcollects
        let mut mcollectacks =
            simulation.forward_to_processes((process_id_1, mcollect));
        // check that there's a single mcollectack
        assert_eq!(mcollectacks.len(), 1);
        // handle the *only* mcollectack
        // - there's a single mcollectack since the initial coordinator does not
        //   reply to itself
        let mut mcommits = simulation.forward_to_processes(
            mcollectacks.pop().expect("there should be an mcollect ack"),
        );
        // there's a commit now
        assert_eq!(mcommits.len(), 1);
        // check that the mcommit is sent to everyone
        let mcommit = mcommits.pop().expect("there should be an mcommit");
        let check_target = |target: &HashSet<ProcessId>| target.len() == n;
        assert!(
            matches!(mcommit.clone(), (_, Action::ToSend {target, ..}) if check_target(&target))
        );
        // all processes handle it
        let to_sends = simulation.forward_to_processes(mcommit);
        // check the MCommitDot
        let check_msg =
            |msg: &Message| matches!(msg, Message::MCommitDot { .. });
        assert!(to_sends.into_iter().all(|(_, action)| {
            matches!(action, Action::ToForward { msg } if check_msg(&msg))
        }));
        // process 1 should have something to the executor
        let (process, executor, pending, time) =
            simulation.get_process(process_id_1);
        let to_executor: Vec<_> = process.to_executors_iter().collect();
        assert_eq!(to_executor.len(), 1);
        // handle in executor and check there's a single command partial
        let mut ready: Vec<_> = to_executor
            .into_iter()
            .flat_map(|info| {
                executor.handle(info, time);
                executor.to_clients_iter().collect::<Vec<_>>()
            })
            .collect();
        assert_eq!(ready.len(), 1);
        // get that command
        let executor_result =
            ready.pop().expect("there should an executor result");
        let cmd_result = pending
            .add_executor_result(executor_result)
            .expect("there should be a command result");
        // handle the previous command result
        let (target, cmd) = simulation
            .forward_to_client(cmd_result)
            .expect("there should a new submit");
        let (process, _, _, time) = simulation.get_process(target);
        process.submit(None, cmd, time);
        let mut actions: Vec<_> = process.to_processes_iter().collect();
        // there's a single action
        assert_eq!(actions.len(), 1);
        let mcollect = actions.pop().unwrap();
        // the second command must get the coordinator's second dot
        let check_msg = |msg: &Message| matches!(msg, Message::MCollect {dot, ..} if dot == &Dot::new(process_id_1, 2));
        assert!(
            matches!(mcollect, Action::ToSend {msg, ..} if check_msg(&msg))
        );
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/fpaxos.rs | fantoch_ps/src/protocol/fpaxos.rs | use crate::executor::{SlotExecutionInfo, SlotExecutor};
use crate::protocol::common::synod::{GCTrack, MultiSynod, MultiSynodMessage};
use fantoch::command::Command;
use fantoch::config::Config;
use fantoch::id::{Dot, ProcessId, ShardId};
use fantoch::protocol::{
Action, BaseProcess, MessageIndex, Protocol, ProtocolMetrics,
};
use fantoch::time::SysTime;
use fantoch::{singleton, trace};
use fantoch::{HashMap, HashSet};
use serde::{Deserialize, Serialize};
use std::time::Duration;
/// `FPaxos` (Flexible Paxos) protocol process: leader-based, with commands
/// ordered into slots by a `MultiSynod` instance.
#[derive(Debug, Clone)]
pub struct FPaxos {
    // common protocol state (ids, config, quorums, metrics)
    bp: BaseProcess,
    // current leader (set from the config at creation)
    leader: ProcessId,
    // per-slot consensus instances
    multi_synod: MultiSynod<Command>,
    // tracks committed slots to detect stable (gc-able) ones
    gc_track: GCTrack,
    // outgoing actions to other processes
    to_processes: Vec<Action<Self>>,
    // execution info to be handed to the slot executor
    to_executors: Vec<SlotExecutionInfo>,
}
impl Protocol for FPaxos {
    type Message = Message;
    type PeriodicEvent = PeriodicEvent;
    type Executor = SlotExecutor;
    /// Creates a new `FPaxos` process.
    fn new(
        process_id: ProcessId,
        shard_id: ShardId,
        config: Config,
    ) -> (Self, Vec<(Self::PeriodicEvent, Duration)>) {
        // compute fast and write quorum sizes
        let fast_quorum_size = 0; // there's no fast quorum as we don't have fast paths
        let write_quorum_size = config.fpaxos_quorum_size();
        // create protocol data-structures
        let bp = BaseProcess::new(
            process_id,
            shard_id,
            config,
            fast_quorum_size,
            write_quorum_size,
        );
        // get leader from config
        let initial_leader = config.leader().expect(
            "in a leader-based protocol, the initial leader should be defined",
        );
        // create multi synod
        let multi_synod =
            MultiSynod::new(process_id, initial_leader, config.n(), config.f());
        let to_processes = Vec::new();
        let to_executors = Vec::new();
        // create `FPaxos`
        let protocol = Self {
            bp,
            leader: initial_leader,
            multi_synod,
            gc_track: GCTrack::new(process_id, config.n()),
            to_processes,
            to_executors,
        };
        // create periodic events: gc only runs if an interval is configured
        let events = if let Some(interval) = config.gc_interval() {
            vec![(PeriodicEvent::GarbageCollection, interval)]
        } else {
            vec![]
        };
        // return both
        (protocol, events)
    }
    /// Returns the process identifier.
    fn id(&self) -> ProcessId {
        self.bp.process_id
    }
    /// Returns the shard identifier.
    fn shard_id(&self) -> ShardId {
        self.bp.shard_id
    }
    /// Updates the processes known by this process.
    /// The set of processes provided is already sorted by distance.
    fn discover(
        &mut self,
        processes: Vec<(ProcessId, ShardId)>,
    ) -> (bool, HashMap<ShardId, ProcessId>) {
        let connect_ok = self.bp.discover(processes);
        (connect_ok, self.bp.closest_shard_process().clone())
    }
    /// Submits a command issued by some client.
    fn submit(&mut self, dot: Option<Dot>, cmd: Command, _time: &dyn SysTime) {
        self.handle_submit(dot, cmd);
    }
    /// Handles protocol messages by dispatching to the per-message handler.
    fn handle(
        &mut self,
        from: ProcessId,
        _from_shard_id: ShardId,
        msg: Self::Message,
        time: &dyn SysTime,
    ) {
        match msg {
            Message::MForwardSubmit { cmd } => self.handle_submit(None, cmd),
            Message::MSpawnCommander { ballot, slot, cmd } => {
                self.handle_mspawn_commander(from, ballot, slot, cmd, time)
            }
            Message::MAccept { ballot, slot, cmd } => {
                self.handle_maccept(from, ballot, slot, cmd, time)
            }
            Message::MAccepted { ballot, slot } => {
                self.handle_maccepted(from, ballot, slot, time)
            }
            Message::MChosen { slot, cmd } => {
                self.handle_mchosen(slot, cmd, time)
            }
            Message::MGarbageCollection { committed } => {
                self.handle_mgc(from, committed, time)
            }
        }
    }
    /// Handles periodic local events.
    fn handle_event(&mut self, event: Self::PeriodicEvent, time: &dyn SysTime) {
        match event {
            PeriodicEvent::GarbageCollection => {
                self.handle_event_garbage_collection(time)
            }
        }
    }
    /// Returns a new action to be sent to other processes.
    fn to_processes(&mut self) -> Option<Action<Self>> {
        self.to_processes.pop()
    }
    /// Returns new execution info for executors.
    fn to_executors(&mut self) -> Option<SlotExecutionInfo> {
        self.to_executors.pop()
    }
    /// FPaxos can run with multiple workers per process.
    fn parallel() -> bool {
        true
    }
    /// FPaxos is leader-based.
    fn leaderless() -> bool {
        false
    }
    /// Returns the protocol metrics collected so far.
    fn metrics(&self) -> &ProtocolMetrics {
        self.bp.metrics()
    }
}
impl FPaxos {
    /// Handles a submit operation by a client.
    ///
    /// If we're the leader, an `MSpawnCommander` is forwarded to self (so it
    /// can run in another worker); otherwise, the command is forwarded to the
    /// leader.
    fn handle_submit(&mut self, _dot: Option<Dot>, cmd: Command) {
        match self.multi_synod.submit(cmd) {
            MultiSynodMessage::MSpawnCommander(ballot, slot, cmd) => {
                // in this case, we're the leader: record command size
                self.bp.collect_metric(
                    fantoch::protocol::ProtocolMetricsKind::CommandKeyCount,
                    cmd.total_key_count() as u64,
                );
                // and send a spawn commander to self (that can run in a
                // different process for parallelism)
                let mspawn = Message::MSpawnCommander { ballot, slot, cmd };
                // save new action
                self.to_processes.push(Action::ToForward { msg: mspawn });
            }
            MultiSynodMessage::MForwardSubmit(cmd) => {
                // in this case, we're not the leader and should forward the
                // command to the leader
                let mforward = Message::MForwardSubmit { cmd };
                let target = singleton![self.leader];
                // save new action
                self.to_processes.push(Action::ToSend {
                    target,
                    msg: mforward,
                });
            }
            msg => panic!("can't handle {:?} in handle_submit", msg),
        }
    }
    /// Handles an `MSpawnCommander` (leader only): runs the local synod step
    /// and broadcasts the resulting `MAccept` to the write quorum.
    fn handle_mspawn_commander(
        &mut self,
        from: ProcessId,
        ballot: u64,
        slot: u64,
        cmd: Command,
        _time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MSpawnCommander({:?}, {:?}, {:?}) from {} | time={}",
            self.id(),
            ballot,
            slot,
            cmd,
            from,
            _time.micros()
        );
        // spawn commander message should come from self
        assert_eq!(from, self.id());
        // in this case, we're the leader:
        // - handle spawn
        // - create an maccept and send it to the write quorum
        let maccept = self.multi_synod.handle(from, MultiSynodMessage::MSpawnCommander(ballot, slot, cmd)).expect("handling an MSpawnCommander in the local MultiSynod should output an MAccept");
        match maccept {
            MultiSynodMessage::MAccept(ballot, slot, cmd) => {
                // create `MAccept`
                let maccept = Message::MAccept { ballot, slot, cmd };
                let target = self.bp.write_quorum();
                // save new action
                self.to_processes.push(Action::ToSend {
                    target,
                    msg: maccept,
                });
            }
            msg => panic!("can't handle {:?} in handle_mspawn_commander", msg),
        }
    }
    /// Handles an `MAccept` (acceptor): if the local synod accepts it, replies
    /// with an `MAccepted` to the sender (the leader).
    fn handle_maccept(
        &mut self,
        from: ProcessId,
        ballot: u64,
        slot: u64,
        cmd: Command,
        _time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MAccept({:?}, {:?}, {:?}) from {} | time={}",
            self.id(),
            ballot,
            slot,
            cmd,
            from,
            _time.micros()
        );
        if let Some(msg) = self
            .multi_synod
            .handle(from, MultiSynodMessage::MAccept(ballot, slot, cmd))
        {
            match msg {
                MultiSynodMessage::MAccepted(ballot, slot) => {
                    // create `MAccepted` and target
                    let maccepted = Message::MAccepted { ballot, slot };
                    let target = singleton![from];
                    // save new action
                    self.to_processes.push(Action::ToSend {
                        target,
                        msg: maccepted,
                    });
                }
                msg => panic!("can't handle {:?} in handle_maccept", msg),
            }
        } else {
            // TODO maybe warn the leader that it is not longer a leader?
        }
    }
    /// Handles an `MAccepted` (leader/commander): once the local synod gathers
    /// enough accepts, broadcasts the resulting `MChosen` to all processes.
    fn handle_maccepted(
        &mut self,
        from: ProcessId,
        ballot: u64,
        slot: u64,
        _time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MAccepted({:?}, {:?}) from {} | time={}",
            self.id(),
            ballot,
            slot,
            from,
            _time.micros()
        );
        if let Some(msg) = self
            .multi_synod
            .handle(from, MultiSynodMessage::MAccepted(ballot, slot))
        {
            match msg {
                MultiSynodMessage::MChosen(slot, cmd) => {
                    // create `MChosen`
                    // (local renamed from `mcommit` for consistency with the
                    // message name)
                    let mchosen = Message::MChosen { slot, cmd };
                    let target = self.bp.all();
                    // save new action
                    self.to_processes.push(Action::ToSend {
                        target,
                        msg: mchosen,
                    });
                }
                msg => panic!("can't handle {:?} in handle_maccepted", msg),
            }
        }
    }
    /// Handles an `MChosen`: forwards the decided command to the executor and
    /// either tracks the commit for gc or clears the slot info immediately.
    fn handle_mchosen(&mut self, slot: u64, cmd: Command, _time: &dyn SysTime) {
        // fixed trace label: this handler receives `MChosen`, but the trace
        // previously (and misleadingly) said `MCommit`
        trace!(
            "p{}: MChosen({:?}, {:?}) | time={}",
            self.id(),
            slot,
            cmd,
            _time.micros()
        );
        // create execution info
        let execution_info = SlotExecutionInfo::new(slot, cmd);
        self.to_executors.push(execution_info);
        if self.gc_running() {
            // register that it has been committed
            self.gc_track.commit(slot);
        } else {
            // if we're not running gc, remove the slot info now
            self.multi_synod.gc_single(slot);
        }
    }
    /// Returns `true` if periodic garbage collection is configured.
    fn gc_running(&self) -> bool {
        self.bp.config.gc_interval().is_some()
    }
    /// Handles an `MGarbageCollection`: records how much `from` has committed
    /// and gc-s the slots that became stable.
    fn handle_mgc(
        &mut self,
        from: ProcessId,
        committed: u64,
        _time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MGarbageCollection({:?}) from {} | time={}",
            self.id(),
            committed,
            from,
            _time.micros()
        );
        self.gc_track.committed_by(from, committed);
        // perform garbage collection of stable slots
        let stable = self.gc_track.stable();
        let stable_count = self.multi_synod.gc(stable);
        self.bp.stable(stable_count);
    }
    /// Periodically broadcasts the local committed watermark so peers can
    /// compute stable slots.
    fn handle_event_garbage_collection(&mut self, _time: &dyn SysTime) {
        trace!(
            "p{}: PeriodicEvent::GarbageCollection | time={}",
            self.id(),
            _time.micros()
        );
        // retrieve the committed slot
        let committed = self.gc_track.committed();
        // save new action
        self.to_processes.push(Action::ToSend {
            target: self.bp.all_but_me(),
            msg: Message::MGarbageCollection { committed },
        })
    }
}
// `FPaxos` protocol messages
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum Message {
    // non-leader -> leader: forward a client-submitted command
    MForwardSubmit {
        cmd: Command,
    },
    // leader -> self: spawn a commander for a new (ballot, slot)
    MSpawnCommander {
        ballot: u64,
        slot: u64,
        cmd: Command,
    },
    // commander -> write quorum: accept request
    MAccept {
        ballot: u64,
        slot: u64,
        cmd: Command,
    },
    // acceptor -> commander: accept acknowledgment
    MAccepted {
        ballot: u64,
        slot: u64,
    },
    // commander -> all: decision for a slot
    MChosen {
        slot: u64,
        cmd: Command,
    },
    // all -> all: committed watermark used for garbage collection
    MGarbageCollection {
        committed: u64,
    },
}
// worker running the leader/proposer role (re-exported from fantoch)
const LEADER_WORKER_INDEX: usize = fantoch::load_balance::LEADER_WORKER_INDEX;
// worker running the acceptor role (and gc tracking)
const ACCEPTOR_WORKER_INDEX: usize = 1;
impl MessageIndex for Message {
    /// Routes each message to the worker responsible for handling it.
    fn index(&self) -> Option<(usize, usize)> {
        use fantoch::load_balance::{
            worker_index_no_shift, worker_index_shift,
        };
        match self {
            Self::MForwardSubmit { .. } => {
                // forward commands to the leader worker
                worker_index_no_shift(LEADER_WORKER_INDEX)
            }
            Self::MAccept { .. } => {
                // forward accepts to the acceptor worker
                worker_index_no_shift(ACCEPTOR_WORKER_INDEX)
            }
            Self::MChosen { .. } => {
                // forward chosen messages also to acceptor worker:
                // - at one point we had a learner worker, but since the
                //   acceptor needs to know about committed slots to perform
                //   GC, we wouldn't gain much (if anything) in separating
                //   these roles
                worker_index_no_shift(ACCEPTOR_WORKER_INDEX)
            }
            // spawn commanders and accepted messages should be forwarded to
            // the commander process:
            // - make sure that these commanders are never spawned in the
            //   previous 2 workers
            Self::MSpawnCommander { slot, .. } => {
                worker_index_shift(*slot as usize)
            }
            Self::MAccepted { slot, .. } => worker_index_shift(*slot as usize),
            Self::MGarbageCollection { .. } => {
                // since it's the acceptor that contains the slots to be gc-ed,
                // we should simply run gc-tracking there as well:
                // - this removes the need for Message::MStable seen in the
                //   other implementations
                worker_index_no_shift(ACCEPTOR_WORKER_INDEX)
            }
        }
    }
}
// `FPaxos` periodic events
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PeriodicEvent {
    // periodically exchange committed watermarks to gc stable slots
    GarbageCollection,
}
impl MessageIndex for PeriodicEvent {
    /// All periodic events are handled by the acceptor worker, which owns the
    /// state needed for garbage collection.
    fn index(&self) -> Option<(usize, usize)> {
        use fantoch::load_balance::worker_index_no_shift;
        let worker = match self {
            Self::GarbageCollection => ACCEPTOR_WORKER_INDEX,
        };
        worker_index_no_shift(worker)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use fantoch::client::{Client, KeyGen, Workload};
    use fantoch::executor::Executor;
    use fantoch::planet::{Planet, Region};
    use fantoch::sim::Simulation;
    use fantoch::time::SimTime;
    use fantoch::util;
    /// End-to-end flow: submit -> spawn commander -> accept -> accepted ->
    /// chosen -> execute -> next submit, checked step by step.
    #[test]
    fn fpaxos_flow() {
        // create simulation
        let mut simulation = Simulation::new();
        // processes ids
        let process_id_1 = 1;
        let process_id_2 = 2;
        let process_id_3 = 3;
        // regions: one distinct region per process
        // (fix: these all read "europe-west2" before, a copy-paste slip that
        // made every process co-located)
        let europe_west2 = Region::new("europe-west2");
        let europe_west3 = Region::new("europe-west3");
        let us_west1 = Region::new("us-west1");
        // there's a single shard
        let shard_id = 0;
        // processes
        let processes = vec![
            (process_id_1, shard_id, europe_west2.clone()),
            (process_id_2, shard_id, europe_west3.clone()),
            (process_id_3, shard_id, us_west1.clone()),
        ];
        // planet
        let planet = Planet::new();
        // create system time
        let time = SimTime::new();
        // n and f
        let n = 3;
        let f = 1;
        let mut config = Config::new(n, f);
        // set process 1 as the leader
        config.set_leader(process_id_1);
        // executors
        let executor_1 = SlotExecutor::new(process_id_1, shard_id, config);
        let executor_2 = SlotExecutor::new(process_id_2, shard_id, config);
        let executor_3 = SlotExecutor::new(process_id_3, shard_id, config);
        // fpaxos
        let (mut fpaxos_1, _) = FPaxos::new(process_id_1, shard_id, config);
        let (mut fpaxos_2, _) = FPaxos::new(process_id_2, shard_id, config);
        let (mut fpaxos_3, _) = FPaxos::new(process_id_3, shard_id, config);
        // discover processes in all fpaxos
        let sorted = util::sort_processes_by_distance(
            &europe_west2,
            &planet,
            processes.clone(),
        );
        fpaxos_1.discover(sorted);
        let sorted = util::sort_processes_by_distance(
            &europe_west3,
            &planet,
            processes.clone(),
        );
        fpaxos_2.discover(sorted);
        let sorted = util::sort_processes_by_distance(
            &us_west1,
            &planet,
            processes.clone(),
        );
        fpaxos_3.discover(sorted);
        // register processes
        simulation.register_process(fpaxos_1, executor_1);
        simulation.register_process(fpaxos_2, executor_2);
        simulation.register_process(fpaxos_3, executor_3);
        // client workload
        let shard_count = 1;
        let key_gen = KeyGen::ConflictPool {
            conflict_rate: 100,
            pool_size: 1,
        };
        let keys_per_command = 1;
        let commands_per_client = 10;
        let payload_size = 100;
        let workload = Workload::new(
            shard_count,
            key_gen,
            keys_per_command,
            commands_per_client,
            payload_size,
        );
        // create client 1 that is connected to fpaxos 1
        let client_id = 1;
        let client_region = europe_west2.clone();
        let status_frequency = None;
        let mut client_1 = Client::new(client_id, workload, status_frequency);
        // discover processes in client 1
        let closest =
            util::closest_process_per_shard(&client_region, &planet, processes);
        client_1.connect(closest);
        // start client
        let (target_shard, cmd) = client_1
            .cmd_send(&time)
            .expect("there should be a first operation");
        let target = client_1.shard_process(&target_shard);
        // check that `target` is fpaxos 1
        assert_eq!(target, process_id_1);
        // register client
        simulation.register_client(client_1);
        // register command in executor and submit it in fpaxos 1
        let (process, _, pending, time) = simulation.get_process(target);
        pending.wait_for(&cmd);
        process.submit(None, cmd, time);
        let mut actions: Vec<_> = process.to_processes_iter().collect();
        // there's a single action
        assert_eq!(actions.len(), 1);
        let spawn = actions.pop().unwrap();
        // check that the register created a spawn commander to self and handle
        // it locally
        let mut actions: Vec<_> = if let Action::ToForward { msg } = spawn {
            process.handle(process_id_1, shard_id, msg, time);
            process.to_processes_iter().collect()
        } else {
            panic!("Action::ToForward not found!");
        };
        // there's a single action
        assert_eq!(actions.len(), 1);
        let maccept = actions.pop().unwrap();
        // check that the maccept is being sent to 2 processes (the closest
        // f + 1 from the leader's region: processes 1 and 2)
        let check_target = |target: &HashSet<ProcessId>| {
            target.len() == f + 1 && target.contains(&1) && target.contains(&2)
        };
        assert!(
            matches!(maccept.clone(), Action::ToSend{target, ..} if check_target(&target))
        );
        // handle maccepts
        let mut maccepted =
            simulation.forward_to_processes((process_id_1, maccept));
        // check that there are 2 maccepted
        assert_eq!(maccepted.len(), 2 * f);
        // handle the first maccepted
        let mchosen = simulation.forward_to_processes(
            maccepted.pop().expect("there should be an maccepted"),
        );
        // no mchosen yet
        assert!(mchosen.is_empty());
        // handle the second maccepted
        let mut mchosen = simulation.forward_to_processes(
            maccepted.pop().expect("there should be an maccepted"),
        );
        // there's an mchosen now
        assert_eq!(mchosen.len(), 1);
        // check that the mchosen is sent to everyone
        let mchosen = mchosen.pop().expect("there should be an mcommit");
        let check_target = |target: &HashSet<ProcessId>| target.len() == n;
        assert!(
            matches!(mchosen.clone(), (_, Action::ToSend {target, ..}) if check_target(&target))
        );
        // all processes handle it
        let to_sends = simulation.forward_to_processes(mchosen);
        // check there's nothing to send
        assert!(to_sends.is_empty());
        // process 1 should have something to the executor
        let (process, executor, pending, time) =
            simulation.get_process(process_id_1);
        let to_executor: Vec<_> = process.to_executors_iter().collect();
        assert_eq!(to_executor.len(), 1);
        // handle in executor and check there's a single command partial
        let mut ready: Vec<_> = to_executor
            .into_iter()
            .flat_map(|info| {
                executor.handle(info, time);
                executor.to_clients_iter().collect::<Vec<_>>()
            })
            .collect();
        assert_eq!(ready.len(), 1);
        // get that command
        let executor_result =
            ready.pop().expect("there should an executor result");
        let cmd_result = pending
            .add_executor_result(executor_result)
            .expect("there should be a command result");
        // handle the previous command result
        let (target, cmd) = simulation
            .forward_to_client(cmd_result)
            .expect("there should a new submit");
        let (process, _, _, time) = simulation.get_process(target);
        process.submit(None, cmd, time);
        let mut actions: Vec<_> = process.to_processes_iter().collect();
        // there's a single action
        assert_eq!(actions.len(), 1);
        let mcollect = actions.pop().unwrap();
        // the second command must get slot 2
        let check_msg = |msg: &Message| matches!(msg, Message::MSpawnCommander{slot, ..} if slot == &2);
        assert!(matches!(mcollect, Action::ToForward {msg} if check_msg(&msg)));
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/mod.rs | fantoch_ps/src/protocol/mod.rs | // This module contains common data-structures between protocols.
pub mod common;
// This module contains the definition of `Atlas`.
mod atlas;
// This module contains the definition of `EPaxos`.
mod epaxos;
// This module contains the definition of `Tempo`.
mod tempo;
// This module contains the definition of `FPaxos`.
mod fpaxos;
// This module contains the definition of `Caesar`.
mod caesar;
// This module contains common functionality for partial replication.
mod partial;
// Re-exports of the public protocol implementations.
pub use atlas::{AtlasLocked, AtlasSequential};
pub use caesar::CaesarLocked;
pub use epaxos::{EPaxosLocked, EPaxosSequential};
pub use fpaxos::FPaxos;
pub use tempo::{TempoAtomic, TempoLocked, TempoSequential};
#[cfg(test)]
mod tests {
use super::*;
use fantoch::client::{KeyGen, Workload};
use fantoch::config::Config;
use fantoch::executor::ExecutionOrderMonitor;
use fantoch::id::{ProcessId, Rifl};
use fantoch::kvs::Key;
use fantoch::planet::Planet;
use fantoch::protocol::{Protocol, ProtocolMetrics};
use fantoch::run::tests::{run_test_with_inspect_fun, tokio_test_runtime};
use fantoch::sim::Runner;
use fantoch::HashMap;
use std::time::Duration;
// global test config
const SHARD_COUNT: usize = 1;
// key generator: all clients draw keys from a single pool with 50%
// conflict probability
const KEY_GEN: KeyGen = KeyGen::ConflictPool {
    conflict_rate: 50,
    pool_size: 1,
};
// number of keys accessed by each command
const KEYS_PER_COMMAND: usize = 2;
// percentage of read-only commands
const READ_ONLY_PERCENTAGE: usize = 0;
// default workload size (per client / per process)
const COMMANDS_PER_CLIENT: usize = 100;
const CLIENTS_PER_PROCESS: usize = 10;
/// Builds a `Config` with `n` processes tolerating `f` faults; the
/// three-argument form additionally sets the (fixed) leader.
macro_rules! config {
    ($n:expr, $f:expr) => {{
        let config = Config::new($n, $f);
        config
    }};
    ($n:expr, $f:expr, $leader:expr) => {{
        let mut config = Config::new($n, $f);
        config.set_leader($leader);
        config
    }};
}
/// Builds a `Config` for tempo tests; the three-argument form also enables
/// tiny quorums and real-time clock bumping.
macro_rules! tempo_config {
    ($n:expr, $f:expr) => {{
        let mut config = Config::new($n, $f);
        // always set `tempo_detached_send_interval`
        config.set_tempo_detached_send_interval(Duration::from_millis(100));
        config
    }};
    ($n:expr, $f:expr, $clock_bump_interval:expr) => {{
        let mut config = tempo_config!($n, $f);
        config.set_tempo_tiny_quorums(true);
        config.set_tempo_clock_bump_interval($clock_bump_interval);
        config
    }};
}
/// Builds a `Config` for caesar tests, enabling/disabling its wait condition.
macro_rules! caesar_config {
    ($n:expr, $f:expr, $wait:expr) => {{
        let mut config = Config::new($n, $f);
        config.set_caesar_wait_condition($wait);
        config
    }};
}
/// Returns whether we're running in CI, as indicated by the "CI" env var.
///
/// Panics if "CI" is set but is not the string "true" (any non-bool value
/// also panics, via `expect`).
fn ci() -> bool {
    match std::env::var("CI") {
        // unset: not running in CI
        Err(_) => false,
        Ok(value) => {
            // if ci is set, it should be a bool
            let ci =
                value.parse::<bool>().expect("CI env var should be a bool");
            assert!(ci, "CI env var is set and it's not true");
            true
        }
    }
}
/// Computes the number of commands per client and clients per process
/// according to "CI" env var; if set to true, run the tests with a smaller
/// load
fn small_load_in_ci() -> (usize, usize) {
    match ci() {
        // in CI: 10 commands per client and a single client per process
        true => (10, 1),
        // otherwise: the default (heavier) load
        false => (COMMANDS_PER_CLIENT, CLIENTS_PER_PROCESS),
    }
}
// ---- tempo tests ---- //
// NOTE: per the assertions below, `slow_paths() == 0` is expected for the
// f = 1 configurations, while f = 2 configurations should hit slow paths.
#[test]
fn sim_tempo_3_1_test() {
    let metrics = sim_test::<TempoSequential>(
        tempo_config!(3, 1),
        READ_ONLY_PERCENTAGE,
        KEYS_PER_COMMAND,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert_eq!(metrics.slow_paths(), 0);
}
#[test]
fn sim_real_time_tempo_3_1_test() {
    // NOTE: with n = 3 we don't really need real time clocks to get the
    // best results
    let clock_bump_interval = Duration::from_millis(50);
    let metrics = sim_test::<TempoSequential>(
        tempo_config!(3, 1, clock_bump_interval),
        READ_ONLY_PERCENTAGE,
        KEYS_PER_COMMAND,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert_eq!(metrics.slow_paths(), 0);
}
#[test]
fn sim_tempo_5_1_test() {
    let metrics = sim_test::<TempoSequential>(
        tempo_config!(5, 1),
        READ_ONLY_PERCENTAGE,
        KEYS_PER_COMMAND,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert_eq!(metrics.slow_paths(), 0);
}
#[test]
fn sim_tempo_5_2_test() {
    let metrics = sim_test::<TempoSequential>(
        tempo_config!(5, 2),
        READ_ONLY_PERCENTAGE,
        KEYS_PER_COMMAND,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert!(metrics.slow_paths() > 0);
}
#[test]
fn sim_tempo_5_2_nfr_test() {
    // with NFR enabled, single-key reads should never take the slow path
    let mut config = tempo_config!(5, 2);
    config.set_nfr(true);
    let read_only_percentage = 20;
    let keys_per_command = 1;
    let metrics = sim_test::<TempoSequential>(
        config,
        read_only_percentage,
        keys_per_command,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert!(metrics.slow_paths() > 0);
    assert_eq!(metrics.slow_paths_reads(), 0);
}
#[test]
fn sim_real_time_tempo_5_1_test() {
    let clock_bump_interval = Duration::from_millis(50);
    let metrics = sim_test::<TempoSequential>(
        tempo_config!(5, 1, clock_bump_interval),
        READ_ONLY_PERCENTAGE,
        KEYS_PER_COMMAND,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert_eq!(metrics.slow_paths(), 0);
}
#[test]
fn run_tempo_3_1_atomic_test() {
    // tempo atomic can handle as many workers as we want but we may want to
    // only have one executor
    let workers = 3;
    let executors = 3;
    let metrics = run_test::<TempoAtomic>(
        tempo_config!(3, 1),
        SHARD_COUNT,
        workers,
        executors,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert_eq!(metrics.slow_paths(), 0);
}
#[test]
fn run_tempo_3_1_locked_test() {
    let workers = 3;
    let executors = 3;
    let metrics = run_test::<TempoLocked>(
        tempo_config!(3, 1),
        SHARD_COUNT,
        workers,
        executors,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert_eq!(metrics.slow_paths(), 0);
}
#[test]
fn run_real_time_tempo_3_1_atomic_test() {
    let workers = 3;
    let executors = 3;
    let (commands_per_client, clients_per_process) = small_load_in_ci();
    let clock_bump_interval = Duration::from_millis(500);
    let metrics = run_test::<TempoAtomic>(
        tempo_config!(3, 1, clock_bump_interval),
        SHARD_COUNT,
        workers,
        executors,
        commands_per_client,
        clients_per_process,
    );
    assert_eq!(metrics.slow_paths(), 0);
}
#[test]
fn run_tempo_5_1_atomic_test() {
    let workers = 3;
    let executors = 3;
    let metrics = run_test::<TempoAtomic>(
        tempo_config!(5, 1),
        SHARD_COUNT,
        workers,
        executors,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert_eq!(metrics.slow_paths(), 0);
}
#[test]
fn run_tempo_5_2_atomic_test() {
    let workers = 3;
    let executors = 3;
    let metrics = run_test::<TempoAtomic>(
        tempo_config!(5, 2),
        SHARD_COUNT,
        workers,
        executors,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert!(metrics.slow_paths() > 0);
}
// ---- tempo (partial replication) tests ---- //
// these run with a smaller load when in CI (see `small_load_in_ci`)
#[test]
fn run_tempo_3_1_atomic_partial_replication_two_shards_test() {
    let shard_count = 2;
    let workers = 2;
    let executors = 2;
    let (commands_per_client, clients_per_process) = small_load_in_ci();
    let metrics = run_test::<TempoAtomic>(
        tempo_config!(3, 1),
        shard_count,
        workers,
        executors,
        commands_per_client,
        clients_per_process,
    );
    assert_eq!(metrics.slow_paths(), 0);
}
#[test]
fn run_tempo_3_1_atomic_partial_replication_three_shards_test() {
    let shard_count = 3;
    let workers = 2;
    let executors = 2;
    let (commands_per_client, clients_per_process) = small_load_in_ci();
    let metrics = run_test::<TempoAtomic>(
        tempo_config!(3, 1),
        shard_count,
        workers,
        executors,
        commands_per_client,
        clients_per_process,
    );
    assert_eq!(metrics.slow_paths(), 0);
}
#[test]
fn run_tempo_5_2_atomic_partial_replication_two_shards_test() {
    let shard_count = 2;
    let workers = 2;
    let executors = 2;
    let (commands_per_client, clients_per_process) = small_load_in_ci();
    let metrics = run_test::<TempoAtomic>(
        tempo_config!(5, 2),
        shard_count,
        workers,
        executors,
        commands_per_client,
        clients_per_process,
    );
    assert!(metrics.slow_paths() > 0);
}
// ---- atlas tests ---- //
#[test]
fn sim_atlas_3_1_test() {
    // n = 3, f = 1: no slow paths expected
    let metrics = sim_test::<AtlasSequential>(
        config!(3, 1),
        READ_ONLY_PERCENTAGE,
        KEYS_PER_COMMAND,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert_eq!(metrics.slow_paths(), 0);
}
#[test]
fn sim_atlas_5_1_test() {
    // n = 5, f = 1 (fix: this previously ran with `config!(3, 1)` — a
    // copy-paste from the 3_1 test above — so the 5_1 configuration was
    // never actually exercised); with f = 1, no slow paths are expected
    let metrics = sim_test::<AtlasSequential>(
        config!(5, 1),
        READ_ONLY_PERCENTAGE,
        KEYS_PER_COMMAND,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert_eq!(metrics.slow_paths(), 0);
}
#[test]
fn sim_atlas_5_2_test() {
    let metrics = sim_test::<AtlasSequential>(
        config!(5, 2),
        READ_ONLY_PERCENTAGE,
        KEYS_PER_COMMAND,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert!(metrics.slow_paths() > 0);
}
#[test]
fn sim_atlas_5_2_nfr_test() {
    // with NFR enabled, single-key reads should never take the slow path
    let mut config = config!(5, 2);
    config.set_nfr(true);
    let read_only_percentage = 20;
    let keys_per_command = 1;
    let metrics = sim_test::<AtlasSequential>(
        config,
        read_only_percentage,
        keys_per_command,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert!(metrics.slow_paths() > 0);
    assert_eq!(metrics.slow_paths_reads(), 0);
}
#[test]
fn run_atlas_3_1_locked_test() {
    // atlas locked can handle as many workers as we want but only one
    // executor
    let workers = 4;
    let executors = 1;
    let metrics = run_test::<AtlasLocked>(
        config!(3, 1),
        SHARD_COUNT,
        workers,
        executors,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert_eq!(metrics.slow_paths(), 0);
}
// ---- atlas (partial replication) tests ---- //
#[test]
fn run_atlas_3_1_locked_partial_replication_two_shards_test() {
    let shard_count = 2;
    let workers = 2;
    let executors = 2; // atlas executor can be parallel in partial replication
    let (commands_per_client, clients_per_process) = small_load_in_ci();
    // NOTE(review): this atlas test uses `tempo_config!` (which also sets
    // `tempo_detached_send_interval`) — likely copy-paste from the tempo
    // tests; presumably harmless for atlas, but confirm
    let metrics = run_test::<AtlasLocked>(
        tempo_config!(3, 1),
        shard_count,
        workers,
        executors,
        commands_per_client,
        clients_per_process,
    );
    assert_eq!(metrics.slow_paths(), 0);
}
#[test]
fn run_atlas_3_1_locked_partial_replication_four_shards_test() {
    let shard_count = 4;
    let workers = 2;
    let executors = 2; // atlas executor can be parallel in partial replication
    let (commands_per_client, clients_per_process) = small_load_in_ci();
    // NOTE(review): `tempo_config!` in an atlas test — see note above
    let metrics = run_test::<AtlasLocked>(
        tempo_config!(3, 1),
        shard_count,
        workers,
        executors,
        commands_per_client,
        clients_per_process,
    );
    assert_eq!(metrics.slow_paths(), 0);
}
#[test]
fn run_atlas_5_2_locked_partial_replication_two_shards_test() {
    let shard_count = 2;
    let workers = 2;
    let executors = 2; // atlas executor can be parallel in partial replication
    let (commands_per_client, clients_per_process) = small_load_in_ci();
    // NOTE(review): `tempo_config!` in an atlas test — see note above
    let metrics = run_test::<AtlasLocked>(
        tempo_config!(5, 2),
        shard_count,
        workers,
        executors,
        commands_per_client,
        clients_per_process,
    );
    assert!(metrics.slow_paths() > 0);
}
// ---- epaxos tests ---- //
#[test]
fn sim_epaxos_3_1_test() {
    let metrics = sim_test::<EPaxosSequential>(
        config!(3, 1),
        READ_ONLY_PERCENTAGE,
        KEYS_PER_COMMAND,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert_eq!(metrics.slow_paths(), 0);
}
#[test]
fn sim_epaxos_5_2_test() {
    let metrics = sim_test::<EPaxosSequential>(
        config!(5, 2),
        READ_ONLY_PERCENTAGE,
        KEYS_PER_COMMAND,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert!(metrics.slow_paths() > 0);
}
#[test]
fn sim_epaxos_7_3_nfr_test() {
    // with NFR and a read-only single-key workload, no slow paths expected
    let mut config = config!(7, 3);
    config.set_nfr(true);
    let read_only_percentage = 100;
    let keys_per_command = 1;
    let metrics = sim_test::<EPaxosSequential>(
        config,
        read_only_percentage,
        keys_per_command,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert_eq!(metrics.slow_paths(), 0);
    assert_eq!(metrics.slow_paths_reads(), 0);
}
#[test]
fn run_epaxos_3_1_locked_test() {
    // epaxos locked can handle as many workers as we want but only one
    // executor
    let workers = 4;
    let executors = 1;
    let metrics = run_test::<EPaxosLocked>(
        config!(3, 1),
        SHARD_COUNT,
        workers,
        executors,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
    assert_eq!(metrics.slow_paths(), 0);
}
// ---- caesar tests ---- //
// these only check that the runs complete; slow paths are not asserted
#[test]
fn sim_caesar_wait_3_1_test() {
    let _slow_paths = sim_test::<CaesarLocked>(
        caesar_config!(3, 1, true),
        READ_ONLY_PERCENTAGE,
        KEYS_PER_COMMAND,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
}
#[test]
fn sim_caesar_3_1_no_wait_test() {
    let _slow_paths = sim_test::<CaesarLocked>(
        caesar_config!(3, 1, false),
        READ_ONLY_PERCENTAGE,
        KEYS_PER_COMMAND,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
}
#[test]
fn sim_caesar_5_2_wait_test() {
    let _slow_paths = sim_test::<CaesarLocked>(
        caesar_config!(5, 2, true),
        READ_ONLY_PERCENTAGE,
        KEYS_PER_COMMAND,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
}
#[test]
fn sim_caesar_5_2_no_wait_test() {
    let _slow_paths = sim_test::<CaesarLocked>(
        caesar_config!(5, 2, false),
        READ_ONLY_PERCENTAGE,
        KEYS_PER_COMMAND,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
}
#[test]
fn run_caesar_3_1_wait_locked_test() {
    let workers = 4;
    let executors = 1;
    let _slow_paths = run_test::<CaesarLocked>(
        caesar_config!(3, 1, true),
        SHARD_COUNT,
        workers,
        executors,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
}
#[ignore]
#[test]
fn run_caesar_5_2_wait_locked_test() {
    let workers = 4;
    let executors = 1;
    let _slow_paths = run_test::<CaesarLocked>(
        caesar_config!(5, 2, true),
        SHARD_COUNT,
        workers,
        executors,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
}
// ---- fpaxos tests ---- //
// fpaxos is leader-based, so every config sets a leader
#[test]
fn sim_fpaxos_3_1_test() {
    let leader = 1;
    sim_test::<FPaxos>(
        config!(3, 1, leader),
        READ_ONLY_PERCENTAGE,
        KEYS_PER_COMMAND,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
}
#[test]
fn sim_fpaxos_5_2_test() {
    let leader = 1;
    sim_test::<FPaxos>(
        config!(5, 2, leader),
        READ_ONLY_PERCENTAGE,
        KEYS_PER_COMMAND,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
}
#[test]
fn run_fpaxos_3_1_sequential_test() {
    let leader = 1;
    // run fpaxos in sequential mode
    let workers = 1;
    let executors = 1;
    run_test::<FPaxos>(
        config!(3, 1, leader),
        SHARD_COUNT,
        workers,
        executors,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
}
#[test]
fn run_fpaxos_3_1_parallel_test() {
    let leader = 1;
    // run fpaxos in parallel mode (in terms of workers, since execution is
    // never parallel)
    let workers = 3;
    let executors = 1;
    run_test::<FPaxos>(
        config!(3, 1, leader),
        SHARD_COUNT,
        workers,
        executors,
        COMMANDS_PER_CLIENT,
        CLIENTS_PER_PROCESS,
    );
}
/// Inspect function handed to the runner so it can fetch each worker's
/// protocol metrics.
#[allow(dead_code)]
fn metrics_inspect<P: Protocol>(worker: &P) -> ProtocolMetrics {
    worker.metrics().clone()
}
/// Runs protocol `P` on the tokio runtime, aggregates per-worker metrics into
/// per-process metrics, and returns them after `check_metrics`.
fn run_test<P>(
    mut config: Config,
    shard_count: usize,
    workers: usize,
    executors: usize,
    commands_per_client: usize,
    clients_per_process: usize,
) -> ProtocolMetrics
where
    P: Protocol + Send + 'static,
{
    update_config(&mut config, shard_count);
    // create workload
    let payload_size = 1;
    let workload = Workload::new(
        shard_count,
        KEY_GEN,
        KEYS_PER_COMMAND,
        commands_per_client,
        payload_size,
    );
    // run until the clients end + another 10 seconds
    let extra_run_time = Some(Duration::from_secs(10));
    let metrics = tokio_test_runtime()
        .block_on(run_test_with_inspect_fun::<P, ProtocolMetrics>(
            config,
            workload,
            clients_per_process,
            workers,
            executors,
            Some(metrics_inspect),
            extra_run_time,
        ))
        .expect("run should complete successfully")
        .into_iter()
        .map(|(process_id, all_workers_metrics)| {
            // aggregate worker metrics
            let mut process_metrics = ProtocolMetrics::new();
            all_workers_metrics.into_iter().for_each(|worker_metrics| {
                process_metrics.merge(&worker_metrics);
            });
            (process_id, process_metrics)
        })
        .collect();
    check_metrics(config, commands_per_client, clients_per_process, metrics)
}
/// Runs protocol `P` in the discrete-event simulator (with message
/// reordering), checks that all processes observed the same execution order,
/// and returns the checked metrics.
fn sim_test<P: Protocol>(
    mut config: Config,
    read_only_percentage: usize,
    keys_per_command: usize,
    commands_per_client: usize,
    clients_per_process: usize,
) -> ProtocolMetrics {
    let shard_count = 1;
    update_config(&mut config, shard_count);
    // planet
    let planet = Planet::new();
    // clients workload
    let payload_size = 1;
    let mut workload = Workload::new(
        shard_count,
        KEY_GEN,
        keys_per_command,
        commands_per_client,
        payload_size,
    );
    workload.set_read_only_percentage(read_only_percentage);
    // process and client regions: one per process, taken from the planet
    let mut regions = planet.regions();
    regions.truncate(config.n());
    let process_regions = regions.clone();
    let client_regions = regions.clone();
    // create runner
    let mut runner: Runner<P> = Runner::new(
        planet,
        config,
        workload,
        clients_per_process,
        process_regions,
        client_regions,
    );
    // reorder network messages
    runner.reorder_messages();
    // run simulation until the clients end + another 10 seconds (for GC)
    let extra_sim_time = Some(Duration::from_secs(10));
    let (metrics, executors_monitors, _) = runner.run(extra_sim_time);
    // fetch slow paths and stable count from metrics
    let metrics = metrics
        .into_iter()
        .map(|(process_id, (process_metrics, _executors_metrics))| {
            (process_id, process_metrics)
        })
        .collect();
    let executors_monitors: Vec<_> = executors_monitors
        .into_iter()
        .map(|(process_id, order)| {
            let order = order
                .expect("processes should be monitoring execution orders");
            (process_id, order)
        })
        .collect();
    check_monitors(executors_monitors);
    check_metrics(config, commands_per_client, clients_per_process, metrics)
}
/// Adjusts `config` for testing: enables execution-order monitoring, garbage
/// collection, executed notifications, and sets the number of shards.
fn update_config(config: &mut Config, shard_count: usize) {
    // make sure execution order is monitored
    config.set_executor_monitor_execution_order(true);
    // make sure stability is running
    config.set_gc_interval(Duration::from_millis(100));
    // make sure executed notifications are being sent (this affects the
    // protocols that have implemented such functionality)
    config.set_executor_executed_notification_interval(
        Duration::from_millis(100),
    );
    // set number of shards
    config.set_shard_count(shard_count);
}
/// Checks that every process observed the same execution order; on the first
/// mismatch, delegates to `compute_diff_on_monitors` (which panics with a
/// readable diff).
fn check_monitors(
    mut executor_monitors: Vec<(ProcessId, ExecutionOrderMonitor)>,
) {
    // use the last monitor as the reference and compare all others against it
    let (reference_process, reference_monitor) = executor_monitors
        .pop()
        .expect("there's more than one process in the test");
    for (other_process, other_monitor) in executor_monitors {
        if reference_monitor != other_monitor {
            compute_diff_on_monitors(
                reference_process,
                reference_monitor,
                other_process,
                other_monitor,
            );
            return;
        }
    }
}
/// Compares two execution-order monitors key by key; for each key whose
/// orders diverge, `compute_diff_on_key` panics with a focused diff.
fn compute_diff_on_monitors(
    process_a: ProcessId,
    monitor_a: ExecutionOrderMonitor,
    process_b: ProcessId,
    monitor_b: ExecutionOrderMonitor,
) {
    assert_eq!(
        monitor_a.len(),
        monitor_b.len(),
        "monitors should have the same number of keys"
    );
    for key in monitor_a.keys() {
        let key_order_a = monitor_a
            .get_order(key)
            .expect("monitors should have the same keys");
        let key_order_b = monitor_b
            .get_order(key)
            .expect("monitors should have the same keys");
        compute_diff_on_key(
            key,
            process_a,
            key_order_a,
            process_b,
            key_order_b,
        );
    }
}
/// If the two per-key orders differ, panics with the smallest differing
/// window of both orders (trimmed on both ends) to keep the output readable.
fn compute_diff_on_key(
    key: &Key,
    process_a: ProcessId,
    key_order_a: &Vec<Rifl>,
    process_b: ProcessId,
    key_order_b: &Vec<Rifl>,
) {
    assert_eq!(
        key_order_a.len(),
        key_order_b.len(),
        "orders per key should have the same number of rifls"
    );
    let len = key_order_a.len();
    if key_order_a != key_order_b {
        // index of the first mismatch, scanning forward
        let first_different =
            find_different_rifl(key_order_a, key_order_b, 0..len);
        // exclusive end of the differing window: one past the last mismatch,
        // found by scanning backwards
        let last_equal = 1 + find_different_rifl(
            key_order_a,
            key_order_b,
            (0..len).rev(),
        );
        let key_order_a = key_order_a[first_different..last_equal].to_vec();
        let key_order_b = key_order_b[first_different..last_equal].to_vec();
        panic!(
            "different execution orders on key {:?}\n process {:?}: {:?}\n process {:?}: {:?}",
            key, process_a, key_order_a, process_b, key_order_b,
        )
    }
}
/// Returns the first index yielded by `range` at which the two slices differ.
///
/// Generalized to any comparable element type and to `&[T]` parameters
/// (callers passing `&Vec<Rifl>` still work via deref coercion).
///
/// # Panics
///
/// Reaches `unreachable!` if no index in `range` differs; callers must only
/// invoke this when the slices are known to differ within `range`.
fn find_different_rifl<T: PartialEq>(
    key_order_a: &[T],
    key_order_b: &[T],
    range: impl Iterator<Item = usize>,
) -> usize {
    for i in range {
        if key_order_a[i] != key_order_b[i] {
            return i;
        }
    }
    unreachable!(
        "the execution orders are different, so we must never reach this"
    )
}
/// Merges per-process metrics, prints a per-process summary, and asserts two
/// invariants: (1) for leaderless protocols, the number of committed commands
/// is within [min, max] bounds; (2) every expected process garbage-collected
/// all commands. Returns the merged metrics.
fn check_metrics(
    config: Config,
    commands_per_client: usize,
    clients_per_process: usize,
    metrics: HashMap<ProcessId, ProtocolMetrics>,
) -> ProtocolMetrics {
    let mut all_metrics = ProtocolMetrics::new();
    // check process stats
    metrics
        .into_iter()
        .for_each(|(process_id, process_metrics)| {
            println!(
                "process id = {} | fast = {} | slow = {} | fast(R) = {} | slow(R) = {} | stable = {}",
                process_id,
                process_metrics.fast_paths(),
                process_metrics.slow_paths(),
                process_metrics.fast_paths_reads(),
                process_metrics.slow_paths_reads(),
                process_metrics.stable()
            );
            all_metrics.merge(&process_metrics);
        });
    // compute the min and max number of MCommit messages:
    // - we have min, if all commands access a single shard
    // - we have max, if all commands access all shards
    let total_processes = config.n() * config.shard_count();
    let total_clients = clients_per_process * total_processes;
    let min_total_commits = commands_per_client * total_clients;
    let max_total_commits = min_total_commits * config.shard_count();
    // check that all commands were committed (only for leaderless
    // protocols)
    if config.leader().is_none() {
        let total_commits =
            (all_metrics.fast_paths() + all_metrics.slow_paths()) as usize;
        assert!(
            total_commits >= min_total_commits
                && total_commits <= max_total_commits,
            "number of committed commands out of bounds"
        );
    }
    // check GC:
    // - if there's a leader (i.e. FPaxos), GC will only prune commands at
    //   f+1 acceptors
    // - otherwise, GC will prune commands at all processes
    //
    // since GC only happens at the targeted shard, `gc_at` only considers
    // the size of the shard (i.e., no need to multiply by
    // `config.shard_count()`)
    let gc_at = if config.leader().is_some() {
        config.f() + 1
    } else {
        config.n()
    };
    assert_eq!(
        gc_at * min_total_commits,
        all_metrics.stable() as usize,
        "not all processes gced"
    );
    // return all metrics
    all_metrics
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/partial.rs | fantoch_ps/src/protocol/partial.rs | use fantoch::command::Command;
use fantoch::id::{Dot, ProcessId};
use fantoch::protocol::{Action, BaseProcess, Protocol};
use fantoch::HashSet;
use fantoch::{singleton, trace};
use std::fmt::Debug;
/// If this process belongs to the target shard (the shard the client sent the
/// command to), pushes one `MForwardSubmit` send-action for the closest
/// process of every *other* shard the command accesses, so those shards also
/// process the command.
pub fn submit_actions<P>(
    bp: &BaseProcess,
    dot: Dot,
    cmd: &Command,
    target_shard: bool,
    create_mforward_submit: impl Fn(Dot, Command) -> <P as Protocol>::Message,
    to_processes: &mut Vec<Action<P>>,
) where
    P: Protocol,
{
    if target_shard {
        // create forward submit messages if:
        // - we're the target shard (i.e. the shard to which the client sent
        //   the command)
        // - command touches more than one shard
        let my_shard_id = bp.shard_id;
        for shard_id in
            cmd.shards().filter(|shard_id| **shard_id != my_shard_id)
        {
            let mforward_submit = create_mforward_submit(dot, cmd.clone());
            let target = singleton![bp.closest_process(shard_id)];
            to_processes.push(Action::ToSend {
                target,
                msg: mforward_submit,
            })
        }
    }
}
/// Pushes the action(s) needed to commit `dot`: a final broadcast `MCommit`
/// when the command accesses a single shard, or an `MShardCommit` sent to the
/// dot's source process (the per-command aggregator) when it accesses
/// multiple shards.
pub fn mcommit_actions<P, I, D1, D2>(
    bp: &BaseProcess,
    shards_commits: &mut Option<ShardsCommits<I>>,
    shard_count: usize,
    dot: Dot,
    data1: D1,
    data2: D2,
    create_mcommit: impl FnOnce(Dot, D1, D2) -> <P as Protocol>::Message,
    create_mshard_commit: impl FnOnce(Dot, D1) -> <P as Protocol>::Message,
    update_shards_commits_info: impl FnOnce(&mut I, D2),
    to_processes: &mut Vec<Action<P>>,
) where
    P: Protocol,
    I: Default + Debug,
{
    match shard_count {
        1 => {
            // create `MCommit`
            final_mcommit_action(
                bp,
                dot,
                data1,
                data2,
                create_mcommit,
                to_processes,
            )
        }
        _ => {
            // if the command accesses more than one shard, send an
            // MCommitShard to the process in the shard targeted by the
            // client; this process will then aggregate all the MCommitShard
            // and send an MCommitShardAggregated back once it receives an
            // MCommitShard from each shard; assuming that all
            // shards take the fast path, this approach should work well; if
            // there are slow paths, we probably want to disseminate each
            // shard commit clock to all participants so that detached votes
            // are generated ASAP; with n = 3 or f = 1, this is not a
            // problem since we'll always take the fast path
            // - TODO: revisit this approach once we implement recovery for
            //   partial replication
            // initialize shards commit info if not yet initialized:
            // - it may already be initialized if we receive the `MCommitShard`
            //   from another shard before we were able to commit the command
            //   in our own shard
            let shards_commits =
                init_shards_commits(shards_commits, bp.process_id, shard_count);
            // update shards commit info
            shards_commits.update(|shards_commit_info| {
                update_shards_commits_info(shards_commit_info, data2)
            });
            // create `MShardCommit`
            let mshard_commit = create_mshard_commit(dot, data1);
            // the aggregation will occur at the process in the targeted shard
            // (which is the owner of the command's `dot`)
            let target = singleton!(dot.source());
            to_processes.push(Action::ToSend {
                target,
                msg: mshard_commit,
            });
        }
    }
}
/// Handles an `MShardCommit` at the aggregator process: folds `data` into the
/// aggregation state and, once every shard has reported, pushes an
/// `MShardAggregatedCommit` targeted at all participants.
pub fn handle_mshard_commit<P, I, D1>(
    bp: &BaseProcess,
    shards_commits: &mut Option<ShardsCommits<I>>,
    shard_count: usize,
    from: ProcessId,
    dot: Dot,
    data: D1,
    add_shards_commits_info: impl FnOnce(&mut I, D1),
    create_mshard_aggregated_commit: impl FnOnce(
        Dot,
        &I,
    ) -> <P as Protocol>::Message,
    to_processes: &mut Vec<Action<P>>,
) where
    P: Protocol,
    I: Default + Debug,
{
    // make sure shards commit info is initialized:
    // - it may not be if we receive the `MCommitShard` from another shard
    //   before we were able to commit the command in our own shard
    let shards_commits =
        init_shards_commits(shards_commits, bp.process_id, shard_count);
    // add new clock, checking if we have received all clocks
    let done = shards_commits.add(from, |shards_commits_info| {
        add_shards_commits_info(shards_commits_info, data)
    });
    if done {
        // create `MShardAggregatedCommit`
        let mshard_aggregated_commit =
            create_mshard_aggregated_commit(dot, &shards_commits.info);
        let target = shards_commits.participants.clone();
        // save new action
        to_processes.push(Action::ToSend {
            target,
            msg: mshard_aggregated_commit,
        });
    }
}
/// Handles an `MShardAggregatedCommit`: consumes the aggregation state,
/// extracts the extra commit data from it, and broadcasts the final
/// `MCommit` to all processes.
pub fn handle_mshard_aggregated_commit<P, I, D1, D2>(
    bp: &BaseProcess,
    shards_commits: &mut Option<ShardsCommits<I>>,
    dot: Dot,
    data1: D1,
    extract_mcommit_extra_data: impl FnOnce(I) -> D2,
    create_mcommit: impl FnOnce(Dot, D1, D2) -> <P as Protocol>::Message,
    to_processes: &mut Vec<Action<P>>,
) where
    P: Protocol,
{
    // the aggregation state must exist by now; its absence is a protocol bug
    let shards_commits = shards_commits.take().unwrap_or_else(|| {
        panic!("no shards commit info when handling MShardAggregatedCommit about dot {:?}", dot)
    });
    // turn the aggregated info into the extra data carried by the `MCommit`
    let data2 = extract_mcommit_extra_data(shards_commits.info);
    // broadcast the final `MCommit`
    final_mcommit_action(bp, dot, data1, data2, create_mcommit, to_processes)
}
/// Builds the final `MCommit` message for `dot` and pushes a send-action
/// targeting every process known to `bp`.
fn final_mcommit_action<P, D1, D2>(
    bp: &BaseProcess,
    dot: Dot,
    data1: D1,
    data2: D2,
    create_mcommit: impl FnOnce(Dot, D1, D2) -> <P as Protocol>::Message,
    to_processes: &mut Vec<Action<P>>,
) where
    P: Protocol,
{
    let action = Action::ToSend {
        target: bp.all(),
        msg: create_mcommit(dot, data1, data2),
    };
    to_processes.push(action);
}
/// Returns a mutable reference to the shards-commits aggregation state,
/// initializing it with default info first if it does not exist yet (it may
/// not, when a message from another shard arrives before this shard commits
/// the command locally).
fn init_shards_commits<'a, I>(
    shards_commits: &'a mut Option<ShardsCommits<I>>,
    process_id: ProcessId,
    shard_count: usize,
) -> &'a mut ShardsCommits<I>
where
    I: Default + Debug,
{
    // `get_or_insert_with` replaces the previous match + `as_mut().unwrap()`
    shards_commits.get_or_insert_with(|| {
        ShardsCommits::new(process_id, shard_count, I::default())
    })
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ShardsCommits<I> {
    // identifier of the process holding this state (used in tracing)
    process_id: ProcessId,
    // number of shards the command accesses; aggregation completes once a
    // message from each shard has been received
    shard_count: usize,
    // processes that have reported so far (one expected per shard)
    participants: HashSet<ProcessId>,
    // protocol-specific aggregated data (e.g. max commit clock, votes)
    info: I,
}
impl<I> ShardsCommits<I>
where
    I: Debug,
{
    /// Creates empty aggregation state for a command spanning `shard_count`
    /// shards.
    fn new(process_id: ProcessId, shard_count: usize, info: I) -> Self {
        let participants = HashSet::with_capacity(shard_count);
        Self {
            process_id,
            shard_count,
            participants,
            info,
        }
    }
    /// Registers a report from `from` and folds its data into `info` via
    /// `add`; returns `true` once every shard has reported.
    ///
    /// Panics if the same process reports twice (the `assert!` below).
    fn add(&mut self, from: ProcessId, add: impl FnOnce(&mut I)) -> bool {
        assert!(self.participants.insert(from));
        add(&mut self.info);
        trace!(
            "p{}: ShardsCommits::add {} | current info = {:?} | participants = {:?} | shard count = {}",
            self.process_id,
            from,
            self.info,
            self.participants,
            self.shard_count
        );
        // we're done once we have received a message from each shard
        self.participants.len() == self.shard_count
    }
    /// Updates the aggregated `info` in place without registering a
    /// participant (used by the local shard's own commit).
    pub fn update(&mut self, update: impl FnOnce(&mut I)) {
        update(&mut self.info);
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/tempo.rs | fantoch_ps/src/protocol/tempo.rs | use crate::executor::{TableExecutionInfo, TableExecutor};
use crate::protocol::common::synod::{Synod, SynodMessage};
use crate::protocol::common::table::{
AtomicKeyClocks, KeyClocks, LockedKeyClocks, QuorumClocks,
SequentialKeyClocks, Votes,
};
use crate::protocol::partial::{self, ShardsCommits};
use fantoch::command::Command;
use fantoch::config::Config;
use fantoch::id::{Dot, ProcessId, ShardId};
use fantoch::protocol::{
Action, BaseProcess, Info, MessageIndex, Protocol, ProtocolMetrics,
SequentialCommandsInfo, VClockGCTrack,
};
use fantoch::time::SysTime;
use fantoch::util;
use fantoch::{singleton, trace};
use fantoch::{HashMap, HashSet};
use serde::{Deserialize, Serialize};
use std::mem;
use std::time::Duration;
use threshold::VClock;
// `Tempo` instantiated with each available key-clocks implementation.
pub type TempoSequential = Tempo<SequentialKeyClocks>;
pub type TempoAtomic = Tempo<AtomicKeyClocks>;
// TODO: `TempoLocked` is just an (incorrect) experiment that tries to handle
// read commands differently than writes (while `TempoAtomic` treats all
// commands in the same way)
pub type TempoLocked = Tempo<LockedKeyClocks>;
#[derive(Debug, Clone)]
pub struct Tempo<KC: KeyClocks> {
    // common protocol state (ids, config, quorums, metrics)
    bp: BaseProcess,
    // per-key clocks used to compute timestamp proposals and votes
    key_clocks: KC,
    // per-dot command state (status, payload, votes, synod instance)
    cmds: SequentialCommandsInfo<TempoInfo>,
    // tracks committed dots for garbage collection
    gc_track: VClockGCTrack,
    // outboxes drained via `to_processes`/`to_executors`
    to_processes: Vec<Action<Self>>,
    to_executors: Vec<TableExecutionInfo>,
    // set of detached votes
    detached: Votes,
    // commit notifications that arrived before the initial `MCollect` message
    // (this may be possible even without network failures due to multiplexing)
    buffered_mcommits: HashMap<Dot, (ProcessId, u64, Votes)>,
    // `MBump` messages that arrived before the initial `MCollect` message
    buffered_mbumps: HashMap<Dot, u64>,
    // With many many operations, it can happen that logical clocks are
    // higher than the current time (e.g. if it starts at 0 in simulation), and
    // in that case, the real time feature of tempo doesn't work. Solution:
    // track the highest committed clock; when periodically bumping with real
    // time, use this value as the minimum value to bump to
    max_commit_clock: u64,
    // whether fast-quorum processes may skip the `MCollectAck` and reply with
    // an `MCommit` directly (set when configured and fast quorum size is 2)
    skip_fast_ack: bool,
}
impl<KC: KeyClocks> Protocol for Tempo<KC> {
    type Message = Message;
    type PeriodicEvent = PeriodicEvent;
    type Executor = TableExecutor;
    /// Creates a new `Tempo` process, returning it together with the periodic
    /// events (GC, clock bump, detached-vote send) enabled by `config`.
    fn new(
        process_id: ProcessId,
        shard_id: ShardId,
        config: Config,
    ) -> (Self, Vec<(Self::PeriodicEvent, Duration)>) {
        // compute fast and write quorum sizes
        let (fast_quorum_size, write_quorum_size, _) =
            config.tempo_quorum_sizes();
        // create protocol data-structures
        let bp = BaseProcess::new(
            process_id,
            shard_id,
            config,
            fast_quorum_size,
            write_quorum_size,
        );
        let key_clocks = KC::new(process_id, shard_id, config.nfr());
        let cmds = SequentialCommandsInfo::new(
            process_id,
            shard_id,
            config.n(),
            config.f(),
            fast_quorum_size,
            write_quorum_size,
        );
        let gc_track = VClockGCTrack::new(process_id, shard_id, config.n());
        let to_processes = Vec::new();
        let to_executors = Vec::new();
        let detached = Votes::new();
        let buffered_mcommits = HashMap::new();
        let buffered_mbumps = HashMap::new();
        let max_commit_clock = 0;
        // enable skip fast ack if configured like that and the fast quorum size
        // is 2
        let skip_fast_ack = config.skip_fast_ack() && fast_quorum_size == 2;
        // create `Tempo`
        let protocol = Self {
            bp,
            key_clocks,
            cmds,
            gc_track,
            to_processes,
            to_executors,
            detached,
            buffered_mcommits,
            buffered_mbumps,
            max_commit_clock,
            skip_fast_ack,
        };
        // maybe create garbage collection periodic event
        let mut events = if let Some(interval) = config.gc_interval() {
            vec![(PeriodicEvent::GarbageCollection, interval)]
        } else {
            vec![]
        };
        // maybe create clock bump periodic event
        if let Some(interval) = config.tempo_clock_bump_interval() {
            events.reserve_exact(1);
            events.push((PeriodicEvent::ClockBump, interval));
        }
        // maybe create send detached periodic event
        if let Some(interval) = config.tempo_detached_send_interval() {
            events.reserve_exact(1);
            events.push((PeriodicEvent::SendDetached, interval));
        }
        // return both
        (protocol, events)
    }
    /// Returns the process identifier.
    fn id(&self) -> ProcessId {
        self.bp.process_id
    }
    /// Returns the shard identifier.
    fn shard_id(&self) -> ShardId {
        self.bp.shard_id
    }
    /// Updates the processes known by this process.
    /// The set of processes provided is already sorted by distance.
    fn discover(
        &mut self,
        processes: Vec<(ProcessId, ShardId)>,
    ) -> (bool, HashMap<ShardId, ProcessId>) {
        let connect_ok = self.bp.discover(processes);
        (connect_ok, self.bp.closest_shard_process().clone())
    }
    /// Submits a command issued by some client.
    fn submit(&mut self, dot: Option<Dot>, cmd: Command, _time: &dyn SysTime) {
        self.handle_submit(dot, cmd, true);
    }
    /// Handles protocol messages, dispatching to the per-message handlers.
    fn handle(
        &mut self,
        from: ProcessId,
        from_shard_id: ShardId,
        msg: Self::Message,
        time: &dyn SysTime,
    ) {
        match msg {
            // Protocol messages
            Message::MCollect {
                dot,
                cmd,
                quorum,
                clock,
                coordinator_votes,
            } => self.handle_mcollect(
                from,
                dot,
                cmd,
                quorum,
                clock,
                coordinator_votes,
                time,
            ),
            Message::MCollectAck {
                dot,
                clock,
                process_votes,
            } => self.handle_mcollectack(from, dot, clock, process_votes, time),
            Message::MCommit { dot, clock, votes } => {
                self.handle_mcommit(from, dot, clock, votes, time)
            }
            Message::MCommitClock { clock } => {
                self.handle_mcommit_clock(from, clock, time)
            }
            Message::MDetached { detached } => {
                self.handle_mdetached(detached, time)
            }
            Message::MConsensus { dot, ballot, clock } => {
                self.handle_mconsensus(from, dot, ballot, clock, time)
            }
            Message::MConsensusAck { dot, ballot } => {
                self.handle_mconsensusack(from, dot, ballot, time)
            }
            // Partial replication
            Message::MForwardSubmit { dot, cmd } => {
                self.handle_submit(Some(dot), cmd, false)
            }
            Message::MBump { dot, clock } => {
                self.handle_mbump(dot, clock, time)
            }
            Message::MShardCommit { dot, clock } => {
                self.handle_mshard_commit(from, from_shard_id, dot, clock, time)
            }
            Message::MShardAggregatedCommit { dot, clock } => {
                self.handle_mshard_aggregated_commit(dot, clock, time)
            }
            // GC messages
            Message::MCommitDot { dot } => {
                self.handle_mcommit_dot(from, dot, time)
            }
            Message::MGarbageCollection { committed } => {
                self.handle_mgc(from, committed, time)
            }
            Message::MStable { stable } => {
                self.handle_mstable(from, stable, time)
            }
        }
    }
    /// Handles periodic local events.
    fn handle_event(&mut self, event: Self::PeriodicEvent, time: &dyn SysTime) {
        match event {
            PeriodicEvent::GarbageCollection => {
                self.handle_event_garbage_collection(time)
            }
            PeriodicEvent::ClockBump => self.handle_event_clock_bump(time),
            PeriodicEvent::SendDetached => {
                self.handle_event_send_detached(time)
            }
        }
    }
    /// Returns a new action to be sent to other processes.
    fn to_processes(&mut self) -> Option<Action<Self>> {
        self.to_processes.pop()
    }
    /// Returns new execution info for executors.
    fn to_executors(&mut self) -> Option<TableExecutionInfo> {
        self.to_executors.pop()
    }
    /// Whether this protocol can run with parallel workers (delegated to the
    /// key-clocks implementation).
    fn parallel() -> bool {
        KC::parallel()
    }
    /// Tempo is leaderless.
    fn leaderless() -> bool {
        true
    }
    fn metrics(&self) -> &ProtocolMetrics {
        self.bp.metrics()
    }
}
impl<KC: KeyClocks> Tempo<KC> {
    /// Handles a submit operation by a client.
    ///
    /// Assigns a dot (when not pre-assigned), records the command's key
    /// count, forwards the command to the other shards it accesses (when
    /// we're the target shard), computes a timestamp proposal — consuming
    /// votes — and broadcasts an `MCollect` to all processes.
    fn handle_submit(
        &mut self,
        dot: Option<Dot>,
        cmd: Command,
        target_shard: bool,
    ) {
        // compute the command identifier
        let dot = dot.unwrap_or_else(|| self.bp.next_dot());
        // record command size
        self.bp.collect_metric(
            fantoch::protocol::ProtocolMetricsKind::CommandKeyCount,
            cmd.total_key_count() as u64,
        );
        // create submit actions
        let create_mforward_submit =
            |dot, cmd| Message::MForwardSubmit { dot, cmd };
        partial::submit_actions(
            &self.bp,
            dot,
            &cmd,
            target_shard,
            create_mforward_submit,
            &mut self.to_processes,
        );
        // compute its clock:
        // - this may also consume votes since we're bumping the clocks here
        // - for that reason, we'll store these votes locally and not recompute
        //   them once we receive the `MCollect` from self
        let (clock, process_votes) = self.key_clocks.proposal(&cmd, 0);
        trace!(
            "p{}: bump_and_vote: {:?} | clock: {} | votes: {:?}",
            self.id(),
            dot,
            clock,
            process_votes
        );
        // get shard count
        let shard_count = cmd.shard_count();
        // send votes if we can bypass the mcollectack, otherwise store them
        // - if the command accesses more than one shard, the optimization is
        //   disabled; this is because the `MShardCommit` messages "need to" be
        //   aggregated at a single process (in order to reduce net traffic)
        let coordinator_votes = if self.skip_fast_ack && shard_count == 1 {
            process_votes
        } else {
            // get cmd info
            let info = self.cmds.get(dot);
            info.votes = process_votes;
            Votes::new()
        };
        // create `MCollect` and target
        let quorum = self.bp.maybe_adjust_fast_quorum(&cmd);
        let mcollect = Message::MCollect {
            dot,
            cmd,
            clock,
            coordinator_votes,
            quorum,
        };
        // TODO maybe just don't send to self with `self.bp.all_but_me()`
        let target = self.bp.all();
        // add `MCollect` send as action
        self.to_processes.push(Action::ToSend {
            target,
            msg: mcollect,
        });
    }
    /// Handles an `MCollect` message.
    ///
    /// Non-fast-quorum processes just store the payload (status `PAYLOAD`)
    /// and replay any buffered `MCommit`. Fast-quorum processes compute a
    /// timestamp proposal (unless the message is from self, whose votes were
    /// stored at submit time) and either reply with an `MCollectAck` or, when
    /// `skip_fast_ack` applies, commit directly.
    fn handle_mcollect(
        &mut self,
        from: ProcessId,
        dot: Dot,
        cmd: Command,
        quorum: HashSet<ProcessId>,
        remote_clock: u64,
        mut votes: Votes,
        time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MCollect({:?}, {:?}, {:?}, {}, {:?}) from {} | time={}",
            self.id(),
            dot,
            cmd,
            quorum,
            remote_clock,
            votes,
            from,
            time.micros()
        );
        // get cmd info
        let info = self.cmds.get(dot);
        // discard message if no longer in START
        if info.status != Status::START {
            return;
        }
        // check if part of fast quorum
        if !quorum.contains(&self.bp.process_id) {
            // if not:
            // - maybe initialize `self.key_clocks`
            // - simply save the payload and set status to `PAYLOAD`
            // - if we received the `MCommit` before the `MCollect`, handle the
            //   `MCommit` now
            if self.bp.config.tempo_clock_bump_interval().is_some() {
                // make sure there's a clock for each existing key:
                // - this ensures that all clocks will be bumped in the periodic
                //   clock bump event
                self.key_clocks.init_clocks(&cmd);
            }
            info.status = Status::PAYLOAD;
            info.cmd = Some(cmd);
            // check if there's a buffered commit notification; if yes, handle
            // the commit again (since now we have the payload)
            if let Some((from, clock, votes)) =
                self.buffered_mcommits.remove(&dot)
            {
                self.handle_mcommit(from, dot, clock, votes, time);
            }
            return;
        }
        // check if it's a message from self
        let message_from_self = from == self.bp.process_id;
        let (clock, process_votes) = if message_from_self {
            // if it is a message from self, do not recompute clock and votes
            (remote_clock, Votes::new())
        } else {
            // if not from self, compute clock considering `remote_clock` as the
            // minimum value
            let (clock, process_votes) =
                self.key_clocks.proposal(&cmd, remote_clock);
            trace!(
                "p{}: bump_and_vote: {:?} | clock: {} | votes: {:?}",
                self.bp.process_id,
                dot,
                clock,
                process_votes
            );
            // check that there's one vote per key
            debug_assert!(if self.bp.config.nfr() && cmd.nfr_allowed() {
                // in this case, check nothing
                true
            } else {
                process_votes.len() == cmd.key_count(self.bp.shard_id)
            });
            (clock, process_votes)
        };
        // if there are any buffered `MBump`'s, generate detached votes
        if let Some(bump_to) = self.buffered_mbumps.remove(&dot) {
            self.key_clocks.detached(&cmd, bump_to, &mut self.detached);
        }
        // get shard count
        let shard_count = cmd.shard_count();
        // update command info
        info.status = Status::COLLECT;
        info.cmd = Some(cmd);
        info.quorum_clocks
            .maybe_adjust_fast_quorum_size(quorum.len());
        info.quorum = quorum;
        // set consensus value
        assert!(info.synod.set_if_not_accepted(|| clock));
        // (see previous use of `self.skip_fast_ack` for an explanation of
        // what's going on here)
        if !message_from_self && self.skip_fast_ack && shard_count == 1 {
            votes.merge(process_votes);
            // if tiny quorums and f = 1, the fast quorum process can commit the
            // command right away; create `MCommit`
            Self::mcommit_actions(
                &self.bp,
                info,
                shard_count,
                dot,
                clock,
                votes,
                &mut self.to_processes,
            )
        } else {
            self.mcollect_actions(from, dot, clock, process_votes, shard_count)
        }
    }
    /// Handles an `MCollectAck` message at the coordinator.
    ///
    /// Merges the remote votes, tracks the max proposed clock, and once all
    /// fast-quorum replies are in, decides between the fast path (commit with
    /// `max_clock`) and the slow path (run consensus via `MConsensus`).
    fn handle_mcollectack(
        &mut self,
        from: ProcessId,
        dot: Dot,
        clock: u64,
        remote_votes: Votes,
        _time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MCollectAck({:?}, {}, {:?}) from {} | time={}",
            self.id(),
            dot,
            clock,
            remote_votes,
            from,
            _time.micros()
        );
        // get cmd info
        let info = self.cmds.get(dot);
        if info.status != Status::COLLECT {
            // do nothing if we're no longer COLLECT
            return;
        }
        // update votes with remote votes
        info.votes.merge(remote_votes);
        // update quorum clocks while computing max clock and its number of
        // occurrences
        let (max_clock, max_count) = info.quorum_clocks.add(from, clock);
        // check if it's a message from self
        let message_from_self = from == self.bp.process_id;
        // optimization: bump all keys clocks in `cmd` to be `max_clock`
        // - this prevents us from generating votes (either when clients submit
        //   new operations or when handling `MCollect` from other processes)
        //   that could potentially delay the execution of this command
        // - when skipping the mcollectack by fast quorum processes, the
        //   coordinator can't vote here; if it does, the votes generated here
        //   will never be sent in the MCommit message
        // - TODO: if we refactor votes to attached/detached business, then this
        //   is no longer a problem
        //
        // - TODO: it also seems that this (or the MCommit equivalent) must run
        //   with real time, otherwise there's a huge tail; but that doesn't
        //   make any sense; NOTE: this was probably before high-resolution
        //   real-time clocks
        let cmd = info.cmd.as_ref().unwrap();
        if !message_from_self {
            self.key_clocks.detached(cmd, max_clock, &mut self.detached);
        }
        // check if we have all necessary replies
        if info.quorum_clocks.all() {
            // compute threshold:
            // - if the fast quorum is n/2 + f, then the threshold is f
            // - if the fast quorum is a majority (for single-key reads with
            //   NFR), then the threshold is 1 (and thus the fast path is always
            //   taken)
            let minority = self.bp.config.majority_quorum_size() - 1;
            let threshold = info.quorum.len() - minority;
            debug_assert!(threshold <= self.bp.config.f());
            // fast path condition:
            // - if `max_clock` was reported by at least `threshold` processes
            let fast_path = max_count >= threshold;
            // fast path metrics
            self.bp.path(fast_path, cmd.read_only());
            if fast_path {
                // reset local votes as we're going to receive them right away;
                // this also prevents a `info.votes.clone()`
                let votes = Self::reset_votes(&mut info.votes);
                // create `MCommit`
                let shard_count = cmd.shard_count();
                Self::mcommit_actions(
                    &self.bp,
                    info,
                    shard_count,
                    dot,
                    max_clock,
                    votes,
                    &mut self.to_processes,
                )
            } else {
                // slow path: create `MConsensus`
                let ballot = info.synod.skip_prepare();
                let mconsensus = Message::MConsensus {
                    dot,
                    ballot,
                    clock: max_clock,
                };
                let target = self.bp.write_quorum();
                // save new action
                self.to_processes.push(Action::ToSend {
                    target,
                    msg: mconsensus,
                })
            }
        }
    }
    /// Handles an `MCommit` message: records the commit, creates per-key
    /// execution info for the executors, generates (or schedules) detached
    /// votes, and notifies the GC worker (or drops the dot info when GC is
    /// off). Buffers the message if the payload hasn't arrived yet.
    fn handle_mcommit(
        &mut self,
        from: ProcessId,
        dot: Dot,
        clock: u64,
        mut votes: Votes,
        _time: &dyn SysTime,
    ) {
        let _id = self.id();
        trace!(
            "p{}: MCommit({:?}, {}, {:?}) | time={}",
            _id,
            dot,
            clock,
            votes,
            _time.micros()
        );
        // get cmd info
        let info = self.cmds.get(dot);
        if info.status == Status::START {
            // save this notification just in case we've received the `MCollect`
            // and `MCommit` in opposite orders (due to multiplexing)
            self.buffered_mcommits.insert(dot, (from, clock, votes));
            return;
        }
        if info.status == Status::COMMIT {
            // do nothing if we're already COMMIT
            return;
        }
        // create execution info
        let cmd = info
            .cmd
            .as_ref()
            .expect("there should be a command payload");
        let rifl = cmd.rifl();
        let execution_info = cmd.iter(self.bp.shard_id).map(|(key, ops)| {
            // find votes on this key
            let key_votes = votes.remove(&key).unwrap_or_default();
            let shard_to_keys = cmd.shard_to_keys().clone();
            trace!(
                "p{}: MCommit({:?}) key {:?} | shard to keys {:?} | time={}",
                _id,
                dot,
                key,
                shard_to_keys,
                _time.micros()
            );
            TableExecutionInfo::attached_votes(
                dot,
                clock,
                key.clone(),
                rifl,
                shard_to_keys,
                ops.clone(),
                key_votes,
            )
        });
        self.to_executors.extend(execution_info);
        // update command info:
        info.status = Status::COMMIT;
        // handle commit in synod
        let msg = SynodMessage::MChosen(clock);
        assert!(info.synod.handle(from, msg).is_none());
        // don't try to generate detached votes if configured with real time
        // (since it will be done in a periodic event)
        if self.bp.config.tempo_clock_bump_interval().is_some() {
            // in this case, only notify the clock bump worker of the commit
            // clock
            self.to_processes.push(Action::ToForward {
                msg: Message::MCommitClock { clock },
            });
        } else {
            // try to generate detached votes
            self.key_clocks.detached(cmd, clock, &mut self.detached);
        }
        // check if this dot is targetted to my shard
        // TODO: fix this once we implement recovery for partial replication
        let my_shard = util::process_ids(self.bp.shard_id, self.bp.config.n())
            .any(|peer_id| peer_id == dot.source());
        if self.gc_running() && my_shard {
            // if running gc and this dot belongs to my shard, then notify self
            // (i.e. the worker responsible for GC) with the committed dot
            self.to_processes.push(Action::ToForward {
                msg: Message::MCommitDot { dot },
            });
        } else {
            // not running gc, so remove the dot info now
            self.cmds.gc_single(dot);
        }
    }
fn handle_mcommit_clock(
&mut self,
from: ProcessId,
clock: u64,
_time: &dyn SysTime,
) {
trace!(
"p{}: MCommitClock({}) | time={}",
self.id(),
clock,
_time.micros()
);
assert_eq!(from, self.bp.process_id);
// simply update the highest commit clock
self.max_commit_clock = std::cmp::max(self.max_commit_clock, clock);
}
fn handle_mbump(&mut self, dot: Dot, clock: u64, _time: &dyn SysTime) {
trace!(
"p{}: MBump({:?}, {}) | time={}",
self.id(),
dot,
clock,
_time.micros()
);
// get cmd info
let info = self.cmds.get(dot);
// maybe bump up to `clock`
if let Some(cmd) = info.cmd.as_ref() {
// we have the payload, thus we can bump to `clock`
self.key_clocks.detached(cmd, clock, &mut self.detached);
} else {
// in this case we don't have the payload (which means we have
// received the `MBump` from some shard before `MCollect` from my
// shard); thus, buffer this request and handle it when we do
// receive the `MCollect` (see `handle_mcollect`)
let current = self.buffered_mbumps.entry(dot).or_default();
// if the command acesses more than two shards, we could receive
// several `MBump`'s before the `MCollect`; in this case, save the
// highest one
*current = std::cmp::max(*current, clock);
}
}
fn handle_mdetached(&mut self, detached: Votes, _time: &dyn SysTime) {
trace!(
"p{}: MDetached({:?}) | time={}",
self.id(),
detached,
_time.micros()
);
// create execution info
let execution_info = detached.into_iter().map(|(key, key_votes)| {
TableExecutionInfo::detached_votes(key, key_votes)
});
self.to_executors.extend(execution_info);
}
    /// Handles an `MConsensus` (slow-path accept) message: bumps local clocks
    /// with the proposed value when the payload is known, then replies to the
    /// sender with an `MConsensusAck` (accepted), an `MCommit` (value already
    /// chosen), or nothing (ballot too low).
    fn handle_mconsensus(
        &mut self,
        from: ProcessId,
        dot: Dot,
        ballot: u64,
        clock: ConsensusValue,
        _time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MConsensus({:?}, {}, {:?}) | time={}",
            self.id(),
            dot,
            ballot,
            clock,
            _time.micros()
        );
        // get cmd info
        let info = self.cmds.get(dot);
        // maybe bump up to `clock`
        if let Some(cmd) = info.cmd.as_ref() {
            // we have the payload, thus we can bump to `clock`
            self.key_clocks.detached(cmd, clock, &mut self.detached);
        }
        // compute message: that can either be nothing, an ack or an mcommit
        let msg = match info
            .synod
            .handle(from, SynodMessage::MAccept(ballot, clock))
        {
            Some(SynodMessage::MAccepted(ballot)) => {
                // the accept message was accepted: create `MConsensusAck`
                Message::MConsensusAck { dot, ballot }
            }
            Some(SynodMessage::MChosen(clock)) => {
                // the value has already been chosen: fetch votes and create `MCommit`
                // TODO: check if in recovery we will have enough votes to make the command stable
                let votes = info.votes.clone();
                Message::MCommit { dot, clock, votes }
            }
            None => {
                // ballot too low to be accepted: nothing to do
                return;
            }
            _ => panic!(
                "no other type of message should be output by Synod in the MConsensus handler"
            ),
        };
        // create target
        let target = singleton![from];
        // save new action
        self.to_processes.push(Action::ToSend { target, msg });
    }
    /// Handles an `MConsensusAck` at the slow-path proposer: once enough
    /// accepts are gathered and the value is chosen, creates the `MCommit`
    /// actions (single- or multi-shard).
    fn handle_mconsensusack(
        &mut self,
        from: ProcessId,
        dot: Dot,
        ballot: u64,
        _time: &dyn SysTime,
    ) {
        trace!(
            "p{}: MConsensusAck({:?}, {}) | time={}",
            self.id(),
            dot,
            ballot,
            _time.micros()
        );
        // get cmd info
        let info = self.cmds.get(dot);
        // compute message: that can either be nothing or an mcommit
        match info.synod.handle(from, SynodMessage::MAccepted(ballot)) {
            Some(SynodMessage::MChosen(clock)) => {
                // reset local votes as we're going to receive them right away;
                // this also prevents a `info.votes.clone()`
                // TODO: check if in recovery we will have enough votes to make the command stable
                let votes = Self::reset_votes(&mut info.votes);
                // enough accepts were gathered and the value has been chosen; create `MCommit`
                let shard_count = info.cmd.as_ref().unwrap().shard_count();
                Self::mcommit_actions(&self.bp, info, shard_count, dot, clock, votes, &mut self.to_processes)
            }
            None => {
                // not enough accepts yet: nothing to do
            }
            _ => panic!(
                "no other type of message should be output by Synod in the MConsensusAck handler"
            ),
        }
    }
fn handle_mshard_commit(
&mut self,
from: ProcessId,
_from_shard_id: ShardId,
dot: Dot,
clock: u64,
_time: &dyn SysTime,
) {
trace!(
"p{}: MShardCommit({:?}, {}) from shard {} | time={}",
self.id(),
dot,
clock,
_from_shard_id,
_time.micros()
);
// get cmd info
let info = self.cmds.get(dot);
let shard_count = info.cmd.as_ref().unwrap().shard_count();
let add_shards_commits_info =
|shards_commit_info: &mut ShardsCommitsInfo, clock| {
shards_commit_info.add(clock)
};
let create_mshard_aggregated_commit =
|dot, shards_commit_info: &ShardsCommitsInfo| {
Message::MShardAggregatedCommit {
dot,
clock: shards_commit_info.max_clock,
}
};
partial::handle_mshard_commit(
&self.bp,
&mut info.shards_commits,
shard_count,
from,
dot,
clock,
add_shards_commits_info,
create_mshard_aggregated_commit,
&mut self.to_processes,
)
}
fn handle_mshard_aggregated_commit(
&mut self,
dot: Dot,
clock: u64,
_time: &dyn SysTime,
) {
trace!(
"p{}: MShardAggregatedCommit({:?}, {}) | time={}",
self.id(),
dot,
clock,
_time.micros()
);
// get cmd info
let info = self.cmds.get(dot);
let extract_mcommit_extra_data =
|shards_commit_info: ShardsCommitsInfo| {
shards_commit_info
.votes
.expect("votes in shard commit info should be set")
};
let create_mcommit =
|dot, clock, votes| Message::MCommit { dot, clock, votes };
partial::handle_mshard_aggregated_commit(
&self.bp,
&mut info.shards_commits,
dot,
clock,
extract_mcommit_extra_data,
create_mcommit,
&mut self.to_processes,
)
}
fn handle_mcommit_dot(
&mut self,
from: ProcessId,
dot: Dot,
_time: &dyn SysTime,
) {
trace!(
"p{}: MCommitDot({:?}) | time={}",
self.id(),
dot,
_time.micros()
);
assert_eq!(from, self.bp.process_id);
self.gc_track.add_to_clock(&dot);
}
fn handle_mgc(
&mut self,
from: ProcessId,
committed: VClock<ProcessId>,
_time: &dyn SysTime,
) {
trace!(
"p{}: MGarbageCollection({:?}) from {} | time={}",
self.id(),
committed,
from,
_time.micros()
);
self.gc_track.update_clock_of(from, committed);
// compute newly stable dots
let stable = self.gc_track.stable();
// create `ToForward` to self
if !stable.is_empty() {
self.to_processes.push(Action::ToForward {
msg: Message::MStable { stable },
});
}
}
fn handle_mstable(
&mut self,
from: ProcessId,
stable: Vec<(ProcessId, u64, u64)>,
_time: &dyn SysTime,
) {
trace!(
"p{}: MStable({:?}) from {} | time={}",
self.id(),
stable,
from,
_time.micros()
);
assert_eq!(from, self.bp.process_id);
let stable_count = self.cmds.gc(stable);
self.bp.stable(stable_count);
}
fn handle_event_garbage_collection(&mut self, _time: &dyn SysTime) {
trace!(
"p{}: PeriodicEvent::GarbageCollection | time={}",
self.id(),
_time.micros()
);
// retrieve the committed clock
let committed = self.gc_track.clock().frontier();
// save new action
self.to_processes.push(Action::ToSend {
target: self.bp.all_but_me(),
msg: Message::MGarbageCollection { committed },
});
}
fn handle_event_clock_bump(&mut self, time: &dyn SysTime) {
trace!(
"p{}: PeriodicEvent::ClockBump | time={}",
self.id(),
time.micros()
);
// vote up to:
// - highest committed clock or
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | true |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/mod.rs | fantoch_ps/src/protocol/common/mod.rs | // This module contains definitions common to dependency-graph-based protocols.
pub mod graph;
// This module contains definitions common to votes-table-based protocols.
pub mod table;
// This module contains definitions common to predecessors-based protocols.
pub mod pred;
// This module contains the implementation of Paxos single and multi-decree
// Synod Protocols.
pub mod synod;
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/synod/gc.rs | fantoch_ps/src/protocol/common/synod/gc.rs | use fantoch::id::ProcessId;
use fantoch::trace;
use fantoch::HashMap;
use threshold::{AboveExSet, EventSet};
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct GCTrack {
process_id: ProcessId,
n: usize,
committed: AboveExSet,
all_but_me: HashMap<ProcessId, u64>,
previous_stable: u64,
}
impl GCTrack {
pub fn new(process_id: ProcessId, n: usize) -> Self {
// committed clocks from all processes but self
let all_but_me = HashMap::with_capacity(n - 1);
Self {
process_id,
n,
committed: AboveExSet::new(),
all_but_me,
previous_stable: 0,
}
}
/// Records that a command has been committed.
pub fn commit(&mut self, slot: u64) {
self.committed.add_event(slot);
}
/// Returns a clock representing the set of commands committed locally.
/// Note that there might be more commands committed than the ones being
/// represented by the returned clock.
pub fn committed(&self) -> u64 {
self.committed.frontier()
}
/// Records that set of `committed` commands by process `from`.
pub fn committed_by(&mut self, from: ProcessId, committed: u64) {
self.all_but_me.insert(from, committed);
}
/// Computes the new set of stable slots.
pub fn stable(&mut self) -> (u64, u64) {
// compute new stable slot
let new_stable = self.stable_slot();
trace!("GCTrack::stable_clock {:?}", new_stable);
// compute stable slot range
let slot_range = (self.previous_stable + 1, new_stable);
// update the previous stable slot
self.previous_stable = new_stable;
// and return newly stable slots
slot_range
}
// TODO we should design a fault-tolerant version of this
fn stable_slot(&mut self) -> u64 {
if self.all_but_me.len() != self.n - 1 {
// if we don't have info from all processes, then there are no
// stable dots.
return 0;
}
// start from our own frontier
let mut stable = self.committed.frontier();
// and intersect with all the other clocks
self.all_but_me.values().for_each(|&clock| {
stable = std::cmp::min(stable, clock);
});
stable
}
}
#[cfg(test)]
mod tests {
use super::*;
fn slots((start, end): (u64, u64)) -> Vec<u64> {
(start..=end).collect()
}
#[test]
fn gc_flow() {
let n = 2;
// create new gc track for the our process: 1
let mut gc = GCTrack::new(1, n);
// let's also create a gc track for process 2
let mut gc2 = GCTrack::new(2, n);
// there's nothing committed and nothing stable
assert_eq!(gc.committed(), 0);
assert_eq!(gc.stable_slot(), 0);
assert_eq!(slots(gc.stable()), Vec::<u64>::new());
// and commit slot 2 locally
gc.commit(2);
// this doesn't change anything
assert_eq!(gc.committed(), 0);
assert_eq!(gc.stable_slot(), 0);
assert_eq!(slots(gc.stable()), Vec::<u64>::new());
// however, if we also commit slot 1, the committed clock will change
gc.commit(1);
assert_eq!(gc.committed(), 2);
assert_eq!(gc.stable_slot(), 0);
assert_eq!(slots(gc.stable()), Vec::<u64>::new());
// if we update with the committed clock from process 2 nothing changes
gc.committed_by(2, gc2.committed());
assert_eq!(gc.committed(), 2);
assert_eq!(gc.stable_slot(), 0);
assert_eq!(slots(gc.stable()), Vec::<u64>::new());
// let's commit slot 1 and slot 3 at process 2
gc2.commit(1);
gc2.commit(3);
// now dot11 is stable at process 1
gc.committed_by(2, gc2.committed());
assert_eq!(gc.committed(), 2);
assert_eq!(gc.stable_slot(), 1);
assert_eq!(slots(gc.stable()), vec![1]);
// if we call stable again, no new dot is returned
assert_eq!(gc.stable_slot(), 1);
assert_eq!(slots(gc.stable()), Vec::<u64>::new());
// let's commit slot 3 at process 1 and slot 2 at process 2
gc.commit(3);
gc2.commit(2);
// now both dot12 and dot13 are stable at process 1
gc.committed_by(2, gc2.committed());
assert_eq!(gc.committed(), 3);
assert_eq!(gc.stable_slot(), 3);
assert_eq!(slots(gc.stable()), vec![2, 3]);
assert_eq!(slots(gc.stable()), Vec::<u64>::new());
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/synod/mod.rs | fantoch_ps/src/protocol/common/synod/mod.rs | // This module contains the implementation of Paxos single-decree Synod
// Protocols.
mod single;
// This module contains the implementation of Paxos multi-decree Synod
// Protocols.
mod multi;
// This module contains common functionality from tracking when it's safe to
// garbage-collect a command, i.e., when it's been committed at all processes.
mod gc;
// Re-exports.
pub use gc::GCTrack;
pub use multi::{MultiSynod, MultiSynodMessage};
pub use single::{Synod, SynodMessage};
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/synod/single.rs | fantoch_ps/src/protocol/common/synod/single.rs | use fantoch::id::ProcessId;
use fantoch::{HashMap, HashSet};
use std::mem;
type Ballot = u64;
/// Implementation of Flexible single-decree Paxos in which:
/// - phase-1 waits for n - f promises
/// - phase-2 waits for f + 1 accepts
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum SynodMessage<V> {
// to be handled by the top-level module
MChosen(V),
// messages to acceptor
MPrepare(Ballot),
MAccept(Ballot, V),
// messages to proposer
MPromise(Ballot, Accepted<V>),
MAccepted(Ballot),
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Synod<V> {
// paxos agents
proposer: Proposer<V>,
acceptor: Acceptor<V>,
chosen: bool,
}
impl<V> Synod<V>
where
V: Clone,
{
/// Creates a new Synod instance.
/// After executing phase-1, if the proposer sees that no proposal has been
/// accepted before, it resorts to the proposal generator to compute a
/// new consensus proposal given all values reported by the phase-1
/// quorum. We know that none of the values reported were accepted
/// because they're tagged with ballot 0.
pub fn new(
process_id: ProcessId,
n: usize,
f: usize,
proposal_gen: fn(HashMap<ProcessId, V>) -> V,
initial_value: V,
) -> Self {
Self {
proposer: Proposer::new(process_id, n, f, proposal_gen),
acceptor: Acceptor::new(initial_value),
chosen: false,
}
}
/// Set the consensus value if no value has been accepted yet (i.e. ballot
/// is still 0). If the value was successfully changed, `true` is
/// returned and `false` otherwise.
#[must_use]
pub fn set_if_not_accepted<F>(&mut self, value_gen: F) -> bool
where
F: FnOnce() -> V,
{
self.acceptor.set_if_not_accepted(value_gen)
}
/// Returns the current consensus value (not necessarily accepted).
pub fn value(&self) -> &V {
self.acceptor.value()
}
/// Creates a new prepare message with a ballot onwed by this process. This
/// ballot is greater than any ballot seen the by local acceptor agent.
/// Feeding the proposer with the highest ballot seen by the acceptor
/// increases the likelyhood of having this new prepare accepted. And
/// since the created Prepare will be delivered imediately at the local
/// acceptor, this ensures that the ballots created are unique.
///
/// TODO how do we ensure that the prepare is delivered immediately?
pub fn new_prepare(&mut self) -> SynodMessage<V> {
self.proposer.new_prepare(&self.acceptor)
}
/// Skips prepare phase and generates the first ballot (panics if not the
/// first). This ballot can be safely used (without a prepare phase) in
/// the slow path by the original coordinator because any other ballot
/// will be higher than n, and thus, it's not possible that
/// any other proposal could have been accepted at a lower ballot.
pub fn skip_prepare(&mut self) -> Ballot {
self.proposer.skip_prepare(&self.acceptor)
}
/// Handles `SynodMessage`s generated by this `Synod` module by forwarding
/// them to the proper Paxos agent (i.e. either the proposer or the
/// acceptor).
pub fn handle(
&mut self,
from: ProcessId,
msg: SynodMessage<V>,
) -> Option<SynodMessage<V>> {
match msg {
// once this is handled, and the acceptor's value is blindly
// changed, any message to be handled by the acceptor
// must only be forwarded if the value has not been chosen
SynodMessage::MChosen(value) => {
self.chosen = true;
self.acceptor.set_value(value);
None
}
// handle messages to acceptor
SynodMessage::MPrepare(b) => {
self.chosen().or_else(|| self.acceptor.handle_prepare(b))
}
SynodMessage::MAccept(b, value) => self
.chosen()
.or_else(|| self.acceptor.handle_accept(b, value)),
// handle messages to proposer
SynodMessage::MPromise(b, previous) => {
self.proposer.handle_promise(from, b, previous)
}
SynodMessage::MAccepted(b) => {
self.proposer.handle_accepted(from, b, &self.acceptor)
}
}
}
/// Return chosen value if it has been chosen.
fn chosen(&self) -> Option<SynodMessage<V>> {
if self.chosen {
// get chosen value
let value = self.acceptor.value().clone();
let chosen = SynodMessage::MChosen(value);
Some(chosen)
} else {
None
}
}
}
type Promises<V> = HashMap<ProcessId, Accepted<V>>;
type Accepts = HashSet<ProcessId>;
type Proposal<V> = Option<V>;
#[derive(Debug, Clone, PartialEq, Eq)]
struct Proposer<V> {
// process identifier
process_id: ProcessId,
// number of processes
n: usize,
// maximum number of allowed failures
f: usize,
// ballot used in prepare
ballot: Ballot,
// proposal generator that will be used once enough promises have been
// collected
proposal_gen: fn(HashMap<ProcessId, V>) -> V,
// what follows is paper-slip state:
// - promises: mapping from phase-1 quorum processes to the values in their
// promises
// - accepts: set of processes that have accepted a proposal
// - proposal: proposal generated by the proposal generator
promises: Promises<V>,
accepts: Accepts,
proposal: Proposal<V>,
}
impl<V> Proposer<V>
where
V: Clone,
{
/// Creates a new proposer.
fn new(
process_id: ProcessId,
n: usize,
f: usize,
proposal_gen: fn(HashMap<ProcessId, V>) -> V,
) -> Self {
Self {
process_id,
n,
f,
ballot: 0,
proposal_gen,
promises: HashMap::new(),
accepts: HashSet::new(),
proposal: None,
}
}
/// Generates a new prepare. See top-level docs (in `Synod`) for more info.
fn new_prepare(&mut self, acceptor: &Acceptor<V>) -> SynodMessage<V> {
// the acceptor's ballot should be at least as high as the proposer's
// ballot (if this is not the case, it's because prepare
// messages are not delivered locally immediately)
assert!(acceptor.ballot() >= self.ballot);
// generate the next ballot
self.next_ballot(acceptor);
// the new ballot should be higher than the acceptor's ballot
assert!(acceptor.ballot() < self.ballot);
// reset paper-slip state
self.reset_state();
// create prepare message
SynodMessage::MPrepare(self.ballot)
}
/// Skips prepare phase. See top-level docs (in `Synod`) for more info.
fn skip_prepare(&mut self, acceptor: &Acceptor<V>) -> Ballot {
assert_eq!(acceptor.ballot(), 0);
self.ballot = self.process_id as Ballot;
self.ballot
}
/// Changes the ballot to a ballot owned by this proposer. This new ballot
/// is higher than the ballot the acceptor is currently in, which should
/// increase the likelyhood of this ballot being accepted by other
/// acceptors.
fn next_ballot(&mut self, acceptor: &Acceptor<V>) {
// get number of processes
let n = self.n as u64;
// compute "round" of current ballot
let round = acceptor.ballot() / n;
// compute the next "round"
let next_round = round + 1;
// compute ballot owned by this process in the next round
self.ballot = self.process_id as Ballot + n * next_round;
}
/// Resets the local (paper-slip) state (promises received, accepts
/// received, and proposal sent), returning the previous value of
/// promises and proposal. TODO quick check tells me it's in fact not
/// necessary to reset the proposal (which makes sense). However, for
/// performance reasons (saving a `.clone()`), I'll keep the `mem::take`.
fn reset_state(&mut self) -> (Promises<V>, Proposal<V>) {
// reset promises
let promises = mem::take(&mut self.promises);
// reset accepts
self.accepts = HashSet::new();
// reset proposal
let proposal = mem::take(&mut self.proposal);
// return previous promises and proposal
(promises, proposal)
}
fn handle_promise(
&mut self,
from: ProcessId,
b: Ballot,
accepted: Accepted<V>,
) -> Option<SynodMessage<V>> {
// check if it's a promise about the current ballot (so that we only
// process promises about the current ballot)
if self.ballot == b {
// if yes, update set of promises
self.promises.insert(from, accepted);
// check if we have enough (i.e. n - f) promises
if self.promises.len() == self.n - self.f {
// if we do, check if any value has been accepted before:
// - if yes, select the value accepted at the highest ballot
// - if not, generate proposal using the generator
// reset state and get promises
let (mut promises, _) = self.reset_state();
// compute the proposal accepted at the highest ballot
// - if there's more than one proposal with the same highest
// ballot, then they have the same value, and thus any can be
// selected
let (highest_ballot, from) = promises
.iter()
// get highest proposal
.max_by_key(|(_process, (ballot, _value))| ballot)
// extract ballot and process
.map(|(process, (ballot, _))| (*ballot, *process))
.expect("there should n - f promises, and thus, there's a highest value");
// compute our proposal depending on whether there was a
// previously accepted proposal
let proposal = if highest_ballot == 0 {
// if the highest ballot is 0, use the proposal generator to
// generate anything we want
// TODO do we need to collect here? also, maybe we could
// simply upstream the ballots, even
// though they're all 0
let values = promises
.into_iter()
.map(|(process, (_ballot, value))| (process, value))
.collect();
(self.proposal_gen)(values)
} else {
// otherwise, we must propose the value accepted at the
// highest ballot TODO this scheme of
// removing the value from `promises` prevents cloning
// the value when we only have a reference to it; is there a
// better way?
promises.remove(&from).map(|(_ballot, value)| value).expect(
"a promise from this process must exists as it was the highest promise",
)
};
// save the proposal
self.proposal = Some(proposal.clone());
// create accept message
let accept = SynodMessage::MAccept(b, proposal);
return Some(accept);
}
}
None
}
fn handle_accepted(
&mut self,
from: ProcessId,
b: Ballot,
acceptor: &Acceptor<V>,
) -> Option<SynodMessage<V>> {
// check if it's an accept about the current ballot (so that we only
// process accepts about the current ballot)
if self.ballot == b {
// if yes, update set of accepts
self.accepts.insert(from);
// check if we have enough (i.e. f + 1) accepts
if self.accepts.len() == self.f + 1 {
// if we do, our proposal can be chosen
// reset state and get proposal
let (_, proposal) = self.reset_state();
// compute proposal
// TODO could the proposal be the value that is currently stored
// in the local acceptor, or could that value
// have been overwritten in the meantime?
let proposal = if let Some(proposal) = proposal {
// if there was a proposal, use it
proposal
} else {
// otherwise, check if we're still at the first ballot (that
// doesn't need to be prepared)
match &acceptor.accepted {
(ballot, value) if *ballot == self.process_id as Ballot => value.clone(),
_ => panic!("there should have been proposal before a value can be chosen (or we should still be at the first ballot)"),
}
};
// create chosen message
let chosen = SynodMessage::MChosen(proposal);
return Some(chosen);
}
}
None
}
}
// The first component is the ballot in which the value (the second component)
// was accepted. If the ballot is 0, the value has not been accepted yet.
type Accepted<Value> = (Ballot, Value);
#[derive(Debug, Clone, PartialEq, Eq)]
struct Acceptor<Value> {
ballot: Ballot,
accepted: Accepted<Value>,
}
impl<V> Acceptor<V>
where
V: Clone,
{
fn new(initial_value: V) -> Self {
Self {
ballot: 0,
accepted: (0, initial_value),
}
}
// Set the consensus value if no value has been accepted yet.
fn set_if_not_accepted<F>(&mut self, value_gen: F) -> bool
where
F: FnOnce() -> V,
{
if self.ballot == 0 {
self.accepted = (0, value_gen());
true
} else {
false
}
}
// Set the consensus value when it's chosen. This API shouldn't be exposed
// publicly and should be used with care.
fn set_value(&mut self, value: V) {
self.accepted = (0, value);
}
// Retrieves consensus value (not necessarily accepted).
fn value(&self) -> &V {
let (_, v) = &self.accepted;
v
}
// Returns the ballot that the acceptor is currently in.
fn ballot(&self) -> Ballot {
self.ballot
}
// The reply to this prepare request contains:
// - a promise to never accept a proposal numbered less than `b`
// - the proposal accepted with the highest number less than `b`, if any
fn handle_prepare(&mut self, b: Ballot) -> Option<SynodMessage<V>> {
// since we need to promise that we won't accept any proposal numbered
// less then `b`, there's no point in letting such proposal be
// prepared, and so, we ignore such prepares
if b > self.ballot {
// update current ballot
self.ballot = b;
// create promise message
let promise = SynodMessage::MPromise(b, self.accepted.clone());
Some(promise)
} else {
None
}
}
fn handle_accept(
&mut self,
b: Ballot,
value: V,
) -> Option<SynodMessage<V>> {
if b >= self.ballot {
// update current ballot
self.ballot = b;
// update the accepted value
self.accepted = (b, value);
// create accepted message
let accepted = SynodMessage::MAccepted(b);
Some(accepted)
} else {
None
}
}
}
#[cfg(test)]
mod tests {
use super::*;
// generate proposals by multiplying all the values reported by phase-1
// quorum processes
fn proposal_gen(values: HashMap<ProcessId, u64>) -> u64 {
values.into_iter().map(|(_, v)| v).fold(1, |acc, v| acc * v)
}
#[test]
fn synod_flow() {
// n and f
let n = 5;
let f = 1;
// create all synods
let mut synod_1 = Synod::new(1, n, f, proposal_gen, 2);
let mut synod_2 = Synod::new(2, n, f, proposal_gen, 3);
let mut synod_3 = Synod::new(3, n, f, proposal_gen, 5);
let mut synod_4 = Synod::new(4, n, f, proposal_gen, 7);
let mut synod_5 = Synod::new(5, n, f, proposal_gen, 11);
// check values
assert_eq!(synod_1.value(), &2);
assert_eq!(synod_2.value(), &3);
assert_eq!(synod_3.value(), &5);
assert_eq!(synod_4.value(), &7);
assert_eq!(synod_5.value(), &11);
// check it's possible to set values (as ballots are still 0), and check
// value
assert!(synod_1.set_if_not_accepted(|| 13));
assert_eq!(synod_1.value(), &13);
// synod 1: generate prepare
let prepare = synod_1.new_prepare();
// it's still possible to set the value as the prepare has not been
// handled
assert!(synod_1.set_if_not_accepted(|| 2));
assert_eq!(synod_1.value(), &2);
// handle the prepare at n - f processes, including synod 1
let promise_1 = synod_1
.handle(1, prepare.clone())
.expect("there should a promise from 1");
let promise_2 = synod_2
.handle(1, prepare.clone())
.expect("there should a promise from 2");
let promise_3 = synod_3
.handle(1, prepare.clone())
.expect("there should a promise from 3");
let promise_4 = synod_4
.handle(1, prepare.clone())
.expect("there should a promise from 4");
// check it's no longer possible to set the value
assert!(!synod_1.set_if_not_accepted(|| 13));
assert_eq!(synod_1.value(), &2);
// synod 1: handle promises
let result = synod_1.handle(1, promise_1);
assert!(result.is_none());
let result = synod_1.handle(2, promise_2);
assert!(result.is_none());
let result = synod_1.handle(3, promise_3);
assert!(result.is_none());
// only in the last one there should be an accept message
let accept = synod_1
.handle(4, promise_4)
.expect("there should an accept message");
// handle the accept at f + 1 processes, including synod 1
let accepted_1 = synod_1
.handle(1, accept.clone())
.expect("there should an accept from 1");
let accepted_5 = synod_5
.handle(1, accept.clone())
.expect("there should an accept from 5");
// synod 1: handle accepts
let result = synod_1.handle(1, accepted_1);
assert!(result.is_none());
let chosen = synod_1
.handle(5, accepted_5)
.expect("there should be a chosen message");
// check that 210 (2 * 3 * 5 * 7 * 11, i.e. the ballot-0 values from
// phase-1 processes) was chosen
assert_eq!(chosen, SynodMessage::MChosen(210));
}
#[test]
fn synod_prepare_with_lower_ballot_fails() {
// n and f
let n = 3;
let f = 1;
// create all synods
let mut synod_1 = Synod::new(1, n, f, proposal_gen, 0);
let mut synod_2 = Synod::new(2, n, f, proposal_gen, 0);
let mut synod_3 = Synod::new(3, n, f, proposal_gen, 0);
// synod 1 and 3: generate prepare
let prepare_a = synod_1.new_prepare();
let prepare_c = synod_3.new_prepare();
// handle the prepare_a at synod 1
synod_1
.handle(1, prepare_a.clone())
.expect("there should a promise from 1");
// handle the prepare_c at synod 3
synod_3
.handle(3, prepare_c.clone())
.expect("there should a promise from 3");
// handle the prepare_c at synod 2
synod_2
.handle(3, prepare_c.clone())
.expect("there should a promise from 2");
// handle the prepare_a at synod 2
let result = synod_2.handle(1, prepare_a.clone());
// there should be no promise from synod 2
assert!(result.is_none());
}
#[test]
fn synod_recovery() {
// n and f
let n = 3;
let f = 1;
// create all synods
let mut synod_1 = Synod::new(1, n, f, proposal_gen, 2);
let mut synod_2 = Synod::new(2, n, f, proposal_gen, 3);
let synod_3 = Synod::new(3, n, f, proposal_gen, 5);
// synod 1: generate prepare
let prepare = synod_1.new_prepare();
// handle the prepare at synod 1
let promise_1 = synod_1
.handle(1, prepare.clone())
.expect("there should a promise from 1");
// handle the prepare at synod 2
let promise_2 = synod_2
.handle(1, prepare.clone())
.expect("there should a promise from 2");
// synod 1: handle promises
let result = synod_1.handle(1, promise_1);
assert!(result.is_none());
// only in the last one there should be an accept message
let accept = synod_1
.handle(2, promise_2)
.expect("there should an accept message");
// check the value in the accept
if let SynodMessage::MAccept(ballot, value) = accept {
assert_eq!(ballot, 4); // 8 is the ballot from round-1 (n=3 * round=1 + id=1) that belongs
// to process 1
assert_eq!(value, 6); // 2 * 3, the values stored by processes 1 and
// 2
} else {
panic!("process 1 should have generated an accept")
}
// handle the accept only at synod 1
synod_1
.handle(1, accept)
.expect("there should an accept from 1");
// at this point, if another process tries to recover, there are two
// possible situations:
// - if process 1 is part of that phase-1 quorum, this new process needs
// to propose the same value that was proposed by 1 (i.e. 6)
// - if process 2 is *not* part of that phase-1 quorum, this new process
// can propose anything it wants; this value will be 15, i.e. the
// multiplication of the values stored by processes 2 and 3
// start recovery by synod 2
let prepare = synod_2.new_prepare();
// handle prepare at synod 2
let promise_2 = synod_2
.handle(2, prepare.clone())
.expect("there should be a promise from 2");
// synod 2: handle promise by 2
let result = synod_2.handle(2, promise_2);
assert!(result.is_none());
// check case 1
case_1(prepare.clone(), synod_1.clone(), synod_2.clone());
// check case 2
case_2(prepare.clone(), synod_2.clone(), synod_3.clone());
// in this case, the second prepare is handled by synod 1
fn case_1(
prepare: SynodMessage<u64>,
mut synod_1: Synod<u64>,
mut synod_2: Synod<u64>,
) {
// handle prepare at synod 1
let promise_1 = synod_1
.handle(2, prepare.clone())
.expect("there should be a promise from 1");
// synod 2: handle promise from 1
let accept = synod_2
.handle(1, promise_1)
.expect("there should an accept message");
// check the value in the accept
if let SynodMessage::MAccept(ballot, value) = accept {
assert_eq!(ballot, 8); // 8 is the ballot from round-2 (n=3 * round=2 + id=2) that
// belongs to process 2
assert_eq!(value, 6); // the value proposed by process 1
} else {
panic!("process 2 should have generated an accept")
}
}
// in this case, the second prepare is handled by synod 3
fn case_2(
prepare: SynodMessage<u64>,
mut synod_2: Synod<u64>,
mut synod_3: Synod<u64>,
) {
// handle prepare at synod 3
let promise_3 = synod_3
.handle(2, prepare.clone())
.expect("there should be a promise from 3");
// synod 2: handle promise from 3
let accept = synod_2
.handle(3, promise_3)
.expect("there should an accept message");
// check the value in the accept
if let SynodMessage::MAccept(ballot, value) = accept {
assert_eq!(ballot, 8); // 8 is the ballot from round-2 (n=3 * round=2 + id=2) that
// belongs to process 2
assert_eq!(value, 15); // 3 * 5, the values stored by processes
// 2 and 3
} else {
panic!("process 2 should have generated an accept")
}
}
}
}
#[cfg(test)]
mod proptests {
use super::*;
use quickcheck::{Arbitrary, Gen};
use quickcheck_macros::quickcheck;
use std::cell::RefCell;
use std::convert::TryInto;
// number of processes and tolerated faults
const N: usize = 5;
const F: usize = 2;
// quorum size:
// - since we consider that f = 2, the quorum size is 3
const Q: usize = 3;
// a list of pairs where the:
// - second component indicates whether the msg is lost
// - third component indicated whether the reply is lost
// with the above, an entry (_, true, false) makes no practical sense: if
// the msg is lost, there can't be any reply
type Quorum = Vec<(ProcessId, bool, bool)>;
#[derive(Clone, Debug)]
struct Action {
source: ProcessId, // either 1 or 2
q1: Quorum,
q2: Quorum,
}
fn bound_id(id: ProcessId, bound: usize) -> ProcessId {
// make sure ids are between 1 and `bound`
id % (bound as ProcessId) + 1
}
impl Arbitrary for Action {
fn arbitrary(g: &mut Gen) -> Self {
// generate source: either 1 or 2
let source: ProcessId = Arbitrary::arbitrary(g);
let source = bound_id(source, 2);
// generate q1 and q2
let q1 = arbitrary_quorum(source, g);
let q2 = arbitrary_quorum(source, g);
// return action
Self { source, q1, q2 }
}
// actions can't be shriked
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
Box::new(std::iter::empty::<Self>())
}
}
// generate a quorum of size `Q` (`Q - 1` actually as `from` is always part
// of the quorum)
fn arbitrary_quorum(source: ProcessId, g: &mut Gen) -> Quorum {
// compute expected size
let expected_size: usize = (Q - 1)
.try_into()
.expect("it should be possible to subtract 1 as the quorum size is non-zero");
// ids of processes in the quorum
let mut ids = HashSet::new();
// loop while we don't generate a quorum with the expected size
while ids.len() < expected_size {
// generate random id
let process: ProcessId = Arbitrary::arbitrary(g);
let process = bound_id(process, N);
// add process if not source
if process != source {
ids.insert(process);
}
}
// for each quorum process, generate whether any of the messages will
// get lost
ids.into_iter()
.map(|id| {
let msg_lost = Arbitrary::arbitrary(g);
let reply_lost = Arbitrary::arbitrary(g);
(id, msg_lost, reply_lost)
})
.collect()
}
type ConsensusValue = u64;
type Synods = HashMap<ProcessId, RefCell<Synod<ConsensusValue>>>;
// generate proposals by multiplying all the values reported by phase-1
// quorum processes
fn proposal_gen(
values: HashMap<ProcessId, ConsensusValue>,
) -> ConsensusValue {
values.into_iter().map(|(_, v)| v).fold(1, |acc, v| acc * v)
}
fn create_synods() -> Synods {
// create ids and their initial values
let data = vec![(1, 2), (2, 3), (3, 5), (4, 7), (5, 11)];
// create synods
data.into_iter()
.map(|(id, initial_value)| {
// get id
let id = id as ProcessId;
// create synod
let synod = Synod::new(id, N, F, proposal_gen, initial_value);
(id, RefCell::new(synod))
})
.collect()
}
#[quickcheck]
// Property: no matter which processes propose, in which order, and which
// messages/replies are lost, at most one value is ever chosen.
fn a_single_value_is_chosen(actions: Vec<Action>) -> bool {
    // Runs one proposer round from `action.source`: phase-1 over quorum `q1`
    // and, if it completed, phase-2 over quorum `q2`; any chosen value is
    // recorded in `chosen_values`.
    fn do_action(
        action: Action,
        synods: &Synods,
        chosen_values: &mut HashSet<ConsensusValue>,
    ) {
        // get source
        let source = action.source;
        // get synod
        let mut synod = synods
            .get(&source)
            .expect("synod with such id should exist")
            .borrow_mut();
        // create prepare
        let prepare = synod.new_prepare();
        // handle it locally
        let local_promise = synod
            .handle(source, prepare.clone())
            .expect("local promises should always be generated");
        synod.handle(source, local_promise);
        // handle it in all `q1`
        let outcome = handle_in_quorum(
            source, &mut synod, synods, prepare, &action.q1,
        );
        // check if phase-1 ended (a single accept message was produced)
        if outcome.len() == 1 {
            // if yes, start phase-2
            let accept = &outcome[0];
            // handle it locally
            let local_accept = synod
                .handle(source, accept.clone())
                .expect("local accepts should always be generated");
            synod.handle(source, local_accept);
            // handle msg in all `q2`
            let outcome = handle_in_quorum(
                source,
                &mut synod,
                synods,
                accept.clone(),
                &action.q2,
            );
            // check if phase-2 ended
            if outcome.len() == 1 {
                // if yes, save chosen value
                if let SynodMessage::MChosen(value) = outcome[0] {
                    chosen_values.insert(value);
                }
            }
        }
    }

    // Delivers `msg` to every process in `quorum`, honouring the per-process
    // message/reply loss flags, and feeds surviving replies back into
    // `synod`; returns the messages `synod` produced from those replies.
    fn handle_in_quorum(
        source: ProcessId,
        synod: &mut Synod<ConsensusValue>,
        synods: &Synods,
        msg: SynodMessage<ConsensusValue>,
        quorum: &Quorum,
    ) -> Vec<SynodMessage<ConsensusValue>> {
        quorum
            .iter()
            .filter_map(|(dest, msg_lost, reply_lost)| {
                // deliver msg only if it wasn't lost
                if !msg_lost {
                    // get dest synod
                    let mut dest_synod = synods
                        .get(&dest)
                        .expect("synod with such id should exist")
                        .borrow_mut();
                    // handle msg in destination
                    let reply = dest_synod.handle(source, msg.clone());
                    // check if there's a reply
                    if let Some(reply) = reply {
                        // if yes and reply shouldn't be lost, handle it
                        if !reply_lost {
                            return synod.handle(*dest, reply);
                        }
                    }
                }
                None
            })
            .collect()
    }

    // create synods
    let synods = create_synods();
    // set with all chosen values:
    // - if in the end this set has more than one value, there's a bug
    let mut chosen_values = HashSet::new();
    actions.into_iter().for_each(|action| {
        do_action(action, &synods, &mut chosen_values);
    });
    // we're good if there was at most one chosen value
    chosen_values.len() <= 1
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/synod/multi.rs | fantoch_ps/src/protocol/common/synod/multi.rs | use fantoch::hash_map::{Entry, HashMap};
use fantoch::id::ProcessId;
use fantoch::HashSet;
type Ballot = u64;
type Slot = u64;
// The first component is the ballot in which the value (the second component)
// was accepted.
type Accepted<V> = (Ballot, V);
type AcceptedSlots<V> = HashMap<Slot, Accepted<V>>;
type Accepts = HashSet<ProcessId>;
/// Implementation of Flexible multi-decree Paxos in which:
/// - phase-1 waits for n - f promises
/// - phase-2 waits for f + 1 accepts
///
/// Message type exchanged between the agents of the protocol.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum MultiSynodMessage<V> {
    // to be handled outside of this module
    MChosen(Slot, V),
    MForwardSubmit(V),
    // messages to root mod (handled by `MultiSynod::handle`)
    MSpawnCommander(Ballot, Slot, V),
    // messages to acceptor
    MPrepare(Ballot),
    MAccept(Ballot, Slot, V),
    // messages to leader
    MPromise(Ballot, AcceptedSlots<V>),
    // messages to the commander
    MAccepted(Ballot, Slot),
}
/// One process's view of multi-decree Paxos; plays all three roles
/// (leader, acceptor and commanders).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct MultiSynod<V> {
    // number of processes
    n: usize,
    // maximum number of allowed failures
    f: usize,
    // paxos agents
    leader: Leader,
    acceptor: Acceptor<V>,
    // one commander per in-flight slot, tracking that slot's accept quorum
    commanders: HashMap<Slot, Commander<V>>,
}
impl<V> MultiSynod<V>
where
    V: Clone,
{
    /// Creates a new `MultiSynod` instance.
    pub fn new(
        process_id: ProcessId,
        initial_leader: ProcessId,
        n: usize,
        f: usize,
    ) -> Self {
        Self {
            n,
            f,
            leader: Leader::new(process_id, initial_leader),
            acceptor: Acceptor::new(initial_leader),
            commanders: HashMap::new(),
        }
    }

    /// Submits a value, returning the message to be processed next:
    /// `MSpawnCommander` (handled locally) if we're the leader, or
    /// `MForwardSubmit` (sent to the leader) otherwise.
    pub fn submit(&mut self, value: V) -> MultiSynodMessage<V> {
        if let Some((ballot, slot)) = self.leader.try_submit() {
            // if we're the leader, create a spawn commander message:
            // - this message is to be handled locally, but it can be handled in
            //   a different local multi-synod process for parallelism
            MultiSynodMessage::MSpawnCommander(ballot, slot, value)
        } else {
            // if we're not the leader, then create an `MForwardSubmit` to be
            // sent to the leader
            MultiSynodMessage::MForwardSubmit(value)
        }
    }

    /// Handles `MultiSynodMessage`s generated by this `MultiSynod` module by
    /// forwarding them to the proper agent.
    ///
    /// Panics on `MChosen`/`MForwardSubmit`, which must be handled by the
    /// caller, outside of this module.
    pub fn handle(
        &mut self,
        from: ProcessId,
        msg: MultiSynodMessage<V>,
    ) -> Option<MultiSynodMessage<V>> {
        match msg {
            // handle spawn commander
            MultiSynodMessage::MSpawnCommander(b, slot, value) => {
                let maccept = self.handle_spawn_commander(b, slot, value);
                Some(maccept)
            }
            // handle messages to acceptor
            MultiSynodMessage::MPrepare(b) => self.acceptor.handle_prepare(b),
            MultiSynodMessage::MAccept(b, slot, value) => {
                self.acceptor.handle_accept(b, slot, value)
            }
            // handle messages to leader
            MultiSynodMessage::MPromise(_b, _previous) => {
                todo!("handling of MultiSynodMessage::MPromise not implemented yet");
            }
            // handle messages to commanders
            MultiSynodMessage::MAccepted(b, slot) => {
                self.handle_maccepted(from, b, slot)
            }
            MultiSynodMessage::MChosen(_, _) => panic!("MultiSynod::MChosen messages are to be handled outside of MultiSynod"),
            MultiSynodMessage::MForwardSubmit(_) => panic!("MultiSynod::MForwardSubmit messages are to be handled outside of MultiSynod")
        }
    }

    /// Performs garbage collection of stable slots.
    pub fn gc(&mut self, stable: (u64, u64)) -> usize {
        self.acceptor.gc(stable)
    }

    /// Performs garbage collection of a single slot.
    pub fn gc_single(&mut self, slot: u64) {
        self.acceptor.gc_single(slot)
    }

    // Registers a commander for `slot` and produces the corresponding
    // `MAccept`. Panics if a commander for `slot` already exists.
    fn handle_spawn_commander(
        &mut self,
        ballot: Ballot,
        slot: Slot,
        value: V,
    ) -> MultiSynodMessage<V> {
        // create a new commander
        let commander = Commander::spawn(self.f, ballot, value.clone());
        // update list of commanders
        let res = self.commanders.insert(slot, commander);
        // check that there was no other commander for this slot
        assert!(res.is_none());
        // create the accept message
        MultiSynodMessage::MAccept(ballot, slot, value)
    }

    // Forwards an `MAccepted` to the commander watching `slot`; once the
    // commander reaches f + 1 accepts it is destroyed and an `MChosen` is
    // returned. Accepts for slots without a commander (e.g. whose commander
    // was already destroyed) are ignored.
    fn handle_maccepted(
        &mut self,
        from: ProcessId,
        ballot: Ballot,
        slot: Slot,
    ) -> Option<MultiSynodMessage<V>> {
        // get the commander of this slot:
        match self.commanders.entry(slot) {
            Entry::Occupied(mut entry) => {
                let commander = entry.get_mut();
                let chosen = commander.handle_accepted(from, ballot);
                // if the commander has gathered enough accepts, then
                // the value for this slot is chosen
                if chosen {
                    // destroy commander and get the value that was
                    // being watched
                    let value = entry.remove().destroy();
                    // create chosen message
                    let chosen = MultiSynodMessage::MChosen(slot, value);
                    Some(chosen)
                } else {
                    None
                }
            }
            Entry::Vacant(_) => {
                // ignore message if commander does not exist
                println!("MultiSynodMesssage::MAccepted({}, {}) ignored as a commander for that slot {} does not exist", ballot, slot, slot);
                None
            }
        }
    }
}
/// Leader agent: allocates `(ballot, slot)` pairs for new proposals.
#[derive(Debug, Clone, PartialEq, Eq)]
struct Leader {
    // process identifier
    process_id: ProcessId,
    // flag indicating whether we're the leader
    is_leader: bool,
    // ballot to be used in accept messages
    ballot: Ballot,
    // last slot used in accept messages
    last_slot: Slot,
}
impl Leader {
    /// Creates a new leader agent.
    fn new(process_id: ProcessId, initial_leader: ProcessId) -> Self {
        // we're the leader iff we are the configured initial leader
        let is_leader = process_id == initial_leader;
        // leaders start with their own identifier as ballot (a ballot that
        // every acceptor automatically joins on bootstrap); non-leaders
        // start with ballot 0
        let ballot = if is_leader { process_id as Ballot } else { 0 };
        Self {
            process_id,
            is_leader,
            ballot,
            // no slot has been used yet
            last_slot: 0,
        }
    }

    /// If this process is the leader, reserves the next slot and returns it
    /// together with the leader's ballot; otherwise returns `None`.
    fn try_submit(&mut self) -> Option<(Ballot, Slot)> {
        if !self.is_leader {
            return None;
        }
        // reserve the next slot
        self.last_slot += 1;
        Some((self.ballot, self.last_slot))
    }
}
/// Commander agent: watches accepted messages for one slot until an accept
/// quorum (f + 1 processes) is reached.
#[derive(Debug, Clone, PartialEq, Eq)]
struct Commander<V> {
    // maximum number of allowed failures
    f: usize,
    // ballot prepared by the leader
    ballot: Ballot,
    // value sent in the accept
    value: V,
    // set of processes that have accepted the accept
    accepts: Accepts,
}
impl<V> Commander<V>
where
    V: Clone,
{
    // Creates a commander that watches accepted messages for a single slot.
    fn spawn(f: usize, ballot: Ballot, value: V) -> Self {
        Self {
            f,
            ballot,
            value,
            accepts: HashSet::new(),
        }
    }

    // Records an accepted message from `from`, returning whether the accept
    // quorum (f + 1 processes) has just been reached.
    fn handle_accepted(&mut self, from: ProcessId, b: Ballot) -> bool {
        // only accepts that refer to the current ballot are considered
        if b != self.ballot {
            return false;
        }
        self.accepts.insert(from);
        self.accepts.len() == self.f + 1
    }

    // Consumes the commander, returning the value being watched. This should
    // be called once `handle_accepted` returns true; it panics otherwise.
    fn destroy(self) -> V {
        assert_eq!(self.accepts.len(), self.f + 1);
        self.value
    }
}
/// Acceptor agent of multi-decree Paxos.
#[derive(Debug, Clone, PartialEq, Eq)]
struct Acceptor<Value> {
    // highest ballot joined so far
    ballot: Ballot,
    // per-slot accepted value, tagged with the ballot at which it was accepted
    accepted: HashMap<Slot, Accepted<Value>>,
}
impl<V> Acceptor<V>
where
    V: Clone,
{
    // Creates a new acceptor given the initial leader.
    // The acceptor immediately joins the first ballot of this leader, i.e. its
    // identifier.
    fn new(initial_leader: ProcessId) -> Self {
        Self {
            ballot: initial_leader as Ballot,
            accepted: HashMap::new(),
        }
    }

    // The reply to this prepare request contains:
    // - a promise to never accept a proposal numbered less than `b`
    // - the non-GCed proposals accepted at ballots less than `b`, if any
    fn handle_prepare(&mut self, b: Ballot) -> Option<MultiSynodMessage<V>> {
        // since we need to promise that we won't accept any proposal numbered
        // less than `b`, there's no point in letting such proposal be
        // prepared, and so, we ignore such prepares
        if b > self.ballot {
            // update current ballot
            self.ballot = b;
            // create promise message
            let promise = MultiSynodMessage::MPromise(b, self.accepted.clone());
            Some(promise)
        } else {
            None
        }
    }

    // Accepts `(slot, value)` if `b` is at least the highest ballot joined,
    // replying with an `MAccepted`; otherwise the accept is ignored.
    fn handle_accept(
        &mut self,
        b: Ballot,
        slot: Slot,
        value: V,
    ) -> Option<MultiSynodMessage<V>> {
        if b >= self.ballot {
            // update current ballot
            self.ballot = b;
            // update the accepted value for `slot`
            self.accepted.insert(slot, (b, value));
            // create accepted message
            let accepted = MultiSynodMessage::MAccepted(b, slot);
            Some(accepted)
        } else {
            None
        }
    }

    /// Performs garbage collection of stable slots.
    /// Returns how many stable slots were actually removed.
    fn gc(&mut self, (start, end): (u64, u64)) -> usize {
        (start..=end)
            .filter(|slot| {
                // remove slot:
                // - if this acceptor is not part of the quorum used by the
                //   leader, then the slot does not exist locally (assuming
                //   there was no recovery)
                self.accepted.remove(&slot).is_some()
            })
            .count()
    }

    /// Performs garbage collection of a single slot.
    pub fn gc_single(&mut self, slot: u64) {
        // this only does anything if this acceptor was contacted by the leader
        // for this slot
        self.accepted.remove(&slot);
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // End-to-end happy path: leader submits, f + 1 accepts are gathered,
    // value is chosen; submission at a non-leader is forwarded.
    #[test]
    fn multi_synod_flow() {
        // n and f
        let n = 3;
        let f = 1;
        // initial leader is 1
        let initial_leader = 1;
        // create all synods
        let mut synod_1 = MultiSynod::<usize>::new(1, initial_leader, n, f);
        let mut synod_2 = MultiSynod::<usize>::new(2, initial_leader, n, f);
        let mut synod_3 = MultiSynod::<usize>::new(3, initial_leader, n, f);

        // synod 1: submit new command
        let value = 10;
        let spawn = synod_1.submit(value);
        // since synod 1 is the leader, then the message is a spawn commander
        match &spawn {
            MultiSynodMessage::MSpawnCommander(_, _, _) => {}
            _ => panic!(
                "submitting at the leader should create an spawn commander message"
            ),
        };
        let accept =
            synod_1.handle(1, spawn).expect("there should be an accept");
        // handle the spawn commander locally creating an accept message
        match &accept {
            MultiSynodMessage::MAccept(_, _, _) => {}
            _ => panic!(
                "the handle of a spawn commander should result in an accept message"
            ),
        };
        // handle the accept at f + 1 processes, including synod 1
        let accepted_1 = synod_1
            .handle(1, accept.clone())
            .expect("there should an accept from 1");
        let accepted_2 = synod_2
            .handle(1, accept.clone())
            .expect("there should an accept from 2");
        // synod 1: handle accepts; the first one is not enough for a quorum
        let result = synod_1.handle(1, accepted_1);
        assert!(result.is_none());
        let chosen = synod_1
            .handle(2, accepted_2)
            .expect("there should be a chosen message");
        // check that `value` was chosen at slot 1
        let slot = 1;
        assert_eq!(chosen, MultiSynodMessage::MChosen(slot, value));

        // synod 3: submit new command
        // since synod 3 is *not* the leader, then the message is an mforward
        let value = 30;
        match synod_3.submit(value) {
            MultiSynodMessage::MForwardSubmit(forward_value) => {
                assert_eq!(value, forward_value)
            }
            _ => panic!(
                "submitting at a non-leader should create an mfoward message"
            ),
        };
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/graph/mod.rs | fantoch_ps/src/protocol/common/graph/mod.rs | // This module contains the definition of `KeyDeps` and `QuorumDeps`.
mod deps;
// Re-exports.
pub use deps::{
Dependency, KeyDeps, LockedKeyDeps, QuorumDeps, SequentialKeyDeps,
};
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/graph/deps/quorum.rs | fantoch_ps/src/protocol/common/graph/deps/quorum.rs | use super::Dependency;
use fantoch::id::ProcessId;
use fantoch::{HashMap, HashSet};
/// Aggregates the dependency sets reported by the processes of a fast quorum.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct QuorumDeps {
    // fast quorum size
    fast_quorum_size: usize,
    // set of processes that have participated in this computation
    participants: HashSet<ProcessId>,
    // mapping from dep to the number of times it is reported by the fast
    // quorum
    threshold_deps: HashMap<Dependency, usize>,
}
impl QuorumDeps {
    /// Creates a `QuorumDeps` instance given the quorum size.
    pub fn new(fast_quorum_size: usize) -> Self {
        Self {
            fast_quorum_size,
            participants: HashSet::with_capacity(fast_quorum_size),
            threshold_deps: HashMap::new(),
        }
    }

    /// Maybe change the fast quorum size.
    /// Only allowed before any process has reported its deps.
    pub fn maybe_adjust_fast_quorum_size(&mut self, fast_quorum_size: usize) {
        debug_assert!(self.participants.is_empty());
        self.fast_quorum_size = fast_quorum_size;
    }

    /// Adds new `deps` reported by `process_id`.
    pub fn add(&mut self, process_id: ProcessId, deps: HashSet<Dependency>) {
        debug_assert!(self.participants.len() < self.fast_quorum_size);
        // record new participant
        self.participants.insert(process_id);
        // add each dep to the threshold deps, counting how many quorum
        // processes have reported it
        for dep in deps {
            *self.threshold_deps.entry(dep).or_default() += 1;
        }
    }

    /// Check if all fast quorum processes have reported their deps.
    pub fn all(&self) -> bool {
        self.participants.len() == self.fast_quorum_size
    }

    /// Checks if threshold union == union and returns the union.
    pub fn check_threshold(
        &self,
        threshold: usize,
    ) -> (HashSet<Dependency>, bool) {
        debug_assert!(self.all());
        let mut equal_to_union = true;
        let deps: HashSet<_> = self
            .threshold_deps
            .iter()
            .map(|(dep, count)| {
                // it's equal to union if all deps were reported at least
                // `threshold` times
                equal_to_union = equal_to_union && *count >= threshold;
                dep.clone()
            })
            .collect();
        (deps, equal_to_union)
    }

    /// Checks if all deps reported are the same and returns the union.
    pub fn check_equal(&self) -> (HashSet<Dependency>, bool) {
        debug_assert!(self.all());
        let (deps, counts): (HashSet<Dependency>, HashSet<usize>) =
            self.threshold_deps.clone().into_iter().unzip();
        // all processes reported the same set iff every dep was reported
        // exactly `fast_quorum_size` times (or no dep was reported at all)
        let equal_deps_reported = match counts.len() {
            0 => {
                // this means that no dependencies were reported, and thus it
                // trivially holds that dependencies reported were all equal
                true
            }
            1 => {
                // we have equal deps if:
                // - dependencies are reported the same number of times
                // - their report count is equal to the number of fast quorum
                //   processes (i.e. every process reported every dependency)
                counts
                    .into_iter()
                    .next()
                    .expect("there must be a dep count")
                    == self.fast_quorum_size
            }
            _ => {
                // if at least two dependencies have different counts, then
                // at least one of the sets of dependencies reported didn't
                // match the others
                false
            }
        };
        (deps, equal_deps_reported)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use fantoch::id::Dot;
    use std::iter::FromIterator;

    // Builds a noop dependency on `Dot(source, sequence)`.
    fn new_dep(source: ProcessId, sequence: u64) -> Dependency {
        let dot = Dot::new(source, sequence);
        // we don't care about shards in these tests, so we can just set them to
        // `None`
        Dependency::from_noop(dot)
    }

    #[test]
    fn all_test() {
        // quorum deps
        let q = 3;
        let mut quorum_deps = QuorumDeps::new(q);
        // add all deps and check they're there
        let deps = HashSet::from_iter(vec![new_dep(1, 1), new_dep(1, 2)]);
        quorum_deps.add(0, deps.clone());
        assert!(!quorum_deps.all());
        quorum_deps.add(1, deps.clone());
        assert!(!quorum_deps.all());
        quorum_deps.add(2, deps.clone());
        assert!(quorum_deps.all());
    }

    #[test]
    fn check_threshold_test() {
        // -------------
        // quorum deps: every process reports the same deps, so the threshold
        // union is exact up to threshold == quorum size
        let q = 3;
        let mut quorum_deps = QuorumDeps::new(q);
        // add deps
        let deps_1_and_2 =
            HashSet::from_iter(vec![new_dep(1, 1), new_dep(1, 2)]);
        quorum_deps.add(1, deps_1_and_2.clone());
        quorum_deps.add(2, deps_1_and_2.clone());
        quorum_deps.add(3, deps_1_and_2.clone());
        // check threshold union
        assert_eq!(
            quorum_deps.check_threshold(1),
            (deps_1_and_2.clone(), true)
        );
        assert_eq!(
            quorum_deps.check_threshold(2),
            (deps_1_and_2.clone(), true)
        );
        assert_eq!(
            quorum_deps.check_threshold(3),
            (deps_1_and_2.clone(), true)
        );
        assert_eq!(
            quorum_deps.check_threshold(4),
            (deps_1_and_2.clone(), false)
        );

        // -------------
        // quorum deps: dep (1, 3) is only reported once
        let q = 3;
        let mut quorum_deps = QuorumDeps::new(q);
        // add clocks
        let deps_1_2_and_3 = HashSet::from_iter(vec![
            new_dep(1, 1),
            new_dep(1, 2),
            new_dep(1, 3),
        ]);
        quorum_deps.add(1, deps_1_2_and_3.clone());
        quorum_deps.add(2, deps_1_and_2.clone());
        quorum_deps.add(3, deps_1_and_2.clone());
        // check threshold union
        assert_eq!(
            quorum_deps.check_threshold(1),
            (deps_1_2_and_3.clone(), true)
        );
        assert_eq!(
            quorum_deps.check_threshold(2),
            (deps_1_2_and_3.clone(), false)
        );
        assert_eq!(
            quorum_deps.check_threshold(3),
            (deps_1_2_and_3.clone(), false)
        );
        assert_eq!(
            quorum_deps.check_threshold(4),
            (deps_1_2_and_3.clone(), false)
        );

        // -------------
        // quorum deps: three different report counts
        let q = 3;
        let mut quorum_deps = QuorumDeps::new(q);
        // add clocks
        let deps_1 = HashSet::from_iter(vec![new_dep(1, 1)]);
        quorum_deps.add(1, deps_1_2_and_3.clone());
        quorum_deps.add(2, deps_1_and_2.clone());
        quorum_deps.add(3, deps_1.clone());
        // check threshold union
        assert_eq!(
            quorum_deps.check_threshold(1),
            (deps_1_2_and_3.clone(), true)
        );
        assert_eq!(
            quorum_deps.check_threshold(2),
            (deps_1_2_and_3.clone(), false)
        );
        assert_eq!(
            quorum_deps.check_threshold(3),
            (deps_1_2_and_3.clone(), false)
        );
        assert_eq!(
            quorum_deps.check_threshold(4),
            (deps_1_2_and_3.clone(), false)
        );
    }

    #[test]
    fn check_equal_test() {
        // add deps
        let deps_1 = HashSet::from_iter(vec![new_dep(1, 1)]);
        let deps_1_and_2 =
            HashSet::from_iter(vec![new_dep(1, 1), new_dep(1, 2)]);
        let deps_1_and_3 =
            HashSet::from_iter(vec![new_dep(1, 1), new_dep(1, 3)]);
        let deps_2_and_3 =
            HashSet::from_iter(vec![new_dep(1, 2), new_dep(1, 3)]);
        let deps_1_2_and_3 = HashSet::from_iter(vec![
            new_dep(1, 1),
            new_dep(1, 2),
            new_dep(1, 3),
        ]);

        // -------------
        // quorum deps: nobody reported anything — trivially equal
        let mut quorum_deps = QuorumDeps::new(2);
        quorum_deps.add(1, HashSet::new());
        quorum_deps.add(2, HashSet::new());
        assert_eq!(quorum_deps.check_equal(), (HashSet::new(), true));

        // -------------
        // quorum deps: one process reported something the others didn't
        let mut quorum_deps = QuorumDeps::new(3);
        quorum_deps.add(1, HashSet::new());
        quorum_deps.add(2, HashSet::new());
        quorum_deps.add(3, deps_1.clone());
        assert_eq!(quorum_deps.check_equal(), (deps_1.clone(), false));

        // -------------
        // quorum deps: all equal
        let mut quorum_deps = QuorumDeps::new(3);
        quorum_deps.add(1, deps_1.clone());
        quorum_deps.add(2, deps_1.clone());
        quorum_deps.add(3, deps_1.clone());
        assert_eq!(quorum_deps.check_equal(), (deps_1.clone(), true));

        // -------------
        // quorum deps: all equal
        let mut quorum_deps = QuorumDeps::new(2);
        quorum_deps.add(1, deps_1_and_2.clone());
        quorum_deps.add(2, deps_1_and_2.clone());
        assert_eq!(quorum_deps.check_equal(), (deps_1_and_2.clone(), true));

        // -------------
        // quorum deps: one report is empty
        let mut quorum_deps = QuorumDeps::new(2);
        quorum_deps.add(1, deps_1_and_2.clone());
        quorum_deps.add(2, HashSet::new());
        assert_eq!(quorum_deps.check_equal(), (deps_1_and_2.clone(), false));

        // -------------
        // quorum deps: each dep is reported twice but the sets differ
        let mut quorum_deps = QuorumDeps::new(3);
        quorum_deps.add(1, deps_1_and_2);
        quorum_deps.add(2, deps_1_and_3);
        quorum_deps.add(3, deps_2_and_3);
        assert_eq!(quorum_deps.check_equal(), (deps_1_2_and_3, false));
    }

    #[test]
    fn check_equal_regression_test() {
        let q = 3;
        // add deps
        let deps_1 = HashSet::from_iter(vec![new_dep(1, 1)]);
        let deps_2 = HashSet::from_iter(vec![new_dep(1, 2)]);
        let deps_1_and_2 =
            HashSet::from_iter(vec![new_dep(1, 1), new_dep(1, 2)]);
        let mut quorum_deps = QuorumDeps::new(q);
        quorum_deps.add(1, deps_1);
        quorum_deps.add(2, deps_2);
        quorum_deps.add(3, deps_1_and_2.clone());
        // every dep has the same count (2), yet 2 != quorum size (3), so
        // `check_equal` must report false
        assert_eq!(quorum_deps.check_equal(), (deps_1_and_2, false));
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/graph/deps/mod.rs | fantoch_ps/src/protocol/common/graph/deps/mod.rs | // This module contains the definition of `KeyDeps`.
mod keys;
// This module contains the definition of `QuorumDeps`.
mod quorum;
// Re-exports.
pub use keys::{Dependency, KeyDeps, LockedKeyDeps, SequentialKeyDeps};
pub use quorum::QuorumDeps;
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/graph/deps/keys/sequential.rs | fantoch_ps/src/protocol/common/graph/deps/keys/sequential.rs | use super::{Dependency, KeyDeps, LatestDep, LatestRWDep};
use fantoch::command::Command;
use fantoch::id::{Dot, ShardId};
use fantoch::kvs::Key;
use fantoch::{HashMap, HashSet};
/// Single-threaded `KeyDeps` implementation (see `parallel()` below).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SequentialKeyDeps {
    // shard whose keys this instance tracks
    shard_id: ShardId,
    // NFR optimization flag (see `maybe_add_deps` in the parent module)
    nfr: bool,
    // per-key latest read and write dependency
    latest: HashMap<Key, LatestRWDep>,
    // latest noop dependency, if any
    latest_noop: LatestDep,
}
impl KeyDeps for SequentialKeyDeps {
    /// Create a new `SequentialKeyDeps` instance.
    fn new(shard_id: ShardId, nfr: bool) -> Self {
        Self {
            shard_id,
            nfr,
            latest: HashMap::new(),
            latest_noop: None,
        }
    }

    fn add_cmd(
        &mut self,
        dot: Dot,
        cmd: &Command,
        past: Option<HashSet<Dependency>>,
    ) -> HashSet<Dependency> {
        // we start with past in case there's one, or bottom otherwise
        let deps = match past {
            Some(past) => past,
            None => HashSet::new(),
        };
        self.do_add_cmd(dot, cmd, deps)
    }

    fn add_noop(&mut self, dot: Dot) -> HashSet<Dependency> {
        // start with an empty set of dependencies
        let deps = HashSet::new();
        self.do_add_noop(dot, deps)
    }

    #[cfg(test)]
    fn cmd_deps(&self, cmd: &Command) -> HashSet<Dot> {
        let mut deps = HashSet::new();
        self.maybe_add_noop_latest(&mut deps);
        self.do_cmd_deps(cmd, &mut deps);
        super::extract_dots(deps)
    }

    #[cfg(test)]
    fn noop_deps(&self) -> HashSet<Dot> {
        let mut deps = HashSet::new();
        self.maybe_add_noop_latest(&mut deps);
        self.do_noop_deps(&mut deps);
        super::extract_dots(deps)
    }

    // this implementation is not safe for concurrent use
    fn parallel() -> bool {
        false
    }
}
impl SequentialKeyDeps {
    // Adds the latest noop (if any) to `deps`.
    fn maybe_add_noop_latest(&self, deps: &mut HashSet<Dependency>) {
        if let Some(dep) = self.latest_noop.as_ref() {
            deps.insert(dep.clone());
        }
    }

    fn do_add_cmd(
        &mut self,
        dot: Dot,
        cmd: &Command,
        mut deps: HashSet<Dependency>,
    ) -> HashSet<Dependency> {
        // create cmd dep
        let cmd_dep = Dependency::from_cmd(dot, cmd);
        // flag indicating whether the command is read-only
        let read_only = cmd.read_only();
        // we only support single-key read commands with NFR
        assert!(if self.nfr && read_only {
            cmd.total_key_count() == 1
        } else {
            true
        });
        // iterate through all command keys, get their current latest and set
        // ourselves to be the new latest
        cmd.keys(self.shard_id).for_each(|key| {
            // get latest read and write on this key; the two-step lookup
            // avoids cloning the key when an entry already exists
            let latest_rw = match self.latest.get_mut(key) {
                Some(value) => value,
                None => self.latest.entry(key.clone()).or_default(),
            };
            super::maybe_add_deps(read_only, self.nfr, latest_rw, &mut deps);
            // finally, store the command
            if read_only {
                // if a command is read-only, then add it as the latest read
                latest_rw.read = Some(cmd_dep.clone());
            } else {
                // otherwise, add it as the latest write
                latest_rw.write = Some(cmd_dep.clone());
            }
        });
        // always include latest noop, if any
        self.maybe_add_noop_latest(&mut deps);
        // and finally return the computed deps
        deps
    }

    fn do_add_noop(
        &mut self,
        dot: Dot,
        mut deps: HashSet<Dependency>,
    ) -> HashSet<Dependency> {
        // set self to be the new latest
        if let Some(dep) = self.latest_noop.replace(Dependency::from_noop(dot))
        {
            // if there was a previous latest, then it's a dependency
            deps.insert(dep);
        }
        // compute deps for this noop
        self.do_noop_deps(&mut deps);
        deps
    }

    // Adds the latest read and write of every key to `deps` (noops conflict
    // with everything).
    fn do_noop_deps(&self, deps: &mut HashSet<Dependency>) {
        // iterate through all keys and include their latest read and write
        // in the final `deps`
        self.latest.values().for_each(|latest_rw| {
            if let Some(rdep) = latest_rw.read.as_ref() {
                deps.insert(rdep.clone());
            }
            if let Some(wdep) = latest_rw.write.as_ref() {
                deps.insert(wdep.clone());
            }
        });
    }

    #[cfg(test)]
    fn do_cmd_deps(&self, cmd: &Command, deps: &mut HashSet<Dependency>) {
        // flag indicating whether the command is read-only
        let read_only = cmd.read_only();
        cmd.keys(self.shard_id).for_each(|key| {
            // get latest command on this key
            if let Some(latest_rw) = self.latest.get(key) {
                super::maybe_add_deps(read_only, self.nfr, latest_rw, deps);
            }
        });
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/graph/deps/keys/locked.rs | fantoch_ps/src/protocol/common/graph/deps/keys/locked.rs | use super::{Dependency, KeyDeps, LatestDep, LatestRWDep};
use fantoch::command::Command;
use fantoch::id::{Dot, ShardId};
use fantoch::kvs::Key;
use fantoch::shared::SharedMap;
use fantoch::HashSet;
use parking_lot::RwLock;
use std::sync::Arc;
/// Thread-safe `KeyDeps` implementation: state is shared across clones via
/// `Arc` and guarded by per-key `RwLock`s.
#[derive(Debug, Clone)]
pub struct LockedKeyDeps {
    // shard whose keys this instance tracks
    shard_id: ShardId,
    // NFR optimization flag (see `maybe_add_deps` in the parent module)
    nfr: bool,
    // per-key latest read and write dependency, individually locked
    latest: Arc<SharedMap<Key, RwLock<LatestRWDep>>>,
    // latest noop dependency, if any
    latest_noop: Arc<RwLock<LatestDep>>,
}
impl KeyDeps for LockedKeyDeps {
    /// Create a new `LockedKeyDeps` instance.
    fn new(shard_id: ShardId, nfr: bool) -> Self {
        Self {
            shard_id,
            nfr,
            latest: Arc::new(SharedMap::new()),
            latest_noop: Arc::new(RwLock::new(None)),
        }
    }

    fn add_cmd(
        &mut self,
        dot: Dot,
        cmd: &Command,
        past: Option<HashSet<Dependency>>,
    ) -> HashSet<Dependency> {
        // we start with past in case there's one, or bottom otherwise
        let deps = match past {
            Some(past) => past,
            None => HashSet::new(),
        };
        self.do_add_cmd(dot, cmd, deps)
    }

    fn add_noop(&mut self, dot: Dot) -> HashSet<Dependency> {
        // start with an empty set of dependencies
        let deps = HashSet::new();
        self.do_add_noop(dot, deps)
    }

    #[cfg(test)]
    fn cmd_deps(&self, cmd: &Command) -> HashSet<Dot> {
        let mut deps = HashSet::new();
        self.maybe_add_noop_latest(&mut deps);
        self.do_cmd_deps(cmd, &mut deps);
        super::extract_dots(deps)
    }

    #[cfg(test)]
    fn noop_deps(&self) -> HashSet<Dot> {
        let mut deps = HashSet::new();
        self.maybe_add_noop_latest(&mut deps);
        self.do_noop_deps(&mut deps);
        super::extract_dots(deps)
    }

    // this implementation is safe for concurrent use
    fn parallel() -> bool {
        true
    }
}
impl LockedKeyDeps {
    // Adds the latest noop (if any) to `deps`.
    fn maybe_add_noop_latest(&self, deps: &mut HashSet<Dependency>) {
        // for this operation we only need a read lock
        if let Some(dep) = self.latest_noop.read().as_ref() {
            deps.insert(dep.clone());
        }
    }

    fn do_add_cmd(
        &self,
        dot: Dot,
        cmd: &Command,
        mut deps: HashSet<Dependency>,
    ) -> HashSet<Dependency> {
        // create cmd dep
        let cmd_dep = Dependency::from_cmd(dot, cmd);
        // flag indicating whether the command is read-only
        let read_only = cmd.read_only();
        // we only support single-key read commands with NFR
        assert!(if self.nfr && read_only {
            cmd.total_key_count() == 1
        } else {
            true
        });
        // iterate through all command keys, grab a write lock, get their
        // current latest and set ourselves to be the new latest
        cmd.keys(self.shard_id).for_each(|key| {
            // get latest read and write on this key
            let entry = self.latest.get_or(key, || RwLock::default());
            // grab a write lock
            let mut guard = entry.write();
            super::maybe_add_deps(read_only, self.nfr, &guard, &mut deps);
            // finally, store the command
            if read_only {
                // if a command is read-only, then add it as the latest read
                guard.read = Some(cmd_dep.clone());
            } else {
                // otherwise, add it as the latest write
                guard.write = Some(cmd_dep.clone());
            }
        });
        // always include latest noop, if any
        // TODO: when adding recovery, check that the interleaving of the
        // following and the previous loop, and how it interacts with
        // `do_add_noop` is correct
        self.maybe_add_noop_latest(&mut deps);
        // and finally return the computed deps
        deps
    }

    fn do_add_noop(
        &self,
        dot: Dot,
        mut deps: HashSet<Dependency>,
    ) -> HashSet<Dependency> {
        // grab a write lock to the noop latest and:
        // - add ourselves to the deps:
        //   * during the next iteration a new key in the map might be created
        //     and we may miss it
        //   * by first setting ourselves to be the noop latest we make sure
        //     that, even though we will not see that newly created key, that
        //     key will see us
        // grab a write lock and set self to be the new latest
        if let Some(dep) =
            self.latest_noop.write().replace(Dependency::from_noop(dot))
        {
            // if there was a previous latest, then it's a dependency
            deps.insert(dep);
        }
        // compute deps for this noop
        self.do_noop_deps(&mut deps);
        deps
    }

    // Adds the latest read and write of every key to `deps` (noops conflict
    // with everything).
    fn do_noop_deps(&self, deps: &mut HashSet<Dependency>) {
        // iterate through all keys, grab a read lock, and include their latest
        // in the final `deps`
        self.latest.iter().for_each(|entry| {
            // grab a read lock and take the dots there as a dependency
            let latest_rw = entry.value().read();
            if let Some(rdep) = latest_rw.read.as_ref() {
                deps.insert(rdep.clone());
            }
            if let Some(wdep) = latest_rw.write.as_ref() {
                deps.insert(wdep.clone());
            }
        });
    }

    #[cfg(test)]
    fn do_cmd_deps(&self, cmd: &Command, deps: &mut HashSet<Dependency>) {
        // flag indicating whether the command is read-only
        let read_only = cmd.read_only();
        cmd.keys(self.shard_id).for_each(|key| {
            // get latest read and write on this key
            let entry = self.latest.get_or(key, || RwLock::default());
            // grab a read lock
            let guard = entry.read();
            super::maybe_add_deps(read_only, self.nfr, &guard, deps);
        });
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/graph/deps/keys/mod.rs | fantoch_ps/src/protocol/common/graph/deps/keys/mod.rs | // This module contains the definition of `SequentialKeyDeps`.
mod sequential;
// This module contains the definition of `LockedKeyDeps`.
mod locked;
// Re-exports.
pub use locked::LockedKeyDeps;
pub use sequential::SequentialKeyDeps;
use fantoch::command::Command;
use fantoch::id::{Dot, ShardId};
use fantoch::HashSet;
use serde::{Deserialize, Serialize};
use std::collections::BTreeSet;
use std::fmt::Debug;
/// A dependency on a previously-seen command or noop.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct Dependency {
    // identifier of the command/noop this dependency refers to
    pub dot: Dot,
    // shards accessed by the command; `None` for noops
    pub shards: Option<BTreeSet<ShardId>>,
}
impl Dependency {
    /// Builds a dependency on a command, recording the shards it accesses.
    pub fn from_cmd(dot: Dot, cmd: &Command) -> Self {
        let shards = cmd.shards().cloned().collect();
        Self {
            dot,
            shards: Some(shards),
        }
    }

    /// Builds a dependency on a noop; noops carry no shard information.
    pub fn from_noop(dot: Dot) -> Self {
        Self { dot, shards: None }
    }
}
// latest dependency (if any) recorded for a key or for noops
pub type LatestDep = Option<Dependency>;

// latest read and latest write recorded for a single key
#[derive(Debug, Clone, Default, PartialEq, Eq, Hash)]
pub struct LatestRWDep {
    read: LatestDep,
    write: LatestDep,
}
// Adds to `deps` the entries of `latest_rw` that the new command must depend
// on, given whether the new command is read-only and whether the NFR
// optimization is enabled.
pub fn maybe_add_deps(
    read_only: bool,
    nfr: bool,
    latest_rw: &LatestRWDep,
    deps: &mut HashSet<Dependency>,
) {
    // independently of whether the command is read-only or not, all commands
    // depend on writes
    if let Some(wdep) = latest_rw.write.as_ref() {
        deps.insert(wdep.clone());
    }
    // if the command is not read-only, and the NFR optimization is not enabled,
    // then the command should also depend on the latest read;
    // in other words:
    // ----------------------------------
    // | read_only | NFR   | add read dep |
    // ----------------------------------
    // | true      | _     | NO           |
    // | false     | true  | NO           |
    // | false     | false | YES          |
    // ----------------------------------
    if !read_only && !nfr {
        if let Some(rdep) = latest_rw.read.as_ref() {
            deps.insert(rdep.clone());
        }
    }
    // in sum:
    // - reads never depend on reads, and
    // - writes always depend on reads (unless NFR is enabled, in which case,
    //   they don't)
}
pub trait KeyDeps: Debug + Clone {
/// Create a new `KeyDeps` instance.
fn new(shard_id: ShardId, nfr: bool) -> Self;
/// Sets the command's `Dot` as the latest command on each key touched by
/// the command, returning the set of local conflicting commands
/// including past in them, in case there's a past.
fn add_cmd(
&mut self,
dot: Dot,
cmd: &Command,
past: Option<HashSet<Dependency>>,
) -> HashSet<Dependency>;
/// Adds a noop.
fn add_noop(&mut self, dot: Dot) -> HashSet<Dependency>;
/// Checks the current dependencies for some command.
#[cfg(test)]
fn cmd_deps(&self, cmd: &Command) -> HashSet<Dot>;
/// Checks the current dependencies for noops.
#[cfg(test)]
fn noop_deps(&self) -> HashSet<Dot>;
fn parallel() -> bool;
}
#[cfg(test)]
fn extract_dots(deps: HashSet<Dependency>) -> HashSet<Dot> {
deps.into_iter().map(|dep| dep.dot).collect()
}
#[cfg(test)]
mod tests {
use super::*;
use crate::util;
use fantoch::id::{DotGen, ProcessId, Rifl};
use fantoch::kvs::KVOp;
use fantoch::{HashMap, HashSet};
use std::iter::FromIterator;
use std::thread;
#[test]
fn sequential_key_deps() {
key_deps_flow::<SequentialKeyDeps>();
read_deps::<SequentialKeyDeps>(false);
read_deps::<SequentialKeyDeps>(true);
}
#[test]
fn locked_key_deps() {
key_deps_flow::<LockedKeyDeps>();
read_deps::<LockedKeyDeps>(false);
read_deps::<LockedKeyDeps>(true);
}
fn get(rifl: Rifl, key: String) -> Command {
Command::from(
rifl,
vec![key].into_iter().map(|key| (key.clone(), KVOp::Get)),
)
}
fn multi_put(rifl: Rifl, keys: Vec<String>, value: String) -> Command {
Command::from(
rifl,
keys.into_iter()
.map(|key| (key.clone(), KVOp::Put(value.clone()))),
)
}
fn key_deps_flow<KD: KeyDeps>() {
// create key deps
let shard_id = 0;
let deps_nfr = false;
let mut key_deps = KD::new(shard_id, deps_nfr);
// create dot gen
let process_id = 1;
let mut dot_gen = DotGen::new(process_id);
// keys
let key_a = String::from("A");
let key_b = String::from("B");
let key_c = String::from("C");
let value = String::from("");
// command a
let cmd_a_rifl = Rifl::new(100, 1); // client 100, 1st op
let cmd_a = multi_put(cmd_a_rifl, vec![key_a.clone()], value.clone());
// command b
let cmd_b_rifl = Rifl::new(101, 1); // client 101, 1st op
let cmd_b = multi_put(cmd_b_rifl, vec![key_b.clone()], value.clone());
// command ab
let cmd_ab_rifl = Rifl::new(102, 1); // client 102, 1st op
let cmd_ab = multi_put(
cmd_ab_rifl,
vec![key_a.clone(), key_b.clone()],
value.clone(),
);
// command c
let cmd_c_rifl = Rifl::new(103, 1); // client 103, 1st op
let cmd_c = multi_put(cmd_c_rifl, vec![key_c.clone()], value.clone());
// empty conf for A
let conf = key_deps.cmd_deps(&cmd_a);
assert_eq!(conf, HashSet::new());
// add A with {1,1}
key_deps.add_cmd(dot_gen.next_id(), &cmd_a, None);
// 1. conf with {1,1} for A
// 2. empty conf for B
// 3. conf with {1,1} for A-B
// 4. empty conf for C
// 5. conf with {1,1} for noop
let deps_1_1 = HashSet::from_iter(vec![Dot::new(1, 1)]);
assert_eq!(key_deps.cmd_deps(&cmd_a), deps_1_1);
assert_eq!(key_deps.cmd_deps(&cmd_b), HashSet::new());
assert_eq!(key_deps.cmd_deps(&cmd_ab), deps_1_1);
assert_eq!(key_deps.cmd_deps(&cmd_c), HashSet::new());
assert_eq!(key_deps.noop_deps(), deps_1_1);
// add noop with {1,2}
key_deps.add_noop(dot_gen.next_id());
// 1. conf with {1,2}|{1,1} for A
// 2. conf with {1,2}| for B
// 3. conf with {1,2}|{1,1} for A-B
// 4. conf with {1,2}| for C
// 5. conf with {1,2}|{1,1} for noop
let deps_1_2 = HashSet::from_iter(vec![Dot::new(1, 2)]);
let deps_1_2_and_1_1 =
HashSet::from_iter(vec![Dot::new(1, 1), Dot::new(1, 2)]);
assert_eq!(key_deps.cmd_deps(&cmd_a), deps_1_2_and_1_1);
assert_eq!(key_deps.cmd_deps(&cmd_b), deps_1_2);
assert_eq!(key_deps.cmd_deps(&cmd_ab), deps_1_2_and_1_1);
assert_eq!(key_deps.cmd_deps(&cmd_c), deps_1_2);
assert_eq!(key_deps.noop_deps(), deps_1_2_and_1_1);
// add B with {1,3}
key_deps.add_cmd(dot_gen.next_id(), &cmd_b, None);
// 1. conf with {1,2}|{1,1} for A
// 2. conf with {1,2}|{1,3} for B
// 3. conf with {1,2}|{1,1} and {1,3} for A-B
// 4. conf with {1,2}| for C
// 5. conf with {1,2}|{1,1} and {1,3} for noop
let deps_1_2_and_1_3 =
HashSet::from_iter(vec![Dot::new(1, 2), Dot::new(1, 3)]);
let deps_1_2_and_1_1_and_1_3 = HashSet::from_iter(vec![
Dot::new(1, 1),
Dot::new(1, 2),
Dot::new(1, 3),
]);
assert_eq!(key_deps.cmd_deps(&cmd_a), deps_1_2_and_1_1);
assert_eq!(key_deps.cmd_deps(&cmd_b), deps_1_2_and_1_3);
assert_eq!(key_deps.cmd_deps(&cmd_ab), deps_1_2_and_1_1_and_1_3);
assert_eq!(key_deps.cmd_deps(&cmd_c), deps_1_2);
assert_eq!(key_deps.noop_deps(), deps_1_2_and_1_1_and_1_3);
// add B with {1,4}
key_deps.add_cmd(dot_gen.next_id(), &cmd_b, None);
// 1. conf with {1,2}|{1,1} for A
// 2. conf with {1,2}|{1,4} for B
// 3. conf with {1,2}|{1,1} and {1,4} for A-B
// 4. conf with {1,2}| for C
// 5. conf with {1,2}|{1,1} and {1,4} for noop
let deps_1_2_and_1_4 =
HashSet::from_iter(vec![Dot::new(1, 2), Dot::new(1, 4)]);
let deps_1_2_1_1_and_1_4 = HashSet::from_iter(vec![
Dot::new(1, 1),
Dot::new(1, 2),
Dot::new(1, 4),
]);
assert_eq!(key_deps.cmd_deps(&cmd_a), deps_1_2_and_1_1);
assert_eq!(key_deps.cmd_deps(&cmd_b), deps_1_2_and_1_4);
assert_eq!(key_deps.cmd_deps(&cmd_ab), deps_1_2_1_1_and_1_4);
assert_eq!(key_deps.cmd_deps(&cmd_c), deps_1_2);
assert_eq!(key_deps.noop_deps(), deps_1_2_1_1_and_1_4);
// add A-B with {1,5}
key_deps.add_cmd(dot_gen.next_id(), &cmd_ab, None);
// 1. conf with {1,2}|{1,5} for A
// 2. conf with {1,2}|{1,5} for B
// 3. conf with {1,2}|{1,5} for A-B
// 4. conf with {1,2}| for C
// 5. conf with {1,2}|{1,5} for noop
let deps_1_2_and_1_5 =
HashSet::from_iter(vec![Dot::new(1, 2), Dot::new(1, 5)]);
assert_eq!(key_deps.cmd_deps(&cmd_a), deps_1_2_and_1_5);
assert_eq!(key_deps.cmd_deps(&cmd_b), deps_1_2_and_1_5);
assert_eq!(key_deps.cmd_deps(&cmd_ab), deps_1_2_and_1_5);
assert_eq!(key_deps.cmd_deps(&cmd_c), deps_1_2);
assert_eq!(key_deps.noop_deps(), deps_1_2_and_1_5);
// add A with {1,6}
key_deps.add_cmd(dot_gen.next_id(), &cmd_a, None);
// 1. conf with {1,2}|{1,6} for A
// 2. conf with {1,2}|{1,5} for B
// 3. conf with {1,2}|{1,6} and {1,5} for A-B
// 4. conf with {1,2}| for C
// 5. conf with {1,2}|{1,6} and {1,5} for noop
let deps_1_2_and_1_6 =
HashSet::from_iter(vec![Dot::new(1, 2), Dot::new(1, 6)]);
let deps_1_2_and_1_5_and_1_6 = HashSet::from_iter(vec![
Dot::new(1, 2),
Dot::new(1, 5),
Dot::new(1, 6),
]);
assert_eq!(key_deps.cmd_deps(&cmd_a), deps_1_2_and_1_6);
assert_eq!(key_deps.cmd_deps(&cmd_b), deps_1_2_and_1_5);
assert_eq!(key_deps.cmd_deps(&cmd_ab), deps_1_2_and_1_5_and_1_6);
assert_eq!(key_deps.cmd_deps(&cmd_c), deps_1_2);
assert_eq!(key_deps.noop_deps(), deps_1_2_and_1_5_and_1_6);
// add C with {1,7}
key_deps.add_cmd(dot_gen.next_id(), &cmd_c, None);
// 1. conf with {1,2}|{1,6} for A
// 2. conf with {1,2}|{1,5} for B
// 3. conf with {1,2}|{1,6} and {1,5} for A-B
// 4. conf with {1,2}|{1,7} for C
// 5. conf with {1,2}|{1,6} and {1,5} and {1,7} for noop
let deps_1_2_and_1_7 =
HashSet::from_iter(vec![Dot::new(1, 2), Dot::new(1, 7)]);
let deps_1_2_and_1_5_and_1_6_and_1_7 = HashSet::from_iter(vec![
Dot::new(1, 2),
Dot::new(1, 5),
Dot::new(1, 6),
Dot::new(1, 7),
]);
assert_eq!(key_deps.cmd_deps(&cmd_a), deps_1_2_and_1_6);
assert_eq!(key_deps.cmd_deps(&cmd_b), deps_1_2_and_1_5);
assert_eq!(key_deps.cmd_deps(&cmd_ab), deps_1_2_and_1_5_and_1_6);
assert_eq!(key_deps.cmd_deps(&cmd_c), deps_1_2_and_1_7);
assert_eq!(key_deps.noop_deps(), deps_1_2_and_1_5_and_1_6_and_1_7);
// add noop with {1,8}
key_deps.add_noop(dot_gen.next_id());
// 1. conf with {1,8}|{1,6} for A
// 2. conf with {1,8}|{1,5} for B
// 3. conf with {1,8}|{1,6} and {1,5} for A-B
// 4. conf with {1,8}|{1,7} for C
// 5. conf with {1,8}|{1,6} and {1,5} and {1,7} for noop
let deps_1_8_and_1_5 =
HashSet::from_iter(vec![Dot::new(1, 8), Dot::new(1, 5)]);
let deps_1_8_and_1_6 =
HashSet::from_iter(vec![Dot::new(1, 8), Dot::new(1, 6)]);
let deps_1_8_and_1_7 =
HashSet::from_iter(vec![Dot::new(1, 8), Dot::new(1, 7)]);
let deps_1_8_and_1_5_and_1_6 = HashSet::from_iter(vec![
Dot::new(1, 8),
Dot::new(1, 5),
Dot::new(1, 6),
]);
let deps_1_8_and_1_5_and_1_6_and_1_7 = HashSet::from_iter(vec![
Dot::new(1, 8),
Dot::new(1, 5),
Dot::new(1, 6),
Dot::new(1, 7),
]);
assert_eq!(key_deps.cmd_deps(&cmd_a), deps_1_8_and_1_6);
assert_eq!(key_deps.cmd_deps(&cmd_b), deps_1_8_and_1_5);
assert_eq!(key_deps.cmd_deps(&cmd_ab), deps_1_8_and_1_5_and_1_6);
assert_eq!(key_deps.cmd_deps(&cmd_c), deps_1_8_and_1_7);
assert_eq!(key_deps.noop_deps(), deps_1_8_and_1_5_and_1_6_and_1_7);
// add B with {1,9}
key_deps.add_cmd(dot_gen.next_id(), &cmd_b, None);
// 1. conf with {1,8}|{1,6} for A
// 2. conf with {1,8}|{1,9} for B
// 3. conf with {1,8}|{1,6} and {1,9} for A-B
// 4. conf with {1,8}|{1,7} for C
// 5. conf with {1,8}|{1,6} and {1,9} and {1,7} for noop
let deps_1_8_and_1_6 =
HashSet::from_iter(vec![Dot::new(1, 8), Dot::new(1, 6)]);
let deps_1_8_and_1_9 =
HashSet::from_iter(vec![Dot::new(1, 8), Dot::new(1, 9)]);
let deps_1_8_and_1_6_and_1_9 = HashSet::from_iter(vec![
Dot::new(1, 8),
Dot::new(1, 6),
Dot::new(1, 9),
]);
let deps_1_8_and_1_6_and_1_7_and_1_9 = HashSet::from_iter(vec![
Dot::new(1, 8),
Dot::new(1, 6),
Dot::new(1, 7),
Dot::new(1, 9),
]);
assert_eq!(key_deps.cmd_deps(&cmd_a), deps_1_8_and_1_6);
assert_eq!(key_deps.cmd_deps(&cmd_b), deps_1_8_and_1_9);
assert_eq!(key_deps.cmd_deps(&cmd_ab), deps_1_8_and_1_6_and_1_9);
assert_eq!(key_deps.cmd_deps(&cmd_c), deps_1_8_and_1_7);
assert_eq!(key_deps.noop_deps(), deps_1_8_and_1_6_and_1_7_and_1_9);
}
fn read_deps<KD: KeyDeps>(deps_nfr: bool) {
// create key deps
let shard_id = 0;
let mut key_deps = KD::new(shard_id, deps_nfr);
// create dot gen
let process_id = 1;
let mut dot_gen = DotGen::new(process_id);
// keys
let key = String::from("A");
let value = String::from("");
// read
let read_rifl = Rifl::new(100, 1); // client 100, 1st op
let read = get(read_rifl, key.clone());
// write
let write_rifl = Rifl::new(101, 1); // client 101, 1st op
let write = multi_put(write_rifl, vec![key.clone()], value);
// 1. empty conf for read
// 2. empty conf for write
let conf = key_deps.cmd_deps(&read);
assert_eq!(conf, HashSet::new());
let conf = key_deps.cmd_deps(&write);
assert_eq!(conf, HashSet::new());
// add read with {1,1}
key_deps.add_cmd(dot_gen.next_id(), &read, None);
// 1. empty conf for read
// 2. (NFR=true) empty conf for write
// 2. (NFR=false) conf with {1,1} for write
let deps_1_1 = HashSet::from_iter(vec![Dot::new(1, 1)]);
assert_eq!(key_deps.cmd_deps(&read), HashSet::new());
if deps_nfr {
assert_eq!(key_deps.cmd_deps(&write), HashSet::new());
} else {
assert_eq!(key_deps.cmd_deps(&write), deps_1_1);
}
// add read with {1,2}
key_deps.add_cmd(dot_gen.next_id(), &read, None);
// 1. empty conf for read
// 2. (NFR=true) empty conf for write
// 2. (NFR=false) conf with {1,2} for write
let deps_1_2 = HashSet::from_iter(vec![Dot::new(1, 2)]);
assert_eq!(key_deps.cmd_deps(&read), HashSet::new());
if deps_nfr {
assert_eq!(key_deps.cmd_deps(&write), HashSet::new());
} else {
assert_eq!(key_deps.cmd_deps(&write), deps_1_2);
}
// add write with {1,3}
key_deps.add_cmd(dot_gen.next_id(), &write, None);
// 1. conf with {1,3} for read
// 2. (NFR=true) conf with {1,3} for write
// 2. (NFR=false) conf with {1,2}|{1,3} for write
let deps_1_3 = HashSet::from_iter(vec![Dot::new(1, 3)]);
let deps_1_2_and_1_3 =
HashSet::from_iter(vec![Dot::new(1, 2), Dot::new(1, 3)]);
assert_eq!(key_deps.cmd_deps(&read), deps_1_3);
if deps_nfr {
assert_eq!(key_deps.cmd_deps(&write), deps_1_3);
} else {
assert_eq!(key_deps.cmd_deps(&write), deps_1_2_and_1_3);
}
// add write with {1,4}
key_deps.add_cmd(dot_gen.next_id(), &write, None);
// 1. conf with {1,4} for read
// 2. (NFR=true) conf with {1,4} for write
// 2. (NFR=false) conf with {1,2}|{1,4} for write
let deps_1_4 = HashSet::from_iter(vec![Dot::new(1, 4)]);
let deps_1_2_and_1_4 =
HashSet::from_iter(vec![Dot::new(1, 2), Dot::new(1, 4)]);
assert_eq!(key_deps.cmd_deps(&read), deps_1_4);
if deps_nfr {
assert_eq!(key_deps.cmd_deps(&write), deps_1_4);
} else {
assert_eq!(key_deps.cmd_deps(&write), deps_1_2_and_1_4);
}
// add read with {1,5}
key_deps.add_cmd(dot_gen.next_id(), &read, None);
// 1. conf with {1,4} for read
// 2. (NFR=true) conf with {1,4} for write
// 2. (NFR=false) conf with {1,4}|{1,5} for write
let deps_1_4_and_1_5 =
HashSet::from_iter(vec![Dot::new(1, 4), Dot::new(1, 5)]);
assert_eq!(key_deps.cmd_deps(&read), deps_1_4);
if deps_nfr {
assert_eq!(key_deps.cmd_deps(&write), deps_1_4);
} else {
assert_eq!(key_deps.cmd_deps(&write), deps_1_4_and_1_5);
}
}
#[test]
fn concurrent_locked_test() {
let nthreads = 2;
let ops_number = 3000;
let max_keys_per_command = 2;
let keys_number = 4;
let noop_probability = 50;
for _ in 0..10 {
concurrent_test::<LockedKeyDeps>(
nthreads,
ops_number,
max_keys_per_command,
keys_number,
noop_probability,
);
}
}
fn concurrent_test<KD: KeyDeps + Send + Sync + 'static>(
nthreads: usize,
ops_number: usize,
max_keys_per_command: usize,
keys_number: usize,
noop_probability: usize,
) {
// create key deps
let shard_id = 0;
let deps_nfr = false;
let key_deps = KD::new(shard_id, deps_nfr);
// spawn workers
let handles: Vec<_> = (1..=nthreads)
.map(|process_id| {
let key_deps_clone = key_deps.clone();
thread::spawn(move || {
worker(
process_id as ProcessId,
key_deps_clone,
ops_number,
max_keys_per_command,
keys_number,
noop_probability,
)
})
})
.collect();
// wait for all workers and aggregate their deps
let mut all_deps = HashMap::new();
for handle in handles {
let results = handle.join().expect("worker should finish");
for (dot, cmd, deps) in results {
let res = all_deps.insert(dot, (cmd, deps));
assert!(res.is_none());
}
}
// get all dots
let dots: Vec<_> = all_deps.keys().cloned().collect();
// check for each possible pair of operations if they conflict
for i in 0..dots.len() {
for j in (i + 1)..dots.len() {
let dot_a = dots[i];
let dot_b = dots[j];
let (cmd_a, _) =
all_deps.get(&dot_a).expect("dot_a must exist");
let (cmd_b, _) =
all_deps.get(&dot_b).expect("dot_b must exist");
let should_conflict = match (cmd_a, cmd_b) {
(Some(cmd_a), Some(cmd_b)) => {
// neither command is a noop
cmd_a.conflicts(&cmd_b)
}
_ => {
// at least one of the command is a noop, and thus they
// conflict
true
}
};
if should_conflict {
let conflict = is_dep(dot_a, dot_b, &all_deps)
|| is_dep(dot_b, dot_a, &all_deps);
assert!(conflict, "dot {:?} should be a dependency of {:?} (or the other way around); but that was not the case: {:?}", dot_a, dot_b, all_deps);
}
}
}
}
fn is_dep(
dot: Dot,
dep: Dot,
all_deps: &HashMap<Dot, (Option<Command>, HashSet<Dot>)>,
) -> bool {
// check if it's direct dependency, and if it's not a direct dependency,
// do depth-first-search
let (_, deps) = all_deps.get(&dot).expect("dot must exist");
deps.contains(&dep)
|| deps.iter().any(|dep| is_dep(dot, *dep, all_deps))
}
fn worker<K: KeyDeps>(
process_id: ProcessId,
mut key_deps: K,
ops_number: usize,
max_keys_per_command: usize,
keys_number: usize,
noop_probability: usize,
) -> Vec<(Dot, Option<Command>, HashSet<Dot>)> {
// create dot gen
let mut dot_gen = DotGen::new(process_id);
// all deps worker has generated
let mut all_deps = Vec::new();
for _ in 0..ops_number {
// generate dot
let dot = dot_gen.next_id();
// generate command
let cmd = util::gen_cmd(
max_keys_per_command,
keys_number,
noop_probability,
);
// compute deps
let deps = match cmd.as_ref() {
Some(cmd) => {
// add as command
key_deps.add_cmd(dot, &cmd, None)
}
None => {
// add as noop
key_deps.add_noop(dot)
}
};
// save deps
all_deps.push((dot, cmd, extract_dots(deps)));
}
all_deps
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/pred/mod.rs | fantoch_ps/src/protocol/common/pred/mod.rs | // This module contains the definition of `KeyClocks` and `QuorumClocks`.
mod clocks;
// Re-exports.
pub use clocks::{
Clock, KeyClocks, LockedKeyClocks, QuorumClocks, QuorumRetries,
};
use fantoch::id::Dot;
use fantoch::HashSet;
use serde::{Deserialize, Serialize};
use std::iter::FromIterator;
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct CaesarDeps {
pub deps: HashSet<Dot>,
}
impl CaesarDeps {
pub fn new() -> Self {
Self {
deps: Default::default(),
}
}
pub fn insert(&mut self, dep: Dot) {
self.deps.insert(dep);
}
pub fn remove(&mut self, dep: &Dot) {
self.deps.remove(dep);
}
pub fn contains(&self, dep: &Dot) -> bool {
self.deps.contains(dep)
}
pub fn merge(&mut self, other: Self) {
for dep in other.deps {
self.insert(dep);
}
}
pub fn len(&self) -> usize {
self.deps.len()
}
pub fn iter(&self) -> impl Iterator<Item = &Dot> + '_ {
self.deps.iter()
}
}
impl FromIterator<Dot> for CaesarDeps {
fn from_iter<T: IntoIterator<Item = Dot>>(iter: T) -> Self {
let mut compressed_dots = Self::new();
for dot in iter {
compressed_dots.insert(dot);
}
compressed_dots
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/pred/clocks/quorum.rs | fantoch_ps/src/protocol/common/pred/clocks/quorum.rs | use super::Clock;
use crate::protocol::common::pred::CaesarDeps;
use fantoch::id::ProcessId;
use fantoch::HashSet;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct QuorumClocks {
// fast quorum size
fast_quorum_size: usize,
// majority quorum size
write_quorum_size: usize,
// set of processes that have participated in this computation
participants: HashSet<ProcessId>,
// max of all `clock`s
clock: Clock,
// union of all predecessors
deps: CaesarDeps,
// and of all `ok`s
ok: bool,
}
impl QuorumClocks {
/// Creates a `QuorumClocks` instance given the quorum size.
pub fn new(
process_id: ProcessId,
fast_quorum_size: usize,
write_quorum_size: usize,
) -> Self {
Self {
fast_quorum_size,
write_quorum_size,
participants: HashSet::with_capacity(fast_quorum_size),
clock: Clock::new(process_id),
deps: CaesarDeps::new(),
ok: true,
}
}
/// Adds new `deps` reported by `process_id`.
pub fn add(
&mut self,
process_id: ProcessId,
clock: Clock,
deps: CaesarDeps,
ok: bool,
) {
assert!(self.participants.len() < self.fast_quorum_size);
// record new participant
self.participants.insert(process_id);
// update clock and deps
self.clock.join(&clock);
self.deps.merge(deps);
self.ok = self.ok && ok;
}
/// Check if we have all the replies we need.
pub fn all(&self) -> bool {
let replied = self.participants.len();
// we have all the replies we need if either one of the following:
// - (at least) a majority has replied, and one of those processes
// reported !ok
// - the whole fast quorum replied (independently of their replies)
let some_not_ok_after_majority =
!self.ok && replied >= self.write_quorum_size;
let fast_quorum = replied == self.fast_quorum_size;
some_not_ok_after_majority || fast_quorum
}
/// Returns the current aggregated result.
pub fn aggregated(&mut self) -> (Clock, CaesarDeps, bool) {
// clear unnecessary info
std::mem::take(&mut self.participants);
// resets `this.deps` so that it can be returned without having to clone
// it
let deps = std::mem::take(&mut self.deps);
(self.clock, deps, self.ok)
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct QuorumRetries {
// majority quorum size
write_quorum_size: usize,
// set of processes that have participated in this computation
participants: HashSet<ProcessId>,
// union of all predecessors
deps: CaesarDeps,
}
impl QuorumRetries {
/// Creates a `QuorumRetries` instance given the quorum size.
pub fn new(write_quorum_size: usize) -> Self {
Self {
write_quorum_size,
participants: HashSet::with_capacity(write_quorum_size),
deps: CaesarDeps::new(),
}
}
/// Adds new `deps` reported by `process_id`.
pub fn add(&mut self, process_id: ProcessId, deps: CaesarDeps) {
assert!(self.participants.len() < self.write_quorum_size);
// record new participant
self.participants.insert(process_id);
self.deps.merge(deps);
}
/// Check if we have all the replies we need.
pub fn all(&self) -> bool {
self.participants.len() == self.write_quorum_size
}
/// Returns the current aggregated result.
pub fn aggregated(&mut self) -> CaesarDeps {
// clear unnecessary info
std::mem::take(&mut self.participants);
// resets `this.deps` so that it can be returned without having to clone
// it
std::mem::take(&mut self.deps)
}
}
#[cfg(test)]
mod tests {
use super::*;
use fantoch::id::Dot;
use std::iter::FromIterator;
#[test]
fn quorum_clocks() {
// setup
let fq = 3;
let mq = 2;
let process_id = 1;
// agreement
let mut quorum_clocks = QuorumClocks::new(process_id, fq, mq);
let clock_1 = Clock::from(10, 1);
let deps_1 = CaesarDeps::from_iter(vec![Dot::new(1, 1)]);
let ok_1 = true;
let clock_2 = Clock::from(10, 2);
let deps_2 = CaesarDeps::from_iter(vec![Dot::new(1, 2)]);
let ok_2 = true;
let clock_3 = Clock::from(10, 3);
let deps_3 = CaesarDeps::from_iter(vec![Dot::new(1, 1)]);
let ok_3 = true;
quorum_clocks.add(1, clock_1, deps_1, ok_1);
assert!(!quorum_clocks.all());
quorum_clocks.add(2, clock_2, deps_2, ok_2);
assert!(!quorum_clocks.all());
quorum_clocks.add(3, clock_3, deps_3, ok_3);
assert!(quorum_clocks.all());
// check aggregated
let (clock, deps, ok) = quorum_clocks.aggregated();
assert_eq!(clock, Clock::from(10, 3));
assert_eq!(
deps,
CaesarDeps::from_iter(vec![Dot::new(1, 1), Dot::new(1, 2)])
);
assert!(ok);
// disagreement
let clock_1 = Clock::from(10, 1);
let deps_1 = CaesarDeps::from_iter(vec![Dot::new(1, 1)]);
let ok_1 = true;
let clock_2 = Clock::from(12, 2);
let deps_2 =
CaesarDeps::from_iter(vec![Dot::new(1, 2), Dot::new(1, 3)]);
let ok_2 = false;
let clock_3 = Clock::from(10, 3);
let deps_3 = CaesarDeps::from_iter(vec![Dot::new(1, 4)]);
let ok_3 = true;
// order: 1, 2
let mut quorum_clocks = QuorumClocks::new(process_id, fq, mq);
quorum_clocks.add(1, clock_1, deps_1.clone(), ok_1);
assert!(!quorum_clocks.all());
quorum_clocks.add(2, clock_2, deps_2.clone(), ok_2);
assert!(quorum_clocks.all());
// check aggregated
let (clock, deps, ok) = quorum_clocks.aggregated();
assert_eq!(clock, Clock::from(12, 2));
assert_eq!(
deps,
CaesarDeps::from_iter(vec![
Dot::new(1, 1),
Dot::new(1, 2),
Dot::new(1, 3)
])
);
assert!(!ok);
// order: 1, 3, 2
let mut quorum_clocks = QuorumClocks::new(process_id, fq, mq);
quorum_clocks.add(1, clock_1, deps_1.clone(), ok_1);
assert!(!quorum_clocks.all());
quorum_clocks.add(3, clock_3, deps_3.clone(), ok_3);
assert!(!quorum_clocks.all());
quorum_clocks.add(2, clock_2, deps_2.clone(), ok_2);
assert!(quorum_clocks.all());
// check aggregated
let (clock, deps, ok) = quorum_clocks.aggregated();
assert_eq!(clock, Clock::from(12, 2));
assert_eq!(
deps,
CaesarDeps::from_iter(vec![
Dot::new(1, 1),
Dot::new(1, 2),
Dot::new(1, 3),
Dot::new(1, 4)
])
);
assert!(!ok);
// order: 2, 3
let mut quorum_clocks = QuorumClocks::new(process_id, fq, mq);
quorum_clocks.add(2, clock_2, deps_2.clone(), ok_2);
assert!(!quorum_clocks.all());
quorum_clocks.add(3, clock_3, deps_3.clone(), ok_3);
assert!(quorum_clocks.all());
// check aggregated
let (clock, deps, ok) = quorum_clocks.aggregated();
assert_eq!(clock, Clock::from(12, 2));
assert_eq!(
deps,
CaesarDeps::from_iter(vec![
Dot::new(1, 2),
Dot::new(1, 3),
Dot::new(1, 4)
])
);
assert!(!ok);
}
#[test]
fn quorum_retries() {
// setup
let mq = 2;
// agreement
let mut quorum_retries = QuorumRetries::new(mq);
let deps_1 = CaesarDeps::from_iter(vec![Dot::new(1, 1)]);
let deps_2 = CaesarDeps::from_iter(vec![Dot::new(1, 2)]);
quorum_retries.add(1, deps_1);
assert!(!quorum_retries.all());
quorum_retries.add(2, deps_2);
assert!(quorum_retries.all());
// check aggregated
let deps = quorum_retries.aggregated();
assert_eq!(
deps,
CaesarDeps::from_iter(vec![Dot::new(1, 1), Dot::new(1, 2)])
);
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/pred/clocks/mod.rs | fantoch_ps/src/protocol/common/pred/clocks/mod.rs | // This module contains the definition of `KeyClocks`.
mod keys;
// This module contains the definition of `QuorumClocks` and `QuorumRetries`.
mod quorum;
// Re-exports.
pub use keys::{KeyClocks, LockedKeyClocks};
pub use quorum::{QuorumClocks, QuorumRetries};
use fantoch::id::ProcessId;
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
#[derive(
Debug,
Clone,
Copy,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Serialize,
Deserialize,
)]
pub struct Clock {
seq: u64,
process_id: ProcessId,
}
impl Clock {
pub fn new(process_id: ProcessId) -> Self {
Self { seq: 0, process_id }
}
pub fn from(seq: u64, process_id: ProcessId) -> Self {
Self { seq, process_id }
}
// Lexicographic join.
pub fn join(&mut self, other: &Self) {
match self.seq.cmp(&other.seq) {
Ordering::Greater => {
// nothing to do
}
Ordering::Less => {
// take the `other` value
*self = *other;
}
Ordering::Equal => {
// update the second component
self.process_id =
std::cmp::max(self.process_id, other.process_id);
}
}
}
pub fn is_zero(&self) -> bool {
self.seq == 0
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
// check lexicographic ordering
fn ord() {
assert!((Clock::from(10, 1) == Clock::from(10, 1)));
assert!(!(Clock::from(10, 1) < Clock::from(10, 1)));
assert!(!(Clock::from(10, 1) > Clock::from(10, 1)));
assert!(!(Clock::from(10, 1) == Clock::from(10, 2)));
assert!((Clock::from(10, 1) < Clock::from(10, 2)));
assert!(!(Clock::from(10, 1) > Clock::from(10, 2)));
assert!(!(Clock::from(9, 1) == Clock::from(10, 2)));
assert!((Clock::from(9, 1) < Clock::from(10, 2)));
assert!(!(Clock::from(9, 1) > Clock::from(10, 2)));
assert!(!(Clock::from(10, 1) == Clock::from(9, 2)));
assert!(!(Clock::from(10, 1) < Clock::from(9, 2)));
assert!((Clock::from(10, 1) > Clock::from(9, 2)));
}
#[test]
fn join() {
let p1 = 1;
let p2 = 2;
let p3 = 3;
let p4 = 4;
let mut clock = Clock::new(p1);
// if we join with something with a higher timestamp, then we take their
// value
clock.join(&Clock::from(2, p2));
assert_eq!(clock, Clock::from(2, p2));
// if we join with something with the same timestamp, then we take the
// max of the identifiers
clock.join(&Clock::from(2, p3));
assert_eq!(clock, Clock::from(2, p3));
clock.join(&Clock::from(4, p3));
assert_eq!(clock, Clock::from(4, p3));
clock.join(&Clock::from(4, p2));
assert_eq!(clock, Clock::from(4, p3));
// if we join with something with a lower timestamp, then nothing
// happens
clock.join(&Clock::from(1, p4));
clock.join(&Clock::from(2, p4));
clock.join(&Clock::from(3, p4));
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/pred/clocks/keys/locked.rs | fantoch_ps/src/protocol/common/pred/clocks/keys/locked.rs | use super::{Clock, KeyClocks};
use crate::protocol::common::pred::CaesarDeps;
use fantoch::command::Command;
use fantoch::id::{Dot, ProcessId, ShardId};
use fantoch::kvs::Key;
use fantoch::shared::{SharedMap, SharedMapRef};
use fantoch::{HashMap, HashSet};
use parking_lot::{Mutex, RwLock};
use std::cmp::Ordering;
use std::sync::Arc;
// timestamps are unique and thus it's enough to store one command `Dot` per
// timestamp.
// Note: this `Clock` should correspond to the `clock` stored in Caesar process.
type CommandsPerKey = HashMap<Clock, Dot>;
// all clock's are protected by a rwlock
type Clocks = Arc<SharedMap<Key, RwLock<CommandsPerKey>>>;
#[derive(Debug, Clone)]
pub struct LockedKeyClocks {
process_id: ProcessId,
shard_id: ShardId,
seq: Arc<Mutex<u64>>,
clocks: Clocks,
}
impl KeyClocks for LockedKeyClocks {
/// Create a new `KeyClocks` instance.
fn new(process_id: ProcessId, shard_id: ShardId) -> Self {
Self {
process_id,
shard_id,
seq: Arc::new(Mutex::new(0)),
clocks: Arc::new(SharedMap::new()),
}
}
// Generate the next clock.
fn clock_next(&mut self) -> Clock {
let mut seq = self.seq.lock();
*seq += 1;
Clock::from(*seq, self.process_id)
}
// Joins with remote clock.
fn clock_join(&mut self, other: &Clock) {
let mut seq = self.seq.lock();
*seq = std::cmp::max(*seq, other.seq);
}
// Adds a new command with some tentative timestamp.
// After this, it starts being reported as a predecessor of other commands
// with tentative higher timestamps.
fn add(&mut self, dot: Dot, cmd: &Command, clock: Clock) {
cmd.keys(self.shard_id).for_each(|key| {
// add ourselves to the set of commands and assert there was no
// command with the same timestamp
let res = self.update_commands(key, |commands| {
commands.write().insert(clock, dot)
});
assert!(
res.is_none(),
"can't add a timestamp belonging to a command already added"
);
});
}
// Removes a previously added command with some tentative timestamp.
// After this, it stops being reported as a predecessor of other commands.
fn remove(&mut self, cmd: &Command, clock: Clock) {
cmd.keys(self.shard_id).for_each(|key| {
// remove ourselves from the set of commands and assert that we were
// indeed in the set
let res = self.update_commands(key, |commands| {
commands.write().remove(&clock)
});
assert!(
res.is_some(),
"can't remove a timestamp belonging to a command never added"
);
});
}
/// Computes all conflicting commands with a timestamp lower than `clock`.
/// If `higher` is set, it fills it with all the conflicting commands with a
/// timestamp higher than `clock`.
fn predecessors(
&self,
dot: Dot,
cmd: &Command,
clock: Clock,
mut higher: Option<&mut HashSet<Dot>>,
) -> CaesarDeps {
// TODO is this data structure ever GCed? otherwise the set that we
// return here will grow unbounded as the more commands are processed in
// the system
let mut predecessors = CaesarDeps::new();
cmd.keys(self.shard_id).for_each(|key| {
self.apply_if_commands_contains_key(key, |commands| {
for (cmd_clock, cmd_dot) in commands.read().iter() {
match cmd_clock.cmp(&clock) {
Ordering::Less => {
// if it has a timestamp smaller than `clock`, add
// it as a predecessor
// - we don't assert that doesn't exist already
// because the same
// `Dot` might be stored on different keys if we
// have multi-key
// commands
predecessors.insert(*cmd_dot);
}
Ordering::Greater => {
// if it has a timestamp smaller than `clock`, add
// it to `higher` if it's defined
if let Some(higher) = higher.as_deref_mut() {
higher.insert(*cmd_dot);
}
}
Ordering::Equal => {
if *cmd_dot != dot {
panic!("found different command with the same timestamp")
}
}
}
}
});
});
predecessors
}
fn parallel() -> bool {
true
}
}
impl LockedKeyClocks {
    /// Applies `f` to the set of commands stored for `key`, if `key` is
    /// already being tracked; returns `None` otherwise.
    fn apply_if_commands_contains_key<F, R>(
        &self,
        key: &Key,
        mut f: F,
    ) -> Option<R>
    where
        F: FnMut(SharedMapRef<'_, Key, RwLock<CommandsPerKey>>) -> R,
    {
        // look the key up and, only if present, run `f` on its commands
        match self.clocks.get(key) {
            Some(commands) => Some(f(commands)),
            None => None,
        }
    }

    /// Applies `f` to the set of commands stored for `key`, first creating an
    /// empty (default) set in case `key` is not tracked yet.
    fn update_commands<F, R>(&mut self, key: &Key, mut f: F) -> R
    where
        F: FnMut(SharedMapRef<'_, Key, RwLock<CommandsPerKey>>) -> R,
    {
        // fetch (or initialize) the commands on this key and run `f` on them
        let commands = self.clocks.get_or(key, RwLock::default);
        f(commands)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use fantoch::id::Rifl;
use fantoch::kvs::KVOp;
use std::iter::FromIterator;
fn deps(deps: Vec<Dot>) -> HashSet<Dot> {
HashSet::from_iter(deps)
}
fn caesar_deps(deps: Vec<Dot>) -> CaesarDeps {
CaesarDeps::from_iter(deps)
}
#[test]
fn clock_test() {
let p1 = 1;
let p2 = 2;
let shard_id = 0;
let mut key_clocks = LockedKeyClocks::new(p1, shard_id);
assert_eq!(key_clocks.clock_next(), Clock::from(1, p1));
assert_eq!(key_clocks.clock_next(), Clock::from(2, p1));
// if we merge with an lower clock, everything remains as is
key_clocks.clock_join(&Clock::from(1, p2));
assert_eq!(key_clocks.clock_next(), Clock::from(3, p1));
assert_eq!(key_clocks.clock_next(), Clock::from(4, p1));
// if we merge with a higher clock, the next clock generated will be
// higher than that
key_clocks.clock_join(&Clock::from(10, p2));
assert_eq!(key_clocks.clock_next(), Clock::from(11, p1));
assert_eq!(key_clocks.clock_next(), Clock::from(12, p1));
}
#[test]
fn predecessors_test() {
let p1 = 1;
let shard_id = 0;
let mut key_clocks = LockedKeyClocks::new(p1, shard_id);
// create command on key A
let cmd_a = Command::from(
Rifl::new(1, 1),
vec![(String::from("A"), KVOp::Put(String::new()))],
);
// create command on key B
let cmd_b = Command::from(
Rifl::new(1, 1),
vec![(String::from("B"), KVOp::Put(String::new()))],
);
// create command on key C
let cmd_c = Command::from(
Rifl::new(1, 1),
vec![(String::from("C"), KVOp::Put(String::new()))],
);
// create command on keys A and C
let cmd_ac = Command::from(
Rifl::new(1, 1),
vec![
(String::from("A"), KVOp::Put(String::new())),
(String::from("C"), KVOp::Put(String::new())),
],
);
// create dots and clocks
let dot = Dot::new(p1, 0); // some dot, doesn't matter
let dot_1 = Dot::new(p1, 1);
let dot_3 = Dot::new(p1, 3);
let clock_1 = Clock::from(1, p1);
let clock_2 = Clock::from(2, p1);
let clock_3 = Clock::from(3, p1);
let clock_4 = Clock::from(4, p1);
let check = |key_clocks: &LockedKeyClocks,
cmd: &Command,
clock: Clock,
expected_blocking: HashSet<Dot>,
expected_predecessors: CaesarDeps| {
let mut blocking = HashSet::new();
let predecessors =
key_clocks.predecessors(dot, cmd, clock, Some(&mut blocking));
assert_eq!(blocking, expected_blocking);
assert_eq!(predecessors, expected_predecessors);
};
// in the beginning, nothing is reported
check(
&key_clocks,
&cmd_a,
clock_2,
deps(vec![]),
caesar_deps(vec![]),
);
// --------------------------------------
// add dot_1 with clock_1 on key a
key_clocks.add(dot_1, &cmd_a, clock_1);
// i. dot_1 is reported for command a with clock 2
check(
&key_clocks,
&cmd_a,
clock_2,
deps(vec![]),
caesar_deps(vec![dot_1]),
);
// ii. dot_1 is *not* reported for command b with clock 2
check(
&key_clocks,
&cmd_b,
clock_2,
deps(vec![]),
caesar_deps(vec![]),
);
// iii. dot_1 is *not* reported for command c with clock 2
check(
&key_clocks,
&cmd_c,
clock_2,
deps(vec![]),
caesar_deps(vec![]),
);
// iv. dot_1 is reported for command ac with clock 2
check(
&key_clocks,
&cmd_ac,
clock_2,
deps(vec![]),
caesar_deps(vec![dot_1]),
);
// --------------------------------------
// add dot_3 with clock_3 on keys a and c
key_clocks.add(dot_3, &cmd_ac, clock_3);
// 1. check that nothing changed if we check again with clock 2
// i. dot_1 is reported for command a with clock 2, and dot_3 blocks
check(
&key_clocks,
&cmd_a,
clock_2,
deps(vec![dot_3]),
caesar_deps(vec![dot_1]),
);
// ii. no dot is reported for command b with clock 2
check(
&key_clocks,
&cmd_b,
clock_2,
deps(vec![]),
caesar_deps(vec![]),
);
// iii. dot_1 is *not* reported for command c with clock 2, but dot_3
// blocks
check(
&key_clocks,
&cmd_c,
clock_2,
deps(vec![dot_3]),
caesar_deps(vec![]),
);
// iv. dot_1 is reported for command ac with clock 2, and dot_3 blocks
check(
&key_clocks,
&cmd_ac,
clock_2,
deps(vec![dot_3]),
caesar_deps(vec![dot_1]),
);
// 2. now check for clock 4
// i. dot_1 and dot_3 are reported for command a with clock 4
check(
&key_clocks,
&cmd_a,
clock_4,
deps(vec![]),
caesar_deps(vec![dot_1, dot_3]),
);
// ii. no dot is reported for command b with clock 4
check(
&key_clocks,
&cmd_b,
clock_4,
deps(vec![]),
caesar_deps(vec![]),
);
// iii. only dot_3 is reported for command c with clock 4
check(
&key_clocks,
&cmd_c,
clock_4,
deps(vec![]),
caesar_deps(vec![dot_3]),
);
// iv. dot_1 and dot_3 are reported for command ac with clock 4
check(
&key_clocks,
&cmd_ac,
clock_4,
deps(vec![]),
caesar_deps(vec![dot_1, dot_3]),
);
// --------------------------------------
// remove clock_1 on key a
key_clocks.remove(&cmd_a, clock_1);
// 1. check for clock 2
// i. no dot is reported for command a with clock 2, and dot_3 blocks
check(
&key_clocks,
&cmd_a,
clock_2,
deps(vec![dot_3]),
caesar_deps(vec![]),
);
// ii. no dot is reported for command b with clock 2
check(
&key_clocks,
&cmd_b,
clock_2,
deps(vec![]),
caesar_deps(vec![]),
);
// iii. no dot is reported for command c with clock 2, but dot_3
// blocks
check(
&key_clocks,
&cmd_c,
clock_2,
deps(vec![dot_3]),
caesar_deps(vec![]),
);
// iv. no dot is reported for command ac with clock 2, and dot_3 blocks
check(
&key_clocks,
&cmd_ac,
clock_2,
deps(vec![dot_3]),
caesar_deps(vec![]),
);
// 2. check for clock 4
// i. only dot_3 is reported for command a with clock 4
check(
&key_clocks,
&cmd_a,
clock_4,
deps(vec![]),
caesar_deps(vec![dot_3]),
);
// ii. neither dot is reported for command b with clock 4
check(
&key_clocks,
&cmd_b,
clock_4,
deps(vec![]),
caesar_deps(vec![]),
);
// iii. only dot_3 is reported for command c with clock 4
check(
&key_clocks,
&cmd_c,
clock_4,
deps(vec![]),
caesar_deps(vec![dot_3]),
);
// iv. only dot_3 are reported for command ac with clock 4
check(
&key_clocks,
&cmd_ac,
clock_4,
deps(vec![]),
caesar_deps(vec![dot_3]),
);
// --------------------------------------
// remove clock_3 on key a c
key_clocks.remove(&cmd_ac, clock_3);
// check only for clock 4 that no dot is reported for any command
check(
&key_clocks,
&cmd_a,
clock_4,
deps(vec![]),
caesar_deps(vec![]),
);
check(
&key_clocks,
&cmd_b,
clock_4,
deps(vec![]),
caesar_deps(vec![]),
);
check(
&key_clocks,
&cmd_c,
clock_4,
deps(vec![]),
caesar_deps(vec![]),
);
check(
&key_clocks,
&cmd_ac,
clock_4,
deps(vec![]),
caesar_deps(vec![]),
);
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/pred/clocks/keys/mod.rs | fantoch_ps/src/protocol/common/pred/clocks/keys/mod.rs | // This module contains the definition of `LockedKeyClocks`.
mod locked;
// Re-exports.
pub use locked::LockedKeyClocks;
use super::Clock;
use crate::protocol::common::pred::CaesarDeps;
use fantoch::command::Command;
use fantoch::id::{Dot, ProcessId, ShardId};
use fantoch::HashSet;
use std::fmt::Debug;
pub trait KeyClocks: Debug + Clone {
    /// Create a new `KeyClocks` instance.
    fn new(process_id: ProcessId, shard_id: ShardId) -> Self;

    /// Generate the next clock.
    fn clock_next(&mut self) -> Clock;

    /// Joins with remote clock.
    fn clock_join(&mut self, other: &Clock);

    /// Adds a new command with some tentative timestamp.
    /// After this, it starts being reported as a predecessor of other commands
    /// with tentative higher timestamps.
    fn add(&mut self, dot: Dot, cmd: &Command, clock: Clock);

    /// Removes a previously added command with some tentative timestamp.
    /// After this, it stops being reported as a predecessor of other commands.
    fn remove(&mut self, cmd: &Command, clock: Clock);

    /// Computes all conflicting commands with a timestamp lower than `clock`.
    /// If `higher` is set, it fills it with all the conflicting commands with a
    /// timestamp higher than `clock`.
    fn predecessors(
        &self,
        dot: Dot,
        cmd: &Command,
        clock: Clock,
        higher: Option<&mut HashSet<Dot>>,
    ) -> CaesarDeps;

    /// Returns whether this implementation can be used concurrently by several
    /// workers.
    fn parallel() -> bool;
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/table/votes.rs | fantoch_ps/src/protocol/common/table/votes.rs | use fantoch::hash_map::{self, HashMap};
use fantoch::id::ProcessId;
use fantoch::kvs::Key;
use serde::{Deserialize, Serialize};
use std::fmt;
/// `Votes` are all the votes on some command, indexed by key.
#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)]
pub struct Votes {
    // mapping from each key to the (possibly compressed) vote ranges on it
    votes: HashMap<Key, Vec<VoteRange>>,
}
impl Votes {
    /// Creates an empty `Votes` instance.
    pub fn new() -> Self {
        Self {
            votes: HashMap::new(),
        }
    }

    /// Creates an empty `Votes` instance with space preallocated for
    /// `capacity` keys.
    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            votes: HashMap::with_capacity(capacity),
        }
    }

    /// Add new vote range to `Votes`.
    #[allow(clippy::ptr_arg)]
    pub fn add(&mut self, key: &Key, vote: VoteRange) {
        // fetch the votes on this key, initializing them if needed; the
        // two-step lookup avoids cloning `key` when it's already present
        let key_votes = match self.votes.get_mut(key) {
            Some(key_votes) => key_votes,
            None => self.votes.entry(key.clone()).or_insert_with(Vec::new),
        };
        // try to extend the last range with the new vote; only push the new
        // range when compression is not possible
        match key_votes.last_mut() {
            Some(last) => {
                if let Some(vote) = last.try_compress(vote) {
                    key_votes.push(vote);
                }
            }
            None => key_votes.push(vote),
        }
    }

    /// Sets the votes on some `Key`.
    ///
    /// Panics if there were already votes on that key.
    #[allow(clippy::ptr_arg)]
    pub fn set(&mut self, key: Key, key_votes: Vec<VoteRange>) {
        let previous = self.votes.insert(key, key_votes);
        assert!(previous.is_none());
    }

    /// Merge with another `Votes`.
    /// Performance should be better if `self.votes.len() > remote_votes.len()`
    /// than with the opposite.
    pub fn merge(&mut self, remote_votes: Votes) {
        for (key, key_votes) in remote_votes {
            // append the new votes to the current set of votes on this key
            self.votes
                .entry(key)
                .or_insert_with(Vec::new)
                .extend(key_votes);
        }
    }

    /// Gets the current votes on some key.
    #[allow(clippy::ptr_arg)]
    pub fn get(&self, key: &Key) -> Option<&Vec<VoteRange>> {
        self.votes.get(key)
    }

    /// Removes and returns the votes on some key.
    #[allow(clippy::ptr_arg)]
    pub fn remove(&mut self, key: &Key) -> Option<Vec<VoteRange>> {
        self.votes.remove(key)
    }

    /// Returns the number of keys with votes.
    pub fn len(&self) -> usize {
        self.votes.len()
    }

    /// Checks if `Votes` is empty.
    pub fn is_empty(&self) -> bool {
        self.votes.is_empty()
    }
}
impl IntoIterator for Votes {
    type Item = (Key, Vec<VoteRange>);
    type IntoIter = hash_map::IntoIter<Key, Vec<VoteRange>>;

    /// Returns a `Votes` into-iterator, yielding each key and its vote ranges.
    fn into_iter(self) -> Self::IntoIter {
        self.votes.into_iter()
    }
}
// `VoteRange` encodes a contiguous set of votes performed by some process:
// - this will be used to fill the `VotesTable`
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct VoteRange {
    // process that cast the votes
    by: ProcessId,
    // first vote in the range (inclusive)
    start: u64,
    // last vote in the range (inclusive)
    end: u64,
}
impl VoteRange {
    /// Create a new `VoteRange` instance.
    ///
    /// Panics if `start > end`.
    pub fn new(by: ProcessId, start: u64, end: u64) -> Self {
        assert!(start <= end);
        Self { by, start, end }
    }

    /// Get which process voted.
    pub fn voter(&self) -> ProcessId {
        self.by
    }

    /// Get range start.
    pub fn start(&self) -> u64 {
        self.start
    }

    /// Get range end.
    pub fn end(&self) -> u64 {
        self.end
    }

    /// Compress the `VoteRange` passed as argument into self if both form a
    /// contiguous sequence of votes; when they're not contiguous, `other` is
    /// handed back to the caller untouched.
    ///
    /// Panics if the two ranges have different voters.
    #[must_use]
    pub fn try_compress(&mut self, other: Self) -> Option<Self> {
        // both ranges must belong to the same voter
        assert_eq!(self.by, other.by);
        if self.end + 1 != other.start {
            // there's a gap between the two ranges: can't compress
            return Some(other);
        }
        // contiguous: absorb `other` by extending this range's end
        self.end = other.end;
        None
    }

    /// Get all votes in this range.
    pub fn votes(&self) -> Vec<u64> {
        (self.start..=self.end).collect()
    }
}
impl fmt::Debug for VoteRange {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // multi-vote ranges show both endpoints; single votes show one value
        if self.start != self.end {
            write!(f, "<{}: {}-{}>", self.by, self.start, self.end)
        } else {
            write!(f, "<{}: {}>", self.by, self.start)
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::protocol::common::table::{KeyClocks, SequentialKeyClocks};
use fantoch::command::Command;
use fantoch::id::Rifl;
use fantoch::kvs::KVOp;
#[test]
fn vote_range_compress() {
let a = VoteRange::new(1, 1, 1);
let b = VoteRange::new(1, 2, 2);
let c = VoteRange::new(1, 3, 6);
let d = VoteRange::new(1, 7, 8);
let mut a_with_b = a.clone();
let res = a_with_b.try_compress(b.clone());
assert_eq!(a_with_b, VoteRange::new(1, 1, 2),);
assert_eq!(res, None);
let mut a_with_c = a.clone();
let res = a_with_c.try_compress(c.clone());
assert_eq!(a_with_c, a);
assert_eq!(res, Some(c.clone()));
let mut c_with_d = c.clone();
let res = c_with_d.try_compress(d.clone());
assert_eq!(c_with_d, VoteRange::new(1, 3, 8));
assert_eq!(res, None);
}
#[test]
fn votes_flow() {
// create clocks
let shard_id = 0;
let nfr = false;
let mut clocks_p0 = SequentialKeyClocks::new(0, shard_id, nfr);
let mut clocks_p1 = SequentialKeyClocks::new(1, shard_id, nfr);
// keys
let key_a = String::from("A");
let key_b = String::from("B");
// command a
let cmd_a_rifl = Rifl::new(100, 1); // client 100, 1st op
let cmd_a = Command::from(cmd_a_rifl, vec![(key_a.clone(), KVOp::Get)]);
let mut votes_a = Votes::new();
// command b
let cmd_ab_rifl = Rifl::new(101, 1); // client 101, 1st op
let cmd_ab = Command::from(
cmd_ab_rifl,
vec![(key_a.clone(), KVOp::Get), (key_b.clone(), KVOp::Get)],
);
let mut votes_ab = Votes::new();
// orders on each process:
// - p0: Submit(a), MCommit(a), MCollect(ab)
// - p1: Submit(ab), MCollect(a), MCommit(ab)
// ------------------------
// submit command a by p0 AND
// (local) MCollect handle by p0 (command a)
let (clock_a_p0, process_votes_a_p0) = clocks_p0.proposal(&cmd_a, 0);
assert_eq!(clock_a_p0, 1);
// -------------------------
// submit command ab by p1 AND
// (local) MCollect handle by p1 (command ab)
let (clock_ab_p1, process_votes_ab_p1) = clocks_p1.proposal(&cmd_ab, 0);
assert_eq!(clock_ab_p1, 1);
// -------------------------
// (remote) MCollect handle by p1 (command a)
let (clock_a_p1, process_votes_a_p1) =
clocks_p1.proposal(&cmd_a, clock_a_p0);
assert_eq!(clock_a_p1, 2);
// -------------------------
// (remote) MCollect handle by p0 (command ab)
let (clock_ab_p0, process_votes_ab_p0) =
clocks_p0.proposal(&cmd_ab, clock_ab_p1);
assert_eq!(clock_ab_p0, 2);
// -------------------------
// MCollectAck handles by p0 (command a)
votes_a.merge(process_votes_a_p0);
votes_a.merge(process_votes_a_p1);
// there's a single key
assert_eq!(votes_a.votes.len(), 1);
// there are two voters
let key_votes = votes_a.votes.get(&key_a).unwrap();
assert_eq!(key_votes.len(), 2);
// p0 voted with 1
let mut key_votes = key_votes.into_iter();
let key_votes_by_p0 = key_votes.next().unwrap();
assert_eq!(key_votes_by_p0.voter(), 0);
assert_eq!(key_votes_by_p0.votes(), vec![1]);
// p1 voted with 2
let key_votes_by_p1 = key_votes.next().unwrap();
assert_eq!(key_votes_by_p1.voter(), 1);
assert_eq!(key_votes_by_p1.votes(), vec![2]);
// -------------------------
// MCollectAck handles by p1 (command ab)
votes_ab.merge(process_votes_ab_p1);
votes_ab.merge(process_votes_ab_p0);
// there are two keys
assert_eq!(votes_ab.votes.len(), 2);
// key a:
// there are two voters
let key_votes = votes_ab.votes.get(&key_a).unwrap();
assert_eq!(key_votes.len(), 2);
// p1 voted with 1
let mut key_votes = key_votes.into_iter();
let key_votes_by_p1 = key_votes.next().unwrap();
assert_eq!(key_votes_by_p1.voter(), 1);
assert_eq!(key_votes_by_p1.votes(), vec![1]);
// p0 voted with 2
let key_votes_by_p0 = key_votes.next().unwrap();
assert_eq!(key_votes_by_p0.voter(), 0);
assert_eq!(key_votes_by_p0.votes(), vec![2]);
// key b:
// there are two voters
let key_votes = votes_ab.votes.get(&key_b).unwrap();
assert_eq!(key_votes.len(), 2);
// p1 voted with 1
let mut key_votes = key_votes.into_iter();
let key_votes_by_p1 = key_votes.next().unwrap();
assert_eq!(key_votes_by_p1.voter(), 1);
assert_eq!(key_votes_by_p1.votes(), vec![1]);
// p0 voted with 1 and 2
let key_votes_by_p0 = key_votes.next().unwrap();
assert_eq!(key_votes_by_p0.voter(), 0);
assert_eq!(key_votes_by_p0.votes(), vec![1, 2]);
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/table/mod.rs | fantoch_ps/src/protocol/common/table/mod.rs | // This module contains the definition of `VoteRange` and `Votes`.
mod votes;
// This module contains the definition of `KeyClocks` and `QuorumClocks`.
mod clocks;
// Re-exports.
pub use clocks::{
AtomicKeyClocks, KeyClocks, LockedKeyClocks, QuorumClocks,
SequentialKeyClocks,
};
pub use votes::{VoteRange, Votes};
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/table/clocks/quorum.rs | fantoch_ps/src/protocol/common/table/clocks/quorum.rs | use fantoch::id::ProcessId;
use fantoch::HashSet;
use std::cmp::Ordering;
/// Tracks, for the clocks reported by a fast quorum, the highest clock seen
/// and how many times it has been reported.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct QuorumClocks {
    // fast quorum size
    fast_quorum_size: usize,
    // set of processes that have participated in this computation
    participants: HashSet<ProcessId>,
    // cache current max clock
    max_clock: u64,
    // number of times the maximum clock has been reported
    max_clock_count: usize,
}
impl QuorumClocks {
    /// Creates a `QuorumClocks` instance given the quorum size.
    pub fn new(fast_quorum_size: usize) -> Self {
        Self {
            fast_quorum_size,
            participants: HashSet::with_capacity(fast_quorum_size),
            max_clock: 0,
            max_clock_count: 0,
        }
    }

    /// Maybe change the fast quorum size; only allowed while no process has
    /// reported a clock yet.
    pub fn maybe_adjust_fast_quorum_size(&mut self, fast_quorum_size: usize) {
        debug_assert!(self.participants.is_empty());
        self.fast_quorum_size = fast_quorum_size;
    }

    /// Adds a new `clock` reported by `process_id` and returns the maximum
    /// clock seen until now, together with how many times it was reported.
    pub fn add(&mut self, process_id: ProcessId, clock: u64) -> (u64, usize) {
        debug_assert!(self.participants.len() < self.fast_quorum_size);
        // record new participant
        self.participants.insert(process_id);
        // update the cached max clock and its report count
        if clock > self.max_clock {
            // new max clock: reset the count
            self.max_clock = clock;
            self.max_clock_count = 1;
        } else if clock == self.max_clock {
            // same max clock: simply bump its count
            self.max_clock_count += 1;
        }
        // a clock below the current max changes nothing
        (self.max_clock, self.max_clock_count)
    }

    /// Check if all fast quorum processes have reported their clock.
    pub fn all(&self) -> bool {
        self.participants.len() == self.fast_quorum_size
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn all() {
// quorum clocks
let q = 3;
let mut quorum_clocks = QuorumClocks::new(q);
// add clocks and check they're there
quorum_clocks.add(1, 10);
assert!(!quorum_clocks.all());
quorum_clocks.add(2, 10);
assert!(!quorum_clocks.all());
quorum_clocks.add(3, 10);
assert!(quorum_clocks.all());
}
#[test]
fn max_and_count() {
// -------------
// quorum clocks
let q = 3;
let mut quorum_clocks = QuorumClocks::new(q);
// add clocks and check they're there
assert_eq!(quorum_clocks.add(1, 10), (10, 1));
assert_eq!(quorum_clocks.add(2, 10), (10, 2));
assert_eq!(quorum_clocks.add(3, 10), (10, 3));
// -------------
// quorum clocks
let q = 10;
let mut quorum_clocks = QuorumClocks::new(q);
// add clocks and check they're there
assert_eq!(quorum_clocks.add(1, 10), (10, 1));
assert_eq!(quorum_clocks.add(2, 9), (10, 1));
assert_eq!(quorum_clocks.add(3, 10), (10, 2));
assert_eq!(quorum_clocks.add(4, 9), (10, 2));
assert_eq!(quorum_clocks.add(5, 9), (10, 2));
assert_eq!(quorum_clocks.add(6, 12), (12, 1));
assert_eq!(quorum_clocks.add(7, 12), (12, 2));
assert_eq!(quorum_clocks.add(8, 10), (12, 2));
assert_eq!(quorum_clocks.add(9, 12), (12, 3));
assert_eq!(quorum_clocks.add(10, 13), (13, 1));
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/table/clocks/mod.rs | fantoch_ps/src/protocol/common/table/clocks/mod.rs | // This module contains the definition of `KeyClocks`.
mod keys;
// This module contains the definition of `QuorumClocks`.
mod quorum;
// Re-exports.
pub use keys::{
AtomicKeyClocks, KeyClocks, LockedKeyClocks, SequentialKeyClocks,
};
pub use quorum::QuorumClocks;
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/table/clocks/keys/sequential.rs | fantoch_ps/src/protocol/common/table/clocks/keys/sequential.rs | use super::KeyClocks;
use crate::protocol::common::table::{VoteRange, Votes};
use fantoch::command::Command;
use fantoch::id::{ProcessId, ShardId};
use fantoch::kvs::Key;
use fantoch::HashMap;
use std::cmp;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SequentialKeyClocks {
    process_id: ProcessId,
    shard_id: ShardId,
    // NFR flag: when set, read-only single-key commands don't bump clocks in
    // `proposal`
    nfr: bool,
    // highest clock value assigned so far, per key
    clocks: HashMap<Key, u64>,
}
impl KeyClocks for SequentialKeyClocks {
    /// Create a new `SequentialKeyClocks` instance.
    fn new(process_id: ProcessId, shard_id: ShardId, nfr: bool) -> Self {
        let clocks = HashMap::new();
        Self {
            process_id,
            nfr,
            shard_id,
            clocks,
        }
    }

    /// Makes sure there's a clock (starting at 0) for each key in the command.
    fn init_clocks(&mut self, cmd: &Command) {
        cmd.keys(self.shard_id).for_each(|key| {
            // create entry if key not present yet
            if !self.clocks.contains_key(key) {
                self.clocks.insert(key.clone(), 0);
            }
        });
    }

    /// Computes a clock for `cmd` that is at least `min_clock` and (unless the
    /// NFR optimization applies) higher than every clock previously assigned
    /// on the command's keys; also returns the votes consumed to get there.
    fn proposal(&mut self, cmd: &Command, min_clock: u64) -> (u64, Votes) {
        // if NFR with a read-only single-key command, then don't bump the clock
        let should_not_bump = self.nfr && cmd.nfr_allowed();
        let next_clock = if should_not_bump {
            self.clock(cmd)
        } else {
            self.clock(cmd) + 1
        };
        // bump to at least `min_clock`
        let clock = cmp::max(min_clock, next_clock);
        // compute votes up to that clock
        let key_count = cmd.key_count(self.shard_id);
        let mut votes = Votes::with_capacity(key_count);
        self.detached(cmd, clock, &mut votes);
        // return both
        (clock, votes)
    }

    /// Votes up to `up_to` on each key accessed by `cmd`.
    fn detached(&mut self, cmd: &Command, up_to: u64, votes: &mut Votes) {
        // vote on each key
        cmd.keys(self.shard_id).for_each(|key| {
            // get a mutable reference to current clock value, initializing the
            // key at 0 if it was never seen before
            let current = match self.clocks.get_mut(key) {
                Some(current) => current,
                None => self.clocks.entry(key.clone()).or_insert(0),
            };
            Self::maybe_bump(self.process_id, key, current, up_to, votes);
        });
    }

    /// Votes up to `up_to` on every key seen so far.
    fn detached_all(&mut self, up_to: u64, votes: &mut Votes) {
        // vote on each key
        let id = self.process_id;
        self.clocks.iter_mut().for_each(|(key, current)| {
            Self::maybe_bump(id, key, current, up_to, votes);
        });
    }

    // `SequentialKeyClocks` keeps its clocks in a plain `HashMap` with no
    // synchronization, so it cannot be shared by several workers
    fn parallel() -> bool {
        false
    }
}
impl SequentialKeyClocks {
    /// Retrieves the current clock for some command.
    /// If the command touches multiple keys, returns the maximum between the
    /// clocks associated with each key.
    fn clock(&self, cmd: &Command) -> u64 {
        cmd.keys(self.shard_id)
            .filter_map(|key| self.clocks.get(key).copied())
            .max()
            // keys that were never assigned a clock implicitly sit at 0
            .unwrap_or(0)
    }

    /// Votes from `current + 1` up to `up_to` on `key` when `current` is
    /// behind `up_to`, advancing `current` to `up_to`.
    fn maybe_bump(
        id: ProcessId,
        key: &Key,
        current: &mut u64,
        up_to: u64,
        votes: &mut Votes,
    ) {
        if *current >= up_to {
            // nothing to vote on: the clock is already at (or past) `up_to`
            return;
        }
        // vote from the current clock value + 1 until `up_to`
        votes.add(key, VoteRange::new(id, *current + 1, up_to));
        // update current clock to be `up_to`
        *current = up_to;
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/table/clocks/keys/locked.rs | fantoch_ps/src/protocol/common/table/clocks/keys/locked.rs | use super::KeyClocks;
use crate::protocol::common::table::{VoteRange, Votes};
use fantoch::command::Command;
use fantoch::id::{ProcessId, ShardId};
use fantoch::kvs::Key;
use fantoch::shared::SharedMap;
use parking_lot::Mutex;
use std::cmp;
use std::collections::BTreeSet;
use std::iter::FromIterator;
use std::sync::Arc;
// NOTE(review): despite the name, no pending-reads state is tracked here —
// presumably it was removed at some point; confirm whether this should be
// renamed to something like `KeyClock`
#[derive(Debug, Clone, Default)]
struct ClockAndPendingReads {
    // highest clock value bumped on this key so far
    clock: u64,
}
// all clocks are protected by a mutex
type Clocks = Arc<SharedMap<Key, Mutex<ClockAndPendingReads>>>;

/// `LockedKeyClocks`: `proposal` grabs all per-key locks (in key order)
/// before making any change.
#[derive(Debug, Clone)]
pub struct LockedKeyClocks {
    process_id: ProcessId,
    shard_id: ShardId,
    // per-key clocks, shared by all workers
    clocks: Clocks,
}
impl KeyClocks for LockedKeyClocks {
    /// Create a new `LockedKeyClocks` instance; the `nfr` flag is unused by
    /// this implementation (read-only commands are handled in `proposal`).
    fn new(process_id: ProcessId, shard_id: ShardId, _nfr: bool) -> Self {
        Self {
            process_id,
            shard_id,
            clocks: common::new(),
        }
    }

    /// Makes sure there's a clock for each key in the command.
    fn init_clocks(&mut self, cmd: &Command) {
        common::init_clocks(self.shard_id, &self.clocks, cmd)
    }

    /// Computes a clock for `cmd` that is at least `min_clock`, returning the
    /// consumed votes; read-only commands don't bump clocks and thus produce
    /// no votes.
    fn proposal(&mut self, cmd: &Command, min_clock: u64) -> (u64, Votes) {
        // make sure locks will be acquired in some pre-determined order to
        // avoid deadlocks
        let keys = BTreeSet::from_iter(cmd.keys(self.shard_id));
        let key_count = keys.len();
        // find all the locks
        // - NOTE that the following loop and the one below cannot be merged due
        //   to lifetimes: `let guard = key_lock.lock()` borrows `key_lock` and
        //   the borrow checker doesn't understand that it's fine to move
        //   both the `guard` and `key_lock` into e.g. a `Vec`. For that reason,
        //   we have two loops. One that fetches the locks (the following one)
        //   and another one (the one below it) that actually acquires the
        //   locks.
        let mut locks = Vec::with_capacity(key_count);
        self.clocks
            .get_or_all(&keys, &mut locks, || Mutex::default());
        if cmd.read_only() {
            // if the command is read-only, then simply read the current clock
            // value
            // TODO: reads have to be processed differently as this violates
            // linearizability
            let mut clock = min_clock;
            for (_key, key_lock) in &locks {
                let guard = key_lock.lock();
                clock = cmp::max(clock, guard.clock);
            }
            (clock, Votes::new())
        } else {
            // keep track of which clock we should bump to
            let mut up_to = min_clock;
            // acquire the lock on all keys
            let mut guards = Vec::with_capacity(key_count);
            for (_key, key_lock) in &locks {
                let guard = key_lock.lock();
                up_to = cmp::max(up_to, guard.clock + 1);
                guards.push(guard);
            }
            // create votes
            let mut votes = Votes::with_capacity(key_count);
            for entry in locks.iter().zip(guards.into_iter()) {
                let (key, _key_lock) = entry.0;
                let mut guard = entry.1;
                common::maybe_bump(
                    self.process_id,
                    key,
                    &mut guard.clock,
                    up_to,
                    &mut votes,
                );
                // release the lock
                drop(guard);
            }
            (up_to, votes)
        }
    }

    /// Votes up to `up_to` on each key accessed by `cmd`.
    fn detached(&mut self, cmd: &Command, up_to: u64, votes: &mut Votes) {
        common::detached(
            self.process_id,
            self.shard_id,
            &self.clocks,
            cmd,
            up_to,
            votes,
        )
    }

    /// Votes up to `up_to` on every key seen so far.
    fn detached_all(&mut self, up_to: u64, votes: &mut Votes) {
        common::detached_all(self.process_id, &self.clocks, up_to, votes)
    }

    // safe for concurrent use: all per-key state is mutex-protected
    fn parallel() -> bool {
        true
    }
}
mod common {
    use super::*;

    /// Creates a new (empty) shared clock map.
    pub(super) fn new() -> Clocks {
        // create shared clocks
        let clocks = SharedMap::new();
        // wrap them in an arc
        Arc::new(clocks)
    }

    /// Makes sure there's a (default, i.e. zeroed) clock for each key in the
    /// command.
    pub(super) fn init_clocks(
        shard_id: ShardId,
        clocks: &Clocks,
        cmd: &Command,
    ) {
        cmd.keys(shard_id).for_each(|key| {
            // get initializes the key to the default value, and that's exactly
            // what we want
            let _ = clocks.get_or(key, || Mutex::default());
        })
    }

    /// Votes up to `up_to` on each key accessed by `cmd`, locking one key at
    /// a time.
    pub(super) fn detached(
        id: ProcessId,
        shard_id: ShardId,
        clocks: &Clocks,
        cmd: &Command,
        up_to: u64,
        votes: &mut Votes,
    ) {
        for key in cmd.keys(shard_id) {
            let key_lock = clocks.get_or(key, || Mutex::default());
            let mut guard = key_lock.lock();
            maybe_bump(id, key, &mut guard.clock, up_to, votes);
            // release the lock
            drop(guard);
        }
    }

    /// Votes up to `up_to` on every key seen so far, locking one key at a
    /// time.
    pub(super) fn detached_all(
        id: ProcessId,
        clocks: &Clocks,
        up_to: u64,
        votes: &mut Votes,
    ) {
        clocks.iter().for_each(|entry| {
            let key = entry.key();
            let key_lock = entry.value();
            let mut guard = key_lock.lock();
            maybe_bump(id, key, &mut guard.clock, up_to, votes);
            // release the lock
            drop(guard);
        });
    }

    /// Votes from `current + 1` up to `up_to` on `key` if `current` is behind
    /// `up_to`, advancing `current` to `up_to`.
    pub(super) fn maybe_bump(
        id: ProcessId,
        key: &Key,
        current: &mut u64,
        up_to: u64,
        votes: &mut Votes,
    ) {
        // if we should vote
        if *current < up_to {
            // vote from the current clock value + 1 until `up_to`
            let vr = VoteRange::new(id, *current + 1, up_to);
            // update current clock to be `up_to`
            *current = up_to;
            votes.add(key, vr);
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use fantoch::id::Rifl;
use fantoch::kvs::KVOp;
#[test]
fn bump_test() {
let process_id = 1;
let shard_id = 0;
let nfr = false;
let mut clocks = LockedKeyClocks::new(process_id, shard_id, nfr);
// create rifl
let client_id = 1;
let rifl = Rifl::new(client_id, 1);
// read-only commmands do not bump clocks
let ro_cmd = Command::from(rifl, vec![(String::from("K"), KVOp::Get)]);
let (clock, votes) = clocks.proposal(&ro_cmd, 0);
assert_eq!(clock, 0);
assert!(votes.is_empty());
// update command bump the clock
let cmd = Command::from(
rifl,
vec![(String::from("K"), KVOp::Put(String::new()))],
);
let (clock, votes) = clocks.proposal(&cmd, 0);
assert_eq!(clock, 1);
assert!(!votes.is_empty());
// read-only commmands do not bump clocks
let ro_cmd = Command::from(rifl, vec![(String::from("K"), KVOp::Get)]);
let (clock, votes) = clocks.proposal(&ro_cmd, 0);
assert_eq!(clock, 1);
assert!(votes.is_empty());
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/table/clocks/keys/mod.rs | fantoch_ps/src/protocol/common/table/clocks/keys/mod.rs | // This module contains the definition of `SequentialKeyClocks`.
mod sequential;
// This module contains the definition of `AtomicKeyClocks`.
mod atomic;
// This module contains the definition of `LockedKeyClocks`.
mod locked;
// Re-exports.
pub use atomic::AtomicKeyClocks;
pub use locked::LockedKeyClocks;
pub use sequential::SequentialKeyClocks;
use crate::protocol::common::table::Votes;
use fantoch::command::Command;
use fantoch::id::{ProcessId, ShardId};
use std::fmt::Debug;
pub trait KeyClocks: Debug + Clone {
    /// Create a new `KeyClocks` instance given the local process identifier.
    fn new(id: ProcessId, shard_id: ShardId, nfr: bool) -> Self;

    /// Makes sure there's a clock for each key in the command.
    fn init_clocks(&mut self, cmd: &Command);

    /// Bump clocks to at least `min_clock` and return the new clock (that might
    /// be `min_clock` in case it was higher than any of the local clocks). Also
    /// returns the consumed votes.
    fn proposal(&mut self, cmd: &Command, min_clock: u64) -> (u64, Votes);

    /// Votes up to `clock` for the keys accessed by `cmd`.
    fn detached(&mut self, cmd: &Command, clock: u64, votes: &mut Votes);

    /// Votes up to `clock` on all keys.
    fn detached_all(&mut self, clock: u64, votes: &mut Votes);

    /// Returns whether this implementation can be used concurrently by several
    /// workers.
    fn parallel() -> bool;
}
#[cfg(test)]
mod tests {
use super::*;
use crate::util;
use fantoch::id::Rifl;
use fantoch::kvs::{KVOp, Key};
use std::collections::BTreeSet;
use std::iter::FromIterator;
use std::thread;
#[test]
fn sequential_key_clocks_flow() {
keys_clocks_flow::<SequentialKeyClocks>(true);
}
#[test]
fn sequential_key_clocks_no_double_vvotes() {
keys_clocks_no_double_votes::<SequentialKeyClocks>();
}
#[test]
fn atomic_key_clocks_flow() {
keys_clocks_flow::<AtomicKeyClocks>(true);
}
#[test]
fn atomic_key_clocks_no_double_votes() {
keys_clocks_no_double_votes::<AtomicKeyClocks>();
}
#[test]
fn concurrent_atomic_key_clocks() {
let nthreads = 2;
let ops_number = 10000;
let max_keys_per_command = 2;
let keys_number = 4;
for _ in 0..100 {
concurrent_test::<AtomicKeyClocks>(
nthreads,
ops_number,
max_keys_per_command,
keys_number,
);
}
}
#[test]
fn concurrent_locked_key_clocks() {
    // same contention-heavy setup as the atomic variant, for `LockedKeyClocks`
    const RUNS: usize = 100;
    let (nthreads, ops_number) = (2, 10000);
    let (max_keys_per_command, keys_number) = (2, 4);
    for _ in 0..RUNS {
        concurrent_test::<LockedKeyClocks>(
            nthreads,
            ops_number,
            max_keys_per_command,
            keys_number,
        );
    }
}
/// Builds a command that, for every key, writes the key's own name to it.
fn multi_put(rifl: Rifl, keys: Vec<String>) -> Command {
    let ops = keys
        .into_iter()
        .map(|key| (key.clone(), KVOp::Put(key)));
    Command::from(rifl, ops)
}
/// Exercises the proposal flow over two keys. `all_clocks_match` states
/// whether the implementation guarantees that, after a proposal, every
/// accessed key ends up with the same clock value.
fn keys_clocks_flow<KC: KeyClocks>(all_clocks_match: bool) {
    // create key clocks
    let process_id = 1;
    let shard_id = 0;
    let nfr = false;
    let mut clocks = KC::new(process_id, shard_id, nfr);
    // keys
    let key_a = String::from("A");
    let key_b = String::from("B");
    // command a
    let cmd_a_rifl = Rifl::new(100, 1); // client 100, 1st op
    let cmd_a = multi_put(cmd_a_rifl, vec![key_a.clone()]);
    // command b
    let cmd_b_rifl = Rifl::new(101, 1); // client 101, 1st op
    let cmd_b = multi_put(cmd_b_rifl, vec![key_b.clone()]);
    // command ab
    let cmd_ab_rifl = Rifl::new(102, 1); // client 102, 1st op
    let cmd_ab = multi_put(cmd_ab_rifl, vec![key_a.clone(), key_b.clone()]);
    // -------------------------
    // first clock and votes for command a
    let (clock, process_votes) = clocks.proposal(&cmd_a, 0);
    assert_eq!(clock, 1);
    assert_eq!(process_votes.len(), 1); // single key
    assert_eq!(get_key_votes(&key_a, &process_votes), vec![1]);
    // -------------------------
    // second clock and votes for command a
    let (clock, process_votes) = clocks.proposal(&cmd_a, 0);
    assert_eq!(clock, 2);
    assert_eq!(process_votes.len(), 1); // single key
    assert_eq!(get_key_votes(&key_a, &process_votes), vec![2]);
    // -------------------------
    // first clock and votes for command ab
    let (clock, process_votes) = clocks.proposal(&cmd_ab, 0);
    assert_eq!(clock, 3);
    assert_eq!(process_votes.len(), 2); // two keys
    assert_eq!(get_key_votes(&key_a, &process_votes), vec![3]);
    let key_votes = get_key_votes(&key_b, &process_votes);
    // `which` remembers which of the two legal outcomes happened on key b,
    // so that the assertions on command b below can match it
    let mut which = 0;
    if all_clocks_match {
        assert_eq!(key_votes, vec![1, 2, 3]);
    } else {
        // NOTE it's possible that, even though not all clock values have
        // to match, they may match; this happens when the highest clock (of
        // all keys being accessed) happens to be the first one to be
        // iterated; this is not deterministic since we iterate keys in
        // their HashMap order, which is not stable across runs
        match key_votes.as_slice() {
            [1] => which = 1,
            [1, 2, 3] => which = 123,
            _ => panic!("unexpected key votes vote: {:?}", key_votes),
        }
    }
    // -------------------------
    // first clock and votes for command b
    let (clock, process_votes) = clocks.proposal(&cmd_b, 0);
    if all_clocks_match {
        assert_eq!(clock, 4);
    } else {
        // the expected clock depends on how far key b was bumped above
        match which {
            1 => assert_eq!(clock, 2),
            123 => assert_eq!(clock, 4),
            _ => unreachable!("impossible 'which' value: {}", which),
        }
    }
    assert_eq!(process_votes.len(), 1); // single key
    let key_votes = get_key_votes(&key_b, &process_votes);
    if all_clocks_match {
        assert_eq!(key_votes, vec![4]);
    } else {
        match which {
            1 => assert_eq!(key_votes, vec![2]),
            123 => assert_eq!(key_votes, vec![4]),
            _ => unreachable!("impossible 'which' value: {}", which),
        }
    }
}
/// Checks that `detached` never emits the same vote twice and generates no
/// votes when the target clock is at or below the current one.
fn keys_clocks_no_double_votes<KC: KeyClocks>() {
    // create key clocks
    let process_id = 1;
    let shard_id = 0;
    let nfr = false;
    let mut clocks = KC::new(process_id, shard_id, nfr);
    // command
    let key = String::from("A");
    let cmd_rifl = Rifl::new(100, 1);
    let cmd = multi_put(cmd_rifl, vec![key.clone()]);
    // get process votes up to 5
    let mut process_votes = Votes::new();
    clocks.detached(&cmd, 5, &mut process_votes);
    assert_eq!(process_votes.len(), 1); // single key
    assert_eq!(get_key_votes(&key, &process_votes), vec![1, 2, 3, 4, 5]);
    // get process votes up to 5 again: should get no votes
    let mut process_votes = Votes::new();
    clocks.detached(&cmd, 5, &mut process_votes);
    assert!(process_votes.is_empty());
    // get process votes up to 6
    let mut process_votes = Votes::new();
    clocks.detached(&cmd, 6, &mut process_votes);
    assert_eq!(process_votes.len(), 1); // single key
    assert_eq!(get_key_votes(&key, &process_votes), vec![6]);
    // get process votes up to 2: should get no votes
    let mut process_votes = Votes::new();
    clocks.detached(&cmd, 2, &mut process_votes);
    assert!(process_votes.is_empty());
    // get process votes up to 3: should get no votes
    let mut process_votes = Votes::new();
    clocks.detached(&cmd, 3, &mut process_votes);
    assert!(process_votes.is_empty());
    // get process votes up to 10: only the not-yet-voted range is generated
    let mut process_votes = Votes::new();
    clocks.detached(&cmd, 10, &mut process_votes);
    assert_eq!(process_votes.len(), 1); // single key
    assert_eq!(get_key_votes(&key, &process_votes), vec![7, 8, 9, 10]);
}
// Returns the list of votes on some key, expanded from the stored ranges.
fn get_key_votes(key: &Key, votes: &Votes) -> Vec<u64> {
    let ranges = votes
        .get(key)
        .expect("process should have voted on this key");
    // check that there's only one vote range:
    // - this is only true for `AtomicKeyClocks` because `Votes.add` tries to
    //   compress with the last added vote
    assert_eq!(ranges.len(), 1);
    let start = ranges[0].start();
    let end = ranges[0].end();
    // expand the (inclusive) range into the individual votes
    (start..=end).collect()
}
/// Runs `nthreads` workers over clones of the same clocks and checks that the
/// union of all generated votes, per key, is gap-free (exactly 1..=count).
fn concurrent_test<K: KeyClocks + Send + Sync + 'static>(
    nthreads: usize,
    ops_number: usize,
    max_keys_per_command: usize,
    keys_number: usize,
) {
    // create clocks
    let process_id = 1;
    let shard_id = 0;
    let nfr = false;
    let clocks = K::new(process_id, shard_id, nfr);
    // spawn workers
    let handles: Vec<_> = (0..nthreads)
        .map(|_| {
            // workers share state through the clone (e.g. `AtomicKeyClocks`
            // clones share the same `Arc`'d map)
            let clocks_clone = clocks.clone();
            thread::spawn(move || {
                worker(
                    clocks_clone,
                    ops_number,
                    max_keys_per_command,
                    keys_number,
                )
            })
        })
        .collect();
    // wait for all workers and aggregate their votes
    let mut all_votes = Votes::new();
    for handle in handles {
        let votes = handle.join().expect("worker should finish");
        all_votes.merge(votes);
    }
    // verify votes
    for (_, key_votes) in all_votes {
        // create a set with all votes expanded
        let mut expanded = BTreeSet::new();
        for vote_range in key_votes {
            for vote in vote_range.votes() {
                // insert vote; note that a duplicated vote would simply
                // collapse in the set (this check targets gaps, not doubles)
                expanded.insert(vote);
            }
        }
        // check that we have all votes (i.e. we don't have gaps that would
        // prevent timestamp-stability)
        let vote_count = expanded.len();
        // we should have all votes from 1 to `vote_count`
        assert_eq!(
            expanded,
            BTreeSet::from_iter((1..=vote_count).map(|vote| vote as u64))
        );
    }
}
/// Issues `ops_number` random proposals against `clocks`, carrying the highest
/// clock seen so far into each proposal, and returns all votes generated.
fn worker<K: KeyClocks>(
    mut clocks: K,
    ops_number: usize,
    max_keys_per_command: usize,
    keys_number: usize,
) -> Votes {
    // all votes this worker has generated
    let mut all_votes = Votes::new();
    // highest clock seen so far
    let mut highest = 0;
    // there are no noop's
    let noop_probability = 0;
    for _ in 0..ops_number {
        let cmd = util::gen_cmd(
            max_keys_per_command,
            keys_number,
            noop_probability,
        )
        .expect(
            "command shouldn't be a noop since the noop probability is 0",
        );
        // get votes and update the highest clock seen
        let (new_highest, votes) = clocks.proposal(&cmd, highest);
        highest = new_highest;
        all_votes.merge(votes);
    }
    all_votes
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/protocol/common/table/clocks/keys/atomic.rs | fantoch_ps/src/protocol/common/table/clocks/keys/atomic.rs | use super::KeyClocks;
use crate::protocol::common::table::{VoteRange, Votes};
use fantoch::command::Command;
use fantoch::id::{ProcessId, ShardId};
use fantoch::kvs::Key;
use fantoch::shared::SharedMap;
use fantoch::HashSet;
use std::cmp;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
/// Key clocks backed by a shared map of `AtomicU64`s; clones share the same
/// underlying clocks (via `Arc`), which is what lets workers vote in parallel.
#[derive(Debug, Clone)]
pub struct AtomicKeyClocks {
    // identifier of the local process; stamped on every `VoteRange` generated
    process_id: ProcessId,
    // shard this process belongs to; used to select the command keys
    shard_id: ShardId,
    // NFR flag: when set, commands for which `nfr_allowed()` holds do not
    // bump clocks (see `proposal`)
    nfr: bool,
    // per-key clocks, shared across clones
    clocks: Arc<SharedMap<Key, AtomicU64>>,
}
impl KeyClocks for AtomicKeyClocks {
    /// Create a new `AtomicKeyClocks` instance.
    fn new(process_id: ProcessId, shard_id: ShardId, nfr: bool) -> Self {
        // create shared clocks
        let clocks = SharedMap::new();
        // wrap them in an arc
        let clocks = Arc::new(clocks);
        Self {
            process_id,
            nfr,
            shard_id,
            clocks,
        }
    }
    /// Makes sure there's a clock for each key in the command.
    fn init_clocks(&mut self, cmd: &Command) {
        cmd.keys(self.shard_id).for_each(|key| {
            // get initializes the key to the default value, and that's exactly
            // what we want
            let _ = self.clocks.get_or(key, || AtomicU64::default());
        });
    }
    /// Bumps the clock of every key accessed by `cmd` to at least `min_clock`,
    /// returning the highest clock reached and the votes generated.
    fn proposal(&mut self, cmd: &Command, min_clock: u64) -> (u64, Votes) {
        // if NFR with a read-only single-key command, then don't bump the clock
        let should_not_bump = self.nfr && cmd.nfr_allowed();
        let next_clock = |min_clock, current_clock| {
            if should_not_bump {
                cmp::max(min_clock, current_clock)
            } else {
                cmp::max(min_clock, current_clock + 1)
            }
        };
        // first round of votes:
        // - vote on each key and compute the highest clock seen
        // - this means that if we have more than one key, then we don't
        //   necessarily end up with all key clocks equal
        // OPTIMIZATION: keep track of the highest bumped-to value;
        let key_count = cmd.key_count(self.shard_id);
        let mut clocks = HashSet::with_capacity(key_count);
        let mut votes = Votes::with_capacity(key_count);
        let mut up_to = min_clock;
        cmd.keys(self.shard_id).for_each(|key| {
            // bump the `key` clock
            let clock = self.clocks.get_or(key, || AtomicU64::default());
            let bump =
                Self::maybe_bump_fn(self.process_id, &clock, |current_clock| {
                    next_clock(up_to, current_clock)
                });
            if let Some(vr) = bump {
                // carry the highest clock seen into the next key's bump
                up_to = cmp::max(up_to, vr.end());
                votes.set(key.clone(), vec![vr]);
            }
            // save final clock value
            clocks.insert(up_to);
        });
        // second round of votes:
        // - if not all clocks match, try to make them match
        if clocks.len() > 1 {
            cmd.keys(self.shard_id).for_each(|key| {
                let clock = self.clocks.get_or(key, || AtomicU64::default());
                let bump = Self::maybe_bump(self.process_id, &clock, up_to);
                if let Some(vr) = bump {
                    votes.add(key, vr);
                }
            })
        }
        (up_to, votes)
    }
    /// Votes up to `up_to` on each key accessed by `cmd`; only the portion
    /// not yet voted on generates new votes.
    fn detached(&mut self, cmd: &Command, up_to: u64, votes: &mut Votes) {
        for key in cmd.keys(self.shard_id) {
            let clock = self.clocks.get_or(key, || AtomicU64::default());
            if let Some(vr) = Self::maybe_bump(self.process_id, &clock, up_to) {
                votes.add(key, vr);
            }
        }
    }
    /// Votes up to `up_to` on every key currently tracked.
    fn detached_all(&mut self, up_to: u64, votes: &mut Votes) {
        self.clocks.iter().for_each(|entry| {
            let key = entry.key();
            let clock = entry.value();
            if let Some(vr) = Self::maybe_bump(self.process_id, &clock, up_to) {
                votes.add(key, vr);
            }
        });
    }
    /// Clones share the same clocks, so this implementation supports
    /// concurrent workers.
    fn parallel() -> bool {
        true
    }
}
impl AtomicKeyClocks {
    // Bump the clock to at least `next_clock`.
    //
    // Returns `None` when the clock was already at (or past) the target
    // value, i.e. no new votes were generated.
    //
    // NOTE(review): `next_clock` is called again after the update loop
    // succeeds (on the witnessed previous value), so it must be deterministic
    // for the returned `VoteRange` to match the value actually stored.
    #[must_use]
    fn maybe_bump_fn<F>(
        id: ProcessId,
        clock: &AtomicU64,
        next_clock: F,
    ) -> Option<VoteRange>
    where
        F: FnOnce(u64) -> u64 + Copy,
    {
        // CAS loop: only store when the new value strictly increases the clock
        let fetch_update = clock.fetch_update(
            Ordering::Relaxed,
            Ordering::Relaxed,
            |current| {
                let next = next_clock(current);
                if current < next {
                    Some(next)
                } else {
                    None
                }
            },
        );
        // on success, the claimed votes are the range
        // `previous_value + 1 ..= next_clock(previous_value)`
        fetch_update.ok().map(|previous_value| {
            VoteRange::new(id, previous_value + 1, next_clock(previous_value))
        })
    }
    // Bump the clock to `up_to` if lower than `up_to`.
    #[must_use]
    fn maybe_bump(
        id: ProcessId,
        clock: &AtomicU64,
        up_to: u64,
    ) -> Option<VoteRange> {
        Self::maybe_bump_fn(id, clock, |_| up_to)
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
sagiegurari/run_script | https://github.com/sagiegurari/run_script/blob/a79fdf0e15afca84681e5cf104bc080ceec60954/src/types_test.rs | src/types_test.rs | use super::*;
use std::error::Error;
use std::io::Write;
#[test]
fn script_error_description() {
    // Display output, error source and `write!` formatting for the
    // `Description` variant.
    let error = ScriptError::Description("test");
    assert_eq!(error.to_string(), "test");
    assert!(error.source().is_none());
    let mut writer: Vec<u8> = Vec::new();
    write!(&mut writer, "formatted {}", error).unwrap();
    assert_eq!(writer, b"formatted test");
}
#[test]
fn script_options_new() {
    // `new` must produce the documented defaults for every field
    let options = ScriptOptions::new();
    assert!(options.runner.is_none());
    assert!(options.working_directory.is_none());
    assert_eq!(options.input_redirection, IoOptions::Inherit);
    assert_eq!(options.output_redirection, IoOptions::Pipe);
    assert!(!options.exit_on_error);
    assert!(!options.print_commands);
    // the `encoding` field only exists when the `encoding_rs` feature is on
    #[cfg(feature = "encoding_rs")]
    assert!(options.encoding.is_none());
}
| rust | Apache-2.0 | a79fdf0e15afca84681e5cf104bc080ceec60954 | 2026-01-04T20:25:01.977296Z | false |
sagiegurari/run_script | https://github.com/sagiegurari/run_script/blob/a79fdf0e15afca84681e5cf104bc080ceec60954/src/lib.rs | src/lib.rs | #![deny(
future_incompatible,
keyword_idents,
let_underscore,
nonstandard_style,
unused
)]
#![warn(unknown_lints)]
//! # run_script
//!
//! Run shell scripts in [rust](https://www.rust-lang.org/).
//!
//! This library enables to invoke shell scripts based on their content.<br>
//! While std::process::Command works great for executing standalone commands, you need more manual code to take a script
//! text and execute it.<br>
//! For this purpose, this library was created.
//!
//! # Examples
//!
//! ## Basic Example
//!
//! ````ignore
//! use run_script;
//! use run_script::ScriptOptions;
//!
//! fn main() {
//! let options = ScriptOptions::new();
//!
//! let args = vec![];
//!
//! // run the script and get the script execution output
//! let (code, output, error) = run_script::run(
//! r#"
//! echo "Directory Info:"
//! dir
//! "#,
//! &args,
//! &options,
//! )
//! .unwrap();
//!
//! println!("Exit Code: {}", code);
//! println!("Output: {}", output);
//! println!("Error: {}", error);
//!
//! // run the script and get a handle to the running child process
//! let child = run_script::spawn(
//! r#"
//! echo "Directory Info:"
//! dir
//! "#,
//! &args,
//! &options,
//! )
//! .unwrap();
//!
//! let spawn_output = child.wait_with_output().unwrap();
//!
//! println!("Success: {}", &spawn_output.status.success());
//! }
//! ````
//!
//! ## Macro Examples
//!
//! ```rust
//! use run_script::ScriptOptions;
//!
//! fn main() {
//! // simple call to run script with only the script text
//! let (code, output, error) = run_script::run_script!(
//! r#"
//! echo "Test"
//! exit 0
//! "#
//! )
//! .unwrap();
//!
//! println!("Exit Code: {}", code);
//! println!("Output: {}", output);
//! println!("Error: {}", error);
//!
//! // run script invoked with the script text and options
//! let options = ScriptOptions::new();
//! let (code, output, error) = run_script::run_script!(
//! r#"
//! echo "Test"
//! exit 0
//! "#,
//! &options
//! )
//! .unwrap();
//!
//! println!("Exit Code: {}", code);
//! println!("Output: {}", output);
//! println!("Error: {}", error);
//!
//! // run script invoked with all arguments
//! let options = ScriptOptions::new();
//! let (code, output, error) = run_script::run_script!(
//! r#"
//! echo "Test"
//! exit 0
//! "#,
//! &vec!["ARG1".to_string(), "ARG2".to_string()],
//! &options
//! )
//! .unwrap();
//!
//! println!("Exit Code: {}", code);
//! println!("Output: {}", output);
//! println!("Error: {}", error);
//!
//! // spawn_script! works the same as run_script! but returns the child process handle
//! let child = run_script::spawn_script!(
//! r#"
//! echo "Test"
//! exit 0
//! "#
//! )
//! .unwrap();
//!
//! println!("PID: {}", child.id());
//! }
//! ```
//!
//! # Installation
//! In order to use this library, just add it as a dependency:
//!
//! ```ini
//! [dependencies]
//! run_script = "*"
//! ```
//!
//! # Contributing
//! See [contributing guide](https://github.com/sagiegurari/run_script/blob/master/.github/CONTRIBUTING.md)
//!
//! # License
//! Developed by Sagie Gur-Ari and licensed under the
//! [Apache 2](https://github.com/sagiegurari/run_script/blob/master/LICENSE) open source license.
//!
#[cfg(test)]
#[path = "./lib_test.rs"]
mod lib_test;
#[cfg(doctest)]
doc_comment::doctest!("../README.md");
#[macro_use]
mod macros;
mod runner;
pub mod types;
use crate::types::ScriptResult;
use std::process::Child;
/// Error struct
pub type ScriptError = types::ScriptError;
/// Options available for invoking the script
pub type ScriptOptions = types::ScriptOptions;
/// Io Options available for invoking the script
pub type IoOptions = types::IoOptions;
/// Invokes the provided script content and returns the invocation output.
///
/// # Arguments
///
/// * `script` - The script content
/// * `args` - The script command line arguments
/// * `options` - Options provided to the script runner
///
/// # Example
///
/// ````
/// use run_script::ScriptOptions;
///
/// fn main() {
/// let options = ScriptOptions::new();
///
/// let args = vec![];
///
/// let (code, output, error) = run_script::run(
/// r#"
/// echo "Directory Info:"
/// dir
/// "#,
/// &args,
/// &options
/// ).unwrap();
///
/// println!("Exit Code: {}", code);
/// println!("Output: {}", output);
/// println!("Error: {}", error);
/// }
/// ````
pub fn run(
    script: &str,
    args: &Vec<String>,
    options: &ScriptOptions,
) -> ScriptResult<(i32, String, String)> {
    // thin wrapper over the internal runner; `args`/`options` are already
    // references, so they are forwarded as-is
    runner::run(script, args, options)
}
/// Invokes the provided script content and returns a process handle.
///
/// # Arguments
///
/// * `script` - The script content
/// * `args` - The script command line arguments
/// * `options` - Options provided to the script runner
///
/// # Example
///
/// ````
/// use run_script::ScriptOptions;
///
/// fn main() {
/// let options = ScriptOptions::new();
///
/// let args = vec![];
///
/// let child = run_script::spawn(
/// r#"
/// echo "Directory Info:"
/// dir
/// "#,
/// &args,
/// &options
/// ).unwrap();
/// }
/// ````
pub fn spawn(script: &str, args: &Vec<String>, options: &ScriptOptions) -> ScriptResult<Child> {
    // delegate to the internal runner; arguments are forwarded as-is
    runner::spawn(script, args, options)
}
/// Invokes the provided script content and returns the invocation output.
/// In case of invocation error or error exit code, this function will exit the main process.
///
/// # Arguments
///
/// * `script` - The script content
/// * `args` - The script command line arguments
/// * `options` - Options provided to the script runner
///
/// # Example
///
/// ````
/// use run_script::ScriptOptions;
///
/// fn main() {
/// let options = ScriptOptions::new();
///
/// let args = vec![];
///
/// let (output, error) = run_script::run_or_exit(
/// r#"
/// echo "Hello World"
/// "#,
/// &args,
/// &options
/// );
///
/// println!("Output: {}", output);
/// println!("Error: {}", error);
/// }
/// ````
pub fn run_or_exit(script: &str, args: &Vec<String>, options: &ScriptOptions) -> (String, String) {
    // delegate to the internal runner; arguments are forwarded as-is
    runner::run_or_exit(script, args, options)
}
| rust | Apache-2.0 | a79fdf0e15afca84681e5cf104bc080ceec60954 | 2026-01-04T20:25:01.977296Z | false |
sagiegurari/run_script | https://github.com/sagiegurari/run_script/blob/a79fdf0e15afca84681e5cf104bc080ceec60954/src/lib_test.rs | src/lib_test.rs | use super::*;
use doc_comment as _;
#[test]
fn run_test() {
    // a trivial script succeeds, produces output and writes nothing to stderr
    let options = ScriptOptions::new();
    let args = Vec::new();
    let (code, output, error) = run(
        r#"
echo "Test"
exit 0
"#,
        &args,
        &options,
    )
    .unwrap();
    assert_eq!(code, 0);
    assert!(!output.is_empty());
    assert!(error.is_empty());
}
#[test]
fn spawn_test_valid_exit_code() {
    // a spawned script exiting 0 reports success on wait
    let options = ScriptOptions::new();
    let args = Vec::new();
    let child = spawn(
        r#"
echo "Test"
exit 0
"#,
        &args,
        &options,
    )
    .unwrap();
    let output = child.wait_with_output().unwrap();
    assert!(output.status.success());
}
#[test]
#[should_panic]
fn run_or_exit_error_code() {
    // in test builds the runner's `exit` is a panicking stub (see runner.rs),
    // so a non-zero exit code surfaces as a panic instead of process exit
    let args = vec![];
    let options = ScriptOptions::new();
    run_or_exit("exit 1", &args, &options);
}
#[test]
fn run_or_exit_pipe_output() {
    // with piped output redirection the script's stdout is captured
    let mut options = ScriptOptions::new();
    options.output_redirection = IoOptions::Pipe;
    let args = Vec::new();
    let (output, error) = run_or_exit(
        r#"
echo "Test"
exit 0
"#,
        &args,
        &options,
    );
    assert!(!output.is_empty());
    assert!(error.is_empty());
}
| rust | Apache-2.0 | a79fdf0e15afca84681e5cf104bc080ceec60954 | 2026-01-04T20:25:01.977296Z | false |
sagiegurari/run_script | https://github.com/sagiegurari/run_script/blob/a79fdf0e15afca84681e5cf104bc080ceec60954/src/runner.rs | src/runner.rs | //! # command
//!
//! Runs task commands/scripts.
//!
#[cfg(test)]
#[path = "./runner_test.rs"]
mod runner_test;
use crate::types::{IoOptions, ScriptError, ScriptOptions, ScriptResult};
use fsio;
use fsio::path::from_path::FromPath;
use fsio::types::FsIOResult;
use std::env::current_dir;
use std::process::{Child, Command, ExitStatus, Stdio};
// In test builds, replace `std::process::exit` with a panicking stub so that
// `run_or_exit` failure paths can be asserted with `#[should_panic]` instead
// of killing the test process.
#[cfg(test)]
fn exit(code: i32) -> ! {
    panic!("{}", code);
}
#[cfg(not(test))]
use std::process::exit;
/// Maps an `ExitStatus` to an `i32` exit code: 0 on success, the reported
/// code on failure, and -1 when no code is available (e.g. the process was
/// terminated by a signal).
fn get_exit_code(code: ExitStatus) -> i32 {
    if code.success() {
        return 0;
    }
    // `code()` is `None` when no exit code was reported
    code.code().unwrap_or(-1)
}
/// Builds the `Command` for the given runner and arguments, applying the
/// optional environment variables and the requested stdio redirection modes.
fn create_command_builder(
    command_string: &str,
    args: &Vec<String>,
    options: &ScriptOptions,
) -> Command {
    let mut command = Command::new(&command_string);
    if let Some(env_vars) = options.env_vars.as_ref() {
        command.envs(env_vars);
    }
    command.args(args);
    // stdin redirection
    let stdin = match options.input_redirection {
        IoOptions::Null => Stdio::null(),
        IoOptions::Inherit => Stdio::inherit(),
        IoOptions::Pipe => Stdio::piped(),
    };
    command.stdin(stdin);
    // stdout/stderr redirection (only `Pipe` allows capturing the output)
    match options.output_redirection {
        IoOptions::Null => {
            command.stdout(Stdio::null()).stderr(Stdio::null());
        }
        IoOptions::Inherit => {
            command.stdout(Stdio::inherit()).stderr(Stdio::inherit());
        }
        IoOptions::Pipe => {
            command.stdout(Stdio::piped()).stderr(Stdio::piped());
        }
    }
    command
}
/// Writes the script content to a new temporary file (".bat" on windows,
/// ".sh" elsewhere) and returns its path; on write failure the file is
/// deleted (best effort) and the error is propagated.
fn create_script_file(script: &String) -> FsIOResult<String> {
    // pick the platform-appropriate script extension
    let extension = if cfg!(windows) { "bat" } else { "sh" };
    let file_path = fsio::path::get_temporary_file_path(extension);
    match fsio::file::write_text_file(&file_path, script) {
        Ok(_) => Ok(file_path),
        Err(error) => {
            // clean up; deletion errors are intentionally ignored
            fsio::file::delete_ignore_error(&file_path);
            Err(error)
        }
    }
}
/// On windows, canonicalizes the given path (falling back to the input on
/// failure); on other platforms the path is returned unchanged.
fn fix_path(path_string: &str) -> String {
    if !cfg!(windows) {
        return path_string.to_string();
    }
    fsio::path::canonicalize_or(&path_string, &path_string)
}
/// Prepends setup lines to the script: a `cd` back to the current working
/// directory (and into `options.working_directory`, when set) plus the
/// platform-specific flags, keeping any shebang as the first line.
fn modify_script(script: &String, options: &ScriptOptions) -> ScriptResult<String> {
    match current_dir() {
        Ok(cwd_holder) => {
            match cwd_holder.to_str() {
                Some(cwd) => {
                    let cwd_string = fix_path(cwd);
                    // create cd command
                    let mut cd_command = "cd \"".to_string();
                    cd_command.push_str(&cwd_string);
                    cd_command.push('"');
                    if let Some(ref working_directory) = options.working_directory {
                        // chain a second `cd` into the requested directory
                        cd_command.push_str(" && cd \"");
                        let working_directory_string: String =
                            FromPath::from_path(&working_directory);
                        cd_command.push_str(&working_directory_string);
                        cd_command.push('"');
                    }
                    let mut script_lines: Vec<String> = script
                        .trim()
                        .split("\n")
                        .map(|string| string.to_string())
                        .collect();
                    // check if first line is shebang line; if so, all
                    // injected lines go after it
                    let mut insert_index =
                        if script_lines.len() > 0 && script_lines[0].starts_with("#!") {
                            1
                        } else {
                            0
                        };
                    if cfg!(windows) {
                        if !options.print_commands {
                            // suppress command echoing on windows
                            script_lines.insert(insert_index, "@echo off".to_string());
                            insert_index = insert_index + 1;
                        }
                    } else {
                        if options.exit_on_error {
                            // abort the script on the first failing command
                            script_lines.insert(insert_index, "set -e".to_string());
                            insert_index = insert_index + 1;
                        }
                        if options.print_commands {
                            // print each command before it is executed
                            script_lines.insert(insert_index, "set -x".to_string());
                            insert_index = insert_index + 1;
                        }
                    }
                    script_lines.insert(insert_index, cd_command);
                    // trailing newline so the last command is terminated
                    script_lines.push("\n".to_string());
                    let updated_script = script_lines.join("\n");
                    Ok(updated_script)
                }
                None => Err(ScriptError::Description(
                    "Unable to extract current working directory path.",
                )),
            }
        }
        Err(error) => Err(ScriptError::IOError(error)),
    }
}
/// Invokes the provided script content and returns a process handle together
/// with the path of the temporary script file backing it; the caller is
/// responsible for that file (`run` deletes it after the child finishes,
/// `spawn` leaves it in place since the child may still be running).
fn spawn_script(
    script: &str,
    args: &Vec<String>,
    options: &ScriptOptions,
) -> ScriptResult<(Child, String)> {
    match modify_script(&script.to_string(), &options) {
        Ok(updated_script) => match create_script_file(&updated_script) {
            Ok(file) => {
                // pick the configured runner or the platform default
                let command = match options.runner {
                    Some(ref value) => value,
                    None => {
                        if cfg!(windows) {
                            "cmd.exe"
                        } else {
                            "sh"
                        }
                    }
                };
                let mut runner_args = Vec::<String>::new();
                match options.runner_args {
                    Some(ref value) => runner_args.extend(value.iter().cloned()),
                    None => (),
                };
                // cmd needs `/C <file>`; other runners take the file directly
                let mut all_args = if command.eq("cmd.exe") || command.eq("cmd") {
                    let win_file = fix_path(&file);
                    runner_args.extend(["/C".to_string(), win_file].iter().cloned());
                    runner_args
                } else {
                    runner_args.extend([file.to_string()].iter().cloned());
                    runner_args
                };
                // script arguments come after the runner arguments
                all_args.extend(args.iter().cloned());
                let mut command = create_command_builder(&command, &all_args, &options);
                let result = command.spawn();
                match result {
                    Ok(child) => Ok((child, file.clone())),
                    Err(error) => {
                        // spawn failed: remove the now-useless temp file
                        fsio::file::delete_ignore_error(&file);
                        Err(ScriptError::IOError(error))
                    }
                }
            }
            Err(error) => Err(ScriptError::FsIOError(error)),
        },
        Err(error) => Err(error),
    }
}
/// Invokes the provided script content and returns a process handle.
///
/// # Arguments
///
/// * `script` - The script content
/// * `args` - The script command line arguments
/// * `options` - Options provided to the script runner
pub(crate) fn spawn(
    script: &str,
    args: &Vec<String>,
    options: &ScriptOptions,
) -> ScriptResult<Child> {
    // delegate to the internal spawner and drop the temp script file path
    spawn_script(script, args, options).map(|(child, _script_file)| child)
}
/// Invokes the provided script content, waits for completion, deletes the
/// temporary script file and returns `(exit_code, stdout, stderr)`.
///
/// # Arguments
///
/// * `script` - The script content
/// * `args` - The script command line arguments
/// * `options` - Options provided to the script runner
pub(crate) fn run(
    script: &str,
    args: &Vec<String>,
    options: &ScriptOptions,
) -> ScriptResult<(i32, String, String)> {
    let result = spawn_script(script, &args, &options);
    match result {
        Ok((child, file)) => {
            let process_result = child.wait_with_output();
            // the temp script file is no longer needed once the child is done
            fsio::file::delete_ignore_error(&file);
            match process_result {
                Ok(output) => {
                    let exit_code = get_exit_code(output.status);
                    // with the `encoding_rs` feature, honor an explicit
                    // output encoding; otherwise fall back to lossy UTF-8
                    #[cfg(feature = "encoding_rs")]
                    let (stdout, stderr) = if let Some(encoding) = options.encoding {
                        (
                            encoding.decode(&output.stdout).0.into_owned(),
                            encoding.decode(&output.stderr).0.into_owned(),
                        )
                    } else {
                        (
                            String::from_utf8_lossy(&output.stdout).into_owned(),
                            String::from_utf8_lossy(&output.stderr).into_owned(),
                        )
                    };
                    #[cfg(not(feature = "encoding_rs"))]
                    let (stdout, stderr) = (
                        String::from_utf8_lossy(&output.stdout).into_owned(),
                        String::from_utf8_lossy(&output.stderr).into_owned(),
                    );
                    Ok((exit_code, stdout, stderr))
                }
                Err(error) => Err(ScriptError::IOError(error)),
            }
        }
        Err(error) => Err(error),
    }
}
/// Invokes the provided script content and returns `(stdout, stderr)`.
/// On any invocation error or non-zero exit code, prints the error output
/// and terminates the process.
///
/// # Arguments
///
/// * `script` - The script content
/// * `args` - The script command line arguments
/// * `options` - Options provided to the script runner
pub(crate) fn run_or_exit(
    script: &str,
    args: &Vec<String>,
    options: &ScriptOptions,
) -> (String, String) {
    match run(script, args, options) {
        // success: hand the captured output back to the caller
        Ok((0, output, error)) => (output, error),
        // script failed: mirror its stderr and exit with its code
        Ok((exit_code, _, error)) => {
            eprintln!("{}", error);
            exit(exit_code)
        }
        // invocation failed before an exit code was available
        Err(error) => {
            eprintln!("{}", error);
            exit(1)
        }
    }
}
| rust | Apache-2.0 | a79fdf0e15afca84681e5cf104bc080ceec60954 | 2026-01-04T20:25:01.977296Z | false |
sagiegurari/run_script | https://github.com/sagiegurari/run_script/blob/a79fdf0e15afca84681e5cf104bc080ceec60954/src/types.rs | src/types.rs | //! # types
//!
//! Defines the various types and aliases.
//!
#[cfg(test)]
#[path = "./types_test.rs"]
mod types_test;
#[cfg(feature = "encoding_rs")]
use encoding_rs::Encoding;
use fsio::error::FsIOError;
use std::error::Error;
use std::fmt;
use std::fmt::Display;
use std::io;
use std::path::PathBuf;
/// Alias for result with script error
pub type ScriptResult<T> = Result<T, ScriptError>;
#[derive(Debug)]
/// Holds the error information
pub enum ScriptError {
    /// Underlying I/O error (e.g. spawning or waiting on the child failed)
    IOError(io::Error),
    /// Underlying file-system error from the `fsio` crate (e.g. the
    /// temporary script file could not be written)
    FsIOError(FsIOError),
    /// Description text of the error reason
    Description(&'static str),
}
impl Display for ScriptError {
/// Formats the value using the given formatter.
fn fmt(&self, format: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match self {
Self::IOError(ref cause) => cause.fmt(format),
Self::FsIOError(ref cause) => cause.fmt(format),
Self::Description(description) => description.fmt(format),
}
}
}
impl Error for ScriptError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
Self::Description(_) => None,
Self::IOError(error) => Some(error),
Self::FsIOError(error) => Some(error),
}
}
}
#[derive(Debug, Clone, PartialEq)]
/// Options available for invoking the script
pub struct ScriptOptions {
    /// Defines the requested runner (defaults to cmd in windows and sh for other platforms)
    pub runner: Option<String>,
    /// Args for the runner (for cmd, /C will automatically be added at the end)
    pub runner_args: Option<Vec<String>>,
    /// The working directory of the invocation
    pub working_directory: Option<PathBuf>,
    /// Default is IoOptions::Inherit
    pub input_redirection: IoOptions,
    /// Default is IoOptions::Pipe (only pipe enables to capture the output)
    pub output_redirection: IoOptions,
    /// Sets -e flag. Will exit on any error while running the script (not available for windows)
    pub exit_on_error: bool,
    /// Sets -x flag for printing each script command before invocation (not available for windows)
    pub print_commands: bool,
    /// Environment variables to add before invocation
    pub env_vars: Option<std::collections::HashMap<String, String>>,
    /// Encoding conversion for stdout and stderr (lossy UTF-8 when `None`)
    #[cfg(feature = "encoding_rs")]
    pub encoding: Option<&'static Encoding>,
}
#[derive(Debug, Copy, Clone, PartialEq)]
/// Stdio redirection modes available for the child process' input/output
pub enum IoOptions {
    /// Corresponds to Stdio::null()
    Null,
    /// Corresponds to Stdio::piped()
    Pipe,
    /// Corresponds to Stdio::inherit()
    Inherit,
}
impl ScriptOptions {
/// Returns new instance
pub fn new() -> ScriptOptions {
ScriptOptions {
runner: None,
runner_args: None,
working_directory: None,
input_redirection: IoOptions::Inherit,
output_redirection: IoOptions::Pipe,
exit_on_error: false,
print_commands: false,
env_vars: None,
#[cfg(feature = "encoding_rs")]
encoding: None,
}
}
}
| rust | Apache-2.0 | a79fdf0e15afca84681e5cf104bc080ceec60954 | 2026-01-04T20:25:01.977296Z | false |
sagiegurari/run_script | https://github.com/sagiegurari/run_script/blob/a79fdf0e15afca84681e5cf104bc080ceec60954/src/macros.rs | src/macros.rs | //! # macros
//!
//! Defines the library macros
//!
/// Enables to invoke the run_script::run function more easily without providing all input.
///
/// # Arguments
///
/// * `script` - The script content
/// * `args` - Optional, script command line arguments. If provided, the last options argument must also be provided.
/// * `options` - Optional, options provided to the script runner
///
/// # Examples
///
/// ```rust
/// use run_script::ScriptOptions;
///
/// fn main() {
/// // simple call to run script with only the script text
/// let (code, output, error) = run_script::run_script!(
/// r#"
/// echo "Test"
/// exit 0
/// "#
/// ).unwrap();
///
/// // run script invoked with the script text and options
/// let options = ScriptOptions::new();
/// let (code, output, error) = run_script::run_script!(
/// r#"
/// echo "Test"
/// exit 0
/// "#,
/// &options
/// ).unwrap();
///
/// // run script invoked with all arguments
/// let options = ScriptOptions::new();
/// let (code, output, error) = run_script::run_script!(
/// r#"
/// echo "Test"
/// exit 0
/// "#,
/// &vec!["ARG1".to_string(), "ARG2".to_string()],
/// &options
/// ).unwrap();
/// }
/// ```
#[macro_export]
macro_rules! run_script {
    // script only: no command line arguments, default options
    ($script:expr) => {{
        let args = vec![];
        let options = $crate::ScriptOptions::new();
        $crate::run(&$script, &args, &options)
    }};
    // script + options: no command line arguments
    ($script:expr, $options:expr) => {{
        let args = vec![];
        $crate::run(&$script, &args, &$options)
    }};
    // script + arguments + options
    ($script:expr, $args:expr, $options:expr) => {{
        $crate::run(&$script, &$args, &$options)
    }};
}
/// Enables to invoke the run_script::spawn function more easily without providing all input.
///
/// # Arguments
///
/// * `script` - The script content
/// * `args` - Optional, script command line arguments. If provided, the last options argument must also be provided.
/// * `options` - Optional, options provided to the script runner
///
/// # Examples
///
/// ```ignore
///
/// use run_script::ScriptOptions;
///
/// fn main() {
/// // simple call to run script with only the script text
/// let child = run_script::spawn_script!(
/// r#"
/// echo "Test"
/// exit 0
/// "#
/// ).unwrap();
///
/// // run script invoked with the script text and options
/// let options = ScriptOptions::new();
/// let child = run_script::spawn_script!(
/// r#"
/// echo "Test"
/// exit 0
/// "#,
/// &options
/// ).unwrap();
///
/// // run script invoked with all arguments
/// let options = ScriptOptions::new();
/// let child = run_script::spawn_script!(
/// r#"
/// echo "Test"
/// exit 0
/// "#,
/// &vec!["ARG1".to_string(), "ARG2".to_string()],
/// &options
/// ).unwrap();
/// }
/// ```
#[macro_export]
macro_rules! spawn_script {
    // Arm 1: script text only — no CLI arguments, default options.
    ($script:expr) => {{
        let args = vec![];
        let options = $crate::ScriptOptions::new();
        $crate::spawn(&$script, &args, &options)
    }};
    // Arm 2: script text plus custom options — no CLI arguments.
    ($script:expr, $options:expr) => {{
        let args = vec![];
        $crate::spawn(&$script, &args, &$options)
    }};
    // Arm 3: script text, CLI arguments and custom options.
    ($script:expr, $args:expr, $options:expr) => {{
        $crate::spawn(&$script, &$args, &$options)
    }};
}
/// Enables to invoke the run_script::run_or_exit function more easily without providing all input.
///
/// # Arguments
///
/// * `script` - The script content
/// * `args` - Optional, script command line arguments. If provided, the last options argument must also be provided.
/// * `options` - Optional, options provided to the script runner
///
/// # Examples
///
/// ```rust
/// use run_script::ScriptOptions;
///
/// fn main() {
/// // simple call to the macro with only the script text
/// let (output, error) = run_script::run_script_or_exit!(
/// r#"
/// echo "Test"
/// exit 0
/// "#
/// );
///
/// // macro invoked with the script text and options
/// let options = ScriptOptions::new();
/// let (output, error) = run_script::run_script_or_exit!(
/// r#"
/// echo "Test"
/// exit 0
/// "#,
/// &options
/// );
///
/// // macro invoked with all arguments
/// let options = ScriptOptions::new();
/// let (output, error) = run_script::run_script_or_exit!(
/// r#"
/// echo "Test"
/// exit 0
/// "#,
/// &vec!["ARG1".to_string(), "ARG2".to_string()],
/// &options
/// );
/// }
/// ```
#[macro_export]
macro_rules! run_script_or_exit {
    // Arm 1: script text only — no CLI arguments, default options.
    ($script:expr) => {{
        let args = vec![];
        let options = $crate::ScriptOptions::new();
        $crate::run_or_exit(&$script, &args, &options)
    }};
    // Arm 2: script text plus custom options — no CLI arguments.
    ($script:expr, $options:expr) => {{
        let args = vec![];
        $crate::run_or_exit(&$script, &args, &$options)
    }};
    // Arm 3: script text, CLI arguments and custom options.
    ($script:expr, $args:expr, $options:expr) => {{
        $crate::run_or_exit(&$script, &$args, &$options)
    }};
}
| rust | Apache-2.0 | a79fdf0e15afca84681e5cf104bc080ceec60954 | 2026-01-04T20:25:01.977296Z | false |
sagiegurari/run_script | https://github.com/sagiegurari/run_script/blob/a79fdf0e15afca84681e5cf104bc080ceec60954/src/runner_test.rs | src/runner_test.rs | use super::*;
use std::env::current_dir;
use std::path::{Path, PathBuf};
#[test]
fn create_script_file_and_delete() {
    // Writing a script must create a file on disk; deleting must remove it.
    let file = create_script_file(&"test".to_string()).unwrap();
    assert!(Path::new(&file).exists());
    fsio::file::delete_ignore_error(&file);
    assert!(!Path::new(&file).exists());
}
#[test]
fn modify_script_no_shebang_default_options() {
    // No shebang: default prolog (@echo off on windows) + cd into the cwd.
    let options = ScriptOptions::new();
    let cwd = current_dir().unwrap();
    let mut expected_script = "".to_string();
    if cfg!(windows) {
        expected_script.push_str("@echo off\n");
    }
    expected_script.push_str("cd \"");
    expected_script.push_str(cwd.to_str().unwrap());
    expected_script.push_str("\"\necho test\n\n");
    let script = modify_script(&"echo test".to_string(), &options).unwrap();
    assert_eq!(script, expected_script);
}
#[cfg(not(windows))]
#[test]
fn modify_script_with_shebang_default_options() {
    // An existing shebang line must stay at the top of the modified script.
    let options = ScriptOptions::new();
    let cwd = current_dir().unwrap();
    let mut expected_script = "#!/bin/bash\n".to_string();
    expected_script.push_str("cd \"");
    expected_script.push_str(cwd.to_str().unwrap());
    expected_script.push_str("\"\necho test\n\n");
    let script = modify_script(&"#!/bin/bash\necho test".to_string(), &options).unwrap();
    assert_eq!(script, expected_script);
}
#[test]
fn modify_script_exit_on_error() {
    // exit_on_error injects `set -e` on unix (windows keeps only @echo off).
    let mut options = ScriptOptions::new();
    options.exit_on_error = true;
    let cwd = current_dir().unwrap();
    let mut expected_script = "".to_string();
    if !cfg!(windows) {
        expected_script.push_str("set -e\n");
    } else {
        expected_script.push_str("@echo off\n");
    }
    expected_script.push_str("cd \"");
    expected_script.push_str(cwd.to_str().unwrap());
    expected_script.push_str("\"\necho test\n\n");
    let script = modify_script(&"echo test".to_string(), &options).unwrap();
    assert_eq!(script, expected_script);
}
#[test]
fn modify_script_working_directory() {
    // working_directory adds a second `cd` into the requested directory.
    let mut options = ScriptOptions::new();
    options.working_directory = Some(PathBuf::from("/usr/me/home"));
    let cwd = current_dir().unwrap();
    let mut expected_script = "".to_string();
    if cfg!(windows) {
        expected_script.push_str("@echo off\n");
    }
    expected_script.push_str("cd \"");
    expected_script.push_str(cwd.to_str().unwrap());
    expected_script.push_str("\" && cd \"/usr/me/home\"\necho test\n\n");
    let script = modify_script(&"echo test".to_string(), &options).unwrap();
    assert_eq!(script, expected_script);
}
#[test]
fn modify_script_print_commands() {
    // print_commands injects `set -x` on unix.
    let mut options = ScriptOptions::new();
    options.print_commands = true;
    let cwd = current_dir().unwrap();
    let mut expected_script = "".to_string();
    if !cfg!(windows) {
        expected_script.push_str("set -x\n");
    }
    expected_script.push_str("cd \"");
    expected_script.push_str(cwd.to_str().unwrap());
    expected_script.push_str("\"\necho test\n\n");
    let script = modify_script(&"echo test".to_string(), &options).unwrap();
    assert_eq!(script, expected_script);
}
#[test]
fn modify_script_exit_on_error_and_print_commands() {
    // Both flags together: `set -e` must come before `set -x`.
    let mut options = ScriptOptions::new();
    options.exit_on_error = true;
    options.print_commands = true;
    let cwd = current_dir().unwrap();
    let mut expected_script = "".to_string();
    if !cfg!(windows) {
        expected_script.push_str("set -e\n");
        expected_script.push_str("set -x\n");
    }
    expected_script.push_str("cd \"");
    expected_script.push_str(cwd.to_str().unwrap());
    expected_script.push_str("\"\necho test\n\n");
    let script = modify_script(&"echo test".to_string(), &options).unwrap();
    assert_eq!(script, expected_script);
}
#[test]
fn run_test_no_args_default_options() {
    // Happy path: exit code 0, stdout captured, stderr empty.
    let args = vec![];
    let options = ScriptOptions::new();
    let (code, output, error) = run(
        r#"
        echo "Test"
        exit 0
        "#,
        &args,
        &options,
    )
    .unwrap();
    assert_eq!(code, 0);
    assert!(!output.is_empty());
    assert!(error.is_empty());
}
#[cfg(feature = "encoding_rs")]
#[test]
fn run_test_no_args_with_encoding() {
    // Same happy path with an explicit output encoding.
    let args = vec![];
    let mut options = ScriptOptions::new();
    options.encoding = Some(encoding_rs::UTF_8);
    let (code, output, error) = run(
        r#"
        echo "Test"
        exit 0
        "#,
        &args,
        &options,
    )
    .unwrap();
    assert_eq!(code, 0);
    assert!(!output.is_empty());
    assert!(error.is_empty());
}
#[test]
fn run_test_error_exit_code() {
    // A non-zero script exit code is returned in the tuple, not as Err.
    let args = vec![];
    let options = ScriptOptions::new();
    let result = run("exit 1", &args, &options).unwrap();
    assert_eq!(result.0, 1);
}
#[test]
fn run_test_error_execute() {
    // An unknown runner binary must surface as an invocation error (Err).
    let args = vec![];
    let mut options = ScriptOptions::new();
    options.runner = Some("badtest123".to_string());
    let result = run("exit 1", &args, &options);
    assert!(result.is_err());
}
#[test]
fn run_test_with_runner_args() {
    // Valid extra runner arguments are forwarded to the shell/powershell.
    let args = vec![];
    let mut options = ScriptOptions::new();
    if cfg!(windows) {
        options.runner = Some("powershell".to_string());
        options.runner_args = Some(vec![
            "-window".to_string(),
            "normal".to_string(),
            "-command".to_string(),
        ]);
    } else {
        options.runner_args = Some(vec!["--".to_string()]);
    }
    let (code, output, error) = run(
        r#"
        echo "Test"
        exit 0
        "#,
        &args,
        &options,
    )
    .unwrap();
    assert_eq!(code, 0);
    assert!(!output.is_empty());
    assert!(error.is_empty());
}
#[test]
fn run_test_with_bad_runner_args() {
    // Invalid runner flags make the runner fail: non-zero code, stderr set.
    let args = vec![];
    let mut options = ScriptOptions::new();
    if cfg!(windows) {
        options.runner = Some("powershell".to_string());
        options.runner_args = Some(vec!["-notarealflag".to_string()]);
    } else {
        options.runner_args = Some(vec!["-notarealflag".to_string(), "--".to_string()]);
    }
    let (code, output, error) = run(
        r#"
        echo "Test"
        exit 0
        "#,
        &args,
        &options,
    )
    .unwrap();
    assert_ne!(code, 0);
    assert!(output.is_empty());
    assert!(!error.is_empty());
}
#[test]
fn run_test_with_args() {
    // CLI arguments reach the script as %1/%2 (windows) or $1/$2 (unix).
    let args = vec!["ARG1".to_string(), "ARG2".to_string()];
    let options = ScriptOptions::new();
    let script = if cfg!(windows) {
        "echo arg1: %1\necho arg2: %2"
    } else {
        "echo arg1: $1\necho arg2: $2"
    };
    let (code, output, error) = run(script, &args, &options).unwrap();
    assert_eq!(code, 0);
    assert!(!output.is_empty());
    assert!(error.is_empty());
    assert!(output.find("arg1: ARG1").is_some());
    assert!(output.find("arg2: ARG2").is_some());
}
#[test]
fn run_test_with_runner_and_script_args() {
    // Runner arguments and script arguments must be forwarded together.
    let args = vec!["ARG1".to_string(), "ARG2".to_string()];
    let mut options = ScriptOptions::new();
    if cfg!(windows) {
        options.runner = Some("powershell".to_string());
        options.runner_args = Some(vec![
            "-window".to_string(),
            "normal".to_string(),
            "-command".to_string(),
        ]);
    } else {
        options.runner_args = Some(vec!["--".to_string()]);
    }
    let script = if cfg!(windows) {
        "echo arg1: %1\necho arg2: %2"
    } else {
        "echo arg1: $1\necho arg2: $2"
    };
    let (code, output, error) = run(script, &args, &options).unwrap();
    assert_eq!(code, 0);
    assert!(!output.is_empty());
    assert!(error.is_empty());
    assert!(output.find("arg1: ARG1").is_some());
    assert!(output.find("arg2: ARG2").is_some());
}
#[test]
fn run_test_no_args_inherit_input() {
    // stdin inherited from the parent process; output still captured.
    let args = vec![];
    let mut options = ScriptOptions::new();
    options.input_redirection = IoOptions::Inherit;
    let (code, output, error) = run(
        r#"
        echo "Test"
        exit 0
        "#,
        &args,
        &options,
    )
    .unwrap();
    assert_eq!(code, 0);
    assert!(!output.is_empty());
    assert!(error.is_empty());
}
#[test]
fn run_test_no_args_pipe_input() {
    // stdin connected to a pipe.
    let args = vec![];
    let mut options = ScriptOptions::new();
    options.input_redirection = IoOptions::Pipe;
    let (code, output, error) = run(
        r#"
        echo "Test"
        exit 0
        "#,
        &args,
        &options,
    )
    .unwrap();
    assert_eq!(code, 0);
    assert!(!output.is_empty());
    assert!(error.is_empty());
}
#[test]
fn run_test_no_args_null_input() {
    // stdin connected to the null device.
    let args = vec![];
    let mut options = ScriptOptions::new();
    options.input_redirection = IoOptions::Null;
    let (code, output, error) = run(
        r#"
        echo "Test"
        exit 0
        "#,
        &args,
        &options,
    )
    .unwrap();
    assert_eq!(code, 0);
    assert!(!output.is_empty());
    assert!(error.is_empty());
}
#[test]
fn run_test_no_args_inherit_output() {
    // Inherited output goes to the parent's stdout, so nothing is captured.
    let args = vec![];
    let mut options = ScriptOptions::new();
    options.output_redirection = IoOptions::Inherit;
    let (code, output, error) = run(
        r#"
        echo "Test"
        exit 0
        "#,
        &args,
        &options,
    )
    .unwrap();
    assert_eq!(code, 0);
    assert!(output.is_empty());
    assert!(error.is_empty());
}
#[test]
fn run_test_no_args_pipe_output() {
    // Piped output is captured and returned.
    let args = vec![];
    let mut options = ScriptOptions::new();
    options.output_redirection = IoOptions::Pipe;
    let (code, output, error) = run(
        r#"
        echo "Test"
        exit 0
        "#,
        &args,
        &options,
    )
    .unwrap();
    assert_eq!(code, 0);
    assert!(!output.is_empty());
    assert!(error.is_empty());
}
#[test]
fn run_test_no_args_null_output() {
    // Output discarded via the null device: nothing captured.
    let args = vec![];
    let mut options = ScriptOptions::new();
    options.output_redirection = IoOptions::Null;
    let (code, output, error) = run(
        r#"
        echo "Test"
        exit 0
        "#,
        &args,
        &options,
    )
    .unwrap();
    assert_eq!(code, 0);
    assert!(output.is_empty());
    assert!(error.is_empty());
}
#[test]
fn spawn_test_valid_exit_code() {
    // spawn returns a Child; waiting on it yields a success status.
    let args = vec![];
    let options = ScriptOptions::new();
    let child = spawn("exit 0", &args, &options).unwrap();
    let output = child.wait_with_output().unwrap();
    assert!(output.status.success());
}
#[test]
fn spawn_test_error_exit_code() {
    // A failing script is reported through the child's exit status.
    let args = vec![];
    let options = ScriptOptions::new();
    let child = spawn("exit 1", &args, &options).unwrap();
    let output = child.wait_with_output().unwrap();
    assert!(!output.status.success());
}
#[test]
#[should_panic]
fn run_or_exit_error_code() {
    // A non-zero exit code must abort (verified via should_panic).
    let args = vec![];
    let options = ScriptOptions::new();
    run_or_exit("exit 1", &args, &options);
}
#[test]
#[should_panic]
fn run_or_exit_invocation_error() {
    // A command that cannot run must abort (verified via should_panic).
    let args = vec![];
    let options = ScriptOptions::new();
    run_or_exit("badcommand", &args, &options);
}
#[test]
fn run_or_exit_pipe_output() {
    // With piped output, stdout is captured and returned.
    let args = vec![];
    let mut options = ScriptOptions::new();
    options.output_redirection = IoOptions::Pipe;
    let (output, error) = run_or_exit(
        r#"
        echo "Test"
        exit 0
        "#,
        &args,
        &options,
    );
    assert!(!output.is_empty());
    assert!(error.is_empty());
}
#[test]
fn run_or_exit_append_env() {
    // Verifies that `env_vars` are visible to the child, that parent
    // variables are still inherited, and that nothing leaks back into the
    // parent environment.
    let args = vec![];
    let mut options = ScriptOptions::new();
    // extra variables that must be passed to the child process only
    let mut env_vars = std::collections::HashMap::<String, String>::new();
    env_vars.insert("MY_TEST_VARIABLE".to_string(), "MY_TEST_VALUE".to_string());
    options.env_vars = Some(env_vars);
    // a parent-process variable that must still be inherited by the child
    std::env::set_var("PARENT_VAR", "PARENT_VALUE");
    // env var expansion syntax differs between cmd and sh; a &str literal is
    // enough here — no need to allocate owned Strings
    let script = if cfg!(windows) {
        r#"
        ECHO %MY_TEST_VARIABLE%
        ECHO %PARENT_VAR%
        "#
    } else {
        r#"
        echo $MY_TEST_VARIABLE
        echo $PARENT_VAR
        "#
    };
    let (output, error) = run_or_exit(script, &args, &options);
    assert!(output.contains("MY_TEST_VALUE"));
    assert!(output.contains("PARENT_VALUE"));
    assert!(error.is_empty());
    // the appended variables must not pollute the parent environment
    assert!(
        std::env::var("MY_TEST_VARIABLE").is_err(),
        "The parent environment is polluted"
    );
}
| rust | Apache-2.0 | a79fdf0e15afca84681e5cf104bc080ceec60954 | 2026-01-04T20:25:01.977296Z | false |
sagiegurari/run_script | https://github.com/sagiegurari/run_script/blob/a79fdf0e15afca84681e5cf104bc080ceec60954/tests/spawn_script_macro_test.rs | tests/spawn_script_macro_test.rs | use run_script;
use run_script::ScriptOptions;
#[test]
fn spawn_macro_no_args_no_options_valid() {
    // Macro arm: script text only.
    let child = run_script::spawn_script!(
        r#"
        echo "Test"
        exit 0
        "#
    )
    .unwrap();
    let output = child.wait_with_output().unwrap();
    assert!(output.status.success());
}
#[test]
fn spawn_macro_no_args_no_options_error_output() {
    // The child's exit code must reflect the script's exit code.
    let child = run_script::spawn_script!(
        r#"
        echo "Test"
        exit 123
        "#
    )
    .unwrap();
    let output = child.wait_with_output().unwrap();
    assert_eq!(output.status.code().unwrap(), 123);
}
#[test]
fn spawn_macro_no_args_with_options() {
    // Macro arm: script text + options.
    let options = ScriptOptions::new();
    let child = run_script::spawn_script!(
        r#"
        echo "Test"
        exit 0
        "#,
        options
    )
    .unwrap();
    let output = child.wait_with_output().unwrap();
    assert!(output.status.success());
    assert!(output.stdout.len() > 0);
}
#[test]
fn spawn_macro_with_args_with_options() {
    // Macro arm: script text + args + options.
    let options = ScriptOptions::new();
    let script = if cfg!(windows) {
        r#"
        echo arg1: %1
        echo arg2: %2
        exit 0
        "#
    } else {
        r#"
        echo arg1: $1
        echo arg2: $2
        exit 0
        "#
    };
    let child = run_script::spawn_script!(
        &script,
        &vec!["ARG1".to_string(), "ARG2".to_string()],
        options
    )
    .unwrap();
    let output = child.wait_with_output().unwrap();
    assert!(output.status.success());
    assert!(output.stdout.len() > 0);
    assert_eq!(output.stderr.len(), 0);
}
| rust | Apache-2.0 | a79fdf0e15afca84681e5cf104bc080ceec60954 | 2026-01-04T20:25:01.977296Z | false |
sagiegurari/run_script | https://github.com/sagiegurari/run_script/blob/a79fdf0e15afca84681e5cf104bc080ceec60954/tests/run_script_macro_test.rs | tests/run_script_macro_test.rs | use run_script;
use run_script::ScriptOptions;
#[test]
fn run_macro_no_args_no_options_valid() {
    // Macro arm: script text only.
    let (code, output, error) = run_script::run_script!(
        r#"
        echo "Test"
        exit 0
        "#
    )
    .unwrap();
    assert_eq!(code, 0);
    assert!(output.len() > 0);
    assert_eq!(error.len(), 0);
}
#[test]
fn run_macro_no_args_no_options_error_output() {
    // The first tuple element carries the script's exit code.
    let output = run_script::run_script!(
        r#"
        echo "Test"
        exit 123
        "#
    )
    .unwrap();
    let code = output.0;
    assert_eq!(code, 123);
}
#[test]
fn run_macro_no_args_with_options() {
    // Macro arm: script text + options.
    let options = ScriptOptions::new();
    let (code, output, error) = run_script::run_script!(
        r#"
        echo "Test"
        exit 0
        "#,
        options
    )
    .unwrap();
    assert_eq!(code, 0);
    assert!(output.len() > 0);
    assert_eq!(error.len(), 0);
}
#[test]
fn run_macro_with_args_with_options() {
    // Macro arm: script text + args + options.
    let options = ScriptOptions::new();
    let script = if cfg!(windows) {
        r#"
        echo arg1: %1
        echo arg2: %2
        exit 0
        "#
    } else {
        r#"
        echo arg1: $1
        echo arg2: $2
        exit 0
        "#
    };
    let (code, output, error) = run_script::run_script!(
        &script,
        &vec!["ARG1".to_string(), "ARG2".to_string()],
        options
    )
    .unwrap();
    assert_eq!(code, 0);
    assert!(output.len() > 0);
    assert_eq!(error.len(), 0);
    assert!(output.find("arg1: ARG1").is_some());
    assert!(output.find("arg2: ARG2").is_some());
}
| rust | Apache-2.0 | a79fdf0e15afca84681e5cf104bc080ceec60954 | 2026-01-04T20:25:01.977296Z | false |
sagiegurari/run_script | https://github.com/sagiegurari/run_script/blob/a79fdf0e15afca84681e5cf104bc080ceec60954/tests/run_test.rs | tests/run_test.rs | use run_script;
use run_script::ScriptOptions;
#[test]
fn run_test() {
    // End-to-end smoke test for run_script::run.
    let args = vec![];
    let options = ScriptOptions::new();
    let (code, output, error) = run_script::run(
        r#"
        echo "Test"
        exit 0
        "#,
        &args,
        &options,
    )
    .unwrap();
    assert_eq!(code, 0);
    assert!(output.len() > 0);
    assert_eq!(error.len(), 0);
}
| rust | Apache-2.0 | a79fdf0e15afca84681e5cf104bc080ceec60954 | 2026-01-04T20:25:01.977296Z | false |
sagiegurari/run_script | https://github.com/sagiegurari/run_script/blob/a79fdf0e15afca84681e5cf104bc080ceec60954/tests/run_script_or_exit_macro_test.rs | tests/run_script_or_exit_macro_test.rs | use run_script;
use run_script::ScriptOptions;
#[test]
fn run_script_or_exit_macro_no_args_no_options_valid() {
    // Macro arm: script text only.
    let (output, error) = run_script::run_script_or_exit!(
        r#"
        echo "Test"
        exit 0
        "#
    );
    assert!(output.len() > 0);
    assert_eq!(error.len(), 0);
}
#[test]
fn run_script_or_exit_macro_no_args_with_options() {
    // Macro arm: script text + options.
    let options = ScriptOptions::new();
    let (output, error) = run_script::run_script_or_exit!(
        r#"
        echo "Test"
        exit 0
        "#,
        options
    );
    assert!(output.len() > 0);
    assert_eq!(error.len(), 0);
}
#[test]
fn run_script_or_exit_macro_with_args_with_options() {
    // Macro arm: script text + args + options.
    let options = ScriptOptions::new();
    let script = if cfg!(windows) {
        r#"
        echo arg1: %1
        echo arg2: %2
        exit 0
        "#
    } else {
        r#"
        echo arg1: $1
        echo arg2: $2
        exit 0
        "#
    };
    let (output, error) = run_script::run_script_or_exit!(
        &script,
        &vec!["ARG1".to_string(), "ARG2".to_string()],
        options
    );
    assert!(output.len() > 0);
    assert_eq!(error.len(), 0);
    assert!(output.find("arg1: ARG1").is_some());
    assert!(output.find("arg2: ARG2").is_some());
}
| rust | Apache-2.0 | a79fdf0e15afca84681e5cf104bc080ceec60954 | 2026-01-04T20:25:01.977296Z | false |
sagiegurari/run_script | https://github.com/sagiegurari/run_script/blob/a79fdf0e15afca84681e5cf104bc080ceec60954/benches/bench_run.rs | benches/bench_run.rs | #![feature(test)]
extern crate test;
use run_script;
use run_script::ScriptOptions;
use test::Bencher;
#[bench]
fn run(bencher: &mut Bencher) {
    // Measures a full script execution round-trip (spawn + wait + capture).
    let options = ScriptOptions::new();
    let args = vec![];
    bencher.iter(|| {
        let (code, output, error) = run_script::run(
            r#"
            echo "Test"
            exit 0
            "#,
            &args,
            &options,
        )
        .unwrap();
        assert_eq!(code, 0);
        assert!(output.len() > 0);
        assert_eq!(error.len(), 0);
    });
}
| rust | Apache-2.0 | a79fdf0e15afca84681e5cf104bc080ceec60954 | 2026-01-04T20:25:01.977296Z | false |
sagiegurari/run_script | https://github.com/sagiegurari/run_script/blob/a79fdf0e15afca84681e5cf104bc080ceec60954/examples/macro_examples.rs | examples/macro_examples.rs | use run_script;
use run_script::ScriptOptions;
// Demonstrates every arm of the run_script! macro plus spawn_script!.
fn main() {
    // simple call to run script with only the script text
    let (code, output, error) = run_script::run_script!(
        r#"
        echo "Test"
        exit 0
        "#
    )
    .unwrap();
    println!("Exit Code: {}", code);
    println!("Output: {}", output);
    println!("Error: {}", error);
    // run script invoked with the script text and options
    let options = ScriptOptions::new();
    let (code, output, error) = run_script::run_script!(
        r#"
        echo "Test"
        exit 0
        "#,
        &options
    )
    .unwrap();
    println!("Exit Code: {}", code);
    println!("Output: {}", output);
    println!("Error: {}", error);
    // run script invoked with all arguments
    let options = ScriptOptions::new();
    let (code, output, error) = run_script::run_script!(
        r#"
        echo "Test"
        exit 0
        "#,
        &vec!["ARG1".to_string(), "ARG2".to_string()],
        &options
    )
    .unwrap();
    println!("Exit Code: {}", code);
    println!("Output: {}", output);
    println!("Error: {}", error);
    // spawn_script! works the same as run_script! but returns the child process handle
    let child = run_script::spawn_script!(
        r#"
        echo "Test"
        exit 0
        "#
    )
    .unwrap();
    println!("PID: {}", child.id());
}
| rust | Apache-2.0 | a79fdf0e15afca84681e5cf104bc080ceec60954 | 2026-01-04T20:25:01.977296Z | false |
sagiegurari/run_script | https://github.com/sagiegurari/run_script/blob/a79fdf0e15afca84681e5cf104bc080ceec60954/examples/function_examples.rs | examples/function_examples.rs | use run_script;
use run_script::ScriptOptions;
// Demonstrates the plain function API: run (blocking) and spawn (child handle).
fn main() {
    let options = ScriptOptions::new();
    let args = vec![];
    // run the script and get the script execution output
    let (code, output, error) = run_script::run(
        r#"
        echo "Directory Info:"
        dir
        "#,
        &args,
        &options,
    )
    .unwrap();
    println!("Exit Code: {}", code);
    println!("Output: {}", output);
    println!("Error: {}", error);
    // run the script and get a handle to the running child process
    let child = run_script::spawn(
        r#"
        echo "Directory Info:"
        dir
        "#,
        &args,
        &options,
    )
    .unwrap();
    let spawn_output = child.wait_with_output().unwrap();
    println!("Success: {}", &spawn_output.status.success());
}
| rust | Apache-2.0 | a79fdf0e15afca84681e5cf104bc080ceec60954 | 2026-01-04T20:25:01.977296Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/src/borrow.rs | src/borrow.rs | use std::sync::Arc;
/// When a frame is sent via sockets, only the data pointer is necessary. For inter-thread
/// communications, a full data block is required. This smart-pointer type acts like
/// std::borrow::Cow, except it is restricted to &[u8] / Vec<u8> buffers
///
/// The principle is simple: always give the full data block if possible, but give a pointer when
/// it isn't
///
/// Example:
///
/// ```rust
/// use busrt::borrow::Cow;
///
/// let owned_payload: Cow = vec![0u8, 1, 2, 3].into();
/// let borrowed_payload: Cow = vec![0u8, 1, 2, 3].as_slice().into();
/// ```
#[derive(Clone)]
pub enum Cow<'a> {
    Borrowed(&'a [u8]),
    Owned(Vec<u8>),
    Referenced(Arc<Vec<u8>>),
}
impl<'a> From<Vec<u8>> for Cow<'a> {
    fn from(value: Vec<u8>) -> Self {
        Self::Owned(value)
    }
}
impl<'a> From<Arc<Vec<u8>>> for Cow<'a> {
    fn from(value: Arc<Vec<u8>>) -> Self {
        Self::Referenced(value)
    }
}
impl<'a> From<&'a [u8]> for Cow<'a> {
    fn from(value: &'a [u8]) -> Self {
        Self::Borrowed(value)
    }
}
impl Cow<'_> {
    /// Returns the payload as a byte slice, regardless of the storage kind.
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        match self {
            Self::Borrowed(data) => data,
            Self::Owned(data) => data,
            Self::Referenced(data) => data,
        }
    }
    /// Consumes the pointer and returns an owned vector; the existing
    /// allocation is reused when the data is exclusively owned, otherwise
    /// the bytes are copied.
    #[inline]
    pub fn to_vec(self) -> Vec<u8> {
        match self {
            Self::Owned(data) => data,
            Self::Borrowed(data) => data.to_vec(),
            Self::Referenced(data) => data.to_vec(),
        }
    }
    /// Number of payload bytes.
    #[inline]
    pub fn len(&self) -> usize {
        self.as_slice().len()
    }
    /// Returns `true` when the payload contains no bytes.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.as_slice().is_empty()
    }
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/src/lib.rs | src/lib.rs | #![ doc = include_str!( concat!( env!( "CARGO_MANIFEST_DIR" ), "/", "README.md" ) ) ]
use std::fmt;
use std::sync::Arc;
use std::time::Duration;
#[cfg(feature = "rpc")]
pub use async_trait::async_trait;
// -- wire protocol operation codes --
pub const OP_NOP: u8 = 0x00;
pub const OP_PUBLISH: u8 = 0x01;
pub const OP_SUBSCRIBE: u8 = 0x02;
pub const OP_UNSUBSCRIBE: u8 = 0x03;
pub const OP_EXCLUDE: u8 = 0x04;
pub const OP_UNEXCLUDE: u8 = 0x05;
pub const OP_PUBLISH_FOR: u8 = 0x06;
pub const OP_MESSAGE: u8 = 0x12;
pub const OP_BROADCAST: u8 = 0x13;
pub const OP_ACK: u8 = 0xFE;
pub const PROTOCOL_VERSION: u16 = 0x01;
// response code for a successfully processed frame
pub const RESPONSE_OK: u8 = 0x01;
// pre-built ping frame payload (all-zero)
pub const PING_FRAME: &[u8] = &[0, 0, 0, 0, 0, 0, 0, 0, 0];
// -- wire-level error codes (see ErrorKind for the mapping) --
pub const ERR_CLIENT_NOT_REGISTERED: u8 = 0x71;
pub const ERR_DATA: u8 = 0x72;
pub const ERR_IO: u8 = 0x73;
pub const ERR_OTHER: u8 = 0x74;
pub const ERR_NOT_SUPPORTED: u8 = 0x75;
pub const ERR_BUSY: u8 = 0x76;
pub const ERR_NOT_DELIVERED: u8 = 0x77;
pub const ERR_TIMEOUT: u8 = 0x78;
pub const ERR_ACCESS: u8 = 0x79;
// greetings byte exchanged on connect
pub const GREETINGS: [u8; 1] = [0xEB];
// crate metadata
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
pub static AUTHOR: &str = "(c) 2022 Bohemia Automation / Altertech";
// -- default tunables for clients/brokers --
pub const DEFAULT_TIMEOUT: Duration = Duration::from_secs(1);
pub const DEFAULT_BUF_TTL: Duration = Duration::from_micros(10);
pub const DEFAULT_BUF_SIZE: usize = 8192;
pub const DEFAULT_QUEUE_SIZE: usize = 8192;
// separator embedded in secondary client names (see primary_sender)
pub const SECONDARY_SEP: &str = "%%";
/// When a frame is sent, methods do not wait for the result, but they return OpConfirm type to let
/// the sender get the result if required.
///
/// When the frame is sent with QoS "processed", the Option contains Receiver<Result>
///
/// Example:
///
/// ```rust,ignore
/// use busrt::QoS;
///
/// let result = client.send("target", payload, QoS::Processed).await.unwrap(); // get send result
/// let confirm = result.unwrap(); // get OpConfirm
/// let op_result = confirm.await.unwrap(); // receive the operation result
/// match op_result {
/// Ok(_) => { /* the server has confirmed that it had processed the message */ }
/// Err(e) => { /* the server has returned an error */ }
/// }
/// ```
#[cfg(any(feature = "rpc", feature = "broker", feature = "ipc"))]
pub type OpConfirm = Option<tokio::sync::oneshot::Receiver<Result<(), Error>>>;
// frames are reference-counted so one frame can be fanned out to many consumers
pub type Frame = Arc<FrameData>;
pub type EventChannel = async_channel::Receiver<Frame>;
// sync (non-tokio) flavors: rtsc::pi primitives under the "rt" feature
// (NOTE(review): presumably priority-inheritance — confirm), parking_lot otherwise
#[cfg(all(any(feature = "ipc-sync", feature = "rpc-sync"), feature = "rt"))]
type RawMutex = rtsc::pi::RawMutex;
#[cfg(all(any(feature = "ipc-sync", feature = "rpc-sync"), feature = "rt"))]
type Condvar = rtsc::pi::Condvar;
#[cfg(all(any(feature = "ipc-sync", feature = "rpc-sync"), not(feature = "rt")))]
type RawMutex = parking_lot::RawMutex;
#[cfg(all(any(feature = "ipc-sync", feature = "rpc-sync"), not(feature = "rt")))]
type Condvar = parking_lot::Condvar;
#[cfg(any(feature = "ipc-sync", feature = "rpc-sync"))]
pub type SyncEventChannel = rtsc::channel::Receiver<Frame, RawMutex, Condvar>;
#[cfg(any(feature = "ipc-sync", feature = "rpc-sync"))]
type SyncEventSender = rtsc::channel::Sender<Frame, RawMutex, Condvar>;
#[cfg(any(feature = "ipc-sync", feature = "rpc-sync"))]
pub type SyncOpConfirm = Option<oneshot::Receiver<Result<(), Error>>>;
/// High-level error classification; discriminants are the wire-level codes.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
#[repr(u8)]
pub enum ErrorKind {
    NotRegistered = ERR_CLIENT_NOT_REGISTERED,
    NotSupported = ERR_NOT_SUPPORTED,
    Io = ERR_IO,
    Timeout = ERR_TIMEOUT,
    Data = ERR_DATA,
    Busy = ERR_BUSY,
    NotDelivered = ERR_NOT_DELIVERED,
    Access = ERR_ACCESS,
    Other = ERR_OTHER,
    // local-only kind: a closed peer/channel, never sent on the wire
    Eof = 0xff,
}
impl From<u8> for ErrorKind {
    /// Maps a wire-level error code to an [`ErrorKind`]; unknown codes fall
    /// back to [`ErrorKind::Other`].
    fn from(code: u8) -> Self {
        match code {
            ERR_CLIENT_NOT_REGISTERED => ErrorKind::NotRegistered,
            ERR_NOT_SUPPORTED => ErrorKind::NotSupported,
            ERR_IO => ErrorKind::Io,
            ERR_DATA => ErrorKind::Data,
            ERR_BUSY => ErrorKind::Busy,
            ERR_NOT_DELIVERED => ErrorKind::NotDelivered,
            // fix: ERR_TIMEOUT was previously unmapped, so timeout responses
            // were misreported as ErrorKind::Other
            ERR_TIMEOUT => ErrorKind::Timeout,
            ERR_ACCESS => ErrorKind::Access,
            _ => ErrorKind::Other,
        }
    }
}
impl fmt::Display for ErrorKind {
    // human-readable label for each kind
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{}",
            match self {
                ErrorKind::NotRegistered => "Client not registered",
                ErrorKind::NotSupported => "Feature not supported",
                ErrorKind::Io => "I/O Error",
                ErrorKind::Timeout => "Timeout",
                ErrorKind::Data => "Data Error",
                ErrorKind::Busy => "Busy",
                ErrorKind::NotDelivered => "Frame not delivered",
                ErrorKind::Other => "Error",
                ErrorKind::Access => "Access denied",
                ErrorKind::Eof => "Eof",
            }
        )
    }
}
/// The library error type: an [`ErrorKind`] plus an optional detail message.
#[derive(Debug)]
pub struct Error {
    kind: ErrorKind,
    message: Option<String>,
}
impl fmt::Display for Error {
    // "<kind>: <message>" when a message is present, "<kind>" otherwise
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(ref message) = self.message {
            write!(f, "{}: {}", self.kind, message)
        } else {
            write!(f, "{}", self.kind)
        }
    }
}
impl std::error::Error for Error {}
impl Error {
    /// Creates an error of the given kind with an optional message.
    #[inline]
    pub fn new(kind: ErrorKind, message: Option<impl fmt::Display>) -> Self {
        Self {
            kind,
            message: message.map(|m| m.to_string()),
        }
    }
    /// I/O error with a message.
    #[inline]
    pub fn io(e: impl fmt::Display) -> Self {
        Self {
            kind: ErrorKind::Io,
            message: Some(e.to_string()),
        }
    }
    /// Data (parsing/format) error with a message.
    #[inline]
    pub fn data(e: impl fmt::Display) -> Self {
        Self {
            kind: ErrorKind::Data,
            message: Some(e.to_string()),
        }
    }
    /// Access-denied error with a message.
    #[inline]
    pub fn access(e: impl fmt::Display) -> Self {
        Self {
            kind: ErrorKind::Access,
            message: Some(e.to_string()),
        }
    }
    /// Feature-not-supported error with a message.
    #[inline]
    pub fn not_supported(e: impl fmt::Display) -> Self {
        Self {
            kind: ErrorKind::NotSupported,
            message: Some(e.to_string()),
        }
    }
    /// Client-not-registered error (no message).
    #[inline]
    pub fn not_registered() -> Self {
        Self {
            kind: ErrorKind::NotRegistered,
            message: None,
        }
    }
    /// Frame-not-delivered error (no message).
    #[inline]
    pub fn not_delivered() -> Self {
        Self {
            kind: ErrorKind::NotDelivered,
            message: None,
        }
    }
    /// Timeout error (no message).
    #[inline]
    pub fn timeout() -> Self {
        Self {
            kind: ErrorKind::Timeout,
            message: None,
        }
    }
    /// Busy error with a message.
    #[inline]
    pub fn busy(e: impl fmt::Display) -> Self {
        Self {
            kind: ErrorKind::Busy,
            message: Some(e.to_string()),
        }
    }
    /// Returns the error classification.
    #[inline]
    pub fn kind(&self) -> ErrorKind {
        self.kind
    }
}
/// Conversion of wire-level response codes into a `Result`.
pub trait IntoBusRtResult {
    fn to_busrt_result(self) -> Result<(), Error>;
}
impl IntoBusRtResult for u8 {
    // RESPONSE_OK maps to Ok(()); any other code becomes an Error whose kind
    // is derived via From<u8> for ErrorKind
    #[inline]
    fn to_busrt_result(self) -> Result<(), Error> {
        if self == RESPONSE_OK {
            Ok(())
        } else {
            Err(Error {
                kind: self.into(),
                message: None,
            })
        }
    }
}
// tokio timeout expirations map to the dedicated Timeout kind
#[cfg(any(feature = "rpc", feature = "broker", feature = "ipc"))]
impl From<tokio::time::error::Elapsed> for Error {
    fn from(_e: tokio::time::error::Elapsed) -> Error {
        Error::timeout()
    }
}
impl From<std::io::Error> for Error {
fn from(e: std::io::Error) -> Error {
if e.kind() == std::io::ErrorKind::UnexpectedEof
|| e.kind() == std::io::ErrorKind::BrokenPipe
|| e.kind() == std::io::ErrorKind::ConnectionReset
{
Error {
kind: ErrorKind::Eof,
message: None,
}
} else {
Error::io(e)
}
}
}
impl From<&std::io::Error> for Error {
fn from(e: &std::io::Error) -> Error {
if e.kind() == std::io::ErrorKind::UnexpectedEof
|| e.kind() == std::io::ErrorKind::BrokenPipe
|| e.kind() == std::io::ErrorKind::ConnectionReset
{
Error {
kind: ErrorKind::Eof,
message: None,
}
} else {
Error::io(e)
}
}
}
// invalid UTF-8 payloads are data errors
impl From<std::str::Utf8Error> for Error {
    fn from(e: std::str::Utf8Error) -> Error {
        Error::data(e)
    }
}
// malformed fixed-size fields are data errors
impl From<std::array::TryFromSliceError> for Error {
    fn from(e: std::array::TryFromSliceError) -> Error {
        Error::data(e)
    }
}
// a closed async channel means the peer is gone: report Eof
impl<T> From<async_channel::SendError<T>> for Error {
    fn from(_e: async_channel::SendError<T>) -> Error {
        Error {
            kind: ErrorKind::Eof,
            message: None,
        }
    }
}
// a dropped oneshot sender likewise maps to Eof
#[cfg(any(feature = "rpc", feature = "broker", feature = "ipc"))]
impl From<tokio::sync::oneshot::error::RecvError> for Error {
    fn from(_e: tokio::sync::oneshot::error::RecvError) -> Error {
        Error {
            kind: ErrorKind::Eof,
            message: None,
        }
    }
}
/// Frame operations; discriminants are the wire op codes.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
#[repr(u8)]
pub enum FrameOp {
    Nop = OP_NOP,
    Message = OP_MESSAGE,
    Broadcast = OP_BROADCAST,
    PublishTopic = OP_PUBLISH,
    PublishTopicFor = OP_PUBLISH_FOR,
    SubscribeTopic = OP_SUBSCRIBE,
    UnsubscribeTopic = OP_UNSUBSCRIBE,
    ExcludeTopic = OP_EXCLUDE,
    // not include but unexclude as it's clearly an opposite operation to exclude
    // while include may be confused with subscribe
    UnexcludeTopic = OP_UNEXCLUDE,
}
impl TryFrom<u8> for FrameOp {
    type Error = Error;
    // unknown op codes produce a data error
    fn try_from(tp: u8) -> Result<Self, Error> {
        match tp {
            OP_NOP => Ok(FrameOp::Nop),
            OP_MESSAGE => Ok(FrameOp::Message),
            OP_BROADCAST => Ok(FrameOp::Broadcast),
            OP_PUBLISH => Ok(FrameOp::PublishTopic),
            OP_PUBLISH_FOR => Ok(FrameOp::PublishTopicFor),
            OP_SUBSCRIBE => Ok(FrameOp::SubscribeTopic),
            OP_UNSUBSCRIBE => Ok(FrameOp::UnsubscribeTopic),
            OP_EXCLUDE => Ok(FrameOp::ExcludeTopic),
            OP_UNEXCLUDE => Ok(FrameOp::UnexcludeTopic),
            _ => Err(Error::data(format!("Invalid frame type: {}", tp))),
        }
    }
}
/// Frame delivery quality of service.
///
/// Encoded in two bits: bit 0 — the broker must acknowledge processing,
/// bit 1 — the frame goes through the realtime path.
#[derive(Debug, Copy, Clone)]
#[repr(u8)]
pub enum QoS {
    No = 0,
    Processed = 1,
    Realtime = 2,
    RealtimeProcessed = 3,
}
impl QoS {
    /// `true` for the realtime delivery levels (bit 1 set).
    #[inline]
    pub fn is_realtime(self) -> bool {
        matches!(self, QoS::Realtime | QoS::RealtimeProcessed)
    }
    /// `true` when a processing acknowledgement is required (bit 0 set).
    #[inline]
    pub fn needs_ack(self) -> bool {
        matches!(self, QoS::Processed | QoS::RealtimeProcessed)
    }
}
impl TryFrom<u8> for QoS {
    type Error = Error;
    // only the four defined levels (0..=3) are valid; anything else is a
    // data error
    fn try_from(q: u8) -> Result<Self, Error> {
        match q {
            0 => Ok(QoS::No),
            1 => Ok(QoS::Processed),
            2 => Ok(QoS::Realtime),
            3 => Ok(QoS::RealtimeProcessed),
            _ => Err(Error::data(format!("Invalid QoS: {}", q))),
        }
    }
}
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
#[repr(u8)]
pub enum FrameKind {
Prepared = 0xff,
Message = OP_MESSAGE,
Broadcast = OP_BROADCAST,
Publish = OP_PUBLISH,
Acknowledge = OP_ACK,
Nop = OP_NOP,
}
impl TryFrom<u8> for FrameKind {
    type Error = Error;
    /// Parses a raw frame-kind byte.
    ///
    /// NOTE(review): `FrameKind::Prepared` (0xff) is deliberately absent here —
    /// presumably prepared frames are internal-only and never parsed off the
    /// wire; confirm against the frame reader before relying on this.
    ///
    /// # Errors
    ///
    /// Returns a data error for any unknown byte.
    fn try_from(code: u8) -> Result<Self, Self::Error> {
        match code {
            OP_MESSAGE => Ok(FrameKind::Message),
            OP_BROADCAST => Ok(FrameKind::Broadcast),
            OP_PUBLISH => Ok(FrameKind::Publish),
            OP_ACK => Ok(FrameKind::Acknowledge),
            OP_NOP => Ok(FrameKind::Nop),
            _ => Err(Error::data(format!("Invalid frame type: {:x}", code))),
        }
    }
}
/// Decoded frame contents.
///
/// To keep the zero-copy model the full incoming buffer is stored together
/// with the offset where the payload begins (see [`FrameData::payload`]).
#[derive(Debug)]
pub struct FrameData {
    kind: FrameKind,
    // None for prepared frames — the accessors below panic in that case
    sender: Option<String>,
    // set for pub/sub frames only
    topic: Option<String>,
    header: Option<Vec<u8>>, // zero-copy payload prefix
    // full incoming buffer; payload starts at `payload_pos`
    buf: Vec<u8>,
    payload_pos: usize,
    realtime: bool,
}
impl FrameData {
    /// Assembles a frame from its raw parts.
    #[inline]
    pub fn new(
        kind: FrameKind,
        sender: Option<String>,
        topic: Option<String>,
        header: Option<Vec<u8>>,
        buf: Vec<u8>,
        payload_pos: usize,
        realtime: bool,
    ) -> Self {
        Self {
            kind,
            sender,
            topic,
            header,
            buf,
            payload_pos,
            realtime,
        }
    }
    /// An empty NOP frame: no sender, no topic, no payload, not realtime.
    #[inline]
    pub fn new_nop() -> Self {
        Self::new(FrameKind::Nop, None, None, None, Vec::new(), 0, false)
    }
    /// Kind of this frame.
    #[inline]
    pub fn kind(&self) -> FrameKind {
        self.kind
    }
    /// Full sender id, including the secondary suffix if present.
    ///
    /// # Panics
    ///
    /// Will panic if called for a prepared frame
    #[inline]
    pub fn sender(&self) -> &str {
        self.sender.as_ref().unwrap()
    }
    /// Sender id with everything after `SECONDARY_SEP` stripped.
    ///
    /// # Panics
    ///
    /// Will panic if called for a prepared frame
    #[inline]
    pub fn primary_sender(&self) -> &str {
        let full = self.sender.as_ref().unwrap();
        match full.find(SECONDARY_SEP) {
            Some(pos) => &full[..pos],
            None => full,
        }
    }
    /// Filled for pub/sub communications
    #[inline]
    pub fn topic(&self) -> Option<&str> {
        self.topic.as_deref()
    }
    /// To keep zero-copy model, frames contain the full incoming buffer + actual payload position.
    /// Use this method to get the actual call payload.
    #[inline]
    pub fn payload(&self) -> &[u8] {
        &self.buf[self.payload_pos..]
    }
    /// The header can be used by certain implementations (e.g. the default RPC layer) to
    /// keep zero-copy model. The header is None for IPC communications, but filled for
    /// inter-thread ones. A custom layer should use/parse the header to avoid unnecessary payload
    /// copy
    #[inline]
    pub fn header(&self) -> Option<&[u8]> {
        self.header.as_deref()
    }
    /// Whether the frame was sent with a realtime QoS.
    #[inline]
    pub fn is_realtime(&self) -> bool {
        self.realtime
    }
}
// core modules, always available
pub mod borrow;
pub mod common;
pub mod tools {
    #[cfg(any(feature = "rpc", feature = "broker", feature = "ipc"))]
    pub mod pubsub;
}
// feature-gated components
#[cfg(feature = "broker")]
pub mod broker;
#[cfg(feature = "cursors")]
pub mod cursors;
#[cfg(feature = "ipc")]
pub mod ipc;
#[cfg(any(feature = "rpc", feature = "rpc-sync"))]
pub mod rpc;
#[cfg(any(feature = "ipc-sync", feature = "rpc-sync"))]
pub mod sync;
#[cfg(any(feature = "rpc", feature = "broker", feature = "ipc"))]
pub mod client;
#[cfg(any(feature = "broker", feature = "ipc"))]
pub mod comm;
/// Expands to an empty borrowed payload (`Cow::Borrowed(&[])`) — handy for
/// calls that take a payload argument but carry no data.
#[macro_export]
macro_rules! empty_payload {
    () => {
        $crate::borrow::Cow::Borrowed(&[])
    };
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/src/cli.rs | src/cli.rs | use async_trait::async_trait;
use busrt::client::AsyncClient;
use busrt::common::{BrokerInfo, BrokerStats, ClientList};
use busrt::ipc::{Client, Config};
use busrt::rpc::{DummyHandlers, Rpc, RpcClient, RpcError, RpcEvent, RpcHandlers, RpcResult};
use busrt::{empty_payload, Error, Frame, QoS};
use clap::{Parser, Subcommand};
use colored::Colorize;
use is_terminal::IsTerminal;
use log::{error, info};
use num_format::{Locale, ToFormattedString};
use serde_value::Value;
use std::collections::BTreeMap;
use std::sync::Arc;
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::sync::Mutex;
use tokio::time::sleep;
#[macro_use]
extern crate bma_benchmark;
#[cfg(not(feature = "std-alloc"))]
#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
#[macro_use]
extern crate prettytable;
/// Renders any `Debug` value as its debug string (`format!("{:?}", …)`).
trait ToDebugString<T> {
    fn to_debug_string(&self) -> String;
}
// blanket impl: every Debug type gets the method for free
impl<T: std::fmt::Debug> ToDebugString<T> for T {
    #[inline]
    fn to_debug_string(&self) -> String {
        format!("{:?}", self)
    }
}
// RPC calls served by the embedded broker core (target ".broker").
// NOTE: plain `//` comments are used on purpose — clap derive would turn
// `///` doc comments into CLI help text and change the program's output.
#[derive(Subcommand, Clone)]
enum BrokerCommand {
    #[clap(name = "client.list")]
    ClientList,
    #[clap(name = "info")]
    Info,
    #[clap(name = "stats")]
    Stats,
    #[clap(name = "test")]
    Test,
}
// `listen` subcommand: subscribe to topics and print incoming frames.
// (`//` comments only — clap turns `///` into help text.)
#[derive(Parser, Clone)]
struct ListenCommand {
    #[clap(short = 't', long = "topics", help = "Subscribe to topics")]
    topics: Vec<String>,
    #[clap(long = "exclude", help = "Exclude topics")]
    exclude_topics: Vec<String>,
}
// common "target + optional payload" argument pair (`send`, `rpc notify`);
// missing payload means "read it from stdin"
#[derive(Parser, Clone)]
struct TargetPayload {
    #[clap()]
    target: String,
    #[clap(help = "payload string or empty for stdin")]
    payload: Option<String>,
}
// arguments of `rpc call` / `rpc call0`: target client, method name and
// either key=value params (packed to msgpack) or "-" for raw stdin payload
#[derive(Parser, Clone)]
struct RpcCall {
    #[clap()]
    target: String,
    #[clap()]
    method: String,
    #[clap(help = "payload string key=value, '-' for stdin payload")]
    params: Vec<String>,
}
// `publish` subcommand: topic + optional payload (stdin when omitted)
#[derive(Parser, Clone)]
struct PublishCommand {
    #[clap()]
    topic: String,
    #[clap(help = "payload string or empty for stdin")]
    payload: Option<String>,
}
// `rpc` subcommands: Call expects a reply, Call0 is fire-and-forget,
// Notify sends a plain RPC notification
#[derive(Subcommand, Clone)]
enum RpcCommand {
    Listen(RpcListenCommand),
    Notify(TargetPayload),
    Call0(RpcCall),
    Call(RpcCall),
}
// `rpc listen`: serve incoming RPC calls/notifications, optionally
// subscribing to pub/sub topics as well
#[derive(Parser, Clone)]
struct RpcListenCommand {
    #[clap(short = 't', long = "topics", help = "Subscribe to topics")]
    topics: Vec<String>,
}
// `benchmark` subcommand: iters is the TOTAL across all workers
#[derive(Parser, Clone)]
struct BenchmarkCommand {
    #[clap(short = 'w', long = "workers", default_value = "1")]
    workers: u32,
    #[clap(long = "payload-size", default_value = "100")]
    payload_size: usize,
    #[clap(short = 'i', long = "iters", default_value = "1000000")]
    iters: u32,
}
// top-level CLI subcommands
#[derive(Clone, Subcommand)]
enum Command {
    #[clap(subcommand)]
    Broker(BrokerCommand),
    Listen(ListenCommand),
    // raw identifier: `send` alone would clash with the `Send` marker trait name
    r#Send(TargetPayload),
    Publish(PublishCommand),
    #[clap(subcommand)]
    Rpc(RpcCommand),
    Benchmark(BenchmarkCommand),
}
// global CLI options; `path` is the broker socket (unix path or host:port)
#[derive(Parser)]
#[clap(version = busrt::VERSION, author = busrt::AUTHOR)]
struct Opts {
    #[clap(name = "socket path or host:port")]
    path: String,
    // client name; auto-generated from hostname+pid when omitted
    #[clap(short = 'n', long = "name")]
    name: Option<String>,
    #[clap(long = "buf-size", default_value = "8192")]
    buf_size: usize,
    #[clap(long = "queue-size", default_value = "8192")]
    queue_size: usize,
    // seconds, fractional allowed
    #[clap(long = "timeout", default_value = "5")]
    timeout: f32,
    #[clap(short = 'v', long = "verbose")]
    verbose: bool,
    #[clap(short = 's', long = "silent", help = "suppress logging")]
    silent: bool,
    #[clap(subcommand)]
    command: Command,
}
/// Builds a borderless console table with bold-blue titles and a single
/// dashed separator line under the title row.
fn ctable(titles: Vec<&str>) -> prettytable::Table {
    let fmt = prettytable::format::FormatBuilder::new()
        .column_separator(' ')
        .borders(' ')
        .separators(
            &[prettytable::format::LinePosition::Title],
            prettytable::format::LineSeparator::new('-', '-', '-', '-'),
        )
        .padding(0, 1)
        .build();
    let mut table = prettytable::Table::new();
    table.set_format(fmt);
    // "Fb" style spec = bold blue foreground
    let header: Vec<prettytable::Cell> = titles
        .into_iter()
        .map(|t| prettytable::Cell::new(t).style_spec("Fb"))
        .collect();
    table.set_titles(prettytable::Row::new(header));
    table
}
// try to interpret a raw payload as msgpack
#[inline]
fn decode_msgpack(payload: &[u8]) -> Result<Value, rmp_serde::decode::Error> {
    rmp_serde::from_slice(payload)
}
// try to interpret a textual payload as a JSON object
#[inline]
fn decode_json(payload: &str) -> Result<BTreeMap<Value, Value>, serde_json::Error> {
    serde_json::from_str(payload)
}
/// Pretty-prints a payload, guessing its format: JSON text, msgpack,
/// plain text, hex dump; in silent mode output is machine-friendly
/// (compact JSON, raw bytes for undecodable data).
async fn print_payload(payload: &[u8], silent: bool) {
    // bytes below 9 (control chars before TAB) mean "not printable text"
    let looks_textual = payload.iter().all(|&b| b >= 9);
    if looks_textual {
        if let Ok(s) = std::str::from_utf8(payload) {
            match decode_json(s) {
                Ok(j) => {
                    if !silent {
                        println!("JSON:");
                    }
                    let rendered = if silent {
                        serde_json::to_string(&j)
                    } else {
                        serde_json::to_string_pretty(&j)
                    }
                    .unwrap();
                    println!("{}", rendered);
                }
                Err(_) => {
                    println!("{} {}", if silent { "" } else { "STR: " }, s);
                }
            }
            return;
        }
    }
    match decode_msgpack(payload) {
        Ok(data) => {
            if !silent {
                println!("MSGPACK:");
            }
            let rendered = if silent {
                serde_json::to_string(&data)
            } else {
                serde_json::to_string_pretty(&data)
            };
            // msgpack values may not be representable as JSON — fall back to hex
            if let Ok(s) = rendered {
                println!("{}", s);
            } else {
                print_hex(payload);
            }
        }
        // undecodable: raw bytes in silent mode, hex dump otherwise
        Err(_) if silent => {
            let mut stdout = tokio::io::stdout();
            stdout.write_all(payload).await.unwrap();
        }
        Err(_) => print_hex(payload),
    }
}
/// Hex-dumps a payload, truncating after 256 bytes with an ellipsis.
fn print_hex(payload: &[u8]) {
    let shown = &payload[..payload.len().min(256)];
    let suffix = if payload.len() > 256 { "..." } else { "" };
    println!("HEX: {}{}", hex::encode(shown), suffix);
}
// dimmed visual separator between printed frames
#[inline]
fn sep() {
    println!("{}", "----".dimmed());
}
// formats a number with `_` as the thousands separator (1_000_000)
macro_rules! fnum {
    ($n: expr) => {
        $n.to_formatted_string(&Locale::en).replace(',', "_")
    };
}
// prints a green "OK" success marker
macro_rules! ok {
    () => {
        println!("{}", "OK".green());
    };
}
/// Subscribes the client to every topic in `topics`, logging each one.
///
/// Bus and confirmation-channel errors are propagated to the caller via `?`
/// instead of panicking inside the helper — the function already declared a
/// `Result` return type, but previously unwrapped every step, which made the
/// return type meaningless.
async fn subscribe_topics(client: &mut Client, topics: &[String]) -> Result<(), Error> {
    for topic in topics {
        info!("subscribing to a topic {}", topic.yellow());
    }
    client
        .subscribe_bulk(
            &topics.iter().map(String::as_str).collect::<Vec<&str>>(),
            QoS::Processed,
        )
        .await?
        // QoS::Processed requests a confirmation, so the channel must exist
        .expect("confirmation channel expected for QoS::Processed")
        .await?
}
/// Excludes every topic in `topics` for the client, logging each one.
///
/// Bus and confirmation-channel errors are propagated to the caller via `?`
/// instead of panicking inside the helper — the function already declared a
/// `Result` return type, but previously unwrapped every step, which made the
/// return type meaningless.
async fn exclude_topics(client: &mut Client, topics: &[String]) -> Result<(), Error> {
    for topic in topics {
        info!("excluding a topic {}", topic.yellow());
    }
    client
        .exclude_bulk(
            &topics.iter().map(String::as_str).collect::<Vec<&str>>(),
            QoS::Processed,
        )
        .await?
        // QoS::Processed requests a confirmation, so the channel must exist
        .expect("confirmation channel expected for QoS::Processed")
        .await?
}
// prints one incoming frame: kind, sender (full + primary), optional topic
// and the decoded payload, followed by a separator line
async fn print_frame(frame: &Frame) {
    info!("Incoming frame {} byte(s)", fnum!(frame.payload().len()));
    println!(
        "{} from {} ({})",
        frame.kind().to_debug_string().yellow(),
        frame.sender().bold(),
        frame.primary_sender()
    );
    if let Some(topic) = frame.topic() {
        println!("topic: {}", topic.magenta());
    }
    print_payload(frame.payload(), false).await;
    sep();
}
// RPC handlers for the `rpc listen` mode: print everything that arrives
struct Handlers {}
#[async_trait]
impl RpcHandlers for Handlers {
    // plain (non-RPC) frame, e.g. a pub/sub message on a subscribed topic
    async fn handle_frame(&self, frame: Frame) {
        print_frame(&frame).await;
    }
    // RPC notification: no reply expected
    async fn handle_notification(&self, event: RpcEvent) {
        info!(
            "Incoming RPC notification {} byte(s)",
            fnum!(event.payload().len())
        );
        println!(
            "{} from {} ({})",
            event.kind().to_debug_string().yellow(),
            event.sender().bold(),
            event.primary_sender()
        );
        print_payload(event.payload(), false).await;
        sep();
    }
    // RPC call: print method + params, always reply with an empty success
    async fn handle_call(&self, event: RpcEvent) -> RpcResult {
        info!("Incoming RPC call");
        println!(
            "method: {}",
            event
                .parse_method()
                // non-UTF-8 method names are shown as hex instead of failing
                .map_or_else(
                    |_| format!("HEX: {}", hex::encode(event.method())),
                    ToOwned::to_owned
                )
                .blue()
                .bold()
        );
        println!(
            "from {} ({})",
            event.sender().bold(),
            event.primary_sender()
        );
        print_payload(event.payload(), false).await;
        sep();
        Ok(None)
    }
}
/// Reads the whole of stdin into a byte buffer; prints a hint first when
/// stdin is an interactive terminal.
///
/// # Panics
///
/// Panics with an explicit message if stdin cannot be read (previously a
/// bare `unwrap()` with no context).
async fn read_stdin() -> Vec<u8> {
    let mut stdin = tokio::io::stdin();
    let mut buf: Vec<u8> = Vec::new();
    if stdin.is_terminal() {
        println!("Reading stdin, Ctrl-D to finish...");
    }
    stdin
        .read_to_end(&mut buf)
        .await
        .expect("unable to read stdin");
    buf
}
/// Returns the payload bytes: the provided argument when present,
/// otherwise the full contents of stdin.
async fn get_payload(candidate: &Option<String>) -> Vec<u8> {
    match candidate {
        Some(p) => p.as_bytes().to_vec(),
        None => read_stdin().await,
    }
}
// connects a new bus client using the global CLI options; exits with a
// panic message when the broker is unreachable
async fn create_client(opts: &Opts, name: &str) -> Client {
    let config = Config::new(&opts.path, name)
        .buf_size(opts.buf_size)
        .queue_size(opts.queue_size)
        .timeout(Duration::from_secs_f32(opts.timeout));
    Client::connect(&config)
        .await
        .expect("Unable to connect to the busrt broker")
}
// awaits all spawned benchmark futures (draining `$futs`) and closes the
// current benchmark stage with the total iteration count
macro_rules! bm_finish {
    ($iters: expr, $futs: expr) => {
        while let Some(f) = $futs.pop() {
            f.await.unwrap();
        }
        staged_benchmark_finish_current!($iters);
    };
}
// Raw client benchmark: measures `send` throughput per QoS, both
// fire-and-forget (to a nonexistent "-null" target) and send+receive
// (to the client itself). `iters` is the total across all workers.
async fn benchmark_client(
    opts: &Opts,
    client_name: &str,
    iters: u32,
    workers: u32,
    payload_size: usize,
) {
    let iters_worker = iters / workers;
    // shared payload buffer, 0xee-filled
    let data = Arc::new(vec![0xee; payload_size]);
    let mut clients = Vec::new();
    let mut ecs = Vec::new();
    let mut cnns = Vec::new();
    let mut futs = Vec::new();
    // one connection per worker; "-null" names are targets nobody listens on
    for w in 0..workers {
        let cname = format!("{}-{}", client_name, w + 1);
        let cname_null = format!("{}-{}-null", client_name, w + 1);
        let mut client = create_client(opts, &cname).await;
        let rx = client.take_event_channel().unwrap();
        clients.push(Arc::new(Mutex::new(client)));
        ecs.push(Arc::new(Mutex::new(rx)));
        cnns.push(cname_null);
    }
    // drain any events left over from a previous stage
    macro_rules! clear {
        () => {
            for e in &ecs {
                let rx = e.lock().await;
                while !rx.is_empty() {
                    let _r = rx.recv().await;
                }
            }
        };
    }
    // spawns a task sending `iters_worker` frames; waits for the delivery
    // confirmation only when the QoS requires one
    macro_rules! spawn_sender {
        ($client: expr, $target: expr, $payload: expr, $qos: expr) => {
            futs.push(tokio::spawn(async move {
                let mut client = $client.lock().await;
                for _ in 0..iters_worker {
                    let result = client
                        .send(&$target, $payload.clone().into(), $qos)
                        .await
                        .unwrap();
                    if $qos.needs_ack() {
                        let _r = result.unwrap().await.unwrap();
                    }
                }
            }));
        };
    }
    warmup!();
    // stage 1: pure send throughput (frames go to a void target)
    for q in &[(QoS::No, "no"), (QoS::RealtimeProcessed, "processed")] {
        let qos = q.0;
        clear!();
        staged_benchmark_start!(&format!("send.qos.{}", q.1));
        for w in 0..workers {
            let client = clients[w as usize].clone();
            let payload = data.clone();
            let target = cnns[w as usize].clone();
            spawn_sender!(client, target, payload, qos);
        }
        bm_finish!(iters, futs);
    }
    // stage 2: round-trip — each worker sends to itself and counts receipts
    for q in &[(QoS::No, "no"), (QoS::RealtimeProcessed, "processed")] {
        let qos = q.0;
        clear!();
        staged_benchmark_start!(&format!("send+recv.qos.{}", q.1));
        for w in 0..workers {
            let client = clients[w as usize].clone();
            let target = client.lock().await.get_name().to_owned();
            let payload = data.clone();
            spawn_sender!(client, target, payload, qos);
            let crx = ecs[w as usize].clone();
            futs.push(tokio::spawn(async move {
                let rx = crx.lock().await;
                let mut cnt = 0;
                while cnt < iters_worker {
                    let _r = rx.recv().await;
                    cnt += 1;
                }
            }));
        }
        bm_finish!(iters, futs);
    }
}
// RPC handlers used by the benchmark: ignore frames/notifications and echo
// the payload back for "benchmark.selftest" calls
struct BenchmarkHandlers {}
#[async_trait]
impl RpcHandlers for BenchmarkHandlers {
    async fn handle_frame(&self, _frame: Frame) {}
    async fn handle_notification(&self, _event: RpcEvent) {}
    async fn handle_call(&self, event: RpcEvent) -> RpcResult {
        if event.parse_method()? == "benchmark.selftest" {
            // echo: reply with the request payload unchanged
            Ok(Some(event.payload().to_vec()))
        } else {
            Err(RpcError::method(None))
        }
    }
}
// RPC-layer benchmark: broker echo calls, self-handled calls and
// fire-and-forget call0 to a void target. `iters` is the total across
// all workers.
async fn benchmark_rpc(
    opts: &Opts,
    client_name: &str,
    iters: u32,
    workers: u32,
    payload_size: usize,
) {
    let iters_worker = iters / workers;
    // shared payload buffer, 0xee-filled
    let data = Arc::new(vec![0xee; payload_size]);
    let mut rpcs = Vec::new();
    let mut cnns = Vec::new();
    let mut futs = Vec::new();
    // one RPC client per worker; "-null" names are targets nobody serves
    for w in 0..workers {
        let cname = format!("{}-{}", client_name, w + 1);
        let cname_null = format!("{}-{}-null", client_name, w + 1);
        let client = create_client(opts, &cname).await;
        let rpc = RpcClient::new(client, BenchmarkHandlers {});
        rpcs.push(Arc::new(Mutex::new(rpc)));
        cnns.push(cname_null);
    }
    // spawns a task performing `iters_worker` RPC ops; `$cr` selects
    // `call` (verify echoed payload) vs `call0` (await delivery only)
    macro_rules! spawn_caller {
        ($rpc: expr, $target: expr, $method: expr, $payload: expr, $cr: expr) => {
            futs.push(tokio::spawn(async move {
                let rpc = $rpc.lock().await;
                for _ in 0..iters_worker {
                    if $cr {
                        let result = rpc
                            .call(
                                &$target,
                                $method,
                                $payload.clone().into(),
                                QoS::RealtimeProcessed,
                            )
                            .await
                            .unwrap();
                        assert_eq!(result.payload(), *$payload);
                    } else {
                        let result = rpc
                            .call0(
                                &$target,
                                $method,
                                $payload.clone().into(),
                                QoS::RealtimeProcessed,
                            )
                            .await
                            .unwrap();
                        let _r = result.unwrap().await.unwrap();
                    }
                }
            }));
        };
    }
    // stage 1: round-trip calls answered by the broker core
    staged_benchmark_start!("rpc.call");
    for w in 0..workers {
        let rpc = rpcs[w as usize].clone();
        let payload = data.clone();
        spawn_caller!(rpc, ".broker", "benchmark.test", payload, true);
    }
    bm_finish!(iters, futs);
    // stage 2: calls answered by the worker's own BenchmarkHandlers echo
    staged_benchmark_start!("rpc.call+handle");
    for w in 0..workers {
        let rpc = rpcs[w as usize].clone();
        let target = rpc.lock().await.client().lock().await.get_name().to_owned();
        let payload = data.clone();
        spawn_caller!(rpc, target, "benchmark.selftest", payload, true);
    }
    bm_finish!(iters, futs);
    // stage 3: fire-and-forget calls to a void target
    staged_benchmark_start!("rpc.call0");
    for w in 0..workers {
        let rpc = rpcs[w as usize].clone();
        let target = cnns[w as usize].clone();
        let payload = data.clone();
        spawn_caller!(rpc, target, "test", payload, false);
    }
    bm_finish!(iters, futs);
}
// CLI entry point: parses options, configures logging, connects a client
// and dispatches to the selected subcommand.
#[allow(clippy::too_many_lines)]
#[tokio::main(worker_threads = 1)]
async fn main() {
    let opts = Opts::parse();
    // default client name: cli.<hostname>.<pid>
    let client_name = opts.name.as_ref().map_or_else(
        || {
            format!(
                "cli.{}.{}",
                hostname::get()
                    .expect("Unable to get hostname")
                    .to_str()
                    .expect("Unable to parse hostname"),
                std::process::id()
            )
        },
        ToOwned::to_owned,
    );
    if !opts.silent {
        env_logger::Builder::new()
            .target(env_logger::Target::Stdout)
            .filter_level(if opts.verbose {
                log::LevelFilter::Trace
            } else {
                log::LevelFilter::Info
            })
            .init();
    }
    info!(
        "Connecting to {}, using service name {}",
        opts.path, client_name
    );
    // builds (RpcClient, payload) for call/call0: "-" means raw stdin,
    // otherwise key=value pairs are packed into a msgpack map
    macro_rules! prepare_rpc_call {
        ($c: expr, $client: expr) => {{
            let rpc = RpcClient::new($client, DummyHandlers {});
            let payload = if $c.params.len() == 1 && $c.params[0] == "-" {
                read_stdin().await
            } else if $c.params.is_empty() {
                Vec::new()
            } else {
                let s = $c.params.iter().map(String::as_str).collect::<Vec<&str>>();
                rmp_serde::to_vec_named(&busrt::common::str_to_params_map(&s).unwrap()).unwrap()
            };
            (rpc, payload)
        }};
    }
    let timeout = Duration::from_secs_f32(opts.timeout);
    // awaits a future with the global timeout, panicking on expiry
    macro_rules! wto {
        ($fut: expr) => {
            tokio::time::timeout(timeout, $fut)
                .await
                .expect("timed out")
        };
    }
    match opts.command {
        // broker core RPC calls, rendered as tables
        Command::Broker(ref op) => {
            let client = create_client(&opts, &client_name).await;
            match op {
                BrokerCommand::ClientList => {
                    let rpc = RpcClient::new(client, DummyHandlers {});
                    let result =
                        wto!(rpc.call(".broker", "client.list", empty_payload!(), QoS::Processed))
                            .unwrap();
                    let mut clients: ClientList = rmp_serde::from_slice(result.payload()).unwrap();
                    clients.clients.sort();
                    let mut table = ctable(vec![
                        "name", "type", "source", "port", "r_frames", "r_bytes", "w_frames",
                        "w_bytes", "queue", "ins",
                    ]);
                    for c in clients.clients {
                        // hide this CLI's own connection from the listing
                        if c.name != client_name {
                            table.add_row(row![
                                c.name,
                                c.kind,
                                c.source.unwrap_or_default(),
                                c.port.unwrap_or_default(),
                                fnum!(c.r_frames),
                                fnum!(c.r_bytes),
                                fnum!(c.w_frames),
                                fnum!(c.w_bytes),
                                fnum!(c.queue),
                                fnum!(c.instances),
                            ]);
                        }
                    }
                    table.printstd();
                }
                BrokerCommand::Stats => {
                    let rpc = RpcClient::new(client, DummyHandlers {});
                    let result =
                        wto!(rpc.call(".broker", "stats", empty_payload!(), QoS::Processed))
                            .unwrap();
                    let stats: BrokerStats = rmp_serde::from_slice(result.payload()).unwrap();
                    let mut table = ctable(vec!["field", "value"]);
                    table.add_row(row!["r_frames", stats.r_frames]);
                    table.add_row(row!["r_bytes", stats.r_bytes]);
                    table.add_row(row!["w_frames", stats.w_frames]);
                    table.add_row(row!["w_bytes", stats.w_bytes]);
                    table.add_row(row!["uptime", stats.uptime]);
                    table.printstd();
                }
                BrokerCommand::Info => {
                    let rpc = RpcClient::new(client, DummyHandlers {});
                    let result =
                        wto!(rpc.call(".broker", "info", empty_payload!(), QoS::Processed))
                            .unwrap();
                    let info: BrokerInfo = rmp_serde::from_slice(result.payload()).unwrap();
                    let mut table = ctable(vec!["field", "value"]);
                    table.add_row(row!["author", info.author]);
                    table.add_row(row!["version", info.version]);
                    table.printstd();
                }
                BrokerCommand::Test => {
                    let rpc = RpcClient::new(client, DummyHandlers {});
                    let result =
                        wto!(rpc.call(".broker", "test", empty_payload!(), QoS::Processed))
                            .unwrap();
                    print_payload(result.payload(), opts.silent).await;
                }
            }
        }
        // subscribe and print frames until the connection drops
        Command::Listen(ref cmd) => {
            let mut client = create_client(&opts, &client_name).await;
            // exclusions are applied before subscriptions on purpose
            exclude_topics(&mut client, &cmd.exclude_topics)
                .await
                .unwrap();
            subscribe_topics(&mut client, &cmd.topics).await.unwrap();
            sep();
            let rx = client.take_event_channel().unwrap();
            println!(
                "Listening to messages for {} ...",
                client_name.cyan().bold()
            );
            let fut = tokio::spawn(async move {
                while let Ok(frame) = rx.recv().await {
                    print_frame(&frame).await;
                }
            });
            // poll connection state; abort the printer task on disconnect
            let sleep_step = Duration::from_millis(100);
            while client.is_connected() {
                sleep(sleep_step).await;
            }
            fut.abort();
        }
        Command::r#Send(ref cmd) => {
            let mut client = create_client(&opts, &client_name).await;
            let payload = get_payload(&cmd.payload).await;
            // wildcard characters in the target select broadcast mode
            let fut = if cmd.target.contains(&['*', '?'][..]) {
                client.send_broadcast(&cmd.target, payload.into(), QoS::Processed)
            } else {
                client.send(&cmd.target, payload.into(), QoS::Processed)
            };
            wto!(wto!(fut).unwrap().unwrap()).unwrap().unwrap();
            ok!();
        }
        Command::Publish(ref cmd) => {
            let mut client = create_client(&opts, &client_name).await;
            let payload = get_payload(&cmd.payload).await;
            wto!(
                wto!(client.publish(&cmd.topic, payload.into(), QoS::Processed))
                    .unwrap()
                    .unwrap()
            )
            .unwrap()
            .unwrap();
            ok!();
        }
        Command::Rpc(ref r) => {
            let mut client = create_client(&opts, &client_name).await;
            match r {
                RpcCommand::Listen(cmd) => {
                    subscribe_topics(&mut client, &cmd.topics).await.unwrap();
                    let rpc = RpcClient::new(client, Handlers {});
                    sep();
                    println!(
                        "Listening to RPC messages for {} ...",
                        client_name.cyan().bold()
                    );
                    let sleep_step = Duration::from_millis(100);
                    while rpc.is_connected() {
                        sleep(sleep_step).await;
                    }
                }
                RpcCommand::Notify(cmd) => {
                    let rpc = RpcClient::new(client, DummyHandlers {});
                    let payload = get_payload(&cmd.payload).await;
                    wto!(
                        wto!(rpc.notify(&cmd.target, payload.into(), QoS::Processed))
                            .unwrap()
                            .unwrap()
                    )
                    .unwrap()
                    .unwrap();
                    ok!();
                }
                // fire-and-forget call: only delivery is confirmed
                RpcCommand::Call0(cmd) => {
                    let (rpc, payload) = prepare_rpc_call!(cmd, client);
                    wto!(
                        wto!(rpc.call0(&cmd.target, &cmd.method, payload.into(), QoS::Processed))
                            .unwrap()
                            .unwrap()
                    )
                    .unwrap()
                    .unwrap();
                    ok!();
                }
                // full call: print the reply or the RPC error, exit 1 on error
                RpcCommand::Call(cmd) => {
                    let (rpc, payload) = prepare_rpc_call!(cmd, client);
                    match wto!(rpc.call(&cmd.target, &cmd.method, payload.into(), QoS::Processed)) {
                        Ok(result) => print_payload(result.payload(), opts.silent).await,
                        Err(e) => {
                            let message = e
                                .data()
                                .map_or("", |data| std::str::from_utf8(data).unwrap_or(""));
                            error!("RPC Error {}: {}", e.code(), message);
                            std::process::exit(1);
                        }
                    }
                }
            }
        }
        Command::Benchmark(ref cmd) => {
            println!(
                "Starting benchmark, {} worker(s), {} iters, {} iters/worker, {} byte(s) payload",
                cmd.workers.to_string().blue().bold(),
                fnum!(cmd.iters).yellow(),
                fnum!(cmd.iters / cmd.workers).bold(),
                fnum!(cmd.payload_size).cyan()
            );
            benchmark_client(
                &opts,
                &client_name,
                cmd.iters,
                cmd.workers,
                cmd.payload_size,
            )
            .await;
            benchmark_rpc(
                &opts,
                &client_name,
                cmd.iters,
                cmd.workers,
                cmd.payload_size,
            )
            .await;
            staged_benchmark_print!();
        }
    }
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/src/client.rs | src/client.rs | use crate::borrow::Cow;
use crate::{Error, EventChannel, OpConfirm, QoS};
use async_trait::async_trait;
use std::sync::{atomic, Arc};
use std::time::Duration;
/// Common asynchronous bus client interface, implemented by IPC and
/// inter-thread clients.
///
/// Send/publish/subscribe methods return `Result<OpConfirm, Error>`: the
/// outer `Result` reports a local send failure, while `OpConfirm` carries an
/// optional confirmation channel to await broker acknowledgement.
#[allow(clippy::module_name_repetitions)]
#[async_trait]
pub trait AsyncClient: Send + Sync {
    /// Takes the incoming-frame channel out of the client (can succeed once).
    fn take_event_channel(&mut self) -> Option<EventChannel>;
    /// Sends a direct message to a single target.
    async fn send(
        &mut self,
        target: &str,
        payload: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error>;
    /// Sends a message with a pre-built header prefix, keeping zero-copy.
    async fn zc_send(
        &mut self,
        target: &str,
        header: Cow<'async_trait>,
        payload: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error>;
    /// Sends a message to multiple peers (target may contain wildcards).
    async fn send_broadcast(
        &mut self,
        target: &str,
        payload: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error>;
    /// Publishes a message to a topic.
    async fn publish(
        &mut self,
        topic: &str,
        payload: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error>;
    /// Publishes a message to a topic for a single receiver; default
    /// implementation reports "not supported".
    #[allow(unused_variables)]
    async fn publish_for(
        &mut self,
        topic: &str,
        receiver: &str,
        payload: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error> {
        Err(Error::not_supported("publish_for"))
    }
    async fn subscribe(&mut self, topic: &str, qos: QoS) -> Result<OpConfirm, Error>;
    async fn unsubscribe(&mut self, topic: &str, qos: QoS) -> Result<OpConfirm, Error>;
    async fn subscribe_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<OpConfirm, Error>;
    async fn unsubscribe_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<OpConfirm, Error>;
    /// exclude a topic. it is highly recommended to exclude topics first, then call subscribe
    /// operations to avoid receiving unwanted messages. excluding topics is also an additional
    /// heavy operation so use it only when there is no other way.
    async fn exclude(&mut self, topic: &str, qos: QoS) -> Result<OpConfirm, Error>;
    /// unexclude a topic (include back but not subscribe)
    async fn unexclude(&mut self, topic: &str, qos: QoS) -> Result<OpConfirm, Error>;
    /// exclude multiple topics
    async fn exclude_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<OpConfirm, Error>;
    /// unexclude multiple topics (include back but not subscribe)
    async fn unexclude_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<OpConfirm, Error>;
    /// Sends a keep-alive / liveness check.
    async fn ping(&mut self) -> Result<(), Error>;
    /// Current connection state.
    fn is_connected(&self) -> bool;
    /// Shared flag mirroring the connection state, if the client provides one.
    fn get_connected_beacon(&self) -> Option<Arc<atomic::AtomicBool>>;
    /// Configured operation timeout, if any.
    fn get_timeout(&self) -> Option<Duration>;
    /// Registered client name.
    fn get_name(&self) -> &str;
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/src/comm.rs | src/comm.rs | use std::sync::Arc;
use std::time::Duration;
use tokio::io::{AsyncWriteExt, BufWriter};
use tokio::sync::oneshot;
use tokio::sync::Mutex;
use tokio::task::JoinHandle;
/// Write-buffer flushing strategy: `No` — leave data buffered,
/// `Scheduled` — flush after the buffer TTL, `Instant` — flush right away.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Flush {
    No,
    Scheduled,
    Instant,
}
impl From<bool> for Flush {
    /// Maps a frame's realtime flag to a strategy: realtime frames are
    /// flushed instantly, others on the TTL schedule.
    #[inline]
    fn from(realtime: bool) -> Self {
        match realtime {
            true => Flush::Instant,
            false => Flush::Scheduled,
        }
    }
}
/// Buffered writer that also flushes automatically a fixed TTL after a
/// scheduled write, and performs a final best-effort flush on drop.
pub struct TtlBufWriter<W> {
    // shared with the background flusher and the drop task
    writer: Arc<Mutex<BufWriter<W>>>,
    // capacity-1 channel used to schedule a TTL flush
    tx: async_channel::Sender<()>,
    // fires the final flush task on drop (taken exactly once)
    dtx: Option<oneshot::Sender<()>>,
    // background flusher task handle, aborted on drop
    flusher: JoinHandle<()>,
}
impl<W> TtlBufWriter<W>
where
    W: AsyncWriteExt + Unpin + Send + Sync + 'static,
{
    /// Wraps `writer` in a buffer of `cap` bytes; scheduled flushes happen
    /// `ttl` after the triggering write, each lock/flush bounded by `timeout`.
    pub fn new(writer: W, cap: usize, ttl: Duration, timeout: Duration) -> Self {
        let writer = Arc::new(Mutex::new(BufWriter::with_capacity(cap, writer)));
        let wf = writer.clone();
        // capacity 1: at most one pending scheduled flush at a time
        let (tx, rx) = async_channel::bounded::<()>(1);
        // flusher future
        let flusher = tokio::spawn(async move {
            while rx.recv().await.is_ok() {
                // wait the TTL, then flush (both lock and flush are bounded)
                async_io::Timer::after(ttl).await;
                if let Ok(mut writer) = tokio::time::timeout(timeout, wf.lock()).await {
                    let _r = tokio::time::timeout(timeout, writer.flush()).await;
                }
            }
        });
        let (dtx, drx) = oneshot::channel();
        let wf = writer.clone();
        // this future works on drop
        tokio::spawn(async move {
            let _r = drx.await;
            let mut writer = wf.lock().await;
            let _r = tokio::time::timeout(timeout, writer.flush()).await;
        });
        Self {
            writer,
            tx,
            dtx: Some(dtx),
            flusher,
        }
    }
    /// Writes the whole buffer; `Instant` flushes immediately, `Scheduled`
    /// arms the TTL flusher (if not already armed), `No` leaves data buffered.
    #[inline]
    pub async fn write(&mut self, buf: &[u8], flush: Flush) -> std::io::Result<()> {
        let mut writer = self.writer.lock().await;
        let result = writer.write_all(buf).await;
        if flush == Flush::Instant {
            writer.flush().await?;
        } else if flush == Flush::Scheduled && self.tx.is_empty() {
            // empty channel check avoids queuing duplicate flush requests
            let _ = self.tx.send(()).await;
        }
        result
    }
}
impl<W> Drop for TtlBufWriter<W> {
    fn drop(&mut self) {
        // stop the TTL flusher, then trigger the final best-effort flush task
        self.flusher.abort();
        // unwrap is safe: dtx is set in new() and drop runs at most once
        let _ = self.dtx.take().unwrap().send(());
    }
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/src/server.rs | src/server.rs | #[macro_use]
extern crate lazy_static;
#[cfg(not(feature = "std-alloc"))]
#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
use chrono::prelude::*;
use clap::Parser;
use colored::Colorize;
use log::{error, info, trace};
use log::{Level, LevelFilter};
use std::sync::atomic;
use std::time::Duration;
use tokio::signal::unix::{signal, SignalKind};
use tokio::sync::Mutex;
use tokio::time::sleep;
#[cfg(feature = "rpc")]
use busrt::broker::BrokerEvent;
use busrt::broker::{Broker, Options, ServerConfig};
// set to false by terminate() to request server shutdown
static SERVER_ACTIVE: atomic::AtomicBool = atomic::AtomicBool::new(true);
lazy_static! {
    // files removed on termination + the broker singleton
    static ref PID_FILE: Mutex<Option<String>> = Mutex::new(None);
    static ref SOCK_FILES: Mutex<Vec<String>> = Mutex::new(Vec::new());
    static ref BROKER: Mutex<Option<Broker>> = Mutex::new(None);
}
// stdout logger with RFC 3339 timestamps and per-level terminal colors
struct SimpleLogger;
impl log::Log for SimpleLogger {
    // level filtering is done globally via log::set_max_level
    fn enabled(&self, _metadata: &log::Metadata) -> bool {
        true
    }
    fn log(&self, record: &log::Record) {
        if self.enabled(record.metadata()) {
            let s = format!(
                "{} {}",
                Local::now().to_rfc3339_opts(SecondsFormat::Secs, false),
                record.args()
            );
            println!(
                "{}",
                match record.level() {
                    Level::Trace => s.black().dimmed(),
                    Level::Debug => s.dimmed(),
                    Level::Warn => s.yellow().bold(),
                    Level::Error => s.red(),
                    Level::Info => s.normal(),
                }
            );
        }
    }
    // println! is line-buffered; nothing to flush explicitly
    fn flush(&self) {}
}
static LOGGER: SimpleLogger = SimpleLogger;
// installs the stdout logger with the given max level; panics if a logger
// was already installed (called once during startup)
fn set_verbose_logger(filter: LevelFilter) {
    log::set_logger(&LOGGER)
        .map(|()| log::set_max_level(filter))
        .unwrap();
}
// busrtd command-line options
// (`//` comments only — clap derive would turn `///` into help text)
#[allow(clippy::struct_excessive_bools)]
#[derive(Parser)]
struct Opts {
    #[clap(
        short = 'B',
        long = "bind",
        required = true,
        help = "Unix socket path, IP:PORT or fifo:path, can be specified multiple times"
    )]
    path: Vec<String>,
    #[clap(short = 'P', long = "pid-file")]
    pid_file: Option<String>,
    #[clap(long = "verbose", help = "Verbose logging")]
    verbose: bool,
    #[clap(short = 'D')]
    daemonize: bool,
    #[clap(long = "log-syslog", help = "Force log to syslog")]
    log_syslog: bool,
    #[clap(
        long = "force-register",
        help = "Force register new clients with duplicate names"
    )]
    force_register: bool,
    // tokio worker threads
    #[clap(short = 'w', default_value = "4")]
    workers: usize,
    #[clap(short = 't', default_value = "5", help = "timeout (seconds)")]
    timeout: f64,
    #[clap(
        long = "buf-size",
        default_value = "16384",
        help = "I/O buffer size, per client"
    )]
    buf_size: usize,
    #[clap(
        long = "buf-ttl",
        default_value = "10",
        help = "Write buffer TTL (microseconds)"
    )]
    buf_ttl: u64,
    #[clap(
        long = "queue-size",
        default_value = "8192",
        help = "frame queue size, per client"
    )]
    queue_size: usize,
}
// graceful shutdown: remove pid/socket files, announce broker shutdown and
// clear SERVER_ACTIVE so the main loop exits. `allow_log` is false for
// Ctrl-C so nothing is printed after the user interrupt.
async fn terminate(allow_log: bool) {
    if let Some(f) = PID_FILE.lock().await.as_ref() {
        // do not log anything on Ctrl-C (comment restored — original text was garbled)
        if allow_log {
            trace!("removing pid file {}", f);
        }
        let _r = std::fs::remove_file(f);
    }
    for f in SOCK_FILES.lock().await.iter() {
        if allow_log {
            trace!("removing sock file {}", f);
        }
        let _r = std::fs::remove_file(f);
    }
    if allow_log {
        info!("terminating");
    }
    #[cfg(feature = "rpc")]
    if let Some(broker) = BROKER.lock().await.as_ref() {
        if let Err(e) = broker.announce(BrokerEvent::shutdown()).await {
            error!("{}", e);
        }
    }
    SERVER_ACTIVE.store(false, atomic::Ordering::Relaxed);
    // give clients a moment to receive the shutdown announcement
    #[cfg(feature = "rpc")]
    sleep(Duration::from_secs(1)).await;
}
// spawns a task that waits for the given unix signal and runs terminate();
// $allow_log is false for SIGINT so Ctrl-C stays silent
macro_rules! handle_term_signal {
    ($kind: expr, $allow_log: expr) => {
        tokio::spawn(async move {
            trace!("starting handler for {:?}", $kind);
            loop {
                match signal($kind) {
                    Ok(mut v) => {
                        v.recv().await;
                    }
                    Err(e) => {
                        error!("Unable to bind to signal {:?}: {}", $kind, e);
                        break;
                    }
                }
                // do not log anything on C-c
                if $allow_log {
                    trace!("got termination signal");
                }
                terminate($allow_log).await
            }
        });
    };
}
// busrtd entry point: configure logging, optionally daemonize, then run the
// broker inside a multi-threaded tokio runtime until a termination signal.
#[allow(clippy::too_many_lines)]
fn main() {
    #[cfg(feature = "tracing")]
    console_subscriber::init();
    let opts: Opts = Opts::parse();
    // logging: verbose stdout > plain stdout (foreground / DISABLE_SYSLOG) >
    // syslog, falling back to stdout when syslog is unavailable
    if opts.verbose {
        set_verbose_logger(LevelFilter::Trace);
    } else if (!opts.daemonize
        || std::env::var("DISABLE_SYSLOG").unwrap_or_else(|_| "0".to_owned()) == "1")
        && !opts.log_syslog
    {
        set_verbose_logger(LevelFilter::Info);
    } else {
        let formatter = syslog::Formatter3164 {
            facility: syslog::Facility::LOG_USER,
            hostname: None,
            process: "busrtd".into(),
            pid: 0,
        };
        match syslog::unix(formatter) {
            Ok(logger) => {
                log::set_boxed_logger(Box::new(syslog::BasicLogger::new(logger)))
                    .map(|()| log::set_max_level(LevelFilter::Info))
                    .unwrap();
            }
            Err(_) => {
                set_verbose_logger(LevelFilter::Info);
            }
        }
    }
    let timeout = Duration::from_secs_f64(opts.timeout);
    let buf_ttl = Duration::from_micros(opts.buf_ttl);
    info!("starting BUS/RT server");
    info!("workers: {}", opts.workers);
    info!("buf size: {}", opts.buf_size);
    info!("buf ttl: {:?}", buf_ttl);
    info!("queue size: {}", opts.queue_size);
    info!("timeout: {:?}", timeout);
    // daemonize BEFORE creating the tokio runtime (forking after would
    // leave runtime threads behind in the parent)
    if opts.daemonize {
        if let Ok(fork::Fork::Child) = fork::daemon(true, false) {
            std::process::exit(0);
        }
    }
    let rt = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(opts.workers)
        .enable_all()
        .build()
        .unwrap();
    rt.block_on(async move {
        if let Some(pid_file) = opts.pid_file {
            let pid = std::process::id().to_string();
            tokio::fs::write(&pid_file, pid)
                .await
                .expect("Unable to write pid file");
            info!("created pid file {}", pid_file);
            PID_FILE.lock().await.replace(pid_file);
        }
        // SIGINT stays silent, SIGTERM logs the shutdown
        handle_term_signal!(SignalKind::interrupt(), false);
        handle_term_signal!(SignalKind::terminate(), true);
        let mut broker = Broker::create(&Options::default().force_register(opts.force_register));
        #[cfg(feature = "rpc")]
        broker.init_default_core_rpc().await.unwrap();
        broker.set_queue_size(opts.queue_size);
        let mut sock_files = SOCK_FILES.lock().await;
        // bind every requested endpoint: fifo:, unix socket or tcp
        for path in opts.path {
            info!("binding at {}", path);
            #[allow(clippy::case_sensitive_file_extension_comparisons)]
            if let Some(_fifo) = path.strip_prefix("fifo:") {
                #[cfg(feature = "rpc")]
                {
                    broker
                        .spawn_fifo(_fifo, opts.buf_size)
                        .await
                        .expect("unable to start fifo server");
                    sock_files.push(_fifo.to_owned());
                }
            } else {
                let server_config = ServerConfig::new()
                    .buf_size(opts.buf_size)
                    .buf_ttl(buf_ttl)
                    .timeout(timeout);
                // heuristics: socket-like suffixes or absolute paths mean unix
                if path.ends_with(".sock")
                    || path.ends_with(".socket")
                    || path.ends_with(".ipc")
                    || path.starts_with('/')
                {
                    broker
                        .spawn_unix_server(&path, server_config)
                        .await
                        .expect("Unable to start unix server");
                    sock_files.push(path);
                } else {
                    broker
                        .spawn_tcp_server(&path, server_config)
                        .await
                        .expect("Unable to start tcp server");
                }
            }
        }
        drop(sock_files);
        BROKER.lock().await.replace(broker);
        info!("BUS/RT broker started");
        // idle loop until terminate() clears SERVER_ACTIVE
        let sleep_step = Duration::from_millis(100);
        loop {
            if !SERVER_ACTIVE.load(atomic::Ordering::Relaxed) {
                break;
            }
            sleep(sleep_step).await;
        }
    });
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/src/common.rs | src/common.rs | #[cfg(feature = "rpc")]
use crate::Error;
#[cfg(feature = "rpc")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "rpc")]
use serde_value::Value;
#[cfg(feature = "rpc")]
use std::collections::HashMap;
#[cfg_attr(feature = "rpc", derive(Serialize, Deserialize))]
#[derive(Eq, PartialEq, Clone)]
/// Per-client information/statistics record, borrowed from broker state
/// (served e.g. via the broker RPC "client list" call).
pub struct ClientInfo<'a> {
    /// full client name
    pub name: &'a str,
    /// transport kind (e.g. internal / local ipc / tcp)
    pub kind: &'a str,
    /// connection source, when known (e.g. a peer address)
    pub source: Option<&'a str>,
    /// server port/path the client connected to, when known
    pub port: Option<&'a str>,
    /// frames received from the client
    pub r_frames: u64,
    /// bytes received from the client
    pub r_bytes: u64,
    /// frames sent to the client
    pub w_frames: u64,
    /// bytes sent to the client
    pub w_bytes: u64,
    /// current outgoing queue fill
    pub queue: usize,
    /// number of connection instances under this name
    pub instances: usize,
}
impl Ord for ClientInfo<'_> {
    /// Clients sort alphabetically by their full name.
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        Ord::cmp(self.name, other.name)
    }
}
impl PartialOrd for ClientInfo<'_> {
    /// Delegates to the total name-based ordering.
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(Ord::cmp(self, other))
    }
}
#[cfg_attr(feature = "rpc", derive(Serialize, Deserialize))]
#[derive(Clone)]
/// A list of [`ClientInfo`] records (RPC reply payload).
pub struct ClientList<'a> {
    #[cfg_attr(feature = "rpc", serde(borrow))]
    pub clients: Vec<ClientInfo<'a>>,
}
#[cfg_attr(feature = "rpc", derive(Serialize, Deserialize))]
#[derive(Clone)]
/// Broker-wide traffic counters plus uptime (seconds).
pub struct BrokerStats {
    /// seconds since broker startup
    pub uptime: u64,
    /// total frames received
    pub r_frames: u64,
    /// total bytes received
    pub r_bytes: u64,
    /// total frames sent
    pub w_frames: u64,
    /// total bytes sent
    pub w_bytes: u64,
}
#[cfg_attr(feature = "rpc", derive(Serialize, Deserialize))]
#[derive(Clone)]
/// Static broker identification (author/version).
pub struct BrokerInfo<'a> {
    pub author: &'a str,
    pub version: &'a str,
}
#[allow(clippy::ptr_arg)]
#[cfg(feature = "rpc")]
/// Parses `var=value` string pairs into a parameter map with basic value
/// type guessing.
///
/// Coercion rules, in order: `false`/`true` -> Bool, parseable `i64` ->
/// I64, parseable `f64` -> F64, everything else -> String. Empty entries
/// are skipped.
///
/// # Errors
///
/// Returns a data error if a non-empty entry contains no `=` separator.
pub fn str_to_params_map<'a>(s: &'a [&'a str]) -> Result<HashMap<&'a str, Value>, Error> {
    let mut params: HashMap<&str, Value> = HashMap::new();
    for pair in s {
        if !pair.is_empty() {
            // FIX: split on the FIRST '=' only, so values themselves may
            // contain '=' (e.g. URLs with query strings); the previous
            // split('=') silently truncated such values at the second '='
            let (var, v) = pair
                .split_once('=')
                .ok_or_else(|| Error::data("var value not specified"))?;
            let value = if v == "false" {
                Value::Bool(false)
            } else if v == "true" {
                Value::Bool(true)
            } else if let Ok(i) = v.parse::<i64>() {
                Value::I64(i)
            } else if let Ok(f) = v.parse::<f64>() {
                Value::F64(f)
            } else {
                Value::String(v.to_owned())
            };
            params.insert(var, value);
        }
    }
    Ok(params)
}
#[cfg(feature = "broker")]
#[allow(clippy::cast_sign_loss)]
/// Current wall-clock time (CLOCK_REALTIME) in nanoseconds.
///
/// # Panics
///
/// Will panic if system clock is not available
pub fn now_ns() -> u64 {
    let ts = nix::time::clock_gettime(nix::time::ClockId::CLOCK_REALTIME).unwrap();
    let secs = ts.tv_sec() as u64;
    let nanos = ts.tv_nsec() as u64;
    secs * 1_000_000_000 + nanos
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/src/broker.rs | src/broker.rs | use crate::borrow::Cow;
use crate::client::AsyncClient;
use crate::comm::{Flush, TtlBufWriter};
#[cfg(feature = "broker-rpc")]
use crate::common::now_ns;
use crate::common::{BrokerInfo, BrokerStats};
#[cfg(feature = "broker-rpc")]
use crate::common::{ClientInfo, ClientList};
use crate::SECONDARY_SEP;
use crate::{Error, ErrorKind, GREETINGS, PROTOCOL_VERSION};
use crate::{EventChannel, OpConfirm};
use crate::{Frame, FrameData, FrameKind, FrameOp, QoS};
use crate::{ERR_ACCESS, ERR_DATA, ERR_NOT_SUPPORTED};
use crate::{OP_ACK, RESPONSE_OK};
use async_trait::async_trait;
use ipnetwork::IpNetwork;
use log::{debug, error, trace, warn};
#[cfg(not(feature = "rt"))]
use parking_lot::Mutex as SyncMutex;
#[cfg(feature = "rt")]
use parking_lot_rt::Mutex as SyncMutex;
#[cfg(feature = "broker-rpc")]
use serde::{Deserialize, Serialize};
use std::collections::{hash_map, HashMap, HashSet};
use std::fmt;
use std::marker::Unpin;
use std::net::IpAddr;
use std::net::SocketAddr;
use std::sync::atomic;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use submap::{AclMap, BroadcastMap, SubMap};
use tokio::io::{AsyncReadExt, AsyncWriteExt, BufReader};
use tokio::net::{TcpListener, TcpStream};
#[cfg(not(target_os = "windows"))]
use tokio::net::{UnixListener, UnixStream};
#[cfg(feature = "rpc")]
use tokio::sync::Mutex;
use tokio::task::JoinHandle;
use tokio::time;
#[cfg(feature = "rpc")]
use crate::rpc::RpcClient;
#[cfg(feature = "broker-rpc")]
use crate::rpc::{Rpc, RpcError, RpcEvent, RpcHandlers, RpcResult};
/// Default per-client outgoing frame queue capacity.
pub const DEFAULT_QUEUE_SIZE: usize = 8192;
const MAX_SENDER_NAME_LEN: usize = 256; // not enforced but pre-allocates buffers
/// Topic for broker announcements (client reg/unreg).
pub const BROKER_INFO_TOPIC: &str = ".broker/info";
/// Topic for broker warnings (e.g. shutdown); every client is auto-subscribed.
pub const BROKER_WARN_TOPIC: &str = ".broker/warn";
/// Reserved name of the broker core client.
pub const BROKER_NAME: &str = ".broker";
#[allow(dead_code)]
const BROKER_RPC_NOT_INIT_ERR: &str = "broker core RPC client not initialized";
// Logs a client error unless it is a plain EOF (normal disconnect).
macro_rules! pretty_error {
    ($name: expr, $err:expr) => {
        if $err.kind() != ErrorKind::Eof {
            error!("client {} error: {}", $name, $err);
        }
    };
}
// shared handle to a broker-side client record
type BrokerClient = Arc<BusRtClient>;
// Builds the operation-confirmation result for a given QoS: when the QoS
// requires an ack, returns a oneshot receiver that already holds Ok(())
// (broker-local operations complete immediately); otherwise returns None.
macro_rules! make_confirm_channel {
    ($qos: expr) => {
        if $qos.needs_ack() {
            let (tx, rx) = tokio::sync::oneshot::channel();
            let _r = tx.send(Ok(()));
            Ok(Some(rx))
        } else {
            Ok(None)
        }
    };
}
// Sends a frame to a target client, handling a full outgoing queue:
// internal clients are waited on (optionally bounded by a timeout), while
// external clients are force-unregistered and their channel closed so a
// slow consumer cannot stall the broker.
macro_rules! safe_send_frame {
    ($db: expr, $tgt: expr, $frame: expr, $timeout: expr) => {
        if $tgt.tx.is_full() {
            if $tgt.kind == BusRtClientKind::Internal {
                if let Some(timeout) = $timeout {
                    warn!(
                        "internal client {} queue is full, blocking for {:?}",
                        $tgt.name, timeout
                    );
                    time::timeout(timeout, $tgt.tx.send($frame))
                        .await?
                        .map_err(Into::into)
                } else {
                    warn!("internal client {} queue is full, blocking", $tgt.name);
                    $tgt.tx.send($frame).await.map_err(Into::into)
                }
            } else {
                warn!("client {} queue is full, force unregistering", $tgt.name);
                $db.unregister_client(&$tgt).await;
                $tgt.tx.close();
                Err(Error::not_delivered())
            }
        } else {
            $tgt.tx.send($frame).await.map_err(Into::into)
        }
    };
}
// Point-to-point delivery: updates sender/broker/receiver counters, looks
// up the target by exact name and forwards a Message frame to it.
macro_rules! send {
    ($db:expr, $client:expr, $target:expr, $header: expr,
     $buf:expr, $payload_pos:expr, $len: expr, $realtime: expr, $timeout: expr) => {{
        $client.r_frames.fetch_add(1, atomic::Ordering::Relaxed);
        $client.r_bytes.fetch_add($len, atomic::Ordering::Relaxed);
        $db.r_frames.fetch_add(1, atomic::Ordering::Relaxed);
        $db.r_bytes.fetch_add($len, atomic::Ordering::Relaxed);
        trace!("bus/rt message from {} to {}", $client, $target);
        // counters are bumped and the Arc cloned inside the lock scope,
        // then the lock is released before the (async) send
        let client = {
            $db.clients.lock().get($target).map(|c| {
                c.w_frames.fetch_add(1, atomic::Ordering::Relaxed);
                c.w_bytes.fetch_add($len, atomic::Ordering::Relaxed);
                $db.w_frames.fetch_add(1, atomic::Ordering::Relaxed);
                $db.w_bytes.fetch_add($len, atomic::Ordering::Relaxed);
                c.clone()
            })
        };
        if let Some(client) = client {
            let frame = Arc::new(FrameData {
                kind: FrameKind::Message,
                sender: Some($client.name.clone()),
                topic: None,
                header: $header,
                buf: $buf,
                payload_pos: $payload_pos,
                realtime: $realtime,
            });
            safe_send_frame!($db, client, frame, $timeout)
        } else {
            Err(Error::not_registered())
        }
    }};
}
// Broadcast delivery: resolves the target mask against the broadcast map
// and fans one shared Broadcast frame out to every matching client;
// individual delivery failures are ignored.
macro_rules! send_broadcast {
    ($db:expr, $client:expr, $target:expr, $header: expr,
     $buf:expr, $payload_pos:expr, $len: expr, $realtime: expr, $timeout: expr) => {{
        $client.r_frames.fetch_add(1, atomic::Ordering::Relaxed);
        $client.r_bytes.fetch_add($len, atomic::Ordering::Relaxed);
        $db.r_frames.fetch_add(1, atomic::Ordering::Relaxed);
        $db.r_bytes.fetch_add($len, atomic::Ordering::Relaxed);
        trace!("bus/rt broadcast message from {} to {}", $client, $target);
        #[allow(clippy::mutable_key_type)]
        let subs = { $db.broadcasts.lock().get_clients_by_mask($target) };
        if !subs.is_empty() {
            let frame = Arc::new(FrameData {
                kind: FrameKind::Broadcast,
                sender: Some($client.name.clone()),
                topic: None,
                header: $header,
                buf: $buf,
                payload_pos: $payload_pos,
                realtime: $realtime,
            });
            $db.w_frames
                .fetch_add(subs.len() as u64, atomic::Ordering::Relaxed);
            $db.w_bytes
                .fetch_add($len * subs.len() as u64, atomic::Ordering::Relaxed);
            for sub in subs {
                sub.w_frames.fetch_add(1, atomic::Ordering::Relaxed);
                sub.w_bytes.fetch_add($len, atomic::Ordering::Relaxed);
                let _r = safe_send_frame!($db, sub, frame.clone(), $timeout);
            }
        }
    }};
}
// Topic publish. First arm: delivers one shared Publish frame to every
// subscriber of the topic, honoring per-client topic exclusions. Second
// arm (publish-for): additionally restricts delivery to subscribers whose
// primary name equals $receiver.
macro_rules! publish {
    ($db:expr, $client:expr, $topic:expr, $header: expr,
     $buf:expr, $payload_pos:expr, $len: expr, $realtime: expr, $timeout: expr) => {{
        $client.r_frames.fetch_add(1, atomic::Ordering::Relaxed);
        $client.r_bytes.fetch_add($len, atomic::Ordering::Relaxed);
        $db.r_frames.fetch_add(1, atomic::Ordering::Relaxed);
        $db.r_bytes.fetch_add($len, atomic::Ordering::Relaxed);
        trace!("bus/rt topic publish from {} to {}", $client, $topic);
        #[allow(clippy::mutable_key_type)]
        let mut subs = { $db.subscriptions.lock().get_subscribers($topic) };
        // drop subscribers that explicitly excluded this topic (the
        // atomic flag avoids taking the exclusions lock in the common case)
        subs.retain(|sub| {
            !sub.has_exclusions.load(atomic::Ordering::Acquire)
                || !sub.exclusions.lock().matches($topic)
        });
        if !subs.is_empty() {
            let frame = Arc::new(FrameData {
                kind: FrameKind::Publish,
                sender: Some($client.name.clone()),
                topic: Some($topic.to_owned()),
                header: $header,
                buf: $buf,
                payload_pos: $payload_pos,
                realtime: $realtime,
            });
            $db.w_frames
                .fetch_add(subs.len() as u64, atomic::Ordering::Relaxed);
            $db.w_bytes
                .fetch_add($len * subs.len() as u64, atomic::Ordering::Relaxed);
            for sub in subs {
                sub.w_frames.fetch_add(1, atomic::Ordering::Relaxed);
                sub.w_bytes.fetch_add($len, atomic::Ordering::Relaxed);
                let _r = safe_send_frame!($db, sub, frame.clone(), $timeout);
            }
        }
    }};
    ($db:expr, $client:expr, $topic:expr, $receiver: expr, $header: expr,
     $buf:expr, $payload_pos:expr, $len: expr, $realtime: expr, $timeout: expr) => {{
        $client.r_frames.fetch_add(1, atomic::Ordering::Relaxed);
        $client.r_bytes.fetch_add($len, atomic::Ordering::Relaxed);
        $db.r_frames.fetch_add(1, atomic::Ordering::Relaxed);
        $db.r_bytes.fetch_add($len, atomic::Ordering::Relaxed);
        trace!("bus/rt topic publish from {} to {}", $client, $topic);
        #[allow(clippy::mutable_key_type)]
        let mut subs = { $db.subscriptions.lock().get_subscribers($topic) };
        // keep only the designated receiver, still honoring exclusions
        subs.retain(|sub| {
            sub.primary_name == $receiver
                && (!sub.has_exclusions.load(atomic::Ordering::Acquire)
                    || !sub.exclusions.lock().matches($topic))
        });
        if !subs.is_empty() {
            let frame = Arc::new(FrameData {
                kind: FrameKind::Publish,
                sender: Some($client.name.clone()),
                topic: Some($topic.to_owned()),
                header: $header,
                buf: $buf,
                payload_pos: $payload_pos,
                realtime: $realtime,
            });
            $db.w_frames
                .fetch_add(subs.len() as u64, atomic::Ordering::Relaxed);
            $db.w_bytes
                .fetch_add($len * subs.len() as u64, atomic::Ordering::Relaxed);
            for sub in subs {
                sub.w_frames.fetch_add(1, atomic::Ordering::Relaxed);
                sub.w_bytes.fetch_add($len, atomic::Ordering::Relaxed);
                let _r = safe_send_frame!($db, sub, frame.clone(), $timeout);
            }
        }
    }};
}
/// An internal (in-process) broker client handle, implementing
/// [`AsyncClient`] directly against broker state.
pub struct Client {
    // full client name as registered with the broker
    name: String,
    // broker-side record for this client
    bus: Arc<BusRtClient>,
    // shared broker state (clients / subscriptions / broadcasts)
    db: Arc<BrokerDb>,
    // incoming frame channel; taken once via take_event_channel()
    rx: Option<EventChannel>,
    // counter used to derive unique secondary client names
    secondary_counter: atomic::AtomicUsize,
}
#[async_trait]
// AsyncClient implemented directly on broker state: subscriptions and
// deliveries are performed in-process via the shared maps, so every
// operation "completes" immediately (see make_confirm_channel!).
impl AsyncClient for Client {
    /// # Panics
    ///
    /// Will panic if the mutex is poisoned
    async fn subscribe(&mut self, topic: &str, qos: QoS) -> Result<OpConfirm, Error> {
        if self.db.subscriptions.lock().subscribe(topic, &self.bus) {
            make_confirm_channel!(qos)
        } else {
            Err(Error::not_registered())
        }
    }
    /// # Panics
    ///
    /// Will panic if the mutex is poisoned
    async fn subscribe_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<OpConfirm, Error> {
        let mut db = self.db.subscriptions.lock();
        for topic in topics {
            if !db.subscribe(topic, &self.bus) {
                return Err(Error::not_registered());
            }
        }
        make_confirm_channel!(qos)
    }
    /// # Panics
    ///
    /// Will panic if the mutex is poisoned
    async fn unsubscribe(&mut self, topic: &str, qos: QoS) -> Result<OpConfirm, Error> {
        if self.db.subscriptions.lock().unsubscribe(topic, &self.bus) {
            make_confirm_channel!(qos)
        } else {
            Err(Error::not_registered())
        }
    }
    /// # Panics
    ///
    /// Will panic if the mutex is poisoned
    async fn unsubscribe_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<OpConfirm, Error> {
        let mut db = self.db.subscriptions.lock();
        for topic in topics {
            if !db.unsubscribe(topic, &self.bus) {
                return Err(Error::not_registered());
            }
        }
        make_confirm_channel!(qos)
    }
    // exclude a topic: stay subscribed but filter out matching publishes
    // (the atomic flag is a fast-path hint checked by the publish! macro)
    async fn exclude(&mut self, topic: &str, qos: QoS) -> Result<OpConfirm, Error> {
        self.bus
            .has_exclusions
            .store(true, atomic::Ordering::Release);
        self.bus.exclusions.lock().insert(topic);
        make_confirm_channel!(qos)
    }
    /// unexclude a topic (include back but not subscribe)
    async fn unexclude(&mut self, topic: &str, qos: QoS) -> Result<OpConfirm, Error> {
        let mut exclusions = self.bus.exclusions.lock();
        exclusions.remove(topic);
        // clear the fast-path flag once no exclusions remain
        if exclusions.is_empty() {
            self.bus
                .has_exclusions
                .store(false, atomic::Ordering::Release);
        }
        make_confirm_channel!(qos)
    }
    /// exclude multiple topics
    async fn exclude_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<OpConfirm, Error> {
        let mut exclusions = self.bus.exclusions.lock();
        if !topics.is_empty() {
            self.bus
                .has_exclusions
                .store(true, atomic::Ordering::Release);
        }
        for topic in topics {
            exclusions.insert(topic);
        }
        make_confirm_channel!(qos)
    }
    /// unexclude multiple topics (include back but not subscribe)
    async fn unexclude_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<OpConfirm, Error> {
        let mut exclusions = self.bus.exclusions.lock();
        for topic in topics {
            exclusions.remove(topic);
        }
        if exclusions.is_empty() {
            self.bus
                .has_exclusions
                .store(false, atomic::Ordering::Release);
        }
        make_confirm_channel!(qos)
    }
    // point-to-point message to a single peer
    #[inline]
    async fn send(
        &mut self,
        target: &str,
        payload: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error> {
        let len = payload.len() as u64;
        send!(
            self.db,
            self.bus,
            target,
            None,
            payload.to_vec(),
            0,
            len,
            qos.is_realtime(),
            self.get_timeout()
        )?;
        make_confirm_channel!(qos)
    }
    // point-to-point message with a separate (zero-copy) header part
    #[inline]
    async fn zc_send(
        &mut self,
        target: &str,
        header: Cow<'async_trait>,
        payload: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error> {
        let len = (payload.len() + header.len()) as u64;
        send!(
            self.db,
            self.bus,
            target,
            Some(header.to_vec()),
            payload.to_vec(),
            0,
            len,
            qos.is_realtime(),
            self.get_timeout()
        )?;
        make_confirm_channel!(qos)
    }
    // broadcast to all peers matching a name mask
    #[inline]
    async fn send_broadcast(
        &mut self,
        target: &str,
        payload: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error> {
        let len = payload.len() as u64;
        send_broadcast!(
            self.db,
            self.bus,
            target,
            None,
            payload.to_vec(),
            0,
            len,
            qos.is_realtime(),
            self.get_timeout()
        );
        make_confirm_channel!(qos)
    }
    // publish to all topic subscribers
    #[inline]
    async fn publish(
        &mut self,
        topic: &str,
        payload: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error> {
        let len = payload.len() as u64;
        publish!(
            self.db,
            self.bus,
            topic,
            None,
            payload.to_vec(),
            0,
            len,
            qos.is_realtime(),
            self.get_timeout()
        );
        make_confirm_channel!(qos)
    }
    // publish to a single designated subscriber only
    #[inline]
    async fn publish_for(
        &mut self,
        topic: &str,
        receiver: &str,
        payload: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error> {
        let len = payload.len() as u64;
        publish!(
            self.db,
            self.bus,
            topic,
            receiver,
            None,
            payload.to_vec(),
            0,
            len,
            qos.is_realtime(),
            self.get_timeout()
        );
        make_confirm_channel!(qos)
    }
    // the incoming-frame channel can be taken exactly once
    #[inline]
    fn take_event_channel(&mut self) -> Option<EventChannel> {
        self.rx.take()
    }
    // internal clients have no transport: ping is a no-op and the
    // connection is always considered alive with no timeout/beacon
    #[inline]
    async fn ping(&mut self) -> Result<(), Error> {
        Ok(())
    }
    #[inline]
    fn is_connected(&self) -> bool {
        true
    }
    #[inline]
    fn get_timeout(&self) -> Option<Duration> {
        None
    }
    #[inline]
    fn get_connected_beacon(&self) -> Option<Arc<atomic::AtomicBool>> {
        None
    }
    #[inline]
    fn get_name(&self) -> &str {
        self.name.as_str()
    }
}
impl Client {
    /// When an internal client is dropped, it is automatically dropped from the broker db, but no
    /// announce is sent. It is better to manually call "unregister" method before.
    #[inline]
    pub async fn unregister(&self) {
        // mark unregistered first so Drop does not repeat the work
        self.bus.registered.store(false, atomic::Ordering::SeqCst);
        self.db.unregister_client(&self.bus).await;
    }
}
// Safety net: silently remove the client from broker state when the handle
// is dropped without an explicit unregister() (no announce is sent).
impl Drop for Client {
    fn drop(&mut self) {
        self.db.drop_client(&self.bus);
    }
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
// transport over which a client talks to the broker
enum BusRtClientKind {
    // in-process client (no socket)
    Internal,
    // unix socket / local IPC
    LocalIpc,
    // TCP connection
    Tcp,
}
impl BusRtClientKind {
#[allow(dead_code)]
fn as_str(&self) -> &str {
match self {
BusRtClientKind::Internal => "internal",
BusRtClientKind::LocalIpc => "local_ipc",
BusRtClientKind::Tcp => "tcp",
}
}
}
#[allow(dead_code)]
#[derive(Debug)]
// Broker-side record of a connected client: identity, transport info,
// outgoing frame channel, traffic counters and topic exclusions.
struct BusRtClient {
    // full registered name
    name: String,
    // SHA-256 of the name; used for fast Eq/Ord (see trait impls below)
    digest: submap::digest::Sha256Digest,
    // name of the primary client (== name for primaries)
    primary_name: String,
    kind: BusRtClientKind,
    // connection source, when known
    source: Option<String>,
    // server port/path the client connected via, when known
    port: Option<String>,
    // fired to force-disconnect the client's transport task
    disconnect_trig: triggered::Trigger,
    // bounded outgoing frame queue
    tx: async_channel::Sender<Frame>,
    registered: atomic::AtomicBool,
    // traffic counters (r_* = received from client, w_* = written to it)
    r_frames: atomic::AtomicU64,
    r_bytes: atomic::AtomicU64,
    w_frames: atomic::AtomicU64,
    w_bytes: atomic::AtomicU64,
    primary: bool,
    // names of secondary connections registered under this primary
    secondaries: SyncMutex<HashSet<String>>,
    // fast-path flag: true while `exclusions` is non-empty
    has_exclusions: atomic::AtomicBool,
    // topics the client asked to be excluded from
    exclusions: SyncMutex<AclMap>,
}
impl fmt::Display for BusRtClient {
    /// A client displays as its full registered name.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.name)
    }
}
impl BusRtClient {
    // Builds a broker-side client record plus its incoming frame channel
    // (returned rx side) and a listener which fires when the broker
    // force-disconnects the client.
    pub fn new(
        name: &str,
        primary_name: &str,
        queue_size: usize,
        kind: BusRtClientKind,
        source: Option<String>,
        port: Option<String>,
    ) -> (Self, EventChannel, triggered::Listener) {
        // digest of the name powers fast Eq/Ord on client records
        let digest = submap::digest::sha256(name);
        let (tx, rx) = async_channel::bounded(queue_size);
        // a client is primary when it is registered under its own name
        let primary = name == primary_name;
        let (disconnect_trig, disconnect_listener) = triggered::trigger();
        (
            Self {
                name: name.to_owned(),
                digest,
                primary_name: primary_name.to_owned(),
                kind,
                source,
                port,
                disconnect_trig,
                tx,
                registered: atomic::AtomicBool::new(false),
                r_frames: atomic::AtomicU64::new(0),
                r_bytes: atomic::AtomicU64::new(0),
                w_frames: atomic::AtomicU64::new(0),
                w_bytes: atomic::AtomicU64::new(0),
                primary,
                secondaries: <_>::default(),
                has_exclusions: atomic::AtomicBool::new(false),
                // exclusions use MQTT-style '/' separator with +/# wildcards
                exclusions: SyncMutex::new(
                    AclMap::new().separator('/').match_any("+").wildcard("#"),
                ),
            },
            rx,
            disconnect_listener,
        )
    }
}
impl PartialEq for BusRtClient {
    /// Client identity is based on the SHA-256 digest of its name.
    fn eq(&self, other: &Self) -> bool {
        self.digest.eq(&other.digest)
    }
}
impl Ord for BusRtClient {
    /// Orders clients by their name digest (arbitrary but total).
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        Ord::cmp(&self.digest, &other.digest)
    }
}
impl PartialOrd for BusRtClient {
    /// Delegates to the total digest-based ordering.
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(Ord::cmp(self, other))
    }
}
// digest equality is a proper equivalence relation, so Eq is sound
impl Eq for BusRtClient {}
#[cfg_attr(feature = "broker-rpc", derive(Serialize, Deserialize))]
#[derive(Eq, PartialEq, Clone)]
#[allow(clippy::module_name_repetitions)]
/// Broker announcement event (reg / unreg / shutdown), published on an
/// internal broker topic.
pub struct BrokerEvent<'a> {
    // subject, e.g. "reg", "unreg", "shutdown"
    s: &'a str,
    #[cfg_attr(feature = "broker-rpc", serde(skip_serializing_if = "Option::is_none"))]
    // optional data (usually a client name)
    d: Option<&'a str>,
    // timestamp in ns; 0 until the event is announced
    t: u64,
    #[cfg_attr(feature = "broker-rpc", serde(skip))]
    // destination topic; not serialized
    topic: &'a str,
}
impl<'a> BrokerEvent<'a> {
    /// Builds an event with the given subject, optional data and topic.
    /// The timestamp stays 0 until the broker announces the event.
    pub fn new(s: &'a str, d: Option<&'a str>, topic: &'a str) -> Self {
        Self { s, d, t: 0, topic }
    }
    /// Broker shutdown warning event.
    pub fn shutdown() -> Self {
        Self::new("shutdown", None, BROKER_WARN_TOPIC)
    }
    /// Client registration announcement.
    pub fn reg(name: &'a str) -> Self {
        Self::new("reg", Some(name), BROKER_INFO_TOPIC)
    }
    /// Client unregistration announcement.
    pub fn unreg(name: &'a str) -> Self {
        Self::new("unreg", Some(name), BROKER_INFO_TOPIC)
    }
    /// Event subject ("reg", "unreg", "shutdown").
    pub fn subject(&self) -> &str {
        self.s
    }
    /// Optional event data (usually a client name).
    pub fn data(&self) -> Option<&str> {
        self.d
    }
    /// Event timestamp in nanoseconds (0 if not yet announced).
    pub fn time(&self) -> u64 {
        self.t
    }
}
// Shared broker state: client registry, broadcast/subscription maps,
// optional core RPC client and broker-wide traffic counters.
struct BrokerDb {
    // registered clients by full name
    clients: SyncMutex<HashMap<String, BrokerClient>>,
    // name-mask -> client map for broadcasts
    broadcasts: SyncMutex<BroadcastMap<BrokerClient>>,
    // topic -> subscriber map for publishes
    subscriptions: SyncMutex<SubMap<BrokerClient>>,
    #[cfg(feature = "rpc")]
    // broker core RPC client, set after init_default_core_rpc()
    rpc_client: Arc<Mutex<Option<RpcClient>>>,
    // global traffic counters (r_* = received, w_* = written)
    r_frames: atomic::AtomicU64,
    r_bytes: atomic::AtomicU64,
    w_frames: atomic::AtomicU64,
    w_bytes: atomic::AtomicU64,
    startup_time: Instant,
    // when true, a re-registration with a busy name evicts the old client
    force_register: bool,
}
impl Default for BrokerDb {
    fn default() -> Self {
        Self {
            clients: <_>::default(),
            // broadcast masks use '.' separator with ?/* wildcards
            broadcasts: SyncMutex::new(
                BroadcastMap::new()
                    .separator('.')
                    .match_any("?")
                    .wildcard("*"),
            ),
            // topics use MQTT-style '/' separator with +/# wildcards
            subscriptions: SyncMutex::new(
                SubMap::new().separator('/').match_any("+").wildcard("#"),
            ),
            #[cfg(feature = "rpc")]
            rpc_client: <_>::default(),
            r_frames: atomic::AtomicU64::new(0),
            r_bytes: atomic::AtomicU64::new(0),
            w_frames: atomic::AtomicU64::new(0),
            w_bytes: atomic::AtomicU64::new(0),
            startup_time: Instant::now(),
            force_register: false,
        }
    }
}
impl BrokerDb {
    /// Snapshot of broker-wide traffic counters plus uptime in seconds.
    fn stats(&self) -> BrokerStats {
        BrokerStats {
            uptime: self.startup_time.elapsed().as_secs(),
            r_frames: self.r_frames.load(atomic::Ordering::Relaxed),
            r_bytes: self.r_bytes.load(atomic::Ordering::Relaxed),
            w_frames: self.w_frames.load(atomic::Ordering::Relaxed),
            w_bytes: self.w_bytes.load(atomic::Ordering::Relaxed),
        }
    }
    #[cfg(feature = "broker-rpc")]
    #[inline]
    // Publishes a broker event (reg/unreg/shutdown) through the core RPC
    // client, if one is initialized; the timestamp is stamped here, right
    // before serialization.
    async fn announce(&self, mut event: BrokerEvent<'_>) -> Result<(), Error> {
        if let Some(rpc_client) = self.rpc_client.lock().await.as_ref() {
            event.t = now_ns();
            rpc_client
                .client()
                .lock()
                .await
                .publish(
                    event.topic,
                    rmp_serde::to_vec_named(&event).map_err(Error::data)?.into(),
                    QoS::No,
                )
                .await?;
        }
        Ok(())
    }
    #[allow(clippy::unused_async)]
    #[inline]
    // Registers a client. When force-registration is enabled (and, with
    // broker-rpc, the client is primary), a name collision evicts and
    // disconnects the previous instance instead of failing.
    async fn register_client(&self, client: Arc<BusRtClient>) -> Result<(), Error> {
        // copy name for the announce
        let name = client.name.clone();
        #[cfg(feature = "broker-rpc")]
        let primary = client.primary;
        #[cfg(feature = "broker-rpc")]
        let allow_force = client.primary && self.force_register;
        #[cfg(not(feature = "broker-rpc"))]
        let allow_force = self.force_register;
        match self.insert_client(client.clone()) {
            Ok(()) => {}
            Err(e) if e.kind() == ErrorKind::Busy && allow_force => {
                let prev_c = self.clients.lock().remove(&name);
                if let Some(prev) = prev_c {
                    warn!("disconnecting previous instance of {}", name);
                    self.drop_client(&prev);
                    prev.disconnect_trig.trigger();
                }
                self.insert_client(client)?;
            }
            Err(e) => return Err(e),
        }
        #[cfg(feature = "broker-rpc")]
        if primary {
            if let Err(e) = self.announce(BrokerEvent::reg(&name)).await {
                error!("{}", e);
            }
        }
        Ok(())
    }
    // Inserts a client into all broker maps. Secondaries are linked to
    // their (already registered) primary, and every client is
    // auto-subscribed to the broker warning topic.
    fn insert_client(&self, client: Arc<BusRtClient>) -> Result<(), Error> {
        let mut clients = self.clients.lock();
        let primary_client = if client.primary {
            None
        } else {
            Some(
                clients
                    .get_mut(&client.primary_name)
                    .map(|c| c.clone())
                    .ok_or_else(Error::not_registered)?,
            )
        };
        if let hash_map::Entry::Vacant(x) = clients.entry(client.name.clone()) {
            if let Some(pc) = primary_client {
                pc.secondaries.lock().insert(client.name.clone());
            }
            {
                let mut bdb = self.broadcasts.lock();
                bdb.register_client(&client.name, &client);
            }
            {
                let mut sdb = self.subscriptions.lock();
                sdb.register_client(&client);
                sdb.subscribe(BROKER_WARN_TOPIC, &client);
            }
            client.registered.store(true, atomic::Ordering::Relaxed);
            x.insert(client);
        } else {
            // FIX: corrected typo in the error message ("registred")
            return Err(Error::busy(format!(
                "the client is already registered: {}",
                client.name
            )));
        }
        Ok(())
    }
    // Asks an external client to disconnect by firing its trigger.
    // Internal clients have no transport task, so the request is rejected.
    fn trigger_disconnect(&self, name: &str) -> Result<(), Error> {
        if let Some(client) = self.clients.lock().get(name) {
            if client.kind == BusRtClientKind::Internal {
                Err(Error::not_supported("the client is internal"))
            } else {
                client.disconnect_trig.trigger();
                Ok(())
            }
        } else {
            Err(Error::not_registered())
        }
    }
    #[allow(clippy::unused_async)]
    #[inline]
    // Drops a client and, for primaries that were actually registered,
    // announces the unregistration.
    async fn unregister_client(&self, client: &Arc<BusRtClient>) {
        #[cfg(feature = "broker-rpc")]
        let was_registered = client.registered.load(atomic::Ordering::Relaxed);
        self.drop_client(client);
        #[cfg(feature = "broker-rpc")]
        if client.primary && was_registered {
            if let Err(e) = self.announce(BrokerEvent::unreg(&client.name)).await {
                error!("{}", e);
            }
        }
    }
    // Removes a client from every broker map; for a primary, all of its
    // secondaries are disconnected and recursively dropped as well.
    fn drop_client(&self, client: &Arc<BusRtClient>) {
        if client.registered.load(atomic::Ordering::Relaxed) {
            client.registered.store(false, atomic::Ordering::Relaxed);
            self.subscriptions.lock().unregister_client(client);
            self.broadcasts
                .lock()
                .unregister_client(&client.name, client);
            // the client is removed from the map BEFORE its secondaries
            // are processed: the recursive drop of a secondary looks up
            // its primary by name and must not find it (which would try
            // to re-lock the secondaries mutex held below)
            self.clients.lock().remove(&client.name);
            if client.primary {
                let mut secondaries = client.secondaries.lock();
                for secondary in &*secondaries {
                    let sec = self.clients.lock().get(secondary).cloned();
                    if let Some(sec) = sec {
                        if sec.kind != BusRtClientKind::Internal {
                            sec.disconnect_trig.trigger();
                        }
                        self.drop_client(&sec);
                    }
                }
                secondaries.clear();
            } else if let Some(primary) = self.clients.lock().get(&client.primary_name) {
                primary.secondaries.lock().remove(&client.name);
            }
        }
    }
}
/// Shared map of per-client AAA (authentication/authorization) records.
pub type AaaMap = Arc<SyncMutex<HashMap<String, ClientAaa>>>;
#[derive(Debug, Clone)]
/// Configuration for a broker server endpoint (unix/tcp).
pub struct ServerConfig {
    // stream buffer size (bytes)
    buf_size: usize,
    // TTL for the buffered writer before a forced flush
    buf_ttl: Duration,
    // protocol operation timeout
    timeout: Duration,
    // optional max accepted payload size (bytes); None = unlimited
    payload_size_limit: Option<u32>,
    // optional AAA map used to authorize connecting clients
    aaa_map: Option<AaaMap>,
}
impl Default for ServerConfig {
    // crate-level defaults for buffering/timeout; no payload limit, no AAA
    fn default() -> Self {
        Self {
            buf_size: crate::DEFAULT_BUF_SIZE,
            buf_ttl: crate::DEFAULT_BUF_TTL,
            timeout: crate::DEFAULT_TIMEOUT,
            payload_size_limit: None,
            aaa_map: None,
        }
    }
}
impl ServerConfig {
    /// Creates a server config with default parameters.
    #[inline]
    pub fn new() -> Self {
        Self::default()
    }
    /// Sets the stream buffer size (bytes).
    #[inline]
    pub fn buf_size(mut self, size: usize) -> Self {
        self.buf_size = size;
        self
    }
    /// Sets the write-buffer TTL.
    #[inline]
    pub fn buf_ttl(mut self, ttl: Duration) -> Self {
        self.buf_ttl = ttl;
        self
    }
    /// Sets the protocol operation timeout.
    #[inline]
    pub fn timeout(mut self, timeout: Duration) -> Self {
        self.timeout = timeout;
        self
    }
    /// Attaches an AAA map used to authorize connecting clients.
    #[inline]
    pub fn aaa_map(mut self, aaa_map: AaaMap) -> Self {
        self.aaa_map = Some(aaa_map);
        self
    }
    /// Limits the maximum accepted payload size (bytes).
    #[inline]
    pub fn payload_size_limit(mut self, size: u32) -> Self {
        self.payload_size_limit = Some(size);
        self
    }
}
#[allow(clippy::struct_excessive_bools)]
#[derive(Debug, Clone)]
/// Per-client access rules: allowed source networks plus ACL masks for
/// p2p sends, publishes, subscriptions and broadcasts. Each `_any` flag is
/// a fast path meaning "everything allowed" for that category.
pub struct ClientAaa {
    hosts_allow: HashSet<IpNetwork>,
    allow_p2p_to: AclMap,
    allow_p2p_any: bool,
    allow_publish_to: AclMap,
    allow_publish_any: bool,
    allow_subscribe_to: AclMap,
    allow_subscribe_any: bool,
    allow_broadcast_to: AclMap,
    allow_broadcast_any: bool,
}
impl Default for ClientAaa {
    // permissive default: any source host, everything allowed
    fn default() -> Self {
        let mut hosts_allow = HashSet::new();
        hosts_allow.insert(IpNetwork::V4("0.0.0.0/0".parse().unwrap()));
        Self {
            hosts_allow,
            // peer masks use '.' separator with ?/* wildcards,
            // topic masks use '/' separator with +/# wildcards
            allow_p2p_to: AclMap::new().separator('.').wildcard("*").match_any("?"),
            allow_p2p_any: true,
            allow_publish_to: AclMap::new().separator('/').wildcard("#").match_any("+"),
            allow_publish_any: true,
            allow_subscribe_to: AclMap::new().separator('/').wildcard("#").match_any("+"),
            allow_subscribe_any: true,
            allow_broadcast_to: AclMap::new().separator('.').wildcard("*").match_any("?"),
            allow_broadcast_any: true,
        }
    }
}
impl ClientAaa {
#[inline]
pub fn new() -> Self {
Self::default()
}
#[inline]
pub fn hosts_allow(mut self, hosts: Vec<IpNetwork>) -> Self {
self.hosts_allow = hosts.iter().copied().collect();
self
}
/// peer masks as
///
/// group.?.*
/// group.subgroup.client
/// group.?.client
/// group.*
#[inline]
pub fn allow_p2p_to(mut self, peer_masks: &[&str]) -> Self {
self.allow_p2p_any = false;
for peer_mask in peer_masks {
if *peer_mask == "*" {
self.allow_p2p_any = true;
}
self.allow_p2p_to.insert(peer_mask);
}
self
}
#[inline]
pub fn deny_p2p(mut self) -> Self {
self.allow_p2p_any = false;
self.allow_p2p_to = AclMap::new();
self
}
/// topic masks as
///
/// topic/+/#
/// topic/subtopic/subsubtopic
/// topic/+/subtopic
/// topic/#
#[inline]
pub fn allow_publish_to(mut self, topic_masks: &[&str]) -> Self {
self.allow_publish_any = false;
for topic_mask in topic_masks {
if *topic_mask == "#" {
self.allow_publish_any = true;
}
self.allow_publish_to.insert(topic_mask);
}
self
}
#[inline]
pub fn deny_publish(mut self) -> Self {
self.allow_publish_any = false;
self.allow_publish_to = AclMap::new();
self
}
/// topic masks as
///
/// topic/+/#
/// topic/subtopic/subsubtopic
/// topic/+/subtopic
/// topic/#
#[inline]
pub fn allow_subscribe_to(mut self, topic_masks: &[&str]) -> Self {
self.allow_subscribe_any = false;
for topic_mask in topic_masks {
if *topic_mask == "#" {
self.allow_subscribe_any = true;
}
self.allow_subscribe_to.insert(topic_mask);
}
self
}
#[inline]
pub fn deny_subscribe(mut self) -> Self {
self.allow_subscribe_any = false;
self.allow_subscribe_to = AclMap::new();
self
}
/// peer masks as
///
/// group.?.*
/// group.subgroup.client
/// group.?.client
/// group.*
#[inline]
pub fn allow_broadcast_to(mut self, peer_masks: &[&str]) -> Self {
self.allow_broadcast_any = false;
for peer_mask in peer_masks {
if *peer_mask == "*" {
self.allow_broadcast_any = true;
}
self.allow_broadcast_to.insert(peer_mask);
}
self
}
#[inline]
pub fn deny_broadcast(mut self) -> Self {
self.allow_broadcast_any = false;
self.allow_broadcast_to = AclMap::new();
self
}
#[inline]
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | true |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/src/cursors.rs | src/cursors.rs | use crate::rpc::RpcError;
use async_trait::async_trait;
#[cfg(not(feature = "rt"))]
use parking_lot::Mutex as SyncMutex;
#[cfg(feature = "rt")]
use parking_lot_rt::Mutex as SyncMutex;
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use std::sync::atomic;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use tokio::sync::RwLock;
use tokio::task::JoinHandle;
use uuid::Uuid;
/// A helper cursor payload structure which implements serialize/deserialize with serde Can be
/// replaced with a custom one
#[derive(Serialize, Deserialize, Debug, Copy, Clone)]
pub struct Payload {
    // cursor UUID
    u: uuid::Uuid,
    #[serde(skip_serializing_if = "Option::is_none")]
    // optional bulk record count (None = default of 1)
    n: Option<usize>,
}
impl From<Uuid> for Payload {
#[inline]
fn from(u: Uuid) -> Self {
Self { u, n: None }
}
}
impl Payload {
    /// Cursor UUID.
    #[inline]
    pub fn uuid(&self) -> &Uuid {
        &self.u
    }
    /// Number of records per bulk call (defaults to 1 when unset).
    #[inline]
    pub fn bulk_number(&self) -> usize {
        match self.n {
            Some(n) => n,
            None => 1,
        }
    }
    /// Sets the bulk record count.
    #[inline]
    pub fn set_bulk_number(&mut self, n: usize) {
        self.n = Some(n);
    }
    /// Resets the bulk record count back to the default.
    #[inline]
    pub fn clear_bulk_number(&mut self) {
        self.n = None;
    }
}
/// A helper map to handle multiple cursors
pub struct Map {
    // cursors by UUID, shared with the background cleaner task
    data: Arc<RwLock<BTreeMap<uuid::Uuid, Box<dyn Cursor + Send + Sync>>>>,
    // handle of the cleaner task; aborted on drop
    cleaner: Option<JoinHandle<()>>,
}
impl Map {
    /// creates cursor map object, cleaner task is automatically spawned
    ///
    /// # Panics
    ///
    /// Must be called from within a tokio runtime context (the cleaner
    /// task is spawned immediately).
    pub fn new(cleaner_interval: Duration) -> Self {
        let mut map = Self {
            data: <_>::default(),
            cleaner: <_>::default(),
        };
        let cleaner = map.spawn_cleaner(cleaner_interval);
        map.cleaner.replace(cleaner);
        map
    }
    /// Add a new cursor to the map and return its UUID
    pub async fn add<C>(&self, c: C) -> Uuid
    where
        C: Cursor + Send + Sync + 'static,
    {
        let u = Uuid::new_v4();
        self.data.write().await.insert(u, Box::new(c));
        u
    }
    /// Remove a cursor from the map
    ///
    /// (usually should not be called, unless there is no cleaner worker spawned)
    // FIX: removed a stray unused `<C>` type parameter (copy-paste from
    // `add`) which could never be inferred and forced every caller to
    // spell a meaningless turbofish
    pub async fn remove(&self, u: &Uuid) {
        self.data.write().await.remove(u);
    }
    /// Call "next" method of the cursor, specified by UUID
    ///
    /// # Errors
    ///
    /// Returns not-found if no such cursor exists, otherwise propagates
    /// the cursor's own error.
    pub async fn next(&self, cursor_id: &Uuid) -> Result<Option<Vec<u8>>, RpcError> {
        if let Some(cursor) = self.data.read().await.get(cursor_id) {
            // touching resets the TTL so actively used cursors survive
            cursor.meta().touch();
            cursor.next().await
        } else {
            Err(RpcError::not_found(None))
        }
    }
    /// Call "next_bulk" method of the cursor, specified by UUID
    ///
    /// # Errors
    ///
    /// Returns not-found if no such cursor exists, otherwise propagates
    /// the cursor's own error.
    pub async fn next_bulk(
        &self,
        cursor_id: &Uuid,
        count: usize,
    ) -> Result<Option<Vec<u8>>, RpcError> {
        if let Some(cursor) = self.data.read().await.get(cursor_id) {
            cursor.meta().touch();
            Ok(Some(cursor.next_bulk(count).await?))
        } else {
            Err(RpcError::not_found(None))
        }
    }
    // Spawns the background task which periodically drops finished and
    // expired cursors.
    fn spawn_cleaner(&self, interval: Duration) -> JoinHandle<()> {
        let cursors = self.data.clone();
        tokio::spawn(async move {
            // the interval is created inside the task so the timer is
            // guaranteed to belong to the runtime executing the task
            let mut int = tokio::time::interval(interval);
            loop {
                int.tick().await;
                cursors.write().await.retain(|_, v| v.meta().is_alive());
            }
        })
    }
}
impl Drop for Map {
    fn drop(&mut self) {
        // stop the background cleaner so it does not outlive the map
        if let Some(task) = self.cleaner.take() {
            task.abort();
        }
    }
}
/// The cursor trait
#[async_trait]
pub trait Cursor {
    // fetch the next record; None when the cursor is exhausted
    async fn next(&self) -> Result<Option<Vec<u8>>, RpcError>;
    // fetch up to `count` records as a single serialized payload
    async fn next_bulk(&self, count: usize) -> Result<Vec<u8>, RpcError>;
    // access to the cursor lifetime metadata (TTL / finished state)
    fn meta(&self) -> &Meta;
}
/// The cursor meta object, used by cursors::Map to manage finished/expired cursors
pub struct Meta {
    // set once the cursor is exhausted/closed
    finished: atomic::AtomicBool,
    // expiration deadline, pushed forward on every touch()
    expires: SyncMutex<Instant>,
    // time-to-live applied on each touch
    ttl: Duration,
}
impl Meta {
    /// Create meta with the given TTL; the initial deadline is `now + ttl`
    /// and the cursor starts in the not-finished state.
    #[inline]
    pub fn new(ttl: Duration) -> Self {
        let deadline = Instant::now() + ttl;
        Self {
            finished: <_>::default(),
            expires: SyncMutex::new(deadline),
            ttl,
        }
    }
    /// True once the cursor has been marked finished.
    #[inline]
    pub fn is_finished(&self) -> bool {
        self.finished.load(atomic::Ordering::SeqCst)
    }
    /// True when the deadline has passed.
    #[inline]
    pub fn is_expired(&self) -> bool {
        Instant::now() > *self.expires.lock()
    }
    /// Mark the cursor finished (it will be evicted by the cleaner).
    #[inline]
    pub fn mark_finished(&self) {
        self.finished.store(true, atomic::Ordering::SeqCst);
    }
    /// Alive = not finished and not past its deadline.
    #[inline]
    pub fn is_alive(&self) -> bool {
        !(self.is_finished() || self.is_expired())
    }
    /// Push the deadline `ttl` into the future (called on every access).
    #[inline]
    fn touch(&self) {
        *self.expires.lock() = Instant::now() + self.ttl;
    }
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/src/ipc.rs | src/ipc.rs | use crate::borrow::Cow;
use crate::comm::{Flush, TtlBufWriter};
use crate::Error;
use crate::EventChannel;
use crate::IntoBusRtResult;
use crate::OpConfirm;
use crate::QoS;
use crate::GREETINGS;
use crate::PING_FRAME;
use crate::PROTOCOL_VERSION;
use crate::RESPONSE_OK;
use crate::SECONDARY_SEP;
use crate::{Frame, FrameData, FrameKind, FrameOp};
#[cfg(not(feature = "rt"))]
use parking_lot::Mutex;
#[cfg(feature = "rt")]
use parking_lot_rt::Mutex;
use std::collections::BTreeMap;
use std::marker::Unpin;
use std::sync::atomic;
use std::sync::Arc;
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt, BufReader};
#[cfg(not(target_os = "windows"))]
use tokio::net::unix;
#[cfg(not(target_os = "windows"))]
use tokio::net::UnixStream;
use tokio::net::{tcp, TcpStream};
use tokio::sync::oneshot;
use tokio::task::JoinHandle;
use crate::client::AsyncClient;
use log::{error, trace, warn};
use async_trait::async_trait;
type ResponseMap = Arc<Mutex<BTreeMap<u32, oneshot::Sender<Result<(), Error>>>>>;
/// Outgoing transport half: a TTL-buffered writer over either a unix or TCP socket.
enum Writer {
    #[cfg(not(target_os = "windows"))]
    Unix(TtlBufWriter<unix::OwnedWriteHalf>),
    Tcp(TtlBufWriter<tcp::OwnedWriteHalf>),
}
impl Writer {
    /// Write `buf` through the underlying buffered writer with the requested
    /// flush policy, independent of the transport kind.
    pub async fn write(&mut self, buf: &[u8], flush: Flush) -> Result<(), Error> {
        let res = match self {
            #[cfg(not(target_os = "windows"))]
            Writer::Unix(writer) => writer.write(buf, flush).await,
            Writer::Tcp(writer) => writer.write(buf, flush).await,
        };
        res.map_err(Into::into)
    }
}
/// Client connection configuration (path/name plus buffering and timing knobs).
#[derive(Debug, Clone)]
pub struct Config {
    // socket path (".sock"/".socket"/".ipc" or absolute) or "host:port"
    path: String,
    // unique client name registered with the broker
    name: String,
    // read buffer / write buffer capacity in bytes
    buf_size: usize,
    // max time written data may sit in the buffer before being flushed
    buf_ttl: Duration,
    // capacity of the incoming frame channel
    queue_size: usize,
    // timeout applied to connect, handshake and individual writes
    timeout: Duration,
}
impl Config {
    /// path - /path/to/socket (must end with .sock .socket or .ipc) or host:port,
    /// name - an unique client name
    pub fn new(path: &str, name: &str) -> Self {
        Self {
            path: path.to_owned(),
            name: name.to_owned(),
            buf_size: crate::DEFAULT_BUF_SIZE,
            buf_ttl: crate::DEFAULT_BUF_TTL,
            queue_size: crate::DEFAULT_QUEUE_SIZE,
            timeout: crate::DEFAULT_TIMEOUT,
        }
    }
    /// Override the I/O buffer size.
    pub fn buf_size(self, size: usize) -> Self {
        Self {
            buf_size: size,
            ..self
        }
    }
    /// Override how long written data may sit unflushed.
    pub fn buf_ttl(self, ttl: Duration) -> Self {
        Self {
            buf_ttl: ttl,
            ..self
        }
    }
    /// Override the incoming frame queue capacity.
    pub fn queue_size(self, size: usize) -> Self {
        Self {
            queue_size: size,
            ..self
        }
    }
    /// Override the connect/write timeout.
    pub fn timeout(self, timeout: Duration) -> Self {
        Self {
            timeout,
            ..self
        }
    }
}
/// Low-level bus client: one writer half plus a background reader task.
pub struct Client {
    // registered client name
    name: String,
    // buffered outgoing transport
    writer: Writer,
    // background task that parses incoming frames and resolves ACKs
    reader_fut: JoinHandle<()>,
    // last used frame id (0 is never sent; wraps MAX -> 1)
    frame_id: u32,
    // pending ACK confirmations, shared with the reader task
    responses: ResponseMap,
    // incoming frame channel, handed out once via take_event_channel()
    rx: Option<EventChannel>,
    // cleared by the reader task / write errors when the connection dies
    connected: Arc<atomic::AtomicBool>,
    timeout: Duration,
    // kept to derive secondary client configs
    config: Config,
    // suffix counter for register_secondary()
    secondary_counter: atomic::AtomicUsize,
}
// keep these as macros to insure inline and avoid unecc. futures
//
// Allocates the frame header buffer pre-seeded with the (freshly incremented)
// frame id and the combined op/QoS byte. Wire layout so far:
// [frame_id: u32 LE][op | qos << 6]; the caller appends the length and body.
macro_rules! prepare_frame_buf {
    ($self: expr, $op: expr, $qos: expr, $expected_header_len: expr) => {{
        $self.increment_frame_id();
        let mut buf = Vec::with_capacity($expected_header_len + 4 + 1);
        buf.extend($self.frame_id.to_le_bytes());
        buf.push($op as u8 | ($qos as u8) << 6);
        buf
    }};
}
// Writes `$data` with the client's timeout. On a write error the reader task
// is aborted and the client is marked disconnected before returning the error.
//
// NOTE(review): the timeout branch returns the error WITHOUT marking the
// client disconnected or aborting the reader — confirm this asymmetry is
// intentional (a timed-out write may still leave a usable connection).
macro_rules! send_data_or_mark_disconnected {
    ($self: expr, $data: expr, $flush: expr) => {
        match tokio::time::timeout($self.timeout, $self.writer.write($data, $flush)).await {
            Ok(result) => {
                if let Err(e) = result {
                    $self.reader_fut.abort();
                    $self.connected.store(false, atomic::Ordering::Relaxed);
                    return Err(e.into());
                }
            }
            Err(e) => {
                return Err(e.into());
            }
        }
    };
}
// Sends header + payload and, for ACK-requiring QoS levels, registers a
// oneshot receiver under the current frame id BEFORE writing, so the ACK
// cannot race the registration. Returns `Ok(Some(rx))` for confirmable ops.
macro_rules! send_frame_and_confirm {
    ($self: expr, $buf: expr, $payload: expr, $qos: expr) => {{
        let rx = if $qos.needs_ack() {
            let (tx, rx) = oneshot::channel();
            {
                $self.responses.lock().insert($self.frame_id, tx);
            }
            Some(rx)
        } else {
            None
        };
        send_data_or_mark_disconnected!($self, $buf, Flush::No);
        send_data_or_mark_disconnected!($self, $payload, $qos.is_realtime().into());
        Ok(rx)
    }};
}
// Zero-copy variant of send_frame!: the payload is written separately from the
// header buffer (which carries target\0 + extra header bytes), so the payload
// is never copied into an intermediate buffer.
macro_rules! send_zc_frame {
    // zc-send to target or topic
    ($self: expr, $target: expr, $header: expr, $payload: expr, $op: expr, $qos: expr) => {{
        let t = $target.as_bytes();
        let mut buf = prepare_frame_buf!($self, $op, $qos, 4 + t.len() + 1 + $header.len());
        #[allow(clippy::cast_possible_truncation)]
        buf.extend_from_slice(
            // body length = target + NUL + extra header + payload
            &((t.len() + $payload.len() + $header.len() + 1) as u32).to_le_bytes(),
        );
        buf.extend_from_slice(t);
        buf.push(0x00);
        buf.extend_from_slice($header);
        trace!("sending busrt {:?} to {} QoS={:?}", $op, $target, $qos);
        send_frame_and_confirm!($self, &buf, $payload, $qos)
    }};
}
// Frame senders for the three addressing shapes; the body length field counts
// every NUL separator. Body layouts:
//   target variant:   target\0 + payload
//   receiver variant: topic\0receiver\0 + payload
//   bare variant:     payload only (target-less ops such as subscribe)
macro_rules! send_frame {
    // send to target or topic
    ($self: expr, $target: expr, $payload: expr, $op: expr, $qos: expr) => {{
        let t = $target.as_bytes();
        let mut buf = prepare_frame_buf!($self, $op, $qos, 4 + t.len() + 1);
        #[allow(clippy::cast_possible_truncation)]
        buf.extend_from_slice(&((t.len() + $payload.len() + 1) as u32).to_le_bytes());
        buf.extend_from_slice(t);
        buf.push(0x00);
        trace!("sending busrt {:?} to {} QoS={:?}", $op, $target, $qos);
        send_frame_and_confirm!($self, &buf, $payload, $qos)
    }};
    // send to topic with a receiver
    ($self: expr, $target: expr, $receiver: expr, $payload: expr, $op: expr, $qos: expr) => {{
        let t = $target.as_bytes();
        let r = $receiver.as_bytes();
        let mut buf = prepare_frame_buf!($self, $op, $qos, 4 + t.len() + 1 + r.len() + 1);
        #[allow(clippy::cast_possible_truncation)]
        buf.extend_from_slice(&((t.len() + r.len() + $payload.len() + 2) as u32).to_le_bytes());
        buf.extend_from_slice(t);
        buf.push(0x00);
        buf.extend_from_slice(r);
        buf.push(0x00);
        trace!("sending busrt {:?} to {} QoS={:?}", $op, $target, $qos);
        send_frame_and_confirm!($self, &buf, $payload, $qos)
    }};
    // send w/o a target
    ($self: expr, $payload: expr, $op: expr, $qos: expr) => {{
        let mut buf = prepare_frame_buf!($self, $op, $qos, 4);
        #[allow(clippy::cast_possible_truncation)]
        buf.extend_from_slice(&($payload.len() as u32).to_le_bytes());
        send_frame_and_confirm!($self, &buf, $payload, $qos)
    }};
}
// Shared tail of the connection setup for both transports: perform the
// greetings/registration handshake, then spawn the reader task which feeds
// incoming frames into a bounded channel and clears the connected flag when
// it exits. Evaluates to `(reader_join_handle, frame_receiver)`.
macro_rules! connect_broker {
    ($name: expr, $reader: expr, $writer: expr,
     $responses: expr, $connected: expr, $timeout: expr, $queue_size: expr) => {{
        chat($name, &mut $reader, &mut $writer).await?;
        let (tx, rx) = async_channel::bounded($queue_size);
        let reader_responses = $responses.clone();
        let rconn = $connected.clone();
        // NOTE(review): Duration is Copy — this `.clone()` is a no-op copy
        let timeout = $timeout.clone();
        let reader_fut = tokio::spawn(async move {
            if let Err(e) = handle_read($reader, tx, timeout, reader_responses).await {
                error!("busrt client reader error: {}", e);
            }
            rconn.store(false, atomic::Ordering::Relaxed);
        });
        (reader_fut, rx)
    }};
}
impl Client {
    /// Connect to the broker described by `config`, bounding the whole
    /// connect + handshake sequence by the configured timeout.
    pub async fn connect(config: &Config) -> Result<Self, Error> {
        tokio::time::timeout(config.timeout, Self::connect_broker(config, None)).await?
    }
    /// Connect over an already-established unix stream, still performing the
    /// greetings/registration handshake.
    ///
    /// NOTE(review): `UnixStream` is imported only for non-windows targets but
    /// is referenced here unconditionally — verify this module builds on
    /// windows.
    pub async fn connect_stream(stream: UnixStream, config: &Config) -> Result<Self, Error> {
        tokio::time::timeout(config.timeout, Self::connect_broker(config, Some(stream))).await?
    }
    // Chooses the transport: unix socket when the path looks like one
    // (.sock/.socket/.ipc suffix or absolute path), TCP otherwise; then runs
    // the shared handshake/reader setup via the connect_broker! macro.
    async fn connect_broker(config: &Config, stream: Option<UnixStream>) -> Result<Self, Error> {
        let responses: ResponseMap = <_>::default();
        let connected = Arc::new(atomic::AtomicBool::new(true));
        #[allow(clippy::case_sensitive_file_extension_comparisons)]
        let (writer, reader_fut, rx) = if config.path.ends_with(".sock")
            || config.path.ends_with(".socket")
            || config.path.ends_with(".ipc")
            || config.path.starts_with('/')
        {
            #[cfg(target_os = "windows")]
            {
                return Err(Error::not_supported("unix sockets"));
            }
            #[cfg(not(target_os = "windows"))]
            {
                // use the provided stream if any, otherwise dial the socket
                let stream = if let Some(s) = stream {
                    s
                } else {
                    UnixStream::connect(&config.path).await?
                };
                let (r, mut writer) = stream.into_split();
                let mut reader = BufReader::with_capacity(config.buf_size, r);
                let (reader_fut, rx) = connect_broker!(
                    &config.name,
                    reader,
                    writer,
                    responses,
                    connected,
                    config.timeout,
                    config.queue_size
                );
                (
                    Writer::Unix(TtlBufWriter::new(
                        writer,
                        config.buf_size,
                        config.buf_ttl,
                        config.timeout,
                    )),
                    reader_fut,
                    rx,
                )
            }
        } else {
            let stream = TcpStream::connect(&config.path).await?;
            // disable Nagle: the TtlBufWriter already batches writes
            stream.set_nodelay(true)?;
            let (r, mut writer) = stream.into_split();
            let mut reader = BufReader::with_capacity(config.buf_size, r);
            let (reader_fut, rx) = connect_broker!(
                &config.name,
                reader,
                writer,
                responses,
                connected,
                config.timeout,
                config.queue_size
            );
            (
                Writer::Tcp(TtlBufWriter::new(
                    writer,
                    config.buf_size,
                    config.buf_ttl,
                    config.timeout,
                )),
                reader_fut,
                rx,
            )
        };
        Ok(Self {
            name: config.name.clone(),
            writer,
            reader_fut,
            frame_id: 0,
            responses,
            rx: Some(rx),
            connected,
            timeout: config.timeout,
            config: config.clone(),
            secondary_counter: atomic::AtomicUsize::new(0),
        })
    }
    /// Open an additional connection derived from this (primary) client's
    /// config, named `<name><SECONDARY_SEP><n>`. Fails if called on a client
    /// that is itself a secondary.
    pub async fn register_secondary(&self) -> Result<Self, Error> {
        if self.name.contains(SECONDARY_SEP) {
            Err(Error::not_supported("not a primary client"))
        } else {
            let secondary_id = self
                .secondary_counter
                .fetch_add(1, atomic::Ordering::Relaxed);
            let secondary_name = format!("{}{}{}", self.name, SECONDARY_SEP, secondary_id);
            let mut config = self.config.clone();
            config.name = secondary_name;
            Self::connect(&config).await
        }
    }
    // Frame id 0 is never issued (ids wrap MAX -> 1), so 0 can be treated as
    // "no id" on the wire.
    #[inline]
    fn increment_frame_id(&mut self) {
        if self.frame_id == u32::MAX {
            self.frame_id = 1;
        } else {
            self.frame_id += 1;
        }
    }
    /// The configured operation timeout.
    #[inline]
    pub fn get_timeout(&self) -> Duration {
        self.timeout
    }
}
#[async_trait]
impl AsyncClient for Client {
    /// Hand out the incoming-frame channel (available only once).
    #[inline]
    fn take_event_channel(&mut self) -> Option<EventChannel> {
        self.rx.take()
    }
    /// Shared flag cleared when the connection is lost.
    #[inline]
    fn get_connected_beacon(&self) -> Option<Arc<atomic::AtomicBool>> {
        Some(self.connected.clone())
    }
    /// Send a direct message to `target`.
    async fn send(
        &mut self,
        target: &str,
        payload: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error> {
        send_frame!(self, target, payload.as_slice(), FrameOp::Message, qos)
    }
    /// Send a direct message with an extra header, without copying the payload.
    async fn zc_send(
        &mut self,
        target: &str,
        header: Cow<'async_trait>,
        payload: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error> {
        send_zc_frame!(
            self,
            target,
            header.as_slice(),
            payload.as_slice(),
            FrameOp::Message,
            qos
        )
    }
    /// Broadcast a message to all clients matching `target`.
    async fn send_broadcast(
        &mut self,
        target: &str,
        payload: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error> {
        send_frame!(self, target, payload.as_slice(), FrameOp::Broadcast, qos)
    }
    /// Publish to a topic.
    async fn publish(
        &mut self,
        target: &str,
        payload: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error> {
        send_frame!(self, target, payload.as_slice(), FrameOp::PublishTopic, qos)
    }
    /// Publish to a topic for a single specific receiver.
    async fn publish_for(
        &mut self,
        target: &str,
        receiver: &str,
        payload: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error> {
        send_frame!(
            self,
            target,
            receiver,
            payload.as_slice(),
            FrameOp::PublishTopicFor,
            qos
        )
    }
    // For subscription ops the topic itself is the frame payload (target-less
    // send_frame! variant).
    async fn subscribe(&mut self, topic: &str, qos: QoS) -> Result<OpConfirm, Error> {
        send_frame!(self, topic.as_bytes(), FrameOp::SubscribeTopic, qos)
    }
    async fn unsubscribe(&mut self, topic: &str, qos: QoS) -> Result<OpConfirm, Error> {
        send_frame!(self, topic.as_bytes(), FrameOp::UnsubscribeTopic, qos)
    }
    /// Subscribe to several topics with a single frame (topics joined with NUL).
    async fn subscribe_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<OpConfirm, Error> {
        let mut payload = Vec::new();
        for topic in topics {
            if !payload.is_empty() {
                payload.push(0x00);
            }
            payload.extend(topic.as_bytes());
        }
        send_frame!(self, &payload, FrameOp::SubscribeTopic, qos)
    }
    /// Unsubscribe from several topics with a single frame (topics joined with NUL).
    async fn unsubscribe_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<OpConfirm, Error> {
        let mut payload = Vec::new();
        for topic in topics {
            if !payload.is_empty() {
                payload.push(0x00);
            }
            payload.extend(topic.as_bytes());
        }
        send_frame!(self, &payload, FrameOp::UnsubscribeTopic, qos)
    }
    async fn exclude(&mut self, topic: &str, qos: QoS) -> Result<OpConfirm, Error> {
        send_frame!(self, topic.as_bytes(), FrameOp::ExcludeTopic, qos)
    }
    async fn unexclude(&mut self, topic: &str, qos: QoS) -> Result<OpConfirm, Error> {
        send_frame!(self, topic.as_bytes(), FrameOp::UnexcludeTopic, qos)
    }
    /// Exclude several topics with a single frame (topics joined with NUL).
    async fn exclude_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<OpConfirm, Error> {
        let mut payload = Vec::new();
        for topic in topics {
            if !payload.is_empty() {
                payload.push(0x00);
            }
            payload.extend(topic.as_bytes());
        }
        send_frame!(self, &payload, FrameOp::ExcludeTopic, qos)
    }
    /// Unexclude several topics with a single frame (topics joined with NUL).
    async fn unexclude_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<OpConfirm, Error> {
        let mut payload = Vec::new();
        for topic in topics {
            if !payload.is_empty() {
                payload.push(0x00);
            }
            payload.extend(topic.as_bytes());
        }
        send_frame!(self, &payload, FrameOp::UnexcludeTopic, qos)
    }
    /// Send the fixed ping frame with an immediate flush (keep-alive).
    #[inline]
    async fn ping(&mut self) -> Result<(), Error> {
        send_data_or_mark_disconnected!(self, PING_FRAME, Flush::Instant);
        Ok(())
    }
    #[inline]
    fn is_connected(&self) -> bool {
        self.connected.load(atomic::Ordering::Relaxed)
    }
    #[inline]
    fn get_timeout(&self) -> Option<Duration> {
        Some(self.timeout)
    }
    #[inline]
    fn get_name(&self) -> &str {
        self.name.as_str()
    }
}
impl Drop for Client {
    fn drop(&mut self) {
        // stop the background reader task so it does not outlive the client
        self.reader_fut.abort();
    }
}
/// Background reader loop: parses incoming frames from the broker, resolves
/// pending op confirmations (ACK frames) and forwards data frames into `tx`.
/// Returns on any I/O / protocol error; the caller then marks the client
/// disconnected.
///
/// Wire format: every frame starts with a 6-byte header `[kind][u32 LE][flag]`.
/// For ACK frames the u32 is the confirmed frame id and the last byte the op
/// status; for data frames the u32 is the body length and the last byte the
/// realtime flag.
async fn handle_read<R>(
    mut reader: R,
    tx: async_channel::Sender<Frame>,
    timeout: Duration,
    responses: ResponseMap,
) -> Result<(), Error>
where
    R: AsyncReadExt + Unpin,
{
    loop {
        let mut buf = [0_u8; 6];
        reader.read_exact(&mut buf).await?;
        let frame_type: FrameKind = buf[0].try_into()?;
        let realtime = buf[5] != 0;
        match frame_type {
            FrameKind::Nop => {}
            FrameKind::Acknowledge => {
                // resolve the pending confirmation for this frame id;
                // buf[5] carries the op result code here (0 = ok)
                let ack_id = u32::from_le_bytes(buf[1..5].try_into().unwrap());
                let tx_channel = { responses.lock().remove(&ack_id) };
                if let Some(tx) = tx_channel {
                    let _r = tx.send(buf[5].to_busrt_result());
                } else {
                    warn!("orphaned busrt op ack {}", ack_id);
                }
            }
            _ => {
                // data frame: read the body, then split off the NUL-separated
                // prefix — "sender\0topic\0payload" for publications,
                // "sender\0payload" for everything else
                let frame_len = u32::from_le_bytes(buf[1..5].try_into().unwrap());
                let mut buf = vec![0; frame_len as usize];
                tokio::time::timeout(timeout, reader.read_exact(&mut buf)).await??;
                let (sender, topic, payload_pos) = {
                    if frame_type == FrameKind::Publish {
                        let mut sp = buf.splitn(3, |c| *c == 0);
                        let s = sp.next().ok_or_else(|| Error::data("broken frame"))?;
                        let sender = std::str::from_utf8(s)?.to_owned();
                        let t = sp.next().ok_or_else(|| Error::data("broken frame"))?;
                        let topic = std::str::from_utf8(t)?.to_owned();
                        // require a payload section to exist
                        sp.next().ok_or_else(|| Error::data("broken frame"))?;
                        let payload_pos = s.len() + t.len() + 2;
                        (Some(sender), Some(topic), payload_pos)
                    } else {
                        let mut sp = buf.splitn(2, |c| *c == 0);
                        let s = sp.next().ok_or_else(|| Error::data("broken frame"))?;
                        let sender = std::str::from_utf8(s)?.to_owned();
                        sp.next().ok_or_else(|| Error::data("broken frame"))?;
                        let payload_pos = s.len() + 1;
                        (Some(sender), None, payload_pos)
                    }
                };
                let frame = Arc::new(FrameData::new(
                    frame_type,
                    sender,
                    topic,
                    None,
                    buf,
                    payload_pos,
                    realtime,
                ));
                tx.send(frame).await.map_err(Error::io)?;
            }
        }
    }
}
/// Perform the client-side greetings/registration handshake:
/// read `[GREETINGS][proto_version: u16 LE]`, echo it back, check the OK
/// byte, then send `[name_len: u16 LE][name]` and check the OK byte again.
///
/// # Errors
///
/// Fails on a name longer than `u16::MAX` bytes, an unknown greeting,
/// a protocol version mismatch, I/O errors, or a non-OK server response.
async fn chat<R, W>(name: &str, reader: &mut R, writer: &mut W) -> Result<(), Error>
where
    R: AsyncReadExt + Unpin,
    W: AsyncWriteExt + Unpin,
{
    if name.len() > u16::MAX as usize {
        return Err(Error::data("name too long"));
    }
    let mut buf = [0_u8; 3];
    reader.read_exact(&mut buf).await?;
    if buf[0] != GREETINGS[0] {
        return Err(Error::not_supported("Invalid greetings"));
    }
    if u16::from_le_bytes(buf[1..3].try_into().unwrap()) != PROTOCOL_VERSION {
        return Err(Error::not_supported("Unsupported protocol version"));
    }
    // echo the greeting header back to confirm the protocol
    writer.write_all(&buf).await?;
    let mut buf = [0_u8; 1];
    reader.read_exact(&mut buf).await?;
    if buf[0] != RESPONSE_OK {
        return Err(Error::new(
            buf[0].into(),
            Some(format!("Server greetings response: {:?}", buf[0])),
        ));
    }
    // register the client name (length-prefixed); write the bytes directly —
    // no need to copy the name into an intermediate Vec first
    #[allow(clippy::cast_possible_truncation)]
    writer.write_all(&(name.len() as u16).to_le_bytes()).await?;
    writer.write_all(name.as_bytes()).await?;
    let mut buf = [0_u8; 1];
    reader.read_exact(&mut buf).await?;
    if buf[0] != RESPONSE_OK {
        return Err(Error::new(
            buf[0].into(),
            Some(format!("Server registration response: {:?}", buf[0])),
        ));
    }
    Ok(())
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/src/tools/pubsub.rs | src/tools/pubsub.rs | use crate::{Error, Frame};
use std::collections::btree_map::Entry;
use std::collections::BTreeMap;
/// Channel endpoints used to deliver matched publications to topic handlers.
pub type PublicationSender = async_channel::Sender<Publication>;
pub type PublicationReceiver = async_channel::Receiver<Publication>;
/// A matched publication routed to a topic/prefix handler by [`TopicBroker`].
pub struct Publication {
    // byte offset in the topic where the subtopic starts
    // (length of the registered prefix; 0 for exact-topic matches)
    subtopic_pos: usize,
    // the original frame (always carries a topic when routed here)
    frame: Frame,
    // custom handler id supplied at registration (0 by default)
    handler_id: usize,
}
impl Publication {
    /// The underlying frame.
    #[inline]
    pub fn frame(&self) -> &Frame {
        &self.frame
    }
    /// Sender name from the frame.
    #[inline]
    pub fn sender(&self) -> &str {
        self.frame.sender()
    }
    /// Primary sender name (without any secondary suffix).
    #[inline]
    pub fn primary_sender(&self) -> &str {
        self.frame.primary_sender()
    }
    /// # Panics
    ///
    /// Will not panic as all processed frames always have topics
    #[inline]
    pub fn topic(&self) -> &str {
        self.frame.topic().unwrap()
    }
    /// # Panics
    ///
    /// Will not panic as all processed frames always have topics
    #[inline]
    pub fn subtopic(&self) -> &str {
        // slice off the registered prefix, leaving the handler-relative part
        &self.frame.topic().as_ref().unwrap()[self.subtopic_pos..]
    }
    /// Frame payload bytes.
    #[inline]
    pub fn payload(&self) -> &[u8] {
        self.frame.payload()
    }
    /// Optional frame header bytes.
    #[inline]
    pub fn header(&self) -> Option<&[u8]> {
        self.frame.header()
    }
    /// True when the frame was sent with a realtime QoS.
    #[inline]
    pub fn is_realtime(&self) -> bool {
        self.frame.is_realtime()
    }
    /// Handler id assigned at registration time.
    #[inline]
    pub fn handler_id(&self) -> usize {
        self.handler_id
    }
}
/// Topic publications broker
///
/// The helper class to process topics in blocking mode
///
/// Processes topics and sends frames to handler channels
#[derive(Default)]
pub struct TopicBroker {
    // prefix -> (handler channel, handler id); checked after exact topics,
    // in BTreeMap (lexicographic) order — first match wins
    prefixes: BTreeMap<String, (PublicationSender, usize)>,
    // exact topic -> (handler channel, handler id)
    topics: BTreeMap<String, (PublicationSender, usize)>,
}
impl TopicBroker {
    #[inline]
    pub fn new() -> Self {
        Self::default()
    }
    /// Process a topic (returns tx, rx channel for a handler)
    #[inline]
    pub fn register_topic(
        &mut self,
        topic: &str,
        channel_size: usize,
    ) -> Result<(PublicationSender, PublicationReceiver), Error> {
        let (tx, rx) = async_channel::bounded(channel_size);
        self.register_topic_tx(topic, tx.clone())?;
        Ok((tx, rx))
    }
    /// Process a topic (returns tx, rx channel for a handler)
    ///
    /// handler id - a custom handler id (to use in multi-handlers)
    #[inline]
    pub fn register_topic_with_handler_id(
        &mut self,
        topic: &str,
        handler_id: usize,
        channel_size: usize,
    ) -> Result<(PublicationSender, PublicationReceiver), Error> {
        let (tx, rx) = async_channel::bounded(channel_size);
        self.register_topic_tx_with_handler_id(topic, handler_id, tx.clone())?;
        Ok((tx, rx))
    }
    /// Process a topic with the pre-defined channel
    #[inline]
    pub fn register_topic_tx(&mut self, topic: &str, tx: PublicationSender) -> Result<(), Error> {
        // delegate with the default handler id (0) instead of duplicating
        // the vacant-entry logic
        self.register_topic_tx_with_handler_id(topic, 0, tx)
    }
    /// Process a topic with the pre-defined channel
    ///
    /// handler id - a custom handler id (to use in multi-handlers)
    #[inline]
    pub fn register_topic_tx_with_handler_id(
        &mut self,
        topic: &str,
        handler_id: usize,
        tx: PublicationSender,
    ) -> Result<(), Error> {
        if let Entry::Vacant(o) = self.topics.entry(topic.to_owned()) {
            o.insert((tx, handler_id));
            Ok(())
        } else {
            Err(Error::busy("topic already registered"))
        }
    }
    /// Process subtopic by prefix (returns tx, rx channel for a handler)
    #[inline]
    pub fn register_prefix(
        &mut self,
        prefix: &str,
        channel_size: usize,
    ) -> Result<(PublicationSender, PublicationReceiver), Error> {
        let (tx, rx) = async_channel::bounded(channel_size);
        self.register_prefix_tx(prefix, tx.clone())?;
        Ok((tx, rx))
    }
    /// Process subtopic by prefix (returns tx, rx channel for a handler)
    ///
    /// handler id - a custom handler id (to use in multi-handlers)
    #[inline]
    pub fn register_prefix_with_handler_id(
        &mut self,
        prefix: &str,
        handler_id: usize,
        channel_size: usize,
    ) -> Result<(PublicationSender, PublicationReceiver), Error> {
        let (tx, rx) = async_channel::bounded(channel_size);
        self.register_prefix_tx_with_handler_id(prefix, handler_id, tx.clone())?;
        Ok((tx, rx))
    }
    /// Process subtopic by prefix with the pre-defined channel
    #[inline]
    pub fn register_prefix_tx(&mut self, prefix: &str, tx: PublicationSender) -> Result<(), Error> {
        // delegate with the default handler id (0) instead of duplicating
        // the vacant-entry logic
        self.register_prefix_tx_with_handler_id(prefix, 0, tx)
    }
    /// Process subtopic by prefix with the pre-defined channel
    ///
    /// handler id - a custom handler id (to use in multi-handlers)
    #[inline]
    pub fn register_prefix_tx_with_handler_id(
        &mut self,
        prefix: &str,
        handler_id: usize,
        tx: PublicationSender,
    ) -> Result<(), Error> {
        if let Entry::Vacant(o) = self.prefixes.entry(prefix.to_owned()) {
            o.insert((tx, handler_id));
            Ok(())
        } else {
            Err(Error::busy("topic prefix already registered"))
        }
    }
    /// The frame is returned back if not processed
    ///
    /// Exact-topic handlers take precedence over prefix handlers; prefixes
    /// are checked in lexicographic (BTreeMap) order and the first match wins.
    #[inline]
    pub async fn process(&self, frame: Frame) -> Result<Option<Frame>, Error> {
        if let Some(topic) = frame.topic() {
            if let Some((tx, handler_id)) = self.topics.get(topic) {
                tx.send(Publication {
                    subtopic_pos: 0,
                    frame,
                    handler_id: *handler_id,
                })
                .await?;
                return Ok(None);
            }
            for (pfx, (tx, handler_id)) in &self.prefixes {
                if topic.starts_with(pfx) {
                    tx.send(Publication {
                        subtopic_pos: pfx.len(),
                        frame,
                        handler_id: *handler_id,
                    })
                    .await?;
                    return Ok(None);
                }
            }
        }
        Ok(Some(frame))
    }
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/src/rpc/async_client.rs | src/rpc/async_client.rs | use super::{
prepare_call_payload, RpcError, RpcEvent, RpcEventKind, RpcResult, RPC_ERROR,
RPC_ERROR_CODE_METHOD_NOT_FOUND, RPC_NOTIFICATION, RPC_REPLY,
};
use crate::borrow::Cow;
use crate::client::AsyncClient;
use crate::EventChannel;
use crate::{Error, Frame, FrameKind, OpConfirm, QoS};
use async_trait::async_trait;
use log::{error, trace, warn};
#[cfg(not(feature = "rt"))]
use parking_lot::Mutex as SyncMutex;
#[cfg(feature = "rt")]
use parking_lot_rt::Mutex as SyncMutex;
use std::collections::BTreeMap;
use std::sync::atomic;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::oneshot;
use tokio::sync::Mutex;
use tokio::task::JoinHandle;
use tokio_task_pool::{Pool, Task};
/// By default, RPC frame and notification handlers are launched in background which allows
/// non-blocking event processing, however events can be processed in random order
///
/// RPC options allow to launch handlers in blocking mode. In this case handlers must process
/// events as fast as possible (e.g. send them to processing channels) and avoid using any RPC
/// client functions from inside.
///
/// WARNING: when handling frames in blocking mode, it is forbidden to use the current RPC client
/// directly or with any kind of bounded channels, otherwise the RPC client may get stuck!
///
/// See https://busrt.readthedocs.io/en/latest/rpc_blocking.html
#[derive(Default, Clone, Debug)]
pub struct Options {
    // await notification handlers inline in the processor loop
    blocking_notifications: bool,
    // await non-RPC frame handlers inline in the processor loop
    blocking_frames: bool,
    // optional bounded task pool for spawned handlers (unbounded tokio::spawn if None)
    task_pool: Option<Arc<Pool>>,
}
impl Options {
    /// Default options: everything handled in background, no task pool.
    #[inline]
    pub fn new() -> Self {
        Self::default()
    }
    /// Handle RPC notifications synchronously inside the processor loop.
    #[inline]
    pub fn blocking_notifications(self) -> Self {
        Self {
            blocking_notifications: true,
            ..self
        }
    }
    /// Handle non-RPC frames synchronously inside the processor loop.
    #[inline]
    pub fn blocking_frames(self) -> Self {
        Self {
            blocking_frames: true,
            ..self
        }
    }
    #[inline]
    /// See <https://crates.io/crates/tokio-task-pool>
    pub fn with_task_pool(self, pool: Pool) -> Self {
        Self {
            task_pool: Some(Arc::new(pool)),
            ..self
        }
    }
}
/// User-supplied handlers for incoming RPC traffic; all methods have
/// do-nothing / method-not-found defaults, so implement only what you need.
#[allow(clippy::module_name_repetitions)]
#[async_trait]
pub trait RpcHandlers {
    /// Handle an incoming RPC call; the default replies "method not found".
    #[allow(unused_variables)]
    async fn handle_call(&self, event: RpcEvent) -> RpcResult {
        Err(RpcError::method(None))
    }
    /// Handle an incoming RPC notification (default: ignore).
    #[allow(unused_variables)]
    async fn handle_notification(&self, event: RpcEvent) {}
    /// Handle a non-RPC frame (broadcasts, topic publications; default: ignore).
    #[allow(unused_variables)]
    async fn handle_frame(&self, frame: Frame) {}
}
pub struct DummyHandlers {}
#[async_trait]
impl RpcHandlers for DummyHandlers {
    // every incoming call is answered with a "method not found" error
    async fn handle_call(&self, _event: RpcEvent) -> RpcResult {
        Err(RpcError::new(
            RPC_ERROR_CODE_METHOD_NOT_FOUND,
            Some("RPC handler is not implemented".as_bytes().to_vec()),
        ))
    }
}
type CallMap = Arc<SyncMutex<BTreeMap<u32, oneshot::Sender<RpcEvent>>>>;
#[async_trait]
pub trait Rpc {
    /// When created, busrt client is wrapped with Arc<Mutex<_>> to let it be sent into
    /// the incoming frames handler future
    ///
    /// This method allows to get the containered-client back, to call its methods directly (manage
    /// pub/sub and send broadcast messages)
    fn client(&self) -> Arc<Mutex<(dyn AsyncClient + 'static)>>;
    /// Send an RPC notification (no method, no reply expected).
    async fn notify(
        &self,
        target: &str,
        data: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error>;
    /// Call the method, no response is required
    async fn call0(
        &self,
        target: &str,
        method: &str,
        params: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error>;
    /// Call the method and get the response
    async fn call(
        &self,
        target: &str,
        method: &str,
        params: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<RpcEvent, RpcError>;
    /// True while the underlying connection is believed to be alive.
    fn is_connected(&self) -> bool;
}
/// RPC layer on top of an [`AsyncClient`]: matches replies to calls, runs
/// user handlers and keeps the connection alive with periodic pings.
#[allow(clippy::module_name_repetitions)]
pub struct RpcClient {
    // last issued call id (0 is reserved for "no reply expected"; wraps MAX -> 1)
    call_id: SyncMutex<u32>,
    // taken from the underlying client; also used as the ping interval
    timeout: Option<Duration>,
    client: Arc<Mutex<dyn AsyncClient>>,
    // frame-processing task; aborted on drop or ping failure
    processor_fut: Arc<SyncMutex<JoinHandle<()>>>,
    // keep-alive task (spawned only when the client has a timeout)
    pinger_fut: Option<JoinHandle<()>>,
    // pending calls awaiting replies
    calls: CallMap,
    // connection-state flag shared with the underlying client, if it provides one
    connected: Option<Arc<atomic::AtomicBool>>,
}
/// Frame-processing loop: classifies each incoming frame as an RPC
/// notification / request / reply or a plain frame and dispatches it to the
/// user handlers (inline or spawned, per `opts`). Replies are routed back to
/// pending calls via `calls`. Exits when the event channel closes.
#[allow(clippy::too_many_lines)]
async fn processor<C, H>(
    rx: EventChannel,
    processor_client: Arc<Mutex<C>>,
    calls: CallMap,
    handlers: Arc<H>,
    opts: Options,
) where
    C: AsyncClient + 'static,
    H: RpcHandlers + Send + Sync + 'static,
{
    // spawn a handler future either on the configured bounded task pool or
    // directly via tokio::spawn
    macro_rules! spawn {
        ($task_id: expr, $fut: expr) => {
            if let Some(ref pool) = opts.task_pool {
                let task = Task::new($fut).with_id($task_id);
                if let Err(e) = pool.spawn_task(task).await {
                    error!("Unable to spawn RPC task: {}", e);
                }
            } else {
                tokio::spawn($fut);
            }
        };
    }
    while let Ok(frame) = rx.recv().await {
        if frame.kind() == FrameKind::Message {
            match RpcEvent::try_from(frame) {
                Ok(event) => match event.kind() {
                    RpcEventKind::Notification => {
                        trace!("RPC notification from {}", event.frame().sender());
                        if opts.blocking_notifications {
                            handlers.handle_notification(event).await;
                        } else {
                            let h = handlers.clone();
                            spawn!("rpc.notification", async move {
                                h.handle_notification(event).await;
                            });
                        }
                    }
                    RpcEventKind::Request => {
                        let id = event.id();
                        trace!(
                            "RPC request from {}, id: {}, method: {:?}",
                            event.frame().sender(),
                            id,
                            event.method()
                        );
                        // id == 0 means the caller does not expect a reply
                        let ev = if id > 0 {
                            Some((event.frame().sender().to_owned(), processor_client.clone()))
                        } else {
                            None
                        };
                        let h = handlers.clone();
                        spawn!("rpc.request", async move {
                            // mirror the request's realtime flag in the reply QoS
                            let qos = if event.frame().is_realtime() {
                                QoS::RealtimeProcessed
                            } else {
                                QoS::Processed
                            };
                            let res = h.handle_call(event).await;
                            if let Some((target, cl)) = ev {
                                // send the reply header zero-copy, with the
                                // handler result (or an empty slice) as payload
                                macro_rules! send_reply {
                                    ($payload: expr, $result: expr) => {{
                                        let mut client = cl.lock().await;
                                        if let Some(result) = $result {
                                            client
                                                .zc_send(&target, $payload, result.into(), qos)
                                                .await
                                        } else {
                                            client
                                                .zc_send(&target, $payload, (&[][..]).into(), qos)
                                                .await
                                        }
                                    }};
                                }
                                match res {
                                    Ok(v) => {
                                        trace!("Sending RPC reply id {} to {}", id, target);
                                        // reply header: [RPC_REPLY][id: u32 LE]
                                        let mut payload = Vec::with_capacity(5);
                                        payload.push(RPC_REPLY);
                                        payload.extend_from_slice(&id.to_le_bytes());
                                        let _r = send_reply!(payload.into(), v);
                                    }
                                    Err(e) => {
                                        trace!(
                                            "Sending RPC error {} reply id {} to {}",
                                            e.code,
                                            id,
                                            target,
                                        );
                                        // error header: [RPC_ERROR][id: u32 LE][code: i16 LE]
                                        let mut payload = Vec::with_capacity(7);
                                        payload.push(RPC_ERROR);
                                        payload.extend_from_slice(&id.to_le_bytes());
                                        payload.extend_from_slice(&e.code.to_le_bytes());
                                        let _r = send_reply!(payload.into(), e.data);
                                    }
                                }
                            }
                        });
                    }
                    RpcEventKind::Reply | RpcEventKind::ErrorReply => {
                        let id = event.id();
                        trace!(
                            "RPC {} from {}, id: {}",
                            event.kind(),
                            event.frame().sender(),
                            id
                        );
                        // wake the pending call, if it is still waiting
                        if let Some(tx) = { calls.lock().remove(&id) } {
                            let _r = tx.send(event);
                        } else {
                            warn!("orphaned RPC response: {}", id);
                        }
                    }
                },
                Err(e) => {
                    error!("{}", e);
                }
            }
        } else if opts.blocking_frames {
            handlers.handle_frame(frame).await;
        } else {
            let h = handlers.clone();
            spawn!("rpc.frame", async move {
                h.handle_frame(frame).await;
            });
        }
    }
}
impl RpcClient {
    /// creates RPC client with the specified handlers and the default options
    pub fn new<H>(client: impl AsyncClient + 'static, handlers: H) -> Self
    where
        H: RpcHandlers + Send + Sync + 'static,
    {
        Self::init(client, handlers, Options::default())
    }
    /// creates RPC client with dummy handlers and the default options
    pub fn new0(client: impl AsyncClient + 'static) -> Self {
        Self::init(client, DummyHandlers {}, Options::default())
    }
    /// creates RPC client
    pub fn create<H>(client: impl AsyncClient + 'static, handlers: H, opts: Options) -> Self
    where
        H: RpcHandlers + Send + Sync + 'static,
    {
        Self::init(client, handlers, opts)
    }
    /// creates RPC client with dummy handlers
    pub fn create0(client: impl AsyncClient + 'static, opts: Options) -> Self {
        Self::init(client, DummyHandlers {}, opts)
    }
    // Wraps the client, spawns the frame processor and (when the client has a
    // timeout) a pinger task that aborts the processor on ping failure.
    fn init<H>(mut client: impl AsyncClient + 'static, handlers: H, opts: Options) -> Self
    where
        H: RpcHandlers + Send + Sync + 'static,
    {
        let timeout = client.get_timeout();
        // the event channel may be taken only once; a second wrap would panic here
        let rx = { client.take_event_channel().unwrap() };
        let connected = client.get_connected_beacon();
        let client = Arc::new(Mutex::new(client));
        let calls: CallMap = <_>::default();
        let processor_fut = Arc::new(SyncMutex::new(tokio::spawn(processor(
            rx,
            client.clone(),
            calls.clone(),
            Arc::new(handlers),
            opts,
        ))));
        let pinger_client = client.clone();
        let pfut = processor_fut.clone();
        // ping every `timeout`; on failure stop the processor as well
        let pinger_fut = timeout.map(|t| {
            tokio::spawn(async move {
                loop {
                    if let Err(e) = pinger_client.lock().await.ping().await {
                        error!("{}", e);
                        pfut.lock().abort();
                        break;
                    }
                    tokio::time::sleep(t).await;
                }
            })
        });
        Self {
            call_id: SyncMutex::new(0),
            timeout,
            client,
            processor_fut,
            pinger_fut,
            calls,
            connected,
        }
    }
}
#[async_trait]
impl Rpc for RpcClient {
    #[inline]
    fn client(&self) -> Arc<Mutex<(dyn AsyncClient + 'static)>> {
        self.client.clone()
    }
    /// Send a fire-and-forget RPC notification ([RPC_NOTIFICATION] + data).
    #[inline]
    async fn notify(
        &self,
        target: &str,
        data: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error> {
        self.client
            .lock()
            .await
            .zc_send(target, (&[RPC_NOTIFICATION][..]).into(), data, qos)
            .await
    }
    /// Call with call id 0 — the peer sends no reply.
    async fn call0(
        &self,
        target: &str,
        method: &str,
        params: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<OpConfirm, Error> {
        let payload = prepare_call_payload(method, &[0, 0, 0, 0]);
        self.client
            .lock()
            .await
            .zc_send(target, payload.into(), params, qos)
            .await
    }
    /// # Panics
    ///
    /// Will panic on poisoned mutex
    async fn call(
        &self,
        target: &str,
        method: &str,
        params: Cow<'async_trait>,
        qos: QoS,
    ) -> Result<RpcEvent, RpcError> {
        // allocate the next call id; 0 is reserved for "no reply", so the
        // counter wraps MAX -> 1
        let call_id = {
            let mut ci = self.call_id.lock();
            let mut call_id = *ci;
            if call_id == u32::MAX {
                call_id = 1;
            } else {
                call_id += 1;
            }
            *ci = call_id;
            call_id
        };
        let payload = prepare_call_payload(method, &call_id.to_le_bytes());
        // register the pending call BEFORE sending so the reply cannot race it
        let (tx, rx) = oneshot::channel();
        self.calls.lock().insert(call_id, tx);
        // on any send error, deregister the pending call before returning
        macro_rules! unwrap_or_cancel {
            ($result: expr) => {
                match $result {
                    Ok(v) => v,
                    Err(e) => {
                        self.calls.lock().remove(&call_id);
                        return Err(Into::<Error>::into(e).into());
                    }
                }
            };
        }
        let opc = {
            let mut client = self.client.lock().await;
            let fut = client.zc_send(target, payload.into(), params, qos);
            if let Some(timeout) = self.timeout {
                unwrap_or_cancel!(unwrap_or_cancel!(tokio::time::timeout(timeout, fut).await))
            } else {
                unwrap_or_cancel!(fut.await)
            }
        };
        // for ACK-requiring QoS, wait for the broker's delivery confirmation
        if let Some(c) = opc {
            unwrap_or_cancel!(unwrap_or_cancel!(c.await));
        }
        // wait for the reply event, then convert error replies into RpcError
        let result = rx.await.map_err(Into::<Error>::into)?;
        if let Ok(e) = RpcError::try_from(&result) {
            Err(e)
        } else {
            Ok(result)
        }
    }
    fn is_connected(&self) -> bool {
        // clients without a beacon are assumed connected
        self.connected
            .as_ref()
            .is_none_or(|b| b.load(atomic::Ordering::Relaxed))
    }
}
impl Drop for RpcClient {
fn drop(&mut self) {
self.pinger_fut.as_ref().map(JoinHandle::abort);
self.processor_fut.lock().abort();
}
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/src/rpc/mod.rs | src/rpc/mod.rs | use crate::{Error, Frame};
use std::fmt;
#[cfg(feature = "rpc")]
mod async_client;
#[cfg(feature = "rpc")]
#[allow(clippy::module_name_repetitions)]
pub use async_client::{DummyHandlers, Options, Rpc, RpcClient, RpcHandlers};
// RPC frame opcodes (first byte of the RPC envelope)
pub const RPC_NOTIFICATION: u8 = 0x00;
pub const RPC_REQUEST: u8 = 0x01;
pub const RPC_REPLY: u8 = 0x11;
pub const RPC_ERROR: u8 = 0x12;
// JSON-RPC-style error codes (the -32xxx range follows the JSON-RPC 2.0 spec)
pub const RPC_ERROR_CODE_NOT_FOUND: i16 = -32001;
pub const RPC_ERROR_CODE_PARSE: i16 = -32700;
pub const RPC_ERROR_CODE_INVALID_REQUEST: i16 = -32600;
pub const RPC_ERROR_CODE_METHOD_NOT_FOUND: i16 = -32601;
pub const RPC_ERROR_CODE_INVALID_METHOD_PARAMS: i16 = -32602;
pub const RPC_ERROR_CODE_INTERNAL: i16 = -32603;
/// Kind of an incoming RPC event; discriminant values match the wire opcodes.
#[allow(clippy::module_name_repetitions)]
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
#[repr(u8)]
pub enum RpcEventKind {
    /// Fire-and-forget message, no reply expected.
    Notification = RPC_NOTIFICATION,
    /// Method call; a non-zero call id means a reply is expected.
    Request = RPC_REQUEST,
    /// Successful method reply.
    Reply = RPC_REPLY,
    /// Error reply carrying an i16 error code.
    ErrorReply = RPC_ERROR,
}
/// Converts any displayable value into an RPC error payload.
#[allow(clippy::module_name_repetitions)]
#[inline]
pub fn rpc_err_str(v: impl fmt::Display) -> Option<Vec<u8>> {
    // `into_bytes` reuses the String's buffer; the previous
    // `as_bytes().to_vec()` made a second, redundant allocation + copy
    Some(v.to_string().into_bytes())
}
impl fmt::Display for RpcEventKind {
    /// Human-readable event kind for logging.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{}",
            match self {
                // fixed typo: was "notifcation"
                RpcEventKind::Notification => "notification",
                RpcEventKind::Request => "request",
                RpcEventKind::Reply => "reply",
                RpcEventKind::ErrorReply => "error reply",
            }
        )
    }
}
/// A parsed RPC event wrapping a raw bus [`Frame`].
#[allow(clippy::module_name_repetitions)]
#[derive(Debug)]
pub struct RpcEvent {
    kind: RpcEventKind,
    frame: Frame,
    // byte offset of the RPC payload inside the frame payload
    // (0 when the RPC envelope lives in the frame header instead)
    payload_pos: usize,
    // true when opcode/id/method are carried in the frame header
    use_header: bool,
}
impl RpcEvent {
    /// Event kind (notification / request / reply / error reply).
    #[inline]
    pub fn kind(&self) -> RpcEventKind {
        self.kind
    }
    /// The underlying bus frame.
    #[inline]
    pub fn frame(&self) -> &Frame {
        &self.frame
    }
    /// Sender name of the underlying frame.
    #[inline]
    pub fn sender(&self) -> &str {
        self.frame.sender()
    }
    /// Primary sender name of the underlying frame.
    #[inline]
    pub fn primary_sender(&self) -> &str {
        self.frame.primary_sender()
    }
    /// RPC payload: the frame payload with the RPC envelope stripped.
    #[inline]
    pub fn payload(&self) -> &[u8] {
        &self.frame().payload()[self.payload_pos..]
    }
    /// Call id: little-endian u32 at bytes 1..5 of the header or payload
    /// (the envelope length was validated in `TryFrom<Frame>`).
    ///
    /// # Panics
    ///
    /// Should not panic
    #[inline]
    pub fn id(&self) -> u32 {
        u32::from_le_bytes(
            if self.use_header {
                &self.frame.header().unwrap()[1..5]
            } else {
                &self.frame.payload()[1..5]
            }
            .try_into()
            .unwrap(),
        )
    }
    /// A zero call id means the caller does not expect a reply.
    #[inline]
    pub fn is_response_required(&self) -> bool {
        self.id() != 0
    }
    /// Raw method name: the bytes between the call id and the NUL terminator.
    ///
    /// # Panics
    ///
    /// Should not panic
    #[inline]
    pub fn method(&self) -> &[u8] {
        if self.use_header {
            let header = self.frame.header.as_ref().unwrap();
            &header[5..header.len() - 1]
        } else {
            &self.frame().payload()[5..self.payload_pos - 1]
        }
    }
    /// Method name parsed as UTF-8.
    #[inline]
    pub fn parse_method(&self) -> Result<&str, Error> {
        std::str::from_utf8(self.method()).map_err(Into::into)
    }
    /// Error code (little-endian i16 at bytes 5..7) for error replies;
    /// 0 for any other event kind.
    ///
    /// # Panics
    ///
    /// Should not panic
    #[inline]
    pub fn code(&self) -> i16 {
        if self.kind == RpcEventKind::ErrorReply {
            i16::from_le_bytes(
                if self.use_header {
                    &self.frame.header().unwrap()[5..7]
                } else {
                    &self.frame.payload()[5..7]
                }
                .try_into()
                .unwrap(),
            )
        } else {
            0
        }
    }
}
impl TryFrom<Frame> for RpcEvent {
    type Error = Error;
    // Parses the RPC envelope: byte 0 is the opcode, bytes 1..5 the call id,
    // then (for payload-carried requests) a NUL-terminated method name
    // followed by the params block.
    fn try_from(frame: Frame) -> Result<Self, Self::Error> {
        // the envelope may live either in the optional frame header or at the
        // start of the payload
        let (body, use_header) = frame
            .header()
            .map_or_else(|| (frame.payload(), false), |h| (h, true));
        if body.is_empty() {
            Err(Error::data("Empty RPC frame"))
        } else {
            // bail out early if the envelope is shorter than required
            macro_rules! check_len {
                ($len: expr) => {
                    if body.len() < $len {
                        return Err(Error::data("Invalid RPC frame"));
                    }
                };
            }
            match body[0] {
                RPC_NOTIFICATION => Ok(RpcEvent {
                    kind: RpcEventKind::Notification,
                    frame,
                    // skip the opcode byte when it is part of the payload
                    payload_pos: usize::from(!use_header),
                    use_header: false,
                }),
                RPC_REQUEST => {
                    check_len!(6);
                    if use_header {
                        Ok(RpcEvent {
                            kind: RpcEventKind::Request,
                            frame,
                            payload_pos: 0,
                            use_header: true,
                        })
                    } else {
                        // method name is NUL-terminated after the 5-byte prefix
                        let mut sp = body[5..].splitn(2, |c| *c == 0);
                        let method = sp.next().ok_or_else(|| Error::data("No RPC method"))?;
                        let payload_pos = 6 + method.len();
                        // a params block (possibly empty) must follow the NUL
                        sp.next()
                            .ok_or_else(|| Error::data("No RPC params block"))?;
                        Ok(RpcEvent {
                            kind: RpcEventKind::Request,
                            frame,
                            payload_pos,
                            use_header: false,
                        })
                    }
                }
                RPC_REPLY => {
                    check_len!(5);
                    Ok(RpcEvent {
                        kind: RpcEventKind::Reply,
                        frame,
                        payload_pos: if use_header { 0 } else { 5 },
                        use_header,
                    })
                }
                RPC_ERROR => {
                    // 2 extra bytes for the i16 error code
                    check_len!(7);
                    Ok(RpcEvent {
                        kind: RpcEventKind::ErrorReply,
                        frame,
                        payload_pos: if use_header { 0 } else { 7 },
                        use_header,
                    })
                }
                v => Err(Error::data(format!("Unsupported RPC frame code {}", v))),
            }
        }
    }
}
/// An RPC-level error: a JSON-RPC-style code plus an optional payload.
#[allow(clippy::module_name_repetitions)]
#[derive(Debug)]
pub struct RpcError {
    // negative JSON-RPC-style error code
    code: i16,
    // optional error payload (usually UTF-8 text)
    data: Option<Vec<u8>>,
}
impl TryFrom<&RpcEvent> for RpcError {
    type Error = Error;
    // Extracts an RpcError from an error-reply event; any other event kind
    // is rejected.
    #[inline]
    fn try_from(event: &RpcEvent) -> Result<Self, Self::Error> {
        if event.kind() == RpcEventKind::ErrorReply {
            Ok(RpcError::new(event.code(), Some(event.payload().to_vec())))
        } else {
            Err(Error::data("not a RPC error"))
        }
    }
}
impl RpcError {
    /// Creates an error with an arbitrary code and optional payload.
    #[inline]
    pub fn new(code: i16, data: Option<Vec<u8>>) -> Self {
        Self { code, data }
    }
    /// Error code.
    #[inline]
    pub fn code(&self) -> i16 {
        self.code
    }
    /// Optional error payload (usually UTF-8 text).
    #[inline]
    pub fn data(&self) -> Option<&[u8]> {
        self.data.as_deref()
    }
    /// "Method not found" error.
    #[inline]
    pub fn method(err: Option<Vec<u8>>) -> Self {
        Self {
            code: RPC_ERROR_CODE_METHOD_NOT_FOUND,
            data: err,
        }
    }
    /// "Not found" error.
    #[inline]
    pub fn not_found(err: Option<Vec<u8>>) -> Self {
        Self {
            code: RPC_ERROR_CODE_NOT_FOUND,
            data: err,
        }
    }
    /// "Invalid method parameters" error.
    #[inline]
    pub fn params(err: Option<Vec<u8>>) -> Self {
        Self {
            code: RPC_ERROR_CODE_INVALID_METHOD_PARAMS,
            data: err,
        }
    }
    /// "Parse" error.
    #[inline]
    pub fn parse(err: Option<Vec<u8>>) -> Self {
        Self {
            code: RPC_ERROR_CODE_PARSE,
            data: err,
        }
    }
    /// "Invalid request" error.
    #[inline]
    pub fn invalid(err: Option<Vec<u8>>) -> Self {
        Self {
            code: RPC_ERROR_CODE_INVALID_REQUEST,
            data: err,
        }
    }
    /// "Internal" error.
    #[inline]
    pub fn internal(err: Option<Vec<u8>>) -> Self {
        Self {
            code: RPC_ERROR_CODE_INTERNAL,
            data: err,
        }
    }
    /// Converts displayable to Vec<u8>
    #[inline]
    pub fn convert_data(v: impl fmt::Display) -> Vec<u8> {
        // `into_bytes` reuses the String's buffer; the previous
        // `as_bytes().to_vec()` made a second, redundant allocation + copy
        v.to_string().into_bytes()
    }
}
impl From<Error> for RpcError {
    #[inline]
    fn from(e: Error) -> RpcError {
        RpcError {
            // bus error kinds are mapped into the reserved -32000.. range
            code: -32000 - e.kind() as i16,
            data: None,
        }
    }
}
#[cfg(feature = "broker-rpc")]
impl From<rmp_serde::encode::Error> for RpcError {
#[inline]
fn from(e: rmp_serde::encode::Error) -> RpcError {
RpcError {
code: RPC_ERROR_CODE_INTERNAL,
data: Some(e.to_string().as_bytes().to_vec()),
}
}
}
impl From<regex::Error> for RpcError {
#[inline]
fn from(e: regex::Error) -> RpcError {
RpcError {
code: RPC_ERROR_CODE_PARSE,
data: Some(e.to_string().as_bytes().to_vec()),
}
}
}
impl From<std::io::Error> for RpcError {
#[inline]
fn from(e: std::io::Error) -> RpcError {
RpcError {
code: RPC_ERROR_CODE_INTERNAL,
data: Some(e.to_string().as_bytes().to_vec()),
}
}
}
#[cfg(feature = "broker-rpc")]
impl From<rmp_serde::decode::Error> for RpcError {
#[inline]
fn from(e: rmp_serde::decode::Error) -> RpcError {
RpcError {
code: RPC_ERROR_CODE_PARSE,
data: Some(e.to_string().as_bytes().to_vec()),
}
}
}
impl fmt::Display for RpcError {
    // NOTE: only the code is printed; the optional data payload is not shown
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "rpc error code: {}", self.code)
    }
}
impl std::error::Error for RpcError {}
/// Result returned by RPC method handlers: an optional reply payload on
/// success, or an [`RpcError`].
#[allow(clippy::module_name_repetitions)]
pub type RpcResult = Result<Option<Vec<u8>>, RpcError>;
/// Builds the RPC request envelope: opcode byte, call id bytes, method name,
/// NUL terminator. The params block is sent separately as the frame payload.
#[inline]
pub(crate) fn prepare_call_payload(method: &str, id_bytes: &[u8]) -> Vec<u8> {
    let m = method.as_bytes();
    // opcode (1) + id + method + NUL (1); the previous capacity hard-coded a
    // 4-byte id (`m.len() + 6`), under-reserving for other id widths
    let mut payload = Vec::with_capacity(1 + id_bytes.len() + m.len() + 1);
    payload.push(RPC_REQUEST);
    payload.extend_from_slice(id_bytes);
    payload.extend_from_slice(m);
    payload.push(0x00);
    payload
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/src/sync/client.rs | src/sync/client.rs | use crate::borrow::Cow;
use crate::{Error, QoS};
use std::sync::{atomic, Arc};
use std::time::Duration;
use crate::{SyncEventChannel, SyncOpConfirm};
/// Blocking bus client interface.
#[allow(clippy::module_name_repetitions)]
pub trait SyncClient {
    /// Takes the incoming-event channel out of the client (succeeds once).
    fn take_event_channel(&mut self) -> Option<SyncEventChannel>;
    /// Sends a message to a single peer.
    fn send(&mut self, target: &str, payload: Cow<'_>, qos: QoS) -> Result<SyncOpConfirm, Error>;
    /// Sends a message with a separate (zero-copy) header block.
    fn zc_send(
        &mut self,
        target: &str,
        header: Cow<'_>,
        payload: Cow<'_>,
        qos: QoS,
    ) -> Result<SyncOpConfirm, Error>;
    /// Sends a broadcast message to a target mask.
    fn send_broadcast(
        &mut self,
        target: &str,
        payload: Cow<'_>,
        qos: QoS,
    ) -> Result<SyncOpConfirm, Error>;
    /// Publishes a message to a topic.
    fn publish(&mut self, topic: &str, payload: Cow<'_>, qos: QoS) -> Result<SyncOpConfirm, Error>;
    /// Publishes a message to a topic for a single receiver only
    /// (optional; the default implementation reports "not supported").
    #[allow(unused_variables)]
    fn publish_for(
        &mut self,
        topic: &str,
        receiver: &str,
        payload: Cow<'_>,
        qos: QoS,
    ) -> Result<SyncOpConfirm, Error> {
        Err(Error::not_supported("publish_for"))
    }
    /// Subscribes to a topic (wildcards are broker-defined).
    fn subscribe(&mut self, topic: &str, qos: QoS) -> Result<SyncOpConfirm, Error>;
    /// Unsubscribes from a topic.
    fn unsubscribe(&mut self, topic: &str, qos: QoS) -> Result<SyncOpConfirm, Error>;
    /// Subscribes to multiple topics with a single operation.
    fn subscribe_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<SyncOpConfirm, Error>;
    /// Unsubscribes from multiple topics with a single operation.
    fn unsubscribe_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<SyncOpConfirm, Error>;
    /// exclude a topic. it is highly recommended to exclude topics first, then call subscribe
    /// operations to avoid receiving unwanted messages. excluding topics is also an additional
    /// heavy operation so use it only when there is no other way.
    fn exclude(&mut self, topic: &str, qos: QoS) -> Result<SyncOpConfirm, Error>;
    /// unexclude a topic (include back but not subscribe)
    fn unexclude(&mut self, topic: &str, qos: QoS) -> Result<SyncOpConfirm, Error>;
    /// exclude multiple topics
    fn exclude_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<SyncOpConfirm, Error>;
    /// unexclude multiple topics (include back but not subscribe)
    fn unexclude_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<SyncOpConfirm, Error>;
    /// Sends a keep-alive ping frame.
    fn ping(&mut self) -> Result<(), Error>;
    /// Returns `true` while the connection is believed to be alive.
    fn is_connected(&self) -> bool;
    /// Shared connection flag, if the implementation provides one.
    fn get_connected_beacon(&self) -> Option<Arc<atomic::AtomicBool>>;
    /// Operation timeout, if the implementation has one.
    fn get_timeout(&self) -> Option<Duration>;
    /// The client's registered name.
    fn get_name(&self) -> &str;
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/src/sync/mod.rs | src/sync/mod.rs | pub mod client;
/// Blocking IPC (unix socket / TCP) client implementation.
pub mod ipc;
/// Blocking RPC layer on top of the sync client.
#[cfg(feature = "rpc-sync")]
pub mod rpc;
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/src/sync/rpc.rs | src/sync/rpc.rs | use crate::borrow::Cow;
use crate::rpc::{
prepare_call_payload, RpcError, RpcEvent, RpcEventKind, RpcResult, RPC_ERROR,
RPC_ERROR_CODE_METHOD_NOT_FOUND, RPC_NOTIFICATION, RPC_REPLY,
};
use crate::sync::client::SyncClient;
use crate::SyncEventChannel;
use crate::{Error, Frame, FrameKind, QoS, SyncOpConfirm};
use log::{error, trace, warn};
#[cfg(not(feature = "rt"))]
use parking_lot::Mutex;
#[cfg(feature = "rt")]
use parking_lot_rt::Mutex;
use std::collections::BTreeMap;
use std::sync::atomic;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
/// Callbacks invoked by the [`Processor`] for incoming traffic.
#[allow(clippy::module_name_repetitions)]
pub trait SyncRpcHandlers {
    /// Handles an RPC method call (runs on a thread spawned per request);
    /// the default implementation replies "method not found".
    #[allow(unused_variables)]
    fn handle_call(&self, event: RpcEvent) -> RpcResult {
        Err(RpcError::method(None))
    }
    /// Handles an RPC notification (no reply expected).
    #[allow(unused_variables)]
    fn handle_notification(&self, event: RpcEvent) {}
    /// Handles a non-RPC frame (e.g. pub/sub publications).
    #[allow(unused_variables)]
    fn handle_frame(&self, frame: Frame) {}
}
/// Fallback handler set: every incoming RPC call is answered with a
/// "method not found" error, notifications and frames are ignored.
pub struct DummyHandlers {}
impl SyncRpcHandlers for DummyHandlers {
    fn handle_call(&self, _event: RpcEvent) -> RpcResult {
        let message = b"RPC handler is not implemented".to_vec();
        Err(RpcError::new(RPC_ERROR_CODE_METHOD_NOT_FOUND, Some(message)))
    }
}
// Pending outgoing calls: call id -> reply channel sender.
type CallMap = Arc<Mutex<BTreeMap<u32, oneshot::Sender<RpcEvent>>>>;
#[allow(clippy::module_name_repetitions)]
pub trait SyncRpc {
    /// When created, busrt client is wrapped with Arc<Mutex<_>> to let it be shared with
    /// the incoming frames processor
    ///
    /// This method allows to get the contained client back, to call its methods directly (manage
    /// pub/sub and send broadcast messages)
    fn client(&self) -> Arc<Mutex<(dyn SyncClient + Send + 'static)>>;
    /// Sends an RPC notification (no reply expected).
    fn notify(&self, target: &str, data: Cow<'_>, qos: QoS) -> Result<SyncOpConfirm, Error>;
    /// Call the method, no response is required
    fn call0(
        &self,
        target: &str,
        method: &str,
        params: Cow<'_>,
        qos: QoS,
    ) -> Result<SyncOpConfirm, Error>;
    /// Call the method and get the response
    fn call(
        &self,
        target: &str,
        method: &str,
        params: Cow<'_>,
        qos: QoS,
    ) -> Result<RpcEvent, RpcError>;
    /// Returns `true` when the underlying client reports itself connected.
    fn is_connected(&self) -> bool;
}
/// Blocking RPC client; pair it with its [`Processor`] (run in a thread).
#[allow(clippy::module_name_repetitions)]
pub struct RpcClient {
    // next outgoing call id (wraps to 1 after u32::MAX; 0 means "no reply")
    call_id: Mutex<u32>,
    // per-call reply timeout, taken from the underlying client
    timeout: Option<Duration>,
    client: Arc<Mutex<dyn SyncClient + Send>>,
    // pending calls awaiting replies, shared with the Processor
    calls: CallMap,
    // connection beacon shared with the reader, if the client provides one
    connected: Option<Arc<atomic::AtomicBool>>,
}
/// Incoming-frame processor; call [`Processor::run`] in a dedicated thread.
pub struct Processor {
    rx: SyncEventChannel,
    client: Arc<Mutex<dyn SyncClient + Send>>,
    calls: CallMap,
    handlers: Arc<dyn SyncRpcHandlers + Send + Sync>,
}
impl Processor {
    /// Runs the frame-dispatch loop until the event channel is closed.
    ///
    /// Notifications and replies are handled inline; each incoming request is
    /// handled on its own spawned thread so a slow handler cannot stall the
    /// loop.
    #[allow(clippy::too_many_lines)]
    pub fn run(self) {
        let Self {
            rx,
            client,
            calls,
            handlers,
        } = self;
        while let Ok(frame) = rx.recv() {
            if frame.kind() == FrameKind::Message {
                match RpcEvent::try_from(frame) {
                    Ok(event) => match event.kind() {
                        RpcEventKind::Notification => {
                            trace!("RPC notification from {}", event.frame().sender());
                            handlers.handle_notification(event);
                        }
                        RpcEventKind::Request => {
                            let id = event.id();
                            trace!(
                                "RPC request from {}, id: {}, method: {:?}",
                                event.frame().sender(),
                                id,
                                event.method()
                            );
                            // a non-zero id means the caller expects a reply;
                            // capture what the worker needs to send it
                            let ev = if id > 0 {
                                Some((event.frame().sender().to_owned(), client.clone()))
                            } else {
                                None
                            };
                            let h = handlers.clone();
                            thread::spawn(move || {
                                // mirror the request's realtime flag in the reply QoS
                                let qos = if event.frame().is_realtime() {
                                    QoS::RealtimeProcessed
                                } else {
                                    QoS::Processed
                                };
                                let res = h.handle_call(event);
                                if let Some((target, cl)) = ev {
                                    // sends the envelope as a zero-copy header and
                                    // the handler result (or nothing) as payload
                                    macro_rules! send_reply {
                                        ($payload: expr, $result: expr) => {{
                                            let mut client = cl.lock();
                                            if let Some(result) = $result {
                                                client.zc_send(
                                                    &target,
                                                    $payload,
                                                    result.into(),
                                                    qos,
                                                )
                                            } else {
                                                client.zc_send(
                                                    &target,
                                                    $payload,
                                                    (&[][..]).into(),
                                                    qos,
                                                )
                                            }
                                        }};
                                    }
                                    match res {
                                        Ok(v) => {
                                            trace!("Sending RPC reply id {} to {}", id, target);
                                            let mut payload = Vec::with_capacity(5);
                                            payload.push(RPC_REPLY);
                                            payload.extend_from_slice(&id.to_le_bytes());
                                            let _r = send_reply!(payload.into(), v);
                                        }
                                        Err(e) => {
                                            trace!(
                                                "Sending RPC error {} reply id {} to {}",
                                                e.code(),
                                                id,
                                                target,
                                            );
                                            let mut payload = Vec::with_capacity(7);
                                            payload.push(RPC_ERROR);
                                            payload.extend_from_slice(&id.to_le_bytes());
                                            payload.extend_from_slice(&e.code().to_le_bytes());
                                            let _r = send_reply!(payload.into(), e.data());
                                        }
                                    }
                                }
                            });
                        }
                        RpcEventKind::Reply | RpcEventKind::ErrorReply => {
                            let id = event.id();
                            trace!(
                                "RPC {} from {}, id: {}",
                                event.kind(),
                                event.frame().sender(),
                                id
                            );
                            // route the reply to whoever registered the call id
                            if let Some(tx) = { calls.lock().remove(&id) } {
                                let _r = tx.send(event);
                            } else {
                                warn!("orphaned RPC response: {}", id);
                            }
                        }
                    },
                    Err(e) => {
                        error!("{}", e);
                    }
                }
            } else {
                // non-message frames (e.g. publications) go to the frame handler
                handlers.handle_frame(frame);
            }
        }
    }
}
impl RpcClient {
/// creates RPC client with the specified handlers and the default options
pub fn new<H>(client: impl SyncClient + Send + 'static, handlers: H) -> (Self, Processor)
where
H: SyncRpcHandlers + Send + Sync + 'static,
{
Self::init(client, handlers)
}
/// creates RPC client with dummy handlers and the default options
pub fn new0(client: impl SyncClient + Send + 'static) -> (Self, Processor) {
Self::init(client, DummyHandlers {})
}
fn init<H>(mut client: impl SyncClient + Send + 'static, handlers: H) -> (Self, Processor)
where
H: SyncRpcHandlers + Send + Sync + 'static,
{
let timeout = client.get_timeout();
let rx = { client.take_event_channel().unwrap() };
let connected = client.get_connected_beacon();
let client = Arc::new(Mutex::new(client));
let calls: CallMap = <_>::default();
let processor = Processor {
rx,
client: client.clone(),
calls: calls.clone(),
handlers: Arc::new(handlers),
};
(
Self {
call_id: Mutex::new(0),
timeout,
client,
calls,
connected,
},
processor,
)
}
}
impl SyncRpc for RpcClient {
    #[inline]
    fn client(&self) -> Arc<Mutex<(dyn SyncClient + Send + 'static)>> {
        self.client.clone()
    }
    // sends a notification: envelope is just the opcode byte
    #[inline]
    fn notify(&self, target: &str, data: Cow<'_>, qos: QoS) -> Result<SyncOpConfirm, Error> {
        self.client
            .lock()
            .zc_send(target, (&[RPC_NOTIFICATION][..]).into(), data, qos)
    }
    // call with id 0 = "no reply expected"
    fn call0(
        &self,
        target: &str,
        method: &str,
        params: Cow<'_>,
        qos: QoS,
    ) -> Result<SyncOpConfirm, Error> {
        let payload = prepare_call_payload(method, &[0, 0, 0, 0]);
        self.client
            .lock()
            .zc_send(target, payload.into(), params, qos)
    }
    /// # Panics
    ///
    /// Will panic on poisoned mutex
    fn call(
        &self,
        target: &str,
        method: &str,
        params: Cow<'_>,
        qos: QoS,
    ) -> Result<RpcEvent, RpcError> {
        // allocate the next call id, skipping 0 (0 = "no reply expected")
        let call_id = {
            let mut ci = self.call_id.lock();
            let mut call_id = *ci;
            if call_id == u32::MAX {
                call_id = 1;
            } else {
                call_id += 1;
            }
            *ci = call_id;
            call_id
        };
        let payload = prepare_call_payload(method, &call_id.to_le_bytes());
        let (tx, rx) = oneshot::channel();
        // register the pending call BEFORE sending so a fast reply cannot be lost
        self.calls.lock().insert(call_id, tx);
        // NOTE(review): this guard stays locked until the function returns, i.e.
        // while waiting for the reply below — confirm this serialization of
        // concurrent calls is intended
        let mut client = self.client.lock();
        client.zc_send(target, payload.into(), params, qos)?;
        // wait for the reply, removing the pending-call entry on any failure
        let result = if let Some(timeout) = self.timeout {
            match rx.recv_timeout(timeout) {
                Ok(v) => v,
                Err(oneshot::RecvTimeoutError::Timeout) => {
                    self.calls.lock().remove(&call_id);
                    return Err(Error::timeout().into());
                }
                Err(e) => {
                    self.calls.lock().remove(&call_id);
                    return Err(Error::io(e).into());
                }
            }
        } else {
            match rx.recv() {
                Ok(v) => v,
                Err(e) => {
                    self.calls.lock().remove(&call_id);
                    return Err(Error::io(e).into());
                }
            }
        };
        // error replies are surfaced as Err(RpcError)
        if let Ok(e) = RpcError::try_from(&result) {
            Err(e)
        } else {
            Ok(result)
        }
    }
    // a client without a connection beacon is considered always connected
    fn is_connected(&self) -> bool {
        self.connected
            .as_ref()
            .map_or(true, |b| b.load(atomic::Ordering::Relaxed))
    }
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/src/sync/ipc.rs | src/sync/ipc.rs | use crate::borrow::Cow;
use crate::Error;
use crate::IntoBusRtResult;
use crate::QoS;
use crate::GREETINGS;
use crate::PING_FRAME;
use crate::PROTOCOL_VERSION;
use crate::RESPONSE_OK;
use crate::SECONDARY_SEP;
use crate::{FrameData, FrameKind, FrameOp};
#[cfg(not(feature = "rt"))]
use parking_lot::Mutex;
#[cfg(feature = "rt")]
use rtsc::pi::Mutex;
use std::collections::BTreeMap;
use std::io::BufReader;
use std::io::Read;
use std::io::Write;
use std::net::Shutdown;
use std::net::TcpStream;
use std::os::unix::net::UnixStream;
use std::sync::atomic;
use std::sync::Arc;
use std::time::Duration;
use log::{error, trace, warn};
use crate::sync::client::SyncClient;
use crate::SyncEventChannel;
use crate::SyncEventSender;
use crate::SyncOpConfirm;
// Pending frame acknowledgements: frame id -> ack channel sender.
type ResponseMap = Arc<Mutex<BTreeMap<u32, oneshot::Sender<Result<(), Error>>>>>;
/// Client connection configuration (builder-style).
#[derive(Debug, Clone)]
pub struct Config {
    // unix socket path or host:port
    path: String,
    // unique client name registered with the broker
    name: String,
    // reader buffer size in bytes
    buf_size: usize,
    // incoming event queue size in frames
    queue_size: usize,
    // socket write / operation timeout
    timeout: Duration,
}
impl Config {
    /// path - /path/to/socket (must end with .sock .socket or .ipc) or host:port,
    /// name - an unique client name
    pub fn new(path: &str, name: &str) -> Self {
        Self {
            path: path.to_owned(),
            name: name.to_owned(),
            buf_size: crate::DEFAULT_BUF_SIZE,
            queue_size: crate::DEFAULT_QUEUE_SIZE,
            timeout: crate::DEFAULT_TIMEOUT,
        }
    }
    /// Sets the reader buffer size (bytes).
    pub fn buf_size(mut self, size: usize) -> Self {
        self.buf_size = size;
        self
    }
    /// Sets the incoming event queue size (frames).
    pub fn queue_size(mut self, size: usize) -> Self {
        self.queue_size = size;
        self
    }
    /// Sets the socket write / operation timeout.
    pub fn timeout(mut self, timeout: Duration) -> Self {
        self.timeout = timeout;
        self
    }
}
// Internal abstraction over the supported transports (unix / TCP), adding an
// infallible shutdown used on drop and after write errors.
trait Socket: Read + Write {
    fn shutdown(&self);
}
#[cfg(not(target_os = "windows"))]
impl Socket for UnixStream {
    fn shutdown(&self) {
        // best-effort: the socket may already be closed
        let _ = self.shutdown(Shutdown::Both);
    }
}
impl Socket for TcpStream {
    fn shutdown(&self) {
        // best-effort: the socket may already be closed
        let _ = self.shutdown(Shutdown::Both);
    }
}
/// Blocking bus client (writer half); the matching [`Reader`] handles input.
pub struct Client {
    name: String,
    writer: Box<dyn Socket + Send>,
    // last used outgoing frame id (wraps to 1 after u32::MAX, skipping 0)
    frame_id: u32,
    // pending QoS acknowledgements, shared with the Reader
    responses: ResponseMap,
    // incoming event channel, handed out once via take_event_channel()
    rx: Option<SyncEventChannel>,
    // shared connection flag, cleared on write failure or reader exit
    connected: Arc<atomic::AtomicBool>,
    timeout: Duration,
    // kept to allow spawning secondary clients with the same settings
    config: Config,
    // counter used to generate unique secondary client names
    secondary_counter: atomic::AtomicUsize,
}
// keep these as macros to ensure inlining and avoid unnecessary futures
// Builds the 5-byte frame prefix: 4-byte LE frame id + (op | qos << 6) byte,
// after bumping the client's frame id.
macro_rules! prepare_frame_buf {
    ($self: expr, $op: expr, $qos: expr, $expected_header_len: expr) => {{
        $self.increment_frame_id();
        let mut buf = Vec::with_capacity($expected_header_len + 4 + 1);
        buf.extend($self.frame_id.to_le_bytes());
        buf.push($op as u8 | ($qos as u8) << 6);
        buf
    }};
}
// Writes raw bytes; on failure flips the connected flag, shuts the socket
// down and returns the error from the enclosing function.
macro_rules! send_data_or_mark_disconnected {
    ($self: expr, $data: expr) => {
        if let Err(e) = $self.writer.write_all($data) {
            $self.connected.store(false, atomic::Ordering::Relaxed);
            $self.writer.shutdown();
            return Err(e.into());
        }
    };
}
// Writes header + payload; for ack-requiring QoS a confirmation channel is
// registered under the current frame id BEFORE anything hits the wire.
macro_rules! send_frame_and_confirm {
    ($self: expr, $buf: expr, $payload: expr, $qos: expr) => {{
        let rx = if $qos.needs_ack() {
            let (tx, rx) = oneshot::channel();
            {
                $self.responses.lock().insert($self.frame_id, tx);
            }
            Some(rx)
        } else {
            None
        };
        send_data_or_mark_disconnected!($self, $buf);
        send_data_or_mark_disconnected!($self, $payload);
        Ok(rx)
    }};
}
macro_rules! send_zc_frame {
    // zc-send to target or topic: frame body is target NUL header payload
    ($self: expr, $target: expr, $header: expr, $payload: expr, $op: expr, $qos: expr) => {{
        let t = $target.as_bytes();
        let mut buf = prepare_frame_buf!($self, $op, $qos, 4 + t.len() + 1 + $header.len());
        #[allow(clippy::cast_possible_truncation)]
        buf.extend_from_slice(
            &((t.len() + $payload.len() + $header.len() + 1) as u32).to_le_bytes(),
        );
        buf.extend_from_slice(t);
        buf.push(0x00);
        buf.extend_from_slice($header);
        trace!("sending busrt {:?} to {} QoS={:?}", $op, $target, $qos);
        send_frame_and_confirm!($self, &buf, $payload, $qos)
    }};
}
macro_rules! send_frame {
    // send to target or topic
    ($self: expr, $target: expr, $payload: expr, $op: expr, $qos: expr) => {{
        let t = $target.as_bytes();
        let mut buf = prepare_frame_buf!($self, $op, $qos, 4 + t.len() + 1);
        #[allow(clippy::cast_possible_truncation)]
        buf.extend_from_slice(&((t.len() + $payload.len() + 1) as u32).to_le_bytes());
        buf.extend_from_slice(t);
        buf.push(0x00);
        trace!("sending busrt {:?} to {} QoS={:?}", $op, $target, $qos);
        send_frame_and_confirm!($self, &buf, $payload, $qos)
    }};
    // send to topic with a receiver
    ($self: expr, $target: expr, $receiver: expr, $payload: expr, $op: expr, $qos: expr) => {{
        let t = $target.as_bytes();
        let r = $receiver.as_bytes();
        let mut buf = prepare_frame_buf!($self, $op, $qos, 4 + t.len() + 1 + r.len() + 1);
        #[allow(clippy::cast_possible_truncation)]
        buf.extend_from_slice(&((t.len() + r.len() + $payload.len() + 2) as u32).to_le_bytes());
        buf.extend_from_slice(t);
        buf.push(0x00);
        buf.extend_from_slice(r);
        buf.push(0x00);
        trace!("sending busrt {:?} to {} QoS={:?}", $op, $target, $qos);
        send_frame_and_confirm!($self, &buf, $payload, $qos)
    }};
    // send w/o a target
    ($self: expr, $payload: expr, $op: expr, $qos: expr) => {{
        let mut buf = prepare_frame_buf!($self, $op, $qos, 4);
        #[allow(clippy::cast_possible_truncation)]
        buf.extend_from_slice(&($payload.len() as u32).to_le_bytes());
        send_frame_and_confirm!($self, &buf, $payload, $qos)
    }};
}
// Performs the greeting handshake, then builds the bounded event channel and
// the Reader sharing the response map and connection flag.
macro_rules! connect_broker {
    ($name: expr, $socket: expr, $reader: expr,
     $responses: expr, $connected: expr, $timeout: expr, $queue_size: expr) => {{
        chat($name, &mut $socket)?;
        let (tx, rx) = rtsc::channel::bounded($queue_size);
        let reader_responses = $responses.clone();
        let rconn = $connected.clone();
        (
            rx,
            Reader {
                inner: Box::new($reader),
                tx,
                responses: reader_responses,
                rconn,
            },
        )
    }};
}
impl Client {
    /// Creates a new client instance. The Reader must be started manually by calling
    /// `Reader::run()` (e.g. in a separate thread).
    pub fn connect(config: &Config) -> Result<(Self, Reader), Error> {
        Self::connect_broker(config, None)
    }
    /// Creates a client over an already-connected unix stream; the Reader must
    /// still be started manually.
    pub fn connect_stream(stream: UnixStream, config: &Config) -> Result<(Self, Reader), Error> {
        Self::connect_broker(config, Some(stream))
    }
    // Establishes the connection (unix socket or TCP, chosen by the path
    // format), performs the handshake and splits it into writer + Reader.
    fn connect_broker(
        config: &Config,
        stream: Option<UnixStream>,
    ) -> Result<(Self, Reader), Error> {
        let responses: ResponseMap = <_>::default();
        let connected = Arc::new(atomic::AtomicBool::new(true));
        // socket-like suffixes or an absolute path select a unix socket;
        // anything else is treated as host:port
        #[allow(clippy::case_sensitive_file_extension_comparisons)]
        let (writer, rx, reader) = if config.path.ends_with(".sock")
            || config.path.ends_with(".socket")
            || config.path.ends_with(".ipc")
            || config.path.starts_with('/')
        {
            #[cfg(target_os = "windows")]
            {
                return Err(Error::not_supported("unix sockets"));
            }
            #[cfg(not(target_os = "windows"))]
            {
                let mut stream = if let Some(s) = stream {
                    s
                } else {
                    UnixStream::connect(&config.path)?
                };
                stream.set_write_timeout(Some(config.timeout))?;
                // the reader half is a cloned handle to the same socket
                let r = stream.try_clone()?;
                let reader = BufReader::with_capacity(config.buf_size, r);
                let (rx, reader) = connect_broker!(
                    &config.name,
                    stream,
                    reader,
                    responses,
                    connected,
                    config.timeout,
                    config.queue_size
                );
                (
                    Box::new(stream) as Box<dyn Socket + Send + 'static>,
                    rx,
                    reader,
                )
            }
        } else {
            let mut stream = TcpStream::connect(&config.path)?;
            stream.set_write_timeout(Some(config.timeout))?;
            stream.set_nodelay(true)?;
            let r = stream.try_clone()?;
            let reader = BufReader::with_capacity(config.buf_size, r);
            let (rx, reader) = connect_broker!(
                &config.name,
                stream,
                reader,
                responses,
                connected,
                config.timeout,
                config.queue_size
            );
            (
                Box::new(stream) as Box<dyn Socket + Send + 'static>,
                rx,
                reader,
            )
        };
        Ok((
            Self {
                name: config.name.clone(),
                writer,
                frame_id: 0,
                responses,
                rx: Some(rx),
                connected,
                timeout: config.timeout,
                config: config.clone(),
                secondary_counter: atomic::AtomicUsize::new(0),
            },
            reader,
        ))
    }
    /// Registers a secondary client (same settings, derived unique name).
    /// The Reader must be started manually by calling
    /// `Reader::run()` (e.g. in a separate thread).
    pub fn register_secondary(&self) -> Result<(Self, Reader), Error> {
        if self.name.contains(SECONDARY_SEP) {
            Err(Error::not_supported("not a primary client"))
        } else {
            let secondary_id = self
                .secondary_counter
                .fetch_add(1, atomic::Ordering::Relaxed);
            let secondary_name = format!("{}{}{}", self.name, SECONDARY_SEP, secondary_id);
            let mut config = self.config.clone();
            config.name = secondary_name;
            Self::connect(&config)
        }
    }
    // frame id 0 never appears on the wire: wrap to 1, not 0
    #[inline]
    fn increment_frame_id(&mut self) {
        if self.frame_id == u32::MAX {
            self.frame_id = 1;
        } else {
            self.frame_id += 1;
        }
    }
    /// Configured socket write / operation timeout.
    #[inline]
    pub fn get_timeout(&self) -> Duration {
        self.timeout
    }
}
/// Builds the payload for bulk topic operations: topic names joined with a
/// single NUL (0x00) separator. Replaces four identical hand-rolled loops
/// that lived in the bulk methods below.
fn bulk_topics_payload(topics: &[&str]) -> Vec<u8> {
    topics.join("\0").into_bytes()
}
impl SyncClient for Client {
    #[inline]
    fn take_event_channel(&mut self) -> Option<SyncEventChannel> {
        self.rx.take()
    }
    #[inline]
    fn get_connected_beacon(&self) -> Option<Arc<atomic::AtomicBool>> {
        Some(self.connected.clone())
    }
    fn send(&mut self, target: &str, payload: Cow<'_>, qos: QoS) -> Result<SyncOpConfirm, Error> {
        send_frame!(self, target, payload.as_slice(), FrameOp::Message, qos)
    }
    /// Sends a message with a separate (zero-copy) header block.
    fn zc_send(
        &mut self,
        target: &str,
        header: Cow<'_>,
        payload: Cow<'_>,
        qos: QoS,
    ) -> Result<SyncOpConfirm, Error> {
        send_zc_frame!(
            self,
            target,
            header.as_slice(),
            payload.as_slice(),
            FrameOp::Message,
            qos
        )
    }
    fn send_broadcast(
        &mut self,
        target: &str,
        payload: Cow<'_>,
        qos: QoS,
    ) -> Result<SyncOpConfirm, Error> {
        send_frame!(self, target, payload.as_slice(), FrameOp::Broadcast, qos)
    }
    fn publish(
        &mut self,
        target: &str,
        payload: Cow<'_>,
        qos: QoS,
    ) -> Result<SyncOpConfirm, Error> {
        send_frame!(self, target, payload.as_slice(), FrameOp::PublishTopic, qos)
    }
    fn publish_for(
        &mut self,
        target: &str,
        receiver: &str,
        payload: Cow<'_>,
        qos: QoS,
    ) -> Result<SyncOpConfirm, Error> {
        send_frame!(
            self,
            target,
            receiver,
            payload.as_slice(),
            FrameOp::PublishTopicFor,
            qos
        )
    }
    fn subscribe(&mut self, topic: &str, qos: QoS) -> Result<SyncOpConfirm, Error> {
        send_frame!(self, topic.as_bytes(), FrameOp::SubscribeTopic, qos)
    }
    fn unsubscribe(&mut self, topic: &str, qos: QoS) -> Result<SyncOpConfirm, Error> {
        send_frame!(self, topic.as_bytes(), FrameOp::UnsubscribeTopic, qos)
    }
    fn subscribe_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<SyncOpConfirm, Error> {
        let payload = bulk_topics_payload(topics);
        send_frame!(self, &payload, FrameOp::SubscribeTopic, qos)
    }
    fn unsubscribe_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<SyncOpConfirm, Error> {
        let payload = bulk_topics_payload(topics);
        send_frame!(self, &payload, FrameOp::UnsubscribeTopic, qos)
    }
    fn exclude(&mut self, topic: &str, qos: QoS) -> Result<SyncOpConfirm, Error> {
        send_frame!(self, topic.as_bytes(), FrameOp::ExcludeTopic, qos)
    }
    fn unexclude(&mut self, topic: &str, qos: QoS) -> Result<SyncOpConfirm, Error> {
        send_frame!(self, topic.as_bytes(), FrameOp::UnexcludeTopic, qos)
    }
    fn exclude_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<SyncOpConfirm, Error> {
        let payload = bulk_topics_payload(topics);
        send_frame!(self, &payload, FrameOp::ExcludeTopic, qos)
    }
    fn unexclude_bulk(&mut self, topics: &[&str], qos: QoS) -> Result<SyncOpConfirm, Error> {
        let payload = bulk_topics_payload(topics);
        send_frame!(self, &payload, FrameOp::UnexcludeTopic, qos)
    }
    #[inline]
    fn ping(&mut self) -> Result<(), Error> {
        send_data_or_mark_disconnected!(self, PING_FRAME);
        Ok(())
    }
    #[inline]
    fn is_connected(&self) -> bool {
        self.connected.load(atomic::Ordering::Relaxed)
    }
    #[inline]
    fn get_timeout(&self) -> Option<Duration> {
        Some(self.timeout)
    }
    #[inline]
    fn get_name(&self) -> &str {
        self.name.as_str()
    }
}
impl Drop for Client {
    fn drop(&mut self) {
        // shuts down the shared socket; the Reader holds a clone of the same
        // stream, so its blocking reads fail and the reader loop terminates
        self.writer.shutdown();
    }
}
// Reads frames from the socket forever: routes acknowledgements to pending
// op-confirm senders and full frames into the event channel. Returns Err when
// the connection breaks, a frame is malformed, or the channel is closed.
fn handle_read<R>(mut reader: R, tx: SyncEventSender, responses: ResponseMap) -> Result<(), Error>
where
    R: Read,
{
    loop {
        // fixed 6-byte prefix: kind, 4 bytes of id (acks) or length (frames),
        // and a realtime flag / ack status byte
        let mut buf = [0_u8; 6];
        reader.read_exact(&mut buf)?;
        let frame_type: FrameKind = buf[0].try_into()?;
        let realtime = buf[5] != 0;
        match frame_type {
            FrameKind::Nop => {}
            FrameKind::Acknowledge => {
                let ack_id = u32::from_le_bytes(buf[1..5].try_into().unwrap());
                // the last prefix byte carries the op result for acks
                let tx_channel = { responses.lock().remove(&ack_id) };
                if let Some(tx) = tx_channel {
                    let _r = tx.send(buf[5].to_busrt_result());
                } else {
                    warn!("orphaned busrt op ack {}", ack_id);
                }
            }
            _ => {
                let frame_len = u32::from_le_bytes(buf[1..5].try_into().unwrap());
                let mut buf = vec![0; frame_len as usize];
                reader.read_exact(&mut buf)?;
                // the body starts with NUL-terminated sender (and, for publish
                // frames, topic) strings; the payload follows
                let (sender, topic, payload_pos) = {
                    if frame_type == FrameKind::Publish {
                        let mut sp = buf.splitn(3, |c| *c == 0);
                        let s = sp.next().ok_or_else(|| Error::data("broken frame"))?;
                        let sender = std::str::from_utf8(s)?.to_owned();
                        let t = sp.next().ok_or_else(|| Error::data("broken frame"))?;
                        let topic = std::str::from_utf8(t)?.to_owned();
                        sp.next().ok_or_else(|| Error::data("broken frame"))?;
                        let payload_pos = s.len() + t.len() + 2;
                        (Some(sender), Some(topic), payload_pos)
                    } else {
                        let mut sp = buf.splitn(2, |c| *c == 0);
                        let s = sp.next().ok_or_else(|| Error::data("broken frame"))?;
                        let sender = std::str::from_utf8(s)?.to_owned();
                        sp.next().ok_or_else(|| Error::data("broken frame"))?;
                        let payload_pos = s.len() + 1;
                        (Some(sender), None, payload_pos)
                    }
                };
                let frame = Arc::new(FrameData::new(
                    frame_type,
                    sender,
                    topic,
                    None,
                    buf,
                    payload_pos,
                    realtime,
                ));
                tx.send(frame).map_err(Error::io)?;
            }
        }
    }
}
// Performs the greeting handshake: verify the broker banner and protocol
// version, echo the banner back, then register the client name.
fn chat<S>(name: &str, socket: &mut S) -> Result<(), Error>
where
    S: Read + Write,
{
    // the name length is transmitted as a u16
    if name.len() > u16::MAX as usize {
        return Err(Error::data("name too long"));
    }
    // banner: 1 greeting byte + 2-byte LE protocol version
    let mut buf = [0_u8; 3];
    socket.read_exact(&mut buf)?;
    if buf[0] != GREETINGS[0] {
        return Err(Error::not_supported("Invalid greetings"));
    }
    if u16::from_le_bytes(buf[1..3].try_into().unwrap()) != PROTOCOL_VERSION {
        return Err(Error::not_supported("Unsupported protocol version"));
    }
    socket.write_all(&buf)?;
    let mut buf = [0_u8; 1];
    socket.read_exact(&mut buf)?;
    if buf[0] != RESPONSE_OK {
        return Err(Error::new(
            buf[0].into(),
            Some(format!("Server greetings response: {:?}", buf[0])),
        ));
    }
    // register the client name: u16 LE length, then the name bytes.
    // write the name slice directly — the previous `to_vec()` copy was a
    // needless allocation
    #[allow(clippy::cast_possible_truncation)]
    socket.write_all(&(name.len() as u16).to_le_bytes())?;
    socket.write_all(name.as_bytes())?;
    let mut buf = [0_u8; 1];
    socket.read_exact(&mut buf)?;
    if buf[0] != RESPONSE_OK {
        return Err(Error::new(
            buf[0].into(),
            Some(format!("Server registration response: {:?}", buf[0])),
        ));
    }
    Ok(())
}
/// Socket reader half; feeds incoming frames into the event channel and
/// resolves QoS acknowledgements.
pub struct Reader {
    inner: Box<dyn Read + Send>,
    tx: SyncEventSender,
    // pending acks, shared with the writing Client
    responses: ResponseMap,
    // shared connection flag, cleared when the read loop stops
    rconn: Arc<atomic::AtomicBool>,
}
impl Reader {
    /// Runs the read loop until the connection breaks, then clears the shared
    /// "connected" flag so the client side can detect the loss.
    pub fn run(self) {
        if let Err(e) = handle_read(self.inner, self.tx, self.responses) {
            error!("busrt client reader error: {}", e);
            self.rconn.store(false, atomic::Ordering::Relaxed);
        }
    }
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/examples/client_listener.rs | examples/client_listener.rs | // Client demo (listener)
use busrt::client::AsyncClient;
use busrt::ipc::{Client, Config};
use busrt::QoS;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let name = "test.client.listener";
// create a new client instance
let config = Config::new("/tmp/busrt.sock", name);
let mut client = Client::connect(&config).await?;
// subscribe to all topics
let opc = client.subscribe("#", QoS::Processed).await?.expect("no op");
opc.await??;
// handle incoming frames
let rx = client.take_event_channel().unwrap();
while let Ok(frame) = rx.recv().await {
println!(
"Frame from {}: {:?} {:?} {}",
frame.sender(),
frame.kind(),
frame.topic(),
std::str::from_utf8(frame.payload()).unwrap_or("something unreadable")
);
}
Ok(())
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/examples/client_rpc_handler.rs | examples/client_rpc_handler.rs | // Demo of client RPC handler
//
// use client_rpc example to test client/server, don't forget to launch a standalone broker server
// instance
use busrt::async_trait;
use busrt::client::AsyncClient;
use busrt::ipc::{Client, Config};
use busrt::rpc::{Rpc, RpcClient, RpcError, RpcEvent, RpcHandlers, RpcResult};
use busrt::{Frame, QoS};
use serde::Deserialize;
use std::collections::BTreeMap;
use std::sync::atomic;
use std::time::Duration;
use tokio::time::sleep;
struct MyHandlers {
// all RPC handlers are launched in parallel multiple instances, so the internal variables need
// to be either atomic or under Mutex/RwLock to be modified
counter: atomic::AtomicU64,
}
#[derive(Deserialize)]
struct AddParams {
value: u64,
}
#[async_trait]
impl RpcHandlers for MyHandlers {
// RPC call handler. Will react to the "test" and "get" (any params) and "add" (will parse
// params as msgpack and add the value to the internal counter) methods
async fn handle_call(&self, event: RpcEvent) -> RpcResult {
match event.parse_method()? {
"test" => {
let mut payload = BTreeMap::new();
payload.insert("ok", true);
Ok(Some(rmp_serde::to_vec_named(&payload)?))
}
"get" => {
let mut payload = BTreeMap::new();
payload.insert("value", self.counter.load(atomic::Ordering::SeqCst));
Ok(Some(rmp_serde::to_vec_named(&payload)?))
}
"add" => {
let params: AddParams = rmp_serde::from_slice(event.payload())?;
self.counter
.fetch_add(params.value, atomic::Ordering::SeqCst);
Ok(None)
}
_ => Err(RpcError::method(None)),
}
}
// Handle RPC notifications
async fn handle_notification(&self, event: RpcEvent) {
println!(
"Got RPC notification from {}: {}",
event.sender(),
std::str::from_utf8(event.payload()).unwrap_or("something unreadable")
);
}
// handle broadcast notifications and topic publications
async fn handle_frame(&self, frame: Frame) {
println!(
"Got non-RPC frame from {}: {:?} {:?} {}",
frame.sender(),
frame.kind(),
frame.topic(),
std::str::from_utf8(frame.payload()).unwrap_or("something unreadable")
);
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let name = "test.client.rpc";
// create a new client instance
let config = Config::new("/tmp/busrt.sock", name);
let mut client = Client::connect(&config).await?;
// subscribe the cclient to all topics to print publish frames when received
let op_confirm = client.subscribe("#", QoS::Processed).await?.expect("no op");
// receive operation confirmation
op_confirm.await??;
// create handlers object
let handlers = MyHandlers {
counter: atomic::AtomicU64::default(),
};
// create RPC
let rpc = RpcClient::new(client, handlers);
println!("Waiting for frames to {}", name);
while rpc.is_connected() {
sleep(Duration::from_secs(1)).await;
}
Ok(())
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/examples/client_sender.rs | examples/client_sender.rs | // Client demo (listener)
use busrt::client::AsyncClient;
use busrt::ipc::{Client, Config};
use busrt::QoS;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let name = "test.client.sender";
// create a new client instance
let config = Config::new("/tmp/busrt.sock", name);
let mut client = Client::connect(&config).await?;
// publish to a topic
let opc = client
.publish("some/topic", "hello".as_bytes().into(), QoS::Processed)
.await?
.expect("no op");
opc.await??;
// send a direct message
let opc = client
.send(
"test.client.listener",
"hello".as_bytes().into(),
QoS::Processed,
)
.await?
.expect("no op");
opc.await??;
// send a broadcast message
let opc = client
.send_broadcast("test.*", "hello everyone".as_bytes().into(), QoS::Processed)
.await?
.expect("no op");
opc.await??;
Ok(())
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/examples/broker_custom_rpc.rs | examples/broker_custom_rpc.rs | // Demo of custom broker internal RPC
use busrt::async_trait;
use busrt::broker::{Broker, ServerConfig, BROKER_NAME};
use busrt::client::AsyncClient;
use busrt::rpc::{Rpc, RpcClient, RpcError, RpcEvent, RpcHandlers, RpcResult};
use busrt::{Frame, QoS};
use serde::Deserialize;
use std::time::Duration;
use tokio::time::sleep;
struct MyHandlers {}
#[derive(Deserialize)]
struct PingParams<'a> {
message: Option<&'a str>,
}
#[async_trait]
impl RpcHandlers for MyHandlers {
// RPC call handler. Will react to the "test" (any params) and "ping" (will parse params as
// msgpack and return the "message" field back) methods
async fn handle_call(&self, event: RpcEvent) -> RpcResult {
match event.parse_method()? {
"test" => Ok(Some("passed".as_bytes().to_vec())),
"ping" => {
let params: PingParams = rmp_serde::from_slice(event.payload())?;
Ok(params.message.map(|m| m.as_bytes().to_vec()))
}
_ => Err(RpcError::method(None)),
}
}
// Handle RPC notifications
async fn handle_notification(&self, event: RpcEvent) {
println!(
"Got RPC notification from {}: {}",
event.sender(),
std::str::from_utf8(event.payload()).unwrap_or("something unreadable")
);
}
// handle broadcast notifications and topic publications
async fn handle_frame(&self, frame: Frame) {
println!(
"Got non-RPC frame from {}: {:?} {:?} {}",
frame.sender(),
frame.kind(),
frame.topic(),
std::str::from_utf8(frame.payload()).unwrap_or("something unreadable")
);
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// create a new broker instance
let mut broker = Broker::new();
// spawn unix server for external clients
broker
.spawn_unix_server("/tmp/busrt.sock", ServerConfig::default())
.await?;
// register the broker core client
let mut core_client = broker.register_client(BROKER_NAME).await?;
// subscribe the core client to all topics to print publish frames when received
core_client.subscribe("#", QoS::No).await?;
// create handlers object
let handlers = MyHandlers {};
// create RPC
let crpc = RpcClient::new(core_client, handlers);
println!("Waiting for frames to {}", BROKER_NAME);
// set broker client, optional, allows to spawn fifo servers, the client is wrapped in
// Arc<Mutex<_>> as it is cloned for each fifo spawned and can be got back with core_rpc_client
// broker method
broker.set_core_rpc_client(crpc).await;
// test it with echo .broker .hello > /tmp/busrt.fifo
broker.spawn_fifo("/tmp/busrt.fifo", 8192).await?;
// this is the internal client, it will be connected forever
while broker
.core_rpc_client()
.lock()
.await
.as_ref()
.unwrap()
.is_connected()
{
sleep(Duration::from_secs(1)).await;
}
Ok(())
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/examples/client_rpc.rs | examples/client_rpc.rs | // Demo of client RPC with no handler which just calls methods
//
// use client_rpc_handler example to test client/server
use busrt::ipc::{Client, Config};
use busrt::rpc::{Rpc, RpcClient};
use busrt::{empty_payload, QoS};
use serde::Deserialize;
use std::collections::BTreeMap;
#[derive(Deserialize)]
struct Amount {
value: u64,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let name = "test.client.123";
let target = "test.client.rpc";
// create a new client instance
let config = Config::new("/tmp/busrt.sock", name);
let client = Client::connect(&config).await?;
// create RPC with no handlers
let rpc = RpcClient::new0(client);
// call the method with no confirm
rpc.call0(target, "test", empty_payload!(), QoS::Processed)
.await?;
let mut payload: BTreeMap<&str, u32> = <_>::default();
payload.insert("value", 10);
// call a method with confirm to make sure the value is added
rpc.call(
target,
"add",
rmp_serde::to_vec_named(&payload)?.into(),
QoS::Processed,
)
.await?;
// call the method to read the sum
let result = rpc
.call(target, "get", empty_payload!(), QoS::Processed)
.await?;
let amount: Amount = rmp_serde::from_slice(result.payload())?;
println!("{}", amount.value);
Ok(())
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/examples/broker_aaa.rs | examples/broker_aaa.rs | // Demo of a broker with AAA
//
// The broker listens on 0.0.0.0:7777
//
// Accepted client names: test (from localhost only), test2 (from any)
//
// test is allowed to do anything
//
// test2 is allowed to send direct messages to "test" only and publish to subtopics of "news"
//
// The broker force-disconnects the client named "test2" every 5 seconds
use busrt::broker::{AaaMap, Broker, ClientAaa, ServerConfig};
use ipnetwork::IpNetwork;
use std::time::Duration;
use tokio::time::sleep;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// create a new broker instance
let mut broker = Broker::new();
broker.init_default_core_rpc().await?;
// create AAA map
let aaa_map = AaaMap::default();
{
let mut map = aaa_map.lock();
map.insert(
"test".to_owned(),
ClientAaa::new().hosts_allow(vec![IpNetwork::V4("127.0.0.0/8".parse()?)]),
);
map.insert(
"test2".to_owned(),
ClientAaa::new()
.allow_publish_to(&["news/#"])
.deny_subscribe()
.deny_broadcast()
.allow_p2p_to(&["test"]),
);
}
// put AAA map to the server config
let config = ServerConfig::new().aaa_map(aaa_map);
// spawn tcp server for external clients
broker.spawn_tcp_server("0.0.0.0:7777", config).await?;
// the map can be modified later at any time, however access controls are cached for clients
// which are already connected
loop {
sleep(Duration::from_secs(5)).await;
println!("forcing test2 disconnect");
if let Err(e) = broker.force_disconnect("test2") {
eprintln!("{}", e);
}
}
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/examples/client_cursor.rs | examples/client_cursor.rs | // Demo of client cursor RPC
//
// use server_cursor example to test client/server
use busrt::ipc::{Client, Config};
use busrt::rpc::{Rpc, RpcClient};
use busrt::{cursors, empty_payload, QoS};
use serde::Deserialize;
#[derive(Deserialize)]
struct Customer {
id: i64,
name: String,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let name = "test.client.123";
let target = "db";
// create a new client instance
let config = Config::new("/tmp/busrt.sock", name);
let client = Client::connect(&config).await?;
// create RPC with no handlers
let rpc = RpcClient::new0(client);
// get a cursor
let cursor: cursors::Payload = rmp_serde::from_slice(
rpc.call(target, "Ccustomers", empty_payload!(), QoS::Processed)
.await?
.payload(),
)?;
// let us use a cow to avoid unnecessary data serialization every time when the method is
// called
let packed_cursor = rmp_serde::to_vec_named(&cursor)?;
let b_cursor = busrt::borrow::Cow::Borrowed(&packed_cursor);
loop {
// get customers one-by-one
let result = rpc
.call(target, "N", b_cursor.clone(), QoS::Processed)
.await?;
let data = result.payload();
// the payload is empty when there are no more records left
if data.is_empty() {
break;
}
let customer: Customer = rmp_serde::from_slice(data)?;
println!("{}: {}", customer.id, customer.name);
}
// do the same in bulk
let bulk_size = 100;
// get a cursor
let mut cursor: cursors::Payload = rmp_serde::from_slice(
rpc.call(target, "Ccustomers", empty_payload!(), QoS::Processed)
.await?
.payload(),
)?;
cursor.set_bulk_number(bulk_size);
let packed_cursor = rmp_serde::to_vec_named(&cursor)?;
let b_cursor = busrt::borrow::Cow::Borrowed(&packed_cursor);
loop {
// get customers in bulk
let result = rpc
.call(target, "NB", b_cursor.clone(), QoS::Processed)
.await?;
let customers: Vec<Customer> = rmp_serde::from_slice(result.payload())?;
for customer in &customers {
println!("{}: {}", customer.id, customer.name);
}
// stop if the block contains less records than the bulk size - that means it is the last
// block
if customers.len() < bulk_size {
break;
}
}
Ok(())
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/examples/inter_thread.rs | examples/inter_thread.rs | // Demo of inter-thread communication (with no RPC layer) with a UNIX socket for external clients
use busrt::broker::{Broker, ServerConfig};
use busrt::client::AsyncClient;
use busrt::QoS;
use std::time::Duration;
use tokio::time::sleep;
const SLEEP_STEP: Duration = Duration::from_secs(1);
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// create a new broker instance
let mut broker = Broker::new();
// init the default broker RPC API, optional
broker.init_default_core_rpc().await?;
// spawn unix server for external clients
broker
.spawn_unix_server("/tmp/busrt.sock", ServerConfig::default())
.await?;
// worker 1 will send to worker2 direct "hello" message
let mut client1 = broker.register_client("worker.1").await?;
// worker 2 will listen to incoming frames only
let mut client2 = broker.register_client("worker.2").await?;
// worker 3 will send broadcasts to all workers, an external client with a name "worker.N" can
// connect the broker via unix socket and receive them as well or send a message to "worker.2"
// to print it
let mut client3 = broker.register_client("worker.3").await?;
let rx = client2.take_event_channel().unwrap();
tokio::spawn(async move {
loop {
client1
.send("worker.2", "hello".as_bytes().into(), QoS::No)
.await
.unwrap();
sleep(SLEEP_STEP).await;
}
});
tokio::spawn(async move {
loop {
client3
.send_broadcast(
"worker.*",
"this is a broadcast message".as_bytes().into(),
QoS::No,
)
.await
.unwrap();
sleep(SLEEP_STEP).await;
}
});
while let Ok(frame) = rx.recv().await {
println!(
"{}: {}",
frame.sender(),
std::str::from_utf8(frame.payload()).unwrap_or("something unreadable")
);
}
Ok(())
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
alttch/busrt | https://github.com/alttch/busrt/blob/4031c56928b6f5fdca171cda5cad1ca6e9de7f7b/examples/server_cursor.rs | examples/server_cursor.rs | // Server-side cursor example
//
// Cursors are used to transfer data from streams either one-by-one or in bulk blocks
//
// The source can be a database, a HTTP data stream etc.
//
// consider there is a local PostgreSQL database "tests" with a table "customers" (id bigserial,
// name varchar). The access credentials are tests/xxx
use busrt::broker::{Broker, ServerConfig};
use busrt::rpc::{RpcClient, RpcError, RpcEvent, RpcHandlers, RpcResult};
use busrt::{async_trait, cursors};
use futures::{Stream, TryStreamExt};
use serde::Serialize;
use sqlx::{
postgres::{PgPoolOptions, PgRow},
Row,
};
use std::pin::Pin;
use std::str::FromStr;
use std::time::Duration;
use tokio::sync::Mutex;
use tokio::time::sleep;
// max cursor time-to-live before forcibly dropped
const CURSOR_TTL: Duration = Duration::from_secs(30);
// a database stream type alias
type DbStream = Pin<Box<dyn Stream<Item = Result<PgRow, sqlx::Error>> + Send>>;
// a structure for data rows
#[derive(Serialize)]
struct Customer {
id: i64,
name: String,
}
// define a cursor for the database stream
struct CustomerCursor {
// futures::Stream object must be under a mutex to implement Sync which is required for the RPC
// server
stream: Mutex<DbStream>,
// a special cursor metadata object, must exist in all cursor structures if busrt::cursors::Map
// helper object is used
meta: cursors::Meta,
}
// the busrt::cursors::Cursor trait requires the following methods to be implemented
//
// some implementations may omit either "next" or "next_bulk" if not required (e.g. with
// unimplemented!() inside the function), in this case the methods must not be mapped to RPC
#[async_trait]
impl cursors::Cursor for CustomerCursor {
// the method returns either a serialized data (bytes) or None
// as BUS/RT has no requirements for the data serialization format, it can be any, recognized
// by both server and client
async fn next(&self) -> Result<Option<Vec<u8>>, RpcError> {
if let Some(row) = self
.stream
.lock()
.await
.try_next()
.await
.map_err(|_| RpcError::internal(None))?
{
let id: i64 = row.try_get(0).map_err(|_| RpcError::internal(None))?;
let name: String = row.try_get(1).map_err(|_| RpcError::internal(None))?;
Ok(Some(rmp_serde::to_vec_named(&Customer { id, name })?))
} else {
// mark the cursor finished if there are no more records
self.meta().mark_finished();
Ok(None)
}
}
// the method always returns a serialized data array (bytes)
// if there are no more records, an empty array should be returned
async fn next_bulk(&self, count: usize) -> Result<Vec<u8>, RpcError> {
let mut result: Vec<Customer> = Vec::with_capacity(count);
if count > 0 {
let mut stream = self.stream.lock().await;
while let Some(row) = stream
.try_next()
.await
.map_err(|_| RpcError::internal(None))?
{
let id: i64 = row.try_get(0).map_err(|_| RpcError::internal(None))?;
let name: String = row.try_get(1).map_err(|_| RpcError::internal(None))?;
result.push(Customer { id, name });
if result.len() == count {
break;
}
}
}
if result.len() < count {
// mark the cursor finished if there are no more records
self.meta.mark_finished();
}
Ok(rmp_serde::to_vec_named(&result)?)
}
// the method must return the pointer to the cursor meta object
//
// can be omitted with e.g. unimplemented!() if no busrt::cursors::Map helper objects are used
fn meta(&self) -> &cursors::Meta {
&self.meta
}
}
impl CustomerCursor {
fn new(stream: DbStream) -> Self {
Self {
stream: Mutex::new(stream),
meta: cursors::Meta::new(CURSOR_TTL),
}
}
}
struct MyHandlers {
pool: sqlx::PgPool,
// a helper object to handle multiple cursors
cursors: cursors::Map,
}
#[async_trait]
impl RpcHandlers for MyHandlers {
async fn handle_call(&self, event: RpcEvent) -> RpcResult {
let payload = event.payload();
match event.parse_method()? {
// the method "CCustomers" returns a cursor uuid only
"Ccustomers" => {
let stream = sqlx::query("select id, name from customers").fetch(&self.pool);
let cursor = CustomerCursor::new(stream);
let u = self.cursors.add(cursor).await;
Ok(Some(rmp_serde::to_vec_named(&cursors::Payload::from(u))?))
}
"N" => {
// handle cursor-next calls. if all cursors properly implement
// busrt::cursors::Cursor trait, it is possible to have a sigle "next" method for
// all cursor types.
let p: cursors::Payload = rmp_serde::from_slice(payload)?;
self.cursors.next(p.uuid()).await
}
"NB" => {
// handle cursor-next-bulk calls. if all cursors properly implement
// busrt::cursors::Cursor trait, it is possible to have a sigle "next-bulk" method
// for all cursor types.
let p: cursors::Payload = rmp_serde::from_slice(payload)?;
self.cursors.next_bulk(p.uuid(), p.bulk_number()).await
}
_ => Err(RpcError::method(None)),
}
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let mut broker = Broker::new();
broker
.spawn_unix_server("/tmp/busrt.sock", ServerConfig::default())
.await?;
let client = broker.register_client("db").await?;
let opts = sqlx::postgres::PgConnectOptions::from_str("postgres://tests:xxx@localhost/tests")?;
let pool = PgPoolOptions::new().connect_with(opts).await?;
let handlers = MyHandlers {
pool,
cursors: cursors::Map::new(Duration::from_secs(30)),
};
let _rpc = RpcClient::new(client, handlers);
loop {
sleep(Duration::from_secs(1)).await;
}
}
| rust | Apache-2.0 | 4031c56928b6f5fdca171cda5cad1ca6e9de7f7b | 2026-01-04T20:25:23.680465Z | false |
way-edges/way-edges | https://github.com/way-edges/way-edges/blob/03a9091642ad39345115223890000481eeac8333/crates/way-edges/src/args.rs | crates/way-edges/src/args.rs | use std::sync::OnceLock;
use backend::ipc;
use clap::{CommandFactory, Parser, Subcommand};
use clap_complete::{
engine::{ArgValueCompleter, CompletionCandidate},
CompleteEnv,
};
#[derive(Debug, Parser)]
#[command(name = "way-edges")]
#[command(author = "OGIOS")]
#[command(version = "pre")]
#[command(about = "Hidden widget on the screen edges", long_about = None)]
pub struct Cli {
/// print the mouse button key to the log when press and release.
#[arg(short = 'd', long)]
pub mouse_debug: bool,
#[arg(short = 'c', long)]
pub config_path: Option<String>,
#[arg(short = 'i', long)]
pub ipc_namespace: Option<String>,
#[command(subcommand)]
pub command: Option<Command>,
}
fn complete_widget_name(current: &std::ffi::OsStr) -> Vec<CompletionCandidate> {
let Some(current) = current.to_str() else {
return vec![];
};
let Ok(root) = config::get_config_root() else {
return vec![];
};
root.widgets
.into_iter()
.filter(|w| w.common.namespace.starts_with(current))
.map(|w| CompletionCandidate::new(&w.common.namespace))
.collect()
}
#[derive(Subcommand, Debug, PartialEq, Clone)]
pub enum Command {
/// print json schema of the configurations to the stdout
#[command(name = "schema")]
Schema,
/// (deprecated) run daemon. There can only be one daemon at a time.
#[command(name = "daemon", alias = "d")]
Daemon,
/// toggle pin of a widget under certain group.
/// format: <group_name>:<widget_name>
#[command(name = "togglepin")]
TogglePin {
/// format: <group_name>:<widget_name>
#[clap(add = ArgValueCompleter::new(complete_widget_name))]
namespace: String,
},
/// reload widget configuration
#[command(name = "reload")]
Reload,
/// close daemon
#[command(name = "quit", alias = "q")]
Exit,
}
impl Command {
pub fn send_ipc(&self) {
let (command, args) = match self {
Self::Exit => (ipc::IPC_COMMAND_QUIT, vec![]),
Self::TogglePin { namespace } => {
(ipc::IPC_COMMAND_TOGGLE_PIN, vec![namespace.to_string()])
}
Self::Reload => (ipc::IPC_COMMAND_RELOAD, vec![]),
_ => {
return;
}
};
ipc::send_command(ipc::CommandBody {
command: command.to_string(),
args,
});
}
}
static ARGS: OnceLock<Cli> = OnceLock::new();
pub fn get_args() -> &'static Cli {
ARGS.get_or_init(Cli::parse)
}
/// nothing should be printed to stdout before this.
pub fn if_print_completion_and_exit() {
CompleteEnv::with_factory(Cli::command).complete();
}
| rust | MIT | 03a9091642ad39345115223890000481eeac8333 | 2026-01-04T20:25:15.177432Z | false |
way-edges/way-edges | https://github.com/way-edges/way-edges/blob/03a9091642ad39345115223890000481eeac8333/crates/way-edges/src/main.rs | crates/way-edges/src/main.rs | mod args;
use frontend::run_app;
use log::Level;
use std::env;
use std::io::Write;
fn main() {
// completion script output, and exit
args::if_print_completion_and_exit();
if env::var("RUST_LOG").is_err() {
unsafe { env::set_var("RUST_LOG", "info,system_tray=error,zbus=warn") }
}
// force tracing warn
unsafe {
env::set_var(
"RUST_LOG",
format!("{},tracing=warn,usvg=error", env::var("RUST_LOG").unwrap()),
)
};
eprintln!("Logging with settings: {}", env::var("RUST_LOG").unwrap());
env_logger::Builder::from_default_env()
.format(|buf, record| {
let (tag, color) = match record.level() {
Level::Debug => ("DBG", "\x1b[90m"), // grey
Level::Info => ("INF", "\x1b[34m"), // blue
Level::Warn => ("WRN", "\x1b[33m"), // yellow
Level::Error => ("ERR", "\x1b[31m"), // red
Level::Trace => ("TRC", "\x1b[2m"),
};
writeln!(buf, "{}{}:\x1b[0m {}", color, tag, record.args())
})
.init();
// env_logger::init();
let cli = args::get_args();
config::set_config_path(cli.config_path.as_deref());
backend::ipc::set_ipc_namespace(cli.ipc_namespace.as_deref());
if let Some(cmd) = cli.command.as_ref() {
match &cmd {
args::Command::Daemon => {
log::warn!("daemon command is deprecated, please just run `way-edges`");
}
args::Command::Schema => {
config::output_json_schema();
return;
}
_ => {
cmd.send_ipc();
return;
}
}
}
run_app(cli.mouse_debug);
}
| rust | MIT | 03a9091642ad39345115223890000481eeac8333 | 2026-01-04T20:25:15.177432Z | false |
way-edges/way-edges | https://github.com/way-edges/way-edges/blob/03a9091642ad39345115223890000481eeac8333/crates/util/src/lib.rs | crates/util/src/lib.rs | pub mod color;
pub mod draw;
pub mod template;
pub mod text;
pub mod shell {
use log::{error, warn};
use std::{os::unix::process::CommandExt, process::Command, thread};
pub fn shell_cmd(value: &str) -> Result<String, String> {
let mut cmd = Command::new("/bin/sh");
log::debug!("running command: {value}");
let res = cmd.arg("-c").arg(value).output();
let msg = match res {
Ok(o) => {
if !o.status.success() {
Err(format!(
"command exit with code 1: {}",
String::from_utf8_lossy(&o.stderr)
))
} else {
Ok(String::from_utf8_lossy(&o.stdout).to_string())
}
}
Err(e) => Err(format!("Error: {e}")),
};
if let Err(ref e) = msg {
log::error!("error running command: {value}\n{e}");
};
msg
}
pub fn shell_cmd_non_block(value: String) {
thread::spawn(move || {
log::debug!("running command: {value}");
let mut cmd = Command::new("/bin/sh");
cmd.arg("-c").arg(&value);
cmd.stdout(std::process::Stdio::null())
.stderr(std::process::Stdio::null())
.stdin(std::process::Stdio::null());
unsafe {
cmd.pre_exec(move || {
match libc::fork() {
-1 => return Err(std::io::Error::last_os_error()),
0 => (),
_ => libc::_exit(0),
}
Ok(())
});
}
match cmd.spawn() {
Ok(mut c) => match c.wait() {
Ok(s) => {
if !s.success() {
warn!("command exit unsuccessfully: {value}");
}
}
Err(e) => {
error!("error waiting for command {cmd:?}: {e:?}");
}
},
Err(err) => {
error!("error spawning {cmd:?}: {err:?}");
}
}
});
}
}
pub static Z: f64 = 0.;
use std::fmt::Debug;
use std::fmt::Display;
pub fn binary_search_within_range<T: Debug + PartialOrd + Copy + Display>(
l: &[[T; 2]],
v: T,
) -> isize {
if l.is_empty() {
return -1;
}
if l.len() == 1 {
if v >= l[0][0] && v < l[0][1] {
return 0;
} else {
return -1;
}
}
let mut index = l.len() - 1;
let mut half = l.len();
fn half_index(index: &mut usize, half: &mut usize, is_left: bool) {
*half = (*half / 2).max(1);
if is_left {
*index -= *half
} else {
*index += *half
}
}
half_index(&mut index, &mut half, true);
loop {
let current = l[index];
if v < current[0] {
if index == 0 || l[index - 1][1] <= v {
return -1;
} else {
half_index(&mut index, &mut half, true);
}
} else if v >= current[1] {
if index == l.len() - 1 || v < l[index + 1][0] {
return -1;
} else {
half_index(&mut index, &mut half, false);
}
} else {
return index as isize;
}
}
}
pub fn binary_search_end<T: Debug + PartialOrd + Copy + Display + Default>(l: &[T], v: T) -> isize {
if l.is_empty() {
return -1;
}
if l.len() == 1 {
if v >= T::default() && v < l[0] {
return 0;
} else {
return -1;
}
}
let mut index = 0;
let max_index = l.len() - 1;
let mut get_half = {
let mut half = l.len();
move || {
half = (half / 2).max(1);
half
}
};
loop {
let current = l[index];
if v < current {
// if at the first, or there's no smaller to the left
if index == 0 || v >= l[index - 1] {
return index as isize;
}
index -= get_half();
} else {
// if it's the last
if index == max_index {
return -1;
}
// if smaller than the right
if v < l[index + 1] {
return (index + 1) as isize;
}
index += get_half();
}
}
}
#[derive(Debug)]
pub struct Or(pub bool);
impl Or {
pub fn or(&mut self, b: bool) {
self.0 = self.0 || b
}
pub fn res(self) -> bool {
self.0
}
}
/// input: rgba
/// output: bgra
pub fn pre_multiply_and_to_little_endian_argb(rgba: [u8; 4]) -> [u8; 4] {
// pre-multiply
let red = rgba[0] as u16;
let green = rgba[1] as u16;
let blue = rgba[2] as u16;
let alpha = rgba[3] as u16;
let r = (red * alpha) / 255;
let g = (green * alpha) / 255;
let b = (blue * alpha) / 255;
// little-endian for ARgb32
[b as u8, g as u8, r as u8, rgba[3]]
}
| rust | MIT | 03a9091642ad39345115223890000481eeac8333 | 2026-01-04T20:25:15.177432Z | false |
way-edges/way-edges | https://github.com/way-edges/way-edges/blob/03a9091642ad39345115223890000481eeac8333/crates/util/src/draw.rs | crates/util/src/draw.rs | use std::f64::consts::PI;
use cairo::{Format, ImageSurface, Path};
use crate::Z;
pub fn new_surface(size: (i32, i32)) -> ImageSurface {
ImageSurface::create(Format::ARgb32, size.0, size.1).unwrap()
}
pub fn draw_rect_path(radius: f64, size: (f64, f64), corners: [bool; 4]) -> Result<Path, String> {
let surf =
cairo::ImageSurface::create(Format::ARgb32, size.0.ceil() as i32, size.1.ceil() as i32)
.unwrap();
let ctx = cairo::Context::new(&surf).unwrap();
// draw
{
// top left corner
{
ctx.move_to(Z, radius);
if corners[0] {
let center = (radius, radius);
ctx.arc(center.0, center.1, radius, PI, 1.5 * PI);
} else {
ctx.line_to(Z, Z);
}
let x = size.0 - radius;
let y = Z;
ctx.line_to(x, y);
}
// top right corner
{
if corners[1] {
let center = (size.0 - radius, radius);
ctx.arc(center.0, center.1, radius, 1.5 * PI, 2. * PI);
} else {
ctx.line_to(size.0, Z);
}
let x = size.0;
let y = size.1 - radius;
ctx.line_to(x, y);
}
// bottom right corner
{
if corners[2] {
let center = (size.0 - radius, size.1 - radius);
ctx.arc(center.0, center.1, radius, 0., 0.5 * PI);
} else {
ctx.line_to(size.0, size.1);
}
let x = radius;
let y = size.1;
ctx.line_to(x, y);
}
// bottom left corner
{
if corners[3] {
let center = (radius, size.1 - radius);
ctx.arc(center.0, center.1, radius, 0.5 * PI, PI);
} else {
ctx.line_to(Z, size.1);
}
let x = Z;
let y = radius;
ctx.line_to(x, y);
}
ctx.close_path();
Ok(ctx.copy_path().unwrap())
}
}
pub fn draw_fan(ctx: &cairo::Context, point: (f64, f64), radius: f64, start: f64, end: f64) {
ctx.arc(point.0, point.1, radius, start * PI, end * PI);
ctx.line_to(point.0, point.1);
ctx.close_path();
}
#[allow(clippy::too_many_arguments)]
pub fn copy_pixmap(
src_data: &[u8],
src_width: usize,
src_height: usize,
dst_data: &mut [u8],
dst_width: usize,
dst_height: usize,
x: isize,
y: isize,
) {
let (sx_start, dx_start, copy_width) = {
let sx_start = (-x).max(0) as usize;
let dx_start = x.max(0) as usize;
let remaining_width_src = src_width.saturating_sub(sx_start) as isize;
let remaining_width_dst = dst_width.saturating_sub(dx_start) as isize;
let copy_width = remaining_width_src.min(remaining_width_dst).max(0) as usize;
(sx_start, dx_start, copy_width)
};
let (sy_start, dy_start, copy_height) = {
let sy_start = (-y).max(0) as usize;
let dy_start = y.max(0) as usize;
let remaining_height_src = src_height.saturating_sub(sy_start) as isize;
let remaining_height_dst = dst_height.saturating_sub(dy_start) as isize;
let copy_height = remaining_height_src.min(remaining_height_dst).max(0) as usize;
(sy_start, dy_start, copy_height)
};
if copy_width == 0 || copy_height == 0 {
return;
}
for row in 0..copy_height {
let src_row = sy_start + row;
let dst_row = dy_start + row;
let src_start = (src_row * src_width + sx_start) * 4;
let src_end = src_start + copy_width * 4;
let dst_start = (dst_row * dst_width + dx_start) * 4;
let dst_end = dst_start + copy_width * 4;
dst_data[dst_start..dst_end].copy_from_slice(&src_data[src_start..src_end]);
}
}
| rust | MIT | 03a9091642ad39345115223890000481eeac8333 | 2026-01-04T20:25:15.177432Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.