repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
ohah/hwpjs | https://github.com/ohah/hwpjs/blob/19df26a209c6e780b4c3b03d2ab628209e1ae311/packages/hwpjs/crates/lib/src/generated.rs | packages/hwpjs/crates/lib/src/generated.rs | // Auto generated by Craby. DO NOT EDIT.
// Hash: 1761b519b16719d6
#[rustfmt::skip]
use craby::prelude::*;
use crate::ffi::bridging::*;
pub trait HwpjsSpec {
fn new(ctx: Context) -> Self;
fn id(&self) -> usize;
fn file_header(&mut self, data: ArrayBuffer) -> String;
fn to_json(&mut self, data: ArrayBuffer) -> String;
fn to_markdown(&mut self, data: ArrayBuffer, options: ToMarkdownOptions) -> ToMarkdownResult;
}
impl Default for NullableString {
fn default() -> Self {
NullableString {
null: true,
val: String::default(),
}
}
}
impl From<NullableString> for Nullable<String> {
fn from(val: NullableString) -> Self {
Nullable::new(if val.null { None } else { Some(val.val) })
}
}
impl From<Nullable<String>> for NullableString {
fn from(val: Nullable<String>) -> Self {
let val = val.into_value();
let null = val.is_none();
NullableString {
val: val.unwrap_or(String::default()),
null,
}
}
}
impl Default for ToMarkdownOptions {
fn default() -> Self {
ToMarkdownOptions {
image_output_dir: NullableString::default(),
image: NullableString::default(),
use_html: false,
include_version: false,
include_page_info: false
}
}
}
impl Default for ToMarkdownResult {
fn default() -> Self {
ToMarkdownResult {
markdown: String::default()
}
}
}
| rust | MIT | 19df26a209c6e780b4c3b03d2ab628209e1ae311 | 2026-01-04T20:24:59.575438Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_mc/src/lib.rs | fantoch_mc/src/lib.rs | use fantoch::command::Command;
use fantoch::config::Config;
use fantoch::executor::Executor;
use fantoch::id::ProcessId;
use fantoch::protocol::{Action, Protocol};
use fantoch::time::RunTime;
use fantoch::util;
use fantoch::{HashMap, HashSet};
use stateright::actor::{Actor, Id};
use std::marker::PhantomData;
const SHARD_ID: u64 = 0;
pub struct ProtocolActor<P: Protocol> {
config: Config,
topology: HashMap<ProcessId, Vec<ProcessId>>,
_phantom: PhantomData<P>,
}
impl<P> ProtocolActor<P>
where
P: Protocol,
{
pub fn new(
config: Config,
topology: HashMap<ProcessId, Vec<ProcessId>>,
) -> Self {
Self::check_topology(config.n(), topology.clone());
Self {
config,
topology,
_phantom: PhantomData,
}
}
fn check_topology(n: usize, topology: HashMap<ProcessId, Vec<ProcessId>>) {
let ids = Self::usort(util::process_ids(SHARD_ID, n));
let keys = Self::usort(topology.keys().cloned());
assert_eq!(ids, keys);
for peers in topology.values() {
let peers = Self::usort(peers.iter().cloned());
assert_eq!(ids, peers);
}
}
fn usort<I>(ids: I) -> Vec<ProcessId>
where
I: Iterator<Item = ProcessId>,
{
let mut ids: Vec<_> = ids.collect();
ids.sort();
ids.dedup();
ids
}
}
#[derive(Clone)]
pub struct ProtocolActorState<P: Protocol> {
protocol: P,
executor: <P as Protocol>::Executor,
}
#[derive(Clone, Debug)]
pub enum KV<M> {
Access(Command),
Internal(M),
}
fn to_process_id(id: Id) -> ProcessId {
usize::from(id) as ProcessId
}
fn from_process_id(id: ProcessId) -> Id {
Id::from(id as usize)
}
impl<P> Actor for ProtocolActor<P>
where
P: Protocol,
{
type Msg = KV<<P as Protocol>::Message>;
type State = ProtocolActorState<P>;
}
// fn init(i: InitIn<Self>, o: &mut Out<Self>) {
// // fetch id and config
// let process_id: ProcessId = usize::from(i.id) as ProcessId;
// let config = i.context.config;
// // our ids range from 1..n
// assert!(process_id > 0);
// // create protocol
// let (mut protocol, periodic_events) = P::new(process_id, config);
// if !periodic_events.is_empty() {
// todo!("schedule periodic events: {:?}", periodic_events);
// }
// // discover peers
// let peers = i
// .context
// .topology
// .get(&process_id)
// .cloned()
// .expect("each process should have a set of peers");
// protocol.discover(peers);
// // create executor
// let executor = <<P as Protocol>::Executor>::new(process_id, config);
// // set actor state
// let state = ProtocolActorState { protocol, executor };
// o.set_state(state);
// }
// fn next(i: NextIn<Self>, o: &mut Out<Self>) {
// // get current protocol state
// let mut state = i.state.clone();
// // get msg received
// let Event::Receive(from, msg) = i.event;
// let from = to_process_id(from);
// // handle msg
// let to_sends = match msg {
// KV::Access(cmd) => Self::handle_submit(cmd, &mut state),
// KV::Internal(msg) => Self::handle_msg(from, msg, &mut state),
// };
// // send new messages
// for (recipients, msg) in to_sends {
// let recipients: Vec<_> =
// recipients.into_iter().map(from_process_id).collect();
// let msg = KV::Internal(msg);
// o.broadcast(&recipients, &msg);
// }
// // set new protocol state
// o.set_state(state);
// }
// }
impl<P> ProtocolActor<P>
where
P: Protocol,
{
#[must_use]
fn handle_submit(
cmd: Command,
state: &mut ProtocolActorState<P>,
) -> Vec<(HashSet<ProcessId>, P::Message)> {
state.protocol.submit(None, cmd, &RunTime);
Self::handle_to_executors(state);
Self::handle_to_processes(state)
}
#[must_use]
fn handle_msg(
from: ProcessId,
msg: P::Message,
state: &mut ProtocolActorState<P>,
) -> Vec<(HashSet<ProcessId>, P::Message)> {
// handle message
state.protocol.handle(from, SHARD_ID, msg, &RunTime);
Self::handle_to_executors(state);
Self::handle_to_processes(state)
}
fn handle_to_executors(state: &mut ProtocolActorState<P>) {
// handle new execution info
for execution_info in state.protocol.to_executors_iter() {
state.executor.handle(execution_info, &RunTime);
// assert that there's nothing to other executors (since we're
// assuming full replication (and only Janus needs that in partial
// replication))
assert!(state.executor.to_executors().is_none());
for executor_result in state.executor.to_clients_iter() {
todo!("send result to client: {:?}", executor_result);
}
}
}
#[must_use]
fn handle_to_processes(
state: &mut ProtocolActorState<P>,
) -> Vec<(HashSet<ProcessId>, P::Message)> {
// get the id of this process
let process_id = state.protocol.id();
let actions: Vec<_> = state.protocol.to_processes_iter().collect();
// handle all new actions
actions
.into_iter()
.flat_map(|action| {
match action {
Action::ToSend { msg, mut target } => {
if target.remove(&process_id) {
// handle message locally, if message also to self,
// and remove self from target
let mut to_sends = Self::handle_msg(
process_id,
msg.clone(),
state,
);
to_sends.push((target, msg));
to_sends
} else {
vec![(target, msg)]
}
}
Action::ToForward { msg } => {
// there's a single worker, so just handle it locally
Self::handle_msg(process_id, msg, state)
}
}
})
.collect()
}
}
#[cfg(test)]
mod tests {
use super::*;
use fantoch::protocol::Basic;
#[test]
fn it_works() {
let n = 3;
let f = 1;
let config = Config::new(n, f);
let mut topology = HashMap::new();
topology.insert(1, vec![1, 2, 3]);
topology.insert(2, vec![2, 3, 1]);
topology.insert(3, vec![3, 1, 2]);
let _ = ProtocolActor::<Basic>::new(config, topology);
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/config.rs | fantoch/src/config.rs | use crate::id::ProcessId;
use serde::{Deserialize, Serialize};
use std::time::Duration;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub struct Config {
/// number of processes
n: usize,
/// number of tolerated faults
f: usize,
/// number of shards
shard_count: usize,
/// if enabled, then execution is skipped
execute_at_commit: bool,
/// defines the interval between executor cleanups
executor_cleanup_interval: Duration,
/// defines the interval between between executed notifications sent to
/// the local worker process
executor_executed_notification_interval: Duration,
/// defines whether the executor should monitor pending commands, and if
/// so, the interval between each monitor
executor_monitor_pending_interval: Option<Duration>,
/// defines whether the executor should monitor the execution order of
/// commands
executor_monitor_execution_order: bool,
/// defines the interval between garbage collections
gc_interval: Option<Duration>,
/// starting leader process
leader: Option<ProcessId>,
/// defines whether protocols (atlas, epaxos and tempo) should employ the
/// NFR optimization
nfr: bool,
/// defines whether tempo should employ tiny quorums or not
tempo_tiny_quorums: bool,
/// defines the interval between clock bumps, if any
tempo_clock_bump_interval: Option<Duration>,
/// defines the interval the sending of `MDetached` messages in tempo, if
/// any
tempo_detached_send_interval: Option<Duration>,
/// defines whether caesar should employ the wait condition
caesar_wait_condition: bool,
/// defines whether protocols should try to bypass the fast quorum process
/// ack (which is only possible if the fast quorum size is 2)
skip_fast_ack: bool,
}
impl Config {
/// Create a new `Config`.
/// The first argument `n` represents the number of processes in the system.
/// The second argument `f` represents the number of faults tolerated by the
/// system.
pub fn new(n: usize, f: usize) -> Self {
if f > n / 2 {
panic!("f={} is larger than a minority with n={}", f, n);
}
// by default, `shard_count = 1`
let shard_count = 1;
// by default, execution is not skipped
let execute_at_commit = false;
// by default, executor cleanups happen every 5ms
let executor_cleanup_interval = Duration::from_millis(5);
// by default, executed notifications happen every 50ms
let executor_executed_notification_interval = Duration::from_millis(50);
// by default, pending commnads are not monitored
let executor_monitor_pending_interval = None;
// by default, executors do not monitor execution order
let executor_monitor_execution_order = false;
// by default, commands are deleted at commit time
let gc_interval = None;
// by default, there's no leader
let leader = None;
// by default, `nfr = false`
let nfr = false;
// by default, `tempo_tiny_quorums = false`
let tempo_tiny_quorums = false;
// by default, clocks are not bumped periodically
let tempo_clock_bump_interval = None;
// by default, `MDetached` messages are not sent
let tempo_detached_send_interval = None;
// by default, `caesar_wait_condition = true`
let caesar_wait_condition = true;
// by default `skip_fast_ack = false;
let skip_fast_ack = false;
Self {
n,
f,
shard_count,
execute_at_commit,
executor_cleanup_interval,
executor_executed_notification_interval,
executor_monitor_pending_interval,
executor_monitor_execution_order,
gc_interval,
leader,
nfr,
tempo_tiny_quorums,
tempo_clock_bump_interval,
tempo_detached_send_interval,
caesar_wait_condition,
skip_fast_ack,
}
}
/// Retrieve the number of processes.
pub fn n(&self) -> usize {
self.n
}
/// Retrieve the number of faults tolerated.
pub fn f(&self) -> usize {
self.f
}
/// Retrieve the number of shards.
pub fn shard_count(&self) -> usize {
self.shard_count
}
/// Changes the number of sahrds.
pub fn set_shard_count(&mut self, shard_count: usize) {
assert!(shard_count >= 1);
self.shard_count = shard_count;
}
/// Checks whether execution is to be skipped.
pub fn execute_at_commit(&self) -> bool {
self.execute_at_commit
}
/// Changes the value of `execute_at_commit`.
pub fn set_execute_at_commit(&mut self, execute_at_commit: bool) {
self.execute_at_commit = execute_at_commit;
}
/// Checks the executor cleanup interval.
pub fn executor_cleanup_interval(&self) -> Duration {
self.executor_cleanup_interval
}
/// Sets the executor cleanup interval.
pub fn set_executor_cleanup_interval(&mut self, interval: Duration) {
self.executor_cleanup_interval = interval;
}
/// Checks the executor monitor pending interval.
pub fn executor_monitor_pending_interval(&self) -> Option<Duration> {
self.executor_monitor_pending_interval
}
/// Sets the executor monitor pending interval.
pub fn set_executor_monitor_pending_interval<I>(&mut self, interval: I)
where
I: Into<Option<Duration>>,
{
self.executor_monitor_pending_interval = interval.into();
}
/// Checks the whether executors should monitor execution order.
pub fn executor_monitor_execution_order(&self) -> bool {
self.executor_monitor_execution_order
}
/// Sets the executor monitor execution order.
pub fn set_executor_monitor_execution_order(
&mut self,
executor_monitor_execution_order: bool,
) {
self.executor_monitor_execution_order =
executor_monitor_execution_order;
}
/// Checks the executed notification interval.
pub fn executor_executed_notification_interval(&self) -> Duration {
self.executor_executed_notification_interval
}
/// Sets the executed notification interval.
pub fn set_executor_executed_notification_interval(
&mut self,
interval: Duration,
) {
self.executor_executed_notification_interval = interval;
}
/// Checks the garbage collection interval.
pub fn gc_interval(&self) -> Option<Duration> {
self.gc_interval
}
/// Sets the garbage collection interval.
pub fn set_gc_interval<I>(&mut self, interval: I)
where
I: Into<Option<Duration>>,
{
self.gc_interval = interval.into();
}
/// Checks whether a starting leader has been defined.
pub fn leader(&self) -> Option<ProcessId> {
self.leader
}
/// Sets the starting leader.
pub fn set_leader<L>(&mut self, leader: L)
where
L: Into<Option<ProcessId>>,
{
self.leader = leader.into();
}
/// Checks whether deps NFR is enabled or not.
pub fn nfr(&self) -> bool {
self.nfr
}
/// Changes the value of `nfr`.
pub fn set_nfr(&mut self, nfr: bool) {
self.nfr = nfr;
}
/// Checks whether tempo tiny quorums is enabled or not.
pub fn tempo_tiny_quorums(&self) -> bool {
self.tempo_tiny_quorums
}
/// Changes the value of `tempo_tiny_quorums`.
pub fn set_tempo_tiny_quorums(&mut self, tempo_tiny_quorums: bool) {
self.tempo_tiny_quorums = tempo_tiny_quorums;
}
/// Checks tempo clock bump interval.
pub fn tempo_clock_bump_interval(&self) -> Option<Duration> {
self.tempo_clock_bump_interval
}
/// Sets tempo clock bump interval.
pub fn set_tempo_clock_bump_interval<I>(&mut self, interval: I)
where
I: Into<Option<Duration>>,
{
self.tempo_clock_bump_interval = interval.into();
}
/// Checks tempo detached send interval.
pub fn tempo_detached_send_interval(&self) -> Option<Duration> {
self.tempo_detached_send_interval
}
/// Sets tempo clock bump interval.
pub fn set_tempo_detached_send_interval<I>(&mut self, interval: I)
where
I: Into<Option<Duration>>,
{
self.tempo_detached_send_interval = interval.into();
}
/// Checks whether caesar's wait condition is enabled or not.
pub fn caesar_wait_condition(&self) -> bool {
self.caesar_wait_condition
}
/// Changes the value of `caesar_wait_condition`.
pub fn set_caesar_wait_condition(&mut self, caesar_wait_condition: bool) {
self.caesar_wait_condition = caesar_wait_condition;
}
/// Checks whether skip fast ack is enabled or not.
pub fn skip_fast_ack(&self) -> bool {
self.skip_fast_ack
}
/// Changes the value of `skip_fast_ack`.
pub fn set_skip_fast_ack(&mut self, skip_fast_ack: bool) {
self.skip_fast_ack = skip_fast_ack;
}
}
impl Config {
/// Computes the size of a majority quorum.
pub fn majority_quorum_size(&self) -> usize {
(self.n / 2) + 1
}
/// Computes `Basic` quorum size.
pub fn basic_quorum_size(&self) -> usize {
self.f + 1
}
/// Computes `FPaxos` quorum size.
pub fn fpaxos_quorum_size(&self) -> usize {
self.f + 1
}
/// Computes `Atlas` fast and write quorum sizes.
pub fn atlas_quorum_sizes(&self) -> (usize, usize) {
let n = self.n;
let f = self.f;
let fast_quorum_size = (n / 2) + f;
let write_quorum_size = f + 1;
(fast_quorum_size, write_quorum_size)
}
/// Computes `EPaxos` fast and write quorum sizes.
pub fn epaxos_quorum_sizes(&self) -> (usize, usize) {
let n = self.n;
// ignore config.f() since EPaxos always tolerates a minority of
// failures
let f = n / 2;
let fast_quorum_size = f + ((f + 1) / 2 as usize);
let write_quorum_size = f + 1;
(fast_quorum_size, write_quorum_size)
}
/// Computes `Caesar` fast and write quorum sizes.
pub fn caesar_quorum_sizes(&self) -> (usize, usize) {
let n = self.n;
let fast_quorum_size = ((3 * n) / 4) + 1;
let write_quorum_size = (n / 2) + 1;
(fast_quorum_size, write_quorum_size)
}
/// Computes `Tempo` fast quorum size, stability threshold and write quorum
/// size.
///
/// The threshold should be n - q + 1, where n is the number of processes
/// and q the size of the quorum used to compute clocks. In `Tempo` e.g.
/// with tiny quorums, although the fast quorum is 2f (which would
/// suggest q = 2f), in fact q = f + 1. The quorum size of 2f ensures that
/// all clocks are computed from f + 1 processes. So, n - q + 1 = n - (f
/// + 1) + 1 = n - f.
///
/// In general, the stability threshold is given by:
/// "n - (fast_quorum_size - f + 1) + 1 = n - fast_quorum_size + f"
/// - this ensures that the stability threshold plus the minimum number of
/// processes where clocks are computed (i.e. fast_quorum_size - f + 1) is
/// greater than n
pub fn tempo_quorum_sizes(&self) -> (usize, usize, usize) {
let n = self.n;
let f = self.f;
let minority = n / 2;
let (fast_quorum_size, stability_threshold) = if self.tempo_tiny_quorums
{
(2 * f, n - f)
} else {
(minority + f, minority + 1)
};
let write_quorum_size = f + 1;
(fast_quorum_size, write_quorum_size, stability_threshold)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn config() {
// n and f
let n = 5;
let f = 1;
// config
let mut config = Config::new(n, f);
assert_eq!(config.n(), n);
assert_eq!(config.f(), f);
// by default, the number shards is 1.
assert_eq!(config.shard_count(), 1);
// but that can change
let shards = 10;
config.set_shard_count(shards);
assert_eq!(config.shard_count(), shards);
// by deafult, execute at commit is false
assert!(!config.execute_at_commit());
// but that can change
config.set_execute_at_commit(true);
assert!(config.execute_at_commit());
// by default, the executor cleanup interval is 5ms
assert_eq!(
config.executor_cleanup_interval(),
Duration::from_millis(5)
);
// change its value and check it has changed
let interval = Duration::from_secs(2);
config.set_executor_cleanup_interval(interval);
assert_eq!(config.executor_cleanup_interval(), interval);
// by default, the executor executed notification interval is 50ms
assert_eq!(
config.executor_executed_notification_interval(),
Duration::from_millis(50)
);
// change its value and check it has changed
let interval = Duration::from_secs(10);
config.set_executor_executed_notification_interval(interval);
assert_eq!(config.executor_executed_notification_interval(), interval);
// by default, there's executor monitor pending interval
assert_eq!(config.executor_monitor_pending_interval(), None);
// change its value and check it has changed
let interval = Duration::from_millis(1);
config.set_executor_monitor_pending_interval(interval);
assert_eq!(config.executor_monitor_pending_interval(), Some(interval));
// by default, executor monitor execution order is false
assert_eq!(config.executor_monitor_execution_order(), false);
// but that can change
config.set_executor_monitor_execution_order(true);
assert_eq!(config.executor_monitor_execution_order(), true);
// by default, there's no garbage collection interval
assert_eq!(config.gc_interval(), None);
// change its value and check it has changed
let interval = Duration::from_millis(1);
config.set_gc_interval(interval);
assert_eq!(config.gc_interval(), Some(interval));
// by default, there's no leader
assert!(config.leader().is_none());
// but that can change
let leader = 1;
config.set_leader(leader);
assert_eq!(config.leader(), Some(leader));
// by default, deps NFR is false
assert!(!config.nfr());
// if we change it to false, remains false
config.set_nfr(false);
assert!(!config.nfr());
// if we change it to true, it becomes true
config.set_nfr(true);
assert!(config.nfr());
// by default, tempo tiny quorums is false
assert!(!config.tempo_tiny_quorums());
// if we change it to false, remains false
config.set_tempo_tiny_quorums(false);
assert!(!config.tempo_tiny_quorums());
// if we change it to true, it becomes true
config.set_tempo_tiny_quorums(true);
assert!(config.tempo_tiny_quorums());
// by default, there's no clock bump interval
assert!(config.tempo_clock_bump_interval().is_none());
// but that can change
let interval = Duration::from_millis(1);
config.set_tempo_clock_bump_interval(interval);
assert_eq!(config.tempo_clock_bump_interval(), Some(interval));
// by default, there's no sending of `MDetached` messages
assert!(config.tempo_detached_send_interval().is_none());
// but that can change
let interval = Duration::from_millis(2);
config.set_tempo_detached_send_interval(interval);
assert_eq!(config.tempo_detached_send_interval(), Some(interval));
// by default, caesar wait condition is true
assert!(config.caesar_wait_condition());
// if we change it to true, remains true
config.set_caesar_wait_condition(true);
assert!(config.caesar_wait_condition());
// if we change it to false, it becomes false
config.set_caesar_wait_condition(false);
assert!(!config.caesar_wait_condition());
// by default, skip fast ack is false
assert!(!config.skip_fast_ack());
// if we change it to false, remains false
config.set_skip_fast_ack(false);
assert!(!config.skip_fast_ack());
// if we change it to true, it becomes true
config.set_skip_fast_ack(true);
assert!(config.skip_fast_ack());
}
#[test]
fn majority_quorum_size() {
let config = Config::new(3, 1);
assert_eq!(config.majority_quorum_size(), 2);
let config = Config::new(4, 1);
assert_eq!(config.majority_quorum_size(), 3);
let config = Config::new(5, 1);
assert_eq!(config.majority_quorum_size(), 3);
let config = Config::new(5, 2);
assert_eq!(config.majority_quorum_size(), 3);
let config = Config::new(6, 1);
assert_eq!(config.majority_quorum_size(), 4);
let config = Config::new(7, 1);
assert_eq!(config.majority_quorum_size(), 4);
}
#[test]
fn basic_parameters() {
let config = Config::new(7, 1);
assert_eq!(config.basic_quorum_size(), 2);
let config = Config::new(7, 2);
assert_eq!(config.basic_quorum_size(), 3);
let config = Config::new(7, 3);
assert_eq!(config.basic_quorum_size(), 4);
}
#[test]
fn atlas_parameters() {
let config = Config::new(7, 1);
assert_eq!(config.atlas_quorum_sizes(), (4, 2));
let config = Config::new(7, 2);
assert_eq!(config.atlas_quorum_sizes(), (5, 3));
let config = Config::new(7, 3);
assert_eq!(config.atlas_quorum_sizes(), (6, 4));
}
#[test]
fn epaxos_parameters() {
let ns = vec![3, 5, 7, 9, 11, 13, 15, 17];
// expected pairs of fast and write quorum sizes
let expected = vec![
(2, 2),
(3, 3),
(5, 4),
(6, 5),
(8, 6),
(9, 7),
(11, 8),
(12, 9),
];
let fs: Vec<_> = ns
.into_iter()
.map(|n| {
// this f value won't be used
let f = 0;
let config = Config::new(n, f);
config.epaxos_quorum_sizes()
})
.collect();
assert_eq!(fs, expected);
}
#[test]
fn caesar_parameters() {
let ns = vec![3, 5, 7, 9, 11];
// expected pairs of fast and write quorum sizes
let expected = vec![(3, 2), (4, 3), (6, 4), (7, 5), (9, 6)];
let fs: Vec<_> = ns
.into_iter()
.map(|n| {
// this f value won't be used
let f = 0;
let config = Config::new(n, f);
config.caesar_quorum_sizes()
})
.collect();
assert_eq!(fs, expected);
}
#[test]
fn tempo_parameters() {
// tiny quorums = false
let mut config = Config::new(7, 1);
config.set_tempo_tiny_quorums(false);
assert_eq!(config.tempo_quorum_sizes(), (4, 2, 4));
let mut config = Config::new(7, 2);
config.set_tempo_tiny_quorums(false);
assert_eq!(config.tempo_quorum_sizes(), (5, 3, 4));
// tiny quorums = true
let mut config = Config::new(7, 1);
config.set_tempo_tiny_quorums(true);
assert_eq!(config.tempo_quorum_sizes(), (2, 2, 6));
let mut config = Config::new(7, 2);
config.set_tempo_tiny_quorums(true);
assert_eq!(config.tempo_quorum_sizes(), (4, 3, 5));
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/kvs.rs | fantoch/src/kvs.rs | use crate::executor::ExecutionOrderMonitor;
use crate::id::Rifl;
use crate::HashMap;
use serde::{Deserialize, Serialize};
// Definition of `Key` and `Value` types.
pub type Key = String;
pub type Value = String;
#[derive(
Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize,
)]
pub enum KVOp {
Get,
Put(Value),
Delete,
}
pub type KVOpResult = Option<Value>;
#[derive(Default, Clone)]
pub struct KVStore {
store: HashMap<Key, Value>,
monitor: Option<ExecutionOrderMonitor>,
}
impl KVStore {
/// Creates a new `KVStore` instance.
pub fn new(monitor_execution_order: bool) -> Self {
let monitor = if monitor_execution_order {
Some(ExecutionOrderMonitor::new())
} else {
None
};
Self {
store: Default::default(),
monitor,
}
}
pub fn monitor(&self) -> Option<&ExecutionOrderMonitor> {
self.monitor.as_ref()
}
/// Executes `KVOp`s in the `KVStore`.
#[cfg(test)]
pub fn test_execute(&mut self, key: &Key, op: KVOp) -> KVOpResult {
let mut results = self.do_execute(key, vec![op]);
assert_eq!(results.len(), 1);
results.pop().unwrap()
}
pub fn execute(
&mut self,
key: &Key,
ops: Vec<KVOp>,
rifl: Rifl,
) -> Vec<KVOpResult> {
// update monitor, if we're monitoring
if let Some(monitor) = self.monitor.as_mut() {
let read_only = ops.iter().all(|op| op == &KVOp::Get);
monitor.add(&key, read_only, rifl);
}
self.do_execute(key, ops)
}
#[allow(clippy::ptr_arg)]
fn do_execute(&mut self, key: &Key, ops: Vec<KVOp>) -> Vec<KVOpResult> {
ops.into_iter()
.map(|op| self.do_execute_op(key, op))
.collect()
}
fn do_execute_op(&mut self, key: &Key, op: KVOp) -> KVOpResult {
match op {
KVOp::Get => self.store.get(key).cloned(),
KVOp::Put(value) => {
// don't return the previous value
self.store.insert(key.clone(), value);
None
}
KVOp::Delete => self.store.remove(key),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn store_flow() {
// key and values
let key_a = String::from("A");
let key_b = String::from("B");
let x = String::from("x");
let y = String::from("y");
let z = String::from("z");
// store
let monitor = false;
let mut store = KVStore::new(monitor);
// get key_a -> none
assert_eq!(store.test_execute(&key_a, KVOp::Get), None);
// get key_b -> none
assert_eq!(store.test_execute(&key_b, KVOp::Get), None);
// put key_a x -> none
assert_eq!(store.test_execute(&key_a, KVOp::Put(x.clone())), None);
// get key_a -> some(x)
assert_eq!(store.test_execute(&key_a, KVOp::Get), Some(x.clone()));
// put key_b y -> none
assert_eq!(store.test_execute(&key_b, KVOp::Put(y.clone())), None);
// get key_b -> some(y)
assert_eq!(store.test_execute(&key_b, KVOp::Get), Some(y.clone()));
// put key_a z -> some(x)
assert_eq!(
store.test_execute(&key_a, KVOp::Put(z.clone())),
None,
/*
the following is correct if Put returns the previous value
Some(x.clone())
*/
);
// get key_a -> some(z)
assert_eq!(store.test_execute(&key_a, KVOp::Get), Some(z.clone()));
// get key_b -> some(y)
assert_eq!(store.test_execute(&key_b, KVOp::Get), Some(y.clone()));
// delete key_a -> some(z)
assert_eq!(store.test_execute(&key_a, KVOp::Delete), Some(z.clone()));
// get key_a -> none
assert_eq!(store.test_execute(&key_a, KVOp::Get), None);
// get key_b -> some(y)
assert_eq!(store.test_execute(&key_b, KVOp::Get), Some(y.clone()));
// delete key_b -> some(y)
assert_eq!(store.test_execute(&key_b, KVOp::Delete), Some(y.clone()));
// get key_b -> none
assert_eq!(store.test_execute(&key_b, KVOp::Get), None);
// get key_a -> none
assert_eq!(store.test_execute(&key_a, KVOp::Get), None);
// put key_a x -> none
assert_eq!(store.test_execute(&key_a, KVOp::Put(x.clone())), None);
// get key_a -> some(x)
assert_eq!(store.test_execute(&key_a, KVOp::Get), Some(x.clone()));
// get key_b -> none
assert_eq!(store.test_execute(&key_b, KVOp::Get), None);
// delete key_a -> some(x)
assert_eq!(store.test_execute(&key_a, KVOp::Delete), Some(x.clone()));
// get key_a -> none
assert_eq!(store.test_execute(&key_a, KVOp::Get), None);
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/lib.rs | fantoch/src/lib.rs | #![deny(rust_2018_idioms)]
// This module contains the definition of `Region` and `Planet`.
pub mod planet;
// This module contains the definition of all identifiers and generators of
// identifiers.
pub mod id;
// This module contains the definition of `Client`.
pub mod client;
// This module contains the implementation of a key-value store.
pub mod kvs;
// This module contains the definitions of `Metrics`, `Float` and `Histogram`.
pub mod metrics;
// This module contains the definition of an executor.
pub mod executor;
// This module contains the definition of `Command`, `CommandResult` and
// `Pending`.
pub mod command;
// This module contains the definition of `Config`.
pub mod config;
// This module contains the definition of `ToSend`, `Process` and `BaseProcess`
// and implementations of all protocols supported.
pub mod protocol;
// This module contains the definition of `SharedMap`.
pub mod shared;
// This module contains the definition of trait `SysTime` and its
// implementations.
pub mod time;
// This module contains the definition of `Simulation` and `Runner`.
pub mod sim;
// This module contains the definition of Runner` (that actually runs a given
// `Process`)
#[cfg(feature = "run")]
pub mod run;
pub mod load_balance {
use crate::id::Dot;
// the worker index that should be used by leader-based protocols
pub const LEADER_WORKER_INDEX: usize = 0;
// the worker index that should be for garbage collection:
// - it's okay to be the same as the leader index because this value is not
// used by leader-based protocols
// - e.g. in fpaxos, the gc only runs in the acceptor worker
pub const GC_WORKER_INDEX: usize = 0;
pub const WORKERS_INDEXES_RESERVED: usize = 2;
pub fn worker_index_no_shift(index: usize) -> Option<(usize, usize)> {
// when there's no shift, the index must be either 0 or 1
assert!(index < WORKERS_INDEXES_RESERVED);
Some((0, index))
}
// note: reserved indexing always reserve the first two workers
pub const fn worker_index_shift(index: usize) -> Option<(usize, usize)> {
Some((WORKERS_INDEXES_RESERVED, index))
}
pub fn worker_dot_index_shift(dot: &Dot) -> Option<(usize, usize)> {
worker_index_shift(dot.sequence() as usize)
}
}
// This module contains some utilitary functions.
pub mod util;
// Re-export `HashMap` and `HashSet`.
pub use hash_map::HashMap;
pub use hash_set::HashSet;
pub mod hash_map {
pub use hashbrown::hash_map::*;
}
pub mod hash_set {
pub use hashbrown::hash_set::*;
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/command.rs | fantoch/src/command.rs | use crate::executor::ExecutorResult;
use crate::id::{Rifl, ShardId};
use crate::kvs::{KVOp, KVOpResult, KVStore, Key};
use crate::HashMap;
use serde::{Deserialize, Serialize};
use std::fmt::{self, Debug};
use std::iter::FromIterator;
use std::sync::Arc;
pub const DEFAULT_SHARD_ID: ShardId = 0;
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Command {
rifl: Rifl,
shard_to_ops: HashMap<ShardId, HashMap<Key, Arc<Vec<KVOp>>>>,
// mapping from shard to the keys on that shard; this will be used by
// `Tempo` to exchange `MStable` messages between shards
shard_to_keys: Arc<HashMap<ShardId, Vec<Key>>>,
// field used to output and empty iterator of keys when rustc can't figure
// out what we mean
_empty_keys: HashMap<Key, Arc<Vec<KVOp>>>,
}
impl Command {
    /// Create a new `Command`.
    pub fn new(
        rifl: Rifl,
        shard_to_ops: HashMap<ShardId, HashMap<Key, Vec<KVOp>>>,
    ) -> Self {
        let mut shard_to_keys: HashMap<ShardId, Vec<Key>> = Default::default();
        let shard_to_ops = shard_to_ops
            .into_iter()
            .map(|(shard_id, shard_ops)| {
                (
                    shard_id,
                    shard_ops
                        .into_iter()
                        .map(|(key, ops)| {
                            // populate `shard_to_keys`
                            shard_to_keys
                                .entry(shard_id)
                                .or_default()
                                .push(key.clone());
                            // `Arc` the ops on this key
                            (key, Arc::new(ops))
                        })
                        .collect(),
                )
            })
            .collect();
        Self {
            rifl,
            shard_to_ops,
            shard_to_keys: Arc::new(shard_to_keys),
            _empty_keys: HashMap::new(),
        }
    }
    // Create a new `Command` from an iterator of `(key, op)` pairs; all keys
    // are mapped to `DEFAULT_SHARD_ID`.
    pub fn from<I: IntoIterator<Item = (Key, KVOp)>>(
        rifl: Rifl,
        iter: I,
    ) -> Self {
        // store all keys in the default shard
        let inner = HashMap::from_iter(
            iter.into_iter().map(|(key, op)| (key, vec![op])),
        );
        let shard_to_ops =
            HashMap::from_iter(std::iter::once((DEFAULT_SHARD_ID, inner)));
        Self::new(rifl, shard_to_ops)
    }
    /// Checks if the NFR optimization can be applied.
    /// NFR only applies to single-key read-only commands.
    pub fn nfr_allowed(&self) -> bool {
        self.read_only() && self.total_key_count() == 1
    }
    /// Checks if the command is read-only.
    pub fn read_only(&self) -> bool {
        // a command is read-only if all ops are `Get`s
        self.shard_to_ops.values().all(|shard_ops| {
            shard_ops
                .values()
                .all(|ops| ops.iter().all(|op| op == &KVOp::Get))
        })
    }
    /// Checks if the command is replicated by `shard_id`.
    pub fn replicated_by(&self, shard_id: &ShardId) -> bool {
        self.shard_to_ops.contains_key(&shard_id)
    }
    /// Returns the command identifier.
    pub fn rifl(&self) -> Rifl {
        self.rifl
    }
    /// Returns the number of keys accessed by this command on the shard
    /// provided (0 if the shard is not accessed).
    pub fn key_count(&self, shard_id: ShardId) -> usize {
        self.shard_to_ops
            .get(&shard_id)
            .map(|shard_ops| shard_ops.len())
            .unwrap_or(0)
    }
    /// Returns the total number of keys accessed by this command.
    pub fn total_key_count(&self) -> usize {
        self.shard_to_ops.values().map(|ops| ops.len()).sum()
    }
    /// Returns references to the keys accessed by this command on the shard
    /// provided (an empty iterator if the shard is not accessed).
    pub fn keys(&self, shard_id: ShardId) -> impl Iterator<Item = &Key> {
        self.shard_to_ops
            .get(&shard_id)
            .map(|shard_ops| shard_ops.keys())
            // `_empty_keys` is always empty; it only exists so that both
            // branches have the same iterator type
            .unwrap_or_else(|| self._empty_keys.keys())
    }
    /// Returns references to all the keys accessed by this command.
    pub fn all_keys(&self) -> impl Iterator<Item = (&ShardId, &Key)> {
        self.shard_to_ops.iter().flat_map(|(shard_id, shard_ops)| {
            shard_ops.keys().map(move |key| (shard_id, key))
        })
    }
    /// Returns a mapping from shard identifier to the keys being accessed on
    /// that shard.
    pub fn shard_to_keys(&self) -> &Arc<HashMap<ShardId, Vec<Key>>> {
        &self.shard_to_keys
    }
    /// Returns the number of shards accessed by this command.
    pub fn shard_count(&self) -> usize {
        self.shard_to_ops.len()
    }
    /// Returns the shards accessed by this command.
    pub fn shards(&self) -> impl Iterator<Item = &ShardId> {
        self.shard_to_ops.keys()
    }
    /// Executes self in a `KVStore`, returning the resulting an iterator of
    /// `ExecutorResult`.
    pub fn execute<'a>(
        self,
        shard_id: ShardId,
        store: &'a mut KVStore,
    ) -> impl Iterator<Item = ExecutorResult> + 'a {
        let rifl = self.rifl;
        self.into_iter(shard_id).map(move |(key, ops)| {
            // take the ops inside the arc if we're the last with a
            // reference to it (otherwise, clone them)
            let ops =
                Arc::try_unwrap(ops).unwrap_or_else(|ops| ops.as_ref().clone());
            // execute this op
            let partial_results = store.execute(&key, ops, rifl);
            ExecutorResult::new(rifl, key, partial_results)
        })
    }
    // Creates an iterator with ops on keys that belong to `shard_id`.
    pub fn iter(
        &self,
        shard_id: ShardId,
    ) -> impl Iterator<Item = (&Key, &Arc<Vec<KVOp>>)> {
        self.shard_to_ops
            .get(&shard_id)
            .map(|shard_ops| shard_ops.iter())
            .unwrap_or_else(|| self._empty_keys.iter())
    }
    // Creates an iterator with ops on keys that belong to `shard_id`,
    // consuming (removing) them from the command.
    pub fn into_iter(
        mut self,
        shard_id: ShardId,
    ) -> impl Iterator<Item = (Key, Arc<Vec<KVOp>>)> {
        self.shard_to_ops
            .remove(&shard_id)
            .map(|shard_ops| shard_ops.into_iter())
            .unwrap_or_else(|| self._empty_keys.into_iter())
    }
    /// Checks if a command conflicts with another given command, i.e. if both
    /// access at least one common key on some common shard.
    pub fn conflicts(&self, other: &Command) -> bool {
        self.shard_to_ops.iter().any(|(shard_id, shard_ops)| {
            shard_ops
                .iter()
                .any(|(key, _)| other.contains_key(*shard_id, key))
        })
    }
    /// Checks if `key` is accessed by this command.
    fn contains_key(&self, shard_id: ShardId, key: &Key) -> bool {
        self.shard_to_ops
            .get(&shard_id)
            .map(|shard_ops| shard_ops.contains_key(key))
            .unwrap_or(false)
    }
    /// Adds the operations in the `other` command to this command.
    /// Panics if either command's ops have been cloned (i.e. if any `Arc`
    /// has more than one reference), since merging requires exclusive access.
    pub fn merge(&mut self, other: Command) {
        for (shard_id, shard_ops) in other.shard_to_ops {
            let current_shard_ops =
                self.shard_to_ops.entry(shard_id).or_default();
            for (key, ops) in shard_ops {
                let ops = Arc::try_unwrap(ops).expect("a command to be merged into another command should have not been cloned");
                let current_ops = current_shard_ops.entry(key).or_default();
                Arc::get_mut(current_ops).expect("a command should only be cloned after all merges have occurred").extend(ops);
            }
        }
    }
}
impl fmt::Debug for Command {
    /// Formats the command as `(rifl -> keys)`; the accessed `(shard, key)`
    /// pairs are shown in sorted order so the output is deterministic.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // collect every (shard, key) pair into a BTreeSet to get a sorted,
        // deterministic debug representation
        let mut sorted_keys = std::collections::BTreeSet::new();
        for (shard_id, ops) in self.shard_to_ops.iter() {
            for key in ops.keys() {
                sorted_keys.insert((shard_id, key));
            }
        }
        write!(f, "({:?} -> {:?})", self.rifl, sorted_keys)
    }
}
/// Structure that aggregates partial results of multi-key commands.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct CommandResultBuilder {
    // identifier of the command being aggregated
    rifl: Rifl,
    // number of keys accessed by the command; the result is ready once
    // `results` has this many entries
    key_count: usize,
    // partial results received so far, per key
    results: HashMap<Key, Vec<KVOpResult>>,
}
impl CommandResultBuilder {
    /// Creates a new `CommandResultBuilder` given the number of keys accessed
    /// by the command.
    pub fn new(rifl: Rifl, key_count: usize) -> Self {
        CommandResultBuilder {
            rifl,
            key_count,
            results: HashMap::new(),
        }
    }
    /// Adds a partial command result to the overall result.
    /// Panics if a partial result for `key` was already added; use `ready` to
    /// check whether the full result is complete.
    pub fn add_partial(&mut self, key: Key, partial_results: Vec<KVOpResult>) {
        // add op result for `key`
        let res = self.results.insert(key, partial_results);
        // assert there was nothing about this `key` previously
        assert!(res.is_none());
    }
    /// Checks whether all partial results have been added.
    pub fn ready(&self) -> bool {
        // we're ready if the number of partial results equals `key_count`
        self.results.len() == self.key_count
    }
}
/// Final (complete) result of a multi-key command.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct CommandResult {
    // identifier of the command
    rifl: Rifl,
    // result of each operation, per key
    results: HashMap<Key, Vec<KVOpResult>>,
}
impl CommandResult {
    /// Creates a new `CommandResult`.
    pub fn new(rifl: Rifl, results: HashMap<Key, Vec<KVOpResult>>) -> Self {
        CommandResult { rifl, results }
    }
    /// Returns the command identifier.
    pub fn rifl(&self) -> Rifl {
        self.rifl
    }
    /// Returns the commands results.
    pub fn results(&self) -> &HashMap<Key, Vec<KVOpResult>> {
        &self.results
    }
}
impl From<CommandResultBuilder> for CommandResult {
fn from(cmd_result_builder: CommandResultBuilder) -> Self {
assert!(cmd_result_builder.ready());
Self {
rifl: cmd_result_builder.rifl,
results: cmd_result_builder.results,
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // Creates a command that `Put`s each of the given keys (the value written
    // is the key itself).
    fn multi_put(rifl: Rifl, keys: Vec<String>) -> Command {
        Command::from(
            rifl,
            keys.into_iter().map(|key| (key.clone(), KVOp::Put(key))),
        )
    }
    #[test]
    fn conflicts() {
        let rifl = Rifl::new(1, 1);
        let cmd_a = multi_put(rifl, vec![String::from("A")]);
        let cmd_b = multi_put(rifl, vec![String::from("B")]);
        let cmd_c = multi_put(rifl, vec![String::from("C")]);
        let cmd_ab =
            multi_put(rifl, vec![String::from("A"), String::from("B")]);
        // check command a conflicts
        assert!(cmd_a.conflicts(&cmd_a));
        assert!(!cmd_a.conflicts(&cmd_b));
        assert!(!cmd_a.conflicts(&cmd_c));
        assert!(cmd_a.conflicts(&cmd_ab));
        // check command b conflicts
        assert!(!cmd_b.conflicts(&cmd_a));
        assert!(cmd_b.conflicts(&cmd_b));
        assert!(!cmd_b.conflicts(&cmd_c));
        assert!(cmd_b.conflicts(&cmd_ab));
        // check command c conflicts
        assert!(!cmd_c.conflicts(&cmd_a));
        assert!(!cmd_c.conflicts(&cmd_b));
        assert!(cmd_c.conflicts(&cmd_c));
        assert!(!cmd_c.conflicts(&cmd_ab));
        // check command ab conflicts
        assert!(cmd_ab.conflicts(&cmd_a));
        assert!(cmd_ab.conflicts(&cmd_b));
        assert!(!cmd_ab.conflicts(&cmd_c));
        assert!(cmd_ab.conflicts(&cmd_ab));
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/id.rs | fantoch/src/id.rs | use serde::{Deserialize, Serialize};
use std::fmt;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
// process ids
pub type ProcessId = u8;
// a `Dot` uniquely identifies a command: the process that created it plus a
// per-process sequence number
pub type Dot = Id<ProcessId>;
pub type DotGen = IdGen<ProcessId>;
pub type AtomicDotGen = AtomicIdGen<ProcessId>;
// client ids
// for info on RIFL see: http://sigops.org/sosp/sosp15/current/2015-Monterey/printable/126-lee.pdf
pub type ClientId = u64;
pub type Rifl = Id<ClientId>;
pub type RiflGen = IdGen<ClientId>;
// shard ids
pub type ShardId = u64;
/// Generic identifier: a `source` (who created it) plus a monotonically
/// increasing `sequence` number local to that source.
#[derive(
    Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize,
)]
pub struct Id<S> {
    source: S,
    sequence: u64,
}
impl<S> Id<S>
where
    S: Copy,
{
    /// Creates a new identifier Id.
    pub fn new(source: S, sequence: u64) -> Self {
        Self { source, sequence }
    }
    /// Retrieves the source that created this `Id`.
    pub fn source(&self) -> S {
        self.source
    }
    /// Retrieves the sequence consumed by this `Id`.
    pub fn sequence(&self) -> u64 {
        self.sequence
    }
}
impl<S> fmt::Debug for Id<S>
where
    S: fmt::Debug,
{
    // formats the id as `(source, sequence)`
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "({:?}, {})", self.source, self.sequence)
    }
}
impl Dot {
    /// Computes the shard targeted by this dot in a system where each shard
    /// has `n` processes: process ids `1..=n` map to shard 0, `n+1..=2n` to
    /// shard 1, and so on (see `util::process_ids`).
    pub fn target_shard(&self, n: usize) -> ShardId {
        ((self.source() - 1) as usize / n) as ShardId
    }
}
/// Single-threaded generator of sequential `Id`s for a fixed `source`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct IdGen<S> {
    source: S,
    // sequence of the last id generated; the first id has sequence 1
    last_sequence: u64,
}
impl<S> IdGen<S>
where
    S: Copy,
{
    /// Creates a new generator of `Id`.
    pub fn new(source: S) -> Self {
        Self {
            source,
            last_sequence: 0,
        }
    }
    /// Retrieves source.
    pub fn source(&self) -> S {
        self.source
    }
    /// Generates the next `Id`; sequences start at 1.
    pub fn next_id(&mut self) -> Id<S> {
        self.last_sequence += 1;
        Id::new(self.source, self.last_sequence)
    }
}
/// Thread-safe generator of sequential `Id`s; clones share the same counter
/// (via `Arc`), so ids are unique across all clones.
#[derive(Clone)]
pub struct AtomicIdGen<S> {
    source: S,
    last_sequence: Arc<AtomicU64>,
}
impl<S> AtomicIdGen<S>
where
    S: Copy,
{
    /// Creates a new generator of `Id`.
    pub fn new(source: S) -> Self {
        Self {
            source,
            last_sequence: Arc::new(AtomicU64::new(0)),
        }
    }
    /// Retrieves source.
    pub fn source(&self) -> S {
        self.source
    }
    /// Generates the next `Id`; `fetch_add` makes each sequence unique even
    /// when called concurrently from several threads.
    pub fn next_id(&self) -> Id<S> {
        // TODO can the ordering be `Ordering::Relaxed`?
        let previous = self.last_sequence.fetch_add(1, Ordering::SeqCst);
        Id::new(self.source, previous + 1)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn next_id() {
        type MyGen = IdGen<u64>;
        // create id generator
        let source = 10;
        let mut gen = MyGen::new(source);
        // check source
        assert_eq!(gen.source(), source);
        // check the `id` generated for `id_count` ids
        let id_count = 100;
        for seq in 1..=id_count {
            // generate id
            let id = gen.next_id();
            // check `id`
            assert_eq!(id.source(), source);
            assert_eq!(id.sequence(), seq);
        }
    }
    #[test]
    fn atomic_next_id() {
        type MyAtomicGen = AtomicIdGen<u64>;
        // create id generator
        let source = 10;
        let gen = MyAtomicGen::new(source);
        // check source
        assert_eq!(gen.source(), source);
        // check the `id` generated for `id_count` ids
        let id_count = 100;
        for seq in 1..=id_count {
            // generate id
            let id = gen.next_id();
            // check `id`
            assert_eq!(id.source(), source);
            assert_eq!(id.sequence(), seq);
        }
    }
    #[test]
    fn dot_target() {
        // every process id of every shard must map back to its own shard
        let shard_count = 5;
        let n = 3;
        crate::util::all_process_ids(shard_count, n).for_each(
            |(process_id, shard_id)| {
                assert_eq!(Dot::new(process_id, 1).target_shard(n), shard_id)
            },
        );
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/time.rs | fantoch/src/time.rs | use std::time::{Duration, SystemTime, UNIX_EPOCH};
/// Clock abstraction: implemented by the real clock (`RunTime`) and by the
/// simulated clock (`SimTime`).
pub trait SysTime: Send + 'static + Sync /* TODO why is Sync needed here */ {
    // Current time in milliseconds.
    fn millis(&self) -> u64;
    // Current time in microseconds.
    fn micros(&self) -> u64;
}
// TODO find a better name
// Real clock: reports the time elapsed since the UNIX epoch.
pub struct RunTime;
impl RunTime {
    // Computes the duration elapsed since the UNIX epoch.
    fn duration_since_unix_epoch(&self) -> Duration {
        let now = SystemTime::now();
        now.duration_since(UNIX_EPOCH)
            .expect("we're way past UNIX EPOCH")
    }
}
impl SysTime for RunTime {
    fn millis(&self) -> u64 {
        self.duration_since_unix_epoch().as_millis() as u64
    }
    fn micros(&self) -> u64 {
        self.duration_since_unix_epoch().as_micros() as u64
    }
}
/// Simulated clock: time only advances when explicitly told to, which makes
/// simulations deterministic.
#[derive(Default)]
pub struct SimTime {
    // current simulated time, in microseconds
    micros: u64,
}
impl SimTime {
    /// Creates a new simulation time, starting at zero.
    pub fn new() -> Self {
        Self { micros: 0 }
    }
    /// Advances simulation time by `millis` milliseconds.
    pub fn add_millis(&mut self, millis: u64) {
        let delta = Self::millis_to_micros(millis);
        self.micros += delta;
    }
    /// Sets simulation time; panics if the clock would move backwards.
    pub fn set_millis(&mut self, new_time_millis: u64) {
        let next_micros = Self::millis_to_micros(new_time_millis);
        // make sure time is monotonic
        assert!(self.micros <= next_micros);
        self.micros = next_micros;
    }
    // Converts milliseconds into microseconds.
    fn millis_to_micros(millis: u64) -> u64 {
        millis * 1000
    }
    // Converts microseconds into milliseconds, rounding down.
    fn micros_to_millis(micros: u64) -> u64 {
        micros / 1000
    }
}
impl SysTime for SimTime {
    fn micros(&self) -> u64 {
        self.micros
    }
    fn millis(&self) -> u64 {
        Self::micros_to_millis(self.micros)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn sim_now() {
        // create new simulation time
        let mut time = SimTime::new();
        assert_eq!(time.micros(), 0);
        // first tick
        let tick = 10;
        time.add_millis(tick);
        assert_eq!(time.millis(), 10);
        // second tick
        let tick = 6;
        time.add_millis(tick);
        assert_eq!(time.millis(), 16);
        // set time at 20
        time.set_millis(20);
        assert_eq!(time.millis(), 20);
    }
    #[test]
    #[should_panic]
    fn sim_time_should_be_monotonic() {
        // create new simulation time
        let mut time = SimTime::new();
        // set time at 20
        time.set_millis(20);
        // compare `millis` here (the previous code compared `micros`, which
        // is 20_000 at this point; that assert panicked immediately, making
        // this `should_panic` test pass without ever exercising the
        // monotonicity assert below)
        assert_eq!(time.millis(), 20);
        // set time at 19
        // should panic!
        time.set_millis(19);
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/util.rs | fantoch/src/util.rs | use crate::id::{Dot, ProcessId, ShardId};
use crate::kvs::Key;
use crate::planet::{Planet, Region};
use crate::HashMap;
use std::hash::{Hash, Hasher};
// warn and info logs are always enabled!
// - debug is enabled if `max_level_debug` or `max_level_trace`
// - trace is enabled if `max_level_trace`
// always-on warn log, forwarding to `tracing::warn!`
#[macro_export]
macro_rules! warn {
    ($( $args:expr ),*) => { tracing::warn!( $( $args ),* ) }
}
// always-on info log, forwarding to `tracing::info!`
#[macro_export]
macro_rules! info {
    ($( $args:expr ),*) => { tracing::info!( $( $args ),* ) }
}
// debug log: forwards to `tracing::debug!` when enabled by feature
#[macro_export]
#[cfg(any(feature = "max_level_debug", feature = "max_level_trace"))]
macro_rules! debug {
    ($( $args:expr ),*) => { tracing::debug!( $( $args ),* ) }
}
// debug log: compiled away when the feature is disabled
#[macro_export]
#[cfg(not(any(feature = "max_level_debug", feature = "max_level_trace")))]
macro_rules! debug {
    ($( $args:expr ),*) => {
        ()
    };
}
// trace log: forwards to `tracing::trace!` when enabled by feature
#[macro_export]
#[cfg(feature = "max_level_trace")]
macro_rules! trace {
    ($( $args:expr ),*) => { tracing::trace!( $( $args ),* ) }
}
// trace log: compiled away when the feature is disabled
#[macro_export]
#[cfg(not(feature = "max_level_trace"))]
macro_rules! trace {
    ($( $args:expr ),*) => {
        ()
    };
}
/// create a singleton hash set
#[macro_export]
macro_rules! singleton {
    ( $x:expr ) => {{
        let mut set = HashSet::with_capacity(1);
        set.insert($x);
        set
    }};
}
// evaluates `$x`, returning `(elapsed_time, result)`
#[macro_export]
macro_rules! elapsed {
    ( $x:expr ) => {{
        use std::time::Instant;
        let start = Instant::now();
        let result = $x;
        let time = start.elapsed();
        (time, result)
    }};
}
// Initializes the global tracing subscriber, writing either to `log_file` (if
// set) or to stdout. The returned guard must be kept alive for the duration
// of the program: dropping it stops the background writer thread and flushes
// pending logs (hence `#[must_use]`).
#[must_use]
pub fn init_tracing_subscriber(
    log_file: Option<impl AsRef<std::path::Path> + std::fmt::Debug>,
    tracing_directives: Option<&'static str>,
) -> tracing_appender::non_blocking::WorkerGuard {
    // create log format
    let format = tracing_subscriber::fmt::format()
        .without_time()
        .with_target(false)
        .with_level(false)
        .with_thread_ids(false)
        .with_thread_names(false)
        .with_ansi(false);
    // create env filter: explicit directives win over the environment
    let env_filter = match tracing_directives {
        Some(dirs) => tracing_subscriber::EnvFilter::new(dirs),
        None => tracing_subscriber::EnvFilter::from_default_env(),
    };
    // these use `println!` (not tracing) since the subscriber is not yet set
    println!("log_file: {:?}", log_file);
    println!("env_filter: {}", env_filter);
    // create writer; `lossy(false)` blocks instead of dropping log lines
    let builder = tracing_appender::non_blocking::NonBlockingBuilder::default()
        .lossy(false);
    let (non_blocking_appender, guard) = match log_file {
        Some(log_file) => {
            builder.finish(tracing_appender::rolling::never(".", log_file))
        }
        None => builder.finish(std::io::stdout()),
    };
    tracing_subscriber::fmt()
        .event_format(format)
        .with_writer(non_blocking_appender)
        .with_env_filter(env_filter)
        .init();
    guard
}
// non-cryptographic hasher used for key hashing
type DefaultHasher = ahash::AHasher;
/// Compute the hash of a key.
// `&Key` is `&String`; the allow keeps the signature as-is for callers
#[allow(clippy::ptr_arg)]
pub fn key_hash(key: &Key) -> u64 {
    let mut hasher = DefaultHasher::default();
    key.hash(&mut hasher);
    hasher.finish()
}
/// Returns an iterator with all process identifiers in this shard in a system
/// with `n` processes.
///
/// Identifiers are 1-based: the processes of shard `s` take the contiguous
/// range `s * n + 1 ..= s * n + n`.
pub fn process_ids(
    shard_id: ShardId,
    n: usize,
) -> impl Iterator<Item = ProcessId> {
    // compute the (non-zero) range of identifiers owned by this shard
    let first = n * shard_id as usize + 1;
    let last = n * shard_id as usize + n;
    (first..=last).map(|id| id as ProcessId)
}
/// Returns an iterator over every `(process_id, shard_id)` pair in a system
/// with `shard_count` shards of `n` processes each.
pub fn all_process_ids(
    shard_count: usize,
    n: usize,
) -> impl Iterator<Item = (ProcessId, ShardId)> {
    (0..shard_count)
        .map(|shard| shard as ShardId)
        .flat_map(move |shard_id| {
            // pair each process of this shard with its shard id
            process_ids(shard_id, n)
                .map(move |process_id| (process_id, shard_id))
        })
}
/// Converts a representation of dots to the actual dots: each
/// `(source, start, end)` triple expands into the dots
/// `(source, start) ..= (source, end)`.
pub fn dots(repr: Vec<(ProcessId, u64, u64)>) -> impl Iterator<Item = Dot> {
    repr.into_iter().flat_map(|(source, first, last)| {
        (first..=last).map(move |seq| Dot::new(source, seq))
    })
}
/// Sorts `processes` by their distance to `region` (closest first), dropping
/// each process' own region from the result; processes in the same region are
/// ordered by id.
pub fn sort_processes_by_distance(
    region: &Region,
    planet: &Planet,
    mut processes: Vec<(ProcessId, ShardId, Region)>,
) -> Vec<(ProcessId, ShardId)> {
    // TODO the following computation could be cached on `planet`
    let indexes: HashMap<_, _> = planet
        // get all regions sorted by distance from `region`
        .sorted(region)
        .expect("region should be part of planet")
        .iter()
        // create a mapping from region to its index
        .enumerate()
        .map(|(index, (_distance, region))| (region, index))
        .collect();
    // use the region order index (based on distance) to order `processes`
    // - if two `processes` are from the same region, they're sorted by id
    processes.sort_unstable_by(|(id_a, _, a), (id_b, _, b)| {
        if a == b {
            id_a.cmp(id_b)
        } else {
            let index_a = indexes.get(a).expect("region should exist");
            let index_b = indexes.get(b).expect("region should exist");
            index_a.cmp(index_b)
        }
    });
    processes
        .into_iter()
        .map(|(id, shard_id, _)| (id, shard_id))
        .collect()
}
/// Returns a mapping from shard id to the closest process on that shard.
pub fn closest_process_per_shard(
    region: &Region,
    planet: &Planet,
    processes: Vec<(ProcessId, ShardId, Region)>,
) -> HashMap<ShardId, ProcessId> {
    // sort processes by distance to `region` (closest first)
    let sorted = sort_processes_by_distance(region, planet, processes);
    let mut processes = HashMap::new();
    for (process_id, shard_id) in sorted {
        // since processes are sorted by distance, the first process seen for
        // each shard is the closest one; the entry API performs a single
        // lookup (instead of `contains_key` followed by `insert`)
        processes.entry(shard_id).or_insert(process_id);
    }
    processes
}
#[cfg(test)]
pub mod tests {
    use super::*;
    #[test]
    fn process_ids_test() {
        let n = 3;
        assert_eq!(process_ids(0, n).collect::<Vec<_>>(), vec![1, 2, 3]);
        assert_eq!(process_ids(1, n).collect::<Vec<_>>(), vec![4, 5, 6]);
        assert_eq!(process_ids(3, n).collect::<Vec<_>>(), vec![10, 11, 12]);
        let n = 5;
        assert_eq!(process_ids(0, n).collect::<Vec<_>>(), vec![1, 2, 3, 4, 5]);
        assert_eq!(
            process_ids(2, n).collect::<Vec<_>>(),
            vec![11, 12, 13, 14, 15]
        );
    }
    #[test]
    fn sort_processes_by_distance_test() {
        // processes
        let processes = vec![
            (0, Region::new("asia-east1")),
            (1, Region::new("asia-northeast1")),
            (2, Region::new("asia-south1")),
            (3, Region::new("asia-southeast1")),
            (4, Region::new("australia-southeast1")),
            (5, Region::new("europe-north1")),
            (6, Region::new("europe-west1")),
            (7, Region::new("europe-west2")),
            (8, Region::new("europe-west3")),
            (9, Region::new("europe-west4")),
            (10, Region::new("northamerica-northeast1")),
            (11, Region::new("southamerica-east1")),
            (12, Region::new("us-central1")),
            (13, Region::new("us-east1")),
            (14, Region::new("us-east4")),
            (15, Region::new("us-west1")),
            (16, Region::new("us-west2")),
        ];
        let shard_id = 0;
        // map them all to the same shard
        let processes = processes
            .into_iter()
            .map(|(process_id, region)| (process_id, shard_id, region))
            .collect();
        // sort processes (note: the reference to `region` was previously
        // corrupted into `®ion`, which is not valid Rust)
        let region = Region::new("europe-west3");
        let planet = Planet::new();
        let sorted = sort_processes_by_distance(&region, &planet, processes);
        let expected =
            vec![8, 9, 6, 7, 5, 14, 10, 13, 12, 15, 16, 11, 1, 0, 4, 3, 2];
        // map them all to the same shard
        let expected: Vec<_> = expected
            .into_iter()
            .map(|process_id| (process_id, shard_id))
            .collect();
        assert_eq!(expected, sorted);
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/shared.rs | fantoch/src/shared.rs | use dashmap::iter::Iter;
use dashmap::mapref::one::Ref;
use dashmap::DashMap;
use std::collections::hash_map::RandomState;
use std::collections::BTreeSet;
use std::hash::Hash;
// TODO: - try https://docs.rs/lever/0.1.1/lever/table/lotable/struct.LOTable.html
// as an alternative to dashmap.
// - flurry is also an option
// iterator over all entries of a `SharedMap`
pub type SharedMapIter<'a, K, V> =
    Iter<'a, K, V, RandomState, DashMap<K, V, RandomState>>;
// shared (read) reference to one entry of a `SharedMap`
pub type SharedMapRef<'a, K, V> = Ref<'a, K, V>;
/// Concurrent map shared between workers, backed by `DashMap`.
#[derive(Debug, Clone)]
pub struct SharedMap<K: Eq + Hash + Clone, V> {
    shared: DashMap<K, V>,
}
impl<K, V> SharedMap<K, V>
where
    K: Eq + Hash + Clone,
{
    // Create a `Shared` instance.
    pub fn new() -> Self {
        // create shared
        let shared = DashMap::new();
        Self { shared }
    }
    // Retrieves the value associated with `key`, if any.
    pub fn get(&self, key: &K) -> Option<SharedMapRef<'_, K, V>> {
        self.shared.get(key)
    }
    // Tries to retrieve the current value associated with `key`. If there's no
    // associated value, an entry will be created.
    pub fn get_or<F>(&self, key: &K, value: F) -> SharedMapRef<'_, K, V>
    where
        F: Fn() -> V + Copy,
    {
        match self.shared.get(key) {
            Some(value) => value,
            None => self
                .shared
                .entry(key.clone())
                .or_insert_with(value)
                .downgrade(),
        }
    }
    // Tries to retrieve the current value associated with `keys`. An entry will
    // be created for each of the non-existing keys.
    pub fn get_or_all<'k, 'd, F>(
        &'d self,
        keys: &BTreeSet<&'k K>,
        refs: &mut Vec<(&'k K, Ref<'d, K, V>)>,
        value: F,
    ) where
        F: Fn() -> V + Copy,
    {
        for key in keys {
            match self.shared.get(*key) {
                Some(value) => {
                    refs.push((key, value));
                }
                None => {
                    // clear any previous references to the map (since
                    // `self.shared.entry` used in `self.maybe_insert` can
                    // deadlock if we hold any references to `self.shared`)
                    refs.clear();
                    // make sure key exits, and start again
                    self.maybe_insert(key, value);
                    return self.get_or_all(keys, refs, value);
                }
            }
        }
    }
    // Checks whether `key` has an associated value.
    pub fn contains_key(&self, key: &K) -> bool {
        self.shared.contains_key(key)
    }
    // Associates `value` with `key`, returning the previous value (if any).
    pub fn insert(&self, key: K, value: V) -> Option<V> {
        self.shared.insert(key, value)
    }
    // Removes the entry associated with `key`, returning it (if any).
    pub fn remove(&self, key: &K) -> Option<(K, V)> {
        self.shared.remove(key)
    }
    // Iterates over all entries in the map.
    pub fn iter(&self) -> SharedMapIter<'_, K, V> {
        self.shared.iter()
    }
    // Inserts an entry for `key` only if one doesn't exist yet.
    fn maybe_insert<F>(&self, key: &K, value: F)
    where
        F: Fn() -> V,
    {
        // insert entry only if it doesn't yet exist:
        // - maybe another thread tried to `maybe_insert` and was able to insert
        //   before us
        // - replacing this function with what follows should make the tests
        //   fail (blindly inserting means that we could lose updates)
        //   `self.shared.insert(key.clone(), value());`
        // - `Entry::or_*` methods from `dashmap` ensure that we don't lose any
        //   updates. See: https://github.com/xacrimon/dashmap/issues/47
        self.shared.entry(key.clone()).or_insert_with(value);
    }
    // Returns the number of entries in the map.
    pub fn len(&self) -> usize {
        self.shared.len()
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/planet/region.rs | fantoch/src/planet/region.rs | use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::fmt;
/// A (cloud) region, identified by its name.
#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Region {
    name: String,
}
impl Region {
    /// Create a new `Region`.
    pub fn new<S: Into<String>>(name: S) -> Self {
        Region { name: name.into() }
    }
    /// Returns the region's name.
    pub fn name(&self) -> &String {
        &self.name
    }
}
impl fmt::Debug for Region {
    // debug-formats the region as its bare name (no quotes)
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.name)
    }
}
// custom implementation which allows `Region`'s to be used as keys in maps when
// serializing with `serde_json` (a region serializes as a plain string)
impl Serialize for Region {
    fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        s.serialize_str(self.name())
    }
}
impl<'de> Deserialize<'de> for Region {
    /// Deserializes a region from its plain-string name (the inverse of the
    /// custom `Serialize` implementation).
    fn deserialize<D>(d: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        String::deserialize(d).map(Region::new)
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/planet/dat.rs | fantoch/src/planet/dat.rs | use crate::planet::Region;
use crate::HashMap;
use std::str::FromStr;
use std::io::{BufRead, BufReader};
// TODO
// when we create Dat, we should compute region and latencies in that same
// method; also, we should only assume a structure in the filename (region.dat)
// and not in the folder structure (as we're doing now:
// latency_gcp/region.dat). the current implementation of region e.g. only
// works for that structure
/// A latency data file for one region (e.g. `latency_gcp/europe-west3.dat`).
#[derive(Debug)]
pub struct Dat {
    filename: String,
}
impl Dat {
    /// Computes this `Dat`'s region from the filename (e.g.
    /// `latency_gcp/europe-west3.dat` -> `europe-west3`).
    pub fn region(&self) -> Region {
        let parts: Vec<_> =
            self.filename.split(|c| c == '/' || c == '.').collect();
        // at `parts[parts.len() - 1]` we have "dat", and thus the name of the
        // region is in the previous position
        let region = parts[parts.len() - 2];
        Region::new(region)
    }
    /// Computes, based on the `Dat` file, the latency from this region to all
    /// other regions.
    /// The local latency (within the same region) will always be
    /// `INTRA_REGION_LATENCY`.
    pub fn latencies(&self) -> HashMap<Region, u64> {
        // open the file in read-only mode (ignoring errors)
        let file = std::fs::File::open(self.filename.clone()).unwrap();
        // get this region
        let this_region = self.region();
        // for each line in the file, compute a pair (region, latency)
        // - intra-region latency is assumed to be 0
        BufReader::new(file)
            .lines()
            .map(|line| line.unwrap())
            .map(Dat::latency)
            .map(|(region, latency)| {
                if region == this_region {
                    (region, super::INTRA_REGION_LATENCY)
                } else {
                    (region, latency)
                }
            })
            .collect()
    }
    /// Extracts from a line of the `Dat` file, the region's name and the
    /// average latency to it.
    fn latency(line: String) -> (Region, u64) {
        let mut iter = line.split(|c| c == '/' || c == ':');
        // latency is in the second entry
        let latency = iter.nth(1).unwrap();
        // convert it to f64
        let latency = f64::from_str(latency).unwrap();
        // convert it to u64 (it always rounds down)
        let latency = latency as u64;
        // region is the last entry
        let region = iter.last().unwrap();
        // convert it to Region
        let region = Region::new(region);
        // return both
        (region, latency)
    }
    /// Gets the list of all `Dat`'s present in `LAT_DIR`.
    pub fn all_dats(lat_dir: &str) -> Vec<Dat> {
        // create path and check it is indeed a dir
        let path = std::path::Path::new(lat_dir);
        // get all .dat files in lat dir
        path.read_dir()
            .unwrap_or_else(|_| panic!("read_dir {:?} failed", path))
            // map all entries to PathBuf
            .map(|entry| entry.unwrap().path())
            // map all entries to &str
            .map(|entry| entry.to_str().unwrap().to_string())
            // get only files that end in ".dat"
            .filter(|entry| entry.ends_with(".dat"))
            // map all entry to Dat
            .map(Dat::from)
            .collect()
    }
}
impl From<String> for Dat {
    // creates a `Dat` from an owned filename
    fn from(filename: String) -> Self {
        Dat { filename }
    }
}
impl From<&str> for Dat {
fn from(filename: &str) -> Self {
Self {
filename: String::from(filename),
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn region() {
        // create dat; the region name must be derived from the filename
        let filename = "../latency_gcp/europe-west3.dat";
        let dat = Dat::from(filename);
        assert_eq!(dat.region(), Region::new("europe-west3"));
    }
    #[test]
    fn latencies() {
        // create dat
        let filename = "../latency_gcp/europe-west3.dat";
        let dat = Dat::from(filename);
        // create expected latencies (note: intra-region latency is 0)
        let mut expected = HashMap::new();
        expected.insert(Region::new("europe-west3"), 0);
        expected.insert(Region::new("europe-west4"), 7);
        expected.insert(Region::new("europe-west6"), 7);
        expected.insert(Region::new("europe-west1"), 8);
        expected.insert(Region::new("europe-west2"), 13);
        expected.insert(Region::new("europe-north1"), 31);
        expected.insert(Region::new("us-east4"), 86);
        expected.insert(Region::new("northamerica-northeast1"), 87);
        expected.insert(Region::new("us-east1"), 98);
        expected.insert(Region::new("us-central1"), 105);
        expected.insert(Region::new("us-west1"), 136);
        expected.insert(Region::new("us-west2"), 139);
        expected.insert(Region::new("southamerica-east1"), 214);
        expected.insert(Region::new("asia-northeast1"), 224);
        expected.insert(Region::new("asia-northeast2"), 233);
        expected.insert(Region::new("asia-east1"), 258);
        expected.insert(Region::new("asia-east2"), 268);
        expected.insert(Region::new("australia-southeast1"), 276);
        expected.insert(Region::new("asia-southeast1"), 289);
        expected.insert(Region::new("asia-south1"), 352);
        assert_eq!(dat.latencies(), expected);
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/planet/mod.rs | fantoch/src/planet/mod.rs | // This module contains the definition of `Dat`.
mod dat;
// This module contains the definition of `Region`.
pub mod region;
// Re-exports.
pub use region::Region;
use crate::planet::dat::Dat;
use crate::HashMap;
use serde::{Deserialize, Serialize};
use std::fmt::{self, Write};
// directory that contains all dat files for GCP
const GCP_LAT_DIR: &str = "../latency_gcp/";
// assume that intra region latency is 0
const INTRA_REGION_LATENCY: u64 = 0;
/// Latency model of the world: pairwise latencies between regions.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Planet {
    /// mapping from region A to a mapping from region B to the latency between
    /// A and B
    latencies: HashMap<Region, HashMap<Region, u64>>,
    /// mapping from each region to the regions sorted by distance
    sorted: HashMap<Region, Vec<(u64, Region)>>,
}
impl Planet {
    /// Creates a new GCP `Planet` instance.
    #[allow(clippy::new_without_default)]
    pub fn new() -> Self {
        Self::from(GCP_LAT_DIR)
    }
    /// Creates a new `Planet` instance from the `.dat` files in `lat_dir`.
    pub fn from(lat_dir: &str) -> Self {
        // create latencies
        let latencies: HashMap<_, _> = Dat::all_dats(lat_dir)
            .iter()
            .map(|dat| (dat.region(), dat.latencies()))
            .collect();
        Self::from_latencies(latencies)
    }
    /// Creates a new `Planet` instance from the latencies provided.
    pub fn from_latencies(
        latencies: HashMap<Region, HashMap<Region, u64>>,
    ) -> Self {
        // create sorted and planet
        let sorted = Self::sort_by_distance(latencies.clone());
        Planet { latencies, sorted }
    }
    /// Creates a equidistant `Planet`: `region_number` regions named
    /// `r_0..r_{n-1}`, all pairwise `planet_distance` apart.
    pub fn equidistant(
        planet_distance: u64,
        region_number: usize,
    ) -> (Vec<Region>, Self) {
        // create regions
        let regions: Vec<_> = (0..region_number)
            .map(|i| {
                // region name
                let name = format!("r_{}", i);
                Region::new(name)
            })
            .collect();
        // create latencies
        let latencies: HashMap<_, _> = regions
            .clone()
            .into_iter()
            .map(|from| {
                // create distances
                let distances = regions
                    .clone()
                    .into_iter()
                    .map(|to| {
                        let distance = if from == to {
                            INTRA_REGION_LATENCY
                        } else {
                            planet_distance
                        };
                        (to, distance)
                    })
                    .collect();
                (from, distances)
            })
            .collect();
        // create sorted
        let sorted = Self::sort_by_distance(latencies.clone());
        // create single-region planet
        let planet = Planet { latencies, sorted };
        (regions, planet)
    }
    /// Retrieves a list with all regions.
    pub fn regions(&self) -> Vec<Region> {
        self.latencies.keys().cloned().collect()
    }
    /// Retrieves the distance between the two regions passed as argument.
    /// Returns `None` if either region is unknown.
    pub fn ping_latency(&self, from: &Region, to: &Region) -> Option<u64> {
        // get from's entries
        let entries = self.latencies.get(from)?;
        // get to's entry in from's entries
        entries.get(to).cloned()
    }
    /// Returns a list of `Region`s sorted by the distance to the `Region`
    /// passed as argument. The distance to each region is also returned.
    pub fn sorted(&self, from: &Region) -> Option<&Vec<(u64, Region)>> {
        self.sorted.get(from)
    }
    /// Returns a mapping from region to regions sorted by distance (ASC).
    fn sort_by_distance(
        latencies: HashMap<Region, HashMap<Region, u64>>,
    ) -> HashMap<Region, Vec<(u64, Region)>> {
        latencies
            .into_iter()
            .map(|(from, entries)| {
                // collect entries into a vector with reversed tuple order
                let mut entries: Vec<_> = entries
                    .into_iter()
                    .map(|(to, latency)| (latency, to))
                    .collect();
                // sort entries by latency
                entries.sort_unstable();
                (from, entries)
            })
            .collect()
    }
}
impl Planet {
    /// Renders a markdown table with the ping latency between every pair of
    /// the given regions. Panics if some pair is unknown to this `Planet`.
    pub fn distance_matrix(
        &self,
        regions: Vec<Region>,
    ) -> Result<String, fmt::Error> {
        let mut table = String::new();
        // header row: one column per region
        write!(&mut table, "| |")?;
        regions
            .iter()
            .try_for_each(|region| write!(&mut table, " {:?} |", region))?;
        writeln!(&mut table)?;
        // markdown separator row
        write!(&mut table, "|:---:|")?;
        regions
            .iter()
            .try_for_each(|_| write!(&mut table, ":---:|"))?;
        writeln!(&mut table)?;
        // one body row per source region, with the latency to every region
        for from in regions.iter() {
            write!(&mut table, "| __{:?}__ |", from)?;
            for to in regions.iter() {
                let latency = self.ping_latency(from, to).unwrap();
                write!(&mut table, " {} |", latency)?;
            }
            writeln!(&mut table)?;
        }
        Ok(table)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // `true` iff the latency from `a` to `b` equals the latency from `b` to
    // `a` in the given planet
    fn symmetric(a: &Region, b: &Region, planet: &Planet) -> bool {
        let a_to_b = planet.ping_latency(a, b).unwrap();
        let b_to_a = planet.ping_latency(b, a).unwrap();
        a_to_b == b_to_a
    }
    // NOTE: these tests read the dat files shipped in `GCP_LAT_DIR`
    #[test]
    fn latency() {
        // planet
        let planet = Planet::new();
        // regions
        let eu_w3 = Region::new("europe-west3");
        let us_c1 = Region::new("us-central1");
        // most times latency is symmetric in GCP
        assert!(symmetric(&eu_w3, &us_c1, &planet));
        // sometimes it's not
        let us_e1 = Region::new("us-east1");
        let eu_w3 = Region::new("europe-west3");
        let us_e4 = Region::new("us-east4");
        let us_w1 = Region::new("us-west1");
        assert!(!symmetric(&us_e1, &eu_w3, &planet));
        assert!(!symmetric(&us_e4, &us_w1, &planet));
        assert!(!symmetric(&us_w1, &eu_w3, &planet));
    }
    #[test]
    fn sorted() {
        // planet
        let planet = Planet::new();
        // regions
        let eu_w3 = Region::new("europe-west3");
        // create expected regions:
        // - the first two have the same value, so they're ordered by name
        let expected = vec![
            Region::new("europe-west3"),
            Region::new("europe-west4"),
            Region::new("europe-west6"),
            Region::new("europe-west1"),
            Region::new("europe-west2"),
            Region::new("europe-north1"),
            Region::new("us-east4"),
            Region::new("northamerica-northeast1"),
            Region::new("us-east1"),
            Region::new("us-central1"),
            Region::new("us-west1"),
            Region::new("us-west2"),
            Region::new("southamerica-east1"),
            Region::new("asia-northeast1"),
            Region::new("asia-northeast2"),
            Region::new("asia-east1"),
            Region::new("asia-east2"),
            Region::new("australia-southeast1"),
            Region::new("asia-southeast1"),
            Region::new("asia-south1"),
        ];
        // get sorted regions from `eu_w3`, drop the distance and clone the
        // region
        let res: Vec<_> = planet
            .sorted(&eu_w3)
            .unwrap()
            .into_iter()
            .map(|(_, region)| region)
            .cloned()
            .collect();
        assert_eq!(res, expected);
    }
    #[test]
    fn equidistant() {
        let planet_distance = 10;
        let region_number = 3;
        let (regions, planet) =
            Planet::equidistant(planet_distance, region_number);
        // check correct number of regions
        assert_eq!(regions.len(), region_number);
        // check distances between regions: intra-region latency for a pair of
        // equal regions, `planet_distance` otherwise
        for a in regions.iter() {
            for b in regions.iter() {
                let a_to_b =
                    planet.ping_latency(a, b).expect("regions must exist");
                let expected = if a == b {
                    INTRA_REGION_LATENCY
                } else {
                    planet_distance
                };
                assert_eq!(a_to_b, expected);
            }
        }
    }
    // smoke test: rendering the matrix for known regions should not fail
    #[test]
    fn distance_matrix() {
        let planet = Planet::new();
        let regions = vec![
            Region::new("asia-southeast1"),
            Region::new("europe-west4"),
            Region::new("southamerica-east1"),
            Region::new("australia-southeast1"),
            Region::new("europe-west2"),
            Region::new("asia-south1"),
            Region::new("us-east1"),
            Region::new("asia-northeast1"),
            Region::new("europe-west1"),
            Region::new("asia-east1"),
            Region::new("us-west1"),
            Region::new("europe-west3"),
            Region::new("us-central1"),
        ];
        assert!(planet.distance_matrix(regions).is_ok());
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/sim/simulation.rs | fantoch/src/sim/simulation.rs | use crate::client::Client;
use crate::command::{Command, CommandResult};
use crate::executor::AggregatePending;
use crate::id::{ClientId, ProcessId};
use crate::protocol::{Action, Protocol};
use crate::time::SimTime;
use crate::HashMap;
use std::cell::Cell;
// Single-threaded simulation state: a logical clock plus every process (with
// its executor and pending-command aggregator) and every client, each stored
// in a `Cell` so they can be handed out mutably one at a time.
pub struct Simulation<P: Protocol> {
    // logical simulation clock
    time: SimTime,
    processes: HashMap<ProcessId, Cell<(P, P::Executor, AggregatePending)>>,
    clients: HashMap<ClientId, Cell<Client>>,
}
impl<P> Simulation<P>
where
    P: Protocol,
{
    /// Create a new `Simulation`.
    #[allow(clippy::new_without_default)]
    pub fn new() -> Self {
        Simulation {
            time: SimTime::new(),
            processes: HashMap::new(),
            clients: HashMap::new(),
        }
    }
    // Return a mutable reference to the simulation time.
    pub fn time(&mut self) -> &mut SimTime {
        &mut self.time
    }
    /// Registers a `Process` in the `Simulation` by storing it in a `Cell`.
    /// Panics if a process with the same identifier was already registered.
    pub fn register_process(&mut self, process: P, executor: P::Executor) {
        // get identifier
        let process_id = process.id();
        let shard_id = process.shard_id();
        // create pending
        let pending = AggregatePending::new(process_id, shard_id);
        // register process and check it has never been registered before
        let res = self
            .processes
            .insert(process_id, Cell::new((process, executor, pending)));
        assert!(res.is_none());
    }
    /// Registers a `Client` in the `Simulation` by storing it in a `Cell`.
    /// Panics if a client with the same identifier was already registered.
    pub fn register_client(&mut self, client: Client) {
        // get identifier
        let id = client.id();
        // register client and check it has never been registered before
        let res = self.clients.insert(id, Cell::new(client));
        assert!(res.is_none());
    }
    /// Starts all clients registered in the router.
    /// Returns, per client, the process it targets and its first command.
    pub fn start_clients(&mut self) -> Vec<(ClientId, ProcessId, Command)> {
        let time = &self.time;
        self.clients
            .iter_mut()
            .map(|(_, client)| {
                let client = client.get_mut();
                // start client
                let (target_shard, cmd) = client
                    .cmd_send(time)
                    .expect("clients should submit at least one command");
                let process_id = client.shard_process(&target_shard);
                (client.id(), process_id, cmd)
            })
            .collect()
    }
    /// Forward a `ToSend`.
    /// The message is delivered to every process in the target set (self
    /// first, if present) and the actions those deliveries generate are
    /// returned. Panics on any other kind of `Action`.
    pub fn forward_to_processes(
        &mut self,
        (process_id, action): (ProcessId, Action<P>),
    ) -> Vec<(ProcessId, Action<P>)> {
        match action {
            Action::ToSend { target, msg } => {
                // get self process and its shard id
                let (process, _, _, time) = self.get_process(process_id);
                assert_eq!(process.id(), process_id);
                let shard_id = process.shard_id();
                // handle first in self if self in target
                if target.contains(&process_id) {
                    // handle msg
                    process.handle(process_id, shard_id, msg.clone(), time);
                };
                // take out (potentially) new actions:
                // - this makes sure that the first to_send is the one from self
                let mut actions: Vec<_> = process
                    .to_processes_iter()
                    .map(|action| (process_id, action))
                    .collect();
                target
                    .into_iter()
                    // make sure we don't handle again in self
                    .filter(|to| to != &process_id)
                    .for_each(|to| {
                        // get target process
                        let (to_process, _, _, time) = self.get_process(to);
                        assert_eq!(to_process.id(), to);
                        // handle msg
                        to_process.handle(
                            process_id,
                            shard_id,
                            msg.clone(),
                            time,
                        );
                        // take out new actions
                        to_process.to_processes_iter().for_each(|action| {
                            actions.push((to, action));
                        })
                    });
                actions
            }
            action => {
                panic!("non supported action: {:?}", action);
            }
        }
    }
    /// Forward a `CommandResult`.
    /// Returns the client's next command (and its target process), or `None`
    /// when the client is done.
    pub fn forward_to_client(
        &mut self,
        cmd_result: CommandResult,
    ) -> Option<(ProcessId, Command)> {
        // get client id
        let client_id = cmd_result.rifl().source();
        // find client
        let (client, time) = self.get_client(client_id);
        // handle command result
        // TODO: we should aggregate command results if we have more than one
        // shard in simulation
        client.cmd_recv(cmd_result.rifl(), time);
        // and generate the next command
        client.cmd_send(time).map(|(target_shard, cmd)| {
            let target = client.shard_process(&target_shard);
            (target, cmd)
        })
    }
    /// Returns the process registered with this identifier.
    /// It panics if the process is not registered.
    pub fn get_process(
        &mut self,
        process_id: ProcessId,
    ) -> (&mut P, &mut P::Executor, &mut AggregatePending, &SimTime) {
        let (process, executor, pending) = self
            .processes
            .get_mut(&process_id)
            .unwrap_or_else(|| {
                panic!(
                    "process {} should have been registered before",
                    process_id
                );
            })
            .get_mut();
        (process, executor, pending, &self.time)
    }
    /// Returns the client registered with this identifier.
    /// It panics if the client is not registered.
    pub fn get_client(
        &mut self,
        client_id: ClientId,
    ) -> (&mut Client, &SimTime) {
        let client = self
            .clients
            .get_mut(&client_id)
            .unwrap_or_else(|| {
                panic!(
                    "client {} should have been registered before",
                    client_id
                );
            })
            .get_mut();
        (client, &self.time)
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/sim/schedule.rs | fantoch/src/sim/schedule.rs | use crate::time::{SimTime, SysTime};
use std::cmp::{Ordering, Reverse};
use std::collections::BinaryHeap;
use std::time::Duration;
// Priority queue of future actions keyed by schedule time; `Reverse` turns
// `BinaryHeap` (a max-heap) into a min-heap, so `pop` yields the earliest.
pub struct Schedule<A> {
    queue: BinaryHeap<Reverse<QueueEntry<A>>>,
}
// An action paired with the simulation instant (in millis) at which it fires.
#[derive(PartialEq, Eq)]
struct QueueEntry<A> {
    schedule_time: u64,
    action: A,
}
impl<A: Eq> Ord for QueueEntry<A> {
    fn cmp(&self, other: &Self) -> Ordering {
        // simply compare their schedule time
        // NOTE(review): entries with equal times but different actions are
        // `Equal` under this ordering while the derived `PartialEq` says they
        // differ, so `Ord` and `Eq` disagree on such pairs; this is fine for
        // `BinaryHeap`, but confirm no consumer relies on full consistency
        self.schedule_time.cmp(&other.schedule_time)
    }
}
impl<A: Eq> PartialOrd for QueueEntry<A> {
    // delegate to `Ord` so the two orderings can never disagree
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl<A: Eq> Schedule<A> {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
Self {
queue: BinaryHeap::new(),
}
}
/// Schedule a new `ScheduleAction` at a certain `time`.
pub fn schedule(&mut self, time: &SimTime, delay: Duration, action: A) {
// compute schedule time
let schedule_time = time.millis() + delay.as_millis() as u64;
// create new queue entry
let entry = QueueEntry {
schedule_time,
action,
};
// push new entry to the queue
self.queue.push(Reverse(entry));
}
/// Retrieve the next scheduled action.
pub fn next_action(&mut self, time: &mut SimTime) -> Option<A> {
// get the next actions
self.queue.pop().map(|entry| {
// advance simulation time
time.set_millis(entry.0.schedule_time);
// return only the action
entry.0.action
})
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // exercises scheduling/popping and checks the clock only moves forward
    #[test]
    fn schedule_flow() {
        // create simulation time and schedule
        let mut time = SimTime::new();
        let mut schedule: Schedule<String> = Schedule::new();
        // check min time is none and there are no next actions
        assert!(schedule.next_action(&mut time).is_none());
        // schedule "a" with a delay 10
        schedule.schedule(&time, Duration::from_millis(10), String::from("a"));
        // check "a" is the next action, simulation time is now 10
        let next = schedule
            .next_action(&mut time)
            .expect("there should be a next action");
        assert_eq!(next, String::from("a"));
        assert_eq!(time.millis(), 10);
        assert!(schedule.next_action(&mut time).is_none());
        // schedule "b" with a delay 7, "c" with delay 2
        schedule.schedule(&time, Duration::from_millis(7), String::from("b"));
        schedule.schedule(&time, Duration::from_millis(2), String::from("c"));
        // check "c" is the next action, simulation time is now 12
        let next = schedule
            .next_action(&mut time)
            .expect("there should be a next action");
        assert_eq!(next, String::from("c"));
        assert_eq!(time.millis(), 12);
        // schedule "d" with a delay 2, "e" with delay 5
        schedule.schedule(&time, Duration::from_millis(2), String::from("d"));
        schedule.schedule(&time, Duration::from_millis(5), String::from("e"));
        // check "d" is the next action, simulation time is now 14
        let next = schedule
            .next_action(&mut time)
            .expect("there should be a next action");
        assert_eq!(next, String::from("d"));
        assert_eq!(time.millis(), 14);
        // check "b" and "e" are the next actions, simulation time is now 17
        // (both fire at 17, so their relative order is unspecified)
        let next = schedule
            .next_action(&mut time)
            .expect("there should be a next action");
        assert!(next == String::from("b") || next == String::from("e"));
        assert_eq!(time.millis(), 17);
        let next = schedule
            .next_action(&mut time)
            .expect("there should be a next action");
        assert!(next == String::from("b") || next == String::from("e"));
        assert_eq!(time.millis(), 17);
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/sim/runner.rs | fantoch/src/sim/runner.rs | use crate::client::{Client, Workload};
use crate::command::{Command, CommandResult, DEFAULT_SHARD_ID};
use crate::config::Config;
use crate::executor::{ExecutionOrderMonitor, Executor, ExecutorMetrics};
use crate::id::{ClientId, ProcessId, ShardId};
use crate::metrics::Histogram;
use crate::planet::{Planet, Region};
use crate::protocol::{Action, Protocol, ProtocolMetrics};
use crate::sim::{Schedule, Simulation};
use crate::time::SysTime;
use crate::util;
use crate::HashMap;
use rand::Rng;
use std::fmt;
use std::fmt::Debug;
use std::time::Duration;
// Everything the runner can schedule to happen at a future simulation instant:
// client submissions, inter-process messages, replies to clients, and the two
// kinds of recurring (periodic) events.
#[derive(PartialEq, Eq)]
enum ScheduleAction<Message, PeriodicEvent> {
    SubmitToProc(ProcessId, Command),
    SendToProc(ProcessId, ShardId, ProcessId, Message),
    SendToClient(ClientId, CommandResult),
    PeriodicProcessEvent(ProcessId, PeriodicEvent, Duration),
    PeriodicExecutedNotification(ProcessId, Duration),
}
// Identifies a message endpoint's region indirectly, via the process or client
// that lives there (resolved by `Runner::compute_region`).
#[derive(Clone)]
enum MessageRegion {
    Process(ProcessId),
    Client(ClientId),
}
// Drives a full simulation: keeps the planet (for latencies), the simulation
// state, and the schedule of future actions.
pub struct Runner<P: Protocol> {
    planet: Planet,
    simulation: Simulation<P>,
    schedule: Schedule<ScheduleAction<P::Message, P::PeriodicEvent>>,
    // mapping from process identifier to its region
    process_to_region: HashMap<ProcessId, Region>,
    // mapping from client identifier to its region
    client_to_region: HashMap<ClientId, Region>,
    // total number of clients
    client_count: usize,
    // boolean indicating whether the runner should make the distance between
    // regions symmetric
    make_distances_symmetric: bool,
    // boolean indicating whether the runner should reorder messages
    reorder_messages: bool,
}
// Phases of the simulation loop: clients still issuing commands, a grace
// period after all clients finished, and done.
#[derive(PartialEq)]
enum SimulationStatus {
    ClientsRunning,
    ExtraSimulationTime,
    Done,
}
impl<P> Runner<P>
where
P: Protocol,
{
    /// Create a new `Runner` from a `planet`, a `config`, and two lists of
    /// regions:
    /// - `process_regions`: list of regions where processes are located
    /// - `client_regions`: list of regions where clients are located
    ///
    /// # Panics
    /// Panics if `process_regions.len() != config.n()` or if the config has
    /// no gc interval set (the simulation relies on stability running).
    pub fn new(
        planet: Planet,
        config: Config,
        workload: Workload,
        clients_per_process: usize,
        process_regions: Vec<Region>,
        client_regions: Vec<Region>,
    ) -> Self {
        // check that we have the correct number of `process_regions`
        assert_eq!(process_regions.len(), config.n());
        assert!(config.gc_interval().is_some());
        // create simulation
        let mut simulation = Simulation::new();
        // create processes
        let mut processes = Vec::with_capacity(config.n());
        let mut periodic_process_events = Vec::new();
        let mut periodic_executed_notifications = Vec::new();
        // there's a single shard
        let shard_id = 0;
        let to_discover: Vec<_> = process_regions
            .into_iter()
            .zip(util::process_ids(shard_id, config.n()))
            .map(|(region, process_id)| {
                // create process and save it
                let (process, process_events) =
                    P::new(process_id, shard_id, config);
                processes.push((region.clone(), process));
                // save periodic process events
                periodic_process_events.extend(
                    process_events
                        .into_iter()
                        .map(|(event, delay)| (process_id, event, delay)),
                );
                // save periodic executed notifications
                let executed_notification_interval =
                    config.executor_executed_notification_interval();
                periodic_executed_notifications
                    .push((process_id, executed_notification_interval));
                (process_id, shard_id, region)
            })
            .collect();
        // create processs to region mapping
        let process_to_region = to_discover
            .clone()
            .into_iter()
            .map(|(process_id, _, region)| (process_id, region))
            .collect();
        // register processes
        processes.into_iter().for_each(|(region, mut process)| {
            // discover: let the process know about its peers, sorted by
            // distance from its own region
            let sorted = util::sort_processes_by_distance(
                &region,
                &planet,
                to_discover.clone(),
            );
            let (connect_ok, _) = process.discover(sorted);
            assert!(connect_ok);
            // create executor for this process
            let executor = <P::Executor as Executor>::new(
                process.id(),
                process.shard_id(),
                config,
            );
            // and register both
            simulation.register_process(process, executor);
        });
        // register clients and create client to region mapping
        let mut client_id = 0;
        let mut client_to_region = HashMap::new();
        for region in client_regions {
            for _ in 1..=clients_per_process {
                // create client
                client_id += 1;
                let status_frequency = None;
                let mut client =
                    Client::new(client_id, workload, status_frequency);
                // discover: each client connects to the closest process of
                // each shard
                let closest = util::closest_process_per_shard(
                    &region,
                    &planet,
                    to_discover.clone(),
                );
                client.connect(closest);
                // and register it
                simulation.register_client(client);
                client_to_region.insert(client_id, region.clone());
            }
        }
        // create runner
        let mut runner = Self {
            planet,
            simulation,
            schedule: Schedule::new(),
            process_to_region,
            client_to_region,
            // since we start ids in 1, the last id is the same as the number of
            // clients
            client_count: client_id as usize,
            make_distances_symmetric: false,
            reorder_messages: false,
        };
        // schedule periodic process events
        for (process_id, event, delay) in periodic_process_events {
            runner.schedule_periodic_process_event(process_id, event, delay);
        }
        // schedule periodic executed notifications
        for (process_id, delay) in periodic_executed_notifications {
            runner.schedule_periodic_executed_notification(process_id, delay)
        }
        runner
    }
    /// From now on, use the average of both directions as the ping latency
    /// between any two regions (see `distance`).
    pub fn make_distances_symmetric(&mut self) {
        self.make_distances_symmetric = true;
    }
    /// From now on, randomly inflate message delays so that messages may be
    /// delivered out of order (see `schedule_message`).
    pub fn reorder_messages(&mut self) {
        self.reorder_messages = true;
    }
    /// Run the simulation. `extra_sim_time` indicates how much longer should
    /// the simulation run after clients are finished.
    ///
    /// Returns, in order: per-process protocol/executor metrics, per-process
    /// execution-order monitors, and per-region client latency histograms.
    pub fn run(
        &mut self,
        extra_sim_time: Option<Duration>,
    ) -> (
        HashMap<ProcessId, (ProtocolMetrics, ExecutorMetrics)>,
        HashMap<ProcessId, Option<ExecutionOrderMonitor>>,
        HashMap<Region, (usize, Histogram)>,
    ) {
        // start clients
        self.simulation.start_clients().into_iter().for_each(
            |(client_id, process_id, cmd)| {
                // schedule client commands
                self.schedule_submit(
                    MessageRegion::Client(client_id),
                    process_id,
                    cmd,
                )
            },
        );
        // run simulation loop
        self.simulation_loop(extra_sim_time);
        // return metrics and client latencies
        (
            self.metrics(),
            self.executors_monitors(),
            self.clients_latencies(),
        )
    }
    // Main loop: pops the next scheduled action (advancing the clock) and
    // dispatches it, until all clients are done and, if requested, the extra
    // simulation time has elapsed.
    fn simulation_loop(&mut self, extra_sim_time: Option<Duration>) {
        let mut simulation_status = SimulationStatus::ClientsRunning;
        let mut clients_done = 0;
        let mut simulation_final_time = 0;
        while simulation_status != SimulationStatus::Done {
            // there is always a next action because the periodic events
            // reschedule themselves
            let action = self.schedule
                .next_action(self.simulation.time())
                .expect("there should be a new action since stability is always running");
            match action {
                ScheduleAction::PeriodicProcessEvent(
                    process_id,
                    event,
                    delay,
                ) => {
                    self.handle_periodic_process_event(process_id, event, delay)
                }
                ScheduleAction::PeriodicExecutedNotification(
                    process_id,
                    delay,
                ) => self
                    .handle_periodic_executed_notification(process_id, delay),
                ScheduleAction::SubmitToProc(process_id, cmd) => {
                    self.handle_submit_to_proc(process_id, cmd);
                }
                ScheduleAction::SendToProc(
                    from,
                    from_shard_id,
                    process_id,
                    msg,
                ) => {
                    self.handle_send_to_proc(
                        from,
                        from_shard_id,
                        process_id,
                        msg,
                    );
                }
                ScheduleAction::SendToClient(client_id, cmd_result) => {
                    // handle new command result in client
                    let submit = self.simulation.forward_to_client(cmd_result);
                    if let Some((process_id, cmd)) = submit {
                        self.schedule_submit(
                            MessageRegion::Client(client_id),
                            process_id,
                            cmd,
                        );
                    } else {
                        clients_done += 1;
                        // if all clients are done, enter the next phase
                        if clients_done == self.client_count {
                            simulation_status = match extra_sim_time {
                                Some(extra) => {
                                    // if there's extra time, compute the
                                    // final simulation time
                                    simulation_final_time =
                                        self.simulation.time().millis()
                                            + extra.as_millis() as u64;
                                    SimulationStatus::ExtraSimulationTime
                                }
                                None => {
                                    // otherwise, end the simulation
                                    SimulationStatus::Done
                                }
                            }
                        }
                    }
                }
            }
            // check if we're in extra simulation time; if yes, finish the
            // simulation if we're past the final simulation time
            let should_end_sim = simulation_status
                == SimulationStatus::ExtraSimulationTime
                && self.simulation.time().millis() > simulation_final_time;
            if should_end_sim {
                simulation_status = SimulationStatus::Done;
            }
        }
    }
    // Fires a periodic protocol event on `process_id` and reschedules it.
    fn handle_periodic_process_event(
        &mut self,
        process_id: ProcessId,
        event: P::PeriodicEvent,
        delay: Duration,
    ) {
        // get process
        let (process, _, _, time) = self.simulation.get_process(process_id);
        // handle event and schedule new actions
        process.handle_event(event.clone(), time);
        self.send_to_processes_and_executors(process_id);
        // schedule the next periodic event
        self.schedule_periodic_process_event(process_id, event, delay);
    }
    // Periodically pushes the executor's executed information back into the
    // protocol, and reschedules the notification.
    fn handle_periodic_executed_notification(
        &mut self,
        process_id: ProcessId,
        delay: Duration,
    ) {
        // get process and executor
        let (process, executor, _, time) =
            self.simulation.get_process(process_id);
        // handle executed and schedule new actions
        if let Some(executed) = executor.executed(time) {
            process.handle_executed(executed, time);
            self.send_to_processes_and_executors(process_id);
        }
        // schedule the next periodic event
        self.schedule_periodic_executed_notification(process_id, delay);
    }
    // Delivers a client command to its target process.
    fn handle_submit_to_proc(&mut self, process_id: ProcessId, cmd: Command) {
        // get process and executor
        let (process, _executor, pending, time) =
            self.simulation.get_process(process_id);
        // register command in pending so partial results can be aggregated
        pending.wait_for(&cmd);
        // submit to process and schedule new actions
        process.submit(None, cmd, time);
        self.send_to_processes_and_executors(process_id);
    }
    // Delivers a protocol message from `from` to `process_id`.
    fn handle_send_to_proc(
        &mut self,
        from: ProcessId,
        from_shard_id: ShardId,
        process_id: ProcessId,
        msg: P::Message,
    ) {
        // get process and executor
        let (process, _, _, time) = self.simulation.get_process(process_id);
        // handle message and schedule new actions
        process.handle(from, from_shard_id, msg, time);
        self.send_to_processes_and_executors(process_id);
    }
    // (maybe) Schedules a new submit from a client.
    fn schedule_submit(
        &mut self,
        from_region: MessageRegion,
        process_id: ProcessId,
        cmd: Command,
    ) {
        // create action and schedule it with client-to-process delay
        let action = ScheduleAction::SubmitToProc(process_id, cmd);
        self.schedule_message(
            from_region,
            MessageRegion::Process(process_id),
            action,
        );
    }
    // Drains everything `process_id` produced: protocol actions are scheduled
    // towards their targets, and execution info flows through the executor and
    // pending aggregator into client replies.
    fn send_to_processes_and_executors(&mut self, process_id: ProcessId) {
        // get process and executor
        let (process, executor, pending, time) =
            self.simulation.get_process(process_id);
        assert_eq!(process.id(), process_id);
        let shard_id = process.shard_id();
        // get ready commands
        let protocol_actions = process.to_processes_iter().collect();
        // handle new execution info in the executor
        let ready: Vec<_> = process
            .to_executors_iter()
            .flat_map(|info| {
                executor.handle(info, time);
                // handle executor messages to self
                let to_executors =
                    executor.to_executors_iter().collect::<Vec<_>>();
                for (shard_id, info) in to_executors {
                    // single-shard simulation: all executor messages are local
                    assert_eq!(shard_id, DEFAULT_SHARD_ID);
                    executor.handle(info, time);
                }
                // TODO remove collect
                executor.to_clients_iter().collect::<Vec<_>>()
            })
            // handle all partial results in pending
            .filter_map(|executor_result| {
                pending.add_executor_result(executor_result)
            })
            .collect();
        // schedule new messages
        self.schedule_protocol_actions(
            process_id,
            shard_id,
            MessageRegion::Process(process_id),
            protocol_actions,
        );
        // schedule new command results
        ready.into_iter().for_each(|cmd_result| {
            self.schedule_to_client(
                MessageRegion::Process(process_id),
                cmd_result,
            )
        });
    }
    /// (maybe) Schedules a new send from some process.
    /// Messages to self (and all `ToForward`s) are delivered immediately;
    /// everything else is scheduled with the inter-region delay.
    fn schedule_protocol_actions(
        &mut self,
        process_id: ProcessId,
        shard_id: ShardId,
        from_region: MessageRegion,
        protocol_actions: Vec<Action<P>>,
    ) {
        for protocol_action in protocol_actions {
            match protocol_action {
                Action::ToSend { target, msg } => {
                    // for each process in target, schedule message delivery
                    target.into_iter().for_each(|to| {
                        // if message to self, deliver immediately
                        if to == process_id {
                            self.handle_send_to_proc(
                                process_id,
                                shard_id,
                                process_id,
                                msg.clone(),
                            )
                        } else {
                            // otherwise, create action and schedule it
                            let action = ScheduleAction::SendToProc(
                                process_id,
                                shard_id,
                                to,
                                msg.clone(),
                            );
                            self.schedule_message(
                                from_region.clone(),
                                MessageRegion::Process(to),
                                action,
                            );
                        }
                    });
                }
                Action::ToForward { msg } => {
                    // deliver to-forward messages immediately
                    self.handle_send_to_proc(
                        process_id, shard_id, process_id, msg,
                    );
                }
            }
        }
    }
    /// Schedules a new command result towards the client that issued it.
    fn schedule_to_client(
        &mut self,
        from_region: MessageRegion,
        cmd_result: CommandResult,
    ) {
        // create action and schedule it; the rifl source is the client id
        let client_id = cmd_result.rifl().source();
        let action = ScheduleAction::SendToClient(client_id, cmd_result);
        self.schedule_message(
            from_region,
            MessageRegion::Client(client_id),
            action,
        );
    }
    /// Schedules a message with the one-way delay between its two regions
    /// (randomly inflated when `reorder_messages` is set).
    fn schedule_message(
        &mut self,
        from_region: MessageRegion,
        to_region: MessageRegion,
        action: ScheduleAction<P::Message, P::PeriodicEvent>,
    ) {
        // get actual regions
        let from = self.compute_region(from_region);
        let to = self.compute_region(to_region);
        // compute distance between regions
        let mut distance = self.distance(from, to);
        // check if we should reorder messages
        if self.reorder_messages {
            // if so, multiply distance by some random number between 0 and 10
            let multiplier: f64 = rand::thread_rng().gen_range(0.0..10.0);
            distance = (distance as f64 * multiplier) as u64;
        }
        // schedule action
        let distance = Duration::from_millis(distance);
        self.schedule
            .schedule(self.simulation.time(), distance, action);
    }
    /// Schedules the next periodic process event, `delay` from now.
    fn schedule_periodic_process_event(
        &mut self,
        process_id: ProcessId,
        event: P::PeriodicEvent,
        delay: Duration,
    ) {
        // create action
        let action =
            ScheduleAction::PeriodicProcessEvent(process_id, event, delay);
        self.schedule
            .schedule(self.simulation.time(), delay, action);
    }
    /// Schedules the next periodic executed notification, `delay` from now.
    fn schedule_periodic_executed_notification(
        &mut self,
        process_id: ProcessId,
        delay: Duration,
    ) {
        // create action
        let action =
            ScheduleAction::PeriodicExecutedNotification(process_id, delay);
        self.schedule
            .schedule(self.simulation.time(), delay, action);
    }
    /// Retrieves the region of some process/client.
    /// Panics if the process/client was never registered.
    fn compute_region(&self, message_region: MessageRegion) -> &Region {
        match message_region {
            MessageRegion::Process(process_id) => self
                .process_to_region
                .get(&process_id)
                .expect("process region should be known"),
            MessageRegion::Client(client_id) => self
                .client_to_region
                .get(&client_id)
                .expect("client region should be known"),
        }
    }
/// Computes the distance between two regions which is half the ping
/// latency.
fn distance(&self, from: &Region, to: &Region) -> u64 {
let from_to = self
.planet
.ping_latency(from, to)
.expect("both regions should exist on the planet");
// compute ping time (maybe make it symmetric)
let ping = if self.make_distances_symmetric {
let to_from = self
.planet
.ping_latency(to, from)
.expect("both regions should exist on the planet");
(from_to + to_from) / 2
} else {
from_to
};
// distance is half the ping latency
let ms = ping / 2;
ms
}
    /// Get metrics from processes and executors.
    /// TODO does this need to be mut?
    fn metrics(
        &mut self,
    ) -> HashMap<ProcessId, (ProtocolMetrics, ExecutorMetrics)> {
        // clone so the returned metrics are detached from the simulation
        self.check_processes_and_executors(|process, executor| {
            let process_metrics = process.metrics().clone();
            let executor_metrics = executor.metrics().clone();
            (process_metrics, executor_metrics)
        })
    }
    // Collects each executor's execution-order monitor (if any).
    fn executors_monitors(
        &mut self,
    ) -> HashMap<ProcessId, Option<ExecutionOrderMonitor>> {
        self.check_processes_and_executors(|_process, executor| {
            executor.monitor()
        })
    }
    /// Get client's stats: per region, the number of issued commands and a
    /// latency histogram aggregated over all clients in that region.
    /// TODO does this need to be mut?
    fn clients_latencies(&mut self) -> HashMap<Region, (usize, Histogram)> {
        self.check_clients(
            |client, (commands, histogram): &mut (usize, Histogram)| {
                // update issued commands with this client's issued commands
                *commands += client.issued_commands();
                // update region's histogram with this client's histogram
                for latency in client.data().latency_data() {
                    // since the simulation assumes WAN, use milliseconds for
                    // latency precision
                    let ms = latency.as_millis() as u64;
                    histogram.increment(ms);
                }
            },
        )
    }
    // Applies `f` to every registered (process, executor) pair and collects
    // the results per process id.
    fn check_processes_and_executors<F, R>(
        &mut self,
        f: F,
    ) -> HashMap<ProcessId, R>
    where
        F: Fn(&P, &P::Executor) -> R,
    {
        let simulation = &mut self.simulation;
        self.process_to_region
            .keys()
            .map(|&process_id| {
                // get process and executor from simulation
                let (process, executor, _, _) =
                    simulation.get_process(process_id);
                // compute process result
                (process_id, f(&process, &executor))
            })
            .collect()
    }
    // Folds `f` over every client, accumulating one `R` per region.
    fn check_clients<F, R>(&mut self, f: F) -> HashMap<Region, R>
    where
        F: Fn(&Client, &mut R),
        R: Default,
    {
        let simulation = &mut self.simulation;
        let mut region_to_results = HashMap::new();
        for (&client_id, region) in self.client_to_region.iter() {
            // get current result for this region
            // (only clone the region key when the entry doesn't exist yet)
            let mut result = match region_to_results.get_mut(region) {
                Some(v) => v,
                None => region_to_results.entry(region.clone()).or_default(),
            };
            // get client from simulation
            let (client, _) = simulation.get_client(client_id);
            // update region result
            f(&client, &mut result);
        }
        region_to_results
    }
}
// Manual `Debug` so `ScheduleAction` only requires `Debug` on its two generic
// payloads (a derive would also print the variants' ids the same way).
impl<Message: Debug, PeriodicEvent: Debug> fmt::Debug
    for ScheduleAction<Message, PeriodicEvent>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ScheduleAction::SubmitToProc(process_id, cmd) => {
                write!(f, "SubmitToProc({}, {:?})", process_id, cmd)
            }
            ScheduleAction::SendToProc(
                from_process_id,
                from_shard_id,
                to,
                msg,
            ) => write!(
                f,
                "SendToProc({}, {}, {}, {:?})",
                from_process_id, from_shard_id, to, msg
            ),
            ScheduleAction::SendToClient(client_id, cmd_result) => {
                write!(f, "SendToClient({}, {:?})", client_id, cmd_result)
            }
            ScheduleAction::PeriodicProcessEvent(process_id, event, delay) => {
                write!(
                    f,
                    "PeriodicProcessEvent({}, {:?}, {:?})",
                    process_id, event, delay
                )
            }
            ScheduleAction::PeriodicExecutedNotification(process_id, delay) => {
                write!(
                    f,
                    "PeriodicExecutedNotification({}, {:?})",
                    process_id, delay
                )
            }
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::client::KeyGen;
use crate::metrics::F64;
use crate::protocol::{Basic, ProtocolMetricsKind};
    // Test helper: runs a full 3-process `Basic` simulation with clients in
    // us-west1 and us-west2, checks issued-command counts and gc stability,
    // and returns the latency histograms of both client regions.
    fn run(f: usize, clients_per_process: usize) -> (Histogram, Histogram) {
        // planet
        let planet = Planet::new();
        // config
        let n = 3;
        let mut config = Config::new(n, f);
        // make sure stability is running
        config.set_gc_interval(Duration::from_millis(100));
        // clients workload
        let shard_count = 1;
        let keys_per_command = 1;
        let pool_size = 1;
        let conflict_rate = 100;
        let key_gen = KeyGen::ConflictPool {
            pool_size,
            conflict_rate,
        };
        let commands_per_client = 1000;
        let payload_size = 100;
        let workload = Workload::new(
            shard_count,
            key_gen,
            keys_per_command,
            commands_per_client,
            payload_size,
        );
        // process regions
        let process_regions = vec![
            Region::new("asia-east1"),
            Region::new("us-central1"),
            Region::new("us-west1"),
        ];
        // client regions
        let client_regions =
            vec![Region::new("us-west1"), Region::new("us-west2")];
        // create runner
        let mut runner: Runner<Basic> = Runner::new(
            planet,
            config,
            workload,
            clients_per_process,
            process_regions,
            client_regions,
        );
        // run simulation until the clients end + another second second
        let (metrics, _executors_monitors, mut clients_latencies) =
            runner.run(Some(Duration::from_secs(1)));
        // check client stats
        let (us_west1_issued, us_west1) = clients_latencies
            .remove(&Region::new("us-west1"))
            .expect("there should stats from us-west1 region");
        let (us_west2_issued, us_west2) = clients_latencies
            .remove(&Region::new("us-west2"))
            .expect("there should stats from us-west2 region");
        // check the number of issued commands
        let expected = commands_per_client * clients_per_process;
        assert_eq!(us_west1_issued, expected);
        assert_eq!(us_west2_issued, expected);
        // check process stats
        metrics.values().into_iter().for_each(
            |(process_metrics, _executor_metrics)| {
                // check stability has run
                let stable_count = process_metrics
                    .get_aggregated(ProtocolMetricsKind::Stable)
                    .expect("stability should have happened");
                // check that all commands were gc-ed:
                // - since we have clients in two regions, the total number of
                //   commands is two times the expected per region
                let total_commands = (expected * 2) as u64;
                assert!(*stable_count == total_commands)
            },
        );
        // return stats for both regions
        (us_west1, us_west2)
    }
#[test]
fn runner_single_client_per_process() {
// expected stats:
// - client us-west1: since us-west1 is a process, from client's
// perspective it should be the latency of accessing the coordinator
// (0ms) plus the latency of accessing the closest fast quorum
// - client us-west2: since us-west2 is _not_ a process, from client's
// perspective it should be the latency of accessing the coordinator
// us-west1 (12ms + 12ms) plus the latency of accessing the closest
// fast quorum
// clients per process
let clients_per_process = 1;
// f = 0
let f = 0;
let (us_west1, us_west2) = run(f, clients_per_process);
assert_eq!(us_west1.mean(), F64::new(0.0));
assert_eq!(us_west2.mean(), F64::new(24.0));
// f = 1
let f = 1;
let (us_west1, us_west2) = run(f, clients_per_process);
assert_eq!(us_west1.mean(), F64::new(34.0));
assert_eq!(us_west2.mean(), F64::new(58.0));
}
#[test]
fn runner_multiple_clients_per_process() {
// 1 client per region
let f = 1;
let clients_per_process = 1;
let (us_west1_with_one, us_west2_with_one) =
run(f, clients_per_process);
// 10 clients per region
let f = 1;
let clients_per_process = 10;
let (us_west1_with_ten, us_west2_with_ten) =
run(f, clients_per_process);
// check stats are the same
assert_eq!(us_west1_with_one.mean(), us_west1_with_ten.mean());
assert_eq!(us_west1_with_one.cov(), us_west1_with_ten.cov());
assert_eq!(us_west2_with_one.mean(), us_west2_with_ten.mean());
assert_eq!(us_west2_with_one.cov(), us_west2_with_ten.cov());
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/sim/mod.rs | fantoch/src/sim/mod.rs | // This module contains the definition of `Simulation`.
pub mod simulation;
// This module contains the definition of `Schedule`.
pub mod schedule;
// This module contains the definition of `Runner`.
pub mod runner;
// Re-exports.
pub use runner::Runner;
pub use schedule::Schedule;
pub use simulation::Simulation;
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/metrics/mod.rs | fantoch/src/metrics/mod.rs | // This module contains the definition of `F64`.
pub mod float;
// This module contains the definition of `Histogram`.
mod histogram;
// Re-exports.
pub use float::F64;
pub use histogram::{Histogram, Stats};
use crate::HashMap;
use serde::{Deserialize, Serialize};
use std::fmt;
use std::hash::Hash;
/// Container of metrics keyed by a metric "kind" `K`: per-kind histograms of
/// individual observations plus per-kind running counters.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Metrics<K: Eq + Hash> {
    // histograms of individual observations, one per metric kind
    collected: HashMap<K, Histogram>,
    // running counters, one per metric kind
    aggregated: HashMap<K, u64>,
}
impl<K> Metrics<K>
where
    K: Eq + Hash + Copy,
{
    /// Creates an empty metrics container.
    #[allow(clippy::new_without_default)]
    pub fn new() -> Self {
        Self {
            collected: HashMap::new(),
            aggregated: HashMap::new(),
        }
    }

    /// Records one observation of `value` in the histogram of `kind`,
    /// creating the histogram on first use.
    pub fn collect(&mut self, kind: K, value: u64) {
        // the entry API does a single lookup, replacing the previous
        // `get_mut`-then-`entry` match (which looked up twice on a miss and
        // is a well-known borrow-checker hazard)
        self.collected
            .entry(kind)
            .or_insert_with(Histogram::new)
            .increment(value);
    }

    /// Adds `by` to the running counter of `kind`, creating the counter
    /// (at 0) on first use.
    pub fn aggregate(&mut self, kind: K, by: u64) {
        *self.aggregated.entry(kind).or_default() += by;
    }

    /// Returns the histogram of `kind`, if any value was ever collected.
    pub fn get_collected(&self, kind: K) -> Option<&Histogram> {
        self.collected.get(&kind)
    }

    /// Returns the counter of `kind`, if it was ever aggregated.
    pub fn get_aggregated(&self, kind: K) -> Option<&u64> {
        self.aggregated.get(&kind)
    }

    /// Merges `other` into `self`: histograms are merged per kind and
    /// counters are added per kind.
    pub fn merge(&mut self, other: &Self) {
        for (k, hist) in other.collected.iter() {
            let current = self.collected.entry(*k).or_default();
            current.merge(hist);
        }
        for (k, v) in other.aggregated.iter() {
            let current = self.aggregated.entry(*k).or_default();
            *current += v;
        }
    }
}
impl<K> fmt::Debug for Metrics<K>
where
    K: Eq + Hash + fmt::Debug,
{
    /// Prints one `kind: value` line per collected histogram, then one line
    /// per aggregated counter.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.collected
            .iter()
            .try_for_each(|(kind, histogram)| {
                writeln!(f, "{:?}: {:?}", kind, histogram)
            })?;
        self.aggregated
            .iter()
            .try_for_each(|(kind, value)| writeln!(f, "{:?}: {:?}", kind, value))
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/metrics/float.rs | fantoch/src/metrics/float.rs | use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
use std::fmt;
/// Wrapper that equips `f64` with total ordering and equality (see the `Ord`
/// and `PartialEq` impls in this file: NaN compares equal to NaN and greater
/// than every other value).
#[derive(PartialOrd, Deserialize, Serialize, Clone, Copy)]
pub struct F64(f64);
impl F64 {
    /// Wraps `x` without any validation (NaN is allowed).
    pub fn new(x: f64) -> Self {
        Self(x)
    }

    /// The value `0.0`.
    pub fn zero() -> Self {
        Self::new(0.0)
    }

    /// The IEEE-754 NaN value.
    pub fn nan() -> Self {
        // associated constant form: preferred over the soft-deprecated
        // `std::f64::NAN` module constant
        Self::new(f64::NAN)
    }

    /// Rounds the inner `f64` with 1 decimal place.
    /// Note: returns the rounded value as a `String`.
    pub fn round(self) -> String {
        format!("{:.1}", self.0)
    }

    /// Returns the wrapped `f64`.
    pub fn value(self) -> f64 {
        self.0
    }
}
// arithmetic simply delegates to the wrapped `f64`
impl std::ops::Add for F64 {
    type Output = Self;

    fn add(self, rhs: Self) -> Self {
        Self(self.0 + rhs.0)
    }
}

impl std::ops::AddAssign for F64 {
    fn add_assign(&mut self, rhs: Self) {
        self.0 += rhs.0;
    }
}

impl std::ops::Sub for F64 {
    type Output = Self;

    fn sub(self, rhs: Self) -> Self {
        Self(self.0 - rhs.0)
    }
}

impl std::ops::Mul for F64 {
    type Output = Self;

    fn mul(self, rhs: Self) -> Self {
        Self(self.0 * rhs.0)
    }
}
// based on: https://github.com/reem/rust-ordered-float/ `cmp` implementation for `OrderedFloat`
impl Ord for F64 {
    /// Total order over `f64`: regular values compare as usual; NaN compares
    /// equal to NaN and greater than every non-NaN value.
    fn cmp(&self, other: &Self) -> Ordering {
        self.partial_cmp(other).unwrap_or_else(|| {
            // `partial_cmp` only returns `None` when at least one side is NaN
            match (self.0.is_nan(), other.0.is_nan()) {
                (true, true) => Ordering::Equal,
                (true, false) => Ordering::Greater,
                (false, _) => Ordering::Less,
            }
        })
    }
}
impl PartialEq for F64 {
    /// Equality that, unlike raw `f64`, treats NaN as equal to NaN.
    fn eq(&self, other: &Self) -> bool {
        // either both are NaN, or the regular float comparison holds
        (self.0.is_nan() && other.0.is_nan()) || self.0 == other.0
    }
}

// `eq` above is reflexive (NaN == NaN), so a total equivalence exists
impl Eq for F64 {}
impl From<F64> for u64 {
fn from(value: F64) -> Self {
value.0 as u64
}
}
impl fmt::Debug for F64 {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn new_with_zero() {
        assert_eq!(F64::new(0.0), F64::zero());
    }

    #[test]
    fn value() {
        assert_eq!(F64::zero().value(), 0.0);
        assert_eq!(F64::new(11.2).value(), 11.2);
    }

    #[test]
    fn add() {
        use std::ops::Add;
        let x = F64::new(11.2);
        let y = F64::new(30.8);
        let expected = F64::new(42.0);
        let result = x.add(y);
        assert_eq!(result, expected);
    }

    #[test]
    fn add_assign() {
        use std::ops::AddAssign;
        let mut x = F64::new(11.2);
        let y = F64::new(30.8);
        let expected = F64::new(42.0);
        x.add_assign(y);
        assert_eq!(x, expected);
    }

    #[test]
    fn sub() {
        use std::ops::Sub;
        let x = F64::new(10.8);
        let y = F64::new(30.8);
        let expected = F64::new(-20.0);
        let result = x.sub(y);
        assert_eq!(result, expected);
    }

    #[test]
    fn mul() {
        // note: relies on the exact f64 result of these particular literals
        use std::ops::Mul;
        let x = F64::new(5.2);
        let y = F64::new(5.3);
        let expected = F64::new(27.56);
        let result = x.mul(y);
        assert_eq!(result, expected);
    }

    #[test]
    fn ord() {
        // NaN is equal to itself and greater than any number (see `Ord` impl)
        use std::cmp::Ordering;
        assert_eq!(F64::new(5.2).cmp(&F64::new(5.3)), Ordering::Less);
        assert_eq!(F64::new(5.3).cmp(&F64::new(5.2)), Ordering::Greater);
        assert_eq!(F64::new(5.2).cmp(&F64::new(5.2)), Ordering::Equal);
        assert_eq!(F64::new(5.2).cmp(&F64::nan()), Ordering::Less);
        assert_eq!(F64::nan().cmp(&F64::new(5.3)), Ordering::Greater);
        assert_eq!(F64::nan().cmp(&F64::nan()), Ordering::Equal);
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/metrics/histogram.rs | fantoch/src/metrics/histogram.rs | use crate::metrics::F64;
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::fmt;
/// Kinds of summary statistic computable from a `Histogram`.
pub enum Stats {
    // arithmetic mean
    Mean,
    COV, // coefficient of variation
    MDTM, // mean distance to mean
}
// TODO maybe use https://docs.rs/hdrhistogram/7.0.0/hdrhistogram/
/// Exact histogram over `u64` values: each distinct value maps to its number
/// of occurrences.
#[derive(Default, Clone, PartialEq, Eq, Deserialize, Serialize)]
pub struct Histogram {
    // raw values: we have "100%" precision as all values are stored
    values: BTreeMap<u64, usize>,
}
impl Histogram {
    /// Creates an empty histogram.
    pub fn new() -> Self {
        Self::default()
    }

    /// Creates a histogram from a list of values.
    pub fn from<T: IntoIterator<Item = u64>>(values: T) -> Self {
        let mut stats = Self::new();
        values.into_iter().for_each(|value| stats.increment(value));
        stats
    }

    /// Returns the number of occurrences.
    pub fn count(&self) -> usize {
        self.values.iter().map(|(_, count)| count).sum::<usize>()
    }

    /// Returns an iterator over all recorded values (in ascending order),
    /// with each value repeated as many times as it occurred.
    pub fn values(&self) -> impl Iterator<Item = u64> + '_ {
        self.values
            .iter()
            .flat_map(|(value, count)| (0..*count).map(move |_| *value))
    }

    /// Exposes the underlying value -> count map.
    pub fn inner(&self) -> &BTreeMap<u64, usize> {
        &self.values
    }

    /// Merges two histograms.
    pub fn merge(&mut self, other: &Self) {
        histogram_merge(&mut self.values, &other.values)
    }

    /// Increments the occurrence of some value in the histogram.
    pub fn increment(&mut self, value: u64) {
        // register another occurrence of `value`
        let count = self.values.entry(value).or_insert(0);
        *count += 1;
    }

    /// Arithmetic mean of all recorded values (NaN when the histogram is
    /// empty, since the computation is then 0.0 / 0.0).
    pub fn mean(&self) -> F64 {
        let (mean, _) = self.compute_mean_and_count();
        F64::new(mean)
    }

    /// Corrected sample standard deviation of the recorded values.
    pub fn stddev(&self) -> F64 {
        let (mean, count) = self.compute_mean_and_count();
        let stddev = self.compute_stddev(mean, count);
        F64::new(stddev)
    }

    /// Coefficient of variation: stddev / mean.
    pub fn cov(&self) -> F64 {
        let cov = self.compute_cov();
        F64::new(cov)
    }

    /// Mean absolute distance to the mean.
    pub fn mdtm(&self) -> F64 {
        let mdtm = self.compute_mdtm();
        F64::new(mdtm)
    }

    /// Difference between this histogram's mean and `other`'s
    /// (negative when `self` has the smaller mean).
    pub fn mean_improv(&self, other: &Self) -> F64 {
        self.mean() - other.mean()
    }

    /// Difference between the two coefficients of variation.
    pub fn cov_improv(&self, other: &Self) -> F64 {
        self.cov() - other.cov()
    }

    /// Difference between the two mean-distances-to-mean.
    pub fn mdtm_improv(&self, other: &Self) -> F64 {
        self.mdtm() - other.mdtm()
    }

    /// Smallest recorded value (NaN when the histogram is empty).
    pub fn min(&self) -> F64 {
        self.values
            .iter()
            .next()
            .map(|(min, _)| F64::new(*min as f64))
            .unwrap_or_else(F64::nan)
    }

    /// Largest recorded value (NaN when the histogram is empty).
    pub fn max(&self) -> F64 {
        self.values
            .iter()
            .next_back()
            .map(|(min, _)| F64::new(*min as f64))
            .unwrap_or_else(F64::nan)
    }

    // Computes a given percentile.
    // `percentile` must be in [0.0, 1.0]; panics otherwise. Returns 0 for an
    // empty histogram. When the scaled index lands exactly on a whole number,
    // the result is the average of the two neighboring values.
    pub fn percentile(&self, percentile: f64) -> F64 {
        assert!(percentile >= 0.0 && percentile <= 1.0);
        if self.values.is_empty() {
            return F64::zero();
        }
        // compute the number of elements in the histogram
        let count = self.count() as f64;
        let index = percentile * count;
        let index_rounded = index.round();
        // check if index is a whole number
        let is_whole_number = (index - index_rounded).abs() == 0.0;
        // compute final index
        let mut index = index_rounded as usize;
        // create data iterator of values in the histogram
        let mut data = self.values.iter();
        // compute left and right value that will be used to compute the
        // percentile
        let left_value;
        let right_value;
        loop {
            let (value, count) =
                data.next().expect("there should a next histogram value");
            match index.cmp(&count) {
                Ordering::Equal => {
                    // if it's the same, this is the left value and the next
                    // histogram value is the right value
                    left_value = *value as f64;
                    right_value = data.next().map(|(value, _)| *value as f64);
                    break;
                }
                Ordering::Less => {
                    // if index is smaller, this value is both the left and the
                    // right value
                    left_value = *value as f64;
                    right_value = Some(left_value);
                    break;
                }
                Ordering::Greater => {
                    // if greater, keep going
                    index -= count;
                }
            }
        }
        let value = if is_whole_number {
            (left_value + right_value.expect("there should be a right value"))
                / 2.0
        } else {
            left_value
        };
        F64::new(value)
    }

    // Returns `(mean, count)`, both as floats.
    fn compute_mean_and_count(&self) -> (f64, f64) {
        let (sum, count) = self.sum_and_count();
        // cast them to floats
        let sum = sum as f64;
        let count = count as f64;
        // compute mean
        let mean = sum / count;
        (mean, count)
    }

    // Returns the sum of all recorded values and the number of occurrences.
    fn sum_and_count(&self) -> (u64, usize) {
        self.values.iter().fold(
            (0, 0),
            |(sum_acc, count_acc), (value, count)| {
                // compute the actual sum for this value
                let sum = value * (*count as u64);
                (sum_acc + sum, count_acc + count)
            },
        )
    }

    fn compute_cov(&self) -> f64 {
        let (mean, count) = self.compute_mean_and_count();
        let stddev = self.compute_stddev(mean, count);
        stddev / mean
    }

    fn compute_stddev(&self, mean: f64, count: f64) -> f64 {
        let variance = self.compute_variance(mean, count);
        variance.sqrt()
    }

    fn compute_variance(&self, mean: f64, count: f64) -> f64 {
        let sum = self
            .values
            .iter()
            .map(|(x, x_count)| (*x as f64, *x_count as f64))
            .map(|(x, x_count)| {
                let diff = mean - x;
                // as `x` was reported `x_count` times, we multiply the squared
                // diff by it
                (diff * diff) * x_count
            })
            .sum::<f64>();
        // we divide by (count - 1) to have the corrected version of variance
        // - https://en.wikipedia.org/wiki/Standard_deviation#Corrected_sample_standard_deviation
        sum / (count - 1.0)
    }

    fn compute_mdtm(&self) -> f64 {
        let (mean, count) = self.compute_mean_and_count();
        let distances_sum = self
            .values
            .iter()
            .map(|(x, x_count)| (*x as f64, *x_count as f64))
            .map(|(x, x_count)| {
                let diff = mean - x;
                // as `x` was reported `x_count` times, we multiply the absolute
                // value by it
                diff.abs() * x_count
            })
            .sum::<f64>();
        distances_sum / count
    }
}
impl fmt::Debug for Histogram {
    /// Renders a one-line summary (mean, stddev, selected percentiles, min,
    /// max), or `(empty)` when no value was ever recorded.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.values.is_empty() {
            write!(f, "(empty)")
        } else {
            // `.value().round()` here is `f64::round` (nearest integer), not
            // the string-producing `F64::round`
            write!(
                f,
                "avg={:<5} std={:<5} p95={:<5} p99={:<5} p99.9={:<5} p99.99={:<5} min={:<5} max={:<5}",
                self.mean().value().round(),
                self.stddev().value().round(),
                self.percentile(0.95).value().round(),
                self.percentile(0.99).value().round(),
                self.percentile(0.999).value().round(),
                self.percentile(0.9999).value().round(),
                self.min().value().round(),
                self.max().value().round(),
            )
        }
    }
}
/// Merges `other` into `map`, adding counts for keys present in both and
/// inserting keys only present in `other`.
///
/// Both maps are walked in ascending key order with one cursor each, so keys
/// present in both are merged in a single linear pass. Keys from `other` that
/// are missing from `map` cannot be inserted while `map` is mutably iterated,
/// so they are staged and inserted afterwards.
pub fn histogram_merge<K>(
    map: &mut BTreeMap<K, usize>,
    other: &BTreeMap<K, usize>,
) where
    K: Ord + Eq + Clone,
{
    let mut map_iter = map.iter_mut();
    let mut other_iter = other.iter();
    // current entry of each cursor
    let mut map_cur = map_iter.next();
    let mut other_cur = other_iter.next();
    // entries from `other` that fall below `map`'s largest key but are
    // absent from `map`
    let mut staged: Vec<(K, usize)> = Vec::new();
    loop {
        let (map_key, map_value) = match map_cur {
            Some(entry) => entry,
            None => {
                // `map` is exhausted: the entry currently held by the other
                // cursor (if any) was never matched, so stage it; the rest of
                // `other_iter` is appended below
                if let Some((key, value)) = other_cur {
                    staged.push((key.clone(), *value));
                }
                break;
            }
        };
        let (other_key, other_value) = match other_cur {
            // `other` is exhausted: everything from it is already merged
            Some(entry) => entry,
            None => break,
        };
        match map_key.cmp(other_key) {
            Ordering::Less => {
                // advance only the `map` cursor
                map_cur = map_iter.next();
                other_cur = Some((other_key, other_value));
            }
            Ordering::Greater => {
                // key missing from `map`: stage it and advance `other`
                staged.push((other_key.clone(), *other_value));
                map_cur = Some((map_key, map_value));
                other_cur = other_iter.next();
            }
            Ordering::Equal => {
                // key present in both: add counts and advance both cursors
                *map_value += other_value;
                map_cur = map_iter.next();
                other_cur = other_iter.next();
            }
        }
    }
    // insert the staged entries plus any keys from `other` larger than
    // `map`'s largest key
    map.extend(staged);
    map.extend(other_iter.map(|(key, value)| (key.clone(), *value)));
}
#[cfg(test)]
mod proptests {
    use super::*;
    use crate::elapsed;
    use crate::HashMap;
    use quickcheck::TestResult;
    use quickcheck_macros::quickcheck;
    use std::hash::Hash;
    use std::iter::FromIterator;

    // naive reference implementation of merge, over a `HashMap`
    fn hash_merge<K>(map: &mut HashMap<K, usize>, other: &HashMap<K, usize>)
    where
        K: Hash + Eq + Clone,
    {
        other.into_iter().for_each(|(k, v)| match map.get_mut(&k) {
            Some(m) => {
                *m += *v;
            }
            None => {
                map.entry(k.clone()).or_insert(*v);
            }
        });
    }

    type K = u64;

    // checks that `histogram_merge` agrees with the naive `hash_merge`
    #[quickcheck]
    fn merge_check(map: Vec<(K, usize)>, other: Vec<(K, usize)>) -> TestResult {
        // create hashmaps and merge them
        let mut hashmap = HashMap::from_iter(map.clone());
        let other_hashmap = HashMap::from_iter(other.clone());
        // discard generated inputs whose merged counts would overflow
        for (key, value) in hashmap.iter() {
            if let Some(other_value) = other_hashmap.get(key) {
                let overflow = value.checked_add(*other_value).is_none();
                if overflow {
                    return TestResult::discard();
                }
            }
        }
        let (naive_time, _) =
            elapsed!(hash_merge(&mut hashmap, &other_hashmap));
        // create btreemaps and merge them
        let mut btreemap = BTreeMap::from_iter(map.clone());
        let other_btreemap = BTreeMap::from_iter(other.clone());
        let (time, _) =
            elapsed!(histogram_merge(&mut btreemap, &other_btreemap));
        // show merge times
        println!("{} {}", naive_time.as_nanos(), time.as_nanos());
        let result = btreemap == BTreeMap::from_iter(hashmap);
        TestResult::from_bool(result)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn stats() {
        let stats = Histogram::from(vec![1, 1, 1]);
        assert_eq!(stats.mean(), F64::new(1.0));
        assert_eq!(stats.cov(), F64::new(0.0));
        assert_eq!(stats.mdtm(), F64::new(0.0));
        assert_eq!(stats.min(), F64::new(1.0));
        assert_eq!(stats.max(), F64::new(1.0));
        let stats = Histogram::from(vec![10, 20, 30]);
        assert_eq!(stats.mean(), F64::new(20.0));
        assert_eq!(stats.cov(), F64::new(0.5));
        assert_eq!(stats.min(), F64::new(10.0));
        assert_eq!(stats.max(), F64::new(30.0));
        let stats = Histogram::from(vec![10, 20]);
        assert_eq!(stats.mean(), F64::new(15.0));
        assert_eq!(stats.mdtm(), F64::new(5.0));
        assert_eq!(stats.min(), F64::new(10.0));
        assert_eq!(stats.max(), F64::new(20.0));
    }

    // same stats, checked through `F64::round` (1-decimal string)
    #[test]
    fn stats_show() {
        let stats = Histogram::from(vec![1, 1, 1]);
        assert_eq!(stats.mean().round(), "1.0");
        assert_eq!(stats.cov().round(), "0.0");
        assert_eq!(stats.mdtm().round(), "0.0");
        let stats = Histogram::from(vec![10, 20, 30]);
        assert_eq!(stats.mean().round(), "20.0");
        assert_eq!(stats.cov().round(), "0.5");
        assert_eq!(stats.mdtm().round(), "6.7");
        let stats = Histogram::from(vec![10, 20]);
        assert_eq!(stats.mean().round(), "15.0");
        assert_eq!(stats.cov().round(), "0.5");
        assert_eq!(stats.mdtm().round(), "5.0");
        let stats = Histogram::from(vec![10, 20, 40, 10]);
        assert_eq!(stats.mean().round(), "20.0");
        assert_eq!(stats.cov().round(), "0.7");
        assert_eq!(stats.mdtm().round(), "10.0");
    }

    #[test]
    fn stats_improv() {
        let stats_a = Histogram::from(vec![1, 1, 1]);
        let stats_b = Histogram::from(vec![10, 20]);
        assert_eq!(stats_a.mean_improv(&stats_b), F64::new(-14.0));
        let stats_a = Histogram::from(vec![1, 1, 1]);
        let stats_b = Histogram::from(vec![10, 20, 30]);
        assert_eq!(stats_a.cov_improv(&stats_b), F64::new(-0.5));
        let stats_a = Histogram::from(vec![1, 1, 1]);
        let stats_b = Histogram::from(vec![10, 20]);
        assert_eq!(stats_a.mdtm_improv(&stats_b), F64::new(-5.0));
    }

    #[test]
    fn percentile() {
        let data = vec![
            43, 54, 56, 61, 62, 66, 68, 69, 69, 70, 71, 72, 77, 78, 79, 85, 87,
            88, 89, 93, 95, 96, 98, 99, 99,
        ];
        let stats = Histogram::from(data);
        assert_eq!(stats.min(), F64::new(43.0));
        assert_eq!(stats.max(), F64::new(99.0));
        assert_eq!(stats.percentile(0.9), F64::new(98.0));
        assert_eq!(stats.percentile(0.5), F64::new(77.0));
        assert_eq!(stats.percentile(0.2), F64::new(64.0));
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/executor/mod.rs | fantoch/src/executor/mod.rs | // This module contains the definition of `Pending`.
mod aggregate;
// This module contains the implementation of a basic executor that executes
// operations as soon as it receives them.
mod basic;
// This module contains the definition of `ExecutionOrderMonitor`.
mod monitor;
// Re-exports.
pub use aggregate::AggregatePending;
pub use basic::{BasicExecutionInfo, BasicExecutor};
pub use monitor::ExecutionOrderMonitor;
use crate::config::Config;
use crate::id::{ProcessId, Rifl, ShardId};
use crate::kvs::{KVOpResult, Key};
use crate::metrics::Metrics;
use crate::protocol::{CommittedAndExecuted, MessageIndex};
use crate::time::SysTime;
use crate::util;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use std::fmt::{self, Debug};
/// Interface implemented by command executors: they consume execution info
/// produced by the protocol and output results for clients (and, possibly,
/// further execution info destined to executors in other shards).
pub trait Executor: Clone {
    // TODO why is Send needed?
    type ExecutionInfo: Debug
        + Clone
        + Serialize
        + DeserializeOwned
        + Send
        + Sync
        + MessageIndex; // TODO why is Sync needed??

    /// Creates an executor for process `process_id` in shard `shard_id`.
    fn new(process_id: ProcessId, shard_id: ShardId, config: Config) -> Self;

    fn set_executor_index(&mut self, _index: usize) {
        // executors interested in the index should overwrite this
    }

    fn cleanup(&mut self, _time: &dyn SysTime) {
        // executors interested in a periodic cleanup should overwrite this
    }

    fn monitor_pending(&mut self, _time: &dyn SysTime) {
        // executors interested in a periodic check of pending commands should
        // overwrite this
    }

    /// Feeds one piece of execution info into the executor; outputs (if any)
    /// are retrieved via `to_clients` / `to_executors`.
    fn handle(&mut self, infos: Self::ExecutionInfo, time: &dyn SysTime);

    /// Returns the next result to be sent to a client, if any.
    #[must_use]
    fn to_clients(&mut self) -> Option<ExecutorResult>;

    /// Drains all pending client results as an iterator.
    #[must_use]
    fn to_clients_iter(&mut self) -> ToClientsIter<'_, Self> {
        ToClientsIter { executor: self }
    }

    /// Returns the next execution info to forward to another shard, if any.
    #[must_use]
    fn to_executors(&mut self) -> Option<(ShardId, Self::ExecutionInfo)> {
        // non-genuine protocols should overwrite this
        None
    }

    /// Drains all pending executor-to-executor messages as an iterator.
    #[must_use]
    fn to_executors_iter(&mut self) -> ToExecutorsIter<'_, Self> {
        ToExecutorsIter { executor: self }
    }

    #[must_use]
    fn executed(
        &mut self,
        _time: &dyn SysTime,
    ) -> Option<CommittedAndExecuted> {
        // protocols that are interested in notifying the worker
        // `GC_WORKER_INDEX` (see fantoch::run::prelude) with these executed
        // notifications should overwrite this
        None
    }

    // NOTE(review): presumably indicates whether several instances of this
    // executor may run in parallel — confirm against the runner
    fn parallel() -> bool;

    /// Returns the metrics recorded by this executor.
    fn metrics(&self) -> &ExecutorMetrics;

    /// Returns the execution-order monitor, for executors that track one.
    fn monitor(&self) -> Option<ExecutionOrderMonitor>;
}
/// Iterator that drains an executor's pending client results by repeatedly
/// calling `Executor::to_clients` until it yields `None`.
pub struct ToClientsIter<'a, E> {
    executor: &'a mut E,
}

impl<'e, E: Executor> Iterator for ToClientsIter<'e, E> {
    type Item = ExecutorResult;

    fn next(&mut self) -> Option<Self::Item> {
        self.executor.to_clients()
    }
}

/// Iterator that drains an executor's pending cross-shard execution info by
/// repeatedly calling `Executor::to_executors` until it yields `None`.
pub struct ToExecutorsIter<'a, E> {
    executor: &'a mut E,
}

impl<'e, E: Executor> Iterator for ToExecutorsIter<'e, E> {
    type Item = (ShardId, E::ExecutionInfo);

    fn next(&mut self) -> Option<Self::Item> {
        self.executor.to_executors()
    }
}
/// Metrics container instantiated with executor-specific metric kinds.
pub type ExecutorMetrics = Metrics<ExecutorMetricsKind>;
/// Kinds of metric recorded by executors; the `Debug` impl below gives their
/// snake_case names used when reporting.
#[derive(Clone, Copy, Hash, PartialEq, Eq, Serialize, Deserialize)]
pub enum ExecutorMetricsKind {
    // general metric
    ExecutionDelay,
    // the remaining kinds are graph-executor specific
    ChainSize,
    OutRequests,
    InRequests,
    InRequestReplies,
}
impl Debug for ExecutorMetricsKind {
    /// Writes the metric's snake_case tag.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self {
            // general metric
            ExecutorMetricsKind::ExecutionDelay => "execution_delay",
            // graph executor specific
            ExecutorMetricsKind::ChainSize => "chain_size",
            ExecutorMetricsKind::OutRequests => "out_requests",
            ExecutorMetricsKind::InRequests => "in_requests",
            ExecutorMetricsKind::InRequestReplies => "in_request_replies",
        };
        write!(f, "{}", name)
    }
}
/// Trait for execution info that targets a single key.
pub trait MessageKey {
    /// Returns which `key` the execution info is about.
    fn key(&self) -> &Key;
}
// blanket impl: anything keyed by a single `Key` is indexed by that key's
// hash (see `key_index` below)
impl<A> MessageIndex for A
where
    A: MessageKey,
{
    fn index(&self) -> Option<(usize, usize)> {
        Some(key_index(self.key()))
    }
}
// The index of a key is its hash
#[allow(clippy::ptr_arg)]
fn key_index(key: &Key) -> (usize, usize) {
    // group 0, with the slot inside the group given by the key's hash
    (0, util::key_hash(key) as usize)
}
/// Partial results of executing a command on a single key; produced by
/// executors and later aggregated per command (see `AggregatePending`) into a
/// full command result.
#[derive(Debug, Clone)]
pub struct ExecutorResult {
    pub rifl: Rifl,
    pub key: Key,
    pub partial_results: Vec<KVOpResult>,
}
impl ExecutorResult {
pub fn new(rifl: Rifl, key: Key, partial_results: Vec<KVOpResult>) -> Self {
ExecutorResult {
rifl,
key,
partial_results,
}
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/executor/monitor.rs | fantoch/src/executor/monitor.rs | use crate::id::Rifl;
use crate::kvs::Key;
use crate::HashMap;
/// This structure can be used to monitor the order in which commands are
/// executed, per key, and then check that all processes have the same order
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ExecutionOrderMonitor {
    // for each key, the rifls of the (non read-only) commands, in the order
    // they were executed
    order_per_key: HashMap<Key, Vec<Rifl>>,
}
impl ExecutionOrderMonitor {
    /// Creates an empty monitor.
    // same allow as `Metrics::new`: a `Default` impl is intentionally omitted
    #[allow(clippy::new_without_default)]
    pub fn new() -> Self {
        Self {
            order_per_key: Default::default(),
        }
    }

    /// Adds a new command to the monitor.
    /// Read-only commands are ignored.
    pub fn add(&mut self, key: &Key, read_only: bool, rifl: Rifl) {
        if read_only {
            return;
        }
        // `get_mut`-then-insert (rather than the entry API) avoids cloning
        // the key when it is already present
        if let Some(current) = self.order_per_key.get_mut(key) {
            current.push(rifl);
        } else {
            self.order_per_key.insert(key.clone(), vec![rifl]);
        }
    }

    /// Merge other monitor into this one. This can be used by protocols that
    /// can have multiple executors.
    pub fn merge(&mut self, other: Self) {
        for (key, rifls) in other.order_per_key {
            let result = self.order_per_key.insert(key, rifls);
            // different monitors should operate on different keys; panic if
            // that's not the case
            assert!(result.is_none());
        }
    }

    /// Returns the execution order recorded for `key`, if any.
    pub fn get_order(&self, key: &Key) -> Option<&Vec<Rifl>> {
        self.order_per_key.get(key)
    }

    /// Iterates over the monitored keys.
    pub fn keys(&self) -> impl Iterator<Item = &Key> {
        self.order_per_key.keys()
    }

    /// Number of distinct keys with a recorded order.
    pub fn len(&self) -> usize {
        self.order_per_key.len()
    }

    /// Returns `true` when no order has been recorded for any key.
    pub fn is_empty(&self) -> bool {
        self.order_per_key.is_empty()
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/executor/basic.rs | fantoch/src/executor/basic.rs | use crate::config::Config;
use crate::executor::{
ExecutionOrderMonitor, Executor, ExecutorMetrics, ExecutorResult,
MessageKey,
};
use crate::id::{ProcessId, Rifl, ShardId};
use crate::kvs::{KVOp, KVStore, Key};
use crate::time::SysTime;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
/// Executor that executes operations as soon as it receives them.
#[derive(Clone)]
pub struct BasicExecutor {
    // key-value store the operations are applied to
    store: KVStore,
    // metrics (this executor only reports them; it records none itself)
    metrics: ExecutorMetrics,
    // results waiting to be drained via `to_clients`
    to_clients: Vec<ExecutorResult>,
}
impl Executor for BasicExecutor {
    type ExecutionInfo = BasicExecutionInfo;

    fn new(
        _process_id: ProcessId,
        _shard_id: ShardId,
        _config: Config,
    ) -> Self {
        // execution-order monitoring is disabled for this executor (see
        // `monitor` below, which returns `None`)
        let monitor = false;
        let store = KVStore::new(monitor);
        let metrics = ExecutorMetrics::new();
        let to_clients = Vec::new();
        Self {
            store,
            metrics,
            to_clients,
        }
    }

    /// Executes the operations immediately and queues the result.
    fn handle(&mut self, info: Self::ExecutionInfo, _time: &dyn SysTime) {
        let BasicExecutionInfo { rifl, key, ops } = info;
        // take the ops inside the arc if we're the last with a
        // reference to it (otherwise, clone them)
        let ops =
            Arc::try_unwrap(ops).unwrap_or_else(|ops| ops.as_ref().clone());
        // execute op in the `KVStore`
        let partial_results = self.store.execute(&key, ops, rifl);
        self.to_clients
            .push(ExecutorResult::new(rifl, key, partial_results));
    }

    // results are drained in LIFO order (`Vec::pop`)
    fn to_clients(&mut self) -> Option<ExecutorResult> {
        self.to_clients.pop()
    }

    fn parallel() -> bool {
        true
    }

    fn metrics(&self) -> &ExecutorMetrics {
        &self.metrics
    }

    fn monitor(&self) -> Option<ExecutionOrderMonitor> {
        None
    }
}
/// Execution info handled by `BasicExecutor`: the operations of command
/// `rifl` that target `key`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct BasicExecutionInfo {
    rifl: Rifl,
    key: Key,
    // ops behind an `Arc` so cloning this info does not deep-copy them (see
    // `Arc::try_unwrap` in `BasicExecutor::handle`)
    ops: Arc<Vec<KVOp>>,
}
impl BasicExecutionInfo {
    pub fn new(rifl: Rifl, key: Key, ops: Arc<Vec<KVOp>>) -> Self {
        Self { rifl, key, ops }
    }
}
impl MessageKey for BasicExecutionInfo {
    // routes this execution info by the key it targets
    fn key(&self) -> &Key {
        &self.key
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/executor/aggregate.rs | fantoch/src/executor/aggregate.rs | use crate::command::{Command, CommandResult, CommandResultBuilder};
use crate::executor::ExecutorResult;
use crate::id::{ProcessId, Rifl, ShardId};
use crate::trace;
use crate::HashMap;
/// Structure that tracks the progress of pending commands.
pub struct AggregatePending {
    // identifier of the local process; used in trace messages
    process_id: ProcessId,
    // identifier of the local shard; used to count the keys of a command
    shard_id: ShardId,
    // per-command (rifl) builder that accumulates partial results
    pending: HashMap<Rifl, CommandResultBuilder>,
}
impl AggregatePending {
    /// Creates a new `Pending` instance.
    /// In this `Pending` implementation, results are only returned once the
    /// aggregation of all partial results is complete; this also means that
    /// non-parallel executors can return the full command result without
    /// having to return partials
    pub fn new(process_id: ProcessId, shard_id: ShardId) -> Self {
        Self {
            process_id,
            shard_id,
            pending: HashMap::new(),
        }
    }

    /// Starts tracking a command submitted by some client.
    /// Returns `true` when the command was not being tracked yet.
    pub fn wait_for(&mut self, cmd: &Command) -> bool {
        // get command rifl and key count
        let rifl = cmd.rifl();
        let key_count = cmd.key_count(self.shard_id);
        trace!(
            "p{}: AggregatePending::wait_for {:?} | count = {}",
            self.process_id,
            rifl,
            key_count
        );
        // create `CommandResult`
        let cmd_result = CommandResultBuilder::new(rifl, key_count);
        // add it to pending
        self.pending.insert(rifl, cmd_result).is_none()
    }

    /// Adds a new partial command result.
    /// Returns the full `CommandResult` once all partials have arrived.
    pub fn add_executor_result(
        &mut self,
        executor_result: ExecutorResult,
    ) -> Option<CommandResult> {
        let ExecutorResult {
            rifl,
            key,
            partial_results,
        } = executor_result;
        // get current value:
        // - if it's not part of pending, then ignore it
        // (if it's not part of pending, it means that it is from a client from
        // another tempo process, and `pending.wait_for*` has not been
        // called)
        let cmd_result_builder = self.pending.get_mut(&rifl)?;
        // add partial result and check if it's ready
        cmd_result_builder.add_partial(key, partial_results);
        if cmd_result_builder.ready() {
            trace!(
                "p{}: AggregatePending::add_partial {:?} is ready",
                self.process_id,
                rifl
            );
            // if it is, remove it from pending
            let cmd_result_builder = self
                .pending
                .remove(&rifl)
                .expect("command result builder must exist");
            // finally, build the command result
            Some(cmd_result_builder.into())
        } else {
            trace!(
                "p{}: AggregatePending::add_partial {:?} is not ready",
                self.process_id,
                rifl
            );
            None
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::command::Command;
    use crate::kvs::{KVOp, KVStore};
    // Exercises the full `AggregatePending` life-cycle: commands must be
    // registered with `wait_for` before their partial results are accepted,
    // partial results for unknown rifls are ignored, and a `CommandResult`
    // is only produced once partials for *all* keys of a command arrived.
    #[test]
    fn pending_flow() {
        // create pending and store
        let process_id = 1;
        let shard_id = 0;
        let mut pending = AggregatePending::new(process_id, shard_id);
        let monitor = false;
        let mut store = KVStore::new(monitor);
        // keys and commands
        let key_a = String::from("A");
        let key_b = String::from("B");
        let foo = String::from("foo");
        let bar = String::from("bar");
        // command put a
        let put_a_rifl = Rifl::new(1, 1);
        let put_a = Command::from(
            put_a_rifl,
            vec![(key_a.clone(), KVOp::Put(foo.clone()))],
        );
        // command put b
        let put_b_rifl = Rifl::new(2, 1);
        let put_b = Command::from(
            put_b_rifl,
            vec![(key_b.clone(), KVOp::Put(bar.clone()))],
        );
        // command get a and b
        let get_ab_rifl = Rifl::new(3, 1);
        let get_ab = Command::from(
            get_ab_rifl,
            vec![(key_a.clone(), KVOp::Get), (key_b.clone(), KVOp::Get)],
        );
        // wait for `get_ab` and `put_b`
        assert!(pending.wait_for(&get_ab));
        assert!(pending.wait_for(&put_b));
        // starting a command already started `false`
        assert!(!pending.wait_for(&put_b));
        // add the result of get b and assert that the command is not ready yet
        let get_b_res = store.test_execute(&key_b, KVOp::Get);
        let res = pending.add_executor_result(ExecutorResult::new(
            get_ab_rifl,
            key_b.clone(),
            vec![get_b_res],
        ));
        assert!(res.is_none());
        // add the result of put a before being waited for
        // (dropped: `put_a` was never registered, so there's no builder yet)
        let put_a_res = store.test_execute(&key_a, KVOp::Put(foo.clone()));
        let res = pending.add_executor_result(ExecutorResult::new(
            put_a_rifl,
            key_a.clone(),
            vec![put_a_res.clone()],
        ));
        assert!(res.is_none());
        // wait for `put_a`
        pending.wait_for(&put_a);
        // add the result of put a and assert that the command is ready
        let res = pending.add_executor_result(ExecutorResult::new(
            put_a_rifl,
            key_a.clone(),
            vec![put_a_res.clone()],
        ));
        assert!(res.is_some());
        // check that there's only one result (since the command accessed a
        // single key)
        let res = res.unwrap();
        assert_eq!(res.results().len(), 1);
        // check that there was nothing in the kvs before
        assert_eq!(res.results().get(&key_a).unwrap(), &vec![None]);
        // add the result of put b and assert that the command is ready
        let put_b_res = store.test_execute(&key_b, KVOp::Put(bar.clone()));
        let res = pending.add_executor_result(ExecutorResult::new(
            put_b_rifl,
            key_b.clone(),
            vec![put_b_res],
        ));
        // check that there's only one result (since the command accessed a
        // single key)
        let res = res.unwrap();
        assert_eq!(res.results().len(), 1);
        // check that there was nothing in the kvs before
        assert_eq!(res.results().get(&key_b).unwrap(), &vec![None]);
        // add the result of get a and assert that the command is ready
        let get_a_res = store.test_execute(&key_a, KVOp::Get);
        let res = pending.add_executor_result(ExecutorResult::new(
            get_ab_rifl,
            key_a.clone(),
            vec![get_a_res],
        ));
        assert!(res.is_some());
        // check that there are two results (since the command accessed two
        // keys)
        let res = res.unwrap();
        assert_eq!(res.results().len(), 2);
        // check that `get_ab` saw `put_a` but not `put_b`
        assert_eq!(res.results().get(&key_a).unwrap(), &vec![Some(foo)]);
        assert_eq!(res.results().get(&key_b).unwrap(), &vec![None]);
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/prelude.rs | fantoch/src/run/prelude.rs | use super::chan::{ChannelReceiver, ChannelSender};
use super::pool;
use crate::command::{Command, CommandResult};
use crate::executor::{Executor, ExecutorMetrics, ExecutorResult};
use crate::id::{ClientId, Dot, ProcessId, ShardId};
use crate::load_balance::*;
use crate::protocol::{
CommittedAndExecuted, MessageIndex, Protocol, ProtocolMetrics,
};
use serde::{Deserialize, Serialize};
use std::fmt;
use std::sync::Arc;
/// Handshake message sent by a process when it connects to a peer,
/// identifying itself and the shard it belongs to.
#[derive(Debug, Serialize, Deserialize)]
pub struct ProcessHi {
    pub process_id: ProcessId,
    pub shard_id: ShardId,
}
/// Handshake message sent by a client connection, carrying the ids of all
/// clients multiplexed over that connection.
#[derive(Debug, Serialize, Deserialize)]
pub struct ClientHi(pub Vec<ClientId>);
// If the command touches a single shard, then a `Submit` will be sent to that
// shard. If the command touches more than one shard, a `Submit` will be sent
// to one targeted shard and a `Register` will be sent to the remaining shards
// to make sure that the client will eventually receive a `CommandResult` from
// all shards.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ClientToServer {
    Submit(Command),
    Register(Command),
}
/// Messages sent by clients to executors to (un)register interest in
/// command results.
#[derive(Debug, Clone)]
pub enum ClientToExecutor {
    // clients can register; the sender is where executors write results
    Register(Vec<ClientId>, ExecutorResultSender),
    // unregister
    Unregister(Vec<ClientId>),
}
/// Message exchanged between processes: either a protocol message (handled by
/// a protocol worker) or execution info (handled by an executor).
#[derive(Debug, Serialize, Deserialize)]
// these bounds are explained here: https://github.com/serde-rs/serde/issues/1503#issuecomment-475059482
#[serde(bound(
    serialize = "P::Message: Serialize",
    deserialize = "P::Message: Deserialize<'de>",
))]
pub enum POEMessage<P: Protocol> {
    Protocol(<P as Protocol>::Message),
    Executor(<<P as Protocol>::Executor as Executor>::ExecutionInfo),
}
impl<P: Protocol> POEMessage<P> {
    /// Returns whether this message should be routed to an executor
    /// (as opposed to a protocol worker).
    pub fn to_executor(&self) -> bool {
        matches!(self, Self::Executor(_))
    }
}
// list of channels used to communicate between tasks
// TODO: remove most of these as it doesn't really help reading the code
// process <-> process plumbing
pub type ReaderReceiver<P> =
    ChannelReceiver<(ProcessId, ShardId, <P as Protocol>::Message)>;
pub type WriterReceiver<P> = ChannelReceiver<Arc<POEMessage<P>>>;
pub type WriterSender<P> = ChannelSender<Arc<POEMessage<P>>>;
// client <-> server plumbing
pub type ClientToExecutorReceiver = ChannelReceiver<ClientToExecutor>;
pub type ClientToServerReceiver = ChannelReceiver<ClientToServer>;
pub type ServerToClientSender = ChannelSender<CommandResult>;
// executor plumbing
pub type ExecutorResultReceiver = ChannelReceiver<ExecutorResult>;
pub type ExecutorResultSender = ChannelSender<ExecutorResult>;
pub type ExecutedReceiver = ChannelReceiver<CommittedAndExecuted>;
pub type SubmitReceiver = ChannelReceiver<(Option<Dot>, Command)>;
pub type ExecutionInfoReceiver<P> =
    ChannelReceiver<<<P as Protocol>::Executor as Executor>::ExecutionInfo>;
pub type ExecutionInfoSender<P> =
    ChannelSender<<<P as Protocol>::Executor as Executor>::ExecutionInfo>;
// periodic-events / inspection plumbing
pub type PeriodicEventReceiver<P, R> =
    ChannelReceiver<FromPeriodicMessage<P, R>>;
pub type InspectFun<P, R> = (fn(&P) -> R, ChannelSender<R>);
pub type InspectReceiver<P, R> = ChannelReceiver<InspectFun<P, R>>;
// ping-task plumbing (replies are sent through the channel sent as request)
pub type SortedProcessesSender =
    ChannelSender<ChannelSender<Vec<(ProcessId, ShardId)>>>;
pub type SortedProcessesReceiver =
    ChannelReceiver<ChannelSender<Vec<(ProcessId, ShardId)>>>;
// metrics plumbing (tagged with the reporting worker/executor index)
pub type ProtocolMetricsReceiver = ChannelReceiver<(usize, ProtocolMetrics)>;
pub type ProtocolMetricsSender = ChannelSender<(usize, ProtocolMetrics)>;
pub type ExecutorMetricsReceiver = ChannelReceiver<(usize, ExecutorMetrics)>;
pub type ExecutorMetricsSender = ChannelSender<(usize, ExecutorMetrics)>;
// 1. workers receive messages from clients
pub type ClientToWorkers = pool::ToPool<(Option<Dot>, Command)>;
impl pool::PoolIndex for (Option<Dot>, Command) {
fn index(&self) -> Option<(usize, usize)> {
// if there's a `Dot`, then the protocol is leaderless; otherwise, it is
// leader-based and the command should always be forwarded to the leader
// worker
self.0
.as_ref()
.map(worker_dot_index_shift)
// no necessary reserve if there's a leader
.unwrap_or_else(|| worker_index_no_shift(LEADER_WORKER_INDEX))
}
}
// 2. workers receive messages from readers
pub type ReaderToWorkers<P> =
pool::ToPool<(ProcessId, ShardId, <P as Protocol>::Message)>;
// The following allows e.g. (ProcessId, ShardId, <P as Protocol>::Message) to
// be `ToPool::forward`
impl<A> pool::PoolIndex for (ProcessId, ShardId, A)
where
A: MessageIndex,
{
fn index(&self) -> Option<(usize, usize)> {
self.2.index()
}
}
// 3. workers receive messages from the periodic-events task
// - this message can either be a periodic event or
// - an inspect function that takes a reference to the protocol state and
//   returns a boolean; this boolean is then sent through the `ChannelSender`
//   (this is useful for e.g. testing)
#[derive(Clone)]
pub enum FromPeriodicMessage<P: Protocol, R> {
    Event(P::PeriodicEvent),
    Inspect(fn(&P) -> R, ChannelSender<R>),
}
// manual impl: function pointers and senders are not usefully debuggable, so
// only `Event` payloads are shown
impl<P, R> fmt::Debug for FromPeriodicMessage<P, R>
where
    P: Protocol,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Self::Event(e) = self {
            write!(f, "FromPeriodicMessage::Event({:?})", e)
        } else {
            write!(f, "FromPeriodicMessage::Inspect")
        }
    }
}
pub type PeriodicToWorkers<P, R> = pool::ToPool<FromPeriodicMessage<P, R>>;
// The following allows e.g. <P as Protocol>::Periodic to be `ToPool::forward`
impl<P, R> pool::PoolIndex for FromPeriodicMessage<P, R>
where
    P: Protocol,
{
    fn index(&self) -> Option<(usize, usize)> {
        match self {
            // inspect requests are broadcast to every worker
            Self::Inspect(_, _) => None,
            // periodic events carry their own routing information
            Self::Event(e) => MessageIndex::index(e),
        }
    }
}
// 4. the worker `GC_WORKER_INDEX` receives executed notification messages from
// executors
pub type ExecutorsToWorkers = pool::ToPool<CommittedAndExecuted>;
impl pool::PoolIndex for CommittedAndExecuted {
    fn index(&self) -> Option<(usize, usize)> {
        // always route to the dedicated garbage-collection worker
        worker_index_no_shift(GC_WORKER_INDEX)
    }
}
// 5. executors receive messages from clients
pub type ClientToExecutors = pool::ToPool<ClientToExecutor>;
// 6. executors receive messages from workers and reader tasks
pub type ToExecutors<P> =
pool::ToPool<<<P as Protocol>::Executor as Executor>::ExecutionInfo>;
// The following allows <<P as Protocol>::Executor as Executor>::ExecutionInfo
// to be forwarded
impl<A> pool::PoolIndex for A
where
A: MessageIndex,
{
fn index(&self) -> Option<(usize, usize)> {
self.index()
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/chan.rs | fantoch/src/run/chan.rs | use crate::warn;
use color_eyre::Report;
use std::fmt::Debug;
use tokio::sync::mpsc::error::TrySendError;
use tokio::sync::mpsc::{self, Receiver, Sender};
/// Sender half of a bounded channel; the optional `name` identifies the
/// channel in "channel full" warnings.
#[derive(Debug)]
pub struct ChannelSender<M> {
    name: Option<String>,
    sender: Sender<M>,
}
/// Receiver half of a bounded channel.
#[derive(Debug)]
pub struct ChannelReceiver<M> {
    receiver: Receiver<M>,
}
/// Creates a bounded channel with capacity `channel_buffer_size`, returning
/// its (initially unnamed) sender and its receiver.
pub fn channel<M>(
    channel_buffer_size: usize,
) -> (ChannelSender<M>, ChannelReceiver<M>) {
    let (sender, receiver) = mpsc::channel(channel_buffer_size);
    let tx = ChannelSender { name: None, sender };
    let rx = ChannelReceiver { receiver };
    (tx, rx)
}
impl<M> ChannelSender<M>
where
    M: Debug + 'static,
{
    /// Labels this sender; the label shows up in "channel full" warnings.
    pub fn set_name<S: Into<String>>(&mut self, name: S) {
        self.name = Some(name.into());
    }
    /// Sends `value`, first attempting a non-blocking send and only awaiting
    /// (after logging a warning) when the channel is at capacity.
    pub async fn send(&mut self, value: M) -> Result<(), Report> {
        let value = match self.sender.try_send(value) {
            // fast path: the message was accepted without blocking
            Ok(()) => return Ok(()),
            // channel at capacity: recover the value and fall through to a
            // blocking send below
            Err(TrySendError::Full(value)) => value,
            // any other error is upstreamed
            Err(e) => return Err(Report::msg(e.to_string())),
        };
        if let Some(name) = &self.name {
            warn!("named channel {} is full", name);
        } else {
            warn!("unnamed channel is full");
        }
        self.sender
            .send(value)
            .await
            .map_err(|e| Report::msg(e.to_string()))
    }
}
impl<M> ChannelSender<M> {
    /// Sends `value`, panicking if the send fails (i.e. if the receiving end
    /// of the channel has been dropped); use only where a failed send means a
    /// broken invariant rather than a recoverable error.
    pub async fn blind_send(&mut self, value: M) {
        let res = self.sender.send(value).await;
        // a closed channel here is a bug in task wiring, not a runtime
        // condition (message typo fixed: was "succeeed")
        assert!(res.is_ok(), "blind_send should succeed");
    }
}
impl<M> ChannelReceiver<M> {
    /// Receives the next value; returns `None` once the channel is closed
    /// (all senders dropped) and fully drained.
    pub async fn recv(&mut self) -> Option<M> {
        self.receiver.recv().await
    }
}
// manual impl: `#[derive(Clone)]` would require `T: Clone`, but cloning a
// sender only clones the label and the underlying mpsc handle
impl<T> Clone for ChannelSender<T> {
    fn clone(&self) -> Self {
        Self {
            name: self.name.clone(),
            sender: self.sender.clone(),
        }
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/mod.rs | fantoch/src/run/mod.rs | // The architecture of this runner was thought in a way that allows all
/// protocols that implement the `Protocol` trait to achieve their maximum
/// throughput. Below we detail all key decisions.
///
/// We assume:
/// - C clients
/// - E executors
/// - P protocol processes
///
/// 1. When a client connects for the first time it registers itself in all
/// executors. This register request contains the channel in which executors
/// should write command results (potentially partial command results if the
/// command is multi-key).
///
/// 2. When a client issues a command, it registers this command in all
/// executors that are responsible for executing this command. This is how each
/// executor knows if it should notify this client when the command is executed.
/// If the command is single-key, this command only needs to be registered in
/// one executor. If multi-key, it needs to be registered in several executors
/// if the keys accessed by the command are assigned to different executors.
///
/// 3. Once the command registration occurs (and the client must wait for an ack
/// from the executor, otherwise the execution info can reach the executor
/// before the "wait for rifl" registration from the client), the command is
/// forwarded to *ONE* protocol process (even if the command is multi-key). This
/// single protocol process *needs to* be chosen by looking at the message
/// identifier `Dot`. Using the keys being accessed by the command will not work
/// for all cases, for example, when recovering and the payload is not known, we
/// only have access to a `noOp` meaning that we would need to broadcast to all
/// processes, which would be tricky to get correctly. In particular,
/// when the command is being submitted, its `Dot` has not been computed yet. So
/// the idea here is for parallel protocols to have the `DotGen` outside and
/// once the `Dot` is computed, the submit is forwarded to the correct protocol
/// process. For maximum parallelism, this generator can live in the clients and
/// have a lock-free implementation (see `AtomicIdGen`).
///
/// 4. When the protocol process receives the new command from a client it does
/// whatever is specified in the `Protocol` trait, which may include sending
/// messages to other replicas/nodes, which leads to point 5.
///
/// 5. When a message is received from other replicas, the same forward function
/// from point 3. is used to select the protocol process that is responsible for
/// handling that message. This suggests a message should define which `Dot` it
/// refers to. This is achieved through the `MessageDot` trait.
///
/// 6. Everytime a message is handled in a protocol process, the process checks
/// if it has new execution info. If so, it forwards each execution info to the
/// responsible executor. This suggests that execution info should define to
/// which key it refers to. This is achieved through the `MessageKey` trait.
///
/// 7. When execution info is handled in an executor, the executor may have new
/// (potentially partial if the executor is parallel) command results. If the
/// command was previously registered by some client, the result is forwarded to
/// such client.
///
/// 8. When command results are received by a client, they may have to be
/// aggregated in case the executor is parallel. Once the full command result is
/// complete, the notification is sent to the actual client.
///
/// Other notes:
/// - the runner allows `Protocol` workers to share state; however, it assumes
/// that `Executor` workers never do
// This module contains the "runner" prelude.
mod prelude;
// This module contains the definition of `ToPool`.
mod pool;
// This module contains the common read-write (+serde) utilities.
pub mod rw;
// This module contains the definition of `ChannelSender` and `ChannelReceiver`.
pub mod chan;
// This module contains the implementation of client-side and server-side logic.
pub mod task;
/// Number of times a connection attempt is retried before giving up.
const CONNECT_RETRIES: usize = 100;
use crate::client::Workload;
use crate::config::Config;
use crate::executor::Executor;
use crate::hash_map::HashMap;
use crate::id::{AtomicDotGen, ClientId, ProcessId, ShardId};
use crate::info;
use crate::protocol::Protocol;
use color_eyre::Report;
use futures::stream::{FuturesUnordered, StreamExt};
use prelude::*;
use std::fmt::Debug;
use std::net::IpAddr;
use std::sync::Arc;
use std::time::Duration;
use tokio::net::ToSocketAddrs;
use tokio::sync::Semaphore;
/// Starts a process and runs it until its worker tasks finish.
///
/// Thin wrapper around `process_with_notify_and_inspect` for callers that do
/// not need the "connected" notification nor worker inspection: it passes a
/// fresh semaphore (whose permit nobody awaits) and no inspect channel.
pub async fn process<P, A>(
    process_id: ProcessId,
    shard_id: ShardId,
    sorted_processes: Option<Vec<(ProcessId, ShardId)>>,
    ip: IpAddr,
    port: u16,
    client_port: u16,
    addresses: Vec<(A, Option<Duration>)>,
    config: Config,
    tcp_nodelay: bool,
    tcp_buffer_size: usize,
    tcp_flush_interval: Option<Duration>,
    process_channel_buffer_size: usize,
    client_channel_buffer_size: usize,
    workers: usize,
    executors: usize,
    multiplexing: usize,
    execution_log: Option<String>,
    ping_interval: Option<Duration>,
    metrics_file: Option<String>,
) -> Result<(), Report>
where
    P: Protocol + Send + 'static, // TODO what does this 'static do?
    A: ToSocketAddrs + Debug + Clone,
{
    // create semaphore for callers that don't care about the connected
    // notification
    let semaphore = Arc::new(Semaphore::new(0));
    process_with_notify_and_inspect::<P, A, ()>(
        process_id,
        shard_id,
        sorted_processes,
        ip,
        port,
        client_port,
        addresses,
        config,
        tcp_nodelay,
        tcp_buffer_size,
        tcp_flush_interval,
        process_channel_buffer_size,
        client_channel_buffer_size,
        workers,
        executors,
        multiplexing,
        execution_log,
        ping_interval,
        metrics_file,
        semaphore,
        None,
    )
    .await
}
/// Full process startup: validates the configuration against the protocol's
/// capabilities, connects to all peers, wires readers/writers/workers/
/// executors together, then runs until all worker tasks finish.
///
/// `connected` receives one permit once startup completed (used by tests to
/// wait for all processes); `inspect_chan`, if set, lets callers run a
/// function against each worker's protocol state.
#[allow(clippy::too_many_arguments)]
async fn process_with_notify_and_inspect<P, A, R>(
    process_id: ProcessId,
    shard_id: ShardId,
    sorted_processes: Option<Vec<(ProcessId, ShardId)>>,
    ip: IpAddr,
    port: u16,
    client_port: u16,
    addresses: Vec<(A, Option<Duration>)>,
    config: Config,
    tcp_nodelay: bool,
    tcp_buffer_size: usize,
    tcp_flush_interval: Option<Duration>,
    process_channel_buffer_size: usize,
    client_channel_buffer_size: usize,
    workers: usize,
    executors: usize,
    multiplexing: usize,
    execution_log: Option<String>,
    ping_interval: Option<Duration>,
    metrics_file: Option<String>,
    connected: Arc<Semaphore>,
    inspect_chan: Option<InspectReceiver<P, R>>,
) -> Result<(), Report>
where
    P: Protocol + Send + 'static, // TODO what does this 'static do?
    A: ToSocketAddrs + Debug + Clone,
    R: Clone + Debug + Send + 'static,
{
    // panic if protocol is not parallel and we have more than one worker
    if workers > 1 && !P::parallel() {
        panic!("running non-parallel protocol with {} workers", workers);
    }
    // panic if executor is not parallel and we have more than one executor
    if executors > 1 && !P::Executor::parallel() {
        panic!("running non-parallel executor with {} executors", executors)
    }
    // panic if protocol is leaderless and there's a leader
    if P::leaderless() && config.leader().is_some() {
        panic!("running leaderless protocol with a leader");
    }
    // panic if leader-based and there's no leader
    if !P::leaderless() && config.leader().is_none() {
        panic!("running leader-based protocol without a leader");
    }
    // check ports are different
    assert!(port != client_port);
    // ---------------------
    // start process listener
    let listener = task::listen((ip, port)).await?;
    // create forward channels: reader -> workers
    let (reader_to_workers, reader_to_workers_rxs) = ReaderToWorkers::<P>::new(
        "reader_to_workers",
        process_channel_buffer_size,
        workers,
    );
    // create forward channels: worker /readers -> executors
    let (to_executors, to_executors_rxs) = ToExecutors::<P>::new(
        "to_executors",
        process_channel_buffer_size,
        executors,
    );
    // connect to all processes
    let (ips, to_writers) = task::server::connect_to_all::<A, P>(
        process_id,
        shard_id,
        config,
        listener,
        addresses,
        reader_to_workers.clone(),
        to_executors.clone(),
        CONNECT_RETRIES,
        tcp_nodelay,
        tcp_buffer_size,
        tcp_flush_interval,
        process_channel_buffer_size,
        multiplexing,
    )
    .await?;
    // get sorted processes (maybe from ping task)
    let sorted_processes = if let Some(sorted_processes) = sorted_processes {
        // in this case, we already have the sorted processes, so simply span
        // the ping task without a parent and return what we have
        task::spawn(task::server::ping::ping_task(
            ping_interval,
            process_id,
            shard_id,
            ips,
            None,
        ));
        sorted_processes
    } else {
        // when we don't have the sorted processes, spawn the ping task and ask
        // it for the sorted processes
        let to_ping = task::spawn_consumer(process_channel_buffer_size, |rx| {
            let parent = Some(rx);
            task::server::ping::ping_task(
                ping_interval,
                process_id,
                shard_id,
                ips,
                parent,
            )
        });
        ask_ping_task(to_ping).await
    };
    // check that we have n processes (all in my shard), plus one connection to
    // each other shard
    assert_eq!(
        sorted_processes.len(),
        config.n() + config.shard_count() - 1,
        "sorted processes count should be n + shards - 1"
    );
    // ---------------------
    // start client listener
    let client_listener = task::listen((ip, client_port)).await?;
    // create atomic dot generator to be used by clients in case the protocol is
    // leaderless:
    // - leader-based protocols like paxos shouldn't use this and the fact that
    //   there's no `Dot` will make new client commands always be forwarded to
    //   the leader worker (in case there's more than one worker); see
    //   `LEADER_WORKER_INDEX` in FPaxos implementation
    let atomic_dot_gen = if P::leaderless() {
        let atomic_dot_gen = AtomicDotGen::new(process_id);
        Some(atomic_dot_gen)
    } else {
        None
    };
    // create forward channels: periodic task -> workers
    let (periodic_to_workers, periodic_to_workers_rxs) = PeriodicToWorkers::new(
        "periodic_to_workers",
        process_channel_buffer_size,
        workers,
    );
    // create forward channels: executors -> workers
    let (executors_to_workers, executors_to_workers_rxs) =
        ExecutorsToWorkers::new(
            "executors_to_workers",
            process_channel_buffer_size,
            workers,
        );
    // create forward channels: client -> workers
    let (client_to_workers, client_to_workers_rxs) = ClientToWorkers::new(
        "client_to_workers",
        client_channel_buffer_size,
        workers,
    );
    // create forward channels: client -> executors
    let (client_to_executors, client_to_executors_rxs) = ClientToExecutors::new(
        "client_to_executors",
        client_channel_buffer_size,
        executors,
    );
    // start client listener
    task::server::client::start_listener(
        process_id,
        shard_id,
        client_listener,
        atomic_dot_gen,
        client_to_workers,
        client_to_executors,
        tcp_nodelay,
        client_channel_buffer_size,
    );
    // maybe create metrics logger
    let (worker_to_metrics_logger, executor_to_metrics_logger) =
        if let Some(metrics_file) = metrics_file {
            let (worker_to_metrics_logger, from_workers) =
                chan::channel(process_channel_buffer_size);
            let (executor_to_metrics_logger, from_executors) =
                chan::channel(process_channel_buffer_size);
            task::spawn(task::server::metrics_logger::metrics_logger_task(
                metrics_file,
                from_workers,
                from_executors,
            ));
            (
                Some(worker_to_metrics_logger),
                Some(executor_to_metrics_logger),
            )
        } else {
            (None, None)
        };
    // create process
    let (mut process, process_events) = P::new(process_id, shard_id, config);
    // discover processes
    let (connect_ok, closest_shard_process) =
        process.discover(sorted_processes);
    assert!(connect_ok, "process should have discovered successfully");
    // spawn periodic task
    task::spawn(task::server::periodic::periodic_task(
        process_events,
        periodic_to_workers,
        inspect_chan,
    ));
    // create mapping from shard id to writers
    let mut shard_writers = HashMap::with_capacity(closest_shard_process.len());
    for (shard_id, peer_id) in closest_shard_process {
        let writers = to_writers
            .get(&peer_id)
            .expect("closest shard process should be connected")
            .clone();
        shard_writers.insert(shard_id, writers);
    }
    // start executors
    task::server::executor::start_executors::<P>(
        process_id,
        shard_id,
        config,
        to_executors_rxs,
        client_to_executors_rxs,
        executors_to_workers,
        shard_writers,
        to_executors.clone(),
        executor_to_metrics_logger,
    );
    // start process workers
    let handles = task::server::process::start_processes::<P, R>(
        process,
        reader_to_workers_rxs,
        client_to_workers_rxs,
        periodic_to_workers_rxs,
        executors_to_workers_rxs,
        to_writers,
        reader_to_workers,
        to_executors,
        process_channel_buffer_size,
        execution_log,
        worker_to_metrics_logger,
    );
    info!("process {} started", process_id);
    // notify parent that we're connected
    connected.add_permits(1);
    // run until every worker task finishes, upstreaming any join error
    let mut handles = handles.into_iter().collect::<FuturesUnordered<_>>();
    while let Some(join_result) = handles.next().await {
        let join_result = join_result?;
        info!("process ended {:?}", join_result);
    }
    Ok(())
}
/// Runs a set of closed- or open-loop clients (identified by `ids`) against
/// the processes at `addresses`, until each client completes its workload.
///
/// Thin wrapper around `task::client::client` that fixes the number of
/// connection retries to `CONNECT_RETRIES`.
pub async fn client<A>(
    ids: Vec<ClientId>,
    addresses: Vec<A>,
    interval: Option<Duration>,
    workload: Workload,
    batch_max_size: usize,
    batch_max_delay: Duration,
    tcp_nodelay: bool,
    channel_buffer_size: usize,
    status_frequency: Option<usize>,
    metrics_file: Option<String>,
) -> Result<(), Report>
where
    A: ToSocketAddrs + Clone + Debug + Send + 'static + Sync,
{
    task::client::client(
        ids,
        addresses,
        interval,
        workload,
        batch_max_size,
        batch_max_delay,
        CONNECT_RETRIES,
        tcp_nodelay,
        channel_buffer_size,
        status_frequency,
        metrics_file,
    )
    .await
}
/// Asks the ping task for the list of processes sorted by distance,
/// panicking if the request or the reply fails.
async fn ask_ping_task(
    mut to_ping: SortedProcessesSender,
) -> Vec<(ProcessId, ShardId)> {
    // hand the ping task a reply channel and wait for its answer
    let (tx, mut rx) = chan::channel(1);
    match to_ping.send(tx).await {
        Ok(()) => (),
        Err(e) => panic!("error sending request to ping task: {:?}", e),
    }
    match rx.recv().await {
        Some(sorted_processes) => sorted_processes,
        None => panic!("error receiving reply from ping task"),
    }
}
// TODO this is `pub` so that `fantoch_ps` can run these `run_test` for the
// protocols implemented
pub mod tests {
use super::*;
use crate::protocol::ProtocolMetricsKind;
use crate::util;
use rand::Rng;
    // sanity check of the "connected" notification mechanism used by
    // `process_with_notify_and_inspect`: a permit released by a spawned task
    // unblocks a waiter on the shared semaphore
    #[tokio::test]
    async fn test_semaphore() {
        // create semaphore
        let semaphore = Arc::new(Semaphore::new(0));
        let task_semaphore = semaphore.clone();
        tokio::spawn(async move {
            println!("[task] will sleep for 5 seconds");
            tokio::time::sleep(Duration::from_secs(5)).await;
            println!("[task] semaphore released!");
            task_semaphore.add_permits(1);
        });
        println!("[main] will block on the semaphore");
        let _ = semaphore.acquire().await;
        println!("[main] semaphore acquired!");
    }
    // inspect function used by the run tests: returns how many commands this
    // protocol worker has recorded as stable (i.e. garbage-collectable);
    // missing metric counts as zero
    #[allow(dead_code)]
    fn inspect_stable_commands<P>(worker: &P) -> usize
    where
        P: Protocol,
    {
        worker
            .metrics()
            .get_aggregated(ProtocolMetricsKind::Stable)
            .cloned()
            .unwrap_or_default() as usize
    }
#[test]
fn run_basic_test() {
use crate::client::KeyGen;
// config
let n = 3;
let f = 1;
let mut config = Config::new(n, f);
// make sure stability is running
config.set_gc_interval(Duration::from_millis(100));
// there's a single shard
config.set_shard_count(1);
// create workload
let keys_per_command = 1;
let shard_count = 1;
let conflict_rate = 50;
let pool_size = 1;
let key_gen = KeyGen::ConflictPool {
conflict_rate,
pool_size,
};
let commands_per_client = 100;
let payload_size = 1;
let workload = Workload::new(
shard_count,
key_gen,
keys_per_command,
commands_per_client,
payload_size,
);
let clients_per_process = 3;
let workers = 2;
let executors = 2;
let extra_run_time = Some(Duration::from_secs(5));
// run test and get total stable commands
let total_stable_count = tokio_test_runtime()
.block_on(
run_test_with_inspect_fun::<crate::protocol::Basic, usize>(
config,
workload,
clients_per_process,
workers,
executors,
Some(inspect_stable_commands),
extra_run_time,
),
)
.expect("run should complete successfully")
.into_iter()
.map(|(_, stable_counts)| stable_counts.into_iter().sum::<usize>())
.sum::<usize>();
// get that all commands stablized at all processes
let total_commands = n * clients_per_process * commands_per_client;
assert!(total_stable_count == total_commands * n);
}
    /// Builds the multi-threaded tokio runtime used by the run tests: one
    /// worker thread per available core, each with a 32MB stack.
    pub fn tokio_test_runtime() -> tokio::runtime::Runtime {
        // create tokio runtime
        tokio::runtime::Builder::new_multi_thread()
            .worker_threads(num_cpus::get())
            .thread_stack_size(32 * 1024 * 1024) // 32MB
            .enable_io()
            .enable_time()
            .thread_name("runner")
            .build()
            .expect("tokio runtime build should work")
    }
    /// Spawns a full system (all processes and clients) on localhost, runs
    /// the given workload to completion and, if `inspect_fun` is set, returns
    /// per-process worker replies obtained by running that function against
    /// each worker's protocol state.
    pub async fn run_test_with_inspect_fun<P, R>(
        config: Config,
        workload: Workload,
        clients_per_process: usize,
        workers: usize,
        executors: usize,
        inspect_fun: Option<fn(&P) -> R>,
        extra_run_time: Option<Duration>,
    ) -> Result<HashMap<ProcessId, Vec<R>>, Report>
    where
        P: Protocol + Send + 'static,
        R: Clone + Debug + Send + 'static,
    {
        // create semaphore so that processes can notify once they're connected
        let semaphore = Arc::new(Semaphore::new(0));
        let localhost = "127.0.0.1"
            .parse::<IpAddr>()
            .expect("127.0.0.1 should be a valid ip")
        let tcp_nodelay = true;
        let tcp_buffer_size = 1024;
        let tcp_flush_interval = Some(Duration::from_millis(1));
        let process_channel_buffer_size = 10000;
        let client_channel_buffer_size = 10000;
        let multiplexing = 2;
        let ping_interval = Some(Duration::from_secs(1));
        // create processes ports and client ports
        let n = config.n();
        let shard_count = config.shard_count();
        let ports: HashMap<_, _> = util::all_process_ids(shard_count, n)
            .map(|(id, _shard_id)| (id, get_available_port()))
            .collect();
        let client_ports: HashMap<_, _> = util::all_process_ids(shard_count, n)
            .map(|(id, _shard_id)| (id, get_available_port()))
            .collect();
        // create connect addresses
        let all_addresses: HashMap<_, _> = ports
            .clone()
            .into_iter()
            .map(|(process_id, port)| {
                let address = format!("localhost:{}", port);
                (process_id, address)
            })
            .collect();
        let mut inspect_channels = HashMap::new();
        // the list of all ids that we can shuffle in order to set
        // `sorted_processes`
        let mut ids: Vec<_> = util::all_process_ids(shard_count, n).collect();
        // function used to figure out which processes belong to the same
        // "region index"; there are as many region indexes as `n`; if we have
        // n = 5, and shard_count = 3, we have:
        // - region index 1: processes 1, 6, 11
        // - region index 2: processes 2, 7, 12
        // - and so on..
        let region_index = |process_id| {
            let mut index = process_id;
            let n = config.n() as u8;
            while index > n {
                index -= n;
            }
            index
        };
        // all peers in the same shard as (`process_id`, `shard_id`), self
        // excluded
        let same_shard_id_but_self =
            |process_id: ProcessId,
             shard_id: ShardId,
             ids: &Vec<(ProcessId, ShardId)>| {
                ids.clone().into_iter().filter(
                    move |(peer_id, peer_shard_id)| {
                        // keep all that have the same shard id (that are not
                        // self)
                        *peer_id != process_id && *peer_shard_id == shard_id
                    },
                )
            };
        // all peers in the same region as `process_id`, self excluded
        let same_region_index_but_self =
            |process_id: ProcessId, ids: &Vec<(ProcessId, ShardId)>| {
                // compute index
                let index = region_index(process_id);
                ids.clone().into_iter().filter(move |(peer_id, _)| {
                    // keep all that have the same index (that are not self)
                    *peer_id != process_id && region_index(*peer_id) == index
                })
            };
        // compute the set of processes we should connect to
        for (process_id, shard_id) in util::all_process_ids(shard_count, n) {
            // the following shuffle is here in case these `connect_to`
            // processes are used to compute `sorted_processes`
            use rand::seq::SliceRandom;
            ids.shuffle(&mut rand::thread_rng());
            // start `connect_to` with the processes within the same region
            // (i.e. one connection to each shard)
            let mut connect_to: Vec<_> =
                same_region_index_but_self(process_id, &ids).collect();
            // make self the first element
            let myself = (process_id, shard_id);
            connect_to.insert(0, myself);
            // add the missing processes from my shard (i.e. the processes from
            // my shard in the other regions)
            connect_to
                .extend(same_shard_id_but_self(process_id, shard_id, &ids));
            let sorted_processes = if shard_count > 1 {
                // don't set sorted processes in partial replication (no reason,
                // just for testing)
                None
            } else {
                // set sorted processes in full replication (no reason, just for
                // testing)
                Some(connect_to.clone())
            };
            // get ports
            let port = *ports.get(&process_id).unwrap();
            let client_port = *client_ports.get(&process_id).unwrap();
            // compute addresses
            let addresses = all_addresses
                .clone()
                .into_iter()
                .filter(|(peer_id, _)| {
                    // connect to all in `connect_to` but self
                    connect_to
                        .iter()
                        .any(|(to_connect_id, _)| to_connect_id == peer_id)
                        && *peer_id != process_id
                })
                .map(|(process_id, address)| {
                    let delay = if process_id % 2 == 1 {
                        // add 0 delay to odd processes
                        Some(Duration::from_secs(0))
                    } else {
                        None
                    };
                    (address, delay)
                })
                .collect();
            // execution log
            let execution_log = Some(format!("p{}.execution_log", process_id));
            // create inspect channel and save sender side
            let (inspect_tx, inspect) = chan::channel(1);
            inspect_channels.insert(process_id, inspect_tx);
            // spawn processes
            let metrics_file = format!(".metrics_process_{}", process_id);
            tokio::task::spawn(
                process_with_notify_and_inspect::<P, String, R>(
                    process_id,
                    shard_id,
                    sorted_processes,
                    localhost,
                    port,
                    client_port,
                    addresses,
                    config,
                    tcp_nodelay,
                    tcp_buffer_size,
                    tcp_flush_interval,
                    process_channel_buffer_size,
                    client_channel_buffer_size,
                    workers,
                    executors,
                    multiplexing,
                    execution_log,
                    ping_interval,
                    Some(metrics_file),
                    semaphore.clone(),
                    Some(inspect),
                ),
            );
        }
        // wait that all processes are connected
        println!("[main] waiting that processes are connected");
        for _ in util::all_process_ids(shard_count, n) {
            let _ = semaphore.acquire().await;
        }
        println!("[main] processes are connected");
        let clients_per_process = clients_per_process as u64;
        let client_handles: Vec<_> = util::all_process_ids(shard_count, n)
            .map(|(process_id, _)| {
                // if n = 3, this gives the following:
                // id = 1: [1, 2, 3, 4]
                // id = 2: [5, 6, 7, 8]
                // id = 3: [9, 10, 11, 12]
                let client_id_start =
                    ((process_id - 1) as u64 * clients_per_process) + 1;
                let client_id_end = process_id as u64 * clients_per_process;
                let client_ids = (client_id_start..=client_id_end).collect();
                // connect client to all processes in the same "region index"
                let addresses = same_region_index_but_self(process_id, &ids)
                    .map(|(peer_id, _)| peer_id)
                    // also connect to "self"
                    .chain(std::iter::once(process_id))
                    .map(|peer_id| {
                        let client_port = *client_ports.get(&peer_id).unwrap();
                        format!("localhost:{}", client_port)
                    })
                    .collect();
                // compute interval:
                // - if the process id is even, then issue a command every 2ms
                // - otherwise, it's a closed-loop client
                let interval = match process_id % 2 {
                    0 => Some(Duration::from_millis(2)),
                    1 => None,
                    _ => panic!("n mod 2 should be in [0,1]"),
                };
                // batching config
                let batch_max_size = 1;
                let batch_max_delay = Duration::from_millis(1);
                // spawn client
                let status_frequency = None;
                let metrics_file =
                    Some(format!(".metrics_client_{}", process_id));
                tokio::task::spawn(client(
                    client_ids,
                    addresses,
                    interval,
                    workload,
                    batch_max_size,
                    batch_max_delay,
                    tcp_nodelay,
                    client_channel_buffer_size,
                    status_frequency,
                    metrics_file,
                ))
            })
            .collect();
        // wait for all clients
        for client_handle in client_handles {
            let _ = client_handle.await.expect("client should finish");
        }
        // wait for the extra run time (if any)
        if let Some(extra_run_time) = extra_run_time {
            tokio::time::sleep(extra_run_time).await;
        }
        // inspect all processes (if there's an inspect function)
        let mut result = HashMap::new();
        if let Some(inspect_fun) = inspect_fun {
            // create reply channel
            let (reply_chan_tx, mut reply_chan) = chan::channel(1);
            // contact all processes
            for (process_id, mut inspect_tx) in inspect_channels {
                inspect_tx
                    .blind_send((inspect_fun, reply_chan_tx.clone()))
                    .await;
                let replies =
                    gather_workers_replies(workers, &mut reply_chan).await;
                result.insert(process_id, replies);
            }
        }
        Ok(result)
    }
/// Collects exactly one reply per worker from `reply_chan`.
///
/// Blocks until `workers` replies have been received. Panics if the channel
/// is closed before all replies arrive (that would mean a worker died).
async fn gather_workers_replies<R>(
    workers: usize,
    reply_chan: &mut chan::ChannelReceiver<R>,
) -> Vec<R> {
    let mut replies = Vec::with_capacity(workers);
    for _ in 0..workers {
        let reply = reply_chan
            .recv()
            .await
            // fixed expect message: replies come from each worker, not
            // specifically from "process 1"
            .expect("reply from worker should work");
        replies.push(reply);
    }
    replies
}
// adapted from: https://github.com/rust-lang-nursery/rust-cookbook/issues/500
/// Finds a TCP port in the non-privileged range that is currently free on
/// localhost.
///
/// Loops until a random port in `1025..=65535` can be bound. Note the
/// inherent TOCTOU race: the port may be taken by someone else between this
/// check and its actual use (acceptable for tests).
fn get_available_port() -> u16 {
    loop {
        // `..=` so that the last valid port (65535) can also be picked; the
        // previous exclusive range could never return it
        let port = rand::thread_rng().gen_range(1025..=65535);
        if port_is_available(port) {
            return port;
        }
    }
}
/// Checks whether `port` can currently be bound on the loopback interface.
/// The probe listener is dropped immediately, releasing the port.
fn port_is_available(port: u16) -> bool {
    let bind_attempt = std::net::TcpListener::bind(("127.0.0.1", port));
    bind_attempt.is_ok()
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/pool.rs | fantoch/src/run/pool.rs | use crate::run::chan::{self, ChannelReceiver, ChannelSender};
use color_eyre::Report;
use std::fmt::Debug;
/// Trait implemented by messages that can be routed to a specific worker in a
/// `ToPool`.
pub trait PoolIndex {
    /// Returns `Some((reserved, index))` to target a single worker (see
    /// `ToPool::do_index` for how the pair maps to a worker position), or
    /// `None` to have the message broadcast to every worker.
    fn index(&self) -> Option<(usize, usize)>;
}
/// Sender-side handle to a pool of workers: one channel sender per worker.
#[derive(Clone)]
pub struct ToPool<M> {
    // one sender per pool worker; the worker at position `i` receives the
    // messages routed to index `i`
    pool: Vec<ChannelSender<M>>,
}
impl<M> ToPool<M>
where
    M: Clone + Debug + 'static,
{
    /// Creates a pool with size `pool_size`.
    pub fn new<S>(
        name: S,
        channel_buffer_size: usize,
        pool_size: usize,
    ) -> (Self, Vec<ChannelReceiver<M>>)
    where
        S: Into<String> + Debug,
    {
        let mut pool = Vec::with_capacity(pool_size);
        // create a channel per pool worker:
        // - save the sender-side so it can be used to forward messages to
        //   the pool
        // - return the receiver-side so it can be used by the pool workers
        let rxs = (0..pool_size)
            .map(|index| {
                let (mut tx, rx) = chan::channel(channel_buffer_size);
                // name each sender with the pool name and the worker index
                // (useful for debugging)
                tx.set_name(format!("{:?}_{}", name, index));
                pool.push(tx);
                rx
            })
            .collect();
        // create pool
        let to_pool = Self { pool };
        (to_pool, rxs)
    }
    /// Returns the size of the pool.
    pub fn pool_size(&self) -> usize {
        self.pool.len()
    }
    /// Checks whether `msg` targets exactly the worker with `worker_index`;
    /// broadcast messages (i.e. `index() == None`) return `false`.
    pub fn only_to_self(&self, msg: &M, worker_index: usize) -> bool
    where
        M: PoolIndex,
    {
        match self.index(msg) {
            Some(index) => index == worker_index,
            None => false,
        }
    }
    /// Forwards message `msg` to the pool worker with id `msg.index() %
    /// pool_size`.
    pub async fn forward(&mut self, msg: M) -> Result<(), Report>
    where
        M: PoolIndex,
    {
        let index = self.index(&msg);
        self.do_forward(index, msg).await
    }
    /// Forwards message `map(value)` to the pool worker with id `value.index()
    /// % pool_size`.
    ///
    /// Note that the routing index comes from `value` (before mapping), not
    /// from the mapped message.
    pub async fn forward_map<V, F>(
        &mut self,
        value: V,
        map: F,
    ) -> Result<(), Report>
    where
        V: PoolIndex,
        F: FnOnce(V) -> M,
    {
        let index = self.index(&value);
        self.do_forward(index, map(value)).await
    }
    /// Forwards a message to the pool.
    pub async fn broadcast(&mut self, msg: M) -> Result<(), Report>
    where
        M: Clone,
    {
        if self.pool.len() == 1 {
            // single worker: no need to clone the message
            self.pool[0].send(msg).await
        } else {
            for tx in self.pool.iter_mut() {
                tx.send(msg.clone()).await?;
            }
            Ok(())
        }
    }
    // Computes the destination worker for `msg`, if it targets one.
    fn index<T>(&self, msg: &T) -> Option<usize>
    where
        T: PoolIndex,
    {
        msg.index().map(|(reserved, index)| {
            Self::do_index(reserved, index, self.pool_size())
        })
    }
    // Maps a `(reserved, index)` pair to a concrete worker position: the
    // first `reserved` workers are skipped and `index` is spread (mod) over
    // the remaining ones.
    fn do_index(reserved: usize, index: usize, pool_size: usize) -> usize {
        if reserved < pool_size {
            // compute the actual index only in the remaining indexes
            reserved + (index % (pool_size - reserved))
        } else {
            // if there's as many reserved (or more) as workers in the
            // pool, then ignore reservation
            index % pool_size
        }
    }
    // Sends `msg` to the worker at `index`, or broadcasts it when no index
    // was specified.
    async fn do_forward(
        &mut self,
        index: Option<usize>,
        msg: M,
    ) -> Result<(), Report> {
        // send to the correct worker if an index was specified. otherwise, send
        // to all workers.
        match index {
            Some(index) => self.pool[index].send(msg).await,
            None => self.broadcast(msg).await,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    fn do_index(reserved: usize, index: usize, pool_size: usize) -> usize {
        ToPool::<()>::do_index(reserved, index, pool_size)
    }

    #[test]
    fn index() {
        // each case: (pool_size, reserved, expected worker for index 0..=3)
        let cases = vec![
            // pool size 1: everything maps to the single worker, no matter
            // how many indexes are reserved
            (1, 0, [0, 0, 0, 0]),
            (1, 1, [0, 0, 0, 0]),
            (1, 2, [0, 0, 0, 0]),
            (1, 3, [0, 0, 0, 0]),
            // pool size 2, reserved = 0: simple modulo
            (2, 0, [0, 1, 0, 1]),
            // pool size 2, reserved = 1: all requests go to worker 1
            (2, 1, [1, 1, 1, 1]),
            // pool size 2, reserved >= 2: reservation ignored, simple modulo
            (2, 2, [0, 1, 0, 1]),
            (2, 3, [0, 1, 0, 1]),
            // pool size 3, reserved = 0: simple modulo
            (3, 0, [0, 1, 2, 0]),
            // pool size 3, reserved = 1: modulo between workers 1 and 2
            (3, 1, [1, 2, 1, 2]),
            // pool size 3, reserved = 2: all requests go to worker 2
            (3, 2, [2, 2, 2, 2]),
            // pool size 3, reserved >= 3: reservation ignored, simple modulo
            (3, 3, [0, 1, 2, 0]),
            (3, 4, [0, 1, 2, 0]),
        ];
        for (pool_size, reserved, expected) in cases {
            for (index, worker) in expected.iter().enumerate() {
                assert_eq!(
                    do_index(reserved, index, pool_size),
                    *worker,
                    "reserved = {} | index = {} | pool size = {}",
                    reserved,
                    index,
                    pool_size
                );
            }
        }
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/rw/connection.rs | fantoch/src/run/rw/connection.rs | use super::Rw;
use std::net::IpAddr;
use std::ops::{Deref, DerefMut};
use tokio::net::TcpStream;
use tokio::time::Duration;
/// A buffered, length-delimited connection over a `TcpStream`.
#[derive(Debug)]
pub struct Connection {
    // peer ip address, if it could be retrieved from the underlying stream
    ip_addr: Option<IpAddr>,
    // optional per-connection delay, set externally via `set_delay`
    delay: Option<Duration>,
    // buffered + framed reader/writer over the TCP stream
    rw: Rw<TcpStream>,
}
impl Connection {
    /// Wraps a `TcpStream` into a buffered, length-delimited connection.
    ///
    /// Records the peer address (if available), applies the TCP_NODELAY
    /// setting, and buffers reads/writes with `tcp_buffer_size` bytes each.
    pub fn new(
        stream: TcpStream,
        tcp_nodelay: bool,
        tcp_buffer_size: usize,
    ) -> Self {
        // remember who we're talking to (may fail, e.g. stream already closed)
        let peer_ip = match stream.peer_addr() {
            Ok(peer_addr) => Some(peer_addr.ip()),
            Err(_) => None,
        };
        // apply socket options before wrapping the stream
        configure(&stream, tcp_nodelay);
        Self {
            ip_addr: peer_ip,
            delay: None,
            rw: Rw::from(tcp_buffer_size, tcp_buffer_size, stream),
        }
    }

    /// Returns the peer's ip address, if known.
    pub fn ip_addr(&self) -> Option<IpAddr> {
        self.ip_addr
    }

    /// Returns the delay configured for this connection, if any.
    pub fn delay(&self) -> Option<Duration> {
        self.delay
    }

    /// Sets a delay for this connection.
    pub fn set_delay(&mut self, delay: Duration) {
        self.delay = Some(delay)
    }
}
/// Applies socket options to a freshly created stream.
fn configure(stream: &TcpStream, tcp_nodelay: bool) {
    // set TCP_NODELAY
    stream
        .set_nodelay(tcp_nodelay)
        .expect("setting TCP_NODELAY should work");
    // NOTE: growing SO_RCVBUF/SO_SNDBUF to the configured tcp buffer size was
    // considered here but is currently disabled; only TCP_NODELAY is applied.
}
// allow a `Connection` to be used directly as an `Rw<TcpStream>` (recv/send/
// write/flush calls deref to the inner `rw`)
impl Deref for Connection {
    type Target = Rw<TcpStream>;
    fn deref(&self) -> &Self::Target {
        &self.rw
    }
}
// mutable counterpart of the `Deref` impl above
impl DerefMut for Connection {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.rw
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/rw/mod.rs | fantoch/src/run/rw/mod.rs | // This module contains the definition of `Connection`.
mod connection;
// Re-exports.
pub use connection::Connection;
use crate::warn;
use bytes::{Bytes, BytesMut};
use color_eyre::eyre::{Report, WrapErr};
use futures::sink::{Sink, SinkExt};
use futures::stream::StreamExt;
use serde::de::DeserializeOwned;
use serde::Serialize;
use std::pin::Pin;
use tokio::io::{AsyncRead, AsyncWrite, BufStream};
use tokio_util::codec::{Framed, LengthDelimitedCodec};
/// Buffered reader/writer that delimits frames using a length header; each
/// frame is (de)serialized with bincode by the `recv`/`send`/`write` methods.
/// TODO take a look at async_bincode: https://docs.rs/async-bincode/0.5.1/async_bincode/index.html
#[derive(Debug)]
pub struct Rw<S> {
    // underlying stream, buffered (`BufStream`) and framed with a
    // length-delimited codec
    rw: Framed<BufStream<S>, LengthDelimitedCodec>,
}
impl<S> Rw<S>
where
    S: AsyncWrite + AsyncRead + Unpin,
{
    /// Wraps `rw` with read/write buffering (with the given capacities) and
    /// length-delimited framing.
    pub fn from(reader_capacity: usize, writer_capacity: usize, rw: S) -> Self {
        // buffer rw
        let rw = BufStream::with_capacity(reader_capacity, writer_capacity, rw);
        // frame rw
        let rw = Framed::new(rw, LengthDelimitedCodec::new());
        Self { rw }
    }
    /// Receives the next frame and deserializes it.
    ///
    /// Returns `None` both when the stream is closed and when a read error
    /// occurs (the error itself is only logged).
    pub async fn recv<V>(&mut self) -> Option<V>
    where
        V: DeserializeOwned,
    {
        match self.rw.next().await {
            Some(Ok(bytes)) => {
                // if it is, and not an error, deserialize it
                let value = deserialize(bytes);
                Some(value)
            }
            Some(Err(e)) => {
                warn!("[rw] error while reading from stream: {:?}", e);
                None
            }
            None => None,
        }
    }
    /// Serializes `value` and sends it through the sink (which, per the
    /// `SinkExt::send` contract, also flushes).
    pub async fn send<V>(&mut self, value: &V) -> Result<(), Report>
    where
        V: Serialize,
    {
        let bytes = serialize(value);
        self.rw
            .send(bytes)
            .await
            .wrap_err("error while sending to sink")
    }
    /// Serializes `value` and enqueues it WITHOUT flushing; pair with
    /// `flush` to actually push enqueued frames out.
    pub async fn write<V>(&mut self, value: &V) -> Result<(), Report>
    where
        V: Serialize,
    {
        let bytes = serialize(value);
        // wait until the sink is ready to accept a new item
        futures::future::poll_fn(|cx| Pin::new(&mut self.rw).poll_ready(cx))
            .await
            .wrap_err("error while polling sink ready")?;
        // enqueue the item (no flush)
        Pin::new(&mut self.rw)
            .start_send(bytes)
            .wrap_err("error while starting send to sink")
    }
    /// Flushes frames previously enqueued with `write`.
    pub async fn flush(&mut self) -> Result<(), Report> {
        futures::future::poll_fn(|cx| Pin::new(&mut self.rw).poll_flush(cx))
            .await
            .wrap_err("error while flushing sink")
    }
}
/// Deserializes a received frame; panics if the bytes are not a valid bincode
/// encoding of `V`.
fn deserialize<V>(bytes: BytesMut) -> V
where
    V: DeserializeOwned,
{
    let decoded = bincode::deserialize(&bytes);
    decoded.expect("[rw] deserialize should work")
}
/// Serializes a value into a frame to be sent on the stream; panics if
/// serialization fails.
fn serialize<V>(value: &V) -> Bytes
where
    V: Serialize,
{
    // TODO can we avoid `Bytes`?
    let encoded =
        bincode::serialize(value).expect("[rw] serialize should work");
    Bytes::from(encoded)
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/task/util.rs | fantoch/src/run/task/util.rs | use crate::id::ClientId;
use color_eyre::Report;
use serde::Serialize;
use tokio::time::{Duration, Instant};
/// Computes the instant that is `delay` from now; panics if that instant is
/// not representable.
pub fn deadline(delay: Duration) -> Instant {
    let now = Instant::now();
    now.checked_add(delay).expect("deadline should exist")
}
/// Builds a textual representation of a sequence of client ids, joining them
/// with `-` (e.g. ids `1, 2, 3` become `"1-2-3"`).
///
/// Takes a slice (instead of `&Vec<_>`) so any contiguous sequence of ids can
/// be passed; existing `&Vec<ClientId>` call sites keep working via deref
/// coercion.
pub fn ids_repr(client_ids: &[ClientId]) -> String {
    client_ids
        .iter()
        .map(|id| id.to_string())
        .collect::<Vec<_>>()
        .join("-")
}
/// Serializes `data` with bincode, compresses it with gzip, and writes it to
/// `file` (created if missing, truncated otherwise).
///
/// # Errors
/// Returns an error if the file cannot be created or if serialization fails.
/// (The previous version discarded the io error with `.ok()` and then
/// panicked, despite declaring a `Result` return type; errors are now
/// propagated to the caller.)
pub fn serialize_and_compress<T: Serialize>(
    data: &T,
    file: &str,
) -> Result<(), Report> {
    // if the file does not exist it will be created, otherwise truncated
    let buffer = std::io::BufWriter::new(std::fs::File::create(file)?);
    // compress using gzip
    let writer =
        flate2::write::GzEncoder::new(buffer, flate2::Compression::best());
    // serialize directly into the compressed writer; the encoder finishes the
    // gzip stream when dropped at the end of this call
    bincode::serialize_into(writer, data)?;
    Ok(())
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/task/mod.rs | fantoch/src/run/task/mod.rs | mod util;
// This module contains server's side logic.
pub mod server;
// This module contains client's side logic.
pub mod client;
use crate::run::chan::{self, ChannelReceiver, ChannelSender};
use crate::run::rw::Connection;
use crate::{info, trace, warn};
use color_eyre::Report;
use std::fmt::Debug;
use std::future::Future;
use tokio::net::{TcpListener, TcpStream, ToSocketAddrs};
use tokio::task::JoinHandle;
use tokio::time::Duration;
/// Just a wrapper around tokio::spawn.
///
/// NOTE(review): presumably exists so callers don't depend on tokio's spawn
/// directly — confirm before relying on that.
pub fn spawn<F>(task: F) -> JoinHandle<F::Output>
where
    F: Future + Send + 'static,
    F::Output: Send + 'static,
{
    tokio::spawn(task)
}
/// Spawns a single producer task, handing it the sender-end of a fresh
/// channel, and returns the receiver-end to the caller.
pub fn spawn_producer<M, F>(
    channel_buffer_size: usize,
    producer: impl FnOnce(ChannelSender<M>) -> F,
) -> ChannelReceiver<M>
where
    F: Future + Send + 'static,
    F::Output: Send + 'static,
{
    let (sender, receiver) = chan::channel(channel_buffer_size);
    // the producer task owns the sender-end
    spawn(producer(sender));
    // the caller consumes from the receiver-end
    receiver
}
/// Spawns one producer task per element of `args`, all sharing the sender-end
/// of a single channel, and returns the receiver-end to the caller.
pub fn spawn_producers<A, T, M, F>(
    channel_buffer_size: usize,
    args: T,
    producer: impl Fn(A, ChannelSender<M>) -> F,
) -> ChannelReceiver<M>
where
    T: IntoIterator<Item = A>,
    F: Future + Send + 'static,
    F::Output: Send + 'static,
{
    let (sender, receiver) = chan::channel(channel_buffer_size);
    // every producer task gets its own clone of the sender-end
    args.into_iter().for_each(|arg| {
        spawn(producer(arg, sender.clone()));
    });
    // the caller consumes from the receiver-end
    receiver
}
/// Spawns a consumer task, handing it the receiver-end of a fresh channel,
/// and returns the sender-end to the caller.
pub fn spawn_consumer<M, F>(
    channel_buffer_size: usize,
    consumer: impl FnOnce(ChannelReceiver<M>) -> F,
) -> ChannelSender<M>
where
    F: Future + Send + 'static,
    F::Output: Send + 'static,
{
    let (sender, receiver) = chan::channel(channel_buffer_size);
    // the consumer task owns the receiver-end
    spawn(consumer(receiver));
    // the caller produces into the sender-end
    sender
}
/// Spawns a task that both produces (into a 1st channel) and consumes (from a
/// 2nd channel); returns the receiver-end of the 1st channel and the
/// sender-end of the 2nd one.
pub fn spawn_producer_and_consumer<M, N, F>(
    channel_buffer_size: usize,
    task: impl FnOnce(ChannelSender<M>, ChannelReceiver<N>) -> F,
) -> (ChannelReceiver<M>, ChannelSender<N>)
where
    F: Future + Send + 'static,
    F::Output: Send + 'static,
{
    // 1st channel: the task produces, the caller consumes
    let (produce_tx, produce_rx) = chan::channel(channel_buffer_size);
    // 2nd channel: the caller produces, the task consumes
    let (consume_tx, consume_rx) = chan::channel(channel_buffer_size);
    spawn(task(produce_tx, consume_rx));
    (produce_rx, consume_tx)
}
/// Connect to some address, retrying on failure.
///
/// Up to `connect_retries` attempts are made in total, with one second
/// between attempts; the last connection error is returned if all fail.
pub async fn connect<A>(
    address: A,
    tcp_nodelay: bool,
    tcp_buffer_size: usize,
    connect_retries: usize,
) -> Result<Connection, Report>
where
    A: ToSocketAddrs + Clone + Debug,
{
    let mut failures = 0;
    loop {
        let attempt = TcpStream::connect(address.clone()).await;
        match attempt {
            Ok(stream) => {
                // connected: wrap the stream and return
                return Ok(Connection::new(
                    stream,
                    tcp_nodelay,
                    tcp_buffer_size,
                ));
            }
            Err(e) => {
                failures += 1;
                if failures >= connect_retries {
                    // too many failed attempts: give up
                    return Err(e.into());
                }
                // otherwise, log and retry after one second
                info!("failed to connect to {:?}: {}", address, e);
                info!(
                    "will try again in 1 second ({} out of {})",
                    failures, connect_retries
                );
                tokio::time::sleep(Duration::from_secs(1)).await;
            }
        }
    }
}
/// Listen on some address.
pub async fn listen<A>(address: A) -> Result<TcpListener, Report>
where
    A: ToSocketAddrs,
{
    // bind the listener, converting any io error into a `Report`
    let listener = TcpListener::bind(address).await?;
    Ok(listener)
}
/// Listen on new connections and send them to parent process.
async fn listener_task(
    listener: TcpListener,
    tcp_nodelay: bool,
    tcp_buffer_size: usize,
    mut parent: ChannelSender<Connection>,
) {
    loop {
        // wait for the next incoming connection
        let (stream, _addr) = match listener.accept().await {
            Ok(accepted) => accepted,
            Err(e) => {
                warn!("[listener] couldn't accept new connection: {:?}", e);
                continue;
            }
        };
        trace!("[listener] new connection: {:?}", _addr);
        // wrap the raw stream and hand it to the parent
        let connection = Connection::new(stream, tcp_nodelay, tcp_buffer_size);
        if let Err(e) = parent.send(connection).await {
            warn!("[listener] error sending stream to parent process: {:?}", e);
        }
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/task/server/execution_logger.rs | fantoch/src/run/task/server/execution_logger.rs | use crate::protocol::Protocol;
use crate::run::prelude::*;
use crate::run::rw::Rw;
use crate::{info, trace, warn};
use tokio::fs::File;
use tokio::time::{self, Duration};
// how often the logger's buffered output is flushed to the log file
const EXECUTION_LOGGER_FLUSH_INTERVAL: Duration = Duration::from_secs(1); // flush every second
// read/write buffer capacity used by the `Rw` wrapper around the log file
const EXECUTION_LOGGER_BUFFER_SIZE: usize = 8 * 1024; // 8KB
/// Task that appends every execution info received from the executors to the
/// `execution_log` file, flushing the (buffered) file periodically.
pub async fn execution_logger_task<P>(
    execution_log: String,
    mut from_executors: ExecutionInfoReceiver<P>,
) where
    P: Protocol,
{
    info!("[execution_logger] started with log {}", execution_log);
    // create execution log file (truncating it if already exists)
    let file = File::create(execution_log)
        .await
        .expect("it should be possible to create execution log file");
    // create file logger
    let mut logger = Rw::from(
        EXECUTION_LOGGER_BUFFER_SIZE,
        EXECUTION_LOGGER_BUFFER_SIZE,
        file,
    );
    // create interval
    let mut interval = time::interval(EXECUTION_LOGGER_FLUSH_INTERVAL);
    loop {
        tokio::select! {
            execution_info = from_executors.recv() => {
                trace!("[executor_logger] from parent: {:?}", execution_info);
                if let Some(execution_info) = execution_info {
                    // write execution info to file (buffered; flushed by the
                    // interval branch below)
                    if let Err(e) = logger.write(&execution_info).await {
                        warn!("[executor_logger] error when writing to the logger file: {:?}", e);
                    }
                } else {
                    warn!("[executor_logger] error while receiving execution info from parent");
                }
            }
            _ = interval.tick() => {
                // flush
                if let Err(e) = logger.flush().await {
                    warn!("[executor_logger] error when flushing to the logger file: {:?}", e);
                }
            }
        }
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/task/server/process.rs | fantoch/src/run/task/server/process.rs | use super::execution_logger;
use crate::command::Command;
use crate::id::{Dot, ProcessId, ShardId};
use crate::protocol::{Action, CommittedAndExecuted, Protocol};
use crate::run::prelude::*;
use crate::run::task;
use crate::time::RunTime;
use crate::HashMap;
use crate::{trace, warn};
use rand::Rng;
use std::fmt::Debug;
use std::sync::Arc;
use tokio::task::JoinHandle;
use tokio::time;
/// Starts process workers.
///
/// Spawns one `process_task` per worker — each owning a clone of the protocol
/// state and the i-th entry of each input-channel vector — and, if
/// `execution_log` is set, also spawns an execution-logger task shared by all
/// workers. Returns the join handles of the spawned worker tasks.
pub fn start_processes<P, R>(
    process: P,
    reader_to_workers_rxs: Vec<ReaderReceiver<P>>,
    client_to_workers_rxs: Vec<SubmitReceiver>,
    periodic_to_workers_rxs: Vec<PeriodicEventReceiver<P, R>>,
    executors_to_workers_rxs: Vec<ExecutedReceiver>,
    to_writers: HashMap<ProcessId, Vec<WriterSender<P>>>,
    reader_to_workers: ReaderToWorkers<P>,
    to_executors: ToExecutors<P>,
    process_channel_buffer_size: usize,
    execution_log: Option<String>,
    to_metrics_logger: Option<ProtocolMetricsSender>,
) -> Vec<JoinHandle<()>>
where
    P: Protocol + Send + 'static,
    R: Debug + Clone + Send + 'static,
{
    let to_execution_logger = execution_log.map(|execution_log| {
        // if the execution log was set, then start the execution logger
        let mut tx = task::spawn_consumer(process_channel_buffer_size, |rx| {
            execution_logger::execution_logger_task::<P>(execution_log, rx)
        });
        tx.set_name("to_execution_logger");
        tx
    });
    // zip rxs': the i-th entry of each vector belongs to the i-th worker
    let incoming = reader_to_workers_rxs
        .into_iter()
        .zip(client_to_workers_rxs.into_iter())
        .zip(periodic_to_workers_rxs.into_iter())
        .zip(executors_to_workers_rxs.into_iter());
    // create executor workers
    incoming
        .enumerate()
        .map(
            |(
                worker_index,
                (((from_readers, from_clients), from_periodic), from_executors),
            )| {
                // create task
                let task = process_task::<P, R>(
                    worker_index,
                    process.clone(),
                    from_readers,
                    from_clients,
                    from_periodic,
                    from_executors,
                    to_writers.clone(),
                    reader_to_workers.clone(),
                    to_executors.clone(),
                    to_execution_logger.clone(),
                    to_metrics_logger.clone(),
                );
                task::spawn(task)
                // // if this is a reserved worker, run it on its own runtime
                // if worker_index < super::INDEXES_RESERVED {
                //     let thread_name =
                //         format!("worker_{}_runtime", worker_index);
                //     tokio::task::spawn_blocking(|| {
                //         // create tokio runtime
                //         let mut runtime = tokio::runtime::Builder::new()
                //             .threaded_scheduler()
                //             .core_threads(1)
                //             .thread_name(thread_name)
                //             .build()
                //             .expect("tokio runtime build should work");
                //         runtime.block_on(task)
                //     });
                //     None
                // } else {
                //     Some(task::spawn(task))
                // }
            },
        )
        .collect()
}
/// Main loop of a process worker: multiplexes (via `select!`) messages from
/// the readers, commands from clients, events from the periodic task and
/// notifications from the executors; also periodically sends protocol metrics
/// to the metrics logger, when one is configured.
async fn process_task<P, R>(
    worker_index: usize,
    mut process: P,
    mut from_readers: ReaderReceiver<P>,
    mut from_clients: SubmitReceiver,
    mut from_periodic: PeriodicEventReceiver<P, R>,
    mut from_executors: ExecutedReceiver,
    mut to_writers: HashMap<ProcessId, Vec<WriterSender<P>>>,
    mut reader_to_workers: ReaderToWorkers<P>,
    mut to_executors: ToExecutors<P>,
    mut to_execution_logger: Option<ExecutionInfoSender<P>>,
    mut to_metrics_logger: Option<ProtocolMetricsSender>,
) where
    P: Protocol + 'static,
    R: Debug + 'static,
{
    // create time
    let time = RunTime;
    // create interval (for metrics notification)
    let mut interval = time::interval(super::metrics_logger::METRICS_INTERVAL);
    loop {
        // TODO maybe used select_biased
        tokio::select! {
            msg = from_readers.recv() => {
                selected_from_processes(worker_index, msg, &mut process, &mut to_writers, &mut reader_to_workers, &mut to_executors, &mut to_execution_logger, &time).await
            }
            event = from_periodic.recv() => {
                selected_from_periodic_task(worker_index, event, &mut process, &mut to_writers, &mut reader_to_workers, &mut to_executors, &mut to_execution_logger, &time).await
            }
            executed = from_executors.recv() => {
                selected_from_executors(worker_index, executed, &mut process, &mut to_writers, &mut reader_to_workers, &mut to_executors, &mut to_execution_logger, &time).await
            }
            cmd = from_clients.recv() => {
                selected_from_clients(worker_index, cmd, &mut process, &mut to_writers, &mut reader_to_workers, &mut to_executors, &mut to_execution_logger, &time).await
            }
            _ = interval.tick() => {
                if let Some(to_metrics_logger) = to_metrics_logger.as_mut() {
                    // send metrics to logger (in case there's one)
                    let protocol_metrics = process.metrics().clone();
                    if let Err(e) = to_metrics_logger.send((worker_index, protocol_metrics)).await {
                        warn!("[server] error while sending metrics to metrics logger: {:?}", e);
                    }
                }
            }
        }
    }
}
/// Dispatches one message received from the reader tasks (or logs a warning
/// if the channel returned `None`).
async fn selected_from_processes<P>(
    worker_index: usize,
    msg: Option<(ProcessId, ShardId, P::Message)>,
    process: &mut P,
    to_writers: &mut HashMap<ProcessId, Vec<WriterSender<P>>>,
    reader_to_workers: &mut ReaderToWorkers<P>,
    to_executors: &mut ToExecutors<P>,
    to_execution_logger: &mut Option<ExecutionInfoSender<P>>,
    time: &RunTime,
) where
    P: Protocol + 'static,
{
    trace!("[server] reader message: {:?}", msg);
    match msg {
        Some((from_id, from_shard_id, msg)) => {
            handle_from_processes(
                worker_index,
                from_id,
                from_shard_id,
                msg,
                process,
                to_writers,
                reader_to_workers,
                to_executors,
                to_execution_logger,
                time,
            )
            .await
        }
        None => warn!(
            "[server] error while receiving new process message from readers"
        ),
    }
}
/// Lets the protocol handle a message received from process `from_id` (shard
/// `from_shard_id`) and then ships whatever the protocol produced (messages
/// to other processes and execution info for the executors).
async fn handle_from_processes<P>(
    worker_index: usize,
    from_id: ProcessId,
    from_shard_id: ShardId,
    msg: P::Message,
    process: &mut P,
    to_writers: &mut HashMap<ProcessId, Vec<WriterSender<P>>>,
    reader_to_workers: &mut ReaderToWorkers<P>,
    to_executors: &mut ToExecutors<P>,
    to_execution_logger: &mut Option<ExecutionInfoSender<P>>,
    time: &RunTime,
) where
    P: Protocol + 'static,
{
    // handle message in process and potentially new actions
    process.handle(from_id, from_shard_id, msg, time);
    // propagate any resulting messages / execution info
    send_to_processes_and_executors(
        worker_index,
        process,
        to_writers,
        reader_to_workers,
        to_executors,
        to_execution_logger,
        time,
    )
    .await;
}
// TODO maybe run in parallel
/// Drains the protocol's outgoing queues: sends every pending `Action` to its
/// target processes (handling self-targeted messages locally) and forwards
/// every pending execution info to the executors (and to the execution
/// logger, if one is running).
async fn send_to_processes_and_executors<P>(
    worker_index: usize,
    process: &mut P,
    to_writers: &mut HashMap<ProcessId, Vec<WriterSender<P>>>,
    reader_to_workers: &mut ReaderToWorkers<P>,
    to_executors: &mut ToExecutors<P>,
    to_execution_logger: &mut Option<ExecutionInfoSender<P>>,
    time: &RunTime,
) where
    P: Protocol + 'static,
{
    while let Some(action) = process.to_processes() {
        match action {
            Action::ToSend { target, msg } => {
                // check if should handle message locally
                if target.contains(&process.id()) {
                    // handle msg locally if self in `target`
                    handle_message_from_self::<P>(
                        worker_index,
                        msg.clone(),
                        process,
                        reader_to_workers,
                        time,
                    )
                    .await;
                }
                // prevent unnecessary cloning of messages, since send only
                // requires a reference to the message
                let msg_to_send = Arc::new(POEMessage::Protocol(msg));
                // send message to writers in target
                for (to, channels) in to_writers.iter_mut() {
                    if target.contains(to) {
                        send_to_one_writer::<P>(
                            "server",
                            msg_to_send.clone(),
                            channels,
                        )
                        .await
                    }
                }
            }
            Action::ToForward { msg } => {
                // handle msg locally if self in `target`
                handle_message_from_self(
                    worker_index,
                    msg,
                    process,
                    reader_to_workers,
                    time,
                )
                .await;
            }
        }
    }
    // notify executors
    for execution_info in process.to_executors_iter() {
        // if there's an execution logger, then also send execution info to it
        if let Some(to_execution_logger) = to_execution_logger {
            if let Err(e) =
                to_execution_logger.send(execution_info.clone()).await
            {
                warn!("[server] error while sending new execution info to execution logger: {:?}", e);
            }
        }
        // notify executor
        if let Err(e) = to_executors.forward(execution_info).await {
            warn!(
                "[server] error while sending new execution info to executor: {:?}",
                e
            );
        }
    }
}
/// Handles a message this process sent to itself.
///
/// If the message targets this very worker it is handled immediately (i.e.
/// "messages to self are delivered immediately" only holds for self messages
/// to the same worker); otherwise it is forwarded through the
/// reader-to-workers channels like any other message.
async fn handle_message_from_self<P>(
    worker_index: usize,
    msg: P::Message,
    process: &mut P,
    reader_to_workers: &mut ReaderToWorkers<P>,
    time: &RunTime,
) where
    P: Protocol + 'static,
{
    // create msg to be forwarded
    let to_forward = (process.id(), process.shard_id(), msg);
    if reader_to_workers.only_to_self(&to_forward, worker_index) {
        process.handle(to_forward.0, to_forward.1, to_forward.2, time)
    } else if let Err(e) = reader_to_workers.forward(to_forward).await {
        // `else if let` (instead of `else { if let }`) drops a needless
        // nesting level
        warn!("[server] error notifying process task with msg from self: {:?}", e);
    }
}
/// Sends `msg` to one of the writer tasks in `writers`, picked at random.
///
/// Logs a warning (instead of panicking) when `writers` is empty: previously
/// `gen_range(0..0)` would panic in that case.
pub async fn send_to_one_writer<P>(
    tag: &'static str,
    msg: Arc<POEMessage<P>>,
    writers: &mut Vec<WriterSender<P>>,
) where
    P: Protocol + 'static,
{
    // guard against an empty writer set: `gen_range(0..0)` panics
    if writers.is_empty() {
        warn!("[{}] no writers to send message to", tag);
        return;
    }
    // pick a random one
    let writer_index = rand::thread_rng().gen_range(0..writers.len());
    if let Err(e) = writers[writer_index].send(msg).await {
        warn!(
            "[{}] error while sending to writer {}: {:?}",
            tag, writer_index, e
        );
    }
}
async fn selected_from_clients<P>(
worker_index: usize,
cmd: Option<(Option<Dot>, Command)>,
process: &mut P,
to_writers: &mut HashMap<ProcessId, Vec<WriterSender<P>>>,
reader_to_workers: &mut ReaderToWorkers<P>,
to_executors: &mut ToExecutors<P>,
to_execution_logger: &mut Option<ExecutionInfoSender<P>>,
time: &RunTime,
) where
P: Protocol + 'static,
{
trace!("[server] from clients: {:?}", cmd);
if let Some((dot, cmd)) = cmd {
handle_from_clients(
worker_index,
dot,
cmd,
process,
to_writers,
reader_to_workers,
to_executors,
to_execution_logger,
time,
)
.await
} else {
warn!("[server] error while receiving new command from clients");
}
}
/// Submits a client command to the protocol (with an optional pre-assigned
/// `dot`) and then ships whatever the protocol produced as a result.
async fn handle_from_clients<P>(
    worker_index: usize,
    dot: Option<Dot>,
    cmd: Command,
    process: &mut P,
    to_writers: &mut HashMap<ProcessId, Vec<WriterSender<P>>>,
    reader_to_workers: &mut ReaderToWorkers<P>,
    to_executors: &mut ToExecutors<P>,
    to_execution_logger: &mut Option<ExecutionInfoSender<P>>,
    time: &RunTime,
) where
    P: Protocol + 'static,
{
    // submit command in process
    process.submit(dot, cmd, time);
    // propagate any resulting messages / execution info
    send_to_processes_and_executors(
        worker_index,
        process,
        to_writers,
        reader_to_workers,
        to_executors,
        to_execution_logger,
        time,
    )
    .await;
}
async fn selected_from_periodic_task<P, R>(
worker_index: usize,
event: Option<FromPeriodicMessage<P, R>>,
process: &mut P,
to_writers: &mut HashMap<ProcessId, Vec<WriterSender<P>>>,
reader_to_workers: &mut ReaderToWorkers<P>,
to_executors: &mut ToExecutors<P>,
to_execution_logger: &mut Option<ExecutionInfoSender<P>>,
time: &RunTime,
) where
P: Protocol + 'static,
R: Debug + 'static,
{
trace!("[server] from periodic task: {:?}", event);
if let Some(event) = event {
handle_from_periodic_task(
worker_index,
event,
process,
to_writers,
reader_to_workers,
to_executors,
to_execution_logger,
time,
)
.await
} else {
warn!("[server] error while receiving new event from periodic task");
}
}
/// Handles a message from the periodic task: either a periodic protocol
/// event (handled by the protocol, with resulting actions shipped), or an
/// inspect request (whose outcome is sent back on the provided channel).
async fn handle_from_periodic_task<P, R>(
    worker_index: usize,
    msg: FromPeriodicMessage<P, R>,
    process: &mut P,
    to_writers: &mut HashMap<ProcessId, Vec<WriterSender<P>>>,
    reader_to_workers: &mut ReaderToWorkers<P>,
    to_executors: &mut ToExecutors<P>,
    to_execution_logger: &mut Option<ExecutionInfoSender<P>>,
    time: &RunTime,
) where
    P: Protocol + 'static,
    R: Debug + 'static,
{
    match msg {
        FromPeriodicMessage::Event(event) => {
            // handle event in process
            process.handle_event(event, time);
            // propagate any resulting messages / execution info
            send_to_processes_and_executors(
                worker_index,
                process,
                to_writers,
                reader_to_workers,
                to_executors,
                to_execution_logger,
                time,
            )
            .await;
        }
        FromPeriodicMessage::Inspect(f, mut tx) => {
            // run the inspect closure on the current protocol state and send
            // the outcome back
            let outcome = f(&process);
            if let Err(e) = tx.send(outcome).await {
                warn!("[server] error while sending inspect result: {:?}", e);
            }
        }
    }
}
/// Handles the outcome of receiving from the executors channel: delegates to
/// `handle_from_executors` when a message is present, and logs a warning when
/// the channel yielded `None` (sender side closed).
async fn selected_from_executors<P>(
    worker_index: usize,
    committed_and_executed: Option<CommittedAndExecuted>,
    process: &mut P,
    to_writers: &mut HashMap<ProcessId, Vec<WriterSender<P>>>,
    reader_to_workers: &mut ReaderToWorkers<P>,
    to_executors: &mut ToExecutors<P>,
    to_execution_logger: &mut Option<ExecutionInfoSender<P>>,
    time: &RunTime,
) where
    P: Protocol + 'static,
{
    trace!("[server] from executors: {:?}", committed_and_executed);
    if let Some(committed_and_executed) = committed_and_executed {
        handle_from_executors(
            worker_index,
            committed_and_executed,
            process,
            to_writers,
            reader_to_workers,
            to_executors,
            to_execution_logger,
            time,
        )
        .await
    } else {
        warn!("[server] error while receiving message from executors");
    }
}
/// Notifies the protocol about commands the executors report as
/// committed/executed, then forwards any protocol output produced as a
/// consequence.
async fn handle_from_executors<P>(
    worker_index: usize,
    committed_and_executed: CommittedAndExecuted,
    process: &mut P,
    to_writers: &mut HashMap<ProcessId, Vec<WriterSender<P>>>,
    reader_to_workers: &mut ReaderToWorkers<P>,
    to_executors: &mut ToExecutors<P>,
    to_execution_logger: &mut Option<ExecutionInfoSender<P>>,
    time: &RunTime,
) where
    P: Protocol + 'static,
{
    process.handle_executed(committed_and_executed, time);
    // forward any protocol output generated by the notification
    send_to_processes_and_executors(
        worker_index,
        process,
        to_writers,
        reader_to_workers,
        to_executors,
        to_execution_logger,
        time,
    )
    .await;
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/task/server/delay.rs | fantoch/src/run/task/server/delay.rs | use crate::run::chan::{ChannelReceiver, ChannelSender};
use crate::run::task;
use crate::warn;
use std::collections::VecDeque;
use tokio::time::{self, Duration, Instant};
/// Task that forwards every message received on `from` to `to`, delaying each
/// message by `delay`.
///
/// Messages are kept in a FIFO queue tagged with their delivery deadline.
/// When the queue is empty the task simply waits for the next incoming
/// message; otherwise it races the deadline of the queue's front message
/// against the arrival of new messages. The task terminates when forwarding
/// to `to` fails (receiver side closed).
pub async fn delay_task<M>(
    mut from: ChannelReceiver<M>,
    mut to: ChannelSender<M>,
    delay: Duration,
) where
    M: std::fmt::Debug + 'static,
{
    // important to use VecDeque here since we want to always pop the first
    // element
    let mut queue = VecDeque::new();
    // ensures the "error receiving from parent" warning is logged only once
    let mut error_shown = false;
    loop {
        match queue.front() {
            None => {
                // nothing queued: just wait for the next message
                let msg = from.recv().await;
                enqueue(msg, delay, &mut queue, &mut error_shown);
            }
            Some((next_instant, _)) => {
                tokio::select! {
                    _ = time::sleep_until(*next_instant) => {
                        // the front message is due: forward it
                        let msg = dequeue(&mut queue);
                        if let Err(e) = to.send(msg).await {
                            warn!("[delay_task] error forwarding message: {:?}", e);
                            break;
                        }
                    }
                    msg = from.recv() => {
                        enqueue(msg, delay, &mut queue, &mut error_shown);
                    }
                }
            }
        }
    }
}
/// Tags `msg` with its delivery deadline (`now + delay`) and appends it to
/// the back of `queue`. A `None` message means the parent channel failed; in
/// that case a warning is logged, but only once (tracked via `error_shown`).
fn enqueue<M>(
    msg: Option<M>,
    delay: Duration,
    queue: &mut VecDeque<(Instant, M)>,
    error_shown: &mut bool,
) {
    match msg {
        Some(msg) => {
            queue.push_back((task::util::deadline(delay), msg));
        }
        None => {
            // TODO: replace this with an error, as in `batcher.rs`
            if !*error_shown {
                *error_shown = true;
                warn!("[delay_task] error receiving message from parent");
            }
        }
    }
}
/// Removes the front entry of `queue` and returns its message, discarding the
/// associated deadline. Panics when the queue is empty — callers must only
/// invoke this after checking `queue.front()`.
fn dequeue<M>(queue: &mut VecDeque<(Instant, M)>) -> M {
    queue
        .pop_front()
        .map(|(_deadline, msg)| msg)
        .expect("a first element should exist")
}
#[cfg(test)]
mod tests {
    use crate::run::chan;
    use rand::Rng;
    use tokio::time::{Duration, Instant};
    // number of messages pushed through the delay task
    const OPERATIONS: usize = 1000;
    // maximum random sleep between two consecutive writes (milliseconds)
    const MAX_SLEEP: u64 = 20;
    // delay applied by the delay task under test
    const DELAY: Duration = Duration::from_millis(42);
    // End-to-end check: messages sent through `delay_task` should arrive
    // roughly `DELAY` milliseconds after being sent, independently of the
    // (random) spacing between sends.
    #[tokio::test]
    async fn delay_test() {
        // make sure there's enough space in the buffer channel
        let (tx, mut rx) = chan::channel::<Instant>(OPERATIONS * 2);
        let (mut delay_tx, delay_rx) = chan::channel::<Instant>(OPERATIONS * 2);
        // spawn delay task
        tokio::spawn(super::delay_task(delay_rx, tx, DELAY));
        // spawn reader: measures, for each message, how long after its send
        // instant it was delivered
        let reader = tokio::spawn(async move {
            let mut latencies = Vec::with_capacity(OPERATIONS);
            for _ in 0..OPERATIONS {
                let start = rx.recv().await.expect("operation received");
                // compute
                let delay = start.elapsed().as_millis() as u64;
                latencies.push(delay);
            }
            // compute average
            let sum = latencies.into_iter().sum::<u64>();
            sum / (OPERATIONS as u64)
        });
        // spawn writer: sends the current instant after a random sleep
        let writer = tokio::spawn(async move {
            for _ in 0..OPERATIONS {
                let sleep_time =
                    rand::thread_rng().gen_range(1..(MAX_SLEEP + 1));
                tokio::time::sleep(Duration::from_millis(sleep_time)).await;
                let start = Instant::now();
                // send to delay task
                delay_tx.send(start).await.expect("operation sent");
            }
        });
        writer.await.expect("writer finished");
        let latency = reader.await.expect("reader finished");
        // allow messages to be suffer an extra delay of 1/2ms
        let delay = DELAY.as_millis() as u64;
        assert!(
            latency == delay || latency == delay + 1 || latency == delay + 2
        );
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/task/server/client.rs | fantoch/src/run/task/server/client.rs | use crate::command::Command;
use crate::executor::{AggregatePending, ExecutorResult};
use crate::id::{AtomicDotGen, ClientId, ProcessId, ShardId};
use crate::run::chan;
use crate::run::prelude::*;
use crate::run::rw::Connection;
use crate::run::task;
use crate::{info, trace, warn};
use tokio::net::TcpListener;
/// Spawns the task that accepts client connections on `listener`; each
/// accepted connection is served by its own client server task (see
/// `client_listener_task`).
pub fn start_listener(
    process_id: ProcessId,
    shard_id: ShardId,
    listener: TcpListener,
    atomic_dot_gen: Option<AtomicDotGen>,
    client_to_workers: ClientToWorkers,
    client_to_executors: ClientToExecutors,
    tcp_nodelay: bool,
    client_channel_buffer_size: usize,
) {
    task::spawn(client_listener_task(
        process_id,
        shard_id,
        listener,
        atomic_dot_gen,
        client_to_workers,
        client_to_executors,
        tcp_nodelay,
        client_channel_buffer_size,
    ));
}
/// Listen on new client connections and spawn a client task for each new
/// connection. Each spawned task receives clones of the channels to the
/// protocol workers and to the executors so it can route client traffic
/// directly.
async fn client_listener_task(
    process_id: ProcessId,
    shard_id: ShardId,
    listener: TcpListener,
    atomic_dot_gen: Option<AtomicDotGen>,
    client_to_workers: ClientToWorkers,
    client_to_executors: ClientToExecutors,
    tcp_nodelay: bool,
    client_channel_buffer_size: usize,
) {
    // start listener task
    // NOTE(review): buffer size 0 presumably means unbuffered client
    // connections — confirm against `Connection`'s handling of this value
    let tcp_buffer_size = 0;
    let mut rx = task::spawn_producer(client_channel_buffer_size, |tx| {
        task::listener_task(listener, tcp_nodelay, tcp_buffer_size, tx)
    });
    loop {
        // handle new client connections
        match rx.recv().await {
            Some(connection) => {
                trace!("[client_listener] new connection");
                // start client server task and give it the producer-end of the
                // channel in order for this client to notify
                // parent
                task::spawn(client_server_task(
                    process_id,
                    shard_id,
                    atomic_dot_gen.clone(),
                    client_to_workers.clone(),
                    client_to_executors.clone(),
                    client_channel_buffer_size,
                    connection,
                ));
            }
            None => {
                warn!(
                    "[client_listener] error receiving message from listener"
                );
            }
        }
    }
}
/// Client server-side task. Performs the handshake with the newly connected
/// client and then, in a loop, multiplexes between messages from the client
/// connection (new commands) and results coming from the executors.
async fn client_server_task(
    process_id: ProcessId,
    shard_id: ShardId,
    atomic_dot_gen: Option<AtomicDotGen>,
    mut client_to_workers: ClientToWorkers,
    mut client_to_executors: ClientToExecutors,
    client_channel_buffer_size: usize,
    mut connection: Connection,
) {
    let client = server_receive_hi(
        process_id,
        shard_id,
        client_channel_buffer_size,
        &mut connection,
        &mut client_to_executors,
    )
    .await;
    // give up on this client if the handshake failed
    let (client_ids, mut executor_results) = match client {
        Some(client) => client,
        None => {
            warn!(
                "[client_server] giving up on new client {:?} since handshake failed",
                connection
            );
            return;
        }
    };
    // create pending: aggregates partial executor results per command
    let mut pending = AggregatePending::new(process_id, shard_id);
    loop {
        tokio::select! {
            executor_result = executor_results.recv() => {
                trace!("[client_server] new executor result: {:?}", executor_result);
                client_server_task_handle_executor_result(executor_result, &mut connection, &mut pending).await;
            }
            from_client = connection.recv() => {
                trace!("[client_server] from client: {:?}", from_client);
                // a `false` return means the client disconnected
                if !client_server_task_handle_from_client(from_client, &client_ids, &atomic_dot_gen, &mut client_to_workers, &mut client_to_executors, &mut pending).await {
                    return;
                }
            }
        }
    }
}
/// Server side of the client handshake:
/// 1. receives the `ClientHi` (set of client ids) from the connection;
/// 2. registers those clients in all executors together with a fresh channel
///    on which executors will write results;
/// 3. replies with a `ProcessHi` carrying our process and shard id.
///
/// Returns `None` if the `ClientHi` could not be received; otherwise the
/// client ids and the receiving end of the executor-results channel.
async fn server_receive_hi(
    process_id: ProcessId,
    shard_id: ShardId,
    client_channel_buffer_size: usize,
    connection: &mut Connection,
    client_to_executors: &mut ClientToExecutors,
) -> Option<(Vec<ClientId>, ExecutorResultReceiver)> {
    // receive hi from client
    let client_ids = if let Some(ClientHi(client_ids)) = connection.recv().await
    {
        trace!("[client_server] received hi from clients {:?}", client_ids);
        client_ids
    } else {
        warn!(
            "[client_server] couldn't receive client ids from connected client"
        );
        return None;
    };
    // create channel where the executors will write executor results
    let (mut executor_results_tx, executor_results_rx) =
        chan::channel(client_channel_buffer_size);
    // set channels name
    let ids_repr = task::util::ids_repr(&client_ids);
    executor_results_tx
        .set_name(format!("client_server_executor_results_{}", ids_repr));
    // register clients in all executors
    let register =
        ClientToExecutor::Register(client_ids.clone(), executor_results_tx);
    if let Err(e) = client_to_executors.broadcast(register).await {
        warn!(
            "[client_server] error while registering clients in executors: {:?}",
            e
        );
    }
    // say hi back
    let hi = ProcessHi {
        process_id,
        shard_id,
    };
    if let Err(e) = connection.send(&hi).await {
        warn!("[client_server] error while sending hi: {:?}", e);
    }
    // return client id and channel where client should read executor results
    Some((client_ids, executor_results_rx))
}
/// Handles one message received from the client connection.
///
/// Returns `true` to keep serving the client; returns `false` when the
/// connection yielded `None` (client disconnected), in which case the clients
/// are unregistered from all executors first.
async fn client_server_task_handle_from_client(
    from_client: Option<ClientToServer>,
    client_ids: &Vec<ClientId>,
    atomic_dot_gen: &Option<AtomicDotGen>,
    client_to_workers: &mut ClientToWorkers,
    client_to_executors: &mut ClientToExecutors,
    pending: &mut AggregatePending,
) -> bool {
    if let Some(from_client) = from_client {
        client_server_task_handle_cmd(
            from_client,
            atomic_dot_gen,
            client_to_workers,
            pending,
        )
        .await;
        true
    } else {
        info!("[client_server] client disconnected.");
        // unregister client in all executors
        if let Err(e) = client_to_executors
            .broadcast(ClientToExecutor::Unregister(client_ids.clone()))
            .await
        {
            warn!(
                "[client_server] error while unregistering client in executors: {:?}",
                e
            );
        }
        false
    }
}
/// Dispatches a client request:
/// - `Register`: only records the command in `pending` (results will arrive
///   later via the executors);
/// - `Submit`: records the command in `pending`, generates a dot for it (if a
///   dot generator is configured) and forwards it to a protocol worker.
async fn client_server_task_handle_cmd(
    from_client: ClientToServer,
    atomic_dot_gen: &Option<AtomicDotGen>,
    client_to_workers: &mut ClientToWorkers,
    pending: &mut AggregatePending,
) {
    match from_client {
        ClientToServer::Register(cmd) => {
            // only register the command
            client_server_task_register_cmd(&cmd, pending).await;
        }
        ClientToServer::Submit(cmd) => {
            // register the command and submit it
            client_server_task_register_cmd(&cmd, pending).await;
            // create dot for this command (if we have a dot gen)
            let dot = atomic_dot_gen
                .as_ref()
                .map(|atomic_dot_gen| atomic_dot_gen.next_id());
            // forward command to worker process
            if let Err(e) = client_to_workers.forward((dot, cmd)).await {
                warn!(
                    "[client_server] error while sending new command to protocol worker: {:?}",
                    e
                );
            }
        }
    }
}
/// Registers `cmd` in `pending` so that the partial results later produced by
/// the executors can be aggregated into a single command result for the
/// client.
async fn client_server_task_register_cmd(
    cmd: &Command,
    pending: &mut AggregatePending,
) {
    // we'll receive partial results from the executor, thus register command
    // in pending (note: `cmd` is already a reference, no extra borrow needed)
    pending.wait_for(cmd);
}
/// Aggregates a partial result coming from an executor; once `pending` has
/// collected all partial results for a command, the full command result is
/// sent back to the client through `connection`. A `None` result means the
/// executors' channel failed, which is only logged.
async fn client_server_task_handle_executor_result(
    executor_result: Option<ExecutorResult>,
    connection: &mut Connection,
    pending: &mut AggregatePending,
) {
    if let Some(executor_result) = executor_result {
        if let Some(cmd_result) = pending.add_executor_result(executor_result) {
            if let Err(e) = connection.send(&cmd_result).await {
                warn!(
                    "[client_server] error while sending command results: {:?}",
                    e
                );
            }
        }
    } else {
        warn!("[client_server] error while receiving new executor result from executor");
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/task/server/ping.rs | fantoch/src/run/task/server/ping.rs | use crate::id::{ProcessId, ShardId};
use crate::metrics::Histogram;
use crate::run::prelude::*;
use crate::run::task::chan::ChannelSender;
use crate::HashMap;
use crate::{info, trace, warn};
use std::net::IpAddr;
use tokio::time::{self, Duration};
// interval at which the collected ping histograms are logged
const PING_SHOW_INTERVAL: u64 = 5000; // millis
// number of ping measurements performed per peer in each ping round
const ITERATIONS_PER_PING: u64 = 5;
/// Task that periodically pings all other processes (via the system `ping`
/// command) and records the observed latencies in per-process histograms.
///
/// Returns immediately if `ping_interval` is `None`. If a `parent` channel is
/// provided, one round of pings is performed up front and the list of
/// processes sorted by distance (self first) is sent back to the parent
/// before entering the periodic loop.
pub async fn ping_task(
    ping_interval: Option<Duration>,
    process_id: ProcessId,
    shard_id: ShardId,
    ips: HashMap<ProcessId, (ShardId, IpAddr, Option<Duration>)>,
    parent: Option<SortedProcessesReceiver>,
) {
    // if no interval, do not ping
    if ping_interval.is_none() {
        return;
    }
    let ping_interval = ping_interval.unwrap();
    // create tokio interval
    trace!("[ping_task] interval {:?}", ping_interval);
    let mut ping_interval = time::interval(ping_interval);
    // create another tokio interval
    let millis = Duration::from_millis(PING_SHOW_INTERVAL);
    trace!("[ping_task] show interval {:?}", millis);
    let mut ping_show_interval = time::interval(millis);
    // create ping stats: per peer, its shard, ip, optional artificial delay
    // and the latency histogram collected so far
    let mut ping_stats = ips
        .into_iter()
        .map(|(process_id, (shard_id, ip, delay))| {
            (process_id, (shard_id, ip, delay, Histogram::new()))
        })
        .collect();
    if let Some(mut parent) = parent {
        // do one round of pinging and then process the parent message
        ping_task_ping(&mut ping_stats).await;
        let sort_request = parent.recv().await;
        ping_task_sort(process_id, shard_id, &ping_stats, sort_request).await;
    }
    loop {
        tokio::select! {
            _ = ping_interval.tick() => {
                ping_task_ping(&mut ping_stats).await;
            }
            _ = ping_show_interval.tick() => {
                ping_task_show(&ping_stats);
            }
        }
    }
}
/// Performs `ITERATIONS_PER_PING` measurements against every known process
/// and folds the observed latencies (in milliseconds) into each process'
/// histogram.
///
/// Each measurement shells out to `ping -c 1` and parses the average field of
/// its summary line; empty output (e.g. a lost packet) triggers a retry. If
/// an artificial one-way `delay` is configured for the link, twice that delay
/// is added to account for the round trip.
async fn ping_task_ping(
    ping_stats: &mut HashMap<
        ProcessId,
        (ShardId, IpAddr, Option<Duration>, Histogram),
    >,
) {
    for (_shard_id, ip, delay, histogram) in ping_stats.values_mut() {
        for _ in 0..ITERATIONS_PER_PING {
            let latency = loop {
                // `tail`/`cut` extract the "avg" value from ping's
                // `rtt min/avg/max/mdev = ...` summary line
                let command =
                    format!("ping -c 1 -q {} | tail -n 1 | cut -d/ -f5", ip);
                let out = tokio::process::Command::new("sh")
                    .arg("-c")
                    .arg(command)
                    .output()
                    .await
                    .expect("ping command should work");
                let stdout = String::from_utf8(out.stdout)
                    .expect("ping output should be utf8")
                    .trim()
                    .to_string();
                if stdout.is_empty() {
                    warn!("[ping_task] ping output was empty; trying again...")
                } else {
                    break stdout;
                }
            };
            let latency = latency
                .parse::<f64>()
                .expect("ping output should be a float");
            let rounded_latency = latency as u64;
            // add two times the delay (since delay should be half the ping
            // latency), if there's one
            let rounded_latency = if let Some(delay) = delay {
                let delay = delay.as_millis() as u64;
                rounded_latency + 2 * delay
            } else {
                rounded_latency
            };
            histogram.increment(rounded_latency);
        }
    }
}
/// Logs the ping latency histogram collected so far for each peer process.
fn ping_task_show(
    ping_stats: &HashMap<
        ProcessId,
        (ShardId, IpAddr, Option<Duration>, Histogram),
    >,
) {
    ping_stats.iter().for_each(|(process_id, (_, _, _, histogram))| {
        info!("[ping_task] {}: {:?}", process_id, histogram);
    });
}
/// Handles a sort request from the parent task: computes the list of
/// processes sorted by ping distance (self first, see `sort_by_distance`) and
/// sends it back through the channel carried by the request. A `None` request
/// means the parent channel failed, which is only logged.
async fn ping_task_sort(
    process_id: ProcessId,
    shard_id: ShardId,
    ping_stats: &HashMap<
        ProcessId,
        (ShardId, IpAddr, Option<Duration>, Histogram),
    >,
    sort_request: Option<ChannelSender<Vec<(ProcessId, ShardId)>>>,
) {
    match sort_request {
        Some(mut sort_request) => {
            // `ping_stats` is already a reference: pass it directly
            let sorted_processes =
                sort_by_distance(process_id, shard_id, ping_stats);
            if let Err(e) = sort_request.send(sorted_processes).await {
                warn!("[ping_task] error sending message to parent: {:?}", e);
            }
        }
        None => {
            warn!("[ping_task] error receiving message from parent");
        }
    }
}
/// Sorts all known processes by mean ping latency, always placing the local
/// process (`process_id`, `shard_id`) first regardless of its latency.
fn sort_by_distance(
    process_id: ProcessId,
    shard_id: ShardId,
    ping_stats: &HashMap<
        ProcessId,
        (ShardId, IpAddr, Option<Duration>, Histogram),
    >,
) -> Vec<(ProcessId, ShardId)> {
    // pair each peer with its mean latency; ties break on process id
    let mut by_latency: Vec<_> = ping_stats
        .iter()
        .map(|(peer_id, (peer_shard_id, _, _, histogram))| {
            (u64::from(histogram.mean()), *peer_id, *peer_shard_id)
        })
        .collect();
    by_latency.sort();
    // self always comes first, followed by peers from closest to farthest
    let mut sorted = Vec::with_capacity(by_latency.len() + 1);
    sorted.push((process_id, shard_id));
    sorted.extend(
        by_latency
            .into_iter()
            .map(|(_latency, peer_id, peer_shard_id)| (peer_id, peer_shard_id)),
    );
    sorted
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::net::Ipv4Addr;
    // Checks that `sort_by_distance` keeps self first and orders peers by
    // mean latency, updating as histograms change.
    #[test]
    fn sort_by_distance_test() {
        let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
        let process_id = 1;
        let shard_id = 0;
        let mut ping_stats = HashMap::new();
        // with no peers, only self is returned
        assert_eq!(
            sort_by_distance(process_id, shard_id, &ping_stats),
            vec![(process_id, shard_id)]
        );
        ping_stats.insert(2, (7, ip, None, Histogram::from(vec![10, 20, 30])));
        assert_eq!(
            sort_by_distance(process_id, shard_id, &ping_stats),
            vec![(process_id, shard_id), (2, 7)]
        );
        // process 3 has a lower mean, so it sorts before process 2
        ping_stats.insert(3, (7, ip, None, Histogram::from(vec![5, 5, 5])));
        assert_eq!(
            sort_by_distance(process_id, shard_id, &ping_stats),
            vec![(process_id, shard_id), (3, 7), (2, 7)]
        );
        // drag process 2's mean below process 3's: order flips
        let (_, _, _, histogram_2) = ping_stats.get_mut(&2).unwrap();
        for _ in 1..100 {
            histogram_2.increment(1);
        }
        assert_eq!(
            sort_by_distance(process_id, shard_id, &ping_stats),
            vec![(process_id, shard_id), (2, 7), (3, 7)]
        );
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/task/server/executor.rs | fantoch/src/run/task/server/executor.rs | use crate::config::Config;
use crate::executor::Executor;
use crate::id::{ClientId, ProcessId, ShardId};
use crate::protocol::Protocol;
use crate::run::prelude::*;
use crate::run::task;
use crate::time::RunTime;
use crate::HashMap;
use crate::{debug, trace, warn};
use std::sync::Arc;
use tokio::time;
/// Starts executors: creates one `P::Executor` clone per pair of incoming
/// channels (execution info from workers + client registrations) and spawns
/// an `executor_task` for each, distinguished by its index.
pub fn start_executors<P>(
    process_id: ProcessId,
    shard_id: ShardId,
    config: Config,
    to_executors_rxs: Vec<ExecutionInfoReceiver<P>>,
    client_to_executors_rxs: Vec<ClientToExecutorReceiver>,
    executors_to_workers: ExecutorsToWorkers,
    shard_writers: HashMap<ShardId, Vec<WriterSender<P>>>,
    to_executors: ToExecutors<P>,
    to_metrics_logger: Option<ExecutorMetricsSender>,
) where
    P: Protocol + 'static,
{
    // zip rxs'
    let incoming = to_executors_rxs
        .into_iter()
        .zip(client_to_executors_rxs.into_iter());
    // create executor
    let executor = P::Executor::new(process_id, shard_id, config);
    // create executor workers
    for (executor_index, (from_workers, from_clients)) in incoming.enumerate() {
        task::spawn(executor_task::<P>(
            executor_index,
            executor.clone(),
            shard_id,
            config,
            from_workers,
            from_clients,
            executors_to_workers.clone(),
            shard_writers.clone(),
            to_executors.clone(),
            to_metrics_logger.clone(),
        ));
    }
}
/// Main loop of a single executor.
///
/// Multiplexes between:
/// - execution info arriving from protocol workers;
/// - client (un)registration requests;
/// - a periodic cleanup timer;
/// - a periodic timer to notify workers of executed commands;
/// - a periodic metrics-reporting timer;
/// - (only if configured) a periodic timer to monitor pending commands.
///
/// NOTE: the select loop appears twice — with and without the
/// monitor-pending branch — because `tokio::select!` branches are fixed at
/// compile time while the monitor interval is only sometimes configured.
async fn executor_task<P>(
    executor_index: usize,
    mut executor: P::Executor,
    shard_id: ShardId,
    config: Config,
    mut from_workers: ExecutionInfoReceiver<P>,
    mut from_clients: ClientToExecutorReceiver,
    mut executors_to_workers: ExecutorsToWorkers,
    mut shard_writers: HashMap<ShardId, Vec<WriterSender<P>>>,
    mut to_executors: ToExecutors<P>,
    mut to_metrics_logger: Option<ExecutorMetricsSender>,
) where
    P: Protocol + 'static,
{
    // set executor index
    executor.set_executor_index(executor_index);
    // create time
    let time = RunTime;
    // holder of all client info
    let mut to_clients = ToClients::new();
    // create a tokio sleep
    let sleep = |interval| Box::pin(time::sleep(interval));
    // create executors cleanup interval
    let gen_cleanup_delay = || sleep(config.executor_cleanup_interval());
    let mut cleanup_delay = gen_cleanup_delay();
    // create executors executed notification delay
    let gen_executed_notification_delay =
        || sleep(config.executor_executed_notification_interval());
    let mut executed_notification_delay = gen_executed_notification_delay();
    // create metrics interval
    let gen_metrics_delay = || sleep(super::metrics_logger::METRICS_INTERVAL);
    let mut metrics_delay = gen_metrics_delay();
    // check if executors monitor pending interval is set
    if let Some(monitor_pending_interval) =
        config.executor_monitor_pending_interval()
    {
        // create executors monitor pending interval
        let gen_monitor_pending_delay =
            || Box::pin(time::sleep(monitor_pending_interval));
        let mut monitor_pending_delay = gen_monitor_pending_delay();
        loop {
            tokio::select! {
                _ = &mut monitor_pending_delay => {
                    executor.monitor_pending(&time);
                    monitor_pending_delay = gen_monitor_pending_delay();
                }
                execution_info = from_workers.recv() => {
                    handle_execution_info(execution_info, &mut executor, shard_id, &mut shard_writers, &mut to_executors, &mut to_clients, &time).await;
                }
                from_client = from_clients.recv() => {
                    handle_from_client::<P>(from_client, &mut to_clients).await;
                }
                _ = &mut cleanup_delay => {
                    cleanup_tick(&mut executor, shard_id, &mut shard_writers, &mut to_executors, &mut to_clients, &time).await;
                    cleanup_delay = gen_cleanup_delay();
                }
                _ = &mut executed_notification_delay => {
                    executed_notification_tick::<P>(&mut executor, &mut executors_to_workers, &time).await;
                    executed_notification_delay = gen_executed_notification_delay();
                }
                _ = &mut metrics_delay => {
                    metrics_tick::<P>(executor_index, &mut executor, &mut to_metrics_logger).await;
                    metrics_delay = gen_metrics_delay();
                }
            }
        }
    } else {
        // same loop as above, minus the monitor-pending branch
        loop {
            tokio::select! {
                execution_info = from_workers.recv() => {
                    handle_execution_info(execution_info, &mut executor, shard_id, &mut shard_writers, &mut to_executors, &mut to_clients, &time).await;
                }
                from_client = from_clients.recv() => {
                    handle_from_client::<P>(from_client, &mut to_clients).await;
                }
                _ = &mut cleanup_delay => {
                    cleanup_tick(&mut executor, shard_id, &mut shard_writers, &mut to_executors, &mut to_clients, &time).await;
                    cleanup_delay = gen_cleanup_delay();
                }
                _ = &mut executed_notification_delay => {
                    executed_notification_tick::<P>(&mut executor, &mut executors_to_workers, &time).await;
                    executed_notification_delay = gen_executed_notification_delay();
                }
                _ = &mut metrics_delay => {
                    metrics_tick::<P>(executor_index, &mut executor, &mut to_metrics_logger).await;
                    metrics_delay = gen_metrics_delay();
                }
            }
        }
    }
}
/// Feeds a piece of execution info (from a protocol worker) to the executor
/// and then flushes whatever the executor produced: results to clients and
/// info to other executors. A `None` message means the workers' channel
/// failed, which is only logged.
async fn handle_execution_info<P>(
    execution_info: Option<<P::Executor as Executor>::ExecutionInfo>,
    executor: &mut P::Executor,
    shard_id: ShardId,
    shard_writers: &mut HashMap<ShardId, Vec<WriterSender<P>>>,
    to_executors: &mut ToExecutors<P>,
    to_clients: &mut ToClients,
    time: &RunTime,
) where
    P: Protocol + 'static,
{
    trace!("[executor] from workers: {:?}", execution_info);
    if let Some(execution_info) = execution_info {
        executor.handle(execution_info, time);
        fetch_results(
            executor,
            shard_id,
            shard_writers,
            to_executors,
            to_clients,
        )
        .await;
    } else {
        warn!("[executor] error while receiving execution info from worker");
    }
}
/// Flushes everything the executor currently has to output: command results
/// to the owning clients and execution info to (possibly remote) executors.
async fn fetch_results<P>(
    executor: &mut P::Executor,
    shard_id: ShardId,
    shard_writers: &mut HashMap<ShardId, Vec<WriterSender<P>>>,
    to_executors: &mut ToExecutors<P>,
    to_clients: &mut ToClients,
) where
    P: Protocol + 'static,
{
    fetch_new_command_results::<P>(executor, to_clients).await;
    fetch_info_to_executors::<P>(
        executor,
        shard_id,
        shard_writers,
        to_executors,
    )
    .await;
}
/// Drains the executor's pending client results, routing each one to the
/// channel of the client that issued the command (identified via the rifl's
/// source). Results for unregistered clients are silently dropped.
async fn fetch_new_command_results<P>(
    executor: &mut P::Executor,
    to_clients: &mut ToClients,
) where
    P: Protocol,
{
    // forward executor results (commands or partial commands) to clients that
    // are waiting for them
    for executor_result in executor.to_clients_iter() {
        // get client id
        let client_id = executor_result.rifl.source();
        // send executor result to client (in case it is registered)
        if let Some(executor_results_tx) = to_clients.to_client(&client_id) {
            if let Err(e) = executor_results_tx.send(executor_result).await {
                warn!(
                    "[executor] error while sending executor result to client {}: {:?}",
                    client_id, e
                );
            }
        }
    }
}
/// Drains the executor's pending cross-executor messages. Messages targeted
/// at the local shard are forwarded through `to_executors`; messages for
/// other shards are wrapped in a `POEMessage::Executor` and written to one of
/// that shard's writer channels. Panics if the target shard is not connected.
async fn fetch_info_to_executors<P>(
    executor: &mut P::Executor,
    shard_id: ShardId,
    shard_writers: &mut HashMap<ShardId, Vec<WriterSender<P>>>,
    to_executors: &mut ToExecutors<P>,
) where
    P: Protocol + 'static,
{
    // forward execution info to other shards
    for (target_shard, execution_info) in executor.to_executors_iter() {
        debug!(
            "[executor] to executors in shard {}: {:?}",
            target_shard, execution_info
        );
        // check if it's a message to self
        if shard_id == target_shard {
            // notify executor
            if let Err(e) = to_executors.forward(execution_info).await {
                warn!("[executor] error while notifying other executors with new execution info: {:?}", e);
            }
        } else {
            let msg_to_send = Arc::new(POEMessage::Executor(execution_info));
            if let Some(channels) = shard_writers.get_mut(&target_shard) {
                crate::run::task::server::process::send_to_one_writer::<P>(
                    "executor",
                    msg_to_send,
                    channels,
                )
                .await
            } else {
                panic!(
                    "[executor] tried to send a message to a non-connected shard"
                );
            }
        }
    }
}
/// Handles client (un)registration requests forwarded by the client tasks,
/// updating the `to_clients` registry. A `None` message means the clients'
/// channel failed, which is only logged.
async fn handle_from_client<P>(
    from_client: Option<ClientToExecutor>,
    to_clients: &mut ToClients,
) where
    P: Protocol,
{
    trace!("[executor] from client: {:?}", from_client);
    if let Some(from_client) = from_client {
        match from_client {
            ClientToExecutor::Register(client_ids, executor_results_tx) => {
                to_clients.register(client_ids, executor_results_tx);
            }
            ClientToExecutor::Unregister(client_ids) => {
                to_clients.unregister(client_ids);
            }
        }
    } else {
        warn!("[executor] error while receiving new command from clients");
    }
}
/// Periodic cleanup: lets the executor run its cleanup routine and flushes
/// any output it produced as a result.
async fn cleanup_tick<P>(
    executor: &mut P::Executor,
    shard_id: ShardId,
    shard_writers: &mut HashMap<ShardId, Vec<WriterSender<P>>>,
    to_executors: &mut ToExecutors<P>,
    to_clients: &mut ToClients,
    time: &RunTime,
) where
    P: Protocol + 'static,
{
    trace!("[executor] cleanup");
    executor.cleanup(time);
    fetch_results(executor, shard_id, shard_writers, to_executors, to_clients)
        .await;
}
/// Periodic tick that asks the executor for its executed-commands summary
/// (if any) and forwards it to the protocol workers.
async fn executed_notification_tick<P>(
    executor: &mut P::Executor,
    executors_to_workers: &mut ExecutorsToWorkers,
    time: &RunTime,
) where
    P: Protocol + 'static,
{
    trace!("[executor] executed");
    if let Some(executed) = executor.executed(time) {
        if let Err(e) = executors_to_workers.forward(executed).await {
            warn!(
                "[executor] error while sending executed to workers: {:?}",
                e
            );
        }
    }
}
/// Periodic tick that snapshots the executor's metrics and ships them to the
/// metrics logger, if one is configured.
async fn metrics_tick<P>(
    executor_index: usize,
    executor: &mut P::Executor,
    to_metrics_logger: &mut Option<ExecutorMetricsSender>,
) where
    P: Protocol + 'static,
{
    if let Some(to_metrics_logger) = to_metrics_logger.as_mut() {
        // send metrics to logger (in case there's one)
        let executor_metrics = executor.metrics().clone();
        if let Err(e) = to_metrics_logger
            .send((executor_index, executor_metrics))
            .await
        {
            warn!("[executor] error while sending metrics to metrics logger: {:?}", e);
        }
    }
}
/// Registry mapping client ids to the channel on which their executor results
/// must be written.
struct ToClients {
    /// since many `ClientId` can share the same `ExecutorResultSender`, in
    /// order to avoid cloning these senders we'll have this additional index
    /// that tells us which `ToClient` to use for each `ClientId`
    // next id to assign to a set of clients registered together
    next_id: usize,
    // maps each client to the id of the set it was registered with
    index: HashMap<ClientId, usize>,
    // maps each set id to the (single) channel shared by those clients
    to_clients: HashMap<usize, ExecutorResultSender>,
}
impl ToClients {
    /// Creates an empty registry.
    fn new() -> Self {
        Self {
            next_id: 0,
            index: HashMap::new(),
            to_clients: HashMap::new(),
        }
    }

    /// Registers a set of client ids that share a single results channel.
    ///
    /// # Panics
    /// Panics if any of the client ids is already registered.
    fn register(
        &mut self,
        client_ids: Vec<ClientId>,
        executor_results_tx: ExecutorResultSender,
    ) {
        // compute id for this set of clients
        let id = self.next_id;
        self.next_id += 1;
        // map each `ClientId` to the computed id
        for client_id in client_ids {
            trace!("[executor] clients {} registered", client_id);
            assert!(
                self.index.insert(client_id, id).is_none(),
                "client already registered"
            );
        }
        // save executor result sender
        assert!(self.to_clients.insert(id, executor_results_tx).is_none());
    }

    /// Unregisters a set of client ids and drops their shared results
    /// channel.
    ///
    /// # Panics
    /// Panics unless the given client ids resolve to exactly one registered
    /// set (i.e. they were all registered together).
    fn unregister(&mut self, client_ids: Vec<ClientId>) {
        let mut ids: Vec<_> = client_ids
            .into_iter()
            .filter_map(|client_id| {
                trace!("[executor] clients {} unregistered", client_id);
                self.index.remove(&client_id)
            })
            .collect();
        ids.sort();
        ids.dedup();
        assert_eq!(ids.len(), 1, "id indexing client ids should be the same");
        assert!(self.to_clients.remove(&ids[0]).is_some());
    }

    /// Returns the results channel for `client_id`, if registered.
    fn to_client(
        &mut self,
        client_id: &ClientId,
    ) -> Option<&mut ExecutorResultSender> {
        // split the borrows: `index` is read while `to_clients` is borrowed
        // mutably, which is fine since they are disjoint fields
        let to_clients = &mut self.to_clients;
        self.index
            .get(client_id)
            .map(|id| to_clients.get_mut(id).expect("indexed client not found"))
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/task/server/mod.rs | fantoch/src/run/task/server/mod.rs | // This module contains executor's implementation.
pub mod executor;
// This module contains execution logger's implementation.
mod execution_logger;
// This module contains process's implementation.
pub mod process;
// This module contains client's implementation.
pub mod client;
// This module contains periodic's implementation.
pub mod periodic;
// This module contains ping's implementation.
pub mod ping;
// This module contains delay's implementation.
pub mod delay;
// This module contains periodic metrics's implementation.
pub mod metrics_logger;
use crate::config::Config;
use crate::id::{ProcessId, ShardId};
use crate::protocol::Protocol;
use crate::run::chan;
use crate::run::prelude::*;
use crate::run::rw::Connection;
use crate::run::task;
use crate::HashMap;
use crate::{trace, warn};
use color_eyre::Report;
use std::fmt::Debug;
use std::net::IpAddr;
use tokio::net::{TcpListener, ToSocketAddrs};
use tokio::time::{self, Duration};
/// Connects this process to every other process (and to processes of other
/// shards), establishing two TCP connections per peer and multiplexing
/// factor: one set used exclusively for reading, the other for writing.
///
/// Returns, per peer process, its shard/ip/optional-delay info and the writer
/// channels to reach it. Errors if any outgoing connection fails after the
/// configured retries.
pub async fn connect_to_all<A, P>(
    process_id: ProcessId,
    shard_id: ShardId,
    config: Config,
    listener: TcpListener,
    addresses: Vec<(A, Option<Duration>)>,
    to_workers: ReaderToWorkers<P>,
    to_executors: ToExecutors<P>,
    connect_retries: usize,
    tcp_nodelay: bool,
    tcp_buffer_size: usize,
    tcp_flush_interval: Option<Duration>,
    channel_buffer_size: usize,
    multiplexing: usize,
) -> Result<
    (
        HashMap<ProcessId, (ShardId, IpAddr, Option<Duration>)>,
        HashMap<ProcessId, Vec<WriterSender<P>>>,
    ),
    Report,
>
where
    A: ToSocketAddrs + Debug,
    P: Protocol + 'static,
{
    // check that (n-1 + shards-1) addresses were set
    let total = config.n() - 1 + config.shard_count() - 1;
    assert_eq!(
        addresses.len(),
        total,
        "addresses count should be (n-1 + shards-1)"
    );
    // compute the number of expected connections
    let total_connections = total * multiplexing;
    // spawn listener
    let mut from_listener = task::spawn_producer(channel_buffer_size, |tx| {
        task::listener_task(listener, tcp_nodelay, tcp_buffer_size, tx)
    });
    // create list of in and out connections:
    // - even though TCP is full-duplex, due to the current tokio
    //   non-parallel-tcp-socket-read-write limitation, we going to use in
    //   streams for reading and out streams for writing, which can be done in
    //   parallel
    let mut outgoing = Vec::with_capacity(total_connections);
    let mut incoming = Vec::with_capacity(total_connections);
    // connect to all addresses (outgoing)
    for (address, delay) in addresses {
        // create `multiplexing` connections per address
        for _ in 0..multiplexing {
            let mut connection = task::connect(
                &address,
                tcp_nodelay,
                tcp_buffer_size,
                connect_retries,
            )
            .await?;
            // maybe set delay
            if let Some(delay) = delay {
                connection.set_delay(delay);
            }
            // save connection if connected successfully
            outgoing.push(connection);
        }
    }
    // receive from listener all connected (incoming)
    for _ in 0..total_connections {
        let connection = from_listener
            .recv()
            .await
            .expect("should receive connection from listener");
        incoming.push(connection);
    }
    // exchange hi's on both connection sets and start readers/writers
    let res = handshake::<P>(
        process_id,
        shard_id,
        to_workers,
        to_executors,
        tcp_flush_interval,
        channel_buffer_size,
        incoming,
        outgoing,
    )
    .await;
    Ok(res)
}
/// Exchanges `ProcessHi`s on both connection sets, then spawns a reader task
/// per incoming connection (`connections_0`) and a writer task per outgoing
/// connection (`connections_1`). Returns per-peer info and writer channels.
async fn handshake<P>(
    process_id: ProcessId,
    shard_id: ShardId,
    to_workers: ReaderToWorkers<P>,
    to_executors: ToExecutors<P>,
    tcp_flush_interval: Option<Duration>,
    channel_buffer_size: usize,
    mut connections_0: Vec<Connection>,
    mut connections_1: Vec<Connection>,
) -> (
    HashMap<ProcessId, (ShardId, IpAddr, Option<Duration>)>,
    HashMap<ProcessId, Vec<WriterSender<P>>>,
)
where
    P: Protocol + 'static,
{
    // say hi to all on both connections
    say_hi(process_id, shard_id, &mut connections_0).await;
    say_hi(process_id, shard_id, &mut connections_1).await;
    trace!("said hi to all processes");
    // receive hi from all on both connections
    let id_to_connection_0 = receive_hi(connections_0).await;
    let id_to_connection_1 = receive_hi(connections_1).await;
    // start readers and writers
    start_readers::<P>(to_workers, to_executors, id_to_connection_0);
    start_writers::<P>(
        shard_id,
        tcp_flush_interval,
        channel_buffer_size,
        id_to_connection_1,
    )
    .await
}
/// Sends a `ProcessHi` (this process' id and shard id) on every connection.
/// Send failures are logged but do not abort the broadcast.
async fn say_hi(
    process_id: ProcessId,
    shard_id: ShardId,
    connections: &mut Vec<Connection>,
) {
    // the greeting identifying this process
    let hi = ProcessHi {
        process_id,
        shard_id,
    };
    // broadcast the greeting, best-effort
    for connection in connections.iter_mut() {
        match connection.send(&hi).await {
            Ok(_) => (),
            Err(e) => {
                warn!("error while sending hi to connection: {:?}", e)
            }
        }
    }
}
/// Waits for a `ProcessHi` on every connection, returning each connection
/// tagged with the peer's process and shard ids. Panics if any connection
/// fails to deliver a greeting, since the handshake cannot proceed.
async fn receive_hi(
    connections: Vec<Connection>,
) -> Vec<(ProcessId, ShardId, Connection)> {
    let mut id_to_connection = Vec::with_capacity(connections.len());
    for mut connection in connections {
        match connection.recv().await {
            Some(ProcessHi {
                process_id,
                shard_id,
            }) => {
                id_to_connection.push((process_id, shard_id, connection));
            }
            None => panic!("error receiving hi"),
        }
    }
    id_to_connection
}
/// Spawns one reader task per connection. Every reader gets its own clones of
/// `ReaderToWorkers`/`ToExecutors` so it can forward incoming messages
/// straight to the responsible worker or executor.
fn start_readers<P>(
    to_workers: ReaderToWorkers<P>,
    to_executors: ToExecutors<P>,
    connections: Vec<(ProcessId, ShardId, Connection)>,
) where
    P: Protocol + 'static,
{
    for (peer_id, peer_shard_id, connection) in connections {
        // build the reader future with cloned forwarding channels, then
        // hand it off to the runtime
        let reader = reader_task::<P>(
            to_workers.clone(),
            to_executors.clone(),
            peer_id,
            peer_shard_id,
            connection,
        );
        task::spawn(reader);
    }
}
/// Spawns one writer task per connection and returns:
/// - per peer: its shard id, ip address and optional artificial delay;
/// - per peer: the channels used to send messages to it.
///
/// If a connection has a delay configured, a delay task is interposed between
/// the returned channel and the writer task.
async fn start_writers<P>(
    shard_id: ShardId,
    tcp_flush_interval: Option<Duration>,
    channel_buffer_size: usize,
    connections: Vec<(ProcessId, ShardId, Connection)>,
) -> (
    HashMap<ProcessId, (ShardId, IpAddr, Option<Duration>)>,
    HashMap<ProcessId, Vec<WriterSender<P>>>,
)
where
    P: Protocol + 'static,
{
    let mut ips = HashMap::with_capacity(connections.len());
    // mapping from process id to channel broadcast writer should write to
    let mut writers = HashMap::with_capacity(connections.len());
    // start one writer task per connection
    for (peer_id, peer_shard_id, connection) in connections {
        // save shard id, ip and connection delay; `delay` is queried once and
        // reused below (the previous version called `connection.delay()`
        // twice)
        let ip = connection
            .ip_addr()
            .expect("ip address should be set for outgoing connection");
        let delay = connection.delay();
        ips.insert(peer_id, (peer_shard_id, ip, delay));
        // get the list of writers to this process and create a new writer
        // channel
        let txs = writers.entry(peer_id).or_insert_with(Vec::new);
        let (mut writer_tx, writer_rx) = chan::channel(channel_buffer_size);
        // name the channel accordingly
        writer_tx.set_name(format!(
            "to_writer_{}_process_{}",
            txs.len(),
            peer_id
        ));
        // don't use a flush interval if this peer is in my region: a peer is
        // in my region if it has a different shard id
        let tcp_flush_interval = if peer_shard_id != shard_id {
            None
        } else {
            tcp_flush_interval
        };
        // spawn the writer task
        task::spawn(writer_task::<P>(
            tcp_flush_interval,
            connection,
            writer_rx,
        ));
        let tx = if let Some(delay) = delay {
            // if the connection has a delay, spawn a delay task for this
            // writer: messages are first forwarded to the delay task, which
            // then forwards them to the writer task
            let (mut delay_tx, delay_rx) = chan::channel(channel_buffer_size);
            // name the channel accordingly
            delay_tx.set_name(format!(
                "to_delay_{}_process_{}",
                txs.len(),
                peer_id
            ));
            task::spawn(delay::delay_task(delay_rx, writer_tx, delay));
            delay_tx
        } else {
            // if there's no connection delay, then send the messages directly
            // to the writer task
            writer_tx
        };
        // and add the new writer channel
        txs.push(tx);
    }
    (ips, writers)
}
/// Reader task: receives `POEMessage`s from a single peer connection and
/// forwards protocol messages (tagged with the peer's process and shard id)
/// to the workers, and execution info to the executors. Exits when the
/// connection stops yielding messages.
async fn reader_task<P>(
    mut reader_to_workers: ReaderToWorkers<P>,
    mut to_executors: ToExecutors<P>,
    process_id: ProcessId,
    shard_id: ShardId,
    mut connection: Connection,
) where
    P: Protocol + 'static,
{
    loop {
        match connection.recv::<POEMessage<P>>().await {
            Some(msg) => match msg {
                POEMessage::Protocol(msg) => {
                    // forward to the worker responsible for this message
                    let forward = reader_to_workers
                        .forward((process_id, shard_id, msg))
                        .await;
                    if let Err(e) = forward {
                        warn!("[reader] error notifying process task with new msg: {:?}",e);
                    }
                }
                POEMessage::Executor(execution_info) => {
                    trace!("[reader] to executor {:?}", execution_info);
                    // notify executor
                    if let Err(e) = to_executors.forward(execution_info).await {
                        warn!("[reader] error while notifying executor with new execution info: {:?}", e);
                    }
                }
            },
            None => {
                // a `None` means the connection was closed or errored
                warn!("[reader] error receiving message from connection");
                break;
            }
        }
    }
}
/// Writer task: receives messages from its parent channel and writes them to
/// a single peer connection. With a flush interval, writes are buffered and
/// the socket is flushed only on interval ticks; without one, every write is
/// flushed immediately. Exits when the parent channel closes.
async fn writer_task<P>(
    tcp_flush_interval: Option<Duration>,
    mut connection: Connection,
    mut parent: WriterReceiver<P>,
) where
    P: Protocol + 'static,
{
    // track whether there's been a flush error on this connection, so that it
    // is only logged once
    let mut flush_error = false;
    // if flush interval higher than 0, then flush periodically; otherwise,
    // flush on every write
    if let Some(tcp_flush_interval) = tcp_flush_interval {
        // create interval
        let mut interval = time::interval(tcp_flush_interval);
        loop {
            tokio::select! {
                msg = parent.recv() => {
                    if let Some(msg) = msg {
                        // connection write *doesn't* flush
                        if let Err(e) = connection.write(&*msg).await {
                            warn!("[writer] error writing message in connection: {:?}", e);
                        }
                    } else {
                        warn!("[writer] error receiving message from parent");
                        break;
                    }
                }
                _ = interval.tick() => {
                    // flush socket
                    if let Err(e) = connection.flush().await {
                        // make sure we only log the error once
                        if !flush_error {
                            warn!("[writer] error flushing connection: {:?}", e);
                            flush_error = true;
                        }
                    }
                }
            }
        }
    } else {
        loop {
            if let Some(msg) = parent.recv().await {
                // connection write *does* flush
                if let Err(e) = connection.send(&*msg).await {
                    warn!(
                        "[writer] error sending message to connection: {:?}",
                        e
                    );
                }
            } else {
                warn!("[writer] error receiving message from parent");
                break;
            }
        }
    }
    warn!("[writer] exiting after failure");
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/task/server/periodic.rs | fantoch/src/run/task/server/periodic.rs | use crate::protocol::Protocol;
use crate::run::prelude::*;
use crate::{trace, warn};
use tokio::time::{self, Duration, Instant, Interval};
// TODO: check async-timer for <1ms intervals
// https://github.com/DoumanAsh/async-timer/
/// Entry point of the periodic task: turns each (event, period) pair into a
/// ticking interval and runs the loop variant that additionally serves
/// inspect requests when an inspect channel was provided.
pub async fn periodic_task<P, R>(
    events: Vec<(P::PeriodicEvent, Duration)>,
    periodic_to_workers: PeriodicToWorkers<P, R>,
    to_periodic_inspect: Option<InspectReceiver<P, R>>,
) where
    P: Protocol + 'static,
    R: Clone + 'static,
{
    // create intervals
    let intervals = make_intervals(events);
    // pick the loop depending on whether there's an inspect channel
    if let Some(inspect_rx) = to_periodic_inspect {
        periodic_loop_with_inspect(intervals, periodic_to_workers, inspect_rx)
            .await
    } else {
        periodic_loop_without_inspect(intervals, periodic_to_workers).await
    }
}
/// Builds, for each (event, period) pair, the message to send on every tick
/// together with its `Interval`. The first tick fires one full period from
/// now rather than immediately.
fn make_intervals<P, R>(
    events: Vec<(P::PeriodicEvent, Duration)>,
) -> Vec<(FromPeriodicMessage<P, R>, Interval)>
where
    P: Protocol + 'static,
{
    let mut intervals = Vec::with_capacity(events.len());
    for (event, duration) in events {
        trace!("[periodic] event: {:?} | interval {:?}", event, duration);
        // wrap the event in the message forwarded to workers on every tick
        let event_msg = FromPeriodicMessage::Event(event);
        // schedule the first tick one period from now
        let first_tick = Instant::now()
            .checked_add(duration)
            .expect("first tick in periodic task should exist");
        let interval = time::interval_at(first_tick, duration);
        intervals.push((event_msg, interval));
    }
    intervals
}
/// Loop used when there is no inspect channel: only serves the periodic
/// intervals. Because `tokio::select!` requires a static number of branches,
/// each supported interval count (0..=3) gets its own arm; more than 3
/// periodic events panics.
async fn periodic_loop_without_inspect<P, R>(
    mut intervals: Vec<(FromPeriodicMessage<P, R>, Interval)>,
    mut periodic_to_workers: PeriodicToWorkers<P, R>,
) where
    P: Protocol + 'static,
    R: Clone + 'static,
{
    match intervals.len() {
        0 => {
            // nothing to do, loop forever
            loop {
                time::sleep(Duration::from_secs(60 * 60)).await;
            }
        }
        1 => {
            // single interval: no select needed, just await each tick
            let (event_msg0, mut interval0) = intervals.remove(0);
            loop {
                let _ = interval0.tick().await;
                // create event msg
                periodic_task_send_msg(
                    &mut periodic_to_workers,
                    event_msg0.clone(),
                )
                .await;
            }
        }
        2 => {
            let (event_msg0, mut interval0) = intervals.remove(0);
            let (event_msg1, mut interval1) = intervals.remove(0);
            loop {
                tokio::select! {
                    _ = interval0.tick() => {
                        periodic_task_send_msg(&mut periodic_to_workers, event_msg0.clone()).await;
                    }
                    _ = interval1.tick() => {
                        periodic_task_send_msg(&mut periodic_to_workers, event_msg1.clone()).await;
                    }
                }
            }
        }
        3 => {
            let (event_msg0, mut interval0) = intervals.remove(0);
            let (event_msg1, mut interval1) = intervals.remove(0);
            let (event_msg2, mut interval2) = intervals.remove(0);
            loop {
                tokio::select! {
                    _ = interval0.tick() => {
                        periodic_task_send_msg(&mut periodic_to_workers, event_msg0.clone()).await;
                    }
                    _ = interval1.tick() => {
                        periodic_task_send_msg(&mut periodic_to_workers, event_msg1.clone()).await;
                    }
                    _ = interval2.tick() => {
                        periodic_task_send_msg(&mut periodic_to_workers, event_msg2.clone()).await;
                    }
                }
            }
        }
        n => {
            panic!("number of periodic events {:?} not supported", n);
        }
    }
}
/// Loop used when an inspect channel exists: serves the periodic intervals
/// and, in every select, also inspect requests. Like the no-inspect variant,
/// `tokio::select!` requires a static number of branches, so at most 3
/// periodic events are supported.
async fn periodic_loop_with_inspect<P, R>(
    mut intervals: Vec<(FromPeriodicMessage<P, R>, Interval)>,
    mut periodic_to_workers: PeriodicToWorkers<P, R>,
    mut to_periodic_inspect: InspectReceiver<P, R>,
) where
    P: Protocol + 'static,
    R: Clone + 'static,
{
    // ensures the inspect-channel receive error is only logged once
    let mut error_shown = false;
    match intervals.len() {
        0 => loop {
            let inspect = to_periodic_inspect.recv().await;
            periodic_task_inspect(
                &mut periodic_to_workers,
                inspect,
                &mut error_shown,
            )
            .await;
        },
        1 => {
            let (event_msg0, mut interval0) = intervals.remove(0);
            loop {
                tokio::select! {
                    _ = interval0.tick() => {
                        periodic_task_send_msg(&mut periodic_to_workers, event_msg0.clone()).await;
                    }
                    inspect = to_periodic_inspect.recv() => {
                        periodic_task_inspect(&mut periodic_to_workers, inspect, &mut error_shown).await
                    }
                }
            }
        }
        2 => {
            let (event_msg0, mut interval0) = intervals.remove(0);
            let (event_msg1, mut interval1) = intervals.remove(0);
            loop {
                tokio::select! {
                    _ = interval0.tick() => {
                        periodic_task_send_msg(&mut periodic_to_workers, event_msg0.clone()).await;
                    }
                    _ = interval1.tick() => {
                        periodic_task_send_msg(&mut periodic_to_workers, event_msg1.clone()).await;
                    }
                    inspect = to_periodic_inspect.recv() => {
                        periodic_task_inspect(&mut periodic_to_workers, inspect, &mut error_shown).await
                    }
                }
            }
        }
        3 => {
            let (event_msg0, mut interval0) = intervals.remove(0);
            let (event_msg1, mut interval1) = intervals.remove(0);
            let (event_msg2, mut interval2) = intervals.remove(0);
            loop {
                tokio::select! {
                    _ = interval0.tick() => {
                        periodic_task_send_msg(&mut periodic_to_workers, event_msg0.clone()).await;
                    }
                    _ = interval1.tick() => {
                        periodic_task_send_msg(&mut periodic_to_workers, event_msg1.clone()).await;
                    }
                    _ = interval2.tick() => {
                        periodic_task_send_msg(&mut periodic_to_workers, event_msg2.clone()).await;
                    }
                    inspect = to_periodic_inspect.recv() => {
                        periodic_task_inspect(&mut periodic_to_workers, inspect, &mut error_shown).await
                    }
                }
            }
        }
        n => {
            panic!("number of periodic events {:?} not supported", n);
        }
    }
}
/// Forwards `msg` to the workers, logging (but not propagating) any failure.
async fn periodic_task_send_msg<P, R>(
    periodic_to_workers: &mut PeriodicToWorkers<P, R>,
    msg: FromPeriodicMessage<P, R>,
) where
    P: Protocol + 'static,
    R: Clone + 'static,
{
    match periodic_to_workers.forward(msg).await {
        Ok(_) => (),
        Err(e) => {
            warn!("[periodic] error sending message to workers: {:?}", e);
        }
    }
}
/// Handles a message received on the inspect channel: a successfully received
/// inspect function is wrapped in a `FromPeriodicMessage::Inspect` and
/// forwarded to the workers; a `None` (closed channel) is logged only once,
/// guarded by `error_shown`.
async fn periodic_task_inspect<P, R>(
    periodic_to_workers: &mut PeriodicToWorkers<P, R>,
    inspect: Option<InspectFun<P, R>>,
    error_shown: &mut bool,
) where
    P: Protocol + 'static,
    R: Clone + 'static,
{
    if let Some((inspect_fun, reply_chan)) = inspect {
        let inspect_msg = FromPeriodicMessage::Inspect(inspect_fun, reply_chan);
        periodic_task_send_msg(periodic_to_workers, inspect_msg).await;
    } else if !*error_shown {
        // `else if` (previously `else { if … }`): log the receive error once
        // to avoid spamming the log while the channel stays closed
        warn!("[periodic] error while receiving new inspect message");
        *error_shown = true;
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/task/server/metrics_logger.rs | fantoch/src/run/task/server/metrics_logger.rs | use crate::executor::ExecutorMetrics;
use crate::protocol::ProtocolMetrics;
use crate::run::prelude::*;
use crate::run::task;
use crate::HashMap;
use crate::{info, trace, warn};
use serde::{Deserialize, Serialize};
use tokio::time::{self, Duration};
pub const METRICS_INTERVAL: Duration = Duration::from_secs(5); // notify/flush every 5 seconds
/// Latest metrics reported by this process' workers and executors,
/// periodically serialized to disk by `metrics_logger_task`.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ProcessMetrics {
    // protocol metrics, indexed by worker index
    workers: HashMap<usize, ProtocolMetrics>,
    // executor metrics, indexed by executor index
    executors: HashMap<usize, ExecutorMetrics>,
}
impl ProcessMetrics {
    /// Creates an empty set of metrics.
    fn new() -> Self {
        Self {
            workers: HashMap::new(),
            executors: HashMap::new(),
        }
    }
    /// Merges the metrics reported by every worker into a single value.
    pub fn protocol_metrics(&self) -> ProtocolMetrics {
        self.workers
            .values()
            .fold(ProtocolMetrics::new(), |mut acc, worker_metrics| {
                acc.merge(worker_metrics);
                acc
            })
    }
    /// Merges the metrics reported by every executor into a single value.
    pub fn executor_metrics(&self) -> ExecutorMetrics {
        self.executors
            .values()
            .fold(ExecutorMetrics::new(), |mut acc, executor_metrics| {
                acc.merge(executor_metrics);
                acc
            })
    }
}
/// Collects the metrics periodically reported by workers and executors and,
/// every `METRICS_INTERVAL`, serializes the aggregate to `metrics_file`
/// (via a temporary file + rename, to avoid torn writes on shutdown).
pub async fn metrics_logger_task(
    metrics_file: String,
    mut from_workers: ProtocolMetricsReceiver,
    mut from_executors: ExecutorMetricsReceiver,
) {
    info!("[metrics_logger] started with log {}", metrics_file);
    // create metrics
    let mut global_metrics = ProcessMetrics::new();
    // create flush interval
    let mut interval = time::interval(METRICS_INTERVAL);
    loop {
        tokio::select! {
            metrics = from_workers.recv() => {
                trace!("[metrics_logger] from protocol worker: {:?}", metrics);
                if let Some((index, protocol_metrics)) = metrics {
                    // update metrics for this worker
                    global_metrics.workers.insert(index, protocol_metrics);
                } else {
                    warn!("[metrics_logger] error while receiving metrics from protocol worker");
                }
            }
            metrics = from_executors.recv() => {
                trace!("[metrics_logger] from executor: {:?}", metrics);
                if let Some((index, executor_metrics)) = metrics {
                    // update metrics for this executor
                    global_metrics.executors.insert(index, executor_metrics);
                } else {
                    warn!("[metrics_logger] error while receiving metrics from executor");
                }
            }
            _ = interval.tick() => {
                // First serialize to a temporary file, and then rename it. This makes it more
                // likely we won't end up with a corrupted file if we're shutdown in the middle
                // of this.
                let tmp = format!("{}_tmp", metrics_file);
                if let Err(e) = task::util::serialize_and_compress(&global_metrics, &tmp) {
                    panic!("[metrics_logger] couldn't serialize metrics: {:?}", e);
                }
                // rename file
                if let Err(e) = std::fs::rename(&tmp, &metrics_file) {
                    warn!("[metrics_logger] coudn't rename temporary metrics file: {:?}", e);
                }
            }
        }
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/task/client/pending.rs | fantoch/src/run/task/client/pending.rs | use crate::command::{Command, CommandResult};
use crate::hash_map::{Entry, HashMap};
use crate::id::Rifl;
use crate::trace;
// What a registered command is still waiting for before it's complete.
struct Expected {
    // number of `CommandResult`s expected: one per shard accessed
    shard_count: usize,
    // total number of per-key results expected across all shards
    total_key_count: usize,
}
// Tracks, per command, the `CommandResult`s received so far from each shard,
// and which batch rifls should be released once the command completes.
pub struct ShardsPending {
    // per-rifl expected counts and the results received so far
    pending: HashMap<Rifl, (Expected, Vec<CommandResult>)>,
    // maps a command's rifl to the rifls of all commands in its batch
    rifl_to_batch_rifls: HashMap<Rifl, Vec<Rifl>>,
}
impl ShardsPending {
    /// Creates an empty pending set.
    pub fn new() -> Self {
        Self {
            pending: Default::default(),
            rifl_to_batch_rifls: Default::default(),
        }
    }
    /// Registers a (possibly multi-shard) command, recording how many
    /// `CommandResult`s (one per shard) and per-key results it expects, and
    /// the rifls of the batch it belongs to.
    ///
    /// Panics if the command's rifl was already registered.
    pub fn register(&mut self, cmd: &Command, batch_rifls: Vec<Rifl>) {
        let rifl = cmd.rifl();
        trace!("c{}: register {:?}", rifl.source(), rifl);
        // add command to pending
        let expected = Expected {
            shard_count: cmd.shard_count(),
            total_key_count: cmd.total_key_count(),
        };
        let results = Vec::with_capacity(expected.shard_count);
        let res = self.pending.insert(rifl, (expected, results));
        assert!(res.is_none());
        // update mapping rifl -> batch rifls
        let res = self.rifl_to_batch_rifls.insert(rifl, batch_rifls);
        assert!(res.is_none());
    }
    // Add new `CommandResult`.
    // If some command got the `CommandResult`s from each of the shards
    // accessed, then return all the `Rifl`s in that batch.
    pub fn add(&mut self, result: CommandResult) -> Option<Vec<Rifl>> {
        let rifl = result.rifl();
        trace!("c{}: received {:?}", rifl.source(), rifl);
        // check if command is ready
        match self.pending.entry(rifl) {
            Entry::Occupied(mut entry) => {
                let (expected, results) = entry.get_mut();
                // add new result
                results.push(result);
                trace!(
                    "c{}: {:?} {}/{}",
                    rifl.source(),
                    rifl,
                    results.len(),
                    expected.shard_count
                );
                // return results if we have one `CommandResult` per shard
                // - TODO: add an assert checking that indeed these
                //   `CommandResult` came from different shards, and are not
                //   sent by the same shard
                if results.len() == expected.shard_count {
                    // assert that all keys accessed got a result; `iter` is
                    // used (instead of `into_iter` on the `&mut Vec`, which
                    // iterates by mutable reference) since read access
                    // suffices
                    let results_key_count: usize = results
                        .iter()
                        .map(|cmd_result| cmd_result.results().len())
                        .sum();
                    assert_eq!(results_key_count, expected.total_key_count);
                    // remove command from pending
                    entry.remove();
                    // return batch rifls associated with this rifl
                    let batch_rifls =
                        self.rifl_to_batch_rifls.remove(&rifl).expect(
                            "each rifl should be mapped to their batch rifls",
                        );
                    Some(batch_rifls)
                } else {
                    None
                }
            }
            Entry::Vacant(_) => panic!(
                "received command result about a rifl we didn't register for"
            ),
        }
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/task/client/batcher.rs | fantoch/src/run/task/client/batcher.rs | use super::batch::Batch;
use crate::command::Command;
use crate::id::ShardId;
use crate::run::chan::{ChannelReceiver, ChannelSender};
use crate::run::task;
use crate::warn;
use color_eyre::eyre::{eyre, Report};
use tokio::time::{self, Duration};
// Limits controlling when a batch is flushed.
struct BatchingConfig {
    // a batch is flushed immediately once it holds this many commands
    batch_max_size: usize,
    // a non-full batch is flushed once it has been open for this long
    batch_max_delay: Duration,
}
/// Batcher task: accumulates commands received on `from` into a `Batch` and
/// forwards the batch on `to` either when its deadline expires or as soon as
/// it reaches `batch_max_size` (handled inside `add_to_batch`). Exits when
/// either channel fails.
pub async fn batcher(
    mut from: ChannelReceiver<(ShardId, Command)>,
    mut to: ChannelSender<Batch>,
    batch_max_size: usize,
    batch_max_delay: Duration,
) {
    // create batching config
    let config = BatchingConfig {
        batch_max_size,
        batch_max_delay,
    };
    // variable to hold the next batch to be sent
    let mut next_batch = None;
    loop {
        match next_batch.as_ref() {
            None => {
                // no open batch: just wait for the next command
                let received = from.recv().await;
                let add_to_batch =
                    add_to_batch(received, &mut next_batch, &mut to, &config)
                        .await;
                if let Err(e) = add_to_batch {
                    warn!("[batcher] {:?}", e);
                    break;
                }
            }
            Some(batch) => {
                // open batch: race the batch deadline against new commands
                tokio::select! {
                    _ = time::sleep_until(batch.deadline()) => {
                        if let Err(e) = send_batch(&mut next_batch, &mut to).await {
                            warn!("[batcher] error forwarding batch: {:?}", e);
                            break;
                        }
                    }
                    received = from.recv() => {
                        let add_to_batch =
                            add_to_batch(received, &mut next_batch, &mut to, &config)
                                .await;
                        if let Err(e) = add_to_batch {
                            warn!("[batcher] {:?}", e);
                            break;
                        }
                    }
                }
            }
        }
    }
}
async fn add_to_batch(
received: Option<(ShardId, Command)>,
next_batch: &mut Option<Batch>,
to: &mut ChannelSender<Batch>,
config: &BatchingConfig,
) -> Result<(), Report> {
if let Some((target_shard, cmd)) = received {
match next_batch.as_mut() {
Some(batch) => {
batch.merge(target_shard, cmd);
}
None => {
// create a new batch only with this command
let deadline = task::util::deadline(config.batch_max_delay);
let batch = Batch::new(target_shard, cmd, deadline);
*next_batch = Some(batch);
}
}
let batch = next_batch.as_ref().expect("there should be a batch");
// if the batch was reached the batch max size, then send it right away
// (i.e., do not wait for its deadline)
if batch.size() == config.batch_max_size {
send_batch(next_batch, to).await?;
}
Ok(())
} else {
Err(eyre!("error receiving message from parent"))
}
}
/// Takes ownership of the current batch (leaving `None` behind) and forwards
/// it downstream. Panics if no batch is open.
async fn send_batch(
    next_batch: &mut Option<Batch>,
    to: &mut ChannelSender<Batch>,
) -> Result<(), Report> {
    let batch = next_batch.take();
    to.send(batch.expect("a batch should exist")).await
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/task/client/batch.rs | fantoch/src/run/task/client/batch.rs | use crate::command::Command;
use crate::id::{Rifl, ShardId};
use crate::HashMap;
use std::iter::FromIterator;
use tokio::time::Instant;
// A batch of client commands merged into a single `Command`.
#[derive(Debug)]
pub struct Batch {
    // all batched commands merged into one
    cmd: Command,
    // rifl of every command merged into this batch
    rifls: Vec<Rifl>,
    // instant at which the batch should be flushed
    deadline: Instant,
    // mapping from shard id to the number of times it was selected as the
    // target for the commands in this batch
    target_shards: HashMap<ShardId, usize>,
}
impl Batch {
    /// Creates a batch holding a single command, to be flushed by `deadline`.
    pub fn new(target_shard: ShardId, cmd: Command, deadline: Instant) -> Self {
        let rifl = cmd.rifl();
        let target_shards = HashMap::from_iter(vec![(target_shard, 1)]);
        Self {
            cmd,
            rifls: vec![rifl],
            deadline,
            target_shards,
        }
    }
    /// Merges `other` into this batch. `target_shard` must be one of the
    /// shards accessed by `other`.
    pub fn merge(&mut self, target_shard: ShardId, other: Command) {
        // sanity check: the chosen target must be accessed by the command
        assert!(other.shards().any(|shard_id| shard_id == &target_shard));
        let rifl = other.rifl();
        self.cmd.merge(other);
        // record this command's rifl and bump the target shard's count
        self.rifls.push(rifl);
        *self.target_shards.entry(target_shard).or_default() += 1;
    }
    /// Instant at which the batch should be flushed.
    pub fn deadline(&self) -> Instant {
        self.deadline
    }
    #[cfg(test)]
    pub fn rifls(&self) -> &Vec<Rifl> {
        &self.rifls
    }
    /// Number of commands merged into this batch.
    pub fn size(&self) -> usize {
        self.rifls.len()
    }
    /// Consumes the batch, yielding its target shard, the merged command and
    /// the rifls of all merged commands.
    pub fn unpack(self) -> (ShardId, Command, Vec<Rifl>) {
        let target_shard = self.target_shard();
        (target_shard, self.cmd, self.rifls)
    }
    /// Computes the target shard as the shard most selected as the target
    /// shard. Assume that the batch is non-empty.
    fn target_shard(&self) -> ShardId {
        assert!(self.size() > 0);
        // rank shards by (count, shard id); after sorting, the last entry is
        // the most-selected shard (ties broken by the highest shard id)
        let mut ranked: Vec<_> = self
            .target_shards
            .iter()
            .map(|(shard_id, count)| (count, shard_id))
            .collect();
        ranked.sort_unstable();
        let (_, shard_id) = ranked.pop().unwrap();
        *shard_id
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::kvs::{KVOp, Key};
    // End-to-end check of `Batch`: merges commands targeting two shards and
    // verifies rifls, size, the majority-based target shard and the merged
    // command's key layout.
    #[test]
    fn batch_test() {
        let rifl1 = Rifl::new(1, 1);
        let rifl2 = Rifl::new(1, 2);
        let rifl3 = Rifl::new(1, 3);
        let rifl4 = Rifl::new(1, 4);
        let rifl5 = Rifl::new(1, 5);
        // helper: a single-shard, single-key GET command
        let create_command = |rifl: Rifl, shard_id: ShardId, key: Key| {
            let mut shard_ops = HashMap::new();
            shard_ops.insert(key, vec![KVOp::Get]);
            let mut shard_to_ops = HashMap::new();
            shard_to_ops.insert(shard_id, shard_ops);
            Command::new(rifl, shard_to_ops)
        };
        // shard 1
        let shard1: ShardId = 1;
        let key_a = String::from("A");
        let cmd1 = create_command(rifl1, shard1, key_a.clone());
        let cmd2 = create_command(rifl2, shard1, key_a.clone());
        // shard 2
        let shard2: ShardId = 2;
        let key_b = String::from("B");
        let key_c = String::from("C");
        let cmd3 = create_command(rifl3, shard2, key_b.clone());
        let cmd4 = create_command(rifl4, shard2, key_b.clone());
        let cmd5 = create_command(rifl5, shard2, key_c.clone());
        let mut batch = Batch::new(shard1, cmd1, Instant::now());
        assert_eq!(batch.rifls(), &vec![rifl1]);
        assert_eq!(batch.size(), 1);
        assert_eq!(batch.target_shard(), shard1);
        batch.merge(shard1, cmd2);
        assert_eq!(batch.rifls(), &vec![rifl1, rifl2]);
        assert_eq!(batch.size(), 2);
        assert_eq!(batch.target_shard(), shard1);
        batch.merge(shard2, cmd3);
        assert_eq!(batch.rifls(), &vec![rifl1, rifl2, rifl3]);
        assert_eq!(batch.size(), 3);
        assert_eq!(batch.target_shard(), shard1);
        batch.merge(shard2, cmd4);
        assert_eq!(batch.rifls(), &vec![rifl1, rifl2, rifl3, rifl4]);
        assert_eq!(batch.size(), 4);
        // at this point the target shard can be either as both have the same
        // count
        assert!(
            batch.target_shard() == shard1 || batch.target_shard() == shard2
        );
        batch.merge(shard2, cmd5);
        assert_eq!(batch.rifls(), &vec![rifl1, rifl2, rifl3, rifl4, rifl5]);
        assert_eq!(batch.size(), 5);
        assert_eq!(batch.target_shard(), shard2);
        // check that the merge has occurred
        assert_eq!(batch.cmd.shard_count(), 2);
        assert_eq!(batch.cmd.key_count(shard1), 1);
        assert_eq!(batch.cmd.key_count(shard2), 2);
        let shard1_keys: Vec<_> = batch.cmd.keys(shard1).collect();
        assert_eq!(shard1_keys.len(), 1);
        assert!(shard1_keys.contains(&&key_a));
        let shard2_keys: Vec<_> = batch.cmd.keys(shard2).collect();
        assert_eq!(shard2_keys.len(), 2);
        assert!(shard2_keys.contains(&&key_b));
        assert!(shard2_keys.contains(&&key_c));
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/task/client/mod.rs | fantoch/src/run/task/client/mod.rs | // Implementation of the read-write task;
mod rw;
// Implementation of `ShardsPending`.
mod pending;
// Definition of `Batch`.
mod batch;
// Implementation of a batcher.
mod batcher;
// Implementation of an unbatcher.
mod unbatcher;
use crate::client::{Client, ClientData, Workload};
use crate::command::{Command, CommandResult};
use crate::hash_map::HashMap;
use crate::id::{ClientId, ProcessId, Rifl, ShardId};
use crate::run::chan::{self, ChannelReceiver, ChannelSender};
use crate::run::prelude::*;
use crate::run::rw::Connection;
use crate::run::task;
use crate::time::{RunTime, SysTime};
use crate::HashSet;
use crate::{info, trace, warn};
use color_eyre::Report;
use futures::stream::{FuturesUnordered, StreamExt};
use std::fmt::Debug;
use std::time::Duration;
use tokio::net::ToSocketAddrs;
// maximum number of client worker tasks; clients are multiplexed across them
const MAX_CLIENT_CONNECTIONS: usize = 32;
/// Runs all clients in `ids` against the processes at `addresses` (one per
/// shard), aggregating their metrics when they finish. Clients run open-loop
/// when an `interval` is given and closed-loop otherwise.
pub async fn client<A>(
    ids: Vec<ClientId>,
    addresses: Vec<A>,
    interval: Option<Duration>,
    workload: Workload,
    batch_max_size: usize,
    batch_max_delay: Duration,
    connect_retries: usize,
    tcp_nodelay: bool,
    channel_buffer_size: usize,
    status_frequency: Option<usize>,
    metrics_file: Option<String>,
) -> Result<(), Report>
where
    A: ToSocketAddrs + Clone + Debug + Send + 'static + Sync,
{
    // create client pool
    let mut pool = Vec::with_capacity(MAX_CLIENT_CONNECTIONS);
    // init each entry
    pool.resize_with(MAX_CLIENT_CONNECTIONS, Vec::new);
    // assign each client to a client worker (round-robin over pool entries)
    ids.into_iter().enumerate().for_each(|(index, client_id)| {
        let index = index % MAX_CLIENT_CONNECTIONS;
        pool[index].push(client_id);
    });
    // start each client worker in pool; note `filter_map` is lazy, so the
    // tasks are only spawned when the iterator is collected below
    let handles = pool.into_iter().filter_map(|client_ids| {
        // only start a client for this pool index if any client id was assigned
        // to it
        if !client_ids.is_empty() {
            // start the open loop client if some interval was provided
            let handle = if let Some(interval) = interval {
                task::spawn(open_loop_client::<A>(
                    client_ids,
                    addresses.clone(),
                    interval,
                    workload,
                    batch_max_size,
                    batch_max_delay,
                    connect_retries,
                    tcp_nodelay,
                    channel_buffer_size,
                    status_frequency,
                ))
            } else {
                task::spawn(closed_loop_client::<A>(
                    client_ids,
                    addresses.clone(),
                    workload,
                    batch_max_size,
                    batch_max_delay,
                    connect_retries,
                    tcp_nodelay,
                    channel_buffer_size,
                    status_frequency,
                ))
            };
            Some(handle)
        } else {
            None
        }
    });
    // wait for all clients to complete and aggregate their metrics
    let mut data = ClientData::new();
    let mut handles = handles.collect::<FuturesUnordered<_>>();
    while let Some(join_result) = handles.next().await {
        let clients = join_result?.expect("client should run correctly");
        for client in clients {
            info!("client {} ended", client.id());
            data.merge(client.data());
            info!("metrics from {} collected", client.id());
        }
    }
    if let Some(file) = metrics_file {
        info!("will write client data to {}", file);
        task::util::serialize_and_compress(&data, &file)?;
    }
    info!("all clients ended");
    Ok(())
}
/// Closed-loop client worker: each client issues its next command only after
/// the previous one completed. Returns the clients (with their collected
/// data) once all of them finished, or `None` if setup failed.
async fn closed_loop_client<A>(
    client_ids: Vec<ClientId>,
    addresses: Vec<A>,
    workload: Workload,
    batch_max_size: usize,
    batch_max_delay: Duration,
    connect_retries: usize,
    tcp_nodelay: bool,
    channel_buffer_size: usize,
    status_frequency: Option<usize>,
) -> Option<Vec<Client>>
where
    A: ToSocketAddrs + Clone + Debug + Send + 'static + Sync,
{
    // create system time
    let time = RunTime;
    // setup client
    let (mut clients, mut unbatcher_rx, mut batcher_tx) = client_setup(
        client_ids,
        addresses,
        workload,
        batch_max_size,
        batch_max_delay,
        connect_retries,
        tcp_nodelay,
        channel_buffer_size,
        status_frequency,
    )
    .await?;
    // track which clients are finished (i.e. all their commands have completed)
    let mut finished = HashSet::with_capacity(clients.len());
    // track which clients are workload finished
    let mut workload_finished = HashSet::with_capacity(clients.len());
    // generate the first message of each client
    for client in clients.values_mut() {
        cmd_send(client, &time, &mut batcher_tx, &mut workload_finished).await;
    }
    // wait for results and generate/submit new commands while there are
    // commands to be generated
    while finished.len() < clients.len() {
        // and wait for next result
        let from_unbatcher = unbatcher_rx.recv().await;
        let ready_clients =
            cmd_recv(&mut clients, &time, from_unbatcher, &mut finished);
        for client_id in ready_clients {
            let client = clients
                .get_mut(&client_id)
                .expect("[client] ready client should exist");
            // if client hasn't finished, issue a new command
            cmd_send(client, &time, &mut batcher_tx, &mut workload_finished)
                .await;
        }
    }
    assert_eq!(workload_finished.len(), finished.len());
    // return clients
    Some(
        clients
            .into_iter()
            .map(|(_client_id, client)| client)
            .collect(),
    )
}
/// Open-loop client worker: every `interval` tick each client that hasn't
/// exhausted its workload submits a new command, independently of whether
/// previous commands completed. Returns the clients once all finished, or
/// `None` if setup failed.
async fn open_loop_client<A>(
    client_ids: Vec<ClientId>,
    addresses: Vec<A>,
    interval: Duration,
    workload: Workload,
    batch_max_size: usize,
    batch_max_delay: Duration,
    connect_retries: usize,
    tcp_nodelay: bool,
    channel_buffer_size: usize,
    status_frequency: Option<usize>,
) -> Option<Vec<Client>>
where
    A: ToSocketAddrs + Clone + Debug + Send + 'static + Sync,
{
    // create system time
    let time = RunTime;
    // setup client
    let (mut clients, mut unbatcher_rx, mut batcher_tx) = client_setup(
        client_ids,
        addresses,
        workload,
        batch_max_size,
        batch_max_delay,
        connect_retries,
        tcp_nodelay,
        channel_buffer_size,
        status_frequency,
    )
    .await?;
    // create submission interval
    let mut interval = tokio::time::interval(interval);
    // track which clients are finished (i.e. all their commands have completed)
    let mut finished = HashSet::with_capacity(clients.len());
    // track which clients are workload finished
    let mut workload_finished = HashSet::with_capacity(clients.len());
    while finished.len() < clients.len() {
        tokio::select! {
            from_unbatcher = unbatcher_rx.recv() => {
                cmd_recv(
                    &mut clients,
                    &time,
                    from_unbatcher,
                    &mut finished,
                );
            }
            _ = interval.tick() => {
                // submit new command on every tick for each connected client
                // (if there are still commands to be generated)
                for (client_id, client) in clients.iter_mut(){
                    // if the client hasn't finished, try to issue a new command
                    if !workload_finished.contains(client_id) {
                        cmd_send(client, &time, &mut batcher_tx, &mut workload_finished).await;
                    }
                }
            }
        }
    }
    assert_eq!(workload_finished.len(), finished.len());
    // return clients
    Some(
        clients
            .into_iter()
            .map(|(_client_id, client)| client)
            .collect(),
    )
}
/// Connects to one process per shard, performs the client hi-exchange, starts
/// the read-write tasks and finally the batcher/unbatcher tasks. Returns the
/// created clients plus the channels to receive completed rifls and to submit
/// commands; `None` if the hi-exchange fails.
async fn client_setup<A>(
    client_ids: Vec<ClientId>,
    addresses: Vec<A>,
    workload: Workload,
    batch_max_size: usize,
    batch_max_delay: Duration,
    client_retries: usize,
    tcp_nodelay: bool,
    channel_buffer_size: usize,
    status_frequency: Option<usize>,
) -> Option<(
    HashMap<ClientId, Client>,
    ChannelReceiver<Vec<Rifl>>,
    ChannelSender<(ShardId, Command)>,
)>
where
    A: ToSocketAddrs + Clone + Debug + Send + 'static + Sync,
{
    let mut shard_to_process = HashMap::with_capacity(addresses.len());
    let mut connections = Vec::with_capacity(addresses.len());
    // connect to each address (one per shard)
    let tcp_buffer_size = 0;
    for address in addresses {
        let connect = task::connect(
            address,
            tcp_nodelay,
            tcp_buffer_size,
            client_retries,
        );
        let mut connection = match connect.await {
            Ok(connection) => connection,
            Err(e) => {
                // TODO panicking here as not sure how to make error handling
                // send + 'static (required by tokio::spawn) and
                // still be able to use the ? operator
                panic!(
                    "[client] error connecting at clients {:?}: {:?}",
                    client_ids, e
                );
            }
        };
        // say hi
        let (process_id, shard_id) =
            client_say_hi(client_ids.clone(), &mut connection).await?;
        // update set of processes to be discovered by the client
        assert!(shard_to_process.insert(shard_id, process_id).is_none(), "client shouldn't try to connect to the same shard more than once, only to the closest one");
        // update list of connected processes
        connections.push((process_id, connection));
    }
    // start client read-write task
    let (read, mut process_to_writer) = rw::start_client_rw_tasks(
        &client_ids,
        channel_buffer_size,
        connections,
    );
    // create mapping from shard id to client read-write task
    let shard_to_write = shard_to_process
        .into_iter()
        .map(|(shard_id, process_id)| {
            let writer = process_to_writer
                .remove(&process_id)
                .expect("a rw-task should exist for each process id");
            (shard_id, writer)
        })
        .collect();
    assert!(
        process_to_writer.is_empty(),
        "all rw-tasks should be associated with some shard"
    );
    // create clients
    let clients = client_ids
        .iter()
        .map(|&client_id| {
            let client = Client::new(client_id, workload, status_frequency);
            // no need to discover as the `unbatcher` will do the job of
            // selecting the closest process
            (client_id, client)
        })
        .collect();
    spawn_batcher_and_unbatcher(
        client_ids,
        batch_max_size,
        batch_max_delay,
        clients,
        channel_buffer_size,
        read,
        shard_to_write,
    )
    .await
}
/// Creates the three channels of the client pipeline
/// (caller -> batcher -> unbatcher -> caller), spawns the batcher and
/// unbatcher tasks, and returns the clients together with the endpoints the
/// caller uses to submit commands and receive completed rifls.
async fn spawn_batcher_and_unbatcher(
    client_ids: Vec<ClientId>,
    batch_max_size: usize,
    batch_max_delay: Duration,
    clients: HashMap<ClientId, Client>,
    channel_buffer_size: usize,
    read: ChannelReceiver<CommandResult>,
    shard_to_writer: HashMap<ShardId, ChannelSender<ClientToServer>>,
) -> Option<(
    HashMap<ClientId, Client>,
    ChannelReceiver<Vec<Rifl>>,
    ChannelSender<(ShardId, Command)>,
)> {
    // compute the ids representation once; it's used in every channel name
    let ids = super::util::ids_repr(&client_ids);
    // channel: caller -> batcher
    let (mut batcher_tx, batcher_rx) = chan::channel(channel_buffer_size);
    batcher_tx.set_name(format!("to_batcher_{}", ids));
    // channel: batcher -> unbatcher
    let (mut to_unbatcher_tx, to_unbatcher_rx) =
        chan::channel(channel_buffer_size);
    to_unbatcher_tx.set_name(format!("to_unbatcher_{}", ids));
    // channel: unbatcher -> caller
    let (mut to_client_tx, to_client_rx) = chan::channel(channel_buffer_size);
    to_client_tx.set_name(format!("to_client_{}", ids));
    // spawn the batcher task
    task::spawn(batcher::batcher(
        batcher_rx,
        to_unbatcher_tx,
        batch_max_size,
        batch_max_delay,
    ));
    // spawn the unbatcher task
    task::spawn(unbatcher::unbatcher(
        to_unbatcher_rx,
        to_client_tx,
        read,
        shard_to_writer,
    ));
    // hand back the clients and their endpoints into the pipeline
    Some((clients, to_client_rx, batcher_tx))
}
/// Asks `client` for its next command and forwards it to the batcher; once
/// the client's workload has no more commands, records the client id in
/// `workload_finished` instead.
async fn cmd_send(
    client: &mut Client,
    time: &dyn SysTime,
    to_batcher: &mut ChannelSender<(ShardId, Command)>,
    workload_finished: &mut HashSet<ClientId>,
) {
    match client.cmd_send(time) {
        Some(next) => {
            if let Err(e) = to_batcher.send(next).await {
                warn!("[client] error forwarding batch: {:?}", e);
            }
        }
        None => {
            // no command was generated: the workload must be done
            assert!(client.workload_finished());
            assert!(workload_finished.insert(client.id()));
        }
    }
}
/// Feeds rifls received from the unbatcher into their owning clients and
/// returns the ids of the clients whose command just completed.
///
/// # Panics
///
/// Panics if the channel from the unbatcher was closed (`None`).
fn cmd_recv(
    clients: &mut HashMap<ClientId, Client>,
    time: &dyn SysTime,
    from_unbatcher: Option<Vec<Rifl>>,
    finished: &mut HashSet<ClientId>,
) -> Vec<ClientId> {
    match from_unbatcher {
        Some(rifls) => do_cmd_recv(clients, time, rifls, finished),
        None => {
            panic!("[client] error while receiving message from client read-write task");
        }
    }
}
/// Delivers each rifl to the client that issued it, recording in `finished`
/// any client that has fully completed; returns the owner of every
/// delivered rifl.
fn do_cmd_recv(
    clients: &mut HashMap<ClientId, Client>,
    time: &dyn SysTime,
    rifls: Vec<Rifl>,
    finished: &mut HashSet<ClientId>,
) -> Vec<ClientId> {
    let mut owners = Vec::with_capacity(rifls.len());
    for rifl in rifls {
        // the rifl's source identifies the issuing client
        let client_id = rifl.source();
        let client = clients
            .get_mut(&client_id)
            .expect("[client] command result should belong to a client");
        // deliver the command result
        client.cmd_recv(rifl, time);
        if client.finished() {
            // the client's workload is done and nothing is pending anymore
            info!("client {:?} exited loop", client_id);
            assert!(finished.insert(client_id));
        }
        owners.push(client_id);
    }
    owners
}
/// Performs the hello handshake on `connection`: sends the client ids and
/// waits for the process to reply with its process and shard identifiers.
/// Returns `None` if no reply arrives.
async fn client_say_hi(
    client_ids: Vec<ClientId>,
    connection: &mut Connection,
) -> Option<(ProcessId, ShardId)> {
    trace!("[client] will say hi with ids {:?}", client_ids);
    // send our ids to the process
    let hi = ClientHi(client_ids.clone());
    if let Err(e) = connection.send(&hi).await {
        warn!("[client] error while sending hi: {:?}", e);
    }
    // wait for the process to identify itself
    match connection.recv().await {
        Some(ProcessHi {
            process_id,
            shard_id,
        }) => {
            trace!(
                "[client] clients {:?} received hi from process {} with shard id {}",
                client_ids,
                process_id,
                shard_id
            );
            Some((process_id, shard_id))
        }
        _ => {
            warn!("[client] clients {:?} couldn't receive process id from connected process", client_ids);
            None
        }
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/task/client/unbatcher.rs | fantoch/src/run/task/client/unbatcher.rs | use super::batch::Batch;
use super::pending::ShardsPending;
use crate::command::CommandResult;
use crate::id::{Rifl, ShardId};
use crate::run::chan::{ChannelReceiver, ChannelSender};
use crate::run::prelude::ClientToServer;
use crate::warn;
use crate::HashMap;
use color_eyre::eyre::{eyre, Report};
/// Task that sits between the batcher and the server connections: batches
/// coming from the batcher are shipped to the proper shards, and per-shard
/// command results are aggregated before rifls are released back to the
/// client. Exits when either side of the pipeline closes.
pub async fn unbatcher(
    mut from: ChannelReceiver<Batch>,
    mut to: ChannelSender<Vec<Rifl>>,
    mut read: ChannelReceiver<CommandResult>,
    mut shard_to_writer: HashMap<ShardId, ChannelSender<ClientToServer>>,
) {
    // commands still waiting for results from some shard
    let mut pending = ShardsPending::new();
    loop {
        tokio::select! {
            from_batcher = from.recv() => {
                if let Err(e) = handle_from_batcher(from_batcher, &mut shard_to_writer, &mut pending).await {
                    warn!("[unbatcher] {:?}", e);
                    break;
                }
            }
            from_server = read.recv() => {
                if let Err(e) = handle_from_server(from_server, &mut to, &mut pending).await {
                    warn!("[unbatcher] {:?}", e);
                    break;
                }
            }
        }
    }
}
async fn handle_from_batcher(
batch: Option<Batch>,
shard_to_writer: &mut HashMap<ShardId, ChannelSender<ClientToServer>>,
pending: &mut ShardsPending,
) -> Result<(), Report> {
if let Some(batch) = batch {
handle_batch(batch, shard_to_writer, pending).await;
Ok(())
} else {
Err(eyre!("error receiving message from parent"))
}
}
/// Registers a batch's command in `pending` and ships it to every shard it
/// accesses: non-target shards only register the command, while the target
/// shard receives the actual submission.
async fn handle_batch(
    batch: Batch,
    shard_to_writer: &mut HashMap<ShardId, ChannelSender<ClientToServer>>,
    pending: &mut ShardsPending,
) {
    let (target_shard, cmd, rifls) = batch.unpack();
    // track the command so that `CommandResult`s from several shards can be
    // aggregated when the command accesses more than one shard
    pending.register(&cmd, rifls);
    // step 1: every shard but the target one simply registers the command
    for shard in cmd.shards().filter(|shard| **shard != target_shard) {
        send_to_shard(shard_to_writer, shard, ClientToServer::Register(cmd.clone())).await
    }
    // step 2: the target shard gets the submission itself
    send_to_shard(shard_to_writer, &target_shard, ClientToServer::Submit(cmd)).await
}
/// Looks up the rw-task writer responsible for `shard_id` and forwards
/// `msg` to it; a failed send is logged but not propagated.
///
/// # Panics
///
/// Panics if no writer is registered for `shard_id`.
async fn send_to_shard(
    shard_to_writer: &mut HashMap<ShardId, ChannelSender<ClientToServer>>,
    shard_id: &ShardId,
    msg: ClientToServer,
) {
    // find process writer
    let writer = shard_to_writer
        .get_mut(shard_id)
        // fixed typo in the panic message ("dind't" -> "didn't")
        .expect("[unbatcher] didn't find writer for target shard");
    if let Err(e) = writer.send(msg).await {
        warn!(
            "[unbatcher] error while sending message to client rw task: {:?}",
            e
        );
    }
}
async fn handle_from_server(
cmd_result: Option<CommandResult>,
to: &mut ChannelSender<Vec<Rifl>>,
pending: &mut ShardsPending,
) -> Result<(), Report> {
if let Some(cmd_result) = cmd_result {
handle_cmd_result(cmd_result, to, pending).await;
Ok(())
} else {
Err(eyre!("error receiving message from parent"))
}
}
/// Adds a single `CommandResult` to `pending`; once `pending` reports the
/// command complete, the finished rifls are sent back to the client.
async fn handle_cmd_result(
    cmd_result: CommandResult,
    to: &mut ChannelSender<Vec<Rifl>>,
    pending: &mut ShardsPending,
) {
    // `add` only returns the rifls once all shard results have arrived
    let rifls = match pending.add(cmd_result) {
        Some(rifls) => rifls,
        None => return,
    };
    if let Err(e) = to.send(rifls).await {
        warn!("[unbatcher] error while sending message to client: {:?}", e);
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/run/task/client/rw.rs | fantoch/src/run/task/client/rw.rs | use crate::command::CommandResult;
use crate::hash_map::HashMap;
use crate::id::{ClientId, ProcessId};
use crate::run::chan::{self, ChannelReceiver, ChannelSender};
use crate::run::prelude::*;
use crate::run::rw::Connection;
use crate::run::task;
use crate::{trace, warn};
/// Spawns one read-write task per server connection. Every task writes into
/// a single shared server-to-client channel (returned as the receiver),
/// while each task gets its own client-to-server channel (returned keyed by
/// process id).
pub fn start_client_rw_tasks(
    client_ids: &Vec<ClientId>,
    channel_buffer_size: usize,
    connections: Vec<(ProcessId, Connection)>,
) -> (
    ChannelReceiver<CommandResult>,
    HashMap<ProcessId, ChannelSender<ClientToServer>>,
) {
    // a single shared channel: the client reads from one place regardless of
    // which shard a message came from
    let (mut from_servers_tx, from_servers_rx) =
        chan::channel(channel_buffer_size);
    from_servers_tx.set_name(format!(
        "server_to_client_{}",
        task::util::ids_repr(&client_ids)
    ));
    let mut writers = HashMap::with_capacity(connections.len());
    for (process_id, connection) in connections {
        // one dedicated channel per rw task, since commands may target
        // different shards
        let (mut to_server_tx, to_server_rx) =
            chan::channel(channel_buffer_size);
        to_server_tx.set_name(format!(
            "client_to_server_{}_{}",
            process_id,
            task::util::ids_repr(&client_ids)
        ));
        // spawn the rw task for this connection
        task::spawn(client_rw_task(
            connection,
            from_servers_tx.clone(),
            to_server_rx,
        ));
        writers.insert(process_id, to_server_tx);
    }
    (from_servers_rx, writers)
}
/// Shuttles messages between one server connection and the parent client:
/// whatever the server sends goes to the parent, and whatever the parent
/// sends goes to the server. The loop exits when either side closes.
async fn client_rw_task(
    mut connection: Connection,
    mut to_parent: ServerToClientSender,
    mut from_parent: ClientToServerReceiver,
) {
    loop {
        tokio::select! {
            to_client = connection.recv() => {
                trace!("[client_rw] to client: {:?}", to_client);
                match to_client {
                    Some(to_client) => {
                        if let Err(e) = to_parent.send(to_client).await {
                            warn!("[client_rw] error while sending message from server to parent: {:?}", e);
                        }
                    }
                    None => {
                        warn!("[client_rw] error while receiving message from server to parent");
                        break;
                    }
                }
            }
            to_server = from_parent.recv() => {
                trace!("[client_rw] from client: {:?}", to_server);
                match to_server {
                    Some(to_server) => {
                        if let Err(e) = connection.send(&to_server).await {
                            warn!("[client_rw] error while sending message to server: {:?}", e);
                        }
                    }
                    None => {
                        // the parent (the client) is done, so exit the loop
                        warn!("[client_rw] error while receiving message from parent to server");
                        break;
                    }
                }
            }
        }
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/client/pending.rs | fantoch/src/client/pending.rs | use crate::id::Rifl;
use crate::time::SysTime;
use crate::HashMap;
use std::time::Duration;
/// Tracks, for every in-flight command, the time at which it was started so
/// that its latency can be computed on completion.
#[derive(Default)]
pub struct Pending {
    /// mapping from Rifl to command start time (in micros)
    pending: HashMap<Rifl, u64>,
}
impl Pending {
    /// Creates an empty `Pending`.
    pub fn new() -> Self {
        Self::default()
    }
    /// Starts tracking a command, recording the current time (in micros) as
    /// its start time.
    ///
    /// # Panics
    ///
    /// Panics if `rifl` is already being tracked.
    pub fn start(&mut self, rifl: Rifl, time: &dyn SysTime) {
        let started_at = time.micros();
        // TODO: replace with `.expect_none` once it's stabilized
        let previous = self.pending.insert(rifl, started_at);
        if previous.is_some() {
            panic!("the same rifl can't be inserted twice in client pending list of commands");
        }
    }
    /// Stops tracking a command, returning its latency together with the
    /// time (in millis) at which it ended.
    ///
    /// # Panics
    ///
    /// Panics if `rifl` was never started or if time went backwards.
    pub fn end(&mut self, rifl: Rifl, time: &dyn SysTime) -> (Duration, u64) {
        let started_at = self
            .pending
            .remove(&rifl)
            .expect("can't end a command if a command has not started");
        let ended_at = time.micros();
        // time must be monotonic
        assert!(started_at <= ended_at);
        let latency = Duration::from_micros(ended_at - started_at);
        // report the end time in milliseconds
        (latency, ended_at / 1000)
    }
    /// Returns `true` when no command is being tracked.
    pub fn is_empty(&self) -> bool {
        self.pending.is_empty()
    }
}
#[cfg(test)]
mod tests {
    //! Unit tests for `Pending`: the happy-path flow plus the two
    //! double-start/double-end panic cases.
    use super::*;
    use crate::client::RiflGen;
    use crate::time::SimTime;
    #[test]
    fn pending_flow() {
        // create pending
        let mut pending = Pending::new();
        // create rifl gen and 3 rifls
        let source = 10;
        let mut rifl_gen = RiflGen::new(source);
        let rifl1 = rifl_gen.next_id();
        let rifl2 = rifl_gen.next_id();
        let rifl3 = rifl_gen.next_id();
        // create sys time
        let mut time = SimTime::new();
        // pending starts empty
        assert!(pending.is_empty());
        // start first rifl at time 0
        pending.start(rifl1, &time);
        // pending is not empty now
        assert!(!pending.is_empty());
        // start second rifl at time 10
        time.add_millis(10);
        pending.start(rifl2, &time);
        // pending is not empty
        assert!(!pending.is_empty());
        // end first rifl at time 11
        time.add_millis(1);
        let (latency, return_time) = pending.end(rifl1, &time);
        // rifl1 was started at 0, so its latency is the full 11ms
        assert_eq!(latency.as_millis(), 11);
        assert_eq!(return_time, 11);
        // pending is not empty
        assert!(!pending.is_empty());
        // start third rifl at time 15
        time.add_millis(4);
        pending.start(rifl3, &time);
        // pending is not empty
        assert!(!pending.is_empty());
        // end third rifl at time 16
        time.add_millis(1);
        let (latency, return_time) = pending.end(rifl3, &time);
        assert_eq!(latency.as_millis(), 1);
        assert_eq!(return_time, 16);
        // pending is not empty
        assert!(!pending.is_empty());
        // end second rifl at time 20
        time.add_millis(4);
        let (latency, return_time) = pending.end(rifl2, &time);
        assert_eq!(latency.as_millis(), 10);
        assert_eq!(return_time, 20);
        // pending is empty now
        assert!(pending.is_empty());
    }
    #[test]
    #[should_panic]
    fn double_start() {
        // create pending
        let mut pending = Pending::new();
        // create rifl gen and 1 rifl
        let source = 10;
        let mut rifl_gen = RiflGen::new(source);
        let rifl1 = rifl_gen.next_id();
        // create sys time
        let time = SimTime::new();
        // start rifl1 twice
        pending.start(rifl1, &time);
        // should panic!
        pending.start(rifl1, &time);
    }
    #[test]
    #[should_panic]
    fn double_end() {
        // create pending
        let mut pending = Pending::new();
        // create rifl gen and 1 rifl
        let source = 10;
        let mut rifl_gen = RiflGen::new(source);
        let rifl1 = rifl_gen.next_id();
        // create sys time
        let time = SimTime::new();
        // end rifl1 before starting it (basically a double end)
        // should panic!
        pending.end(rifl1, &time);
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/client/workload.rs | fantoch/src/client/workload.rs | use crate::client::key_gen::{KeyGen, KeyGenState};
use crate::command::Command;
use crate::id::{RiflGen, ShardId};
use crate::kvs::{KVOp, Key, Value};
use crate::trace;
use crate::HashMap;
use rand::distributions::Alphanumeric;
use rand::Rng;
use serde::{Deserialize, Serialize};
use std::iter;
/// Configuration and progress of a client's workload: how commands are
/// generated (keys, payload, read-only ratio) and how many remain.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct Workload {
    /// number of shards
    shard_count: u64,
    /// key generator
    key_gen: KeyGen,
    /// number of keys accessed by the command
    keys_per_command: usize,
    /// number of commands to be submitted in this workload
    commands_per_client: usize,
    /// percentage of read-only commands
    read_only_percentage: usize,
    /// size of payload in command (in bytes)
    payload_size: usize,
    /// number of commands already issued in this workload
    command_count: usize,
}
impl Workload {
    /// Creates a new workload configuration; the read-only percentage
    /// defaults to 0 (see `set_read_only_percentage`).
    ///
    /// # Panics
    ///
    /// Panics if a `ConflictPool` key generator is configured with a
    /// conflict rate above 100, a pool size below 1, or an unsupported
    /// number of keys per command.
    pub fn new(
        shard_count: usize,
        key_gen: KeyGen,
        keys_per_command: usize,
        commands_per_client: usize,
        payload_size: usize,
    ) -> Self {
        // check for valid workloads
        if let KeyGen::ConflictPool {
            pool_size,
            conflict_rate,
        } = key_gen
        {
            assert!(
                conflict_rate <= 100,
                "the conflict rate must be less or equal to 100"
            );
            assert!(pool_size >= 1, "the pool size should be at least 1");
            if conflict_rate == 100 && keys_per_command > 1 {
                panic!("invalid workload; can't generate more than one key when the conflict_rate is 100");
            }
            if keys_per_command > 2 {
                panic!("invalid workload; can't generate more than two keys with the conflict_rate key generator");
            }
        }
        // by default, the read-only percentage is 0
        let read_only_percentage = 0;
        Self {
            shard_count: shard_count as u64,
            keys_per_command,
            key_gen,
            commands_per_client,
            read_only_percentage,
            payload_size,
            command_count: 0,
        }
    }
    /// Returns the number of shards in the system.
    pub fn shard_count(&self) -> usize {
        self.shard_count as usize
    }
    /// Returns the key generator.
    pub fn key_gen(&self) -> KeyGen {
        self.key_gen
    }
    /// Returns the total number of commands to be generated by this workload.
    pub fn commands_per_client(&self) -> usize {
        self.commands_per_client
    }
    /// Returns the number of keys accessed by commands generated by this
    /// workload.
    pub fn keys_per_command(&self) -> usize {
        self.keys_per_command
    }
    /// Returns the percentage of read-only commands to be generated by this
    /// workload.
    pub fn read_only_percentage(&self) -> usize {
        self.read_only_percentage
    }
    /// Sets the percentage of read-only commands to be generated by this
    /// workload.
    ///
    /// # Panics
    ///
    /// Panics if `read_only_percentage` is above 100.
    pub fn set_read_only_percentage(&mut self, read_only_percentage: usize) {
        assert!(
            read_only_percentage <= 100,
            "the percentage of read-only commands must be less or equal to 100"
        );
        self.read_only_percentage = read_only_percentage;
    }
    /// Returns the payload size of the commands to be generated by this
    /// workload.
    pub fn payload_size(&self) -> usize {
        self.payload_size
    }
    /// Generate the next command; returns `None` once `commands_per_client`
    /// commands have been issued.
    pub fn next_cmd(
        &mut self,
        rifl_gen: &mut RiflGen,
        key_gen_state: &mut KeyGenState,
    ) -> Option<(ShardId, Command)> {
        // check if we should generate more commands
        if self.command_count < self.commands_per_client {
            // increment command count
            self.command_count += 1;
            // generate new command
            Some(self.gen_cmd(rifl_gen, key_gen_state))
        } else {
            trace!("c{:?}: done!", rifl_gen.source());
            None
        }
    }
    /// Returns the number of commands already issued.
    pub fn issued_commands(&self) -> usize {
        self.command_count
    }
    /// Returns a boolean indicating whether the workload has finished, i.e. all
    /// commands have been issued.
    pub fn finished(&self) -> bool {
        self.command_count == self.commands_per_client
    }
    /// Generate a command: draws unique keys, decides whether the command is
    /// read-only, and groups the resulting ops per shard. The target shard
    /// is the shard of the first generated key.
    fn gen_cmd(
        &mut self,
        rifl_gen: &mut RiflGen,
        key_gen_state: &mut KeyGenState,
    ) -> (ShardId, Command) {
        // generate rifl
        let rifl = rifl_gen.next_id();
        // generate all the key-value pairs
        let mut ops: HashMap<_, HashMap<_, _>> = HashMap::new();
        // generate unique keys:
        // - since we store them in Vec, this ensures that the target shard will
        // be the shard of the first key generated
        let keys = self.gen_unique_keys(key_gen_state);
        // check if the command should be read-only
        let read_only = super::key_gen::true_if_random_is_less_than(
            self.read_only_percentage,
        );
        let mut target_shard = None;
        for key in keys {
            // compute op
            let op = if read_only {
                // if read-only, the op is a `Get`
                KVOp::Get
            } else {
                // if not read-only, the op is a `Put`:
                // - generate payload for `Put` op
                let value = self.gen_cmd_value();
                KVOp::Put(value)
            };
            // compute key's shard and save op
            let shard_id = self.shard_id(&key);
            ops.entry(shard_id).or_default().insert(key, vec![op]);
            // target shard is the shard of the first key generated
            target_shard = target_shard.or(Some(shard_id));
        }
        let target_shard =
            target_shard.expect("there should be a target shard");
        // create command
        (target_shard, Command::new(rifl, ops))
    }
    /// Generates `keys_per_command` distinct keys by redrawing duplicates.
    fn gen_unique_keys(&self, key_gen_state: &mut KeyGenState) -> Vec<Key> {
        let mut keys = Vec::with_capacity(self.keys_per_command);
        while keys.len() != self.keys_per_command {
            let key = key_gen_state.gen_cmd_key();
            if !keys.contains(&key) {
                keys.push(key);
            }
        }
        keys
    }
    /// Generate a command payload with the payload size provided.
    fn gen_cmd_value(&self) -> Value {
        let mut rng = rand::thread_rng();
        iter::repeat(())
            .map(|_| rng.sample(Alphanumeric) as char)
            .take(self.payload_size)
            .collect()
    }
    /// Computes which shard the key belongs to.
    fn shard_id(&self, key: &Key) -> ShardId {
        crate::util::key_hash(key) % self.shard_count
    }
}
#[cfg(test)]
mod tests {
    //! Unit tests for `Workload`: key generation, command counting,
    //! conflict-rate distribution, and multi-shard commands.
    use super::*;
    use crate::kvs::KVOp;
    const POOL_SIZE: usize = 1;
    // since the pool size is 1, the conflict color must be the following
    const CONFLICT_COLOR: &str = "CONFLICT0";
    #[test]
    fn gen_cmd_key() {
        // create rilf gen
        let client_id = 1;
        let mut rifl_gen = RiflGen::new(client_id);
        // general config
        let shard_count = 1;
        let keys_per_command = 1;
        let commands_per_client = 100;
        let payload_size = 100;
        // create conflicting workload
        let key_gen = KeyGen::ConflictPool {
            conflict_rate: 100,
            pool_size: POOL_SIZE,
        };
        let mut workload = Workload::new(
            shard_count,
            key_gen,
            keys_per_command,
            commands_per_client,
            payload_size,
        );
        let mut key_gen_state =
            key_gen.initial_state(workload.shard_count(), client_id);
        let (target_shard, command) =
            workload.gen_cmd(&mut rifl_gen, &mut key_gen_state);
        assert_eq!(target_shard, 0);
        assert_eq!(
            command.keys(target_shard).collect::<Vec<_>>(),
            vec![CONFLICT_COLOR]
        );
        // create non-conflicting workload
        let key_gen = KeyGen::ConflictPool {
            conflict_rate: 0,
            pool_size: POOL_SIZE,
        };
        let mut workload = Workload::new(
            shard_count,
            key_gen,
            keys_per_command,
            commands_per_client,
            payload_size,
        );
        let mut key_gen_state =
            key_gen.initial_state(workload.shard_count(), client_id);
        let (target_shard, command) =
            workload.gen_cmd(&mut rifl_gen, &mut key_gen_state);
        assert_eq!(target_shard, 0);
        assert_eq!(command.keys(target_shard).collect::<Vec<_>>(), vec!["1"]);
    }
    #[test]
    fn next_cmd() {
        // create rilf gen
        let client_id = 1;
        let mut rifl_gen = RiflGen::new(client_id);
        // general config
        let shard_count = 1;
        let keys_per_command = 1;
        let commands_per_client = 10000;
        let payload_size = 10;
        // create workload
        let key_gen = KeyGen::ConflictPool {
            conflict_rate: 100,
            pool_size: POOL_SIZE,
        };
        let mut workload = Workload::new(
            shard_count,
            key_gen,
            keys_per_command,
            commands_per_client,
            payload_size,
        );
        let mut key_gen_state =
            key_gen.initial_state(workload.shard_count(), client_id);
        // check total and issued commands
        assert_eq!(workload.commands_per_client(), commands_per_client);
        assert_eq!(workload.issued_commands(), 0);
        // the first `total_commands` commands are `Some`
        for i in 1..=commands_per_client {
            if let Some((target_shard, cmd)) =
                workload.next_cmd(&mut rifl_gen, &mut key_gen_state)
            {
                // since there's a single shard, keys should be on shard 0
                assert_eq!(target_shard, 0);
                let (key, ops) = cmd.into_iter(target_shard).next().unwrap();
                let mut ops = ops.as_ref().clone();
                // since the conflict is 100, the key should be `CONFLICT_COLOR`
                assert_eq!(key, CONFLICT_COLOR);
                assert_eq!(ops.len(), 1);
                let op = ops.pop().unwrap();
                // check that the value size is `payload_size`
                if let KVOp::Put(payload) = op {
                    assert_eq!(payload.len(), payload_size);
                } else {
                    panic!("workload should generate PUT commands");
                }
                // check total and issued commands
                assert_eq!(workload.commands_per_client(), commands_per_client);
                assert_eq!(workload.issued_commands(), i);
            } else {
                panic!("there should be a next command in this workload");
            }
        }
        // check the workload is finished
        assert!(workload.finished());
        // after this, no more commands are generated
        for _ in 1..=10 {
            assert!(workload
                .next_cmd(&mut rifl_gen, &mut key_gen_state)
                .is_none());
        }
        // check the workload is still finished
        assert!(workload.finished());
    }
    #[test]
    fn conflict_rate() {
        for conflict_rate in vec![1, 2, 10, 50] {
            // create rilf gen
            let client_id = 1;
            let mut rifl_gen = RiflGen::new(client_id);
            // total commands
            let shard_count = 1;
            let keys_per_command = 1;
            let commands_per_client = 1000000;
            let payload_size = 0;
            // create workload
            let key_gen = KeyGen::ConflictPool {
                conflict_rate,
                pool_size: POOL_SIZE,
            };
            let mut workload = Workload::new(
                shard_count,
                key_gen,
                keys_per_command,
                commands_per_client,
                payload_size,
            );
            let mut key_gen_state =
                key_gen.initial_state(workload.shard_count(), client_id);
            // count conflicting commands
            let mut conflict_color_count = 0;
            while let Some((target_shard, cmd)) =
                workload.next_cmd(&mut rifl_gen, &mut key_gen_state)
            {
                // since there's a single shard, keys should be on shard 0
                assert_eq!(target_shard, 0);
                // get command key and check if it's conflicting
                let (key, _) = cmd.into_iter(target_shard).next().unwrap();
                if key == CONFLICT_COLOR {
                    conflict_color_count += 1;
                }
            }
            // compute percentage of conflicting commands
            let percentage = (conflict_color_count * 100) as f64
                / commands_per_client as f64;
            assert_eq!(percentage.round() as usize, conflict_rate);
        }
    }
    #[test]
    fn two_shards() {
        // in order for this test to pass, `check_two_shards` should generate a
        // command that accesses two shards
        // NOTE: `repeat(())` is infinite, so `any` only returns once
        // `check_two_shards` succeeds
        std::iter::repeat(()).any(|_| check_two_shards());
    }
    fn check_two_shards() -> bool {
        // create rilf gen
        let client_id = 1;
        let mut rifl_gen = RiflGen::new(client_id);
        // general config
        let shard_count = 2;
        let keys_per_command = 2;
        let commands_per_client = 1;
        let payload_size = 0;
        // create workload
        let key_gen = KeyGen::Zipf {
            coefficient: 0.1,
            total_keys_per_shard: 1_000_000,
        };
        let mut workload = Workload::new(
            shard_count,
            key_gen,
            keys_per_command,
            commands_per_client,
            payload_size,
        );
        let mut key_gen_state =
            key_gen.initial_state(workload.shard_count(), client_id);
        let (target_shard, cmd) = workload
            .next_cmd(&mut rifl_gen, &mut key_gen_state)
            .expect("there should be at least one command");
        assert!(
            target_shard == 0 || target_shard == 1,
            "target shard should be either 0 or 1"
        );
        assert!(
            cmd.key_count(0) + cmd.key_count(1) == keys_per_command,
            "the number of keys accessed by the command should be 2"
        );
        // we want an execution in which the two shards are accessed, i.e.:
        // - 1 key in shard 0
        // - 1 key in shard 1
        cmd.key_count(0) == 1 && cmd.key_count(1) == 1
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/client/mod.rs | fantoch/src/client/mod.rs | // This module contains the definition of `Workload`
pub mod workload;
// This module contains the definition of `KeyGenerator` and
// `KeyGeneratorState`.
pub mod key_gen;
// This module contains the definition of `Pending`
pub mod pending;
// This module contains the definition of `ClientData`
pub mod data;
// Re-exports.
pub use data::ClientData;
pub use key_gen::KeyGen;
pub use pending::Pending;
pub use workload::Workload;
use crate::command::Command;
use crate::id::{ClientId, ProcessId, Rifl, RiflGen, ShardId};
use crate::time::SysTime;
use crate::HashMap;
use crate::{info, trace};
use key_gen::KeyGenState;
pub struct Client {
    /// id of this client
    client_id: ClientId,
    /// mapping from shard id to the process id of that shard this client is
    /// connected to
    processes: HashMap<ShardId, ProcessId>,
    /// rifl id generator
    rifl_gen: RiflGen,
    /// workload configuration
    workload: Workload,
    /// state needed by key generator
    key_gen_state: KeyGenState,
    /// map from pending command RIFL to its start time
    pending: Pending,
    /// latency and end-time records of completed commands (fed by
    /// `cmd_recv`, which calls `data.record`)
    data: ClientData,
    /// frequency of status messages; if set with Some(1), a status message
    /// will be shown after each command completes
    status_frequency: Option<usize>,
}
impl Client {
    /// Creates a new client with the given workload.
    pub fn new(
        client_id: ClientId,
        workload: Workload,
        status_frequency: Option<usize>,
    ) -> Self {
        // derive the initial key-generator state from the workload config
        let key_gen_state = workload
            .key_gen()
            .initial_state(workload.shard_count(), client_id);
        Self {
            client_id,
            processes: HashMap::new(),
            rifl_gen: RiflGen::new(client_id),
            workload,
            key_gen_state,
            pending: Pending::new(),
            data: ClientData::new(),
            status_frequency,
        }
    }
    /// Returns the client identifier.
    pub fn id(&self) -> ClientId {
        self.client_id
    }
    /// "Connect" to the closest process on each shard.
    pub fn connect(&mut self, processes: HashMap<ShardId, ProcessId>) {
        self.processes = processes;
    }
    /// Retrieves the closest process on this shard.
    ///
    /// # Panics
    ///
    /// Panics if the client is not connected to `shard_id`.
    pub fn shard_process(&self, shard_id: &ShardId) -> ProcessId {
        *self
            .processes
            .get(shard_id)
            .expect("client should be connected to all shards")
    }
    /// Generates the next command in this client's workload and registers it
    /// as pending; returns `None` once the workload is exhausted.
    pub fn cmd_send(
        &mut self,
        time: &dyn SysTime,
    ) -> Option<(ShardId, Command)> {
        let (target_shard, cmd) = self
            .workload
            .next_cmd(&mut self.rifl_gen, &mut self.key_gen_state)?;
        // a new command was generated: start tracking it in pending
        let rifl = cmd.rifl();
        trace!(
            "c{}: new rifl pending {:?} | time = {}",
            self.client_id,
            rifl,
            time.micros()
        );
        self.pending.start(rifl, time);
        Some((target_shard, cmd))
    }
    /// Handles an executed command: stops tracking it, records its latency,
    /// and (depending on `status_frequency`) prints a progress line.
    pub fn cmd_recv(&mut self, rifl: Rifl, time: &dyn SysTime) {
        // end command in pending and record its latency
        let (latency, end_time) = self.pending.end(rifl, time);
        trace!(
            "c{}: rifl {:?} ended after {} micros at {}",
            self.client_id,
            rifl,
            latency.as_micros(),
            end_time
        );
        self.data.record(latency, end_time);
        if let Some(frequency) = self.status_frequency {
            let issued = self.workload.issued_commands();
            if issued % frequency == 0 {
                info!(
                    "c{:?}: {} of {}",
                    self.client_id,
                    issued,
                    self.workload.commands_per_client()
                );
            }
        }
    }
    /// Returns `true` when all workload commands have been generated.
    pub fn workload_finished(&self) -> bool {
        self.workload.finished()
    }
    /// Returns `true` when the workload is exhausted and no command is
    /// still pending.
    pub fn finished(&self) -> bool {
        self.workload.finished() && self.pending.is_empty()
    }
    /// Borrows the data recorded by this client.
    pub fn data(&self) -> &ClientData {
        &self.data
    }
    /// Returns the number of commands already issued.
    pub fn issued_commands(&self) -> usize {
        self.workload.issued_commands()
    }
}
#[cfg(test)]
mod tests {
    //! Unit tests for `Client`: process discovery and the full
    //! send/receive command flow with latency/throughput checks.
    use super::*;
    use crate::planet::{Planet, Region};
    use crate::time::SimTime;
    use crate::util;
    use std::collections::BTreeMap;
    use std::iter::FromIterator;
    use std::time::Duration;
    /// Generates a client with a single-shard, 100%-conflict workload.
    fn gen_client(commands_per_client: usize) -> Client {
        // workload
        let shard_count = 1;
        let keys_per_command = 1;
        let conflict_rate = 100;
        let pool_size = 1;
        let key_gen = KeyGen::ConflictPool {
            conflict_rate,
            pool_size,
        };
        let payload_size = 100;
        let workload = Workload::new(
            shard_count,
            key_gen,
            keys_per_command,
            commands_per_client,
            payload_size,
        );
        // client
        let id = 1;
        let status_frequency = None;
        Client::new(id, workload, status_frequency)
    }
    #[test]
    fn discover() {
        // create planet
        let planet = Planet::new();
        // there are two shards
        let shard_0_id = 0;
        let shard_1_id = 1;
        // processes
        let processes = vec![
            (0, shard_0_id, Region::new("asia-east1")),
            (1, shard_0_id, Region::new("australia-southeast1")),
            (2, shard_0_id, Region::new("europe-west1")),
            (3, shard_1_id, Region::new("europe-west2")),
        ];
        // client
        let region = Region::new("europe-west2");
        let commands_per_client = 0;
        let mut client = gen_client(commands_per_client);
        // check discover with empty vec
        let closest = util::closest_process_per_shard(&region, &planet, vec![]);
        client.connect(closest);
        assert!(client.processes.is_empty());
        // check discover with processes
        let closest =
            util::closest_process_per_shard(&region, &planet, processes);
        client.connect(closest);
        assert_eq!(
            BTreeMap::from_iter(client.processes),
            // connected to process 2 on shard 0 and process 3 on shard 1
            BTreeMap::from_iter(vec![(shard_0_id, 2), (shard_1_id, 3)])
        );
    }
    #[test]
    fn client_flow() {
        // create planet
        let planet = Planet::new();
        // there's a single shard
        let shard_id = 0;
        // processes
        let processes = vec![
            (0, shard_id, Region::new("asia-east1")),
            (1, shard_id, Region::new("australia-southeast1")),
            (2, shard_id, Region::new("europe-west1")),
        ];
        // client
        let region = Region::new("europe-west2");
        let commands_per_client = 2;
        let mut client = gen_client(commands_per_client);
        // discover
        let closest =
            util::closest_process_per_shard(&region, &planet, processes);
        client.connect(closest);
        // create system time
        let mut time = SimTime::new();
        // start client at time 0
        let (shard_id, cmd) = client
            .cmd_send(&time)
            .expect("there should a first operation");
        let process_id = client.shard_process(&shard_id);
        // process_id should be 2
        assert_eq!(process_id, 2);
        // handle result at time 10
        time.add_millis(10);
        client.cmd_recv(cmd.rifl(), &time);
        let next = client.cmd_send(&time);
        // check there's next command
        assert!(next.is_some());
        let (shard_id, cmd) = next.unwrap();
        let process_id = client.shard_process(&shard_id);
        // process_id should be 2
        assert_eq!(process_id, 2);
        // handle result at time 15
        time.add_millis(5);
        client.cmd_recv(cmd.rifl(), &time);
        let next = client.cmd_send(&time);
        // check there's no next command
        assert!(next.is_none());
        // check latency
        let mut latency: Vec<_> = client.data().latency_data().collect();
        latency.sort();
        assert_eq!(
            latency,
            vec![Duration::from_millis(5), Duration::from_millis(10)]
        );
        // check throughput
        let mut throughput: Vec<_> = client.data().throughput_data().collect();
        throughput.sort();
        assert_eq!(throughput, vec![(10, 1), (15, 1)],);
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/client/key_gen.rs | fantoch/src/client/key_gen.rs | use crate::id::ClientId;
use crate::kvs::Key;
use rand::distributions::Distribution;
use rand::Rng;
use serde::{Deserialize, Serialize};
use zipf::ZipfDistribution;
pub const CONFLICT_COLOR: &str = "CONFLICT";
/// Strategy used by clients to generate command keys.
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub enum KeyGen {
    /// With probability `conflict_rate` (a percentage in 0..=100), pick a
    /// random key from a shared pool of `pool_size` keys; otherwise use a
    /// key unique to the client (its id).
    ConflictPool {
        conflict_rate: usize,
        pool_size: usize,
    },
    /// Draw keys from a zipfian distribution with exponent `coefficient`
    /// over `total_keys_per_shard * shard_count` keys.
    Zipf {
        coefficient: f64,
        total_keys_per_shard: usize,
    },
}
impl KeyGen {
    /// Creates the per-client state used to generate keys with this
    /// strategy (e.g. the zipfian distribution for `KeyGen::Zipf`).
    pub fn initial_state(
        self,
        shard_count: usize,
        client_id: ClientId,
    ) -> KeyGenState {
        KeyGenState::new(self, shard_count, client_id)
    }
}
impl std::fmt::Display for KeyGen {
    /// Formats the key generator as a short, filesystem-friendly name:
    /// `conflict_<rate>_<pool>` or `zipf_<coef>_<keys>` (with the `.` in
    /// the coefficient replaced by `-`, e.g. `zipf_0-70_1000`).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::ConflictPool {
                conflict_rate,
                pool_size,
            } => {
                write!(f, "conflict_{}_{}", conflict_rate, pool_size)
            }
            Self::Zipf {
                total_keys_per_shard,
                coefficient,
            } => {
                // build the name first so that the decimal point produced by
                // `{:.2}` can be replaced; avoids the redundant
                // `write!(f, "{}", format!(..))` nesting of the previous
                // version and uses a char pattern for `replace`
                let name =
                    format!("zipf_{:.2}_{}", coefficient, total_keys_per_shard)
                        .replace('.', "-");
                f.write_str(&name)
            }
        }
    }
}
/// Per-client state used to generate command keys according to a `KeyGen`.
pub struct KeyGenState {
    key_gen: KeyGen,
    client_id: ClientId,
    // only `Some` when `key_gen` is `KeyGen::Zipf`
    zipf: Option<ZipfDistribution>,
}
impl KeyGenState {
    /// Creates the state for `key_gen`; the zipfian distribution is only
    /// initialized when the strategy is `KeyGen::Zipf`.
    fn new(key_gen: KeyGen, shard_count: usize, client_id: ClientId) -> Self {
        let zipf = match key_gen {
            KeyGen::ConflictPool { .. } => None,
            KeyGen::Zipf {
                coefficient,
                total_keys_per_shard,
            } => {
                // compute key count
                let key_count = total_keys_per_shard * shard_count;
                // initialize zipf distribution
                let zipf = ZipfDistribution::new(key_count, coefficient)
                    .expect(
                        "it should be possible to initialize the ZipfDistribution",
                    );
                Some(zipf)
            }
        };
        Self {
            key_gen,
            client_id,
            zipf,
        }
    }

    /// Generates the key for the next command, dispatching on the strategy.
    pub fn gen_cmd_key(&mut self) -> Key {
        match self.key_gen {
            KeyGen::ConflictPool {
                conflict_rate,
                pool_size,
            } => self.gen_conflict_rate(conflict_rate, pool_size),
            KeyGen::Zipf { .. } => self.gen_zipf(),
        }
    }

    /// Generate a command key based on the conflict rate provided.
    fn gen_conflict_rate(&self, conflict_rate: usize, pool_size: usize) -> Key {
        debug_assert!(conflict_rate <= 100);
        debug_assert!(pool_size >= 1);
        // check if we should generate a conflict
        let should_conflict = true_if_random_is_less_than(conflict_rate);
        if should_conflict {
            // if it should conflict, select a random key from the pool
            let random_key = rand::thread_rng().gen_range(0..pool_size);
            format!("{}{}", CONFLICT_COLOR, random_key)
        } else {
            // avoid conflict with unique client key
            self.client_id.to_string()
        }
    }

    /// Generate a command key based on the initialized zipfian distribution.
    fn gen_zipf(&mut self) -> Key {
        let zipf = self
            .zipf
            .expect("ZipfDistribution should already be initialized");
        let mut rng = rand::thread_rng();
        zipf.sample(&mut rng).to_string()
    }
}
/// Returns `true` with probability `percentage` percent; the extremes 0 and
/// 100 are handled deterministically, without consulting the RNG.
pub fn true_if_random_is_less_than(percentage: usize) -> bool {
    if percentage == 0 {
        false
    } else if percentage == 100 {
        true
    } else {
        rand::thread_rng().gen_range(0..100) < percentage
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/client/data.rs | fantoch/src/client/data.rs | use crate::HashMap;
use serde::{Deserialize, Serialize};
use std::time::Duration;
/// Raw latency/throughput data gathered by one client.
#[derive(Debug, Default, Clone, PartialEq, Deserialize, Serialize)]
pub struct ClientData {
    // raw values: we have "100%" precision as all values are stored
    // - mapping from operation end time to all latencies registered at that
    //   end time
    data: HashMap<u64, Vec<Duration>>,
}
impl ClientData {
    /// Creates an empty `ClientData`.
    pub fn new() -> Self {
        Self::default()
    }

    /// Merges the data recorded by `other` into `self`.
    pub fn merge(&mut self, other: &Self) {
        data_merge(&mut self.data, &other.data)
    }

    /// Records one more data point: an operation that ended at `end_time`
    /// with the given `latency`.
    pub fn record(&mut self, latency: Duration, end_time: u64) {
        // entry + or_default: single lookup, creates the vector on demand
        self.data.entry(end_time).or_default().push(latency);
    }

    /// Returns an iterator over every recorded latency.
    pub fn latency_data(&self) -> impl Iterator<Item = Duration> + '_ {
        self.data.values().flatten().cloned()
    }

    /// Returns, for each end time, how many operations ended at that time.
    pub fn throughput_data(&self) -> impl Iterator<Item = (u64, usize)> + '_ {
        self.data
            .iter()
            .map(|(time, latencies)| (*time, latencies.len()))
    }

    /// Computes the average number of operations per second.
    pub fn throughput(&self) -> f64 {
        // bucket operations per second (end times are in milliseconds)
        let mut seconds_to_ops: HashMap<_, f64> = HashMap::new();
        for (time_millis, ops) in self.data.iter() {
            let time_seconds = time_millis / 1000;
            let current_ops = seconds_to_ops.entry(time_seconds).or_default();
            *current_ops += ops.len() as f64;
        }
        // incremental averaging to prevent (a potential) overflow
        let mut average = 0f64;
        let mut count = 0f64;
        for value in seconds_to_ops.values() {
            average = (average * count + value) / (count + 1f64);
            count += 1f64;
        }
        average
    }

    /// Compute start and end times for this client, i.e. the smallest and
    /// largest end times recorded. Returns `None` if nothing was recorded.
    pub fn start_and_end(&self) -> Option<(u64, u64)> {
        // min/max over the keys: O(n) and allocation-free, instead of
        // collecting and sorting all keys
        let start = *self.data.keys().min()?;
        let end = *self.data.keys().max()?;
        Some((start, end))
    }

    /// Prune events that are before `start` or after `end`.
    pub fn prune(&mut self, start: u64, end: u64) {
        self.data.retain(|&time, _| {
            // retain if within the given bounds
            time >= start && time <= end
        })
    }
}
/// Merges `other` into `map`: for each key in `other`, its values are
/// appended to the (possibly newly-created) vector stored in `map` under
/// the same key.
pub fn data_merge<V>(
    map: &mut HashMap<u64, Vec<V>>,
    other: &HashMap<u64, Vec<V>>,
) where
    V: Clone,
{
    for (k, v2) in other {
        // the entry API does a single lookup per key and extends in place,
        // avoiding the intermediate `Vec` clone of the insert-or-extend
        // version
        map.entry(*k).or_default().extend(v2.iter().cloned());
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // end-to-end check of record / merge / prune bookkeeping
    #[test]
    fn client_data_test() {
        let mut data = ClientData::new();
        assert_eq!(data.start_and_end(), None);
        // at time 10, an operation with latency 1 ended
        data.record(Duration::from_millis(1), 10);
        assert_eq!(data.start_and_end(), Some((10, 10)));
        // at time 10, another operation but with latency 2 ended
        data.record(Duration::from_millis(2), 10);
        assert_eq!(data.start_and_end(), Some((10, 10)));
        // at time 11, an operation with latency 5 ended
        data.record(Duration::from_millis(5), 11);
        assert_eq!(data.start_and_end(), Some((10, 11)));
        // check latency and throughput data
        let mut latency: Vec<_> = data.latency_data().collect();
        latency.sort();
        assert_eq!(
            latency,
            vec![
                Duration::from_millis(1),
                Duration::from_millis(2),
                Duration::from_millis(5),
            ]
        );
        let mut throughput: Vec<_> = data.throughput_data().collect();
        throughput.sort();
        assert_eq!(throughput, vec![(10, 2), (11, 1)]);
        // check merge
        let mut other = ClientData::new();
        // at time 2, an operation with latency 5 ended
        other.record(Duration::from_millis(5), 2);
        data.merge(&other);
        assert_eq!(data.start_and_end(), Some((2, 11)));
        let mut latency: Vec<_> = data.latency_data().collect();
        latency.sort();
        assert_eq!(
            latency,
            vec![
                Duration::from_millis(1),
                Duration::from_millis(2),
                Duration::from_millis(5),
                Duration::from_millis(5),
            ]
        );
        let mut throughput: Vec<_> = data.throughput_data().collect();
        throughput.sort();
        assert_eq!(throughput, vec![(2, 1), (10, 2), (11, 1)]);
        // check prune: if all events are within bounds, then no pruning happens
        data.prune(1, 20);
        let mut throughput: Vec<_> = data.throughput_data().collect();
        throughput.sort();
        assert_eq!(throughput, vec![(2, 1), (10, 2), (11, 1)]);
        // prune event 2 out
        data.prune(5, 20);
        let mut throughput: Vec<_> = data.throughput_data().collect();
        throughput.sort();
        assert_eq!(throughput, vec![(10, 2), (11, 1)]);
        // prune event 10 out
        data.prune(11, 20);
        let mut throughput: Vec<_> = data.throughput_data().collect();
        throughput.sort();
        assert_eq!(throughput, vec![(11, 1)]);
        // prune event 11 out
        data.prune(15, 20);
        let mut throughput: Vec<_> = data.throughput_data().collect();
        throughput.sort();
        assert_eq!(throughput, vec![]);
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/protocol/base.rs | fantoch/src/protocol/base.rs | use crate::command::Command;
use crate::config::Config;
use crate::id::{Dot, DotGen, ProcessId, ShardId};
use crate::protocol::{ProtocolMetrics, ProtocolMetricsKind};
use crate::trace;
use crate::{HashMap, HashSet};
use std::iter::FromIterator;
// a `BaseProcess` has all functionalities shared by Atlas, Tempo, ...
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct BaseProcess {
    pub process_id: ProcessId,
    pub shard_id: ShardId,
    pub config: Config,
    // the following sets are `None` until `discover` is called with enough
    // processes to fill them
    all: Option<HashSet<ProcessId>>,
    all_but_me: Option<HashSet<ProcessId>>,
    majority_quorum: Option<HashSet<ProcessId>>,
    fast_quorum: Option<HashSet<ProcessId>>,
    write_quorum: Option<HashSet<ProcessId>>,
    // mapping from shard id (that are not the same as mine) to the closest
    // process from that shard
    closest_shard_process: HashMap<ShardId, ProcessId>,
    fast_quorum_size: usize,
    write_quorum_size: usize,
    // generator of dots (command identifiers) owned by this process
    dot_gen: DotGen,
    metrics: ProtocolMetrics,
}
impl BaseProcess {
    /// Creates a new `BaseProcess`.
    pub fn new(
        process_id: ProcessId,
        shard_id: ShardId,
        config: Config,
        fast_quorum_size: usize,
        write_quorum_size: usize,
    ) -> Self {
        // since processes lead with ballot `id` when taking the slow path and
        // we may rely on the fact that a zero accepted ballot means the process
        // has never been through Paxos phase-2, all ids must be non-zero
        assert_ne!(process_id, 0, "process ids must be non-zero");
        Self {
            process_id,
            shard_id,
            config,
            all: None,
            all_but_me: None,
            majority_quorum: None,
            fast_quorum: None,
            write_quorum: None,
            closest_shard_process: HashMap::new(),
            fast_quorum_size,
            write_quorum_size,
            dot_gen: DotGen::new(process_id),
            metrics: ProtocolMetrics::new(),
        }
    }

    /// Updates the processes known by this process.
    /// The set of processes provided is already sorted by distance.
    /// Returns whether all quorums could be filled with the given processes.
    pub fn discover(
        &mut self,
        all_processes: Vec<(ProcessId, ShardId)>,
    ) -> bool {
        // reset closest shard process
        self.closest_shard_process =
            HashMap::with_capacity(self.config.shard_count() - 1);
        // select processes from my shard and compute `closest_shard_process`
        let processes: Vec<_> = all_processes
            .into_iter()
            .filter_map(|(process_id, shard_id)| {
                // check if process belongs to my shard
                if self.shard_id == shard_id {
                    // if yes, keep process id
                    Some(process_id)
                } else {
                    // if not, then it must be the closest process from that shard (i.e. from the same region) as mine
                    assert!(self.closest_shard_process.insert(shard_id, process_id).is_none(), "process should only connect to the closest process from each shard");
                    None
                }
            })
            .collect();
        let majority_quorum_size = self.config.majority_quorum_size();
        // create majority quorum by taking the first `majority_quorum_size`
        // elements; `iter().copied()` avoids cloning the whole vector, as
        // `ProcessId` is `Copy`
        let majority_quorum: HashSet<_> = processes
            .iter()
            .copied()
            .take(majority_quorum_size)
            .collect();
        // create fast quorum by taking the first `fast_quorum_size` elements
        let fast_quorum: HashSet<_> = processes
            .iter()
            .copied()
            .take(self.fast_quorum_size)
            .collect();
        // create write quorum by taking the first `write_quorum_size` elements
        let write_quorum: HashSet<_> = processes
            .iter()
            .copied()
            .take(self.write_quorum_size)
            .collect();
        // set all processes
        let all = HashSet::from_iter(processes.iter().copied());
        let all_but_me = HashSet::from_iter(
            processes.into_iter().filter(|&p| p != self.process_id),
        );
        self.all = Some(all);
        self.all_but_me = Some(all_but_me);
        // set majority quorum if we have enough processes
        self.majority_quorum = if majority_quorum.len() == majority_quorum_size
        {
            Some(majority_quorum)
        } else {
            None
        };
        // set fast quorum if we have enough processes
        self.fast_quorum = if fast_quorum.len() == self.fast_quorum_size {
            Some(fast_quorum)
        } else {
            None
        };
        // set write quorum if we have enough processes
        self.write_quorum = if write_quorum.len() == self.write_quorum_size {
            Some(write_quorum)
        } else {
            None
        };
        trace!(
            "p{}: all_but_me {:?} | majority_quorum {:?} | fast_quorum {:?} | write_quorum {:?} | closest_shard_process {:?}",
            self.process_id,
            self.all_but_me,
            self.majority_quorum,
            self.fast_quorum,
            self.write_quorum,
            self.closest_shard_process
        );
        // connected if quorums are set
        self.majority_quorum.is_some()
            && self.fast_quorum.is_some()
            && self.write_quorum.is_some()
    }

    /// Returns the next dot.
    pub fn next_dot(&mut self) -> Dot {
        self.dot_gen.next_id()
    }

    /// Returns all processes.
    pub fn all(&self) -> HashSet<ProcessId> {
        self.all
            .clone()
            .expect("the set of all processes should be known")
    }

    /// Returns all processes but self.
    pub fn all_but_me(&self) -> HashSet<ProcessId> {
        self.all_but_me
            .clone()
            .expect("the set of all processes (except self) should be known")
    }

    /// Returns a majority quorum.
    pub fn majority_quorum(&self) -> HashSet<ProcessId> {
        self.majority_quorum
            .clone()
            .expect("the majority quorum should be known")
    }

    /// Returns the fast quorum.
    pub fn fast_quorum(&self) -> HashSet<ProcessId> {
        self.fast_quorum
            .clone()
            .expect("the fast quorum should be known")
    }

    /// Returns the write quorum.
    pub fn write_quorum(&self) -> HashSet<ProcessId> {
        self.write_quorum
            .clone()
            .expect("the slow quorum should be known")
    }

    /// Returns the closest process for this shard.
    pub fn closest_process(&self, shard_id: &ShardId) -> ProcessId {
        *self
            .closest_shard_process
            .get(shard_id)
            .expect("closest shard process should be known")
    }

    /// Returns the closest process mapping.
    pub fn closest_shard_process(&self) -> &HashMap<ShardId, ProcessId> {
        &self.closest_shard_process
    }

    /// Computes the quorum to be used:
    /// - use majority quorum if NFR is enabled, and `cmd` is a single-key read
    /// - use fast quorum otherwise
    pub fn maybe_adjust_fast_quorum(
        &self,
        cmd: &Command,
    ) -> HashSet<ProcessId> {
        if self.config.nfr() && cmd.nfr_allowed() {
            self.majority_quorum()
        } else {
            self.fast_quorum()
        }
    }

    /// Returns the protocol metrics.
    pub fn metrics(&self) -> &ProtocolMetrics {
        &self.metrics
    }

    /// Updates fast/slow path metrics (and their read-only variants).
    pub fn path(&mut self, fast_path: bool, read_only: bool) {
        if fast_path {
            self.metrics.aggregate(ProtocolMetricsKind::FastPath, 1);
            if read_only {
                self.metrics
                    .aggregate(ProtocolMetricsKind::FastPathReads, 1);
            }
        } else {
            self.metrics.aggregate(ProtocolMetricsKind::SlowPath, 1);
            if read_only {
                self.metrics
                    .aggregate(ProtocolMetricsKind::SlowPathReads, 1);
            }
        }
    }

    /// Accumulates `len` more stable commands.
    pub fn stable(&mut self, len: usize) {
        self.metrics
            .aggregate(ProtocolMetricsKind::Stable, len as u64);
    }

    /// Collects a new metric.
    pub fn collect_metric(&mut self, kind: ProtocolMetricsKind, value: u64) {
        self.metrics.collect(kind, value);
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::command::Command;
    use crate::id::Rifl;
    use crate::kvs::KVOp;
    use crate::planet::{Planet, Region};
    use crate::util;
    use std::collections::BTreeSet;
    use std::iter::FromIterator;

    #[test]
    fn discover() {
        // processes
        let shard_id = 0;
        let processes = vec![
            (0, shard_id, Region::new("asia-east1")),
            (1, shard_id, Region::new("asia-northeast1")),
            (2, shard_id, Region::new("asia-south1")),
            (3, shard_id, Region::new("asia-southeast1")),
            (4, shard_id, Region::new("australia-southeast1")),
            (5, shard_id, Region::new("europe-north1")),
            (6, shard_id, Region::new("europe-west1")),
            (7, shard_id, Region::new("europe-west2")),
            (8, shard_id, Region::new("europe-west3")),
            (9, shard_id, Region::new("europe-west4")),
            (10, shard_id, Region::new("northamerica-northeast1")),
            (11, shard_id, Region::new("southamerica-east1")),
            (12, shard_id, Region::new("us-central1")),
            (13, shard_id, Region::new("us-east1")),
            (14, shard_id, Region::new("us-east4")),
            (15, shard_id, Region::new("us-west1")),
            (16, shard_id, Region::new("us-west2")),
        ];
        // config
        let n = 17;
        let f = 3;
        let config = Config::new(n, f);
        // bp
        let id = 8;
        let region = Region::new("europe-west3");
        let planet = Planet::new();
        let fast_quorum_size = 6;
        let write_quorum_size = 4;
        let mut bp = BaseProcess::new(
            id,
            shard_id,
            config,
            fast_quorum_size,
            write_quorum_size,
        );
        // no quorum is set yet
        assert_eq!(bp.fast_quorum, None);
        assert_eq!(bp.all, None);
        // discover processes and check we're connected
        let sorted =
            util::sort_processes_by_distance(&region, &planet, processes);
        assert!(bp.discover(sorted));
        // check set of all processes
        assert_eq!(
            BTreeSet::from_iter(bp.all()),
            BTreeSet::from_iter(vec![
                8, 9, 6, 7, 5, 14, 10, 13, 12, 15, 16, 11, 1, 0, 4, 3, 2
            ]),
        );
        // check set of all processes (but self)
        assert_eq!(
            BTreeSet::from_iter(bp.all_but_me()),
            BTreeSet::from_iter(vec![
                9, 6, 7, 5, 14, 10, 13, 12, 15, 16, 11, 1, 0, 4, 3, 2
            ]),
        );
        // check fast quorum
        assert_eq!(
            BTreeSet::from_iter(bp.fast_quorum()),
            BTreeSet::from_iter(vec![8, 9, 6, 7, 5, 14])
        );
        // check write quorum
        assert_eq!(
            BTreeSet::from_iter(bp.write_quorum()),
            BTreeSet::from_iter(vec![8, 9, 6, 7])
        );
    }

    #[test]
    fn discover_same_region() {
        // processes
        let shard_id = 0;
        let processes = vec![
            (0, shard_id, Region::new("asia-east1")),
            (1, shard_id, Region::new("asia-east1")),
            (2, shard_id, Region::new("europe-north1")),
            (3, shard_id, Region::new("europe-north1")),
            (4, shard_id, Region::new("europe-west1")),
        ];
        // config
        let n = 5;
        let f = 2;
        let config = Config::new(n, f);
        // bp
        let id = 2;
        let shard_id = 0;
        let region = Region::new("europe-north1");
        let planet = Planet::new();
        let fast_quorum_size = 3;
        let write_quorum_size = 4;
        let mut bp = BaseProcess::new(
            id,
            shard_id,
            config,
            fast_quorum_size,
            write_quorum_size,
        );
        // discover processes and check we're connected
        let sorted =
            util::sort_processes_by_distance(&region, &planet, processes);
        assert!(bp.discover(sorted));
        // check set of all processes
        assert_eq!(
            BTreeSet::from_iter(bp.all()),
            BTreeSet::from_iter(vec![2, 3, 4, 0, 1])
        );
        // check set of all processes (but self)
        assert_eq!(
            BTreeSet::from_iter(bp.all_but_me()),
            BTreeSet::from_iter(vec![3, 4, 0, 1])
        );
        // check fast quorum
        assert_eq!(
            BTreeSet::from_iter(bp.fast_quorum()),
            BTreeSet::from_iter(vec![2, 3, 4])
        );
        // check write quorum
        assert_eq!(
            BTreeSet::from_iter(bp.write_quorum()),
            BTreeSet::from_iter(vec![2, 3, 4, 0])
        );
    }

    #[test]
    fn discover_two_shards() {
        let shard_id_0 = 0;
        let shard_id_1 = 1;
        // config
        let n = 3;
        let f = 1;
        let config = Config::new(n, f);
        // check for bp id = 1, shard_id = 0
        let fast_quorum_size = 2;
        let write_quorum_size = 2;
        let mut bp = BaseProcess::new(
            1,
            shard_id_0,
            config,
            fast_quorum_size,
            write_quorum_size,
        );
        // processes
        let sorted = vec![
            (1, shard_id_0),
            (4, shard_id_1),
            (2, shard_id_0),
            (3, shard_id_0),
        ];
        assert!(bp.discover(sorted));
        // check set of all processes
        assert_eq!(
            BTreeSet::from_iter(bp.all()),
            BTreeSet::from_iter(vec![1, 2, 3])
        );
        // check set of all processes (but self)
        assert_eq!(
            BTreeSet::from_iter(bp.all_but_me()),
            BTreeSet::from_iter(vec![2, 3])
        );
        // check fast quorum
        assert_eq!(
            BTreeSet::from_iter(bp.fast_quorum()),
            BTreeSet::from_iter(vec![1, 2])
        );
        // check write quorum
        assert_eq!(
            BTreeSet::from_iter(bp.write_quorum()),
            BTreeSet::from_iter(vec![1, 2])
        );
        assert_eq!(bp.closest_shard_process.len(), 1);
        assert_eq!(bp.closest_shard_process.get(&shard_id_0), None);
        assert_eq!(bp.closest_shard_process.get(&shard_id_1), Some(&4));
        // check replicated by
        let mut ops = HashMap::new();
        ops.insert(String::from("a"), vec![KVOp::Get]);
        // create command replicated by shard 0
        let rifl = Rifl::new(1, 1);
        let mut shard_to_ops = HashMap::new();
        shard_to_ops.insert(shard_id_0, ops.clone());
        let cmd_shard_0 = Command::new(rifl, shard_to_ops);
        assert!(cmd_shard_0.replicated_by(&shard_id_0));
        assert!(!cmd_shard_0.replicated_by(&shard_id_1));
        // create command replicated by shard 1
        let rifl = Rifl::new(1, 2);
        let mut shard_to_ops = HashMap::new();
        shard_to_ops.insert(shard_id_1, ops.clone());
        let cmd_shard_1 = Command::new(rifl, shard_to_ops);
        assert!(!cmd_shard_1.replicated_by(&shard_id_0));
        assert!(cmd_shard_1.replicated_by(&shard_id_1));
        // create command replicated by both shards
        let rifl = Rifl::new(1, 3);
        let mut shard_to_ops = HashMap::new();
        shard_to_ops.insert(shard_id_0, ops.clone());
        shard_to_ops.insert(shard_id_1, ops.clone());
        let cmd_both_shards = Command::new(rifl, shard_to_ops);
        assert!(cmd_both_shards.replicated_by(&shard_id_0));
        assert!(cmd_both_shards.replicated_by(&shard_id_1));
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/protocol/mod.rs | fantoch/src/protocol/mod.rs | // This module contains the implementation of data structured used to hold info
// about commands.
mod info;
// This module contains the definition of `BaseProcess`.
mod base;
// This module contains the definition of a basic replication protocol that
// waits for f + 1 acks before committing a command. It's for sure inconsistent
// and most likely non-fault-tolerant until we base it on the synod module.
// TODO evolve the synod module so that is allows patterns like Coordinated
// Paxos and Simple Paxos from Mencius. With such patterns we can make this
// protocol fault-tolerant (but still inconsistent).
mod basic;
// This module contains common functionality from tracking when it's safe to
// garbage-collect a command, i.e., when it's been committed at all processes.
mod gc;
// Re-exports.
pub use base::BaseProcess;
pub use basic::Basic;
pub use gc::{BasicGCTrack, ClockGCTrack, VClockGCTrack};
pub use info::{Info, LockedCommandsInfo, SequentialCommandsInfo};
use crate::command::Command;
use crate::config::Config;
use crate::executor::Executor;
use crate::id::{Dot, ProcessId, ShardId};
use crate::metrics::Metrics;
use crate::time::SysTime;
use crate::{HashMap, HashSet};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use std::fmt::{self, Debug};
use std::time::Duration;
// Compact representation of which `Dot`s have been committed and executed.
pub type CommittedAndExecuted = (u64, Vec<Dot>);

/// Interface implemented by every replication protocol (e.g. `Basic`).
pub trait Protocol: Debug + Clone {
    type Message: Debug
        + Clone
        + PartialEq
        + Eq
        + Serialize
        + DeserializeOwned
        + Send
        + Sync
        + MessageIndex; // TODO why is Sync needed??
    type PeriodicEvent: Debug + Clone + Send + Sync + MessageIndex + Eq;
    type Executor: Executor + Send;

    /// Returns a new instance of the protocol and a list of periodic events.
    fn new(
        process_id: ProcessId,
        shard_id: ShardId,
        config: Config,
    ) -> (Self, Vec<(Self::PeriodicEvent, Duration)>);

    /// Returns this process' identifier.
    fn id(&self) -> ProcessId;

    /// Returns the identifier of the shard this process belongs to.
    fn shard_id(&self) -> ShardId;

    /// Updates the processes known by this process; returns whether the
    /// process is "connected" and the closest process per (other) shard.
    fn discover(
        &mut self,
        processes: Vec<(ProcessId, ShardId)>,
    ) -> (bool, HashMap<ShardId, ProcessId>);

    /// Submits a command issued by some client.
    fn submit(&mut self, dot: Option<Dot>, cmd: Command, time: &dyn SysTime);

    /// Handles a protocol message sent by process `from`.
    fn handle(
        &mut self,
        from: ProcessId,
        from_shard_id: ShardId,
        msg: Self::Message,
        time: &dyn SysTime,
    );

    /// Handles a periodic (local) event.
    fn handle_event(&mut self, event: Self::PeriodicEvent, time: &dyn SysTime);

    /// Handles a notification of committed/executed dots; default is a no-op.
    fn handle_executed(
        &mut self,
        _committed_and_executed: CommittedAndExecuted,
        _time: &dyn SysTime,
    ) {
        // protocols interested in handling this type of notifications at the
        // worker `GC_WORKER_INDEX` (see fantoch::run::prelude) should overwrite
        // this
    }

    /// Returns the next pending action (if any) to be sent to processes.
    #[must_use]
    fn to_processes(&mut self) -> Option<Action<Self>>;

    /// Returns an iterator that drains all pending actions.
    #[must_use]
    fn to_processes_iter(&mut self) -> ToProcessesIter<'_, Self> {
        ToProcessesIter { process: self }
    }

    /// Returns the next pending execution info (if any) for the executors.
    #[must_use]
    fn to_executors(
        &mut self,
    ) -> Option<<Self::Executor as Executor>::ExecutionInfo>;

    /// Returns an iterator that drains all pending execution info.
    #[must_use]
    fn to_executors_iter(&mut self) -> ToExecutorsIter<'_, Self> {
        ToExecutorsIter { process: self }
    }

    /// Whether the protocol can be run by parallel workers.
    fn parallel() -> bool;

    /// Whether the protocol is leaderless.
    fn leaderless() -> bool;

    /// Returns the protocol metrics.
    fn metrics(&self) -> &ProtocolMetrics;
}
/// Iterator that drains a protocol's pending actions by repeatedly calling
/// `Protocol::to_processes` until it returns `None`.
pub struct ToProcessesIter<'a, P> {
    process: &'a mut P,
}

impl<'a, P> Iterator for ToProcessesIter<'a, P>
where
    P: Protocol,
{
    type Item = Action<P>;

    fn next(&mut self) -> Option<Self::Item> {
        self.process.to_processes()
    }
}

/// Iterator that drains a protocol's pending execution info by repeatedly
/// calling `Protocol::to_executors` until it returns `None`.
pub struct ToExecutorsIter<'a, P> {
    process: &'a mut P,
}

impl<'a, P> Iterator for ToExecutorsIter<'a, P>
where
    P: Protocol,
{
    type Item = <P::Executor as Executor>::ExecutionInfo;

    fn next(&mut self) -> Option<Self::Item> {
        self.process.to_executors()
    }
}
pub type ProtocolMetrics = Metrics<ProtocolMetricsKind>;

impl ProtocolMetrics {
    /// Number of commands committed via the fast path.
    pub fn fast_paths(&self) -> u64 {
        self.metric(ProtocolMetricsKind::FastPath)
    }

    /// Number of commands committed via the slow path.
    pub fn slow_paths(&self) -> u64 {
        self.metric(ProtocolMetricsKind::SlowPath)
    }

    /// Number of read-only commands committed via the fast path.
    pub fn fast_paths_reads(&self) -> u64 {
        self.metric(ProtocolMetricsKind::FastPathReads)
    }

    /// Number of read-only commands committed via the slow path.
    pub fn slow_paths_reads(&self) -> u64 {
        self.metric(ProtocolMetricsKind::SlowPathReads)
    }

    /// Number of stable commands.
    pub fn stable(&self) -> u64 {
        self.metric(ProtocolMetricsKind::Stable)
    }

    /// Returns a tuple containing the number of fast paths, the number of slow
    /// paths and the percentage of fast paths.
    ///
    /// Note: if no command was committed yet, the percentage is NaN (0/0),
    /// matching the previous behavior.
    pub fn fast_path_stats(&self) -> (u64, u64, f64) {
        let fast_path = self.fast_paths();
        let slow_path = self.slow_paths();
        // compute the rate entirely in floating point: `fast_path * 100`
        // could overflow `u64` for very large counts
        let fp_rate =
            fast_path as f64 * 100.0 / (fast_path + slow_path) as f64;
        (fast_path, slow_path, fp_rate)
    }

    // Fetches the aggregated value of `metric`, defaulting to 0.
    fn metric(&self, metric: ProtocolMetricsKind) -> u64 {
        self.get_aggregated(metric).cloned().unwrap_or_default()
    }
}
#[derive(Clone, Copy, Hash, PartialEq, Eq, Serialize, Deserialize)]
pub enum ProtocolMetricsKind {
    /// fast paths of all commands
    FastPath,
    /// slow paths of all commands
    SlowPath,
    /// fast paths of read only commands
    FastPathReads,
    /// slow paths of read only commands
    SlowPathReads,
    /// number of commands that became stable (see `BaseProcess::stable`)
    Stable,
    // NOTE(review): the following kinds appear to be histogram-style metrics
    // recorded via `collect` — confirm semantics in their collection sites
    CommitLatency,
    WaitConditionDelay,
    CommittedDepsLen,
    CommandKeyCount,
}
impl Debug for ProtocolMetricsKind {
    /// Formats each metric kind as its snake_case name.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // map the variant to its name once, then issue a single write
        let name = match self {
            ProtocolMetricsKind::FastPath => "fast_path",
            ProtocolMetricsKind::SlowPath => "slow_path",
            ProtocolMetricsKind::FastPathReads => "fast_path_reads",
            ProtocolMetricsKind::SlowPathReads => "slow_path_reads",
            ProtocolMetricsKind::Stable => "stable",
            ProtocolMetricsKind::CommitLatency => "commit_latency",
            ProtocolMetricsKind::WaitConditionDelay => "wait_condition_delay",
            ProtocolMetricsKind::CommittedDepsLen => "committed_deps_len",
            ProtocolMetricsKind::CommandKeyCount => "command_key_count",
        };
        write!(f, "{}", name)
    }
}
/// This trait is used to decide to which worker some messages should be
/// forwarded to, ensuring that messages with the same index are forwarded
/// to the same process. If `None` is returned, then the message is sent to
/// all workers. In particular, if the protocol is not parallel, the
/// message is sent to the single protocol worker.
pub trait MessageIndex {
    /// Returns the routing index of this message. Only 2 types of indexes
    /// are supported:
    /// - `Some((reserved, index))`: `index` will be used to compute the
    ///   worker index, making sure that the index is higher than `reserved`
    /// - `None`: no indexing; message will be sent to all workers
    fn index(&self) -> Option<(usize, usize)>;
}
/// Actions produced by a `Protocol` (returned by `Protocol::to_processes`).
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Action<P: Protocol> {
    /// Send `msg` to the processes in `target`.
    ToSend {
        target: HashSet<ProcessId>,
        msg: <P as Protocol>::Message,
    },
    /// Forward `msg` — presumably handled locally by the same process;
    /// confirm exact routing in the runner code.
    ToForward {
        msg: <P as Protocol>::Message,
    },
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/protocol/basic.rs | fantoch/src/protocol/basic.rs | use crate::command::Command;
use crate::config::Config;
use crate::executor::{BasicExecutionInfo, BasicExecutor, Executor};
use crate::id::{Dot, ProcessId, ShardId};
use crate::protocol::{
Action, BaseProcess, Info, MessageIndex, Protocol, ProtocolMetrics,
SequentialCommandsInfo, VClockGCTrack,
};
use crate::singleton;
use crate::time::SysTime;
use crate::trace;
use crate::{HashMap, HashSet};
use serde::{Deserialize, Serialize};
use std::time::Duration;
use threshold::VClock;
type ExecutionInfo = <BasicExecutor as Executor>::ExecutionInfo;

/// Basic replication protocol: a command is committed once
/// `basic_quorum_size` `MStoreAck`s are received (see module docs in
/// `protocol/mod.rs` for its consistency/fault-tolerance caveats).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Basic {
    bp: BaseProcess,
    // per-dot command info (payload and received acks)
    cmds: SequentialCommandsInfo<BasicInfo>,
    gc_track: VClockGCTrack,
    // buffered actions, drained by `to_processes`
    to_processes: Vec<Action<Self>>,
    // buffered execution info, drained by `to_executors`
    to_executors: Vec<ExecutionInfo>,
    // dots for which an `MCommit` arrived before the corresponding `MStore`
    // (i.e. before the command payload was known)
    buffered_mcommits: HashSet<Dot>,
}
impl Protocol for Basic {
type Message = Message;
type PeriodicEvent = PeriodicEvent;
type Executor = BasicExecutor;
/// Creates a new `Basic` process.
fn new(
process_id: ProcessId,
shard_id: ShardId,
config: Config,
) -> (Self, Vec<(PeriodicEvent, Duration)>) {
// compute fast and write quorum sizes
let fast_quorum_size = config.basic_quorum_size();
let write_quorum_size = 0; // there's no write quorum as we have 100% fast paths
// create protocol data-structures
let bp = BaseProcess::new(
process_id,
shard_id,
config,
fast_quorum_size,
write_quorum_size,
);
let cmds = SequentialCommandsInfo::new(
process_id,
shard_id,
config.n(),
config.f(),
fast_quorum_size,
write_quorum_size,
);
let gc_track = VClockGCTrack::new(process_id, shard_id, config.n());
let to_processes = Vec::new();
let to_executors = Vec::new();
let buffered_mcommits = HashSet::new();
// create `Basic`
let protocol = Self {
bp,
cmds,
gc_track,
to_processes,
to_executors,
buffered_mcommits,
};
// create periodic events
let events = if let Some(interval) = config.gc_interval() {
vec![(PeriodicEvent::GarbageCollection, interval)]
} else {
vec![]
};
// return both
(protocol, events)
}
/// Returns the process identifier.
fn id(&self) -> ProcessId {
self.bp.process_id
}
/// Returns the shard identifier.
fn shard_id(&self) -> ShardId {
self.bp.shard_id
}
/// Updates the processes known by this process.
/// The set of processes provided is already sorted by distance.
fn discover(
&mut self,
processes: Vec<(ProcessId, ShardId)>,
) -> (bool, HashMap<ShardId, ProcessId>) {
let connect_ok = self.bp.discover(processes);
(connect_ok, self.bp.closest_shard_process().clone())
}
/// Submits a command issued by some client.
fn submit(&mut self, dot: Option<Dot>, cmd: Command, _time: &dyn SysTime) {
self.handle_submit(dot, cmd);
}
/// Handles protocol messages.
fn handle(
&mut self,
from: ProcessId,
_from_shard_id: ShardId,
msg: Self::Message,
_time: &dyn SysTime,
) {
match msg {
Message::MStore { dot, cmd, quorum } => {
self.handle_mstore(from, dot, cmd, quorum)
}
Message::MStoreAck { dot } => self.handle_mstoreack(from, dot),
Message::MCommit { dot } => self.handle_mcommit(dot),
Message::MCommitDot { dot } => self.handle_mcommit_dot(from, dot),
Message::MGarbageCollection { committed } => {
self.handle_mgc(from, committed)
}
Message::MStable { stable } => self.handle_mstable(from, stable),
}
}
/// Handles periodic local events.
fn handle_event(
&mut self,
event: Self::PeriodicEvent,
_time: &dyn SysTime,
) {
match event {
PeriodicEvent::GarbageCollection => {
self.handle_event_garbage_collection()
}
}
}
/// Returns a new action to be sent to other processes.
fn to_processes(&mut self) -> Option<Action<Self>> {
self.to_processes.pop()
}
/// Returns new execution info for executors, or `None` if the buffer is
/// empty (drained in LIFO order, like `to_processes`).
fn to_executors(&mut self) -> Option<ExecutionInfo> {
    self.to_executors.pop()
}
/// `Basic` supports parallel workers: its messages carry a `Dot` that the
/// `MessageIndex` implementation below uses to shard work across workers.
fn parallel() -> bool {
    true
}
/// `Basic` is leaderless: any process can coordinate a command (see
/// `handle_submit`).
fn leaderless() -> bool {
    true
}
/// Returns the protocol metrics collected by the base process.
fn metrics(&self) -> &ProtocolMetrics {
    self.bp.metrics()
}
}
impl Basic {
    /// Handles a submit operation by a client: assigns a dot to the command
    /// (unless one was pre-assigned) and broadcasts an `MStore` to all
    /// processes, tagging the quorum that must ack it.
    fn handle_submit(&mut self, dot: Option<Dot>, cmd: Command) {
        // compute the command identifier
        let dot = dot.unwrap_or_else(|| self.bp.next_dot());
        // create `MStore` and target
        let quorum = self.bp.fast_quorum();
        let mstore = Message::MStore { dot, cmd, quorum };
        let target = self.bp.all();
        // save new action
        self.to_processes.push(Action::ToSend {
            target,
            msg: mstore,
        })
    }
    /// Handles an `MStore`: records the command payload and, if this process
    /// is part of the quorum, acks back to the sender. If an `MCommit` for
    /// this dot arrived before the payload, it is replayed now.
    fn handle_mstore(
        &mut self,
        from: ProcessId,
        dot: Dot,
        cmd: Command,
        quorum: HashSet<ProcessId>,
    ) {
        trace!(
            "p{}: MStore({:?}, {:?}, {:?}) from {}",
            self.id(),
            dot,
            cmd,
            quorum,
            from
        );
        // get cmd info
        let info = self.cmds.get(dot);
        // update command info with the payload
        info.cmd = Some(cmd);
        // reply if we're part of the quorum
        if quorum.contains(&self.id()) {
            // create `MStoreAck` and target (only the coordinator)
            let mstoreack = Message::MStoreAck { dot };
            let target = singleton![from];
            // save new action
            self.to_processes.push(Action::ToSend {
                target,
                msg: mstoreack,
            })
        }
        // check if there's a buffered commit notification; if yes, handle
        // the commit again (since now we have the payload)
        if self.buffered_mcommits.remove(&dot) {
            self.handle_mcommit(dot);
        }
    }
    /// Handles an `MStoreAck`: once acks from a full basic quorum have been
    /// gathered, broadcasts an `MCommit` to all processes.
    fn handle_mstoreack(&mut self, from: ProcessId, dot: Dot) {
        trace!("p{}: MStoreAck({:?}) from {}", self.id(), dot, from);
        // get cmd info
        let info = self.cmds.get(dot);
        // update quorum clocks
        info.acks.insert(from);
        // check if we have all necessary replies
        // (`insert` into the set makes duplicate acks harmless)
        if info.acks.len() == self.bp.config.basic_quorum_size() {
            let mcommit = Message::MCommit { dot };
            let target = self.bp.all();
            // save new action
            self.to_processes.push(Action::ToSend {
                target,
                msg: mcommit,
            });
        }
    }
    /// Handles an `MCommit`: if the payload is already known, forwards
    /// per-key execution info to the executors; otherwise buffers the commit
    /// until the corresponding `MStore` arrives (messages may be reordered).
    fn handle_mcommit(&mut self, dot: Dot) {
        trace!("p{}: MCommit({:?})", self.id(), dot);
        // get cmd info and its rifl
        let info = self.cmds.get(dot);
        // check if we have received the initial `MStore`
        if let Some(cmd) = info.cmd.as_ref() {
            // if so, create execution info:
            // - one entry per key being accessed will be created, which allows
            //   the basic executor to run in parallel
            let rifl = cmd.rifl();
            let execution_info =
                cmd.iter(self.bp.shard_id).map(|(key, ops)| {
                    BasicExecutionInfo::new(rifl, key.clone(), ops.clone())
                });
            self.to_executors.extend(execution_info);
            if self.gc_running() {
                // notify self with the committed dot so the GC worker can
                // track it
                self.to_processes.push(Action::ToForward {
                    msg: Message::MCommitDot { dot },
                });
            } else {
                // if we're not running gc, remove the dot info now
                self.cmds.gc_single(dot);
            }
        } else {
            // if not, buffer this `MCommit` notification
            self.buffered_mcommits.insert(dot);
        }
    }
    /// Handles an `MCommitDot` (self-forwarded only): records the committed
    /// dot in the GC clock.
    fn handle_mcommit_dot(&mut self, from: ProcessId, dot: Dot) {
        trace!("p{}: MCommitDot({:?})", self.id(), dot);
        // `MCommitDot` is only ever forwarded locally
        assert_eq!(from, self.bp.process_id);
        self.gc_track.add_to_clock(&dot);
    }
    /// Handles an `MGarbageCollection` from a peer: merges the peer's
    /// committed clock and, if new dots became stable, self-forwards an
    /// `MStable` so every worker can garbage-collect them.
    fn handle_mgc(&mut self, from: ProcessId, committed: VClock<ProcessId>) {
        trace!(
            "p{}: MGarbageCollection({:?}) from {}",
            self.id(),
            committed,
            from
        );
        self.gc_track.update_clock_of(from, committed);
        // compute newly stable dots
        let stable = self.gc_track.stable();
        // create `ToForward` to self
        if !stable.is_empty() {
            self.to_processes.push(Action::ToForward {
                msg: Message::MStable { stable },
            })
        }
    }
    /// Handles an `MStable` (self-forwarded only): drops the info of all
    /// stable dots and reports the count to the metrics.
    fn handle_mstable(
        &mut self,
        from: ProcessId,
        stable: Vec<(ProcessId, u64, u64)>,
    ) {
        trace!("p{}: MStable({:?}) from {}", self.id(), stable, from);
        assert_eq!(from, self.bp.process_id);
        let stable_count = self.cmds.gc(stable);
        self.bp.stable(stable_count);
    }
    /// Periodic GC event: broadcasts our committed-clock frontier to all
    /// other processes so they can compute stability.
    fn handle_event_garbage_collection(&mut self) {
        trace!("p{}: PeriodicEvent::GarbageCollection", self.id());
        // retrieve the committed clock
        let committed = self.gc_track.clock().frontier();
        // save new action
        self.to_processes.push(Action::ToSend {
            target: self.bp.all_but_me(),
            msg: Message::MGarbageCollection { committed },
        });
    }
    /// Whether periodic garbage collection is enabled in the config.
    fn gc_running(&self) -> bool {
        self.bp.config.gc_interval().is_some()
    }
}
// `BasicInfo` contains all information required in the life-cycle of a
// `Command`
#[derive(Debug, Clone, PartialEq, Eq)]
struct BasicInfo {
    // command payload; `None` until the `MStore` is received
    cmd: Option<Command>,
    // processes that have acked the `MStore`
    acks: HashSet<ProcessId>,
}
impl Info for BasicInfo {
fn new(
_process_id: ProcessId,
_shard_id: ShardId,
_n: usize,
_f: usize,
fast_quorum_size: usize,
_write_quorum_size: usize,
) -> Self {
// create bottom consensus value
Self {
cmd: None,
acks: HashSet::with_capacity(fast_quorum_size),
}
}
}
// `Basic` protocol messages
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum Message {
    /// Coordinator broadcasts the command payload; `quorum` members ack it.
    MStore {
        dot: Dot,
        cmd: Command,
        quorum: HashSet<ProcessId>,
    },
    /// Quorum member acknowledges an `MStore` back to the coordinator.
    MStoreAck {
        dot: Dot,
    },
    /// Coordinator announces the command as committed.
    MCommit {
        dot: Dot,
    },
    /// Self-forwarded: informs the GC worker that `dot` was committed here.
    MCommitDot {
        dot: Dot,
    },
    /// Peer-to-peer: a process's committed-clock frontier, used to compute
    /// stability.
    MGarbageCollection {
        committed: VClock<ProcessId>,
    },
    /// Self-forwarded: ranges of dots that became stable and can be GCed.
    MStable {
        stable: Vec<(ProcessId, u64, u64)>,
    },
}
impl MessageIndex for Message {
    /// Maps each message to the worker that should handle it: protocol
    /// messages are routed by dot, GC bookkeeping goes to the dedicated GC
    /// worker, and `MStable` carries no worker index.
    fn index(&self) -> Option<(usize, usize)> {
        use crate::load_balance::{
            worker_dot_index_shift, worker_index_no_shift, GC_WORKER_INDEX,
        };
        match self {
            // protocol messages: routed by dot
            Self::MStore { dot, .. }
            | Self::MStoreAck { dot, .. }
            | Self::MCommit { dot, .. } => worker_dot_index_shift(dot),
            // GC messages: always handled by the GC worker
            Self::MCommitDot { .. } | Self::MGarbageCollection { .. } => {
                worker_index_no_shift(GC_WORKER_INDEX)
            }
            // no preferred worker for `MStable`
            Self::MStable { .. } => None,
        }
    }
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PeriodicEvent {
    /// Fired at the configured `gc_interval` to exchange committed clocks.
    GarbageCollection,
}
impl MessageIndex for PeriodicEvent {
    /// Periodic garbage collection is always handled by the GC worker.
    fn index(&self) -> Option<(usize, usize)> {
        use crate::load_balance::{worker_index_no_shift, GC_WORKER_INDEX};
        // single-variant enum: the pattern below is irrefutable
        let Self::GarbageCollection = self;
        worker_index_no_shift(GC_WORKER_INDEX)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::client::{Client, KeyGen, Workload};
    use crate::planet::{Planet, Region};
    use crate::sim::Simulation;
    use crate::time::SimTime;
    use crate::util;
    /// End-to-end flow of the `Basic` protocol on a simulated 3-process
    /// deployment: submit a command, gather store acks, commit, execute,
    /// and issue a follow-up command.
    #[test]
    fn basic_flow() {
        // create simulation
        let mut simulation = Simulation::new();
        // processes ids
        let process_id_1 = 1;
        let process_id_2 = 2;
        let process_id_3 = 3;
        // regions
        // (fixed: these were all `"europe-west2"` by copy-paste; use the
        // regions the variable names refer to — process 1 is still the
        // closest to the client placed in europe-west2)
        let europe_west2 = Region::new("europe-west2");
        let europe_west3 = Region::new("europe-west3");
        let us_west1 = Region::new("us-west1");
        // there's a single shard
        let shard_id = 0;
        // processes
        let processes = vec![
            (process_id_1, shard_id, europe_west2.clone()),
            (process_id_2, shard_id, europe_west3.clone()),
            (process_id_3, shard_id, us_west1.clone()),
        ];
        // planet
        let planet = Planet::new();
        // create system time
        let time = SimTime::new();
        // n and f
        let n = 3;
        let f = 1;
        let config = Config::new(n, f);
        // executors
        let executor_1 = BasicExecutor::new(process_id_1, shard_id, config);
        let executor_2 = BasicExecutor::new(process_id_2, shard_id, config);
        let executor_3 = BasicExecutor::new(process_id_3, shard_id, config);
        // basic
        let (mut basic_1, _) = Basic::new(process_id_1, shard_id, config);
        let (mut basic_2, _) = Basic::new(process_id_2, shard_id, config);
        let (mut basic_3, _) = Basic::new(process_id_3, shard_id, config);
        // discover processes in all basic
        let sorted = util::sort_processes_by_distance(
            &europe_west2,
            &planet,
            processes.clone(),
        );
        basic_1.discover(sorted);
        let sorted = util::sort_processes_by_distance(
            &europe_west3,
            &planet,
            processes.clone(),
        );
        basic_2.discover(sorted);
        let sorted = util::sort_processes_by_distance(
            &us_west1,
            &planet,
            processes.clone(),
        );
        basic_3.discover(sorted);
        // register processes
        simulation.register_process(basic_1, executor_1);
        simulation.register_process(basic_2, executor_2);
        simulation.register_process(basic_3, executor_3);
        // client workload
        let shard_count = 1;
        let keys_per_command = 1;
        let conflict_rate = 100;
        let pool_size = 1;
        let key_gen = KeyGen::ConflictPool {
            conflict_rate,
            pool_size,
        };
        let commands_per_client = 10;
        let payload_size = 100;
        let workload = Workload::new(
            shard_count,
            key_gen,
            keys_per_command,
            commands_per_client,
            payload_size,
        );
        // create client 1 that is connected to basic 1
        let client_id = 1;
        let client_region = europe_west2.clone();
        let status_frequency = None;
        let mut client_1 = Client::new(client_id, workload, status_frequency);
        // discover processes in client 1
        let closest =
            util::closest_process_per_shard(&client_region, &planet, processes);
        client_1.connect(closest);
        // start client
        let (target_shard, cmd) = client_1
            .cmd_send(&time)
            .expect("there should be a first operation");
        let target = client_1.shard_process(&target_shard);
        // check that `target` is basic 1
        assert_eq!(target, process_id_1);
        // register client
        simulation.register_client(client_1);
        // register command in executor and submit it in basic 1
        let (process, _, pending, time) = simulation.get_process(process_id_1);
        pending.wait_for(&cmd);
        process.submit(None, cmd, time);
        let mut actions: Vec<_> = process.to_processes_iter().collect();
        // there's a single action
        assert_eq!(actions.len(), 1);
        let mstore = actions.pop().unwrap();
        // check that the mstore is being sent to all processes
        let check_target = |target: &HashSet<ProcessId>| target.len() == n;
        assert!(
            matches!(mstore.clone(), Action::ToSend {target, ..} if check_target(&target))
        );
        // handle mstores
        let mut mstoreacks =
            simulation.forward_to_processes((process_id_1, mstore));
        // check that there are 2 mstoreacks
        assert_eq!(mstoreacks.len(), 2);
        // handle the first mstoreack
        let mcommits = simulation.forward_to_processes(
            mstoreacks.pop().expect("there should be an mstore ack"),
        );
        // no mcommit yet
        assert!(mcommits.is_empty());
        // handle the second mstoreack
        let mut mcommits = simulation.forward_to_processes(
            mstoreacks.pop().expect("there should be an mstore ack"),
        );
        // there's a commit now
        assert_eq!(mcommits.len(), 1);
        // check that the mcommit is sent to everyone
        let mcommit = mcommits.pop().expect("there should be an mcommit");
        let check_target = |target: &HashSet<ProcessId>| target.len() == n;
        assert!(
            matches!(mcommit.clone(), (_, Action::ToSend {target, ..}) if check_target(&target))
        );
        // all processes handle it
        let to_sends = simulation.forward_to_processes(mcommit);
        // check the MCommitDot
        let check_msg =
            |msg: &Message| matches!(msg, Message::MCommitDot { .. });
        assert!(to_sends.into_iter().all(|(_, action)| {
            matches!(action, Action::ToForward { msg } if check_msg(&msg))
        }));
        // process 1 should have something to the executor
        let (process, executor, pending, time) =
            simulation.get_process(process_id_1);
        let to_executor: Vec<_> = process.to_executors_iter().collect();
        assert_eq!(to_executor.len(), 1);
        // handle in executor and check there's a single command partial
        let mut ready: Vec<_> = to_executor
            .into_iter()
            .flat_map(|info| {
                executor.handle(info, time);
                executor.to_clients_iter().collect::<Vec<_>>()
            })
            .collect();
        assert_eq!(ready.len(), 1);
        // get that command
        let executor_result =
            ready.pop().expect("there should an executor result");
        let cmd_result = pending
            .add_executor_result(executor_result)
            .expect("there should be a command result");
        // handle the previous command result
        let (target, cmd) = simulation
            .forward_to_client(cmd_result)
            .expect("there should a new submit");
        let (process, _, _, time) = simulation.get_process(target);
        process.submit(None, cmd, time);
        let mut actions: Vec<_> = process.to_processes_iter().collect();
        // there's a single action
        assert_eq!(actions.len(), 1);
        let mstore = actions.pop().unwrap();
        let check_msg = |msg: &Message| matches!(msg, Message::MStore {dot, ..} if dot == &Dot::new(process_id_1, 2));
        assert!(matches!(mstore, Action::ToSend {msg, ..} if check_msg(&msg)));
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/protocol/gc/clock.rs | fantoch/src/protocol/gc/clock.rs | use crate::id::{Dot, ProcessId, ShardId};
use crate::trace;
use crate::util;
use crate::HashMap;
use threshold::{AEClock, AboveExSet, Clock, EventSet, MaxSet, VClock};
/// GC tracker over `MaxSet`, i.e. tracking only clock frontiers.
pub type VClockGCTrack = ClockGCTrack<MaxSet>;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ClockGCTrack<E: EventSet> {
    process_id: ProcessId,
    shard_id: ShardId,
    // number of processes in the shard
    n: usize,
    // the next 3 variables will be updated by the single process responsible
    // for GC
    // dots committed at this process
    my_clock: AEClock<ProcessId>,
    // most recent committed clock received from each of the other processes
    all_but_me: HashMap<ProcessId, Clock<ProcessId, E>>,
    // clock covering the dots already reported as stable
    previous_stable: Clock<ProcessId, E>,
}
impl<E: EventSet> ClockGCTrack<E> {
    /// Creates a new tracker for a shard with `n` processes.
    pub fn new(process_id: ProcessId, shard_id: ShardId, n: usize) -> Self {
        // clocks from all processes but self
        let all_but_me = HashMap::with_capacity(n - 1);
        Self {
            process_id,
            shard_id,
            n,
            my_clock: Self::bottom_aeclock(shard_id, n),
            all_but_me,
            previous_stable: Self::bottom_clock(shard_id, n),
        }
    }
    /// Returns a clock representing the set of commands recorded locally.
    /// Note that there might be more commands recorded than the ones being
    /// represented by the returned clock.
    pub fn clock(&self) -> &AEClock<ProcessId> {
        &self.my_clock
    }
    /// Records this command.
    pub fn add_to_clock(&mut self, dot: &Dot) {
        self.my_clock.add(&dot.source(), dot.sequence());
        // make sure we don't record dots from other shards
        debug_assert_eq!(self.my_clock.len(), self.n);
    }
    /// Records the set of commands by process `from`.
    pub fn update_clock_of(
        &mut self,
        from: ProcessId,
        clock: Clock<ProcessId, E>,
    ) {
        if let Some(current) = self.all_but_me.get_mut(&from) {
            // accumulate new knowledge; simply replacing it doesn't work since
            // messages can be reordered
            current.join(&clock);
        } else {
            // first clock from `from`
            self.all_but_me.insert(from, clock);
        }
    }
    // Bottom (all-zero) committed clock with one entry per shard process.
    fn bottom_aeclock(shard_id: ShardId, n: usize) -> AEClock<ProcessId> {
        AEClock::with(util::process_ids(shard_id, n))
    }
    // Bottom (all-zero) clock with one entry per shard process.
    fn bottom_clock(shard_id: ShardId, n: usize) -> Clock<ProcessId, E> {
        Clock::<_, E>::with(util::process_ids(shard_id, n))
    }
}
impl ClockGCTrack<MaxSet> {
    /// Computes the new set of stable dots.
    /// Each returned triple `(process_id, start, end)` represents the dots
    /// `start..=end` from `process_id` that became stable since the previous
    /// call.
    pub fn stable(&mut self) -> Vec<(ProcessId, u64, u64)> {
        // compute new stable clock
        let mut new_stable = self.stable_clock();
        trace!("GCTrack::stable_clock {:?}", new_stable);
        // compute new stable dots; while at it, update the previous stable
        // clock and return newly stable dots
        // - here we make sure we never go down on the previous clock, which
        //   would be possible if messages are reordered in the network or if
        //   we're multiplexing
        let dots = self
            .previous_stable
            .iter()
            .filter_map(|(process_id, previous)| {
                let current =
                    if let Some(current) = new_stable.get_mut(process_id) {
                        current
                    } else {
                        // both clocks are created with one entry per shard
                        // process, so every id must be present
                        panic!(
                            "actor {} should exist in the newly stable clock",
                            process_id
                        )
                    };
                // compute representation of stable dots.
                let start = previous.frontier() + 1;
                let end = current.frontier();
                // make sure new clock doesn't go backwards
                current.join(previous);
                if start > end {
                    // nothing new became stable for this process
                    None
                } else {
                    // return stable dots representation
                    // - note that `start == end` also represents a stable dot
                    Some((*process_id, start, end))
                }
            })
            .collect();
        // update the previous stable clock and return newly stable dots
        self.previous_stable = new_stable;
        dots
    }
    // Computes the clock of stable dots: the pointwise minimum (meet) of our
    // own frontier and every other process's committed clock.
    // TODO we should design a fault-tolerant version of this
    fn stable_clock(&mut self) -> VClock<ProcessId> {
        if self.all_but_me.len() != self.n - 1 {
            // if we don't have info from all processes, then there are no
            // stable dots.
            return Self::bottom_clock(self.shard_id, self.n);
        }
        // start from our own frontier
        let mut stable = self.my_clock.frontier();
        // and intersect with all the other clocks
        self.all_but_me.values().for_each(|clock| {
            stable.meet(clock);
        });
        stable
    }
}
impl ClockGCTrack<AboveExSet> {
    /// Computes the new set of stable dots, as a map from process id to the
    /// sequences that became stable since the previous call.
    pub fn stable(&mut self) -> std::collections::HashMap<ProcessId, Vec<u64>> {
        // compute new stable clock
        let new_stable = self.stable_clock();
        trace!("GCTrack::stable_clock {:?}", new_stable);
        // compute new stable dots (those in `new_stable` but not yet
        // reported in `previous_stable`)
        let dots = new_stable.subtracted(&self.previous_stable);
        // update the previous stable clock and return newly stable dots
        self.previous_stable = new_stable;
        dots
    }
    // Computes the clock of stable dots: the meet of our own committed clock
    // and every other process's committed clock.
    // TODO we should design a fault-tolerant version of this
    fn stable_clock(&mut self) -> AEClock<ProcessId> {
        if self.all_but_me.len() != self.n - 1 {
            // if we don't have info from all processes, then there are no
            // stable dots.
            return Self::bottom_clock(self.shard_id, self.n);
        }
        // start from our own frontier
        let mut stable = self.my_clock.clone();
        // and intersect with all the other clocks
        self.all_but_me.values().for_each(|clock| {
            stable.meet(clock);
        });
        stable
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use threshold::MaxSet;
    // create vector clock with two entries: process 1 and process 2
    fn vclock(p1: u64, p2: u64) -> VClock<ProcessId> {
        VClock::from(vec![(1, MaxSet::from(p1)), (2, MaxSet::from(p2))])
    }
    // expand a stable-range representation into the list of dots it covers
    fn stable_dots(repr: Vec<(ProcessId, u64, u64)>) -> Vec<Dot> {
        crate::util::dots(repr).collect()
    }
    /// Exercises commit/stability tracking between two processes: a dot only
    /// becomes stable once both processes have committed it.
    #[test]
    fn gc_flow() {
        let n = 2;
        let shard_id = 0;
        // create new gc track for our process: 1
        let mut gc = VClockGCTrack::new(1, shard_id, n);
        // let's also create a gc track for process 2
        let mut gc2 = VClockGCTrack::new(2, shard_id, n);
        // there's nothing committed and nothing stable
        assert_eq!(gc.clock().frontier(), vclock(0, 0));
        assert_eq!(gc.stable_clock(), vclock(0, 0));
        assert_eq!(stable_dots(gc.stable()), vec![]);
        // let's create a bunch of dots
        let dot11 = Dot::new(1, 1);
        let dot12 = Dot::new(1, 2);
        let dot13 = Dot::new(1, 3);
        // and commit dot12 locally
        gc.add_to_clock(&dot12);
        // this doesn't change anything (the frontier only advances on
        // contiguous sequences, and dot11 is still missing)
        assert_eq!(gc.clock().frontier(), vclock(0, 0));
        assert_eq!(gc.stable_clock(), vclock(0, 0));
        assert_eq!(stable_dots(gc.stable()), vec![]);
        // however, if we also commit dot11, the committed clock will change
        gc.add_to_clock(&dot11);
        assert_eq!(gc.clock().frontier(), vclock(2, 0));
        assert_eq!(gc.stable_clock(), vclock(0, 0));
        assert_eq!(stable_dots(gc.stable()), vec![]);
        // if we update with the committed clock from process 2 nothing changes
        gc.update_clock_of(2, gc2.clock().frontier());
        assert_eq!(gc.clock().frontier(), vclock(2, 0));
        assert_eq!(gc.stable_clock(), vclock(0, 0));
        assert_eq!(stable_dots(gc.stable()), vec![]);
        // let's commit dot11 and dot13 at process 2
        gc2.add_to_clock(&dot11);
        gc2.add_to_clock(&dot13);
        // now dot11 is stable at process 1
        gc.update_clock_of(2, gc2.clock().frontier());
        assert_eq!(gc.clock().frontier(), vclock(2, 0));
        assert_eq!(gc.stable_clock(), vclock(1, 0));
        assert_eq!(stable_dots(gc.stable()), vec![dot11]);
        // if we call stable again, no new dot is returned
        assert_eq!(gc.stable_clock(), vclock(1, 0));
        assert_eq!(stable_dots(gc.stable()), vec![]);
        // let's commit dot13 at process 1 and dot12 at process 2
        gc.add_to_clock(&dot13);
        gc2.add_to_clock(&dot12);
        // now both dot12 and dot13 are stable at process 1
        gc.update_clock_of(2, gc2.clock().frontier());
        assert_eq!(gc.clock().frontier(), vclock(3, 0));
        assert_eq!(gc.stable_clock(), vclock(3, 0));
        assert_eq!(stable_dots(gc.stable()), vec![dot12, dot13]);
        assert_eq!(stable_dots(gc.stable()), vec![]);
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/protocol/gc/mod.rs | fantoch/src/protocol/gc/mod.rs | mod basic;
mod clock;
// Re-exports.
pub use basic::BasicGCTrack;
pub use clock::{ClockGCTrack, VClockGCTrack};
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/protocol/gc/basic.rs | fantoch/src/protocol/gc/basic.rs | use crate::id::Dot;
use crate::HashMap;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct BasicGCTrack {
    // number of processes that must record a dot before it's stable
    n: usize,
    // how many processes have recorded each not-yet-stable dot
    dot_to_count: HashMap<Dot, usize>,
}
impl BasicGCTrack {
pub fn new(n: usize) -> Self {
Self {
n,
dot_to_count: HashMap::new(),
}
}
/// Records this command, returning a bool indicating whether it is stable.
#[must_use]
pub fn add(&mut self, dot: Dot) -> bool {
let count = self.dot_to_count.entry(dot).or_default();
*count += 1;
if *count == self.n {
self.dot_to_count.remove(&dot);
true
} else {
false
}
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/protocol/info/sequential.rs | fantoch/src/protocol/info/sequential.rs | use super::Info;
use crate::id::{Dot, ProcessId, ShardId};
use crate::util;
use crate::HashMap;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SequentialCommandsInfo<I> {
    // parameters forwarded to `I::new` when creating bottom infos
    process_id: ProcessId,
    shard_id: ShardId,
    n: usize,
    f: usize,
    fast_quorum_size: usize,
    write_quorum_size: usize,
    // per-dot protocol bookkeeping
    dot_to_info: HashMap<Dot, I>,
}
impl<I> SequentialCommandsInfo<I>
where
    I: Info,
{
    /// Creates an empty commands-info store; the arguments are remembered so
    /// that bottom `I` instances can be created lazily in `get`.
    pub fn new(
        process_id: ProcessId,
        shard_id: ShardId,
        n: usize,
        f: usize,
        fast_quorum_size: usize,
        write_quorum_size: usize,
    ) -> Self {
        Self {
            process_id,
            shard_id,
            n,
            f,
            fast_quorum_size,
            write_quorum_size,
            dot_to_info: HashMap::new(),
        }
    }
    /// Returns the `Info` associated with `Dot`.
    /// If no `Info` is associated, an empty `Info` is returned.
    pub fn get(&mut self, dot: Dot) -> &mut I {
        // borrow everything we need so that the borrow checker does not
        // complain
        let process_id = self.process_id;
        let shard_id = self.shard_id;
        let n = self.n;
        let f = self.f;
        let fast_quorum_size = self.fast_quorum_size;
        let write_quorum_size = self.write_quorum_size;
        self.dot_to_info.entry(dot).or_insert_with(|| {
            I::new(
                process_id,
                shard_id,
                n,
                f,
                fast_quorum_size,
                write_quorum_size,
            )
        })
    }
    /// Performs garbage collection of stable dots.
    /// Returns how many stable dots were removed.
    pub fn gc(&mut self, stable: Vec<(ProcessId, u64, u64)>) -> usize {
        util::dots(stable)
            .filter(|dot| {
                // remove dot:
                // - the dot may not exist locally if there are multiple workers
                //   and this worker is not responsible for such dot
                self.dot_to_info.remove(&dot).is_some()
            })
            .count()
    }
    /// Removes a command that has been committed; panics if the dot is
    /// unknown.
    pub fn gc_single(&mut self, dot: Dot) {
        assert!(self.dot_to_info.remove(&dot).is_some());
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/protocol/info/locked.rs | fantoch/src/protocol/info/locked.rs | use super::Info;
use crate::id::{Dot, ProcessId, ShardId};
use crate::shared::{SharedMap, SharedMapRef};
use parking_lot::Mutex;
use std::sync::Arc;
#[derive(Debug, Clone)]
pub struct LockedCommandsInfo<I: Info> {
    // parameters forwarded to `I::new` when creating bottom infos
    process_id: ProcessId,
    shard_id: ShardId,
    n: usize,
    f: usize,
    fast_quorum_size: usize,
    write_quorum_size: usize,
    // per-dot bookkeeping, shared across workers; each entry is
    // individually locked
    dot_to_info: Arc<SharedMap<Dot, Mutex<I>>>,
}
impl<I> LockedCommandsInfo<I>
where
    I: Info,
{
    /// Creates an empty, shareable commands-info store; the arguments are
    /// remembered so that bottom `I` instances can be created lazily.
    pub fn new(
        process_id: ProcessId,
        shard_id: ShardId,
        n: usize,
        f: usize,
        fast_quorum_size: usize,
        write_quorum_size: usize,
    ) -> Self {
        Self {
            process_id,
            shard_id,
            n,
            f,
            fast_quorum_size,
            write_quorum_size,
            dot_to_info: Arc::new(SharedMap::new()),
        }
    }
    /// Returns the `Info` associated with `Dot` in case it exists.
    pub fn get(&mut self, dot: Dot) -> Option<SharedMapRef<'_, Dot, Mutex<I>>> {
        self.dot_to_info.get(&dot)
    }
    /// Returns the `Info` associated with `Dot`.
    /// If no `Info` is associated, an empty `Info` is returned.
    pub fn get_or_default(
        &mut self,
        dot: Dot,
    ) -> SharedMapRef<'_, Dot, Mutex<I>> {
        // borrow everything we need so that the borrow checker does not
        // complain
        let process_id = self.process_id;
        let shard_id = self.shard_id;
        let n = self.n;
        let f = self.f;
        let fast_quorum_size = self.fast_quorum_size;
        let write_quorum_size = self.write_quorum_size;
        self.dot_to_info.get_or(&dot, || {
            let info = I::new(
                process_id,
                shard_id,
                n,
                f,
                fast_quorum_size,
                write_quorum_size,
            );
            Mutex::new(info)
        })
    }
    /// Removes a command, returning its info (if it existed locally).
    #[must_use]
    pub fn gc_single(&mut self, dot: &Dot) -> Option<I> {
        self.dot_to_info
            .remove(dot)
            .map(|(_dot, lock)| lock.into_inner())
    }
    /// Number of dots currently tracked.
    pub fn len(&self) -> usize {
        self.dot_to_info.len()
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch/src/protocol/info/mod.rs | fantoch/src/protocol/info/mod.rs | // This module contains the implementation of `SequentialCommandsInfo`.
mod sequential;
// This module contains the implementation of `LockedCommandsInfo`.
mod locked;
// Re-exports.
pub use locked::LockedCommandsInfo;
pub use sequential::SequentialCommandsInfo;
use crate::id::{ProcessId, ShardId};
/// Per-`Dot` protocol bookkeeping. Implementors create a "bottom" instance
/// from the deployment parameters (`n` processes, `f` tolerated faults, and
/// the two quorum sizes); implementors may ignore the parameters they don't
/// need.
pub trait Info {
    fn new(
        process_id: ProcessId,
        shard_id: ShardId,
        n: usize,
        f: usize,
        fast_quorum_size: usize,
        write_quorum_size: usize,
    ) -> Self;
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_exp/src/config.rs | fantoch_exp/src/config.rs | #[cfg(feature = "exp")]
use crate::args;
use crate::{FantochFeature, Protocol, RunMode, Testbed};
use fantoch::client::Workload;
use fantoch::config::Config;
use fantoch::id::{ProcessId, ShardId};
use fantoch::planet::{Planet, Region};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt;
use std::time::Duration;
// index of a region within an experiment deployment
pub type RegionIndex = usize;
// maps a (region, shard) placement slot to the process deployed there
pub type Placement = HashMap<(Region, ShardId), (ProcessId, RegionIndex)>;
pub type PlacementFlat = Vec<(Region, ShardId, ProcessId, RegionIndex)>;
// FIXED
#[cfg(feature = "exp")]
const IP: &str = "0.0.0.0";
// parallelism config
const WORKERS: usize = 16;
const EXECUTORS: usize = 16;
const MULTIPLEXING: usize = 16;
// process tcp config
const PROCESS_TCP_NODELAY: bool = true;
// each socket stream is buffered, which should greatly reduce the number of
// syscalls for small-sized messages; the default buffer size would be 8KB,
// but experiments use a much larger buffer
const PROCESS_TCP_BUFFER_SIZE: usize = 16 * 1024 * 1024; // 16MB
const PROCESS_TCP_FLUSH_INTERVAL: Option<Duration> =
    Some(Duration::from_millis(5));
// if this value is 100, the run doesn't finish, which probably means there's a
// deadlock somewhere with 1000 we can see that channels fill up sometimes with
// 10000 that doesn't seem to happen
// - in AWS 10000 is not enough; setting it to 100k
// - in Apollo with 32k clients per site, 100k is not enough at the fpaxos
//   leader; setting it to 1M
// - in Apollo with 16k clients per site, 1M is not enough with tempo; setting
//   it to 100M (since 10M is also not enough)
const PROCESS_CHANNEL_BUFFER_SIZE: usize = 100_000_000;
const CLIENT_CHANNEL_BUFFER_SIZE: usize = 10_000;
// tokio config
#[cfg(feature = "exp")]
const PROCESS_STACK_SIZE: Option<usize> = Some(32 * 1024 * 1024); // 32MB
#[cfg(feature = "exp")]
const CLIENT_STACK_SIZE: Option<usize> = None; // default is 8MB
#[cfg(feature = "exp")]
const EXECUTION_LOG: Option<String> = None;
#[cfg(feature = "exp")]
const PING_INTERVAL: Option<Duration> = Some(Duration::from_millis(500));
#[cfg(feature = "exp")]
// const STATUS_FREQUENCY: Option<usize> = None;
const STATUS_FREQUENCY: Option<usize> = Some(10);
// if paxos, set process 1 as the leader
const LEADER: ProcessId = 1;
// client tcp config
const CLIENT_TCP_NODELAY: bool = true;
#[cfg(feature = "exp")]
/// Everything needed to launch one protocol process in an experiment;
/// `to_args` turns it into the process binary's CLI arguments.
pub struct ProtocolConfig {
    process_id: ProcessId,
    shard_id: ShardId,
    // optional pre-sorted list of (process, shard) pairs
    sorted: Option<Vec<(ProcessId, ShardId)>>,
    // peer addresses: (process, ip, optional injected delay)
    ips: Vec<(ProcessId, String, Option<usize>)>,
    config: Config,
    tcp_nodelay: bool,
    tcp_buffer_size: usize,
    tcp_flush_interval: Option<Duration>,
    process_channel_buffer_size: usize,
    client_channel_buffer_size: usize,
    workers: usize,
    executors: usize,
    multiplexing: usize,
    execution_log: Option<String>,
    ping_interval: Option<Duration>,
    metrics_file: String,
    stack_size: Option<usize>,
    cpus: usize,
    log_file: String,
}
#[cfg(feature = "exp")]
impl ProtocolConfig {
    /// Creates a protocol config, filling in the file-level experiment
    /// defaults and deriving worker/executor counts (and possibly a leader)
    /// from the protocol being run.
    pub fn new(
        protocol: Protocol,
        process_id: ProcessId,
        shard_id: ShardId,
        mut config: Config,
        sorted: Option<Vec<(ProcessId, ShardId)>>,
        ips: Vec<(ProcessId, String, Option<usize>)>,
        metrics_file: String,
        cpus: usize,
        log_file: String,
    ) -> Self {
        let (workers, executors) =
            workers_executors_and_leader(protocol, &mut config);
        Self {
            process_id,
            shard_id,
            sorted,
            ips,
            config,
            tcp_nodelay: PROCESS_TCP_NODELAY,
            tcp_buffer_size: PROCESS_TCP_BUFFER_SIZE,
            tcp_flush_interval: PROCESS_TCP_FLUSH_INTERVAL,
            process_channel_buffer_size: PROCESS_CHANNEL_BUFFER_SIZE,
            client_channel_buffer_size: CLIENT_CHANNEL_BUFFER_SIZE,
            workers,
            executors,
            multiplexing: MULTIPLEXING,
            execution_log: EXECUTION_LOG,
            ping_interval: PING_INTERVAL,
            metrics_file,
            stack_size: PROCESS_STACK_SIZE,
            cpus,
            log_file,
        }
    }
    /// Renders this config as the process binary's CLI arguments; optional
    /// settings only produce a flag when set.
    pub fn to_args(&self) -> Vec<String> {
        // mandatory arguments
        let mut args = args![
            "--id",
            self.process_id,
            "--shard_id",
            self.shard_id,
            "--ip",
            IP,
            "--port",
            port(self.process_id),
            "--client_port",
            client_port(self.process_id),
            "--addresses",
            self.ips_to_addresses(),
            "--processes",
            self.config.n(),
            "--faults",
            self.config.f(),
            "--shard_count",
            self.config.shard_count(),
            "--execute_at_commit",
            self.config.execute_at_commit(),
        ];
        if let Some(sorted) = self.sorted.as_ref() {
            // make sorted ids comma-separted
            let sorted = sorted
                .iter()
                .map(|(process_id, shard_id)| {
                    format!("{}-{}", process_id, shard_id)
                })
                .collect::<Vec<_>>()
                .join(",");
            args.extend(args!["--sorted", sorted]);
        }
        // all intervals are passed in milliseconds
        args.extend(args![
            "--executor_cleanup_interval",
            self.config.executor_cleanup_interval().as_millis()
        ]);
        if let Some(interval) = self.config.executor_monitor_pending_interval()
        {
            args.extend(args![
                "--executor_monitor_pending_interval",
                interval.as_millis()
            ]);
        }
        if let Some(interval) = self.config.gc_interval() {
            args.extend(args!["--gc_interval", interval.as_millis()]);
        }
        if let Some(leader) = self.config.leader() {
            args.extend(args!["--leader", leader]);
        }
        args.extend(args!["--nfr", self.config.nfr()]);
        // tempo-specific flags
        args.extend(args![
            "--tempo_tiny_quorums",
            self.config.tempo_tiny_quorums()
        ]);
        if let Some(interval) = self.config.tempo_clock_bump_interval() {
            args.extend(args![
                "--tempo_clock_bump_interval",
                interval.as_millis()
            ]);
        }
        if let Some(interval) = self.config.tempo_detached_send_interval() {
            args.extend(args![
                "--tempo_detached_send_interval",
                interval.as_millis()
            ]);
        }
        args.extend(args!["--skip_fast_ack", self.config.skip_fast_ack()]);
        // networking and parallelism flags
        args.extend(args![
            "--tcp_nodelay",
            self.tcp_nodelay,
            "--tcp_buffer_size",
            self.tcp_buffer_size
        ]);
        if let Some(interval) = self.tcp_flush_interval {
            args.extend(args!["--tcp_flush_interval", interval.as_millis()]);
        }
        args.extend(args![
            "--process_channel_buffer_size",
            self.process_channel_buffer_size,
            "--client_channel_buffer_size",
            self.client_channel_buffer_size,
            "--workers",
            self.workers,
            "--executors",
            self.executors,
            "--multiplexing",
            self.multiplexing
        ]);
        if let Some(log) = &self.execution_log {
            args.extend(args!["--execution_log", log]);
        }
        if let Some(interval) = self.ping_interval {
            args.extend(args!["--ping_interval", interval.as_millis()]);
        }
        args.extend(args!["--metrics_file", self.metrics_file]);
        if let Some(stack_size) = self.stack_size {
            args.extend(args!["--stack_size", stack_size]);
        }
        args.extend(args!["--cpus", self.cpus, "--log_file", self.log_file]);
        args
    }
    // Renders peer addresses as a comma-separated list of `ip:port` entries,
    // each optionally suffixed with `-delay`.
    fn ips_to_addresses(&self) -> String {
        self.ips
            .iter()
            .map(|(peer_id, ip, delay)| {
                let address = format!("{}:{}", ip, port(*peer_id));
                if let Some(delay) = delay {
                    format!("{}-{}", address, delay)
                } else {
                    address
                }
            })
            .collect::<Vec<_>>()
            .join(",")
    }
}
/// Splits the fixed thread budget (`WORKERS + EXECUTORS`) between workers
/// and executors according to the protocol, and, for paxos, also sets the
/// leader in `config`.
fn workers_executors_and_leader(
    protocol: Protocol,
    config: &mut Config,
) -> (usize, usize) {
    // given an executor count, the remaining budget goes to workers
    let split = |executors: usize| (WORKERS + EXECUTORS - executors, executors);
    match protocol {
        // 1 extra executor for partial replication (for epaxos, although not
        // implemented yet)
        Protocol::AtlasLocked | Protocol::EPaxosLocked => split(2),
        Protocol::CaesarLocked => split(1),
        Protocol::FPaxos => {
            // in the case of paxos, also set a leader
            config.set_leader(LEADER);
            split(1)
        }
        // tempo-style protocols run with the full executor budget
        Protocol::TempoAtomic | Protocol::TempoLocked | Protocol::Basic => {
            split(EXECUTORS)
        }
    }
}
#[cfg(feature = "exp")]
/// Everything needed to generate the command-line arguments of a `client`
/// process (see `to_args`).
pub struct ClientConfig {
    // inclusive range of client identifiers handled by this client process
    id_start: usize,
    id_end: usize,
    // (process id, ip) of every server this client may connect to
    ips: Vec<(ProcessId, String)>,
    workload: Workload,
    batch_max_size: usize,
    batch_max_delay: Duration,
    tcp_nodelay: bool,
    channel_buffer_size: usize,
    // NOTE(review): presumably how often the client logs status; only
    // emitted as `--status_frequency` when set — confirm in client binary
    status_frequency: Option<usize>,
    metrics_file: String,
    // optional thread stack size, emitted as `--stack_size` when set
    stack_size: Option<usize>,
    // optional cpu limit, emitted as `--cpus` when set
    cpus: Option<usize>,
    log_file: String,
}
#[cfg(feature = "exp")]
impl ClientConfig {
    /// Creates a client config with the experiment-wide defaults
    /// (`CLIENT_TCP_NODELAY`, `CLIENT_CHANNEL_BUFFER_SIZE`,
    /// `STATUS_FREQUENCY`, `CLIENT_STACK_SIZE`) and no cpu limit.
    pub fn new(
        id_start: usize,
        id_end: usize,
        ips: Vec<(ProcessId, String)>,
        workload: Workload,
        batch_max_size: usize,
        batch_max_delay: Duration,
        metrics_file: String,
        log_file: String,
    ) -> Self {
        Self {
            id_start,
            id_end,
            ips,
            workload,
            batch_max_size,
            batch_max_delay,
            tcp_nodelay: CLIENT_TCP_NODELAY,
            channel_buffer_size: CLIENT_CHANNEL_BUFFER_SIZE,
            status_frequency: STATUS_FREQUENCY,
            metrics_file,
            stack_size: CLIENT_STACK_SIZE,
            cpus: None,
            log_file,
        }
    }
    /// Renders this config as the command-line arguments expected by the
    /// `client` binary; optional fields are only emitted when set.
    pub fn to_args(&self) -> Vec<String> {
        use fantoch::client::KeyGen;
        // encode the key generator as "name,arg1,arg2"
        let key_gen = match self.workload.key_gen() {
            KeyGen::ConflictPool {
                conflict_rate,
                pool_size,
            } => {
                format!("conflict_pool,{},{}", conflict_rate, pool_size)
            }
            KeyGen::Zipf {
                coefficient,
                total_keys_per_shard,
            } => format!("zipf,{},{}", coefficient, total_keys_per_shard),
        };
        let mut args = args![
            "--ids",
            format!("{}-{}", self.id_start, self.id_end),
            "--addresses",
            self.ips_to_addresses(),
            "--shard_count",
            self.workload.shard_count(),
            "--key_gen",
            key_gen,
            "--keys_per_command",
            self.workload.keys_per_command(),
            "--commands_per_client",
            self.workload.commands_per_client(),
            "--payload_size",
            self.workload.payload_size(),
            "--read_only_percentage",
            self.workload.read_only_percentage(),
            "--batch_max_size",
            self.batch_max_size,
            "--batch_max_delay",
            self.batch_max_delay.as_millis(),
            "--tcp_nodelay",
            self.tcp_nodelay,
            "--channel_buffer_size",
            self.channel_buffer_size,
            "--metrics_file",
            self.metrics_file,
        ];
        if let Some(status_frequency) = self.status_frequency {
            args.extend(args!["--status_frequency", status_frequency]);
        }
        if let Some(stack_size) = self.stack_size {
            args.extend(args!["--stack_size", stack_size]);
        }
        if let Some(cpus) = self.cpus {
            args.extend(args!["--cpus", cpus]);
        }
        args.extend(args!["--log_file", self.log_file]);
        args
    }
    /// Renders `self.ips` as the comma-separated `--addresses` value, using
    /// each process' client port.
    fn ips_to_addresses(&self) -> String {
        self.ips
            .iter()
            .map(|(process_id, ip)| {
                format!("{}:{}", ip, client_port(*process_id))
            })
            .collect::<Vec<_>>()
            .join(",")
    }
}
/// Snapshot of every parameter of a single experiment; serialized next to
/// the collected metrics (as `exp_config.json`) so that runs can be
/// identified and compared later.
#[derive(Deserialize, Serialize)]
pub struct ExperimentConfig {
    // placement flattened into tuples (see `ExperimentConfig::new`) because
    // json map keys must be strings
    pub placement: PlacementFlat,
    pub planet: Option<Planet>,
    pub run_mode: RunMode,
    pub features: Vec<FantochFeature>,
    pub testbed: Testbed,
    pub protocol: Protocol,
    pub config: Config,
    pub clients_per_region: usize,
    pub workload: Workload,
    pub batch_max_size: usize,
    pub batch_max_delay: Duration,
    pub process_tcp_nodelay: bool,
    pub tcp_buffer_size: usize,
    pub tcp_flush_interval: Option<Duration>,
    pub process_channel_buffer_size: usize,
    pub cpus: usize,
    pub workers: usize,
    pub executors: usize,
    pub multiplexing: usize,
    pub client_tcp_nodelay: bool,
    pub client_channel_buffer_size: usize,
}
impl ExperimentConfig {
    /// Builds an experiment snapshot: derives `(workers, executors)` from
    /// the protocol (possibly setting a leader in `config`), flattens the
    /// placement map, and fills the remaining fields with the experiment
    /// defaults (`PROCESS_*`, `CLIENT_*`, `MULTIPLEXING`).
    pub fn new(
        placement: Placement,
        planet: Option<Planet>,
        run_mode: RunMode,
        features: Vec<FantochFeature>,
        testbed: Testbed,
        protocol: Protocol,
        mut config: Config,
        clients_per_region: usize,
        workload: Workload,
        batch_max_size: usize,
        batch_max_delay: Duration,
        cpus: usize,
    ) -> Self {
        let (workers, executors) =
            workers_executors_and_leader(protocol, &mut config);
        // can't serialize to json with a key that is not a string, so let's
        // flat it
        let placement = placement
            .into_iter()
            .map(|((a, b), (c, d))| (a, b, c, d))
            .collect();
        Self {
            placement,
            planet,
            run_mode,
            features,
            testbed,
            protocol,
            config,
            clients_per_region,
            process_tcp_nodelay: PROCESS_TCP_NODELAY,
            tcp_buffer_size: PROCESS_TCP_BUFFER_SIZE,
            tcp_flush_interval: PROCESS_TCP_FLUSH_INTERVAL,
            process_channel_buffer_size: PROCESS_CHANNEL_BUFFER_SIZE,
            cpus,
            workers,
            executors,
            multiplexing: MULTIPLEXING,
            workload,
            batch_max_size,
            batch_max_delay,
            client_tcp_nodelay: CLIENT_TCP_NODELAY,
            client_channel_buffer_size: CLIENT_CHANNEL_BUFFER_SIZE,
        }
    }
}
impl fmt::Debug for ExperimentConfig {
    // compact debug output: only the fields that identify an experiment
    // (config, protocol, client load and workload) are printed
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "config = {:?}", self.config)?;
        writeln!(f, "protocol = {:?}", self.protocol)?;
        writeln!(f, "clients_per_region = {:?}", self.clients_per_region)?;
        writeln!(f, "workload = {:?}", self.workload)
    }
}
/// Kind of process launched in a machine: a protocol server (identified by
/// its process id) or a client (identified by its region index).
#[derive(Clone, Copy, Debug)]
pub enum ProcessType {
    Server(ProcessId),
    Client(usize),
}
impl ProcessType {
    /// Name of this process ("server_<id>" or "client_<region index>"),
    /// used as the stem of every per-process file.
    pub fn name(&self) -> String {
        let (kind, index) = match self {
            Self::Server(process_id) => ("server", *process_id as usize),
            Self::Client(region_index) => ("client", *region_index),
        };
        format!("{}_{}", kind, index)
    }
}
/// Creates the filename for a run file: the process name plus `file_ext`
/// (which can be a log, metrics, dstat, etc, extension).
pub fn run_file(process_type: ProcessType, file_ext: &str) -> String {
    let stem = process_type.name();
    [stem.as_str(), file_ext].join(".")
}
/// Creates the filename prefix used for pulled files: the region's debug
/// representation followed by the process name.
pub fn file_prefix(process_type: ProcessType, region: &Region) -> String {
    let name = process_type.name();
    format!("{:?}_{}", region, name)
}
// base ports; each process adds its id to these (see `port`/`client_port`)
const PORT: usize = 3000;
const CLIENT_PORT: usize = 4000;
/// Port on which a server process listens for peer connections.
pub fn port(process_id: ProcessId) -> usize {
    PORT + process_id as usize
}
/// Port on which a server process listens for client connections.
pub fn client_port(process_id: ProcessId) -> usize {
    CLIENT_PORT + process_id as usize
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_exp/src/lib.rs | fantoch_exp/src/lib.rs | #![deny(rust_2018_idioms)]
#[cfg(feature = "exp")]
pub mod bench;
#[cfg(feature = "exp")]
pub mod machine;
#[cfg(feature = "exp")]
pub mod progress;
#[cfg(feature = "exp")]
pub mod testbed;
#[cfg(feature = "exp")]
pub mod util;
pub mod config;
// Re-exports.
pub use config::{ExperimentConfig, PlacementFlat, ProcessType};
use color_eyre::eyre::WrapErr;
use color_eyre::Report;
use serde::{Deserialize, Serialize};
use std::hash::Hash;
use std::path::Path;
/// How binaries are executed: plain release, under `flamegraph` (cpu
/// profiling), or under `heaptrack` (memory profiling).
#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
pub enum RunMode {
    Release,
    Flamegraph,
    Heaptrack,
}
#[cfg(feature = "exp")]
impl RunMode {
    /// Lowercase name of this run mode (used in logs and file names).
    pub fn name(&self) -> String {
        match self {
            Self::Release => "release",
            Self::Flamegraph => "flamegraph",
            Self::Heaptrack => "heaptrack",
        }
        .to_string()
    }
    /// Builds the shell command that launches `binary` with `env_vars`,
    /// wrapping the invocation in the profiler required by this run mode.
    pub fn run_command(
        &self,
        process_type: ProcessType,
        env_vars: &str,
        binary: &str,
    ) -> String {
        let run_command = format!("./fantoch/target/release/{}", binary);
        match self {
            Self::Release => format!("{} {}", env_vars, run_command),
            Self::Flamegraph => {
                // compute flamegraph file
                let flamegraph_file = config::run_file(
                    process_type,
                    crate::bench::FLAMEGRAPH_FILE_EXT,
                );
                // compute perf file (which will be supported once https://github.com/flamegraph-rs/flamegraph/pull/95 gets in)
                let perf_file = config::run_file(process_type, "perf.data");
                // `source` is needed in order for `flamegraph` to be found
                format!(
                    "source ~/.cargo/env && {} flamegraph -v -o {} -c 'record -F 997 --call-graph dwarf -g -o {}' {}",
                    env_vars, flamegraph_file, perf_file, run_command
                )
            }
            Self::Heaptrack => {
                format!("{} heaptrack {}", env_vars, run_command)
            }
        }
    }
}
/// Cargo features used when building the fantoch binaries.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub enum FantochFeature {
    Jemalloc,
    MaxLevelDebug,
    MaxLevelTrace,
}
impl FantochFeature {
    /// Cargo feature name (as declared in the crate's `Cargo.toml`).
    pub fn name(&self) -> String {
        match self {
            Self::Jemalloc => "jemalloc",
            Self::MaxLevelDebug => "max_level_debug",
            Self::MaxLevelTrace => "max_level_trace",
        }
        .to_string()
    }
    /// Maps a tracing level to the feature that enables it at compile time;
    /// `INFO` needs no feature, and any level other than INFO/DEBUG/TRACE
    /// panics (deliberately unsupported).
    pub fn max_level(level: &tracing::Level) -> Option<Self> {
        // generate a feature if max level is higher than INFO
        match level {
            &tracing::Level::INFO => None,
            &tracing::Level::DEBUG => Some(Self::MaxLevelDebug),
            &tracing::Level::TRACE => Some(Self::MaxLevelTrace),
            _ => panic!("tracing level {:?} not supported", level),
        }
    }
}
/// Protocols that can be benchmarked; each variant maps to a dedicated
/// binary (see `Protocol::binary`).
#[derive(
    Debug,
    Clone,
    Copy,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Deserialize,
    Serialize,
    Hash,
)]
pub enum Protocol {
    AtlasLocked,
    EPaxosLocked,
    CaesarLocked,
    FPaxos,
    TempoAtomic,
    TempoLocked,
    Basic,
}
impl Protocol {
pub fn binary(&self) -> &'static str {
match self {
Protocol::AtlasLocked => "atlas_locked",
Protocol::EPaxosLocked => "epaxos_locked",
Protocol::CaesarLocked => "caesar_locked",
Protocol::FPaxos => "fpaxos",
Protocol::TempoAtomic => "tempo_atomic",
Protocol::TempoLocked => "tempo_locked",
Protocol::Basic => "basic",
}
}
}
/// Where experiments run: AWS instances, a baremetal cluster, or the local
/// machine.
#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
pub enum Testbed {
    Aws,
    Baremetal,
    Local,
}
impl Testbed {
    /// Lowercase name of this testbed (used in logs and file names).
    pub fn name(&self) -> String {
        let name = match self {
            Self::Aws => "aws",
            Self::Baremetal => "baremetal",
            Self::Local => "local",
        };
        String::from(name)
    }
}
/// On-disk formats supported by `serialize`/`deserialize`.
#[derive(Debug)]
pub enum SerializationFormat {
    BincodeGz,
    Json,
    JsonPretty,
}
// TODO maybe make this async
/// Serializes `data` into `file` using `format`, creating (or truncating)
/// the file. Bincode output is additionally gzip-compressed at the highest
/// compression level.
pub fn serialize<T>(
    data: T,
    file: impl AsRef<Path>,
    format: SerializationFormat,
) -> Result<(), Report>
where
    T: serde::Serialize,
{
    // if the file does not exist it will be created, otherwise truncated
    let file = std::fs::File::create(file).wrap_err("serialize create file")?;
    // create a buf writer
    let buf = std::io::BufWriter::new(file);
    // and try to serialize
    match format {
        SerializationFormat::BincodeGz => {
            let buf =
                flate2::write::GzEncoder::new(buf, flate2::Compression::best());
            bincode::serialize_into(buf, &data).wrap_err("serialize")?
        }
        SerializationFormat::Json => {
            serde_json::to_writer(buf, &data).wrap_err("serialize")?
        }
        SerializationFormat::JsonPretty => {
            serde_json::to_writer_pretty(buf, &data).wrap_err("serialize")?
        }
    }
    Ok(())
}
// TODO maybe make this async
/// Deserializes a `T` from `file`, mirroring `serialize`: gzip+bincode for
/// `BincodeGz`, plain json for the json variants (pretty and compact json
/// parse identically).
pub fn deserialize<T>(
    file: impl AsRef<Path>,
    format: SerializationFormat,
) -> Result<T, Report>
where
    T: serde::de::DeserializeOwned,
{
    // open the file in read-only
    let file = std::fs::File::open(file).wrap_err("deserialize open file")?;
    // create a buf reader
    let buf = std::io::BufReader::new(file);
    // and try to deserialize
    let data = match format {
        SerializationFormat::BincodeGz => {
            let buf = flate2::bufread::GzDecoder::new(buf);
            bincode::deserialize_from(buf).wrap_err("deserialize")?
        }
        SerializationFormat::Json | SerializationFormat::JsonPretty => {
            serde_json::from_reader(buf).wrap_err("deserialize")?
        }
    };
    Ok(data)
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_exp/src/bench.rs | fantoch_exp/src/bench.rs | use crate::config::{
self, ClientConfig, ExperimentConfig, ProcessType, ProtocolConfig,
RegionIndex,
};
use crate::machine::{Machine, Machines};
use crate::progress::TracingProgressBar;
use crate::{FantochFeature, Protocol, RunMode, SerializationFormat, Testbed};
use color_eyre::eyre::{self, WrapErr};
use color_eyre::Report;
use fantoch::client::{KeyGen, Workload};
use fantoch::config::Config;
use fantoch::id::ProcessId;
use fantoch::planet::{Planet, Region};
use std::collections::HashMap;
use std::path::Path;
use tokio::time::Duration;
// maps each server's process id to its ip address
type Ips = HashMap<ProcessId, String>;
// extensions of the per-process files generated during a run (combined with
// the process name by `config::run_file`)
const LOG_FILE_EXT: &str = "log";
const ERR_FILE_EXT: &str = "err";
const DSTAT_FILE_EXT: &str = "dstat.csv";
const METRICS_FILE_EXT: &str = "metrics";
pub(crate) const FLAMEGRAPH_FILE_EXT: &str = "flamegraph.svg";
/// Optional per-phase timeouts for a single experiment; when a phase
/// exceeds its timeout, the experiment aborts with a `TimeoutError` (and is
/// cleaned up and retried by `bench_experiment`).
#[derive(Clone, Copy)]
pub struct ExperimentTimeouts {
    // timeout for starting the server processes
    pub start: Option<Duration>,
    // timeout for running the clients to completion
    pub run: Option<Duration>,
    // timeout for pulling metrics and stopping the servers
    pub stop: Option<Duration>,
}
/// Error signalling that a phase of the experiment timed out; the payload
/// names the phase (e.g. "start processes").
#[derive(Debug)]
struct TimeoutError(&'static str);

impl std::fmt::Display for TimeoutError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // display simply delegates to the derived debug representation
        std::fmt::Debug::fmt(self, f)
    }
}

impl std::error::Error for TimeoutError {}
/// Runs the full experiment grid: for every combination of
/// `(batch_max_size, (protocol, config), workload, clients_per_region)` not
/// skipped by `skip`, starts servers, runs clients, and pulls metrics into
/// a fresh timestamped directory under `results_dir`. Runs that hit a
/// `TimeoutError` are cleaned up and retried; any other error aborts.
pub async fn bench_experiment(
    machines: Machines<'_>,
    run_mode: RunMode,
    max_log_level: &tracing::Level,
    features: Vec<FantochFeature>,
    testbed: Testbed,
    planet: Option<Planet>,
    configs: Vec<(Protocol, Config)>,
    clients_per_region: Vec<usize>,
    workloads: Vec<Workload>,
    batch_max_sizes: Vec<usize>,
    batch_max_delay: Duration,
    cpus: usize,
    skip: &mut impl FnMut(Protocol, Config, usize) -> bool,
    experiment_timeouts: ExperimentTimeouts,
    protocols_to_cleanup: Vec<Protocol>,
    cpus: usize,
    progress: &mut TracingProgressBar,
    results_dir: impl AsRef<Path>,
) -> Result<(), Report> {
    // kill any leftover processes from previous runs (AWS machines are
    // fresh, so there's nothing to clean there)
    match testbed {
        Testbed::Local | Testbed::Baremetal => {
            cleanup(&machines, protocols_to_cleanup)
                .await
                .wrap_err("initial cleanup")?;
            tracing::info!("initial cleanup completed");
        }
        Testbed::Aws => {
            tracing::info!("nothing to cleanup on AWS");
        }
    }
    for batch_max_size in &batch_max_sizes {
        for &(protocol, config) in &configs {
            for workload in &workloads {
                for &clients in &clients_per_region {
                    // check that we have the correct number of server machines
                    assert_eq!(
                        machines.server_count(),
                        config.n() * config.shard_count(),
                        "not enough server machines"
                    );
                    // check that we have the correct number of client machines
                    assert_eq!(
                        machines.client_count(),
                        config.n(),
                        "not enough client machines"
                    );
                    // maybe skip configuration
                    if skip(protocol, config, clients) {
                        progress.inc();
                        continue;
                    }
                    if let KeyGen::ConflictPool { .. } = workload.key_gen() {
                        if workload.shard_count() > 1 {
                            // the conflict rate key gen is weird in partial
                            // replication; for example, consider the case where
                            // commands access two shards (so,
                            // `shards_per_command = 2`) and the conflict rate
                            // is 0; further, assume that client A issued
                            // command first a command X on shards 0 and 1 and
                            // then a command Y on shards 1 and 2; even though
                            // the conflict rate is 0, since we use the client
                            // identifier to make the command doesn't conflict
                            // with commands from another clients, commands from
                            // the same client conflict with itself; thus,
                            // command Y will depend on command X; this means
                            // that shard 2 needs to learn about command X in
                            // order to be able to execute command Y. overall,
                            // we have a non-conflicting workload that's
                            // non-genuine, and that doesn't seem right. for
                            // this reason, we simply don't allow it
                            // panic!("invalid workload; conflict rate key gen
                            // is inappropriate for partial replication
                            // scenarios");
                            panic!("conflict rate key generator is not suitable for partial replication");
                        }
                    }
                    // retry loop: repeats the experiment (in a fresh dir)
                    // until it completes without a timeout
                    loop {
                        let exp_dir = create_exp_dir(&results_dir)
                            .await
                            .wrap_err("create_exp_dir")?;
                        tracing::info!(
                            "experiment metrics will be saved in {}",
                            exp_dir
                        );
                        let run = run_experiment(
                            &machines,
                            run_mode,
                            max_log_level,
                            &features,
                            testbed,
                            &planet,
                            protocol,
                            config,
                            clients,
                            *workload,
                            *batch_max_size,
                            batch_max_delay,
                            cpus,
                            experiment_timeouts,
                            &exp_dir,
                        );
                        if let Err(e) = run.await {
                            // check if it's a timeout error
                            match e.downcast_ref::<TimeoutError>() {
                                Some(TimeoutError(source)) => {
                                    // if it's a timeout error, cleanup and
                                    // restart
                                    // the experiment
                                    tracing::warn!("timeout in {:?}; will cleanup and try again", source);
                                    tokio::fs::remove_dir_all(exp_dir)
                                        .await
                                        .wrap_err("remove exp dir")?;
                                    cleanup(&machines, vec![protocol]).await?;
                                }
                                None => {
                                    // if not, quit
                                    return Err(e);
                                }
                            }
                        } else {
                            // if there's no error, then exit the loop and run
                            // the next experiment
                            // (if any)
                            progress.inc();
                            break;
                        }
                    }
                }
            }
        }
    }
    Ok(())
}
/// Runs a single experiment end-to-end: start servers (plus one dstat
/// monitor per machine), run clients to completion, stop dstat, pull all
/// metrics, and finally stop the servers. Each phase can be bounded by
/// `experiment_timeouts`; a fired timeout returns a `TimeoutError` naming
/// the phase.
async fn run_experiment(
    machines: &Machines<'_>,
    run_mode: RunMode,
    max_log_level: &tracing::Level,
    features: &Vec<FantochFeature>,
    testbed: Testbed,
    planet: &Option<Planet>,
    protocol: Protocol,
    config: Config,
    clients_per_region: usize,
    workload: Workload,
    batch_max_size: usize,
    batch_max_delay: Duration,
    cpus: usize,
    experiment_timeouts: ExperimentTimeouts,
    exp_dir: &str,
) -> Result<(), Report> {
    // holder of dstat processes to be launched in all machines
    let mut dstats = Vec::with_capacity(machines.vm_count());
    // start processes
    let start = start_processes(
        machines,
        run_mode,
        max_log_level,
        testbed,
        planet,
        protocol,
        config,
        cpus,
        &mut dstats,
    );
    // check if a start timeout was set
    let start_result = if let Some(timeout) = experiment_timeouts.start {
        // if yes, abort experiment if timeout triggers
        tokio::select! {
            result = start => result,
            _ = tokio::time::sleep(timeout) => {
                return Err(Report::new(TimeoutError("start processes")));
            }
        }
    } else {
        // if no, simply wait for start to finish
        start.await
    };
    let (process_ips, processes) = start_result.wrap_err("start_processes")?;
    // run clients
    let run_clients = run_clients(
        clients_per_region,
        workload,
        batch_max_size,
        batch_max_delay,
        machines,
        process_ips,
        &mut dstats,
    );
    // check if a run timeout was set
    let run_clients_result = if let Some(timeout) = experiment_timeouts.run {
        // if yes, abort experiment if timeout triggers
        tokio::select! {
            result = run_clients => result,
            _ = tokio::time::sleep(timeout) => {
                return Err(Report::new(TimeoutError("run clients")));
            }
        }
    } else {
        // if not, simply wait for run to finish
        run_clients.await
    };
    run_clients_result.wrap_err("run_clients")?;
    // stop dstat
    stop_dstats(machines, dstats).await.wrap_err("stop_dstat")?;
    // create experiment config and pull metrics
    let exp_config = ExperimentConfig::new(
        machines.placement().clone(),
        planet.clone(),
        run_mode,
        features.clone(),
        testbed,
        protocol,
        config,
        clients_per_region,
        workload,
        batch_max_size,
        batch_max_delay,
        cpus,
    );
    let pull_metrics_and_stop = async {
        pull_metrics(machines, exp_config, &exp_dir)
            .await
            .wrap_err("pull_metrics")?;
        // stop processes: should only be stopped after copying all the metrics
        // to avoid unnecessary noise in the logs
        stop_processes(machines, run_mode, protocol, &exp_dir, processes)
            .await
            .wrap_err("stop_processes")?;
        Ok(())
    };
    // check if a stop was set
    if let Some(timeout) = experiment_timeouts.stop {
        // if yes, abort experiment if timeout triggers
        tokio::select! {
            result = pull_metrics_and_stop => result,
            _ = tokio::time::sleep(timeout) => {
                return Err(Report::new(TimeoutError("pull metrics and stop processes")));
            }
        }
    } else {
        // if not, simply wait for stop to finish
        pull_metrics_and_stop.await
    }
}
/// Starts one server process (and one dstat monitor, pushed into `dstats`)
/// per placement entry, then waits until every server logs that it has
/// started. Returns the server ip map and the spawned ssh children indexed
/// by process id (paired with their region).
async fn start_processes(
    machines: &Machines<'_>,
    run_mode: RunMode,
    max_log_level: &tracing::Level,
    testbed: Testbed,
    planet: &Option<Planet>,
    protocol: Protocol,
    config: Config,
    cpus: usize,
    dstats: &mut Vec<tokio::process::Child>,
) -> Result<(Ips, HashMap<ProcessId, (Region, tokio::process::Child)>), Report>
{
    let ips: Ips = machines
        .servers()
        .map(|(process_id, vm)| (*process_id, vm.ip()))
        .collect();
    tracing::debug!("processes ips: {:?}", ips);
    let process_count = config.n() * config.shard_count();
    let mut processes = HashMap::with_capacity(process_count);
    let mut wait_processes = Vec::with_capacity(process_count);
    for ((from_region, shard_id), (process_id, region_index)) in
        machines.placement()
    {
        let vm = machines.server(process_id);
        // compute the set of sorted processes
        let sorted = machines.sorted_processes(
            config.shard_count(),
            config.n(),
            *process_id,
            *shard_id,
            *region_index,
        );
        // get ips to connect to (based on sorted)
        let ips = sorted
            .iter()
            .filter(|(peer_id, _)| peer_id != process_id)
            .map(|(peer_id, _)| {
                // get process ip
                let ip = ips
                    .get(peer_id)
                    .expect("all processes should have an ip")
                    .clone();
                // compute delay to be injected (if theres's a `planet`)
                let to_region = machines.process_region(peer_id);
                let delay = maybe_inject_delay(from_region, to_region, planet);
                (*peer_id, ip, delay)
            })
            .collect();
        // set sorted only if on baremetal and no delay will be injected
        let set_sorted = testbed == Testbed::Baremetal && planet.is_none();
        let sorted = if set_sorted { Some(sorted) } else { None };
        // compute process type
        let process_type = ProcessType::Server(*process_id);
        // compute files to be generated during this run
        let log_file = config::run_file(process_type, LOG_FILE_EXT);
        let err_file = config::run_file(process_type, ERR_FILE_EXT);
        let dstat_file = config::run_file(process_type, DSTAT_FILE_EXT);
        let metrics_file = config::run_file(process_type, METRICS_FILE_EXT);
        // start dstat and save it
        let dstat = start_dstat(dstat_file, vm).await?;
        dstats.push(dstat);
        // create protocol config and generate args
        let protocol_config = ProtocolConfig::new(
            protocol,
            *process_id,
            *shard_id,
            config,
            sorted,
            ips,
            metrics_file,
            cpus,
            log_file,
        );
        let args = protocol_config.to_args();
        let command = crate::machine::fantoch_bin_script(
            process_type,
            protocol.binary(),
            args,
            run_mode,
            max_log_level,
            err_file,
        );
        let process = vm
            .prepare_exec(command)
            .spawn()
            .wrap_err("failed to start process")?;
        processes.insert(*process_id, (from_region.clone(), process));
        wait_processes.push(wait_process_started(process_id, &vm));
    }
    // wait until all processes have started
    for result in futures::future::join_all(wait_processes).await {
        let () = result?;
    }
    Ok((ips, processes))
}
/// Computes the artificial delay (in ms) to inject between two regions:
/// `None` when no `planet` (latency matrix) was provided, otherwise half
/// the ping latency between `from` and `to` (emulating one-way latency).
fn maybe_inject_delay(
    from: &Region,
    to: &Region,
    planet: &Option<Planet>,
) -> Option<usize> {
    // no planet, no delay
    let planet = planet.as_ref()?;
    // find ping latency between the two regions
    let ping = planet
        .ping_latency(from, to)
        .expect("both regions should be part of the planet");
    // the delay should be half the ping latency
    Some((ping / 2) as usize)
}
/// Starts one client process (and one dstat monitor, pushed into `dstats`)
/// per client machine and waits until every one of them logs that all its
/// clients ended. Clients always run in release mode at INFO log level.
async fn run_clients(
    clients_per_region: usize,
    workload: Workload,
    batch_max_size: usize,
    batch_max_delay: Duration,
    machines: &Machines<'_>,
    process_ips: Ips,
    dstats: &mut Vec<tokio::process::Child>,
) -> Result<(), Report> {
    let mut clients = HashMap::with_capacity(machines.client_count());
    let mut wait_clients = Vec::with_capacity(machines.client_count());
    for (region, vm) in machines.clients() {
        // find all processes in this region (we have more than one there's more
        // than one shard)
        let (processes_in_region, region_index) =
            machines.processes_in_region(region);
        // compute id start and id end:
        // - first compute the id end
        // - and then compute id start: subtract `clients_per_machine` and add 1
        let id_end = region_index as usize * clients_per_region;
        let id_start = id_end - clients_per_region + 1;
        // get ips of all processes in this region
        let ips = processes_in_region
            .iter()
            .map(|process_id| {
                let ip = process_ips
                    .get(process_id)
                    .expect("process should have ip")
                    .clone();
                (*process_id, ip)
            })
            .collect();
        // compute process type
        let process_type = ProcessType::Client(region_index);
        // compute files to be generated during this run
        let log_file = config::run_file(process_type, LOG_FILE_EXT);
        let err_file = config::run_file(process_type, ERR_FILE_EXT);
        let dstat_file = config::run_file(process_type, DSTAT_FILE_EXT);
        let metrics_file = config::run_file(process_type, METRICS_FILE_EXT);
        // start dstat and save it
        let dstat = start_dstat(dstat_file, vm).await?;
        dstats.push(dstat);
        // create client config and generate args
        let client_config = ClientConfig::new(
            id_start,
            id_end,
            ips,
            workload,
            batch_max_size,
            batch_max_delay,
            metrics_file,
            log_file,
        );
        let args = client_config.to_args();
        let command = crate::machine::fantoch_bin_script(
            process_type,
            "client",
            args,
            // always run clients on release mode
            RunMode::Release,
            // always run clients on info level
            &tracing::Level::INFO,
            err_file,
        );
        let client = vm
            .prepare_exec(command)
            .spawn()
            .wrap_err("failed to start client")?;
        clients.insert(region_index, client);
        wait_clients.push(wait_client_ended(region_index, region.clone(), &vm));
    }
    // wait all clients ended
    for result in futures::future::join_all(wait_clients).await {
        let _ = result.wrap_err("wait_client_ended")?;
    }
    Ok(())
}
/// Stops all server processes: kills the local ssh children, sends a remote
/// kill to each server, and waits for them to terminate (also pulling
/// flamegraph/heaptrack files according to `run_mode`).
async fn stop_processes(
    machines: &Machines<'_>,
    run_mode: RunMode,
    protocol: Protocol,
    exp_dir: &str,
    processes: HashMap<ProcessId, (Region, tokio::process::Child)>,
) -> Result<(), Report> {
    let mut wait_processes = Vec::with_capacity(machines.server_count());
    for (process_id, (region, mut pchild)) in processes {
        // find vm
        let vm = machines.server(&process_id);
        let heaptrack_pid = if let RunMode::Heaptrack = run_mode {
            // find heaptrack pid if in heaptrack mode; `--id {process_id}`
            // identifies the right heaptrack instance
            let command = format!(
                "ps -aux | grep heaptrack | grep ' \\-\\-id {}' | grep -v 'bash -c'",
                process_id
            );
            let heaptrack_process =
                vm.exec(command).await.wrap_err("ps heaptrack")?;
            tracing::debug!("{}: {}", process_id, heaptrack_process);
            // check that there's a single heaptrack processs
            let lines: Vec<_> = heaptrack_process.split("\n").collect();
            assert_eq!(
                lines.len(),
                1,
                "there should be a single heaptrack process"
            );
            // compute heaptrack pid (second column of `ps -aux` output)
            let parts: Vec<_> = lines[0].split_whitespace().collect();
            tracing::debug!("{}: parts {:?}", process_id, parts);
            let heaptrack_pid = parts[1]
                .parse::<u32>()
                .expect("heaptrack pid should be a number");
            Some(heaptrack_pid)
        } else {
            None
        };
        // kill ssh process
        if let Err(e) = pchild.kill().await {
            tracing::warn!(
                "error trying to kill ssh process {:?} with pid {:?}: {:?}",
                process_id,
                pchild.id(),
                e
            );
        }
        // stop process
        stop_process(vm, process_id, &region)
            .await
            .wrap_err("stop_process")?;
        wait_processes.push(wait_process_ended(
            protocol,
            heaptrack_pid,
            process_id,
            region,
            vm,
            run_mode,
            &exp_dir,
        ));
    }
    // wait until all processes have terminated
    for result in futures::future::join_all(wait_processes).await {
        let () = result?;
    }
    Ok(())
}
/// Kills the remote server process in `vm` by finding the pid that listens
/// on the process' peer/client ports (via `lsof`) and sending it a `kill`.
/// Warns (instead of failing) if the process is already gone; panics if
/// more than one pid is found.
async fn stop_process(
    vm: &Machine<'_>,
    process_id: ProcessId,
    region: &Region,
) -> Result<(), Report> {
    // find process pid in remote vm
    // TODO: this should equivalent to `pkill PROTOCOL_BINARY`
    let command = format!(
        "lsof -i :{} -i :{} -sTCP:LISTEN | grep -v PID",
        config::port(process_id),
        config::client_port(process_id)
    );
    let output = vm.exec(command).await.wrap_err("lsof | grep")?;
    let mut pids: Vec<_> = output
        .lines()
        // take the second column (which contains the PID)
        .map(|line| line.split_whitespace().collect::<Vec<_>>()[1])
        .collect();
    pids.sort();
    pids.dedup();
    // there should be at most one pid
    match pids.len() {
        0 => {
            tracing::warn!(
                "process {} already not running in region {:?}",
                process_id,
                region
            );
        }
        1 => {
            // kill the single process found
            let command = format!("kill {}", pids.join(" "));
            let output = vm.exec(command).await.wrap_err("kill")?;
            tracing::debug!("{}", output);
        }
        n => panic!("there should be at most one pid and found {}", n),
    }
    Ok(())
}
/// Polls (every 2s, via `grep` over the remote log file) until the server
/// logs "process <id> started". Never gives up on its own; timeouts are
/// enforced by the caller (see `ExperimentTimeouts::start`).
async fn wait_process_started(
    process_id: &ProcessId,
    vm: &Machine<'_>,
) -> Result<(), Report> {
    // small delay between calls
    let duration = tokio::time::Duration::from_secs(2);
    // compute process type and log file
    let process_type = ProcessType::Server(*process_id);
    let log_file = config::run_file(process_type, LOG_FILE_EXT);
    let mut count = 0;
    while count != 1 {
        tokio::time::sleep(duration).await;
        let command =
            format!("grep -c 'process {} started' {}", process_id, log_file);
        let stdout = vm.exec(&command).await.wrap_err("grep -c")?;
        if stdout.is_empty() {
            tracing::warn!("empty output from: {}", command);
        } else {
            count = stdout.parse::<usize>().wrap_err("grep -c parse")?;
        }
    }
    Ok(())
}
/// Polls (every 2s) until the server no longer listens on its ports, then
/// pulls the profiler output required by `run_mode` (flamegraph svg or
/// heaptrack dump); nothing extra is pulled in release mode.
async fn wait_process_ended(
    protocol: Protocol,
    heaptrack_pid: Option<u32>,
    process_id: ProcessId,
    region: Region,
    vm: &Machine<'_>,
    run_mode: RunMode,
    exp_dir: &str,
) -> Result<(), Report> {
    // small delay between calls
    let duration = tokio::time::Duration::from_secs(2);
    let mut count = 1;
    while count != 0 {
        tokio::time::sleep(duration).await;
        let command = format!(
            "lsof -i :{} -i :{} -sTCP:LISTEN | wc -l",
            config::port(process_id),
            config::client_port(process_id)
        );
        let stdout = vm.exec(&command).await.wrap_err("lsof | wc")?;
        if stdout.is_empty() {
            tracing::warn!("empty output from: {}", command);
        } else {
            count = stdout.parse::<usize>().wrap_err("lsof | wc parse")?;
        }
    }
    tracing::info!(
        "process {} in region {:?} terminated successfully",
        process_id,
        region
    );
    // compute process type
    let process_type = ProcessType::Server(process_id);
    // pull aditional files
    match run_mode {
        RunMode::Release => {
            // nothing to do in this case
        }
        RunMode::Flamegraph => {
            // wait for the flamegraph process to finish writing the flamegraph
            // file
            let mut count = 1;
            while count != 0 {
                tokio::time::sleep(duration).await;
                let command =
                    "ps -aux | grep flamegraph | grep -v grep | wc -l"
                        .to_string();
                let stdout = vm.exec(&command).await.wrap_err("ps | wc")?;
                if stdout.is_empty() {
                    tracing::warn!("empty output from: {}", command);
                } else {
                    count =
                        stdout.parse::<usize>().wrap_err("ps | wc parse")?;
                }
            }
            // once the flamegraph process is not running, we can grab the
            // flamegraph file
            pull_flamegraph_file(process_type, &region, vm, exp_dir)
                .await
                .wrap_err("pull_flamegraph_file")?;
        }
        RunMode::Heaptrack => {
            let heaptrack_pid =
                heaptrack_pid.expect("heaptrack pid should be set");
            pull_heaptrack_file(
                protocol,
                heaptrack_pid,
                process_type,
                &region,
                vm,
                exp_dir,
            )
            .await
            .wrap_err("pull_heaptrack_file")?;
        }
    }
    Ok(())
}
/// Polls (every 10s, via `grep` over the remote log file) until the client
/// process logs "all clients ended". Never gives up on its own; timeouts
/// are enforced by the caller (see `ExperimentTimeouts::run`).
async fn wait_client_ended(
    region_index: RegionIndex,
    region: Region,
    vm: &Machine<'_>,
) -> Result<(), Report> {
    // small delay between calls
    let duration = tokio::time::Duration::from_secs(10);
    // compute process type and log file
    let process_type = ProcessType::Client(region_index);
    let log_file = config::run_file(process_type, LOG_FILE_EXT);
    let mut count = 0;
    while count != 1 {
        tokio::time::sleep(duration).await;
        let command = format!("grep -c 'all clients ended' {}", log_file);
        let stdout = vm.exec(&command).await.wrap_err("grep -c")?;
        if stdout.is_empty() {
            tracing::warn!("empty output from: {}", command);
        } else {
            count = stdout.parse::<usize>().wrap_err("grep -c parse")?;
        }
    }
    tracing::info!(
        "client {} in region {:?} terminated successfully",
        region_index,
        region
    );
    Ok(())
}
/// Spawns a `dstat` monitor in `vm` that appends one csv row per second to
/// `dstat_file`; returns the ssh child so it can be killed later.
async fn start_dstat(
    dstat_file: String,
    vm: &Machine<'_>,
) -> Result<tokio::process::Child, Report> {
    let command = format!(
        "dstat -t -T -cdnm --io --output {} 1 > /dev/null",
        dstat_file
    );
    vm.prepare_exec(command)
        .spawn()
        .wrap_err("failed to start dstat")
}
/// Stops every dstat monitor: first kills the local ssh children, then
/// kills the remote dstat processes on all machines in parallel.
async fn stop_dstats(
    machines: &Machines<'_>,
    dstats: Vec<tokio::process::Child>,
) -> Result<(), Report> {
    for mut dstat in dstats {
        // kill ssh process
        if let Err(e) = dstat.kill().await {
            tracing::warn!(
                "error trying to kill ssh dstat {:?}: {:?}",
                dstat.id(),
                e
            );
        }
    }
    // stop dstats in parallel
    let mut stops = Vec::new();
    for vm in machines.vms() {
        stops.push(stop_dstat(vm));
    }
    for result in futures::future::join_all(stops).await {
        let _ = result?;
    }
    Ok(())
}
/// Kills any dstat processes running in `vm` (found via `ps`) and then
/// verifies none is left with `check_no_dstat`.
async fn stop_dstat(vm: &Machine<'_>) -> Result<(), Report> {
    // find dstat pid in remote vm
    let command = "ps -aux | grep dstat | grep -v grep";
    let output = vm.exec(command).await.wrap_err("ps")?;
    let mut pids: Vec<_> = output
        .lines()
        // take the second column (which contains the PID)
        .map(|line| line.split_whitespace().collect::<Vec<_>>()[1])
        .collect();
    pids.sort();
    pids.dedup();
    // a single dstat run shows up as two pids (see below); kill whatever we
    // find
    match pids.len() {
        0 => {
            tracing::warn!("dstat already not running");
        }
        n => {
            if n > 2 {
                // there should be `bash -c dstat` and a `python2
                // /usr/bin/dstat`; if more than these two, then there's
                // more than one dstat running
                tracing::warn!(
                    "found more than one dstat. killing all of them"
                );
            }
            // kill dstat
            let command = format!("kill {}", pids.join(" "));
            let output = vm.exec(command).await.wrap_err("kill")?;
            tracing::debug!("{}", output);
        }
    }
    check_no_dstat(vm).await.wrap_err("check_no_dstat")?;
    Ok(())
}
async fn check_no_dstat(vm: &Machine<'_>) -> Result<(), Report> {
let command = "ps -aux | grep dstat | grep -v grep | wc -l";
loop {
let stdout = vm.exec(&command).await?;
if stdout.is_empty() {
tracing::warn!("empty output from: {}", command);
// check again
continue;
} else {
let count = stdout.parse::<usize>().wrap_err("wc -c parse")?;
if count != 0 {
eyre::bail!("dstat shouldn't be running")
} else {
return Ok(());
}
}
}
}
/// Saves `exp_config` as `exp_config.json` in `exp_dir` and pulls, in
/// parallel, the log/err/dstat/metrics files from every server and client
/// machine.
async fn pull_metrics(
    machines: &Machines<'_>,
    exp_config: ExperimentConfig,
    exp_dir: &str,
) -> Result<(), Report> {
    // save experiment config
    crate::serialize(
        exp_config,
        format!("{}/exp_config.json", exp_dir),
        SerializationFormat::Json,
    )
    .wrap_err("save_exp_config")?;
    let mut pulls = Vec::with_capacity(machines.vm_count());
    // prepare server metrics pull
    for (process_id, vm) in machines.servers() {
        // compute region and process type
        let region = machines.process_region(process_id);
        let process_type = ProcessType::Server(*process_id);
        pulls.push(pull_metrics_files(process_type, region, vm, &exp_dir));
    }
    // prepare client metrics pull
    for (region, vm) in machines.clients() {
        // compute region index and process type
        let region_index = machines.region_index(region);
        let process_type = ProcessType::Client(region_index);
        pulls.push(pull_metrics_files(process_type, region, vm, &exp_dir));
    }
    // pull all metrics in parallel
    for result in futures::future::join_all(pulls).await {
        let _ = result.wrap_err("pull_metrics")?;
    }
    Ok(())
}
/// Creates a fresh per-experiment directory under `results_dir` — named
/// after the current microsecond timestamp — and returns its path.
async fn create_exp_dir(
    results_dir: impl AsRef<Path>,
) -> Result<String, Report> {
    // the directory name is simply the experiment timestamp
    let exp_dir =
        format!("{}/{}", results_dir.as_ref().display(), exp_timestamp());
    tokio::fs::create_dir_all(&exp_dir)
        .await
        .wrap_err("create_dir_all")?;
    Ok(exp_dir)
}
/// Returns the number of microseconds since the Unix epoch; used to name
/// experiment directories uniquely.
fn exp_timestamp() -> u128 {
    let now = std::time::SystemTime::now();
    let since_epoch = now
        .duration_since(std::time::UNIX_EPOCH)
        .expect("we're way past epoch");
    since_epoch.as_micros()
}
/// Copies the log, err, dstat and metrics files for one process/client from
/// its machine into `exp_dir`, then removes the remote copies (except the
/// err file) and logs completion.
///
/// NOTE(review): the `rm` below removes the log, dstat and metrics files
/// but NOT the err file — presumably so stderr survives for debugging;
/// confirm this is intentional.
async fn pull_metrics_files(
    process_type: ProcessType,
    region: &Region,
    vm: &Machine<'_>,
    exp_dir: &str,
) -> Result<(), Report> {
    // compute filename prefix
    let prefix = config::file_prefix(process_type, region);
    // compute files to be pulled
    let log_file = config::run_file(process_type, LOG_FILE_EXT);
    let err_file = config::run_file(process_type, ERR_FILE_EXT);
    let dstat_file = config::run_file(process_type, DSTAT_FILE_EXT);
    let metrics_file = config::run_file(process_type, METRICS_FILE_EXT);
    // pull log file
    let local_path = format!("{}/{}.log", exp_dir, prefix);
    vm.copy_from(&log_file, local_path)
        .await
        .wrap_err("copy log")?;
    // pull err file
    let local_path = format!("{}/{}.err", exp_dir, prefix);
    vm.copy_from(&err_file, local_path)
        .await
        .wrap_err("copy err")?;
    // pull dstat
    let local_path = format!("{}/{}_dstat.csv", exp_dir, prefix);
    vm.copy_from(&dstat_file, local_path)
        .await
        .wrap_err("copy dstat")?;
    // pull metrics file
    let local_path = format!("{}/{}_metrics.bincode.gz", exp_dir, prefix);
    vm.copy_from(&metrics_file, local_path)
        .await
        .wrap_err("copy metrics")?;
    // remove metric files:
    // - note that in the case of `Process::Server`, the metrics file is
    //   generated periodic, and thus, remove it makes little sense
    let to_remove = format!("rm {} {} {}", log_file, dstat_file, metrics_file);
    vm.exec(to_remove).await.wrap_err("remove files")?;
    match process_type {
        ProcessType::Server(process_id) => {
            tracing::info!(
                "process {:?} metric files pulled in region {:?}",
                process_id,
                region
            );
        }
        ProcessType::Client(_) => {
            tracing::info!("client metric files pulled in region {:?}", region);
        }
    }
    Ok(())
}
async fn pull_flamegraph_file(
process_type: ProcessType,
region: &Region,
vm: &Machine<'_>,
exp_dir: &str,
) -> Result<(), Report> {
// compute flamegraph file
let flamegraph_file = config::run_file(process_type, FLAMEGRAPH_FILE_EXT);
// compute filename prefix
let prefix = config::file_prefix(process_type, region);
let local_path = format!("{}/{}_flamegraph.svg", exp_dir, prefix);
vm.copy_from(&flamegraph_file, local_path)
.await
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | true |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_exp/src/util.rs | fantoch_exp/src/util.rs | #[macro_export]
// Builds a `Vec<String>` from zero or more `ToString` values; used to
// assemble remote-command argument lists.
macro_rules! args {
    // base case: stringify each element and push it into a fresh vector
    ($($element:expr),*) => {{
        #[allow(unused_mut)]
        let mut vs = Vec::new();
        $(vs.push($element.to_string());)*
        vs
    }};
    // trailing-comma variant: delegate to the base case
    ($($element:expr,)*) => {{
        $crate::args![$($element),*]
    }};
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_exp/src/progress.rs | fantoch_exp/src/progress.rs | #[derive(Clone)]
/// Cloneable wrapper around an `indicatif` progress bar that also acts as
/// the `tracing` log writer, so log lines are printed through the bar
/// instead of breaking its rendering.
pub struct TracingProgressBar {
    // shared handle to the underlying progress bar (cheap to clone)
    progress: indicatif::ProgressBar,
}
impl TracingProgressBar {
    /// Creates a progress bar with `len` steps and installs it as the
    /// global `tracing` subscriber's writer, so that all log output goes
    /// through the bar and a single bar stays on screen.
    pub fn init(len: u64) -> Self {
        // create progress bar style
        let style = indicatif::ProgressStyle::default_bar().template(
            "[{elapsed_precise}] {wide_bar:.green} {pos:>2}/{len:2} (ETA {eta})",
        );
        // create progress bar and set its style
        let progress = indicatif::ProgressBar::new(len);
        progress.set_style(style);
        let progress = Self { progress };
        // init tracing subscriber
        tracing_subscriber::fmt()
            .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
            // redirect all tracing logs to self; this makes sure that there's a
            // single progress bar that, and not one scattered, in between
            // tracing logs
            .with_writer(progress.clone())
            .init();
        progress
    }
    /// Advances the progress bar by one step.
    pub fn inc(&mut self) {
        // NOTE: this function doesn't have to be &mut self
        self.progress.inc(1);
    }
}
/// `Write` impl that routes log bytes through the progress bar, so log
/// lines are printed above the bar instead of corrupting it.
impl std::io::Write for TracingProgressBar {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        // use a lossy conversion so an invalid/partial UTF-8 chunk can't
        // panic inside the log writer (the previous `from_utf8(..).unwrap()`
        // would); also print the text directly instead of re-formatting it
        self.progress.println(String::from_utf8_lossy(buf));
        Ok(buf.len())
    }
    fn flush(&mut self) -> std::io::Result<()> {
        // `println` is immediate; nothing to flush
        Ok(())
    }
}
impl<'writer> tracing_subscriber::fmt::MakeWriter<'writer>
    for TracingProgressBar
{
    type Writer = Self;
    /// Hands `tracing-subscriber` a writer per log event; cloning is cheap
    /// since the inner progress bar is a shared handle.
    fn make_writer(&self) -> Self::Writer {
        self.clone()
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_exp/src/machine.rs | fantoch_exp/src/machine.rs | use crate::args;
use crate::config::{Placement, RegionIndex};
use crate::{FantochFeature, ProcessType, RunMode, Testbed};
use color_eyre::eyre::WrapErr;
use color_eyre::Report;
use fantoch::id::{ProcessId, ShardId};
use fantoch::planet::Region;
use std::collections::HashMap;
use std::future::Future;
use std::path::Path;
use std::pin::Pin;
const SETUP_SCRIPT: &str = "exp_files/setup.sh";
/// A machine on which commands can be executed: a tsunami-provisioned VM
/// (owned or borrowed) or the local machine itself (used by the local
/// testbed, where "copies" become plain `cp`).
pub enum Machine<'a> {
    Tsunami(tsunami::Machine<'a>),
    TsunamiRef(&'a tsunami::Machine<'a>),
    Local,
}
impl<'a> Machine<'a> {
    /// Returns the machine's public IP (loopback for the local machine).
    pub fn ip(&self) -> String {
        match self {
            Self::Tsunami(vm) => vm.public_ip.clone(),
            Self::TsunamiRef(vm) => vm.public_ip.clone(),
            Self::Local => String::from("127.0.0.1"),
        }
    }
    /// Runs `command` on the machine (over ssh for tsunami VMs, via a local
    /// `bash -c` otherwise) and returns its trimmed stdout.
    pub async fn exec(&self, command: impl ToString) -> Result<String, Report> {
        match self {
            Self::Tsunami(vm) => Self::tsunami_exec(vm, command).await,
            Self::TsunamiRef(vm) => Self::tsunami_exec(vm, command).await,
            Self::Local => {
                Self::exec_command(Self::create_command(command)).await
            }
        }
    }
    /// Builds (without running) the `tokio::process::Command` that `exec`
    /// would run; callers use this when they need to manage the child
    /// process themselves.
    pub fn prepare_exec(
        &self,
        command: impl ToString,
    ) -> tokio::process::Command {
        match &self {
            Self::Tsunami(vm) => Self::tsunami_prepare_exec(vm, command),
            Self::TsunamiRef(vm) => Self::tsunami_prepare_exec(vm, command),
            Self::Local => Self::create_command(command),
        }
    }
    /// Makes `path` executable and runs it with the given args, returning
    /// its stdout.
    pub async fn script_exec(
        &self,
        path: &str,
        args: Vec<String>,
    ) -> Result<String, Report> {
        let args = args.join(" ");
        let command = format!("chmod u+x {} && ./{} {}", path, path, args);
        self.exec(command).await.wrap_err("chmod && ./script")
    }
    /// Copies a local file to the machine (scp for tsunami VMs, `cp`
    /// locally).
    pub async fn copy_to(
        &self,
        local_path: impl AsRef<Path>,
        remote_path: impl AsRef<Path>,
    ) -> Result<(), Report> {
        match self {
            Self::Tsunami(vm) => {
                Self::tsunami_copy_to(vm, local_path, remote_path).await
            }
            Self::TsunamiRef(vm) => {
                Self::tsunami_copy_to(vm, local_path, remote_path).await
            }
            Self::Local => Self::local_copy(local_path, remote_path).await,
        }
    }
    /// Copies a file from the machine to a local path (scp for tsunami VMs,
    /// `cp` locally).
    pub async fn copy_from(
        &self,
        remote_path: impl AsRef<Path>,
        local_path: impl AsRef<Path>,
    ) -> Result<(), Report> {
        match self {
            Self::Tsunami(vm) => {
                Self::tsunami_copy_from(vm, remote_path, local_path).await
            }
            Self::TsunamiRef(vm) => {
                Self::tsunami_copy_from(vm, remote_path, local_path).await
            }
            Self::Local => Self::local_copy(remote_path, local_path).await,
        }
    }
    // Runs `command` on a tsunami VM over ssh using its credentials.
    async fn tsunami_exec(
        vm: &tsunami::Machine<'_>,
        command: impl ToString,
    ) -> Result<String, Report> {
        Self::ssh_exec(
            vm.username.as_ref(),
            vm.public_ip.as_ref(),
            vm.private_key.as_ref().expect("private key should be set"),
            command,
        )
        .await
    }
    // Builds (without running) the ssh command for a tsunami VM.
    fn tsunami_prepare_exec(
        vm: &tsunami::Machine<'_>,
        command: impl ToString,
    ) -> tokio::process::Command {
        Self::prepare_ssh_exec(
            vm.username.as_ref(),
            vm.public_ip.as_ref(),
            vm.private_key.as_ref().expect("private key should be set"),
            command,
        )
    }
    // scp a local file to the VM.
    // NOTE(review): only the spawn error is checked here; a non-zero scp
    // exit status is silently ignored — confirm whether that's intended.
    async fn tsunami_copy_to(
        vm: &tsunami::Machine<'_>,
        local_path: impl AsRef<Path>,
        remote_path: impl AsRef<Path>,
    ) -> Result<(), Report> {
        let from = local_path.as_ref().display();
        let to = format!(
            "{}@{}:{}",
            vm.username,
            vm.public_ip,
            remote_path.as_ref().display()
        );
        let scp_command = format!(
            "scp -o StrictHostKeyChecking=no -i {} {} {}",
            vm.private_key
                .as_ref()
                .expect("private key should be set")
                .as_path()
                .display(),
            from,
            to,
        );
        Self::create_command(scp_command).output().await?;
        Ok(())
    }
    // scp a file from the VM to a local path (same exit-status caveat as
    // `tsunami_copy_to`).
    async fn tsunami_copy_from(
        vm: &tsunami::Machine<'_>,
        remote_path: impl AsRef<Path>,
        local_path: impl AsRef<Path>,
    ) -> Result<(), Report> {
        let from = format!(
            "{}@{}:{}",
            vm.username,
            vm.public_ip,
            remote_path.as_ref().display()
        );
        let to = local_path.as_ref().display();
        let scp_command = format!(
            "scp -o StrictHostKeyChecking=no -i {} {} {}",
            vm.private_key
                .as_ref()
                .expect("private key should be set")
                .as_path()
                .display(),
            from,
            to,
        );
        Self::create_command(scp_command).output().await?;
        Ok(())
    }
    // local-testbed "copy": a plain `cp from to`.
    async fn local_copy(
        from: impl AsRef<Path>,
        to: impl AsRef<Path>,
    ) -> Result<(), Report> {
        let cp_command =
            format!("cp {} {}", from.as_ref().display(), to.as_ref().display());
        Self::create_command(cp_command).output().await?;
        Ok(())
    }
    /// Runs `command` on `public_ip` over ssh and returns its trimmed
    /// stdout.
    pub async fn ssh_exec(
        username: &str,
        public_ip: &str,
        private_key: &std::path::PathBuf,
        command: impl ToString,
    ) -> Result<String, Report> {
        Self::exec_command(Self::prepare_ssh_exec(
            username,
            public_ip,
            private_key,
            command,
        ))
        .await
    }
    // Builds the ssh invocation, quoting the remote command via `escape`.
    fn prepare_ssh_exec(
        username: &str,
        public_ip: &str,
        private_key: &std::path::PathBuf,
        command: impl ToString,
    ) -> tokio::process::Command {
        let ssh_command = format!(
            "ssh -o StrictHostKeyChecking=no -i {} {}@{} {}",
            private_key.as_path().display(),
            username,
            public_ip,
            Self::escape(command)
        );
        Self::create_command(ssh_command)
    }
    // Wraps an arbitrary command string in `bash -c` so pipes/redirects
    // work.
    fn create_command(command_arg: impl ToString) -> tokio::process::Command {
        let command_arg = command_arg.to_string();
        tracing::debug!("{}", command_arg);
        let mut command = tokio::process::Command::new("bash");
        command.arg("-c");
        command.arg(command_arg);
        command
    }
    // Runs the prepared command and returns its trimmed stdout (stderr is
    // dropped; a non-zero exit status is not treated as an error).
    async fn exec_command(
        mut command: tokio::process::Command,
    ) -> Result<String, Report> {
        let out = command.output().await.wrap_err("ssh command")?;
        let out = String::from_utf8(out.stdout)
            .wrap_err("output conversion to utf8")?
            .trim()
            .to_string();
        Ok(out)
    }
    // NOTE(review): this only wraps in double quotes; it does NOT escape
    // quotes already inside `command`, so commands containing `"` would
    // break — fine for the controlled commands used here.
    fn escape(command: impl ToString) -> String {
        format!("\"{}\"", command.to_string())
    }
}
/// The machines used by one experiment: one server VM per process, one
/// client VM per region, plus the placement mapping (region, shard) pairs
/// to (process id, region index).
pub struct Machines<'a> {
    placement: Placement,
    // potentially more than one process machine per region (if partial
    // replication)
    servers: HashMap<ProcessId, Machine<'a>>,
    // only one client machine per region
    clients: HashMap<Region, Machine<'a>>,
}
impl<'a> Machines<'a> {
    /// Creates a `Machines`; panics if `placement` and `servers` disagree
    /// on the number of processes.
    pub fn new(
        placement: Placement,
        servers: HashMap<ProcessId, Machine<'a>>,
        clients: HashMap<Region, Machine<'a>>,
    ) -> Self {
        assert_eq!(
            placement.len(),
            servers.len(),
            "placement and servers should have the same cardinality"
        );
        Self {
            placement,
            servers,
            clients,
        }
    }
    /// Returns the placement mapping.
    pub fn placement(&self) -> &Placement {
        &self.placement
    }
    /// Iterates over (process id, server machine) pairs.
    pub fn servers(&self) -> impl Iterator<Item = (&ProcessId, &Machine<'_>)> {
        self.servers.iter()
    }
    /// Returns the server machine of `process_id`; panics if unknown.
    pub fn server(&self, process_id: &ProcessId) -> &Machine<'_> {
        self.servers
            .get(process_id)
            .expect("server vm should exist")
    }
    /// Iterates over (region, client machine) pairs.
    pub fn clients(&self) -> impl Iterator<Item = (&Region, &Machine<'_>)> {
        self.clients.iter()
    }
    /// Iterates over all machines: servers first, then clients.
    pub fn vms(&self) -> impl Iterator<Item = &Machine<'_>> {
        self.servers
            .iter()
            .map(|(_, vm)| vm)
            .chain(self.clients.iter().map(|(_, vm)| vm))
    }
    /// Number of server machines.
    pub fn server_count(&self) -> usize {
        self.servers.len()
    }
    /// Number of client machines.
    pub fn client_count(&self) -> usize {
        self.clients.len()
    }
    /// Total number of machines (servers + clients).
    pub fn vm_count(&self) -> usize {
        self.server_count() + self.client_count()
    }
    /// Returns the region hosting process `target_id`; panics if the
    /// process is not in the placement.
    pub fn process_region(&self, target_id: &ProcessId) -> &Region {
        for ((region, _shard_id), (process_id, _region_index)) in
            self.placement.iter()
        {
            if target_id == process_id {
                return region;
            }
        }
        panic!("process with id {:?} should be in placement", target_id)
    }
    /// Returns the (1-based) index of `target_region`; panics if the region
    /// is not in the placement.
    pub fn region_index(&self, target_region: &Region) -> usize {
        for ((region, _shard_id), (_process_id, region_index)) in
            self.placement.iter()
        {
            if target_region == region {
                return *region_index;
            }
        }
        panic!("region {:?} should be in placement", target_region)
    }
    /// Returns all process ids placed in `target_region` plus the region's
    /// index; panics if processes of the same region disagree on the index.
    pub fn processes_in_region(
        &self,
        target_region: &Region,
    ) -> (Vec<ProcessId>, RegionIndex) {
        let mut ids = Vec::new();
        let mut region_indexes = Vec::new();
        // track the ids and region indexes for this region
        for ((region, _shard_id), (process_id, region_index)) in
            self.placement.iter()
        {
            if target_region == region {
                ids.push(*process_id);
                region_indexes.push(*region_index)
            }
        }
        // compute the region index
        region_indexes.sort();
        region_indexes.dedup();
        assert_eq!(
            region_indexes.len(),
            1,
            "there should be a single region index for each region"
        );
        let region_index = region_indexes.remove(0);
        (ids, region_index)
    }
    /// Builds the peer list for `process_id`: itself first, then — walking
    /// region indexes in a ring starting at its own region — all co-located
    /// processes of its region, and one same-shard process per other
    /// region. The result has exactly `n + shard_count - 1` entries.
    pub fn sorted_processes(
        &self,
        shard_count: usize,
        n: usize,
        process_id: ProcessId,
        shard_id: ShardId,
        region_index: usize,
    ) -> Vec<(ProcessId, ShardId)> {
        let mut sorted_processes = Vec::new();
        // make sure we're the first process
        sorted_processes.push((process_id, shard_id));
        // for each region, add:
        // - all (but self), if same region
        // - the one some my shard, if different region
        // region indexes are 1-based, hence the `(region_index..=n)` ring
        for index in (region_index..=n).chain(1..region_index) {
            // select the processes in this region index;
            let index_processes =
                self.placement.iter().filter(|(_, (_, peer_region_index))| {
                    *peer_region_index == index
                });
            let region_ids: Vec<_> = if index == region_index {
                // select all (but self)
                index_processes
                    .filter_map(|((_, peer_shard_id), (peer_id, _))| {
                        if *peer_id != process_id {
                            Some((*peer_id, *peer_shard_id))
                        } else {
                            None
                        }
                    })
                    .collect()
            } else {
                // select the one process from my shard
                index_processes
                    .filter_map(|((_, peer_shard_id), (peer_id, _))| {
                        if *peer_shard_id == shard_id {
                            Some((*peer_id, *peer_shard_id))
                        } else {
                            None
                        }
                    })
                    .collect()
            };
            sorted_processes.extend(region_ids);
        }
        assert_eq!(
            sorted_processes.len(),
            n + shard_count - 1,
            "the number of sorted processes should be n + shards - 1"
        );
        // return sorted processes
        sorted_processes
    }
}
/// Returns the per-VM setup closure used by tsunami for the aws/baremetal
/// testbeds: it copies `setup.sh` to the VM and runs it (retrying) until
/// all required apt packages and commands are confirmed installed.
pub fn fantoch_setup(
    branch: String,
    run_mode: RunMode,
    features: Vec<FantochFeature>,
    testbed: Testbed,
) -> Box<
    dyn for<'r> Fn(
            &'r tsunami::Machine<'_>,
        ) -> Pin<
            Box<dyn Future<Output = Result<(), Report>> + Send + 'r>,
        > + Send
        + Sync
        + 'static,
> {
    assert!(
        matches!(testbed, Testbed::Aws | Testbed::Baremetal),
        "fantoch_setup should only be called with the aws and baremetal testbeds"
    );
    Box::new(move |vm| {
        let vm = Machine::TsunamiRef(vm);
        let testbed = testbed.name();
        let mode = run_mode.name();
        let branch = branch.clone();
        let features = fantoch_features_as_arg(&features);
        Box::pin(async move {
            // files
            let script_file = "setup.sh";
            // first copy file to the machine
            vm.copy_to(SETUP_SCRIPT, script_file)
                .await
                .wrap_err("copy_to setup script")?;
            // execute setup script
            let mut done = false;
            while !done {
                let stdout = vm
                    .script_exec(
                        script_file,
                        args![testbed, mode, branch, features, "2>&1"],
                    )
                    .await?;
                tracing::trace!("full output:\n{}", stdout);
                // check if there was no warning about the packages we need
                let all_available = vec![
                    "build-essential",
                    "pkg-config",
                    "libssl-dev",
                    "chrony",
                    "perf-tools-unstable",
                    "linux-tools-common",
                    "linux-tools-generic",
                    "htop",
                    "dstat",
                    "lsof",
                ]
                .into_iter()
                .all(|package| {
                    let msg = format!("Package {} is not available", package);
                    !stdout.contains(&msg)
                });
                // check if commands we may need are actually installed
                let all_found = vec![
                    "Command 'dstat' not found",
                    "Command 'lsof' not found",
                    "flamegraph: command not found",
                    "chrony: command not found",
                ]
                .into_iter()
                .all(|msg| !stdout.contains(&msg));
                // we're done if no warnings and all commands are actually
                // installed
                done = all_available && all_found;
                if !done {
                    tracing::warn!(
                        "trying again since at least one package was not available"
                    );
                }
            }
            Ok(())
        })
    })
}
/// Returns the per-VM setup closure for the veleta testbed, which currently
/// performs no work.
pub fn veleta_fantoch_setup() -> Box<
    dyn for<'r> Fn(
            &'r tsunami::Machine<'_>,
        ) -> Pin<
            Box<dyn Future<Output = Result<(), Report>> + Send + 'r>,
        > + Send
        + Sync
        + 'static,
> {
    // TODO: actually, we need to still do some setup on veleta machines like
    // increasing max number of open files, which was done "manually" in the
    // first time
    Box::new(|_vm| {
        // nothing to do (yet): hand back an immediately-ready future
        Box::pin(async move { Ok(()) })
    })
}
/// Runs the setup script once on the local machine (local testbed only);
/// unlike `fantoch_setup`, there is no retry-until-installed loop here.
pub async fn local_fantoch_setup(
    branch: String,
    run_mode: RunMode,
    features: Vec<FantochFeature>,
    testbed: Testbed,
) -> Result<(), Report> {
    assert!(
        testbed == Testbed::Local,
        "local_fantoch_setup should only be called with the local testbed"
    );
    let testbed = testbed.name();
    let mode = run_mode.name();
    let features = fantoch_features_as_arg(&features);
    let vm = Machine::Local;
    // execute setup script
    let stdout = vm
        .script_exec(
            SETUP_SCRIPT,
            args![testbed, mode, branch, features, "2>&1"],
        )
        .await?;
    tracing::trace!("full output:\n{}", stdout);
    Ok(())
}
/// Joins the given cargo features into a single comma-separated argument
/// (e.g. "jemalloc"), as expected by the setup script.
///
/// Takes `&[FantochFeature]` instead of `&Vec<FantochFeature>`: strictly
/// more general, and both call sites (`&features` on a `Vec`) still work
/// via deref coercion.
fn fantoch_features_as_arg(features: &[FantochFeature]) -> String {
    features
        .iter()
        .map(|feature| feature.name())
        .collect::<Vec<_>>()
        .join(",")
}
/// Builds the full shell command line that launches a fantoch binary: the
/// `RUST_LOG` environment, the run-mode wrapper (release / flamegraph /
/// heaptrack), the binary's arguments, and redirection of all output to
/// `err_file`.
pub fn fantoch_bin_script(
    process_type: ProcessType,
    binary: &str,
    args: Vec<String>,
    run_mode: RunMode,
    max_log_level: &tracing::Level,
    err_file: impl ToString,
) -> String {
    // binary=info makes sure that we also capture any logs in there
    let env_vars = format!(
        "RUST_LOG={}={},fantoch={},fantoch_ps={}",
        binary, max_log_level, max_log_level, max_log_level,
    );
    let run_command = run_mode.run_command(process_type, &env_vars, binary);
    format!(
        "{} {} > {} 2>&1",
        run_command,
        args.join(" "),
        err_file.to_string()
    )
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_exp/src/bin/ping.rs | fantoch_exp/src/bin/ping.rs | use color_eyre::eyre::WrapErr;
use color_eyre::Report;
use fantoch_exp::args;
use fantoch_exp::machine::Machine;
use rusoto_core::Region;
use std::time::Duration;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use tracing::instrument;
use tracing_futures::Instrument;
use tsunami::providers::aws::LaunchMode;
use tsunami::Tsunami;
const LAUCH_MODE: LaunchMode = LaunchMode::OnDemand;
const INSTANCE_TYPE: &str = "m5.large";
const MAX_SPOT_INSTANCE_REQUEST_WAIT_SECS: u64 = 5 * 60; // 5 minutes
const PING_DURATION_SECS: usize = 60 * 60; // 60 minutes
/// This script should be called like: $ script hosts seconds output
/// - hosts: file where each line looks like "region::ip"
/// - seconds: number of seconds the ping will run
/// - output: the file where the output will be written
const SCRIPT: &str = "./../ping_exp_gcp/region_ping_loop.sh";
const HOSTS: &str = "./hosts";
#[tokio::main]
/// Entry point of the ping experiment: launches one VM per selected AWS
/// region and measures inter-region latency for `PING_DURATION_SECS`.
async fn main() -> Result<(), Report> {
    // all AWS regions (kept for reference; only the subset below is used)
    let _all_regions = vec![
        Region::AfSouth1,
        Region::ApEast1,
        Region::ApNortheast1,
        // Region::ApNortheast2, special-region
        Region::ApSouth1,
        Region::ApSoutheast1,
        Region::ApSoutheast2,
        Region::CaCentral1,
        Region::EuCentral1,
        Region::EuNorth1,
        Region::EuSouth1,
        Region::EuWest1,
        Region::EuWest2,
        Region::EuWest3,
        Region::MeSouth1,
        Region::SaEast1,
        Region::UsEast1,
        Region::UsEast2,
        Region::UsWest1,
        Region::UsWest2,
    ];
    // AWS regions used in experiments
    let regions = vec![
        Region::EuWest1,
        Region::UsWest1,
        Region::ApSoutheast1,
        Region::CaCentral1,
        Region::SaEast1,
    ];
    ping_experiment(
        LAUCH_MODE,
        regions,
        INSTANCE_TYPE,
        MAX_SPOT_INSTANCE_REQUEST_WAIT_SECS,
        PING_DURATION_SECS,
    )
    .await
}
/// Runs the ping experiment and — regardless of its outcome — terminates
/// every launched VM before returning the experiment's result.
async fn ping_experiment(
    launch_mode: LaunchMode,
    regions: Vec<Region>,
    instance_type: impl ToString + Clone,
    max_spot_instance_request_wait_secs: u64,
    experiment_duration_secs: usize,
) -> Result<(), Report> {
    let mut launcher: tsunami::providers::aws::Launcher<_> = Default::default();
    let result = ping_experiment_run(
        &mut launcher,
        launch_mode,
        regions,
        instance_type,
        max_spot_instance_request_wait_secs,
        experiment_duration_secs,
    )
    .await;
    tracing::info!("experiment result: {:?}", result);
    // make sure we always terminate
    launcher.terminate_all().await?;
    result
}
/// Spawns one VM per region, writes the `HOSTS` file ("region::ip" lines)
/// consumed by the remote ping script, and runs the per-region `ping`
/// futures concurrently to completion.
pub async fn ping_experiment_run(
    launcher: &mut tsunami::providers::aws::Launcher<
        rusoto_credential::DefaultCredentialsProvider,
    >,
    launch_mode: LaunchMode,
    regions: Vec<Region>,
    instance_type: impl ToString + Clone,
    max_spot_instance_request_wait_secs: u64,
    experiment_duration_secs: usize,
) -> Result<(), Report> {
    let mut descriptors = Vec::with_capacity(regions.len());
    for region in &regions {
        // get region name
        let name = region.name().to_string();
        // create setup
        let setup = tsunami::providers::aws::Setup::default()
            .instance_type(instance_type.clone())
            .region_with_ubuntu_ami(region.clone())
            .await?
            .setup(|vm| {
                Box::pin(async move {
                    // best-effort `apt update`; a failure is only logged
                    let update = vm
                        .ssh
                        .command("sudo")
                        .arg("apt")
                        .arg("update")
                        .status()
                        .await;
                    if let Err(e) = update {
                        tracing::warn!("apt update failed: {}", e);
                    };
                    Ok(())
                })
            });
        // save setup
        descriptors.push((name, setup))
    }
    // spawn and connect
    launcher.set_mode(launch_mode);
    let max_wait =
        Some(Duration::from_secs(max_spot_instance_request_wait_secs));
    launcher.spawn(descriptors, max_wait).await?;
    let mut vms = launcher.connect_all().await?;
    // create HOSTS file content: each line should be "region::ip"
    // - create ping future for each region along the way
    let mut pings = Vec::with_capacity(regions.len());
    let hosts = regions
        .iter()
        .map(|region| {
            let region_name = region.name();
            let vm = vms.remove(region_name).unwrap();
            // compute host entry
            let host = format!("{}::{}", region_name, vm.public_ip);
            // create ping future
            let region_span =
                tracing::info_span!("region", name = ?region_name);
            let ping =
                ping(vm, experiment_duration_secs).instrument(region_span);
            pings.push(ping);
            // return host name
            host
        })
        .collect::<Vec<_>>()
        .join("\n");
    // create HOSTS file
    let mut file = File::create(HOSTS).await?;
    file.write_all(hosts.as_bytes()).await?;
    // run all pings concurrently; fail on the first error
    for result in futures::future::join_all(pings).await {
        let () = result?;
    }
    Ok(())
}
#[instrument]
/// Runs the ping script on one VM for `experiment_duration_secs` and pulls
/// the resulting output file back to the local machine.
async fn ping(
    vm: tsunami::Machine<'_>,
    experiment_duration_secs: usize,
) -> Result<(), Report> {
    tracing::info!(
        "will launch ping experiment with {} seconds",
        experiment_duration_secs
    );
    // files
    let script_file = "script.sh";
    let hosts_file = "hosts";
    let output_file = format!("{}.dat", vm.nickname);
    let vm = Machine::Tsunami(vm);
    // first copy both SCRIPT and HOSTS files to the machine
    vm.copy_to(SCRIPT, script_file)
        .await
        .wrap_err("copy_to script")?;
    vm.copy_to(HOSTS, hosts_file)
        .await
        .wrap_err("copy_to hosts")?;
    tracing::debug!("both files are copied to remote machine");
    // execute script remotely: "$ script.sh hosts seconds output"
    let args = args![hosts_file, experiment_duration_secs, output_file];
    let stdout = vm.script_exec(script_file, args).await?;
    tracing::debug!("script ended {}", stdout);
    // copy output file
    vm.copy_from(&output_file, &output_file)
        .await
        .wrap_err("copy_from")?;
    tracing::info!("output file is copied to local machine");
    Ok(())
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_exp/src/bin/main.rs | fantoch_exp/src/bin/main.rs | use color_eyre::eyre::WrapErr;
use color_eyre::Report;
use fantoch::client::{KeyGen, Workload};
use fantoch::config::Config;
use fantoch::planet::Planet;
use fantoch_exp::bench::ExperimentTimeouts;
use fantoch_exp::machine::Machines;
use fantoch_exp::progress::TracingProgressBar;
use fantoch_exp::{FantochFeature, Protocol, RunMode, Testbed};
use rusoto_core::Region;
use std::path::Path;
use std::time::Duration;
use tsunami::providers::aws::LaunchMode;
use tsunami::Tsunami;
// timeouts
/// Builds a `Duration` spanning the given number of minutes.
const fn minutes(minutes: u64) -> Duration {
    const SECS_PER_MINUTE: u64 = 60;
    Duration::from_secs(SECS_PER_MINUTE * minutes)
}
const EXPERIMENT_TIMEOUTS: ExperimentTimeouts = ExperimentTimeouts {
start: Some(minutes(20)),
run: Some(minutes(20)),
stop: Some(minutes(20)),
};
// latency dir
const LATENCY_AWS: &str = "../latency_aws/2020_06_05";
// const LATENCY_AWS: &str = "../latency_aws/2021_02_13";
// aws experiment config
const LAUNCH_MODE: LaunchMode = LaunchMode::OnDemand;
// const SERVER_INSTANCE_TYPE: &str = "m5.4xlarge";
const SERVER_INSTANCE_TYPE: &str = "c5.2xlarge";
const CLIENT_INSTANCE_TYPE: &str = "m5.2xlarge";
const MAX_SPOT_INSTANCE_REQUEST_WAIT_SECS: u64 = 5 * 60; // 5 minutes
// processes config
const EXECUTE_AT_COMMIT: bool = false;
const EXECUTOR_CLEANUP_INTERVAL: Duration = Duration::from_millis(10);
const EXECUTOR_MONITOR_PENDING_INTERVAL: Option<Duration> = None;
const GC_INTERVAL: Option<Duration> = Some(Duration::from_millis(50));
const SEND_DETACHED_INTERVAL: Duration = Duration::from_millis(5);
// clients config
const COMMANDS_PER_CLIENT_WAN: usize = 500;
const COMMANDS_PER_CLIENT_LAN: usize = 5_000;
const TOTAL_KEYS_PER_SHARD: usize = 1_000_000;
// batching config
const BATCH_MAX_DELAY: Duration = Duration::from_millis(5);
// fantoch run config
const BRANCH: &str = "main";
// tracing max log level: compile-time level should be <= run-time level
const MAX_LEVEL_COMPILE_TIME: tracing::Level = tracing::Level::INFO;
const MAX_LEVEL_RUN_TIME: tracing::Level = tracing::Level::INFO;
// release run
const FEATURES: &[FantochFeature] = &[FantochFeature::Jemalloc];
const RUN_MODE: RunMode = RunMode::Release;
// heaptrack run (no jemalloc feature as heaptrack doesn't support it)
// const FEATURES: &[FantochFeature] = &[];
// const RUN_MODE: RunMode = RunMode::Heaptrack;
// flamegraph run
// const FEATURES: &[FantochFeature] = &[FantochFeature::Jemalloc];
// const RUN_MODE: RunMode = RunMode::Flamegraph;
// list of protocol binaries to cleanup before running the experiment
const PROTOCOLS_TO_CLEANUP: &[Protocol] = &[
// Protocol::Basic,
Protocol::TempoAtomic,
Protocol::AtlasLocked,
Protocol::EPaxosLocked,
Protocol::FPaxos,
Protocol::CaesarLocked,
];
// Builds a fully-initialized `Config` from `n`/`f` plus the file-level
// experiment constants; the shorter arms delegate to the 6-argument arm.
macro_rules! config {
    // (n, f, nfr): defaults for tempo knobs, then sets nfr
    ($n:expr, $f:expr, $nfr:expr) => {{
        let mut config = config!($n, $f, false, None, false, EXECUTE_AT_COMMIT);
        config.set_nfr($nfr);
        config
    }};
    // 5-argument arm: uses the file-level EXECUTE_AT_COMMIT default
    ($n:expr, $f:expr, $tiny_quorums:expr, $clock_bump_interval:expr, $skip_fast_ack:expr) => {{
        config!(
            $n,
            $f,
            $tiny_quorums,
            $clock_bump_interval,
            $skip_fast_ack,
            EXECUTE_AT_COMMIT
        )
    }};
    // full arm: every tempo/executor/gc knob applied here
    ($n:expr, $f:expr, $tiny_quorums:expr, $clock_bump_interval:expr, $skip_fast_ack:expr, $execute_at_commit:expr) => {{
        let mut config = Config::new($n, $f);
        config.set_tempo_tiny_quorums($tiny_quorums);
        if let Some(interval) = $clock_bump_interval {
            config.set_tempo_clock_bump_interval::<Option<Duration>>(interval);
        }
        config.set_skip_fast_ack($skip_fast_ack);
        config.set_execute_at_commit($execute_at_commit);
        config.set_executor_cleanup_interval(EXECUTOR_CLEANUP_INTERVAL);
        if let Some(interval) = EXECUTOR_MONITOR_PENDING_INTERVAL {
            config.set_executor_monitor_pending_interval(interval);
        }
        if let Some(interval) = GC_INTERVAL {
            config.set_gc_interval(interval);
        }
        config.set_tempo_detached_send_interval(SEND_DETACHED_INTERVAL);
        config
    }};
}
#[tokio::main]
/// Entry point: runs exactly one of the experiment plots below; the others
/// are kept commented out and enabled by hand per run.
async fn main() -> Result<(), Report> {
    fast_path_plot().await
    // fairness_and_tail_latency_plot().await
    // increasing_load_plot().await
    // batching_plot().await
    // increasing_sites_plot().await
    // nfr_plot().await
    // partial_replication_plot().await
}
#[allow(dead_code)]
/// Experiment for the NFR plot: n = 7 sites, Zipf workload with varying
/// read-only percentages, protocols run with and without NFR.
async fn nfr_plot() -> Result<(), Report> {
    // folder where all results will be stored
    let results_dir = "../results_nfr";
    let regions = vec![
        Region::EuWest1,
        Region::UsWest1,
        Region::ApSoutheast1,
        Region::CaCentral1,
        Region::SaEast1,
        Region::ApEast1,
        Region::UsEast1,
        Region::ApNortheast1,
        Region::EuNorth1,
        Region::ApSouth1,
        Region::UsWest2,
    ];
    let ns = vec![7];
    let nfrs = vec![false, true];
    // pair of protocol and whether it provides configurable fault-tolerance
    let protocols = vec![
        (Protocol::TempoAtomic, true),
        (Protocol::AtlasLocked, true),
        (Protocol::EPaxosLocked, false),
    ];
    let clients_per_region = vec![256];
    let batch_max_sizes = vec![1];
    let shard_count = 1;
    let keys_per_command = 1;
    let payload_size = 100;
    let cpus = 12;
    let key_gen = KeyGen::Zipf {
        total_keys_per_shard: TOTAL_KEYS_PER_SHARD,
        coefficient: 0.99,
    };
    let read_only_percentages = vec![20, 50, 80, 100];
    // one workload per read-only percentage
    let mut workloads = Vec::new();
    for read_only_percentage in read_only_percentages {
        let mut workload = Workload::new(
            shard_count,
            key_gen,
            keys_per_command,
            COMMANDS_PER_CLIENT_WAN,
            payload_size,
        );
        workload.set_read_only_percentage(read_only_percentage);
        workloads.push(workload);
    }
    // run every combination (no experiments skipped)
    let mut skip = |_, _, _| false;
    let mut all_configs = Vec::new();
    for n in ns {
        // take the first n regions
        let regions: Vec<_> = regions.clone().into_iter().take(n).collect();
        assert_eq!(regions.len(), n);
        // create configs: f in {1, 2} for configurable protocols, a
        // minority for the others
        let mut configs = Vec::new();
        for nfr in nfrs.clone() {
            for (protocol, configurable_f) in protocols.clone() {
                if configurable_f {
                    for f in vec![1, 2] {
                        configs.push((protocol, n, f, nfr));
                    }
                } else {
                    let minority = n / 2;
                    configs.push((protocol, n, minority, nfr));
                }
            }
        }
        println!("n = {}", n);
        println!("{:#?}", configs);
        let mut configs: Vec<_> = configs
            .into_iter()
            .map(|(protocol, n, f, nfr)| (protocol, config!(n, f, nfr)))
            .collect();
        // set shards in each config
        configs.iter_mut().for_each(|(_protocol, config)| {
            config.set_shard_count(shard_count)
        });
        all_configs.push((configs, regions));
    }
    // progress bar sized by the total number of experiments
    let total_config_count: usize =
        all_configs.iter().map(|(configs, _)| configs.len()).sum();
    let mut progress = TracingProgressBar::init(
        (workloads.len()
            * clients_per_region.len()
            * total_config_count
            * batch_max_sizes.len()) as u64,
    );
    for (configs, regions) in all_configs {
        // create AWS planet
        let planet = Some(Planet::from(LATENCY_AWS));
        baremetal_bench(
            regions,
            shard_count,
            planet,
            configs,
            clients_per_region.clone(),
            workloads.clone(),
            batch_max_sizes.clone(),
            cpus,
            &mut skip,
            &mut progress,
            results_dir,
        )
        .await?;
    }
    Ok(())
}
#[allow(dead_code)]
/// Experiment for the fast-path plot: n in {5, 7} sites and a range of
/// conflict rates, comparing configurable-f protocols against EPaxos.
async fn fast_path_plot() -> Result<(), Report> {
    // folder where all results will be stored
    let results_dir = "../results_fast_path";
    let regions = vec![
        Region::EuWest1,
        Region::UsWest1,
        Region::ApSoutheast1,
        Region::CaCentral1,
        Region::SaEast1,
        Region::ApEast1,
        Region::UsEast1,
    ];
    let ns = vec![5, 7];
    let clients_per_region = vec![1, 8];
    let batch_max_sizes = vec![1];
    let shard_count = 1;
    let keys_per_command = 1;
    let payload_size = 100;
    let cpus = 12;
    // one key generator per conflict rate
    let conflict_rates = vec![0, 5, 10, 20, 40, 60, 80, 100];
    let key_gens: Vec<_> = conflict_rates
        .into_iter()
        .map(|conflict_rate| KeyGen::ConflictPool {
            conflict_rate,
            pool_size: 1,
        })
        .collect();
    let mut workloads = Vec::new();
    for key_gen in key_gens {
        let workload = Workload::new(
            shard_count,
            key_gen,
            keys_per_command,
            COMMANDS_PER_CLIENT_WAN,
            payload_size,
        );
        workloads.push(workload);
    }
    // allows resuming a partially-completed run by skipping the first
    // `experiments_to_skip` experiments
    let mut experiments_to_skip = 0;
    let mut skip = |_, _, _| {
        if experiments_to_skip == 0 {
            false
        } else {
            experiments_to_skip -= 1;
            true
        }
    };
    // pair of protocol and whether it provides configurable fault-tolerance
    let protocols = vec![
        (Protocol::TempoAtomic, true),
        (Protocol::AtlasLocked, true),
        (Protocol::EPaxosLocked, false),
    ];
    let mut all_configs = Vec::new();
    for n in ns {
        // take the first n regions
        let regions: Vec<_> = regions.clone().into_iter().take(n).collect();
        assert_eq!(regions.len(), n);
        // create configs: f from 2 up to a minority for configurable
        // protocols, a minority for the others
        let mut configs = Vec::new();
        for (protocol, configurable_f) in protocols.clone() {
            let start_f = 2;
            let minority = n / 2;
            if configurable_f {
                for f in start_f..=minority {
                    configs.push((protocol, n, f));
                }
            } else {
                configs.push((protocol, n, minority));
            }
        }
        println!("n = {}", n);
        println!("{:#?}", configs);
        let mut configs: Vec<_> = configs
            .into_iter()
            .map(|(protocol, n, f)| {
                (protocol, config!(n, f, false, None, false))
            })
            .collect();
        // set shards in each config
        configs.iter_mut().for_each(|(_protocol, config)| {
            config.set_shard_count(shard_count)
        });
        all_configs.push((configs, regions));
    }
    // progress bar sized by the total number of experiments
    let total_config_count: usize =
        all_configs.iter().map(|(configs, _)| configs.len()).sum();
    let mut progress = TracingProgressBar::init(
        (workloads.len()
            * clients_per_region.len()
            * total_config_count
            * batch_max_sizes.len()) as u64,
    );
    for (configs, regions) in all_configs {
        // create AWS planet
        let planet = Some(Planet::from(LATENCY_AWS));
        baremetal_bench(
            regions,
            shard_count,
            planet,
            configs,
            clients_per_region.clone(),
            workloads.clone(),
            batch_max_sizes.clone(),
            cpus,
            &mut skip,
            &mut progress,
            results_dir,
        )
        .await?;
    }
    Ok(())
}
/// Experiment driver for the "increasing sites" plot: runs each protocol
/// while growing the number of replication sites `n` over 3, 5, 7, 9 and 11,
/// with a fixed client load per region.
#[allow(dead_code)]
async fn increasing_sites_plot() -> Result<(), Report> {
    // folder where all results will be stored
    let results_dir = "../results_increasing_sites";
    // candidate AWS regions; only the first `n` are used per configuration
    let regions = vec![
        Region::EuWest1,
        Region::UsWest1,
        Region::ApSoutheast1,
        Region::CaCentral1,
        Region::SaEast1,
        Region::ApEast1,
        Region::UsEast1,
        Region::ApNortheast1,
        Region::EuNorth1,
        Region::ApSouth1,
        Region::UsWest2,
        /* Region::EuWest2,
         * Region::UsEast2, */
    ];
    let ns = vec![3, 5, 7, 9, 11];
    let clients_per_region = vec![256];
    let batch_max_sizes = vec![1];
    let shard_count = 1;
    let keys_per_command = 1;
    let payload_size = 100;
    let cpus = 12;
    // a single workload: 2% conflicting commands drawn from a one-key pool
    let key_gens = vec![KeyGen::ConflictPool {
        conflict_rate: 2,
        pool_size: 1,
    }];
    let mut workloads = Vec::new();
    for key_gen in key_gens {
        let workload = Workload::new(
            shard_count,
            key_gen,
            keys_per_command,
            COMMANDS_PER_CLIENT_WAN,
            payload_size,
        );
        workloads.push(workload);
    }
    // run every experiment (nothing is skipped)
    let mut skip = |_, _, _| false;
    // pair of protocol and whether it provides configurable fault-tolerance
    let protocols = vec![
        (Protocol::TempoAtomic, true),
        (Protocol::FPaxos, true),
        (Protocol::AtlasLocked, true),
        (Protocol::EPaxosLocked, false),
    ];
    let mut all_configs = Vec::new();
    for n in ns {
        // take the first n regions
        let regions: Vec<_> = regions.clone().into_iter().take(n).collect();
        assert_eq!(regions.len(), n);
        // create configs
        let mut configs = Vec::new();
        for (protocol, configurable_f) in protocols.clone() {
            if configurable_f {
                // f = 1, and also f = 2 once n > 3 (f must stay <= minority)
                let max_f = if n == 3 { 1 } else { 2 };
                for f in 1..=max_f {
                    configs.push((protocol, n, f));
                }
            } else {
                // non-configurable protocols always tolerate a minority
                let minority = n / 2;
                configs.push((protocol, n, minority));
            }
        }
        let mut configs: Vec<_> = configs
            .into_iter()
            .map(|(protocol, n, f)| {
                (protocol, config!(n, f, false, None, false))
            })
            .collect();
        // set shards in each config
        configs.iter_mut().for_each(|(_protocol, config)| {
            config.set_shard_count(shard_count)
        });
        all_configs.push((configs, regions));
    }
    // total number of (protocol, config) pairs across all values of n,
    // used to size the progress bar
    let total_config_count: usize =
        all_configs.iter().map(|(configs, _)| configs.len()).sum();
    let mut progress = TracingProgressBar::init(
        (workloads.len()
            * clients_per_region.len()
            * total_config_count
            * batch_max_sizes.len()) as u64,
    );
    for (configs, regions) in all_configs {
        // create AWS planet
        let planet = Some(Planet::from(LATENCY_AWS));
        baremetal_bench(
            regions,
            shard_count,
            planet,
            configs,
            clients_per_region.clone(),
            workloads.clone(),
            batch_max_sizes.clone(),
            cpus,
            &mut skip,
            &mut progress,
            results_dir,
        )
        .await?;
    }
    Ok(())
}
/// Experiment driver for the partial replication plot: throughput of a
/// sharded deployment (Zipf-distributed multi-key commands) over three
/// regions, sweeping the number of clients per region.
#[allow(dead_code)]
async fn partial_replication_plot() -> Result<(), Report> {
    // folder where all results will be stored
    let results_dir = "../results_partial_replication";
    // THROUGHPUT
    let regions = vec![Region::EuWest1, Region::UsWest1, Region::ApSoutheast1];
    let n = regions.len();
    let mut configs = vec![
        // (protocol, (n, f, tiny quorums, clock bump interval, skip fast ack))
        (Protocol::TempoAtomic, config!(n, 1, false, None, false)),
        // (Protocol::AtlasLocked, config!(n, 1, false, None, false)),
    ];
    // client loads; the commented-out entries record values explored for
    // other protocol/workload combinations
    let clients_per_region = vec![
        // 256,
        1024,
        // for Atlas s=2,4 zipf=0.7 r=50%:
        // 1024 * 2,
        // 1024 * 4,
        // 1024 * 8,
        // for Atlas s=2 zipf=0.7 r=95%:
        // 1024 * 10,
        // for Atlas s=2 zipf=0.7 r=95%:
        // 1024 * 12,
        // 1024 * 16,
        // for Atlas s=2 zipf=0.5 r=50% | Atlas s=4 zipf=0.7 r=95%:
        // 1024 * 20,
        // 1024 * 24,
        // 1024 * 32,
        // 1024 * 34,
        1024 * 36,
        // 1024 * 40,
        // 1024 * 44,
        1024 * 48,
        1024 * 52,
        1024 * 64,
        1024 * 72,
        1024 * 80,
        // 1024 * 88,
        // 1024 * 96,
        // 1024 * 104,
        // 1024 * 112,
        // 1024 * 128,
        // 1024 * 144,
    ];
    let batch_max_sizes = vec![1];
    // shard_counts: 2, 4, 6
    let shard_count = 6;
    let keys_per_command = 2;
    let payload_size = 100;
    let cpus = 12;
    // one workload per Zipf coefficient (and read-only percentage)
    let mut workloads = Vec::new();
    for coefficient in vec![0.5, 0.7] {
        // janus*:
        // for read_only_percentage in vec![100, 95, 50] {
        // tempo:
        for read_only_percentage in vec![0] {
            let key_gen = KeyGen::Zipf {
                total_keys_per_shard: TOTAL_KEYS_PER_SHARD,
                coefficient,
            };
            let mut workload = Workload::new(
                shard_count,
                key_gen,
                keys_per_command,
                COMMANDS_PER_CLIENT_WAN,
                payload_size,
            );
            workload.set_read_only_percentage(read_only_percentage);
            workloads.push(workload);
        }
    }
    // don't skip
    let mut skip = |_, _, _| false;
    // set shards in each config
    configs
        .iter_mut()
        .for_each(|(_protocol, config)| config.set_shard_count(shard_count));
    // init logging
    let mut progress = TracingProgressBar::init(
        (workloads.len()
            * clients_per_region.len()
            * configs.len()
            * batch_max_sizes.len()) as u64,
    );
    // create AWS planet
    let planet = Some(Planet::from(LATENCY_AWS));
    baremetal_bench(
        regions,
        shard_count,
        planet,
        configs,
        clients_per_region,
        workloads,
        batch_max_sizes,
        cpus,
        &mut skip,
        &mut progress,
        results_dir,
    )
    .await
}
/// Experiment driver for the batching plot: compares no batching
/// (`batch_max_size = 1`) against large batches (10000) for several payload
/// sizes, over five regions.
#[allow(dead_code)]
async fn batching_plot() -> Result<(), Report> {
    // folder where all results will be stored
    let results_dir = "../results_batching";
    // THROUGHPUT
    let regions = vec![
        Region::EuWest1,
        Region::UsWest1,
        Region::ApSoutheast1,
        Region::CaCentral1,
        Region::SaEast1,
    ];
    let n = regions.len();
    let mut configs = vec![
        // (protocol, (n, f, tiny quorums, clock bump interval, skip fast ack))
        (Protocol::TempoAtomic, config!(n, 1, false, None, false)),
        // (Protocol::FPaxos, config!(n, 1, false, None, false)),
    ];
    let clients_per_region = vec![
        1024,
        // 1024 * 2,
        // 1024 * 4,
        1024 * 8,
        1024 * 16,
        1024 * 20,
        1024 * 24,
        1024 * 28,
        // 1024 * 44,
        // 1024 * 48,
        // 1024 * 52,
        // 1024 * 56,
        // 1024 * 60,
        // 1024 * 64,
    ];
    let batch_max_sizes = vec![1, 10000];
    let shard_count = 1;
    let keys_per_command = 1;
    let payload_sizes = vec![256, 1024, 4096];
    let cpus = 12;
    // fixed key generator: 2% conflicts from a one-key pool
    let key_gen = KeyGen::ConflictPool {
        conflict_rate: 2,
        pool_size: 1,
    };
    // one workload per payload size
    let mut workloads = Vec::new();
    for payload_size in payload_sizes {
        let workload = Workload::new(
            shard_count,
            key_gen,
            keys_per_command,
            COMMANDS_PER_CLIENT_WAN,
            payload_size,
        );
        workloads.push(workload);
    }
    // run every experiment (nothing is skipped)
    let mut skip = |_, _, _| false;
    // set shards in each config
    configs
        .iter_mut()
        .for_each(|(_protocol, config)| config.set_shard_count(shard_count));
    // init logging
    let mut progress = TracingProgressBar::init(
        (workloads.len()
            * clients_per_region.len()
            * configs.len()
            * batch_max_sizes.len()) as u64,
    );
    // create AWS planet
    let planet = Some(Planet::from(LATENCY_AWS));
    // note: no `.clone()` on the arguments — none of these values is used
    // after this single call (the clones previously here were redundant)
    baremetal_bench(
        regions,
        shard_count,
        planet,
        configs,
        clients_per_region,
        workloads,
        batch_max_sizes,
        cpus,
        &mut skip,
        &mut progress,
        results_dir,
    )
    .await
}
/// Experiment driver for the "increasing load" plot: throughput/latency of
/// all protocols as the number of clients per region grows, for two conflict
/// rates, over five regions.
#[allow(dead_code)]
async fn increasing_load_plot() -> Result<(), Report> {
    // folder where all results will be stored
    let results_dir = "../results_increasing_load";
    // THROUGHPUT
    let regions = vec![
        Region::EuWest1,
        Region::UsWest1,
        Region::ApSoutheast1,
        Region::CaCentral1,
        Region::SaEast1,
    ];
    let n = regions.len();
    let caesar_execute_at_commit = true;
    let mut configs = vec![
        // (protocol, (n, f, tiny quorums, clock bump interval, skip fast ack))
        // (Protocol::Basic, config!(n, 1, false, None, false)),
        (Protocol::TempoAtomic, config!(n, 1, false, None, false)),
        (Protocol::TempoAtomic, config!(n, 2, false, None, false)),
        (Protocol::FPaxos, config!(n, 1, false, None, false)),
        (Protocol::FPaxos, config!(n, 2, false, None, false)),
        (Protocol::AtlasLocked, config!(n, 1, false, None, false)),
        (Protocol::AtlasLocked, config!(n, 2, false, None, false)),
        // (Protocol::EPaxosLocked, config!(n, 2, false, None, false)),
        (
            // Caesar takes an extra flag: execute at commit time
            Protocol::CaesarLocked,
            config!(n, 2, false, None, false, caesar_execute_at_commit),
        ),
    ];
    let clients_per_region = vec![
        32,
        512,
        1024,
        1024 * 2,
        1024 * 4,
        1024 * 8,
        1024 * 16,
        1024 * 20,
    ];
    let batch_max_sizes = vec![1];
    let shard_count = 1;
    let keys_per_command = 1;
    let payload_size = 4096;
    let cpus = 12;
    // two conflict rates: 2% and 10%
    let key_gens = vec![
        KeyGen::ConflictPool {
            conflict_rate: 2,
            pool_size: 1,
        },
        KeyGen::ConflictPool {
            conflict_rate: 10,
            pool_size: 1,
        },
    ];
    let mut workloads = Vec::new();
    for key_gen in key_gens {
        let workload = Workload::new(
            shard_count,
            key_gen,
            keys_per_command,
            COMMANDS_PER_CLIENT_WAN,
            payload_size,
        );
        workloads.push(workload);
    }
    let mut skip = |protocol, _, clients| {
        // skip Atlas with more than 20480 (1024 * 20) clients — with the
        // loads above this never triggers, since 1024 * 20 is the maximum
        protocol == Protocol::AtlasLocked && clients > 1024 * 20
    };
    // set shards in each config
    configs
        .iter_mut()
        .for_each(|(_protocol, config)| config.set_shard_count(shard_count));
    // init logging
    let mut progress = TracingProgressBar::init(
        (workloads.len()
            * clients_per_region.len()
            * configs.len()
            * batch_max_sizes.len()) as u64,
    );
    // create AWS planet
    let planet = Some(Planet::from(LATENCY_AWS));
    baremetal_bench(
        regions,
        shard_count,
        planet,
        configs,
        clients_per_region,
        workloads,
        batch_max_sizes,
        cpus,
        &mut skip,
        &mut progress,
        results_dir,
    )
    .await
}
/// Experiment driver for the fairness / tail-latency plot: runs all
/// protocols at low load (256 and 512 clients per region) on AWS, so that
/// per-region latency distributions can be compared.
#[allow(dead_code)]
async fn fairness_and_tail_latency_plot() -> Result<(), Report> {
    // folder where all results will be stored
    let results_dir = "../results_fairness_and_tail_latency";
    let regions = vec![
        Region::EuWest1,
        Region::UsWest1,
        Region::ApSoutheast1,
        Region::CaCentral1,
        Region::SaEast1,
    ];
    let n = regions.len();
    let mut configs = vec![
        // (protocol, (n, f, tiny quorums, clock bump interval, skip fast ack))
        (Protocol::FPaxos, config!(n, 1, false, None, false)),
        (Protocol::FPaxos, config!(n, 2, false, None, false)),
        (Protocol::TempoAtomic, config!(n, 1, false, None, false)),
        (Protocol::TempoAtomic, config!(n, 2, false, None, false)),
        (Protocol::AtlasLocked, config!(n, 1, false, None, false)),
        (Protocol::AtlasLocked, config!(n, 2, false, None, false)),
        (Protocol::EPaxosLocked, config!(n, 2, false, None, false)),
        (Protocol::CaesarLocked, config!(n, 2, false, None, false)),
    ];
    let clients_per_region = vec![256, 512];
    let batch_max_sizes = vec![1];
    let shard_count = 1;
    let keys_per_command = 1;
    let payload_size = 100;
    let cpus = 8;
    // 2% conflicts from a one-key pool
    let key_gen = KeyGen::ConflictPool {
        conflict_rate: 2,
        pool_size: 1,
    };
    let key_gens = vec![key_gen];
    let mut workloads = Vec::new();
    for key_gen in key_gens {
        let workload = Workload::new(
            shard_count,
            key_gen,
            keys_per_command,
            COMMANDS_PER_CLIENT_WAN,
            payload_size,
        );
        workloads.push(workload);
    }
    let mut skip = |protocol, _, clients| {
        // only run FPaxos with 512 clients
        protocol == Protocol::FPaxos && clients != 512
    };
    // set shards in each config
    configs
        .iter_mut()
        .for_each(|(_protocol, config)| config.set_shard_count(shard_count));
    // init logging
    let mut progress = TracingProgressBar::init(
        (workloads.len()
            * clients_per_region.len()
            * configs.len()
            * batch_max_sizes.len()) as u64,
    );
    // this plot runs on real AWS instances (no emulated latency), hence
    // `aws_bench` instead of `baremetal_bench` and no `planet` argument
    // create AWS planet
    // let planet = Some(Planet::from(LATENCY_AWS));
    // baremetal_bench(
    aws_bench(
        regions,
        shard_count,
        // planet,
        configs,
        clients_per_region,
        workloads,
        batch_max_sizes,
        cpus,
        &mut skip,
        &mut progress,
        results_dir,
    )
    .await
}
/// Scratchpad experiment driver ("whatever"): the live configuration runs
/// TempoAtomic on three regions with many Zipf coefficients; the large
/// commented-out sections record alternative multi-key and partial
/// replication setups used in the past.
#[allow(dead_code)]
async fn whatever_plot() -> Result<(), Report> {
    // folder where all results will be stored
    let results_dir = "../results_scalability";
    // THROUGHPUT
    let regions = vec![
        Region::EuWest1,
        Region::UsWest1,
        Region::ApSoutheast1,
        /* Region::CaCentral1,
         * Region::SaEast1, */
    ];
    let n = regions.len();
    let mut configs = vec![
        // (protocol, (n, f, tiny quorums, clock bump interval, skip fast ack))
        (Protocol::TempoAtomic, config!(n, 1, false, None, false)),
        /* (Protocol::TempoAtomic, config!(n, 2, false, None, false)), */
        /*
        (Protocol::FPaxos, config!(n, 1, false, None, false)),
        (Protocol::FPaxos, config!(n, 2, false, None, false)),
        (Protocol::AtlasLocked, config!(n, 1, false, None, false)),
        (Protocol::AtlasLocked, config!(n, 2, false, None, false)),
        */
    ];
    let clients_per_region = vec![
        // 32,
        // 64,
        // 128,
        // 256,
        // 512, 768, 1024,
        // 1280,
        // 1536,
        // 1792,
        2048,
        // 2560,
        // 3072,
        // 3584,
        // 4096,
        // 1024 * 4,
        // 1024 * 8,
        // 1024 * 12,
        // 1024 * 16,
        // 1024 * 32,
    ];
    let batch_max_sizes = vec![1];
    let shard_count = 1;
    let keys_per_command = 1;
    let payload_size = 100;
    let cpus = 2;
    // one workload per Zipf coefficient
    let coefficients = vec![
        // 0.5, 0.75, 1.0,
        // 1.25, 1.5, 1.75,
        2.0, 2.5, 3.0, 3.5, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
    ];
    let key_gens = coefficients.into_iter().map(|coefficient| KeyGen::Zipf {
        total_keys_per_shard: 1_000_000,
        coefficient,
    });
    let mut workloads = Vec::new();
    for key_gen in key_gens {
        let workload = Workload::new(
            shard_count,
            key_gen,
            keys_per_command,
            COMMANDS_PER_CLIENT_LAN,
            payload_size,
        );
        workloads.push(workload);
    }
    let mut skip = |protocol, _, clients| {
        // skip Atlas with more than 20480 (1024 * 20) clients
        protocol == Protocol::AtlasLocked && clients > 1024 * 20
    };
    /*
    // MULTI_KEY
    let configs = vec![
        // (protocol, (n, f, tiny quorums, clock bump interval, skip fast ack))
        (Protocol::TempoAtomic, config!(n, 1, false, None, false)),
        (Protocol::TempoAtomic, config!(n, 2, false, None, false)),
        (Protocol::TempoLocked, config!(n, 1, false, None, false)),
        (Protocol::TempoLocked, config!(n, 2, false, None, false)),
    ];
    let clients_per_region =
        vec![256, 1024, 1024 * 4, 1024 * 8, 1024 * 16, 1024 * 32];
    let zipf_key_count = 1_000_000;
    let skip = |_, _, _| false;
    */
    /*
    // PARTIAL REPLICATION
    let regions = vec![
        Region::EuWest1,
        Region::UsWest1,
        Region::ApSoutheast1,
    ];
    let n = regions.len();
    let mut configs = vec![
        // (protocol, (n, f, tiny quorums, clock bump interval, skip fast ack))
        // (Protocol::AtlasLocked, config!(n, 1, false, None, false)),
        (Protocol::TempoAtomic, config!(n, 1, false, None, false)),
    ];
    let clients_per_region = vec![
        // 1024 / 4,
        // 1024 / 2,
        1024,
        // 1024 * 2,
        1024 * 4,
        1024 * 8,
        // 1024 * 12,
        1024 * 16,
        // 1024 * 20,
        // 1024 * 24,
        1024 * 32,
        // 1024 * 36,
        // 1024 * 40,
        1024 * 48,
        // 1024 * 56,
        1024 * 64,
        // 1024 * 96,
        // 1024 * 128,
        // 1024 * 160,
        // 1024 * 192,
        // 1024 * 224,
        // 1024 * 240,
        // 1024 * 256,
        // 1024 * 272,
    ];
    let clients_per_region = vec![
        // 1024,
        // 1024 * 2,
        // 1024 * 4,
        // 1024 * 6,
        // 1024 * 8,
        // 1024 * 12,
        // 1024 * 16,
        // 1024 * 20,
        1024 * 24,
    ];
    let clients_per_region = vec![
        1024,
        1024 * 4,
        1024 * 8,
        1024 * 16,
        1024 * 32,
        1024 * 48,
        1024 * 64,
    ];
    let shard_count = 5;
    let keys_per_shard = 1_000_000;
    let key_gen = KeyGen::Zipf {
        coefficient: 128.0,
        keys_per_shard,
    };
    let keys_per_command = 2;
    let payload_size = 0;
    let mut workloads = Vec::new();
    let workload = Workload::new(
        shard_count,
        key_gen,
        keys_per_command,
        COMMANDS_PER_CLIENT,
        payload_size,
    );
    workloads.push(workload);
    let skip = |_, _, _| false;
    */
    // set shards in each config
    configs
        .iter_mut()
        .for_each(|(_protocol, config)| config.set_shard_count(shard_count));
    // init logging
    let mut progress = TracingProgressBar::init(
        (workloads.len()
            * clients_per_region.len()
            * configs.len()
            * batch_max_sizes.len()) as u64,
    );
    // create AWS planet
    // let planet = Some(Planet::from(LATENCY_AWS));
    let planet = None; // if delay is not to be injected
    baremetal_bench(
        regions,
        shard_count,
        planet,
        configs,
        clients_per_region,
        workloads,
        batch_max_sizes,
        cpus,
        &mut skip,
        &mut progress,
        results_dir,
    )
    .await
    /*
    local_bench(
        regions,
        shard_count,
        planet,
        configs,
        clients_per_region,
        workloads,
        batch_max_size,
        skip,
        progress,
    )
    .await
    aws_bench(
        regions,
        shard_count,
        configs,
        clients_per_region,
        workloads,
        batch_max_size,
        skip,
        progress,
    ).await
    */
}
/// Sets up the local testbed (all processes on this machine) and runs the
/// given benchmark matrix through `run_bench`.
///
/// Changes versus the previous version: the vestigial empty `where` clause
/// is gone, and `skip` is bound by `FnMut` (instead of `Fn`) so stateful
/// skip closures work here too — matching `baremetal_bench`.
#[allow(dead_code)]
async fn local_bench(
    regions: Vec<Region>,
    shard_count: usize,
    planet: Option<Planet>,
    configs: Vec<(Protocol, Config)>,
    clients_per_region: Vec<usize>,
    workloads: Vec<Workload>,
    batch_max_sizes: Vec<usize>,
    cpus: usize,
    // returns whether a (protocol, config, clients) combination is skipped
    skip: &mut impl FnMut(Protocol, Config, usize) -> bool,
    progress: &mut TracingProgressBar,
    results_dir: impl AsRef<Path>,
) -> Result<(), Report> {
    // setup local machines (everything runs on this host)
    let machines = fantoch_exp::testbed::local::setup(
        regions,
        shard_count,
        BRANCH.to_string(),
        RUN_MODE,
        all_features(),
    )
    .await
    .wrap_err("local spawn")?;
    // run benchmarks
    run_bench(
        machines,
        Testbed::Local,
        planet,
        configs,
        clients_per_region,
        workloads,
        batch_max_sizes,
        cpus,
        skip,
        progress,
        results_dir,
    )
    .await
    .wrap_err("run bench")?;
    Ok(())
}
#[allow(dead_code)]
async fn baremetal_bench(
regions: Vec<Region>,
shard_count: usize,
planet: Option<Planet>,
configs: Vec<(Protocol, Config)>,
clients_per_region: Vec<usize>,
workloads: Vec<Workload>,
batch_max_sizes: Vec<usize>,
cpus: usize,
skip: &mut impl FnMut(Protocol, Config, usize) -> bool,
progress: &mut TracingProgressBar,
results_dir: impl AsRef<Path>,
) -> Result<(), Report>
where
{
// create launcher
let mut launchers = fantoch_exp::testbed::baremetal::create_launchers(
®ions,
shard_count,
);
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | true |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_exp/src/testbed/aws.rs | fantoch_exp/src/testbed/aws.rs | use super::Nickname;
use crate::machine::{Machine, Machines};
use crate::{FantochFeature, RunMode, Testbed};
use color_eyre::Report;
use std::collections::HashMap;
use std::time::Duration;
use tsunami::Tsunami;
/// Spawns an AWS testbed and returns the resulting `Machines`: one server
/// per (region, shard) pair plus one client per region, placed according to
/// `super::create_placement`.
pub async fn setup(
    launcher: &mut tsunami::providers::aws::Launcher<
        rusoto_credential::DefaultCredentialsProvider,
    >,
    launch_mode: tsunami::providers::aws::LaunchMode,
    regions: Vec<rusoto_core::Region>,
    shard_count: usize,
    server_instance_type: String,
    client_instance_type: String,
    max_spot_instance_request_wait_secs: u64,
    branch: String,
    run_mode: RunMode,
    features: Vec<FantochFeature>,
) -> Result<Machines<'_>, Report> {
    // one nickname per machine (servers and clients)
    let nicknames = super::create_nicknames(shard_count, &regions);
    // spawn all instances and run the fantoch setup on each
    let launched = spawn_and_setup(
        launcher,
        launch_mode,
        nicknames,
        server_instance_type,
        client_instance_type,
        max_spot_instance_request_wait_secs,
        branch,
        run_mode,
        features,
    )
    .await?;
    // compute expected machine counts before `regions` is consumed below
    let region_count = regions.len();
    let server_count = region_count * shard_count;
    let client_count = region_count;
    let placement = super::create_placement(shard_count, regions);
    // index the launched vms: servers by process id, clients by region
    let mut servers = HashMap::with_capacity(server_count);
    let mut clients = HashMap::with_capacity(client_count);
    for (Nickname { shard_id, region }, vm) in launched {
        let vm = Machine::Tsunami(vm);
        if let Some(shard_id) = shard_id {
            // server: map its (region, shard) pair back to a process id
            let (process_id, _region_index) = placement
                .get(&(region, shard_id))
                .expect("region and shard id should exist in placement");
            let first_insert = servers.insert(*process_id, vm).is_none();
            assert!(first_insert);
        } else {
            // client: keyed by its region
            let first_insert = clients.insert(region, vm).is_none();
            assert!(first_insert);
        }
    }
    Ok(Machines::new(placement, servers, clients))
}
/// Spawns one AWS instance per nickname (servers use `server_instance_type`,
/// clients use `client_instance_type`), runs the fantoch setup on each, and
/// returns the connected machines paired with their parsed nicknames.
async fn spawn_and_setup<'a>(
    launcher: &'a mut tsunami::providers::aws::Launcher<
        rusoto_credential::DefaultCredentialsProvider,
    >,
    launch_mode: tsunami::providers::aws::LaunchMode,
    nicknames: Vec<Nickname>,
    server_instance_type: String,
    client_instance_type: String,
    max_spot_instance_request_wait_secs: u64,
    branch: String,
    run_mode: RunMode,
    features: Vec<FantochFeature>,
) -> Result<Vec<(Nickname, tsunami::Machine<'a>)>, Report> {
    // create machine descriptors
    let mut descriptors = Vec::with_capacity(nicknames.len());
    for nickname in nicknames {
        // get instance name
        let name = nickname.to_string();
        // get instance type and region
        let instance_type = if nickname.shard_id.is_some() {
            // in this case, it's a server
            server_instance_type.clone()
        } else {
            // otherwise, it's a client
            client_instance_type.clone()
        };
        let region = nickname
            .region
            .name()
            .parse::<rusoto_core::Region>()
            .expect("creating a rusoto_core::Region should work");
        // create setup; `region_with_ubuntu_ami` looks up the AMI for this
        // region, hence the `.await?` in the middle of the builder chain
        let setup = tsunami::providers::aws::Setup::default()
            .instance_type(instance_type)
            .region_with_ubuntu_ami(region)
            .await?
            .setup(crate::machine::fantoch_setup(
                branch.clone(),
                run_mode,
                features.clone(),
                Testbed::Aws,
            ));
        // save setup
        descriptors.push((name, setup))
    }
    // spawn and connect
    launcher
        .set_mode(launch_mode)
        // make sure ports are open
        // TODO create VPC and use private ips
        .open_ports();
    let max_wait =
        Some(Duration::from_secs(max_spot_instance_request_wait_secs));
    launcher.spawn(descriptors, max_wait).await?;
    let vms = launcher.connect_all().await?;
    // pair each connected vm with the nickname encoded in its name
    let vms = vms
        .into_iter()
        .map(|(name, vm)| {
            let nickname = Nickname::from_string(name);
            (nickname, vm)
        })
        .collect();
    Ok(vms)
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_exp/src/testbed/local.rs | fantoch_exp/src/testbed/local.rs | use super::Nickname;
use crate::machine::{Machine, Machines};
use crate::{FantochFeature, RunMode, Testbed};
use color_eyre::eyre::WrapErr;
use color_eyre::Report;
use std::collections::HashMap;
/// Sets up `Machines` for a local deployment: every server and client runs
/// on this host (`Machine::Local`), with placement identical to the other
/// testbeds.
///
/// Fix versus the previous version: the `servers` map is reserved with its
/// actual final size, `regions.len() * shard_count`, instead of just
/// `regions.len()` (which under-reserved whenever `shard_count > 1`).
pub async fn setup<'a>(
    regions: Vec<rusoto_core::Region>,
    shard_count: usize,
    branch: String,
    run_mode: RunMode,
    features: Vec<FantochFeature>,
) -> Result<Machines<'a>, Report> {
    // setup local machine that will be the holder of all machines
    crate::machine::local_fantoch_setup(
        branch,
        run_mode,
        features,
        Testbed::Local,
    )
    .await
    .wrap_err("local setup")?;
    // create nicknames for all machines
    let nicknames = super::create_nicknames(shard_count, &regions);
    // there's one server per (region, shard) pair and one client per region
    let server_count = regions.len() * shard_count;
    let client_count = regions.len();
    let placement = super::create_placement(shard_count, regions);
    let mut servers = HashMap::with_capacity(server_count);
    let mut clients = HashMap::with_capacity(client_count);
    for Nickname { region, shard_id } in nicknames {
        let vm = Machine::Local;
        let unique_insert = match shard_id {
            Some(shard_id) => {
                // it's a server; find its process id
                let (process_id, _region_index) =
                    placement.get(&(region, shard_id)).expect(
                        "pair region and shard id should exist in placement",
                    );
                servers.insert(*process_id, vm).is_none()
            }
            None => {
                // it's a client
                clients.insert(region, vm).is_none()
            }
        };
        assert!(unique_insert);
    }
    // check that we have enough machines
    assert_eq!(servers.len(), server_count, "not enough server vms");
    assert_eq!(clients.len(), client_count, "not enough client vms");
    Ok(Machines::new(placement, servers, clients))
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_exp/src/testbed/mod.rs | fantoch_exp/src/testbed/mod.rs | pub mod aws;
pub mod baremetal;
pub mod local;
use crate::config::Placement;
use fantoch::id::{ProcessId, ShardId};
use fantoch::planet::Region;
use std::collections::HashMap;
// separator between the parts of a nickname (tag, region and, for servers,
// shard id)
const NICKNAME_SEP: &str = "_";
// nickname prefix that identifies server machines
const SERVER_TAG: &str = "server";
// nickname prefix that identifies client machines
const CLIENT_TAG: &str = "client";

/// Human-readable machine identifier used to name spawned machines and to
/// recover their role after connecting (see `to_string`/`from_string`).
pub struct Nickname {
    // region where this machine runs
    pub region: Region,
    // shard served by this machine; `None` means it's a client machine
    pub shard_id: Option<ShardId>,
}
impl Nickname {
    /// Creates a nickname from a region name and an optional shard id.
    pub fn new<S: Into<String>>(region: S, shard_id: Option<ShardId>) -> Self {
        Self {
            region: Region::new(region),
            shard_id,
        }
    }

    /// Renders the nickname: `server_<region>_<shard>` for servers,
    /// `client_<region>` for clients (round-trips through `from_string`).
    pub fn to_string(&self) -> String {
        match self.shard_id {
            // machines with a shard id are servers
            Some(shard_id) => format!(
                "{}{}{:?}{}{}",
                SERVER_TAG, NICKNAME_SEP, self.region, NICKNAME_SEP, shard_id
            ),
            // machines without one are clients
            None => format!("{}{}{:?}", CLIENT_TAG, NICKNAME_SEP, self.region),
        }
    }

    /// Parses a nickname previously produced by `to_string`.
    ///
    /// Panics on malformed input (wrong part count, non-numeric shard id, or
    /// an unknown tag).
    pub fn from_string<S: Into<String>>(nickname: S) -> Self {
        let nickname = nickname.into();
        let parts: Vec<_> = nickname.split(NICKNAME_SEP).collect();
        let tag = parts[0];
        if tag == SERVER_TAG {
            // server nicknames have three parts: tag, region, shard id
            assert_eq!(parts.len(), 3);
            let shard_id = parts[2]
                .parse::<ShardId>()
                .expect("shard id should be a number");
            Self::new(parts[1], Some(shard_id))
        } else if tag == CLIENT_TAG {
            // client nicknames have two parts: tag, region
            assert_eq!(parts.len(), 2);
            Self::new(parts[1], None)
        } else {
            panic!("found unexpected tag {} in nickname", tag);
        }
    }
}
/// Creates one nickname per machine: for each region, one server per shard
/// followed by one client — `regions.len() * (shard_count + 1)` in total.
///
/// Takes `&[rusoto_core::Region]` rather than `&Vec<_>` (clippy `ptr_arg`);
/// existing `&Vec` call sites still work via deref coercion. The output is
/// also preallocated to its exact final size.
pub fn create_nicknames(
    shard_count: usize,
    regions: &[rusoto_core::Region],
) -> Vec<Nickname> {
    let mut nicknames = Vec::with_capacity(regions.len() * (shard_count + 1));
    for region in regions {
        // create servers for this region
        for shard_id in 0..shard_count as ShardId {
            nicknames.push(Nickname::new(region.name(), Some(shard_id)));
        }
        // create client for this region
        nicknames.push(Nickname::new(region.name(), None));
    }
    nicknames
}
/// If shard_count = 3, and regions = [A, B, C, D, E], this function outputs a
/// map with 15 entries:
/// - (A, 0) -> 1
/// - (A, 1) -> 6
/// - (A, 2) -> 11
/// - (B, 0) -> 2
/// - (B, 1) -> 7
/// - (B, 2) -> 12
/// - (C, 0) -> 3
/// - and so on
///
/// Note that the order in the `regions` passed in is respected when generating
/// the process ids.
pub fn create_placement(
shard_count: usize,
regions: Vec<rusoto_core::Region>,
) -> Placement {
let n = regions.len();
let placement: HashMap<_, _> = regions
.into_iter()
.enumerate()
.flat_map(|(index, region)| {
let region_index = index + 1;
(0..shard_count)
.map(move |shard_id| (region_index, region.clone(), shard_id))
})
.map(|(region_index, region, shard_id)| {
let process_id = region_index + (shard_id * n);
let region = Region::new(region.name());
(
(region, shard_id as ShardId),
(process_id as ProcessId, region_index),
)
})
.collect();
// check that we correctly generated processs ids
let id_to_shard_id: HashMap<_, _> =
fantoch::util::all_process_ids(shard_count, n).collect();
for ((_, shard_id), (process_id, _)) in placement.iter() {
let expected = id_to_shard_id
.get(process_id)
.expect("generated process id should exist in all ids");
assert_eq!(expected, shard_id)
}
// return generated placement
placement
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_exp/src/testbed/baremetal.rs | fantoch_exp/src/testbed/baremetal.rs | use super::Nickname;
use crate::machine::{Machine, Machines};
use crate::{FantochFeature, RunMode, Testbed};
use color_eyre::eyre::WrapErr;
use color_eyre::Report;
use std::collections::HashMap;
// file with one machine per line, each in `username@hostname` form
const MACHINES: &str = "exp_files/machines";
// ssh private key used to reach the baremetal machines
const PRIVATE_KEY: &str = "~/.ssh/id_rsa";
/// Creates one default baremetal launcher per machine: one server per
/// (region, shard) pair plus one client per region.
///
/// Takes `&[rusoto_core::Region]` rather than `&Vec<_>` (clippy `ptr_arg`);
/// existing `&Vec` call sites still work via deref coercion.
pub fn create_launchers(
    regions: &[rusoto_core::Region],
    shard_count: usize,
) -> Vec<tsunami::providers::baremetal::Machine> {
    let server_count = regions.len() * shard_count;
    let client_count = regions.len();
    let machine_count = server_count + client_count;
    // create one launcher per machine
    (0..machine_count)
        .map(|_| tsunami::providers::baremetal::Machine::default())
        .collect()
}
/// Sets up a baremetal testbed: reads machine addresses from `MACHINES`,
/// runs the fantoch setup on each machine (sequentially), launches them
/// (concurrently, via `join_all`), and assembles the resulting `Machines`.
pub async fn setup<'a>(
    launcher_per_machine: &'a mut Vec<tsunami::providers::baremetal::Machine>,
    regions: Vec<rusoto_core::Region>,
    shard_count: usize,
    branch: String,
    run_mode: RunMode,
    features: Vec<FantochFeature>,
) -> Result<Machines<'a>, Report> {
    // one server per (region, shard) pair and one client per region
    let server_count = regions.len();
    let client_count = regions.len();
    let machine_count = server_count * shard_count + client_count;
    assert_eq!(
        launcher_per_machine.len(),
        machine_count,
        "not enough launchers"
    );
    // get ips and check that we have enough of them
    let content = tokio::fs::read_to_string(MACHINES).await?;
    let machines: Vec<_> = content.lines().take(machine_count).collect();
    assert_eq!(machines.len(), machine_count, "not enough machines");
    // create nicknames for all machines
    let nicknames = super::create_nicknames(shard_count, &regions);
    // get machine and launcher iterators; machines are assigned to
    // nicknames in file order
    let mut machines_iter = machines.into_iter();
    let mut launcher_iter = launcher_per_machine.iter_mut();
    // setup machines: `baremetal_setup` is awaited here (one at a time),
    // while the `baremetal_launch` futures are only collected, to be run
    // concurrently below
    let mut launches = Vec::with_capacity(machine_count);
    for nickname in nicknames {
        // find one machine and a launcher for this machine
        let machine = machines_iter.next().unwrap();
        let launcher = launcher_iter.next().unwrap();
        // create baremetal setup
        let setup = baremetal_setup(
            machine,
            branch.clone(),
            run_mode,
            features.clone(),
        )
        .await
        .wrap_err("baremetal setup")?;
        // save baremetal launch
        let launch = baremetal_launch(launcher, nickname, setup);
        launches.push(launch);
    }
    // create placement, servers, and clients
    let placement = super::create_placement(shard_count, regions);
    let mut servers = HashMap::with_capacity(server_count);
    let mut clients = HashMap::with_capacity(client_count);
    // launch all machines concurrently and index them by role
    for result in futures::future::join_all(launches).await {
        let vm = result.wrap_err("baremetal launch")?;
        let Nickname { region, shard_id } = Nickname::from_string(&vm.nickname);
        let vm = Machine::Tsunami(vm);
        let unique_insert = match shard_id {
            Some(shard_id) => {
                // it's a server; find it's process id
                let (process_id, _region_index) =
                    placement.get(&(region, shard_id)).expect(
                        "pair region and shard id should exist in placement",
                    );
                servers.insert(*process_id, vm).is_none()
            }
            None => {
                // it's a client
                clients.insert(region, vm).is_none()
            }
        };
        assert!(unique_insert);
    }
    // check that we have enough machines
    assert_eq!(
        servers.len(),
        server_count * shard_count,
        "not enough server vms"
    );
    assert_eq!(clients.len(), client_count, "not enough client vms");
    let machines = Machines::new(placement, servers, clients);
    Ok(machines)
}
/// Builds a `tsunami` baremetal setup for a machine given as
/// `username@hostname`: discovers its ip via `hostname -I` over ssh and
/// attaches the appropriate fantoch setup script.
async fn baremetal_setup(
    machine: &str,
    branch: String,
    run_mode: RunMode,
    features: Vec<FantochFeature>,
) -> Result<tsunami::providers::baremetal::Setup, Report> {
    // split `username@addr`
    let parts: Vec<_> = machine.split('@').collect();
    assert_eq!(parts.len(), 2, "machine should have the form username@addr");
    let username = parts[0].to_string();
    let hostname = parts[1].to_string();
    // fetch public ip
    let command = String::from("hostname -I");
    let ips = Machine::ssh_exec(
        &username,
        &hostname,
        &std::path::PathBuf::from(PRIVATE_KEY),
        command,
    )
    .await
    .wrap_err("hostname -I")?;
    tracing::info!("hostname -I of {}: {:?}", machine, ips);
    // hostname should return at least one ip like so "10.10.5.61 172.17.0.1";
    // one of the veleta machines returns
    // "169.254.0.2 10.10.5.204 11.1.212.203 172.17.0.1 192.168.224.1" and we
    // want the "10.10.*.*" one; thus, prefer a "10.10"-prefixed address if
    // any is present, and fall back to the first one otherwise
    let parts: Vec<_> = ips.split(' ').collect();
    let ip = parts
        .iter()
        .find(|part| part.starts_with("10.10"))
        .unwrap_or(&parts[0]);
    // append ssh port
    // - TODO: I think this should be fixed in tsunami, not here
    let addr = format!("{}:22", ip);
    tracing::debug!("hostname -I: extracted {:?}", ip);
    // veleta machines other than veleta8 are pre-provisioned, so they get
    // the lighter veleta setup instead of the full fantoch one
    let fantoch_setup =
        if machine.contains("veleta") && !machine.contains("veleta8") {
            // don't setup if this is a veleta machine that is not veleta8
            crate::machine::veleta_fantoch_setup()
        } else {
            crate::machine::fantoch_setup(
                branch,
                run_mode,
                features,
                Testbed::Baremetal,
            )
        };
    let setup =
        tsunami::providers::baremetal::Setup::new(addr, Some(username))?
            .key_path(PRIVATE_KEY)
            .setup(fantoch_setup);
    Ok(setup)
}
/// Launches a single baremetal machine described by `setup`, connects to it,
/// and returns the connected handle.
async fn baremetal_launch(
    launcher: &mut tsunami::providers::baremetal::Machine,
    nickname: Nickname,
    setup: tsunami::providers::baremetal::Setup,
) -> Result<tsunami::Machine<'_>, Report> {
    use tsunami::providers::Launcher;
    // render the nickname and extract the launch region
    let region = nickname.region.name().clone();
    let nickname = nickname.to_string();
    // a descriptor launching exactly this one machine
    let descriptor = tsunami::providers::LaunchDescriptor {
        region,
        max_wait: None,
        machines: vec![(nickname.clone(), setup)],
    };
    // launch and connect
    launcher.launch(descriptor).await?;
    let mut connected = launcher.connect_all().await?;
    // exactly one machine, under the expected nickname, must come back
    assert_eq!(
        connected.len(),
        1,
        "baremetal launched didn't return a single machine"
    );
    assert!(
        connected.contains_key(&nickname),
        "baremetal machines incorrectly identified"
    );
    let machine = connected.remove(&nickname).unwrap();
    assert_eq!(
        machine.nickname, nickname,
        "baremetal machine has the wrong nickname"
    );
    Ok(machine)
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/lib.rs | fantoch_plot/src/lib.rs | #![deny(rust_2018_idioms)]
mod db;
mod fmt;
#[cfg(feature = "pyo3")]
pub mod plot;
// Re-exports.
pub use db::{ExperimentData, LatencyPrecision, ResultsDB, Search};
pub use fmt::PlotFmt;
#[cfg(feature = "pyo3")]
use color_eyre::eyre::WrapErr;
#[cfg(feature = "pyo3")]
use color_eyre::Report;
#[cfg(feature = "pyo3")]
use fantoch::client::KeyGen;
// use fantoch::executor::ExecutorMetricsKind;
use fantoch::id::ProcessId;
#[cfg(feature = "pyo3")]
use fantoch::protocol::ProtocolMetricsKind;
#[cfg(feature = "pyo3")]
use fantoch_exp::Protocol;
#[cfg(feature = "pyo3")]
use plot::axes::Axes;
#[cfg(feature = "pyo3")]
use plot::figure::Figure;
#[cfg(feature = "pyo3")]
use plot::pyplot::PyPlot;
#[cfg(feature = "pyo3")]
use plot::Matplotlib;
#[cfg(feature = "pyo3")]
use pyo3::prelude::*;
#[cfg(feature = "pyo3")]
use pyo3::types::PyDict;
#[cfg(feature = "pyo3")]
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
// Figure geometry shared by all plots.
// defaults: [6.4, 4.8]
// copied from: https://github.com/jonhoo/thesis/blob/master/graphs/common.py
#[cfg(feature = "pyo3")]
const GOLDEN_RATIO: f64 = 1.61803f64;
// figure width derived from 8.5 (presumably inches, US-letter width — TODO
// confirm) and the golden ratio
#[cfg(feature = "pyo3")]
const FIGWIDTH: f64 = 8.5 / GOLDEN_RATIO;
// no longer golden ratio
#[cfg(feature = "pyo3")]
const FIGSIZE: (f64, f64) = (FIGWIDTH, (FIGWIDTH / GOLDEN_RATIO) - 0.6);
// adjust are percentages:
// - setting top to 0.80, means we leave the top 20% free
// - setting bottom to 0.20, means we leave the bottom 20% free
// presumably consumed by `start_plot`/`end_plot` (not visible in this chunk)
#[cfg(feature = "pyo3")]
const ADJUST_TOP: f64 = 0.81;
#[cfg(feature = "pyo3")]
const ADJUST_BOTTOM: f64 = 0.15;
/// Whether plots should draw error bars, and at which percentile.
#[derive(Debug, Clone)]
pub enum ErrorBar {
    With(f64),
    Without,
}

impl ErrorBar {
    /// File-name suffix identifying this configuration: empty when error
    /// bars are disabled, `_p<percentile * 100>` when enabled.
    pub fn name(&self) -> String {
        if let Self::With(percentile) = self {
            format!("_p{}", percentile * 100f64)
        } else {
            String::new()
        }
    }
}
/// Which latency statistic a plot reports.
#[derive(Clone, Copy)]
pub enum LatencyMetric {
    Average,
    Percentile(f64),
}

impl LatencyMetric {
    /// File-name suffix: empty for the average, `_p<percentile * 100>` for
    /// a percentile.
    pub fn name(&self) -> String {
        match *self {
            LatencyMetric::Percentile(percentile) => {
                format!("_p{}", percentile * 100f64)
            }
            LatencyMetric::Average => String::default(),
        }
    }
}
/// Machine-level metric rendered in heatmap plots.
#[derive(Clone, Copy)]
pub enum HeatmapMetric {
    CPU,
    NetRecv,
    NetSend,
}

impl HeatmapMetric {
    /// Short identifier used in output file names.
    pub fn name(&self) -> String {
        let name = match self {
            Self::CPU => "cpu",
            Self::NetRecv => "net_in",
            Self::NetSend => "net_out",
        };
        name.to_string()
    }

    /// Maps a raw metric `value` to a utilization percentage: CPU values
    /// are already percentages (max 100), while network values are bytes
    /// relative to a 10Gbit link.
    pub fn utilization(&self, value: f64) -> usize {
        let max = if let Self::CPU = self {
            100f64
        } else {
            // 10GBit to B
            10_000_000_000f64 / 8f64
        };
        (value * 100f64 / max) as usize
    }
}
/// What the y axis of a throughput plot shows.
#[derive(Clone, Copy)]
pub enum ThroughputYAxis {
    Latency(LatencyMetric),
    CPU,
}

#[cfg(feature = "pyo3")]
impl ThroughputYAxis {
    /// Identifier used in output file names.
    pub fn name(&self) -> String {
        match self {
            Self::CPU => "cpu".to_string(),
            Self::Latency(latency) => {
                let suffix = latency.name();
                format!("latency{}", suffix)
            }
        }
    }

    /// Human-readable y-axis label.
    fn y_label(&self, latency_precision: LatencyPrecision) -> String {
        if let Self::Latency(_) = self {
            format!("latency ({})", latency_precision.name())
        } else {
            String::from("CPU utilization (%)")
        }
    }
}
/// Visual attribute that a `style_fun` callback may override per search
/// (see the `style_fun: Option<Box<dyn Fn(&Search) -> HashMap<Style,
/// String>>>` parameters of the plot functions below).
#[derive(PartialEq, Eq, Hash)]
pub enum Style {
    Label,
    Color,
    Hatch,
    Marker,
    LineStyle,
    LineWidth,
}
/// Which set of metrics a plot refers to: a single process, all processes
/// aggregated, or all clients aggregated.
pub enum MetricsType {
    Process(ProcessId),
    ProcessGlobal,
    ClientGlobal,
}

impl MetricsType {
    /// Identifier used in output file names.
    pub fn name(&self) -> String {
        match self {
            MetricsType::ProcessGlobal => "process_global".to_string(),
            MetricsType::ClientGlobal => "client_global".to_string(),
            MetricsType::Process(process_id) => {
                format!("process_{}", process_id)
            }
        }
    }
}
// Selects which axis a scale adjustment applies to — presumably consumed by
// an axis-scaling helper elsewhere in this file (not visible in this chunk).
#[cfg(feature = "pyo3")]
enum AxisToScale {
    X,
    Y,
}
/// Applies the global matplotlib style shared by all plots: figure size,
/// font sizes, embeddable fonts for pdf/ps output, and axes line width.
/// `newsgott` selects an alternative font — see `Matplotlib::new` (not
/// visible here) for its exact effect.
#[cfg(feature = "pyo3")]
pub fn set_global_style(newsgott: bool) -> Result<(), Report> {
    // start python
    let gil = Python::acquire_gil();
    let py = gil.python();
    let lib = Matplotlib::new(py, newsgott)?;
    // need to load `PyPlot` for the following to work
    let _ = PyPlot::new(py)?;
    // adjust fig size
    let kwargs = pydict!(py, ("figsize", FIGSIZE));
    lib.rc("figure", Some(kwargs))?;
    // adjust font size
    let kwargs = pydict!(py, ("size", 12));
    lib.rc("font", Some(kwargs))?;
    let kwargs = pydict!(py, ("fontsize", 12));
    lib.rc("legend", Some(kwargs))?;
    // embed fonts in pdf/ps output: matplotlib fonttype 42 = TrueType
    // (Type 42), keeping text selectable/editable in the generated files
    let kwargs = pydict!(py, ("fonttype", 42));
    lib.rc("pdf", Some(kwargs))?;
    lib.rc("ps", Some(kwargs))?;
    // adjust axes linewidth
    let kwargs = pydict!(py, ("linewidth", 1));
    lib.rc("axes", Some(kwargs))?;
    Ok(())
}
/// Plots the recovery experiment: a 2x2 grid of throughput-over-time
/// subplots — Taiwan, Finland, South Carolina, and "all sites" — each
/// comparing Atlas (n = 3, f = 1) against FPaxos (n = 3, f = 1).
/// Each input tuple is `(atlas_series, fpaxos_series)`; x is the sample
/// index (presumably one sample per second, given the "time (s)" label —
/// TODO confirm).
#[cfg(feature = "pyo3")]
pub fn recovery_plot(
    taiwan: (Vec<u64>, Vec<u64>),
    finland: (Vec<u64>, Vec<u64>),
    south_carolina: (Vec<u64>, Vec<u64>),
    all_sites: (Vec<u64>, Vec<u64>),
    output_dir: Option<&str>,
    output_file: &str,
) -> Result<(), Report> {
    // start python
    let gil = Python::acquire_gil();
    let py = gil.python();
    let plt = PyPlot::new(py)?;
    // start plot:
    // - adjust height space and width space between plots
    let kwargs = pydict!(py, ("hspace", 0.5), ("wspace", 0.2));
    let (fig, _) = start_plot(py, &plt, Some(kwargs))?;
    // increase width and height
    let (width, height) = FIGSIZE;
    fig.set_size_inches(width + 1.5, height + 1.5)?;
    // the number of plotted protocols will be 2
    let plotted = 2;
    // the xrange is fixed
    let x_range = (10, 70);
    // the yrange depends on whether we have all sites or not
    let y_range = |subplot_title| {
        if subplot_title == "all sites" {
            (0, 3000)
        } else {
            (0, 1200)
        }
    };
    // one subplot per site group, positioned 1..=4 in the 2x2 grid
    for (subplot, (atlas_data, fpaxos_data), subplot_title) in vec![
        (1, taiwan, "Taiwan"),
        (2, finland, "Finland"),
        (3, south_carolina, "South Carolina"),
        (4, all_sites, "all sites"),
    ] {
        let ax = plt.subplot(2, 2, subplot, None)?;
        // set title
        ax.set_title(subplot_title)?;
        for (search, y) in vec![
            (Search::new(3, 1, Protocol::AtlasLocked), atlas_data),
            (Search::new(3, 1, Protocol::FPaxos), fpaxos_data),
        ] {
            // x is simply the 1-based sample index
            let x: Vec<_> = (1..=y.len()).collect();
            // plot it!
            let kwargs = line_style(py, search, &None)?;
            ax.plot(x, y, None, Some(kwargs))?;
        }
        // set x limits
        let (x_min, x_max) = x_range;
        let kwargs = pydict!(py, ("xmin", x_min), ("xmax", x_max));
        ax.set_xlim(Some(kwargs))?;
        // set y limits
        let (y_min, y_max) = y_range(subplot_title);
        let kwargs = pydict!(py, ("ymin", y_min), ("ymax", y_max));
        ax.set_ylim(Some(kwargs))?;
        // set legend only in the first plot
        if subplot == 1 {
            // specific pull-up for this kind of plot
            let x_bbox_to_anchor = Some(1.01);
            let y_bbox_to_anchor = Some(1.51);
            add_legend(
                plotted,
                None,
                x_bbox_to_anchor,
                y_bbox_to_anchor,
                None,
                py,
                &ax,
            )?;
        }
        // set labels: y label only on the left column, x label only on the
        // bottom row
        if subplot == 1 || subplot == 3 {
            ax.set_ylabel("throughput (K ops/s)", None)?;
        }
        if subplot == 3 || subplot == 4 {
            ax.set_xlabel("time (s)", None)?;
        }
    }
    // end plot
    end_plot(plotted > 0, output_dir, output_file, py, &plt, Some(fig))?;
    Ok(())
}
/// Grouped bar plot evaluating the NFR optimization (`search.nfr(..)`
/// flag — presumably "non-fault-tolerant reads"; TODO confirm): one bar
/// group per read-only percentage, one bar per protocol. Each protocol is
/// drawn twice — the `nfr = false` run as a plain light-gray bar first,
/// then the `nfr = true` run in the protocol's style on top — so the gray
/// remainder visualizes the latency saved by NFR. Fast-path ratios are
/// printed to stdout as a side diagnostic.
///
/// Fix vs. previous revision: when an experiment is missing, `-1` is now
/// pushed onto `fast_path_ratios` (matching `fast_path_plot`'s convention)
/// so the printed diagnostics stay aligned with `read_only_percentages`.
#[cfg(feature = "pyo3")]
pub fn nfr_plot(
    n: usize,
    read_only_percentages: Vec<usize>,
    protocols: Vec<(Protocol, Option<usize>)>,
    key_gen: KeyGen,
    clients_per_region: usize,
    payload_size: usize,
    legend_order: Option<Vec<usize>>,
    style_fun: Option<Box<dyn Fn(&Search) -> HashMap<Style, String>>>,
    latency_precision: LatencyPrecision,
    output_dir: Option<&str>,
    output_file: &str,
    db: &ResultsDB,
) -> Result<(), Report> {
    const FULL_REGION_WIDTH: f64 = 10f64;
    const MAX_COMBINATIONS: usize = 5;
    // 80% of `FULL_REGION_WIDTH` when `MAX_COMBINATIONS` is reached
    const BAR_WIDTH: f64 = FULL_REGION_WIDTH * 0.8 / MAX_COMBINATIONS as f64;
    assert!(
        protocols.len() <= MAX_COMBINATIONS,
        "nfr_plot: expected less searches than the max number of combinations"
    );
    // compute x: one per `read_only_percentage`
    let x: Vec<_> = (0..read_only_percentages.len())
        .map(|i| i as f64 * FULL_REGION_WIDTH)
        .collect();
    // we need to shift all to the left by half of the number of combinations
    let protocol_count = protocols.len();
    let shift_left = protocol_count as f64 / 2f64;
    // we also need to shift half bar to the right
    let shift_right = 0.5;
    let protocols =
        protocols.into_iter().enumerate().map(|(index, protocol)| {
            // compute index according to shifts
            let mut base = index as f64 - shift_left + shift_right;
            // HACK to separate move `f = 1` (i.e. the first 2 searches) a bit
            // to the left and `f = 2` (i.e. the remaining 3
            // searches) a bit to the right
            if protocol_count == 5 {
                if index < 2 {
                    base += 0.25;
                } else {
                    base += 0.75;
                }
            }
            // compute combination's shift
            let shift = base * BAR_WIDTH;
            (shift, protocol)
        });
    // start python
    let gil = Python::acquire_gil();
    let py = gil.python();
    let plt = PyPlot::new(py)?;
    // start plot
    let (fig, ax) = start_plot(py, &plt, None)?;
    // keep track of the number of plotted instances
    let mut plotted = 0;
    // compute legend order: if not defined, then it's the order given by
    // `protocols`
    let legend_order = legend_order
        .unwrap_or_else(|| (0..protocols.len()).collect::<Vec<_>>());
    assert_eq!(
        legend_order.len(),
        protocols.len(),
        "legend order should contain the same number of protocols"
    );
    let protocols_with_legends = protocols.into_iter().zip(legend_order);
    let mut legends = BTreeMap::new();
    // `false` first so the gray (no-NFR) bars end up behind the styled
    // (NFR) bars drawn in the second pass
    for nfr in vec![false, true] {
        for ((shift, (protocol, f)), legend_order) in
            protocols_with_legends.clone()
        {
            // set `f` to be a minority in case it's not set
            let f = f.unwrap_or(n / 2);
            let mut y = Vec::new();
            let mut fast_path_ratios = Vec::new();
            for read_only_percentage in read_only_percentages.clone() {
                let mut search = Search::new(n, f, protocol);
                // filter by key gen, clients per region and payload size
                search
                    .key_gen(key_gen)
                    .clients_per_region(clients_per_region)
                    .payload_size(payload_size)
                    .read_only_percentage(read_only_percentage)
                    .nfr(nfr);
                let mut exp_data = db.find(search)?;
                match exp_data.len() {
                    0 => {
                        eprintln!(
                            "missing data for {} f = {} nfr = {} reads = {}",
                            PlotFmt::protocol_name(search.protocol),
                            search.f,
                            nfr,
                            read_only_percentage,
                        );
                        y.push(0);
                        // keep the diagnostics aligned with
                        // `read_only_percentages`; -1 marks missing data
                        // (same convention as `fast_path_plot`)
                        fast_path_ratios.push(-1);
                        continue;
                    }
                    1 => (),
                    _ => {
                        let matches: Vec<_> = exp_data
                            .into_iter()
                            .map(|(timestamp, _, _)| {
                                timestamp.path().display().to_string()
                            })
                            .collect();
                        panic!("found more than 1 matching experiment for this search criteria: search {:?} | matches {:?}", search, matches);
                    }
                };
                let (_, _, exp_data) = exp_data.pop().unwrap();
                // compute average latency
                let histogram = &exp_data.global_client_latency;
                let avg = histogram.mean(latency_precision).round() as u64;
                y.push(avg);
                // gather fast path ratios
                let (_, _, fast_path_ratio) =
                    exp_data.global_protocol_metrics.fast_path_stats();
                fast_path_ratios.push(fast_path_ratio as i64);
            }
            println!(
                "{:<7} f = {:?} nfr = {} | {:?}",
                PlotFmt::protocol_name(protocol),
                f,
                nfr,
                y,
            );
            println!(
                "[FP%] {:<7} f = {:?} nfr = {} | {:?}",
                PlotFmt::protocol_name(protocol),
                f,
                nfr,
                fast_path_ratios,
            );
            // compute x: shift all values by `shift`
            let x: Vec<_> = x.iter().map(|&x| x + shift).collect();
            // if nfr, plot it normally;
            // if not nfr (in which case, latency should be higher, plot a black
            // bar)
            if nfr {
                let search = Search::new(n, f, protocol);
                let kwargs = bar_style(py, search, &style_fun, BAR_WIDTH)?;
                let line = ax.bar(x, y, Some(kwargs))?;
                // save line with its legend order
                legends.insert(
                    legend_order,
                    (line, PlotFmt::label(search.protocol, search.f)),
                );
                plotted += 1;
            } else {
                // plot a light gray bar
                let light_gray = "#d3d3d3";
                let kwargs = pydict!(
                    py,
                    ("width", BAR_WIDTH),
                    ("edgecolor", "black"),
                    ("linewidth", 1),
                    ("color", light_gray),
                );
                ax.bar(x, y, Some(kwargs))?;
            }
        }
    }
    // set xticks
    ax.set_xticks(x, None)?;
    // create labels with the number of sites
    let labels: Vec<_> = read_only_percentages
        .into_iter()
        .map(|read_only_percentage| {
            format!("w = {}%", 100 - read_only_percentage)
        })
        .collect();
    ax.set_xticklabels(labels, None)?;
    // set labels
    let ylabel = format!("latency ({})", latency_precision.name());
    ax.set_ylabel(&ylabel, None)?;
    // legend
    // HACK:
    // NOTE(review): this function caps combinations at 5 (MAX_COMBINATIONS),
    // so this 7-way branch looks inherited from `increasing_sites_plot` and
    // may be dead — confirm
    let legend_column_spacing = if protocol_count == 7 {
        Some(1.25)
    } else {
        None
    };
    let x_bbox_to_anchor = Some(0.46);
    let y_bbox_to_anchor = Some(1.34);
    add_legend(
        plotted,
        Some(legends),
        x_bbox_to_anchor,
        y_bbox_to_anchor,
        legend_column_spacing,
        py,
        &ax,
    )?;
    // end plot
    end_plot(plotted > 0, output_dir, output_file, py, &plt, Some(fig))?;
    Ok(())
}
/// Line plot of fast-path ratio (%) as a function of conflict rate (%):
/// one line per search (refined via `search_refine` for each conflict
/// rate), plus a `100 - conflict` worst-case reference line drawn in
/// `Protocol::Basic`'s style. Missing experiments are recorded as -1 so
/// each line stays aligned with `conflict_rates`.
#[cfg(feature = "pyo3")]
pub fn fast_path_plot<F>(
    searches: Vec<Search>,
    clients_per_region: usize,
    conflict_rates: Vec<usize>,
    search_refine: F,
    style_fun: Option<Box<dyn Fn(&Search) -> HashMap<Style, String>>>,
    output_dir: Option<&str>,
    output_file: &str,
    db: &ResultsDB,
) -> Result<(), Report>
where
    F: Fn(&mut Search, usize, usize),
{
    // start python
    let gil = Python::acquire_gil();
    let py = gil.python();
    let plt = PyPlot::new(py)?;
    // start plot
    let (fig, ax) = start_plot(py, &plt, None)?;
    // increase height
    let (width, height) = FIGSIZE;
    fig.set_size_inches(width, height + 0.23)?;
    // keep track of the number of plotted instances
    let mut plotted = 0;
    for mut search in searches {
        let mut fast_path_ratios = Vec::new();
        for conflict_rate in conflict_rates.clone() {
            // let the caller constrain the search for this data point
            search_refine(&mut search, clients_per_region, conflict_rate);
            let mut exp_data = db.find(search)?;
            match exp_data.len() {
                0 => {
                    eprintln!(
                        "missing data for {} f = {}",
                        PlotFmt::protocol_name(search.protocol),
                        search.f
                    );
                    // -1 marks missing data
                    fast_path_ratios.push(-1);
                    continue;
                }
                1 => (),
                _ => {
                    let matches: Vec<_> = exp_data
                        .into_iter()
                        .map(|(timestamp, _, _)| {
                            timestamp.path().display().to_string()
                        })
                        .collect();
                    panic!("found more than 1 matching experiment for this search criteria: search {:?} | matches {:?}", search, matches);
                }
            };
            let (_, _, exp_data) = exp_data.pop().unwrap();
            let (_, _, fast_path_ratio) =
                exp_data.global_protocol_metrics.fast_path_stats();
            fast_path_ratios.push(fast_path_ratio as i64);
        }
        println!(
            "{:<7} n = {} f = {:?} c = {} | {:?}",
            PlotFmt::protocol_name(search.protocol),
            search.n,
            search.f,
            clients_per_region,
            fast_path_ratios,
        );
        // plot it! (if there's something to be plotted)
        if !fast_path_ratios.is_empty() {
            let kwargs = line_style(py, search, &style_fun)?;
            ax.plot(
                conflict_rates.clone(),
                fast_path_ratios,
                None,
                Some(kwargs),
            )?;
            plotted += 1;
        }
    }
    // add a worst-case line: with conflict rate c, at most (100 - c)% of
    // commands can take the fast path
    let kwargs =
        line_style(py, Search::new(3, 1, Protocol::Basic), &style_fun)?;
    ax.plot(
        conflict_rates.clone(),
        conflict_rates
            .clone()
            .into_iter()
            .map(|conflict_rate| 100 - conflict_rate)
            .collect(),
        None,
        Some(kwargs),
    )?;
    plotted += 1;
    // set xticks
    ax.set_xticks(conflict_rates, None)?;
    // set x limits
    let kwargs = pydict!(py, ("xmin", 0), ("xmax", 100));
    ax.set_xlim(Some(kwargs))?;
    // set y limits
    let kwargs = pydict!(py, ("ymin", 0), ("ymax", 100));
    ax.set_ylim(Some(kwargs))?;
    // set labels
    ax.set_xlabel("conflict (%)", None)?;
    ax.set_ylabel("fast path (%)", None)?;
    // legend
    let x_bbox_to_anchor = None;
    let y_bbox_to_anchor = Some(1.345);
    add_legend(
        plotted,
        None,
        x_bbox_to_anchor,
        y_bbox_to_anchor,
        None,
        py,
        &ax,
    )?;
    // end plot
    end_plot(plotted > 0, output_dir, output_file, py, &plt, Some(fig))?;
    Ok(())
}
/// Grouped bar plot of average latency as the number of sites grows: one
/// bar group per value in `ns` (labeled `r = n`), one bar per protocol,
/// with optional percentile error bars. Per-site average latencies are
/// also printed to stdout for a hard-coded list of AWS regions.
///
/// Fix vs. previous revision: the per-site lookup read
/// `per_site.get(®ion.to_string())` — the `&reg` of `&region` had been
/// mangled into the HTML entity `®`, which does not compile; restored to
/// `&region.to_string()`.
#[cfg(feature = "pyo3")]
pub fn increasing_sites_plot(
    ns: Vec<usize>,
    protocols: Vec<(Protocol, Option<usize>)>,
    key_gen: KeyGen,
    clients_per_region: usize,
    payload_size: usize,
    legend_order: Option<Vec<usize>>,
    style_fun: Option<Box<dyn Fn(&Search) -> HashMap<Style, String>>>,
    latency_precision: LatencyPrecision,
    error_bar: ErrorBar,
    output_dir: Option<&str>,
    output_file: &str,
    db: &ResultsDB,
) -> Result<(), Report> {
    const FULL_REGION_WIDTH: f64 = 10f64;
    const MAX_COMBINATIONS: usize = 7;
    // 80% of `FULL_REGION_WIDTH` when `MAX_COMBINATIONS` is reached
    const BAR_WIDTH: f64 = FULL_REGION_WIDTH * 0.8 / MAX_COMBINATIONS as f64;
    assert!(
        protocols.len() <= MAX_COMBINATIONS,
        "increasing_sites_plot: expected less searches than the max number of combinations"
    );
    // compute x: one per `n`
    let x: Vec<_> = (0..ns.len())
        .map(|i| i as f64 * FULL_REGION_WIDTH)
        .collect();
    // we need to shift all to the left by half of the number of combinations
    let protocol_count = protocols.len();
    let shift_left = protocol_count as f64 / 2f64;
    // we also need to shift half bar to the right
    let shift_right = 0.5;
    let protocols =
        protocols.into_iter().enumerate().map(|(index, protocol)| {
            // compute index according to shifts
            let mut base = index as f64 - shift_left + shift_right;
            // HACK to separate move `f = 1` (i.e. the first 3 searches) a bit
            // to the left and `f = 2` (i.e. the remaining 4
            // searches) a bit to the right
            if protocol_count == 7 {
                if index < 3 {
                    base += 0.25;
                } else {
                    base += 0.75;
                }
            }
            // compute combination's shift
            let shift = base * BAR_WIDTH;
            (shift, protocol)
        });
    // start python
    let gil = Python::acquire_gil();
    let py = gil.python();
    let plt = PyPlot::new(py)?;
    // start plot
    let (fig, ax) = start_plot(py, &plt, None)?;
    // keep track of the number of plotted instances
    let mut plotted = 0;
    // compute legend order: if not defined, then it's the order given by
    // `protocols`
    let legend_order = legend_order
        .unwrap_or_else(|| (0..protocols.len()).collect::<Vec<_>>());
    assert_eq!(
        legend_order.len(),
        protocols.len(),
        "legend order should contain the same number of protocols"
    );
    let mut legends = BTreeMap::new();
    for ((shift, (protocol, f)), legend_order) in
        protocols.into_iter().zip(legend_order)
    {
        let mut y = Vec::new();
        let mut from_err = Vec::new();
        let mut to_err = Vec::new();
        for n in ns.clone() {
            // set `f` to be a minority in case it's not set
            let f = f.unwrap_or(n / 2);
            let mut search = Search::new(n, f, protocol);
            // filter by key gen, clients per region and payload size
            search
                .key_gen(key_gen)
                .clients_per_region(clients_per_region)
                .payload_size(payload_size);
            let mut exp_data = db.find(search)?;
            match exp_data.len() {
                0 => {
                    eprintln!(
                        "missing data for {} f = {}",
                        PlotFmt::protocol_name(search.protocol),
                        search.f
                    );
                    // keep all vectors aligned with `ns`
                    y.push(0);
                    from_err.push(0);
                    to_err.push(0);
                    continue;
                }
                1 => (),
                _ => {
                    let matches: Vec<_> = exp_data
                        .into_iter()
                        .map(|(timestamp, _, _)| {
                            timestamp.path().display().to_string()
                        })
                        .collect();
                    panic!("found more than 1 matching experiment for this search criteria: search {:?} | matches {:?}", search, matches);
                }
            };
            let (_, _, exp_data) = exp_data.pop().unwrap();
            // compute average latency
            let histogram = &exp_data.global_client_latency;
            let avg = histogram.mean(latency_precision).round() as u64;
            y.push(avg);
            // maybe create error bar
            let error_bar = if let ErrorBar::With(percentile) = error_bar {
                let percentile =
                    histogram.percentile(percentile, latency_precision).round();
                percentile as u64 - avg
            } else {
                0
            };
            // this represents the top of the bar
            from_err.push(0);
            // this represents the height of the error bar starting at the
            // top of the bar
            to_err.push(error_bar);
            // regions for which per-site latency diagnostics are printed
            let regions = vec![
                "eu-west-1",
                "us-west-1",
                "ap-southeast-1",
                "ca-central-1",
                "sa-east-1",
                "ap-east-1",
                "us-east-1",
                "ap-northeast-1",
                "eu-north-1",
                "ap-south-1",
                "us-west-2",
            ];
            let per_site = exp_data
                .client_latency
                .iter()
                .map(|(region, histogram)| {
                    let avg = histogram.mean(latency_precision).round() as u64;
                    (region.name(), avg)
                })
                .collect::<HashMap<_, _>>();
            let mut per_site_fmt = String::new();
            for region in regions {
                if let Some(avg) = per_site.get(&region.to_string()) {
                    per_site_fmt =
                        format!("{} {} {:>3} |", per_site_fmt, region, avg);
                }
            }
            println!("n = {:<2} |{}", n, per_site_fmt);
        }
        println!(
            "{:<7} f = {:?} | {:?}",
            PlotFmt::protocol_name(protocol),
            f,
            y,
        );
        // compute x: shift all values by `shift`
        let x: Vec<_> = x.iter().map(|&x| x + shift).collect();
        // create dummy search
        let n = 0;
        let f = f.unwrap_or(0);
        let search = Search::new(n, f, protocol);
        // plot it error bars:
        let kwargs = bar_style(py, search, &style_fun, BAR_WIDTH)?;
        pytry!(py, kwargs.set_item("yerr", (from_err, to_err)));
        let line = ax.bar(x, y, Some(kwargs))?;
        plotted += 1;
        // save line with its legend order
        legends.insert(
            legend_order,
            (line, PlotFmt::label(search.protocol, search.f)),
        );
    }
    // set xticks
    ax.set_xticks(x, None)?;
    // create labels with the number of sites
    let labels: Vec<_> = ns.into_iter().map(|n| format!("r = {}", n)).collect();
    ax.set_xticklabels(labels, None)?;
    // set labels
    let ylabel = format!("latency ({})", latency_precision.name());
    ax.set_ylabel(&ylabel, None)?;
    // legend
    // HACK:
    let legend_column_spacing = if protocol_count == 7 {
        Some(1.25)
    } else {
        None
    };
    let x_bbox_to_anchor = Some(0.46);
    let y_bbox_to_anchor = Some(1.34);
    add_legend(
        plotted,
        Some(legends),
        x_bbox_to_anchor,
        y_bbox_to_anchor,
        legend_column_spacing,
        py,
        &ax,
    )?;
    // end plot
    end_plot(plotted > 0, output_dir, output_file, py, &plt, Some(fig))?;
    Ok(())
}
#[cfg(feature = "pyo3")]
pub fn fairness_plot<R>(
searches: Vec<Search>,
legend_order: Option<Vec<usize>>,
style_fun: Option<Box<dyn Fn(&Search) -> HashMap<Style, String>>>,
latency_precision: LatencyPrecision,
n: usize,
error_bar: ErrorBar,
output_dir: Option<&str>,
output_file: &str,
db: &ResultsDB,
f: impl Fn(&ExperimentData) -> R,
) -> Result<Vec<(Search, R)>, Report> {
const FULL_REGION_WIDTH: f64 = 10f64;
const MAX_COMBINATIONS: usize = 7;
// 80% of `FULL_REGION_WIDTH` when `MAX_COMBINATIONS` is reached
const BAR_WIDTH: f64 = FULL_REGION_WIDTH * 0.8 / MAX_COMBINATIONS as f64;
assert!(
searches.len() <= MAX_COMBINATIONS,
"latency_plot: expected less searches than the max number of combinations"
);
// compute x: one per region
// - the +1 is for the 'average' group
let x: Vec<_> = (0..n + 1).map(|i| i as f64 * FULL_REGION_WIDTH).collect();
// we need to shift all to the left by half of the number of combinations
let search_count = searches.len();
let shift_left = search_count as f64 / 2f64;
// we also need to shift half bar to the right
let shift_right = 0.5;
let searches = searches.into_iter().enumerate().map(|(index, search)| {
// compute index according to shifts
let mut base = index as f64 - shift_left + shift_right;
// HACK to separate move `f = 1` (i.e. the first 3 searches) a bit to
// the left and `f = 2` (i.e. the remaining 4 searches) a bit to the
// right
if search_count == 7 {
if search.f == 1 && index < 3 {
base += 0.25;
}
if search.f == 2 && index >= 3 {
base += 0.75;
}
}
// compute combination's shift
let shift = base * BAR_WIDTH;
(shift, search)
});
// keep track of all regions
let mut all_regions = HashSet::new();
// aggregate the output of `f` for each search
let mut results = Vec::new();
// start python
let gil = Python::acquire_gil();
let py = gil.python();
let plt = PyPlot::new(py)?;
// start plot
let (fig, ax) = start_plot(py, &plt, None)?;
// keep track of the number of plotted instances
let mut plotted = 0;
// compute legend order: if not defined, then it's the order given by
// `searches`
let legend_order =
legend_order.unwrap_or_else(|| (0..searches.len()).collect::<Vec<_>>());
assert_eq!(
legend_order.len(),
searches.len(),
"legend order should contain the same number of searches"
);
let mut legends = BTreeMap::new();
for ((shift, search), legend_order) in
searches.into_iter().zip(legend_order)
{
// check `n`
assert_eq!(
search.n, n,
"latency_plot: value of n in search doesn't match the provided"
);
let mut exp_data = db.find(search)?;
match exp_data.len() {
0 => {
eprintln!(
"missing data for {} f = {}",
PlotFmt::protocol_name(search.protocol),
search.f
);
continue;
}
1 => (),
_ => {
let matches: Vec<_> = exp_data
.into_iter()
.map(|(timestamp, _, _)| {
timestamp.path().display().to_string()
})
.collect();
panic!("found more than 1 matching experiment for this search criteria: search {:?} | matches {:?}", search, matches);
}
};
let (_, _, exp_data) = exp_data.pop().unwrap();
// compute y: avg latencies sorted by region name
let mut from_err = Vec::new();
let mut to_err = Vec::new();
let mut per_region_latency: Vec<_> = exp_data
.client_latency
.iter()
.map(|(region, histogram)| {
// compute average latency
let avg = histogram.mean(latency_precision).round() as u64;
// maybe create error bar
let error_bar = if let ErrorBar::With(percentile) = error_bar {
let percentile = histogram
.percentile(percentile, latency_precision)
.round();
percentile as u64 - avg
} else {
0
};
// this represents the top of the bar
from_err.push(0);
// this represents the height of the error bar starting at the
// top of the bar
to_err.push(error_bar);
(region.clone(), avg)
})
.collect();
// sort by region and get region latencies
per_region_latency.sort();
let (regions, mut y): (HashSet<_>, Vec<_>) =
per_region_latency.into_iter().unzip();
// compute the stddev between region latencies
// let hist = fantoch::metrics::Histogram::from(y.clone());
// let stddev = hist.stddev().value().round() as u64;
let stddev = 0;
// add stddev as an error bar
from_err.push(0);
to_err.push(stddev);
// add global client latency to the 'average' group
y.push(
exp_data
.global_client_latency
.mean(latency_precision)
.round() as u64,
);
println!(
"{:<7} f = {} | {:?} | stddev = {}",
PlotFmt::protocol_name(search.protocol),
search.f,
y,
stddev,
);
// compute x: shift all values by `shift`
let x: Vec<_> = x.iter().map(|&x| x + shift).collect();
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | true |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/fmt.rs | fantoch_plot/src/fmt.rs | use fantoch::planet::Region;
use fantoch_exp::Protocol;
/// Formatting helpers (names, labels, colors, hatches, markers, line
/// styles) shared by all plots. Stateless: every method is an associated
/// function.
pub struct PlotFmt;
impl PlotFmt {
    /// Human-readable name for an AWS region; panics on regions without a
    /// mapping.
    pub fn region_name(region: Region) -> &'static str {
        match region.name().as_str() {
            "ap-southeast-1" => "Singapore",
            "ca-central-1" => "Canada",
            "eu-west-1" => "Ireland",
            "sa-east-1" => "S. Paulo", // São Paulo
            "us-west-1" => "N. California", // Northern California
            name => {
                panic!("PlotFmt::region_name: name {} not supported!", name);
            }
        }
    }
    /// Display name for each protocol variant.
    pub fn protocol_name(protocol: Protocol) -> &'static str {
        match protocol {
            Protocol::AtlasLocked => "Atlas",
            Protocol::EPaxosLocked => "EPaxos",
            Protocol::CaesarLocked => "Caesar",
            Protocol::FPaxos => "FPaxos",
            Protocol::TempoAtomic => "Tempo",
            Protocol::TempoLocked => "Tempo-L",
            Protocol::Basic => "Inconsistent",
        }
    }
    /// Legend label: protocol name plus its `f`, except for EPaxos/Caesar
    /// where only the name is shown.
    pub fn label(protocol: Protocol, f: usize) -> String {
        match protocol {
            Protocol::EPaxosLocked | Protocol::CaesarLocked => {
                Self::protocol_name(protocol).to_string()
            }
            _ => format!("{} f = {}", Self::protocol_name(protocol), f),
        }
    }
    /// Line/bar color (hex) per (protocol, f) combination; panics on
    /// combinations without an assigned color.
    pub fn color(protocol: Protocol, f: usize) -> String {
        match (protocol, f) {
            (Protocol::AtlasLocked, 1) => "#27ae60",
            (Protocol::AtlasLocked, 2) => "#16a085",
            (Protocol::AtlasLocked, 3) => "#2980b9", // "#111111"
            (Protocol::EPaxosLocked, _) => "#444444",
            (Protocol::CaesarLocked, _) => "#bdc3c7",
            (Protocol::FPaxos, 1) => "#2980b9",
            (Protocol::FPaxos, 2) => "#34495e",
            (Protocol::TempoAtomic, 1) => "#f1c40f",
            (Protocol::TempoAtomic, 2) => "#e67e22",
            (Protocol::TempoAtomic, 3) => "#c23616", // "#333333"
            (Protocol::TempoLocked, 1) => "#2980b9",
            (Protocol::TempoLocked, 2) => "#c0392b",
            (Protocol::Basic, _) => "#576574",
            _ => panic!(
                "PlotFmt::color: protocol = {:?} and f = {} combination not supported!",
                protocol, f
            ),
        }.to_string()
    }
    /// Background color per protocol — presumably used by heatmap-style
    /// plots elsewhere in the crate (caller not visible in this chunk);
    /// only defined for Atlas/FPaxos/TempoAtomic, panics otherwise.
    pub fn background_color(protocol: Protocol) -> String {
        match protocol {
            Protocol::AtlasLocked => "#ecf0f1",
            Protocol::FPaxos => "#95a5a6",
            Protocol::TempoAtomic => "#353b48",
            _ => panic!(
                "PlotFmt::background_color: protocol = {:?} not supported!",
                protocol
            ),
        }
        .to_string()
    }
    /// Bar hatch pattern per (protocol, f); the trailing `// N` comments
    /// note the hatch density, and panics on unsupported combinations.
    // Possible values: {'/', '\', '|', '-', '+', 'x', 'o', 'O', '.', '*'}
    pub fn hatch(protocol: Protocol, f: usize) -> String {
        match (protocol, f) {
            (Protocol::FPaxos, 1) => "/", // 1
            (Protocol::FPaxos, 2) => "\\",
            (Protocol::EPaxosLocked, _) => "//", // 2
            (Protocol::CaesarLocked, _) => "\\\\",
            (Protocol::AtlasLocked, 1) => "///", // 3
            (Protocol::AtlasLocked, 2) => "\\\\\\",
            (Protocol::TempoLocked, 1) => "////", // 4
            (Protocol::TempoLocked, 2) => "\\\\\\\\",
            (Protocol::TempoAtomic, 1) => "//////", // 6
            (Protocol::TempoAtomic, 2) => "\\\\\\\\\\\\",
            (Protocol::Basic, _) => "\\\\\\\\\\\\", // 6
            _ => panic!(
                "PlotFmt::hatch: protocol = {:?} and f = {} combination not supported!",
                protocol, f
            ),
        }.to_string()
    }
    /// Marker style per (protocol, f); panics on unsupported combinations.
    // Possible values: https://matplotlib.org/3.1.1/api/markers_api.html#module-matplotlib.markers
    pub fn marker(protocol: Protocol, f: usize) -> String {
        match (protocol, f) {
            (Protocol::AtlasLocked, 1) => "o",
            (Protocol::AtlasLocked, 2) => "s",
            (Protocol::AtlasLocked, 3) => "P",
            (Protocol::EPaxosLocked, _) => "D",
            (Protocol::CaesarLocked, _) => "H",
            (Protocol::FPaxos, 1) => "+",
            (Protocol::FPaxos, 2) => "x",
            (Protocol::TempoAtomic, 1) => "v",
            (Protocol::TempoAtomic, 2) => "^",
            (Protocol::TempoAtomic, 3) => "p",
            (Protocol::TempoLocked, 1) => "o",
            (Protocol::TempoLocked, 2) => "s",
            (Protocol::Basic, _) => "P",
            _ => panic!(
                "PlotFmt::marker: protocol = {:?} and f = {} combination not supported!",
                protocol, f
            ),
        }.to_string()
    }
    /// Line style per protocol; exhaustive over protocols (`f` is ignored).
    // Possible values: {'-', '--', '-.', ':', ''}
    pub fn linestyle(protocol: Protocol, f: usize) -> String {
        match (protocol, f) {
            (Protocol::AtlasLocked, _) => "--",
            (Protocol::EPaxosLocked, _) => ":",
            (Protocol::CaesarLocked, _) => ":",
            (Protocol::FPaxos, _) => "-.",
            (Protocol::TempoAtomic, _) => "-",
            (Protocol::TempoLocked, _) => "-",
            (Protocol::Basic, _) => "-.",
        }
        .to_string()
    }
    /// Line width (matplotlib `linewidth`); currently a constant, `_f`
    /// unused.
    pub fn linewidth(_f: usize) -> String {
        "1.6".to_string()
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/plot/axes.rs | fantoch_plot/src/plot/axes.rs | use crate::plot::axis::Axis;
use crate::plot::spines::Spines;
use crate::pytry;
use color_eyre::Report;
use pyo3::prelude::*;
use pyo3::types::{PyDict, PyFloat, PyTuple};
pub struct Axes<'a> {
ax: &'a PyAny,
pub xaxis: Axis<'a>,
pub yaxis: Axis<'a>,
pub spines: Spines<'a>,
}
impl<'a> Axes<'a> {
pub fn new(ax: &'a PyAny) -> Result<Self, Report> {
let xaxis = Axis::new(pytry!(ax.py(), ax.getattr("xaxis")));
let yaxis = Axis::new(pytry!(ax.py(), ax.getattr("yaxis")));
let spines = pytry!(ax.py(), ax.getattr("spines"));
let spines = pytry!(ax.py(), spines.downcast::<PyDict>());
let spines = Spines::new(spines);
Ok(Self {
ax,
xaxis,
yaxis,
spines,
})
}
pub fn ax(&self) -> &PyAny {
self.ax
}
pub fn grid(&self, kwargs: Option<&PyDict>) -> Result<(), Report> {
pytry!(self.py(), self.ax.call_method("grid", (), kwargs));
Ok(())
}
pub fn set_axis_off(&self) -> Result<(), Report> {
pytry!(self.py(), self.ax.call_method0("set_axis_off"));
Ok(())
}
pub fn set_title(&self, title: &str) -> Result<(), Report> {
pytry!(self.py(), self.ax.call_method1("set_title", (title,)));
Ok(())
}
pub fn set_xlabel(
&self,
label: &str,
kwargs: Option<&PyDict>,
) -> Result<(), Report> {
pytry!(
self.py(),
self.ax.call_method("set_xlabel", (label,), kwargs)
);
Ok(())
}
pub fn set_ylabel(
&self,
label: &str,
kwargs: Option<&PyDict>,
) -> Result<(), Report> {
pytry!(
self.py(),
self.ax.call_method("set_ylabel", (label,), kwargs)
);
Ok(())
}
pub fn set_xticks<T>(
&self,
ticks: Vec<T>,
kwargs: Option<&PyDict>,
) -> Result<(), Report>
where
T: IntoPy<PyObject>,
{
pytry!(
self.py(),
self.ax.call_method("set_xticks", (ticks,), kwargs)
);
Ok(())
}
pub fn set_yticks<T>(
&self,
ticks: Vec<T>,
kwargs: Option<&PyDict>,
) -> Result<(), Report>
where
T: IntoPy<PyObject>,
{
pytry!(
self.py(),
self.ax.call_method("set_yticks", (ticks,), kwargs)
);
Ok(())
}
pub fn set_xticklabels<L>(
&self,
labels: Vec<L>,
kwargs: Option<&PyDict>,
) -> Result<(), Report>
where
L: IntoPy<PyObject>,
{
pytry!(
self.py(),
self.ax.call_method("set_xticklabels", (labels,), kwargs)
);
Ok(())
}
pub fn set_yticklabels<L>(
&self,
labels: Vec<L>,
kwargs: Option<&PyDict>,
) -> Result<(), Report>
where
L: IntoPy<PyObject>,
{
pytry!(
self.py(),
self.ax.call_method("set_yticklabels", (labels,), kwargs)
);
Ok(())
}
pub fn tick_params(&self, kwargs: Option<&PyDict>) -> Result<(), Report> {
pytry!(self.py(), self.ax.call_method("tick_params", (), kwargs));
Ok(())
}
pub fn set_xscale(&self, value: &str) -> Result<(), Report> {
pytry!(self.py(), self.ax.call_method1("set_xscale", (value,)));
Ok(())
}
pub fn set_yscale(&self, value: &str) -> Result<(), Report> {
pytry!(self.py(), self.ax.call_method1("set_yscale", (value,)));
Ok(())
}
pub fn get_xlim(&self) -> Result<(f64, f64), Report> {
let xlim = pytry!(self.py(), self.ax.call_method0("get_xlim"));
let xlim = pytry!(self.py(), xlim.downcast::<PyTuple>());
let left = pytry!(
self.py(),
xlim.get_item(0)
.expect("left xlim should be set")
.downcast::<PyFloat>()
);
let right = pytry!(
self.py(),
xlim.get_item(1)
.expect("right xlim should be set")
.downcast::<PyFloat>()
);
Ok((left.value(), right.value()))
}
pub fn set_xlim(&self, kwargs: Option<&PyDict>) -> Result<(), Report> {
pytry!(self.py(), self.ax.call_method("set_xlim", (), kwargs));
Ok(())
}
/// Returns the current y-axis limits as `(bottom, top)` via `Axes.get_ylim`.
pub fn get_ylim(&self) -> Result<(f64, f64), Report> {
    // locals were previously copy-pasted from `get_xlim` and misleadingly
    // named `xlim`/`left`/`right`; renamed for the y axis
    let ylim = pytry!(self.py(), self.ax.call_method0("get_ylim"));
    let ylim = pytry!(self.py(), ylim.downcast::<PyTuple>());
    let bottom = pytry!(
        self.py(),
        ylim.get_item(0)
            .expect("bottom ylim should be set")
            .downcast::<PyFloat>()
    );
    let top = pytry!(
        self.py(),
        ylim.get_item(1)
            .expect("top ylim should be set")
            .downcast::<PyFloat>()
    );
    Ok((bottom.value(), top.value()))
}
/// Sets the y-axis limits via `Axes.set_ylim` (bounds passed in `kwargs`).
pub fn set_ylim(&self, kwargs: Option<&PyDict>) -> Result<(), Report> {
    let py = self.py();
    pytry!(py, self.ax.call_method("set_ylim", (), kwargs));
    Ok(())
}
// any questions about legend positioning should be answered here:
// https://stackoverflow.com/a/43439132/4262469 - that's how great the answer
// is!
/// Draws the legend. With `legends`, explicit `(handle, label)` pairs are
/// passed to matplotlib; otherwise matplotlib picks them up automatically.
pub fn legend(
    &self,
    legends: Option<Vec<(&PyAny, String)>>,
    kwargs: Option<&PyDict>,
) -> Result<(), Report> {
    if let Some(entries) = legends {
        // split pairs into parallel handle/label vectors
        let mut handles = Vec::with_capacity(entries.len());
        let mut labels = Vec::with_capacity(entries.len());
        for (handle, label) in entries {
            handles.push(handle);
            labels.push(label);
        }
        pytry!(
            self.py(),
            self.ax.call_method("legend", (handles, labels), kwargs)
        );
    } else {
        pytry!(self.py(), self.ax.call_method("legend", (), kwargs));
    }
    Ok(())
}
/// Plots `y` against `x` via `Axes.plot`, optionally with a matplotlib
/// format string (e.g. `"o-"`).
pub fn plot<X, Y>(
    &self,
    x: Vec<X>,
    y: Vec<Y>,
    fmt: Option<&str>,
    kwargs: Option<&PyDict>,
) -> Result<(), Report>
where
    X: IntoPy<PyObject>,
    Y: IntoPy<PyObject>,
{
    match fmt {
        Some(fmt) => {
            pytry!(self.py(), self.ax.call_method("plot", (x, y, fmt), kwargs));
        }
        None => {
            pytry!(self.py(), self.ax.call_method("plot", (x, y), kwargs));
        }
    }
    Ok(())
}
/// Draws a bar plot via `Axes.bar`, returning the python object matplotlib
/// produces (usable e.g. as a legend handle).
pub fn bar<X, H>(
    &self,
    x: Vec<X>,
    height: Vec<H>,
    kwargs: Option<&PyDict>,
) -> Result<&PyAny, Report>
where
    X: IntoPy<PyObject>,
    H: IntoPy<PyObject>,
{
    let bars = pytry!(self.py(), self.ax.call_method("bar", (x, height), kwargs));
    Ok(bars)
}
/// Displays `data` as an image via `Axes.imshow`, wrapping the result in
/// [`AxesImage`].
pub fn imshow<D>(
    &self,
    data: Vec<D>,
    kwargs: Option<&PyDict>,
) -> Result<AxesImage<'_>, Report>
where
    D: IntoPy<PyObject>,
{
    let raw = pytry!(self.py(), self.ax.call_method("imshow", (data,), kwargs));
    Ok(AxesImage::new(raw))
}
// Shorthand for the GIL token associated with the wrapped `ax` object.
fn py(&self) -> Python<'_> {
self.ax.py()
}
}
// Wrapper around the python object returned by `Axes::imshow`.
pub struct AxesImage<'a> {
im: &'a PyAny,
}
impl<'a> AxesImage<'a> {
// Wraps the python image object.
pub fn new(im: &'a PyAny) -> Self {
Self { im }
}
// Returns the underlying python object (e.g. to hand to `Figure::colorbar`).
pub fn im(&self) -> &PyAny {
self.im
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/plot/pyplot.rs | fantoch_plot/src/plot/pyplot.rs | use crate::plot::axes::Axes;
use crate::plot::figure::Figure;
use crate::plot::table::Table;
use crate::pytry;
use color_eyre::Report;
use pyo3::prelude::*;
use pyo3::types::{PyDict, PyTuple};
use pyo3::PyNativeType;
// Wrapper around the imported `matplotlib.pyplot` python module.
pub struct PyPlot<'p> {
plt: &'p PyModule,
}
impl<'p> PyPlot<'p> {
/// Imports `matplotlib.pyplot` and wraps it.
pub fn new(py: Python<'p>) -> Result<Self, Report> {
let plt = pytry!(py, PyModule::import(py, "matplotlib.pyplot"));
Ok(Self { plt })
}
/// Selects (creating if needed) the subplot at `index` in an
/// `nrows`-by-`ncols` grid, via `pyplot.subplot`.
pub fn subplot<I>(
&self,
nrows: usize,
ncols: usize,
index: I,
kwargs: Option<&PyDict>,
) -> Result<Axes<'_>, Report>
where
I: IntoPy<PyObject>,
{
let result = pytry!(
self.py(),
self.plt
.getattr("subplot")?
.call((nrows, ncols, index), kwargs)
);
let ax = Axes::new(result)?;
Ok(ax)
}
/// Creates a single figure+axes pair via `pyplot.subplots`. Multi-plot
/// grids must go through `PyPlot::subplot` instead (enforced below).
pub fn subplots(
&self,
kwargs: Option<&PyDict>,
) -> Result<(Figure<'_>, Axes<'_>), Report> {
// check that `ncols` and `nrows` was not set
if let Some(kwargs) = kwargs {
assert!(
kwargs.get_item("ncols").is_none(),
"ncols shouldn't be set here; use `PyPlot::subplot` instead"
);
assert!(
kwargs.get_item("nrows").is_none(),
"nrows shouldn't be set here; use `PyPlot::subplot` instead"
);
}
let result =
pytry!(self.py(), self.plt.getattr("subplots")?.call((), kwargs));
// `pyplot.subplots` returns a `(figure, axes)` python tuple
let tuple = pytry!(self.py(), result.downcast::<PyTuple>());
let fig =
Figure::new(tuple.get_item(0).expect("subplots fig should be set"));
let ax =
Axes::new(tuple.get_item(1).expect("subplots ax should be set"))?;
Ok((fig, ax))
}
/// Adjusts subplot spacing via `pyplot.subplots_adjust`.
pub fn subplots_adjust(
&self,
kwargs: Option<&PyDict>,
) -> Result<(), Report> {
pytry!(
self.py(),
self.plt.getattr("subplots_adjust")?.call((), kwargs)
);
Ok(())
}
/// Creates a table via `pyplot.table`.
pub fn table(&self, kwargs: Option<&PyDict>) -> Result<Table<'_>, Report> {
let result =
pytry!(self.py(), self.plt.getattr("table")?.call((), kwargs));
let table = Table::new(result);
Ok(table)
}
/// Forwards an axis option (e.g. `"off"`) to `pyplot.axis`.
pub fn axis(&self, option: &str) -> Result<(), Report> {
pytry!(self.py(), self.plt.getattr("axis")?.call1((option,)));
Ok(())
}
/// Places `text` at `(x, y)` via `pyplot.text`.
pub fn text(
&self,
x: f64,
y: f64,
text: &str,
kwargs: Option<&PyDict>,
) -> Result<(), Report> {
pytry!(
self.py(),
self.plt.getattr("text")?.call((x, y, text), kwargs)
);
Ok(())
}
/// Saves the current figure to `path` via `pyplot.savefig`.
pub fn savefig(
&self,
path: &str,
kwargs: Option<&PyDict>,
) -> Result<(), Report> {
pytry!(
self.py(),
self.plt.getattr("savefig")?.call((path,), kwargs)
);
Ok(())
}
/// Closes a figure via `pyplot.close` (which figure is selected through
/// `kwargs`).
pub fn close(&self, kwargs: Option<&PyDict>) -> Result<(), Report> {
pytry!(self.py(), self.plt.getattr("close")?.call((), kwargs));
Ok(())
}
/// Invokes `pyplot.tight_layout`.
pub fn tight_layout(&self) -> Result<(), Report> {
pytry!(self.py(), self.plt.getattr("tight_layout")?.call0());
Ok(())
}
// Shorthand for the GIL token associated with the wrapped module.
fn py(&self) -> Python<'_> {
self.plt.py()
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/plot/table.rs | fantoch_plot/src/plot/table.rs | use crate::pytry;
use color_eyre::Report;
use pyo3::prelude::*;
// https://matplotlib.org/api/table_api.html#matplotlib.table.Table
// Wrapper around matplotlib's `Table` python object.
pub struct Table<'a> {
table: &'a PyAny,
}
impl<'a> Table<'a> {
    /// Wraps a python `Table` object.
    pub fn new(table: &'a PyAny) -> Self {
        Self { table }
    }
    /// Enables/disables automatic font sizing via `Table.auto_set_font_size`.
    pub fn auto_set_font_size(&self, auto: bool) -> Result<(), Report> {
        let py = self.py();
        pytry!(py, self.table.call_method1("auto_set_font_size", (auto,)));
        Ok(())
    }
    /// Sets the table font size via `Table.set_fontsize`.
    pub fn set_fontsize(&self, size: f64) -> Result<(), Report> {
        let py = self.py();
        pytry!(py, self.table.call_method1("set_fontsize", (size,)));
        Ok(())
    }
    /// GIL token of the wrapped object.
    fn py(&self) -> Python<'_> {
        self.table.py()
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/plot/mod.rs | fantoch_plot/src/plot/mod.rs | pub mod axes;
pub mod axis;
pub mod figure;
pub mod pyplot;
pub mod spines;
pub mod table;
use color_eyre::Report;
use pyo3::prelude::*;
use pyo3::types::PyDict;
use pyo3::PyNativeType;
// Evaluates an expression yielding a pyo3 `Result`; on error, converts the
// error into a `PyErr`, prints the python traceback (`PyErr::print` needs
// the GIL token `$py`), and bails out with a `color_eyre` report.
#[macro_export]
macro_rules! pytry {
($py:expr, $e:expr) => {{
match $e {
Ok(v) => v,
Err(e) => {
let e: PyErr = e.into();
color_eyre::eyre::bail!("{:?}", e.print($py))
}
}
}};
}
// Builds a `PyDict` from `(key, value)` tuples, bailing (via `pytry!`) if
// any `set_item` call fails. Accepts an optional trailing comma.
#[macro_export]
macro_rules! pydict {
($py:expr, $($tup:expr),*) => {{
#[allow(unused_mut)]
let mut dict = pyo3::types::PyDict::new($py);
let mut res = Ok(dict);
$(
let (key, value) = $tup;
if let Err(e) = dict.set_item(key, value) {
res = Err(e);
}
)*
$crate::pytry!($py, res)
}};
($py:expr, $($tup:expr,)*) => {{
$crate::pydict![$py, $($tup),*]
}};
}
// Wrapper around the imported top-level `matplotlib` python module.
pub struct Matplotlib<'p> {
lib: &'p PyModule,
}
impl<'p> Matplotlib<'p> {
/// Imports `matplotlib`; when `newsgott` is set, also registers the fonts
/// found under `./fonts/` and makes "NewsGotT" the default font family.
pub fn new(py: Python<'p>, newsgott: bool) -> Result<Self, Report> {
// create matplotlib
let lib = pytry!(py, PyModule::import(py, "matplotlib"));
let lib = Self { lib };
// maybe set NewsGotT as font
if newsgott {
// load font
let font_manager =
pytry!(py, PyModule::import(py, "matplotlib.font_manager"));
let fonts: Vec<&str> = font_manager
.call_method1("findSystemFonts", ("./fonts/",))?
.extract()?;
for font in fonts {
font_manager
.getattr("fontManager")?
.call_method1("addfont", (font,))?;
}
// set font
let kwargs = pydict!(py, ("family", "NewsGotT"));
lib.rc("font", Some(kwargs))?;
}
Ok(lib)
}
/// Sets rc parameters for group `name` via `matplotlib.rc`.
pub fn rc(
&self,
name: &str,
kwargs: Option<&PyDict>,
) -> Result<(), Report> {
pytry!(self.lib.py(), self.lib.getattr("rc")?.call((name,), kwargs));
Ok(())
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use pyplot::PyPlot;
    // Failing in CI for some reason: https://github.com/vitorenesduarte/fantoch/pull/209
    #[ignore]
    #[test]
    fn save_pdf_test() {
        let path = ".test.pdf";
        if let Err(e) = save_pdf(path) {
            panic!("error while saving pdf: {:?}", e);
        }
        // check that the file was indeed created
        // (was `assert_eq!(…, true)`, which clippy flags as
        // `bool_assert_comparison`)
        assert!(std::path::Path::new(path).is_file());
    }
    // Renders a tiny line plot and saves it as a PDF at `path`.
    fn save_pdf(path: &str) -> Result<(), Report> {
        let gil = Python::acquire_gil();
        let py = gil.python();
        let plt = PyPlot::new(py)?;
        let x = vec!["us-east-1", "ca-central-1", "eu-west-2"];
        let y = vec![10, 20, 30];
        let (fig, ax) = plt.subplots(None)?;
        ax.plot(x, y, Some("o-"), None)?;
        ax.set_xlabel("regions", None)?;
        ax.set_ylabel("latency (ms)", None)?;
        let kwargs = pydict!(py, ("format", "pdf"));
        plt.savefig(path, Some(kwargs))?;
        let kwargs = pydict!(py, ("fig", fig.fig()));
        plt.close(Some(kwargs))?;
        Ok(())
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/plot/axis.rs | fantoch_plot/src/plot/axis.rs | use crate::pytry;
use color_eyre::Report;
use pyo3::prelude::*;
// https://matplotlib.org/api/axis_api.html?highlight=axis#matplotlib.axis.Axis
// Wrapper around matplotlib's `Axis` python object.
pub struct Axis<'a> {
axis: &'a PyAny,
}
impl<'a> Axis<'a> {
    /// Wraps a python `Axis` object.
    pub fn new(axis: &'a PyAny) -> Self {
        Self { axis }
    }
    /// Shows or hides this axis via `Axis.set_visible`.
    pub fn set_visible(&self, visible: bool) -> Result<(), Report> {
        let py = self.py();
        pytry!(py, self.axis.call_method1("set_visible", (visible,)));
        Ok(())
    }
    /// GIL token of the wrapped object.
    fn py(&self) -> Python<'_> {
        self.axis.py()
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/plot/figure.rs | fantoch_plot/src/plot/figure.rs | use crate::plot::axes::Axes;
use crate::pytry;
use color_eyre::Report;
use pyo3::prelude::*;
use pyo3::types::PyDict;
// Wrapper around matplotlib's `Figure` python object.
pub struct Figure<'a> {
fig: &'a PyAny,
}
impl<'a> Figure<'a> {
/// Wraps a python `Figure` object.
pub fn new(fig: &'a PyAny) -> Self {
Self { fig }
}
/// Returns the underlying python object (e.g. to pass to `pyplot.close`).
pub fn fig(&self) -> &PyAny {
self.fig
}
/// Adjusts subplot spacing via `Figure.subplots_adjust`.
pub fn subplots_adjust(
&self,
kwargs: Option<&PyDict>,
) -> Result<(), Report> {
pytry!(
self.py(),
self.fig.call_method("subplots_adjust", (), kwargs)
);
Ok(())
}
/// Sets the figure size, in inches.
pub fn set_size_inches(
&self,
width: f64,
height: f64,
) -> Result<(), Report> {
pytry!(
self.py(),
self.fig.call_method1("set_size_inches", (width, height))
);
Ok(())
}
/// Adds an axes to the figure; `dimensions` is forwarded unchanged to
/// `Figure.add_axes`.
pub fn add_axes(&self, dimensions: Vec<f64>) -> Result<Axes<'_>, Report> {
let ax = Axes::new(pytry!(
self.py(),
self.fig.call_method1("add_axes", (dimensions,),)
))?;
Ok(ax)
}
/// Attaches a colorbar for the image `im` via `Figure.colorbar`.
pub fn colorbar(
&self,
im: &PyAny,
kwargs: Option<&PyDict>,
) -> Result<ColorBar<'_>, Report> {
let cbar = ColorBar::new(pytry!(
self.py(),
self.fig.call_method("colorbar", (im,), kwargs)
))?;
Ok(cbar)
}
// Shorthand for the GIL token associated with the wrapped object.
fn py(&self) -> Python<'_> {
self.fig.py()
}
}
// Wrapper around the python colorbar object returned by `Figure::colorbar`.
pub struct ColorBar<'a> {
bar: &'a PyAny,
}
impl<'a> ColorBar<'a> {
    /// Wraps a python colorbar object.
    pub fn new(bar: &'a PyAny) -> Result<Self, Report> {
        Ok(Self { bar })
    }
    /// Sets the colorbar label via `set_label`.
    pub fn set_label(
        &self,
        label: &str,
        kwargs: Option<&PyDict>,
    ) -> Result<(), Report> {
        let py = self.py();
        pytry!(py, self.bar.call_method("set_label", (label,), kwargs));
        Ok(())
    }
    /// Sets the colorbar tick locations via `set_ticks`.
    pub fn set_ticks<T>(
        &self,
        ticks: Vec<T>,
        kwargs: Option<&PyDict>,
    ) -> Result<(), Report>
    where
        T: IntoPy<PyObject>,
    {
        let py = self.py();
        pytry!(py, self.bar.call_method("set_ticks", (ticks,), kwargs));
        Ok(())
    }
    /// Sets the colorbar tick labels via `set_ticklabels`.
    pub fn set_ticklabels<L>(
        &self,
        labels: Vec<L>,
        kwargs: Option<&PyDict>,
    ) -> Result<(), Report>
    where
        L: IntoPy<PyObject>,
    {
        let py = self.py();
        pytry!(py, self.bar.call_method("set_ticklabels", (labels,), kwargs));
        Ok(())
    }
    /// GIL token of the wrapped object.
    fn py(&self) -> Python<'_> {
        self.bar.py()
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/plot/spines.rs | fantoch_plot/src/plot/spines.rs | use crate::pytry;
use color_eyre::Report;
use pyo3::prelude::*;
use pyo3::types::PyDict;
use pyo3::PyNativeType;
// Wrapper around the axes' `spines` python dictionary (spine name -> spine).
pub struct Spines<'a> {
spines: &'a PyDict,
}
impl<'a> Spines<'a> {
    /// Wraps the `spines` python dictionary.
    pub fn new(spines: &'a PyDict) -> Self {
        Self { spines }
    }
    // TODO provide instead methods `Spines::values` and `Spine::set_visible`.
    /// Shows or hides every spine of the axes.
    pub fn set_all_visible(&self, visible: bool) -> Result<(), Report> {
        let py = self.py();
        for spine in self.spines.values() {
            pytry!(py, spine.call_method1("set_visible", (visible,)));
        }
        Ok(())
    }
    /// GIL token of the wrapped dictionary.
    fn py(&self) -> Python<'_> {
        self.spines.py()
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/db/compress.rs | fantoch_plot/src/db/compress.rs | use crate::db::Dstat;
use fantoch::metrics::Histogram;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt;
// Unit in which latency values are reported.
#[derive(Clone, Copy)]
pub enum LatencyPrecision {
Micros,
Millis,
}
impl LatencyPrecision {
    /// Short unit suffix for this precision: `"us"` or `"ms"`.
    pub fn name(&self) -> String {
        let unit = match self {
            Self::Micros => "us",
            Self::Millis => "ms",
        };
        unit.to_string()
    }
}
// Compressed latency histogram whose raw values are microseconds; accessors
// convert to the requested `LatencyPrecision`.
#[derive(Clone, Serialize, Deserialize)]
pub struct MicrosHistogramCompress {
hist: HistogramCompress,
}
impl MicrosHistogramCompress {
    /// Compresses a full latency `Histogram` (values in microseconds).
    pub fn from(histogram: &Histogram) -> Self {
        let hist = HistogramCompress::from(histogram);
        Self { hist }
    }
    /// Minimum latency, converted to `precision`.
    pub fn min(&self, precision: LatencyPrecision) -> f64 {
        let micros = self.hist.min();
        Self::convert(micros, precision)
    }
    /// Maximum latency, converted to `precision`.
    pub fn max(&self, precision: LatencyPrecision) -> f64 {
        let micros = self.hist.max();
        Self::convert(micros, precision)
    }
    /// Mean latency, converted to `precision`.
    pub fn mean(&self, precision: LatencyPrecision) -> f64 {
        let micros = self.hist.mean();
        Self::convert(micros, precision)
    }
    /// Latency standard deviation, converted to `precision`.
    pub fn stddev(&self, precision: LatencyPrecision) -> f64 {
        let micros = self.hist.stddev();
        Self::convert(micros, precision)
    }
    /// Latency at `percentile` (in `[0, 1]`), converted to `precision`.
    pub fn percentile(
        &self,
        percentile: f64,
        precision: LatencyPrecision,
    ) -> f64 {
        let micros = self.hist.percentile(percentile);
        Self::convert(micros, precision)
    }
    // Converts a microsecond value to the requested precision.
    fn convert(micros: f64, latency_precision: LatencyPrecision) -> f64 {
        let divisor = match latency_precision {
            LatencyPrecision::Micros => 1.0,
            LatencyPrecision::Millis => 1000.0,
        };
        micros / divisor
    }
}
// same as `Histogram`'s Debug format; values are printed in milliseconds
impl fmt::Debug for MicrosHistogramCompress {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let latency_precision = LatencyPrecision::Millis;
write!(
f,
"min={:<6} max={:<6} std={:<6} avg={:<6} p5={:<6} p95={:<6} p99={:<6} p99.9={:<6} p99.99={:<6}",
self.min(latency_precision).round(),
self.max(latency_precision).round(),
self.stddev(latency_precision).round(),
self.mean(latency_precision).round(),
self.percentile(0.05, latency_precision).round(),
self.percentile(0.95, latency_precision).round(),
self.percentile(0.99, latency_precision).round(),
self.percentile(0.999, latency_precision).round(),
self.percentile(0.9999, latency_precision).round()
)
}
}
// Compressed version of a `Dstat` sample set: one compressed histogram per
// monitored resource (cpu, network, memory).
#[derive(Clone, Serialize, Deserialize)]
pub struct DstatCompress {
pub cpu_usr: HistogramCompress,
pub cpu_sys: HistogramCompress,
pub cpu_wait: HistogramCompress,
pub net_recv: HistogramCompress,
pub net_send: HistogramCompress,
pub mem_used: HistogramCompress,
}
impl DstatCompress {
    /// Compresses every histogram in a `Dstat`.
    pub fn from(dstat: &Dstat) -> Self {
        Self {
            cpu_usr: HistogramCompress::from(&dstat.cpu_usr),
            cpu_sys: HistogramCompress::from(&dstat.cpu_sys),
            cpu_wait: HistogramCompress::from(&dstat.cpu_wait),
            net_recv: HistogramCompress::from(&dstat.net_recv),
            net_send: HistogramCompress::from(&dstat.net_send),
            mem_used: HistogramCompress::from(&dstat.mem_used),
        }
    }
    /// Mean and stddev of user cpu usage.
    pub fn cpu_usr_mad(&self) -> (u64, u64) {
        Self::mad(&self.cpu_usr, None)
    }
    /// Mean and stddev of system cpu usage.
    pub fn cpu_sys_mad(&self) -> (u64, u64) {
        Self::mad(&self.cpu_sys, None)
    }
    /// Mean and stddev of cpu wait.
    pub fn cpu_wait_mad(&self) -> (u64, u64) {
        Self::mad(&self.cpu_wait, None)
    }
    /// Mean and stddev of network receive, normalized by 10^6 (MB/s in Debug).
    pub fn net_recv_mad(&self) -> (u64, u64) {
        Self::mad(&self.net_recv, Some(1_000_000f64))
    }
    /// Mean and stddev of network send, normalized by 10^6 (MB/s in Debug).
    pub fn net_send_mad(&self) -> (u64, u64) {
        Self::mad(&self.net_send, Some(1_000_000f64))
    }
    /// Mean and stddev of used memory, normalized by 10^6 (MB in Debug).
    pub fn mem_used_mad(&self) -> (u64, u64) {
        Self::mad(&self.mem_used, Some(1_000_000f64))
    }
    // mad: mean and standard-deviation, optionally divided by `norm` before
    // rounding.
    fn mad(hist: &HistogramCompress, norm: Option<f64>) -> (u64, u64) {
        let scale = norm.unwrap_or(1.0);
        let mean = hist.mean() / scale;
        let stddev = hist.stddev() / scale;
        (mean.round() as u64, stddev.round() as u64)
    }
}
// Multi-line human-readable summary: cpu percentages, network MB/s and
// memory MB, each as "mean stddev=…".
impl fmt::Debug for DstatCompress {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let usr = self.cpu_usr_mad();
let sys = self.cpu_sys_mad();
let wait = self.cpu_wait_mad();
let recv = self.net_recv_mad();
let send = self.net_send_mad();
let used = self.mem_used_mad();
writeln!(f, "cpu:")?;
writeln!(f, "  usr           {:>4} stddev={}", usr.0, usr.1)?;
writeln!(f, "  sys           {:>4} stddev={}", sys.0, sys.1)?;
writeln!(f, "  wait          {:>4} stddev={}", wait.0, wait.1)?;
writeln!(f, "net:")?;
writeln!(f, "  (MB/s) receive {:>4} stddev={}", recv.0, recv.1)?;
writeln!(f, "  (MB/s) send    {:>4} stddev={}", send.0, send.1)?;
writeln!(f, "mem:")?;
writeln!(f, "  (MB) used     {:>4} stddev={}", used.0, used.1)?;
Ok(())
}
}
// Compressed histogram: summary statistics plus a fixed set of precomputed
// percentiles, keyed by the percentile's `f64::to_string` representation.
#[derive(Clone, Serialize, Deserialize)]
pub struct HistogramCompress {
min: f64,
max: f64,
mean: f64,
stddev: f64,
percentiles: HashMap<String, f64>,
}
impl HistogramCompress {
// Compresses a full `Histogram` into summary stats plus precomputed
// percentiles.
fn from(histogram: &Histogram) -> Self {
let min = histogram.min().value();
let max = histogram.max().value();
let mean = histogram.mean().value();
let stddev = histogram.stddev().value();
// precomputed percentiles: 0.00 to 0.99 (step 0.01), plus 0.950 to
// 0.998 (step 0.002), plus 0.999, 0.9999 and 0.99999
// (the `to_string` of each percentile is the map key)
let percentiles = (0..100)
.map(|percentile| percentile as f64 / 100f64)
.chain(
(950..=998)
.step_by(2)
.map(|percentile| percentile as f64 / 1000f64),
)
.chain(vec![0.999, 0.9999, 0.99999])
.map(|percentile| {
(
percentile.to_string(),
histogram.percentile(percentile).value(),
)
})
.collect();
Self {
min,
max,
mean,
stddev,
percentiles,
}
}
// Minimum observed value.
pub fn min(&self) -> f64 {
self.min
}
// Maximum observed value.
pub fn max(&self) -> f64 {
self.max
}
// Mean of observed values.
pub fn mean(&self) -> f64 {
self.mean
}
// Standard deviation of observed values.
pub fn stddev(&self) -> f64 {
self.stddev
}
// Looks up a precomputed percentile; panics if `percentile` is not one of
// the values precomputed in `from` (lookup is by `to_string` key).
pub fn percentile(&self, percentile: f64) -> f64 {
if let Some(value) = self.percentiles.get(&percentile.to_string()) {
*value
} else {
panic!(
"percentile {:?} should exist in compressed histogram",
percentile
)
}
}
}
// same as `Histogram`'s Debug format (raw, unconverted values)
impl fmt::Debug for HistogramCompress {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"min={:<6} max={:<6} std={:<6} avg={:<6} p5={:<6} p95={:<6} p99={:<6} p99.9={:<6} p99.99={:<6}",
self.min().round(),
self.max().round(),
self.stddev().round(),
self.mean().round(),
self.percentile(0.05).round(),
self.percentile(0.95).round(),
self.percentile(0.99).round(),
self.percentile(0.999).round(),
self.percentile(0.9999).round()
)
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/db/exp_data.rs | fantoch_plot/src/db/exp_data.rs | use crate::db::{Dstat, DstatCompress, MicrosHistogramCompress};
use fantoch::client::ClientData;
use fantoch::executor::ExecutorMetrics;
use fantoch::id::ProcessId;
use fantoch::metrics::Histogram;
use fantoch::planet::Region;
use fantoch::protocol::ProtocolMetrics;
use fantoch::run::task::server::metrics_logger::ProcessMetrics;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::Duration;
// All metrics collected in a single experiment run: per-process and
// per-region breakdowns plus merged ("global") views, with histograms
// stored in compressed form.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExperimentData {
pub process_metrics: HashMap<ProcessId, (Region, ProcessMetrics)>,
pub global_protocol_metrics: ProtocolMetrics,
pub global_executor_metrics: ExecutorMetrics,
pub process_dstats: HashMap<ProcessId, DstatCompress>,
pub global_process_dstats: DstatCompress,
pub global_client_dstats: DstatCompress,
pub client_latency: HashMap<Region, MicrosHistogramCompress>,
pub global_client_latency: MicrosHistogramCompress,
pub client_throughput: HashMap<Region, f64>,
pub global_client_throughput: f64,
}
impl ExperimentData {
// Aggregates raw per-process and per-region metrics into an
// `ExperimentData`: merges protocol/executor/dstat metrics into global
// views and compresses every dstat and latency histogram.
pub fn new(
process_metrics: HashMap<ProcessId, (Region, ProcessMetrics)>,
process_dstats: HashMap<ProcessId, Dstat>,
client_metrics: HashMap<Region, ClientData>,
client_dstats: HashMap<Region, Dstat>,
global_client_metrics: ClientData,
) -> Self {
// create global protocol and executor metrics
let mut global_protocol_metrics = ProtocolMetrics::new();
let mut global_executor_metrics = ExecutorMetrics::new();
for (_, (_, process_metrics)) in process_metrics.iter() {
global_protocol_metrics.merge(&process_metrics.protocol_metrics());
global_executor_metrics.merge(&process_metrics.executor_metrics());
}
// compress process dstats and create global process dstat
let mut global_process_dstats = Dstat::new();
let process_dstats = process_dstats
.into_iter()
.map(|(process_id, process_dstat)| {
// merge with global process dstat
global_process_dstats.merge(&process_dstat);
// compress process dstat
let process_dstat = DstatCompress::from(&process_dstat);
(process_id, process_dstat)
})
.collect();
// compress global process dstat
let global_process_dstats = DstatCompress::from(&global_process_dstats);
// merge all client dstats
let mut global_client_dstats = Dstat::new();
for (_, client_dstat) in client_dstats {
global_client_dstats.merge(&client_dstat);
}
// compress global client dstat
let global_client_dstats = DstatCompress::from(&global_client_dstats);
// create latency histogram per region (and also compute throughput)
let mut client_throughput =
HashMap::with_capacity(client_metrics.len());
let client_latency = client_metrics
.into_iter()
.map(|(region, client_data)| {
// compute throughput
let throughput = client_data.throughput();
client_throughput.insert(region.clone(), throughput);
// create latency histogram
let latency = Self::extract_micros(client_data.latency_data());
let histogram = Histogram::from(latency);
// compress client histogram
let histogram = MicrosHistogramCompress::from(&histogram);
(region, histogram)
})
.collect();
// create global latency histogram (and also compute throughput)
let global_client_throughput = global_client_metrics.throughput();
let latency =
Self::extract_micros(global_client_metrics.latency_data());
let global_client_latency = Histogram::from(latency);
// compress global client histogram
let global_client_latency =
MicrosHistogramCompress::from(&global_client_latency);
Self {
process_metrics,
global_protocol_metrics,
global_executor_metrics,
process_dstats,
global_process_dstats,
client_latency,
global_client_dstats,
global_client_latency,
client_throughput,
global_client_throughput,
}
}
// Converts a stream of latency `Duration`s into whole microseconds.
fn extract_micros(
latency_data: impl Iterator<Item = Duration>,
) -> impl Iterator<Item = u64> {
latency_data.map(move |duration| duration.as_micros() as u64)
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/db/results_db.rs | fantoch_plot/src/db/results_db.rs | use crate::db::dstat::Dstat;
use crate::db::exp_data::ExperimentData;
use crate::Search;
use color_eyre::eyre::{self, WrapErr};
use color_eyre::Report;
use fantoch::client::ClientData;
use fantoch::planet::Region;
use fantoch::run::task::server::metrics_logger::ProcessMetrics;
use fantoch_exp::{ExperimentConfig, ProcessType, SerializationFormat};
use rayon::prelude::*;
use std::collections::HashMap;
use std::fs::DirEntry;
use std::path::Path;
use std::sync::{Arc, Mutex};
// Suffix appended to a timestamp directory's name to form the path of the
// cached `ExperimentData` snapshot for that experiment.
const SNAPSHOT_SUFFIX: &str = "_experiment_data_snapshot.bincode.gz";
// In-memory database of experiment results: one entry per timestamp
// directory (the entry itself, its config, and its experiment data).
#[derive(Debug)]
pub struct ResultsDB {
results: Vec<(DirEntry, ExperimentConfig, ExperimentData)>,
}
impl ResultsDB {
/// Lists all timestamp directories inside `results_dir`, skipping cached
/// snapshot files (those ending in `SNAPSHOT_SUFFIX`).
pub fn list_timestamps(results_dir: &str) -> Result<Vec<DirEntry>, Report> {
    let read_dir =
        std::fs::read_dir(results_dir).wrap_err("read results directory")?;
    let mut timestamps = Vec::new();
    for entry in read_dir {
        let entry = entry.wrap_err("incorrect directory entry")?;
        let is_snapshot = entry
            .path()
            .display()
            .to_string()
            .ends_with(SNAPSHOT_SUFFIX);
        if !is_snapshot {
            timestamps.push(entry);
        }
    }
    Ok(timestamps)
}
// Loads every experiment entry under `results_dir` in parallel (rayon),
// ignoring entries whose files are still missing (experiment possibly
// still running) and failing on any other error.
pub fn load(results_dir: &str) -> Result<Self, Report> {
let timestamps = Self::list_timestamps(results_dir)?;
// holder for results
let mut results = Vec::with_capacity(timestamps.len());
// track the number of loaded entries
let loaded_entries = Arc::new(Mutex::new(0));
let total_entries = timestamps.len();
// load all entries
let loads: Vec<_> = timestamps
.into_par_iter()
.map(|timestamp| {
let loaded_entries = loaded_entries.clone();
Self::load_entry(timestamp, loaded_entries, total_entries)
})
.inspect(|entry| {
if let Err(e) = entry {
println!("error: {:?}", e);
}
})
.collect();
for entry in loads {
let entry = entry.wrap_err("load entry");
match entry {
Ok(entry) => {
results.push(entry);
}
Err(e) => {
// compare against the OS "file not found" message to decide
// whether this entry can be safely skipped
let missing_file =
String::from("No such file or directory (os error 2)");
if e.root_cause().to_string() == missing_file {
// if some file was not found, it may be because the
// experiment is still running; in this case, ignore the
// error
println!("entry ignored...");
} else {
// if not, quit
return Err(e);
}
}
}
}
Ok(Self { results })
}
/// Loads one experiment entry (config + data) from `timestamp`'s
/// directory. Experiment data is read from a compressed snapshot if one
/// exists; otherwise it is computed from the raw files and a snapshot is
/// written for next time. Progress is logged via `loaded_entries`.
fn load_entry(
    timestamp: DirEntry,
    loaded_entries: Arc<Mutex<usize>>,
    total_entries: usize,
) -> Result<(DirEntry, ExperimentConfig, ExperimentData), Report> {
    // register load start time
    let start = std::time::Instant::now();
    // read the configuration of this experiment
    let exp_config_path =
        format!("{}/exp_config.json", timestamp.path().display());
    let exp_config: ExperimentConfig = fantoch_exp::deserialize(
        exp_config_path,
        SerializationFormat::Json,
    )
    .wrap_err_with(|| {
        format!(
            "deserialize experiment config of {:?}",
            timestamp.path().display()
        )
    })?;
    // check if there's snapshot of experiment data
    let snapshot =
        format!("{}{}", timestamp.path().display(), SNAPSHOT_SUFFIX);
    let exp_data = if Path::new(&snapshot).exists() {
        // if there is, simply load it
        fantoch_exp::deserialize(&snapshot, SerializationFormat::BincodeGz)
            .wrap_err_with(|| {
                format!("deserialize experiment data snapshot {}", snapshot)
            })?
    } else {
        // otherwise load it
        // (fixed: `&timestamp` had been corrupted into `×tamp` by
        // html-entity mangling of `&times`)
        let exp_data = Self::load_experiment_data(&timestamp, &exp_config)?;
        // create snapshot
        fantoch_exp::serialize(
            &exp_data,
            &snapshot,
            SerializationFormat::BincodeGz,
        )
        .wrap_err_with(|| {
            // fixed: this context previously said "deserialize", copied
            // from the branch above
            format!("serialize experiment data snapshot {}", snapshot)
        })?;
        // and return it
        exp_data
    };
    // register that a new entry is loaded
    let mut loaded_entries = loaded_entries
        .lock()
        .expect("locking loaded entries should work");
    *loaded_entries += 1;
    println!(
        "loaded {:?} after {:?} | {} of {}",
        timestamp.path().display(),
        start.elapsed(),
        loaded_entries,
        total_entries,
    );
    Ok((timestamp, exp_config, exp_data))
}
// Returns every loaded experiment whose configuration matches `search`:
// `n`, `f` and `protocol` must match exactly; every other criterion is
// only checked when set in `search`.
pub fn find(
&self,
search: Search,
) -> Result<Vec<&(DirEntry, ExperimentConfig, ExperimentData)>, Report>
{
let filtered = self
.results
.iter()
.filter(move |(_, exp_config, _)| {
// filter out configurations with different n
if exp_config.config.n() != search.n {
return false;
}
// filter out configurations with different f
if exp_config.config.f() != search.f {
return false;
}
// filter out configurations with different protocol
if exp_config.protocol != search.protocol {
return false;
}
// filter out configurations with different nfr (if set)
if let Some(nfr) = search.nfr {
if exp_config.config.nfr() != nfr {
return false;
}
}
// filter out configurations with different shard_count (if set)
if let Some(shard_count) = search.shard_count {
if exp_config.config.shard_count() != shard_count {
return false;
}
}
// filter out configurations with different cpus (if set)
if let Some(cpus) = search.cpus {
if exp_config.cpus != cpus {
return false;
}
}
// filter out configurations with different workers (if set)
if let Some(workers) = search.workers {
if exp_config.workers != workers {
return false;
}
}
// filter out configurations with different clients_per_region
// (if set)
if let Some(clients_per_region) = search.clients_per_region {
if exp_config.clients_per_region != clients_per_region {
return false;
}
}
// filter out configurations with different key generator (if
// set)
if let Some(key_gen) = search.key_gen {
if exp_config.workload.key_gen() != key_gen {
return false;
}
}
// filter out configuration with different keys_per_command (if
// set)
if let Some(keys_per_command) = search.keys_per_command {
if exp_config.workload.keys_per_command()
!= keys_per_command
{
return false;
}
}
// filter out configurations with different read_only_percentage
// (if set)
if let Some(read_only_percentage) = search.read_only_percentage
{
if exp_config.workload.read_only_percentage()
!= read_only_percentage
{
return false;
}
}
// filter out configurations with different payload_size (if
// set)
if let Some(payload_size) = search.payload_size {
if exp_config.workload.payload_size() != payload_size {
return false;
}
}
// filter out configurations with different batch_max_size (if
// set)
if let Some(batch_max_size) = search.batch_max_size {
if exp_config.batch_max_size != batch_max_size {
return false;
}
}
// if this exp config was not filtered-out until now, then
// return it
true
})
.collect();
Ok(filtered)
}
/// Loads all metric and dstat files of one experiment and assembles them
/// into an `ExperimentData`. Client metrics are loaded once per region,
/// pruned to the interval where all clients were running, and that same
/// interval bounds the dstat files.
///
/// Fixed: several `&timestamp` arguments had been corrupted into
/// `×tamp` by html-entity mangling of `&times`.
fn load_experiment_data(
    timestamp: &DirEntry,
    exp_config: &ExperimentConfig,
) -> Result<ExperimentData, Report> {
    // client metrics
    let mut client_metrics = HashMap::new();
    for (region, _, _, region_index) in exp_config.placement.iter() {
        // only load client metrics for this region if we haven't already
        if !client_metrics.contains_key(region) {
            // create client file prefix
            let process_type = ProcessType::Client(*region_index);
            let prefix =
                fantoch_exp::config::file_prefix(process_type, region);
            // load this region's client metrics (there's a single client
            // machine per region)
            let client: ClientData =
                Self::load_metrics(&timestamp, prefix)?;
            assert!(client_metrics
                .insert(region.clone(), client)
                .is_none());
        }
    }
    // clean-up client data
    let (start, end) = Self::prune_before_last_start_and_after_first_end(
        &mut client_metrics,
    )?;
    // create global client data (from cleaned-up client data)
    let global_client_metrics =
        Self::global_client_metrics(&client_metrics);
    // client dstats (need to be after processing client metrics so that we
    // have a `start` and an `end` for pruning)
    let mut client_dstats = HashMap::new();
    for (region, _, _, region_index) in exp_config.placement.iter() {
        // only load client dstats for this region if we haven't already
        if !client_dstats.contains_key(region) {
            // create client file prefix
            let process_type = ProcessType::Client(*region_index);
            let prefix =
                fantoch_exp::config::file_prefix(process_type, region);
            // load this region's client dstat
            let client = Self::load_dstat(&timestamp, prefix, start, end)?;
            assert!(client_dstats.insert(region.clone(), client).is_none());
        }
    }
    // process metrics and dstats
    let mut process_metrics = HashMap::new();
    let mut process_dstats = HashMap::new();
    for (region, _, process_id, _) in exp_config.placement.iter() {
        let process_id = *process_id;
        // create process file prefix
        let process_type = ProcessType::Server(process_id);
        let prefix = fantoch_exp::config::file_prefix(process_type, region);
        // load this process metrics (there will be more than one per region
        // with partial replication)
        let process: ProcessMetrics =
            Self::load_metrics(&timestamp, prefix.clone())?;
        process_metrics.insert(process_id, (region.clone(), process));
        // load this process dstat
        let process = Self::load_dstat(&timestamp, prefix, start, end)?;
        process_dstats.insert(process_id, process);
    }
    // return experiment data
    Ok(ExperimentData::new(
        process_metrics,
        process_dstats,
        client_metrics,
        client_dstats,
        global_client_metrics,
    ))
}
/// Deserializes a `<timestamp>/<prefix>_metrics.bincode.gz` file into `T`.
fn load_metrics<T>(
    timestamp: &DirEntry,
    prefix: String,
) -> Result<T, Report>
where
    T: serde::de::DeserializeOwned,
{
    let path = format!(
        "{}/{}_metrics.bincode.gz",
        timestamp.path().display(),
        prefix,
    );
    fantoch_exp::deserialize(&path, SerializationFormat::BincodeGz)
        .wrap_err_with(|| format!("deserialize metrics {}", path))
}
/// Parses a `<timestamp>/<prefix>_dstat.csv` file, forwarding the global
/// `start`/`end` instants to `Dstat::from`.
fn load_dstat(
    timestamp: &DirEntry,
    prefix: String,
    start: u64,
    end: u64,
) -> Result<Dstat, Report> {
    let path =
        format!("{}/{}_dstat.csv", timestamp.path().display(), prefix);
    Dstat::from(start, end, &path)
        .wrap_err_with(|| format!("deserialize dstat {}", path))
}
// Here we make sure that we will only consider that points in which all the
// clients are running, i.e. we prune data points that are from
// - before the last client starting (i.e. the max of all start times)
// - after the first client ending (i.e. the min of all end times)
// Returns the resulting global `(start, end)` interval; errors if any
// client has no data at all.
fn prune_before_last_start_and_after_first_end(
client_metrics: &mut HashMap<Region, ClientData>,
) -> Result<(u64, u64), Report> {
// compute start and end times for all clients
let mut starts = Vec::with_capacity(client_metrics.len());
let mut ends = Vec::with_capacity(client_metrics.len());
for client_data in client_metrics.values() {
let bounds = client_data.start_and_end();
let (start, end) = if let Some(bounds) = bounds {
bounds
} else {
eyre::bail!(
"found empty client data without start and end times"
);
};
starts.push(start);
ends.push(end);
}
// compute the global start and end
let start =
starts.into_iter().max().expect("global start should exist");
let end = ends.into_iter().min().expect("global end should exist");
// prune client data outside of global start and end
for (_, client_data) in client_metrics.iter_mut() {
client_data.prune(start, end);
}
Ok((start, end))
}
// Merge all `ClientData` to get a global view.
fn global_client_metrics(
client_metrics: &HashMap<Region, ClientData>,
) -> ClientData {
let mut global = ClientData::new();
for client_data in client_metrics.values() {
global.merge(client_data);
}
global
}
pub fn data_to_json<P: AsRef<Path>>(
&self,
output_dir: P,
) -> Result<(), Report> {
for (dir_entry, exp_config, exp_data) in &self.results {
// ensure experiment dir exists
let dir_entry_path = dir_entry.path();
let timestamp = dir_entry_path
.file_name()
.expect("experiment result folder should have a name");
let timestamp_dir = output_dir.as_ref().join(timestamp);
std::fs::create_dir_all(×tamp_dir)?;
// save exp config
let exp_config_path = timestamp_dir.join("exp_config.json");
fantoch_exp::serialize(
exp_config,
&exp_config_path,
SerializationFormat::JsonPretty,
)
.wrap_err_with(|| {
format!("serialize exp config {:?}", exp_config_path)
})?;
// save experiment data
let exp_data_path = timestamp_dir.join("exp_data.json");
fantoch_exp::serialize(
exp_data,
&exp_data_path,
SerializationFormat::JsonPretty,
)
.wrap_err_with(|| {
format!("serialize exp data {:?}", exp_data_path)
})?;
}
Ok(())
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/db/mod.rs | fantoch_plot/src/db/mod.rs | mod compress;
mod dstat;
mod exp_data;
mod results_db;
// Re-exports.
pub use compress::{DstatCompress, LatencyPrecision, MicrosHistogramCompress};
pub use dstat::Dstat;
pub use exp_data::ExperimentData;
pub use results_db::ResultsDB;
use fantoch::client::KeyGen;
use fantoch_exp::Protocol;
/// Query used to select experiments from a `ResultsDB`.
///
/// `n`, `f` and `protocol` are mandatory criteria; every `Option` field is
/// an optional filter — `None` means that field is not used to restrict the
/// search (see the chainable setters in `impl Search`).
#[derive(Debug, Clone, Copy)]
pub struct Search {
    /// number of processes (sites)
    pub n: usize,
    /// number of tolerated faults
    pub f: usize,
    /// protocol under study
    pub protocol: Protocol,
    // optional filters; each has a corresponding setter below
    pub nfr: Option<bool>,
    pub shard_count: Option<usize>,
    pub cpus: Option<usize>,
    pub workers: Option<usize>,
    pub clients_per_region: Option<usize>,
    pub key_gen: Option<KeyGen>,
    pub keys_per_command: Option<usize>,
    pub read_only_percentage: Option<usize>,
    pub payload_size: Option<usize>,
    pub batch_max_size: Option<usize>,
}
/// Generates a chainable setter that enables one optional search filter:
/// it stores `Some(value)` in the field of the same name and returns
/// `&mut Self` so calls can be chained.
macro_rules! search_filter {
    ($field:ident, $ty:ty) => {
        pub fn $field(&mut self, $field: $ty) -> &mut Self {
            self.$field = Some($field);
            self
        }
    };
}

impl Search {
    /// Creates a search with the mandatory criteria (`n`, `f`, `protocol`)
    /// and every optional filter disabled.
    pub fn new(n: usize, f: usize, protocol: Protocol) -> Self {
        Self {
            n,
            f,
            protocol,
            nfr: None,
            shard_count: None,
            cpus: None,
            workers: None,
            clients_per_region: None,
            key_gen: None,
            keys_per_command: None,
            read_only_percentage: None,
            payload_size: None,
            batch_max_size: None,
        }
    }

    // chainable setters, one per optional filter (same order as the fields)
    search_filter!(nfr, bool);
    search_filter!(shard_count, usize);
    search_filter!(cpus, usize);
    search_filter!(workers, usize);
    search_filter!(clients_per_region, usize);
    search_filter!(key_gen, KeyGen);
    search_filter!(keys_per_command, usize);
    search_filter!(read_only_percentage, usize);
    search_filter!(payload_size, usize);
    search_filter!(batch_max_size, usize);
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/db/dstat.rs | fantoch_plot/src/db/dstat.rs | use color_eyre::eyre::WrapErr;
use color_eyre::Report;
use csv::ReaderBuilder;
use fantoch::metrics::Histogram;
use serde::{Deserialize, Deserializer};
use std::fs::File;
use std::io::{BufRead, BufReader};
/// Aggregated dstat (resource usage) measurements, one histogram per metric,
/// built from a dstat csv file (see `Dstat::from`).
#[derive(Default, Clone)]
pub struct Dstat {
    // cpu columns: user, system and io-wait time
    pub cpu_usr: Histogram,
    pub cpu_sys: Histogram,
    pub cpu_wait: Histogram,
    // network columns: received and sent
    // (presumably bytes per interval — TODO confirm against dstat docs)
    pub net_recv: Histogram,
    pub net_send: Histogram,
    // memory column: used memory
    pub mem_used: Histogram,
}
impl Dstat {
    /// Creates an empty `Dstat` (all histograms empty).
    pub fn new() -> Self {
        Default::default()
    }

    /// Merges `other` into `self`, histogram by histogram.
    pub fn merge(&mut self, other: &Self) {
        self.cpu_usr.merge(&other.cpu_usr);
        self.cpu_sys.merge(&other.cpu_sys);
        self.cpu_wait.merge(&other.cpu_wait);
        self.net_recv.merge(&other.net_recv);
        self.net_send.merge(&other.net_send);
        self.mem_used.merge(&other.mem_used);
    }

    /// Parses a dstat csv file, aggregating only the records whose epoch
    /// (milliseconds) falls within `[start, end]`.
    ///
    /// A missing file is not an error: a warning is printed and an empty
    /// `Dstat` is returned.
    pub fn from(start: u64, end: u64, path: &str) -> Result<Self, Report> {
        // create all histograms
        let mut cpu_usr = Histogram::new();
        let mut cpu_sys = Histogram::new();
        let mut cpu_wait = Histogram::new();
        let mut net_recv = Histogram::new();
        let mut net_send = Histogram::new();
        let mut mem_used = Histogram::new();
        // open csv file
        if let Ok(file) = File::open(path) {
            let mut buf = BufReader::new(file);
            // skip first 5 lines (non-header lines)
            for _ in 0..5 {
                let mut s = String::new();
                // ignore empty lines, but stop at EOF: `read_line` returns
                // `Ok(0)` there and leaves `s` untouched, so without this
                // check a truncated/empty file would spin forever
                while s.trim().is_empty() {
                    if buf.read_line(&mut s)? == 0 {
                        break;
                    }
                }
            }
            // create csv reader:
            // - `flexible(true)` makes `reader.records()` not throw a error in
            //   case there's a row with not enough fields
            let mut reader =
                ReaderBuilder::new().flexible(true).from_reader(buf);
            // get dstat headers
            let headers = reader.headers().wrap_err("csv headers")?.clone();
            for row in reader.records() {
                // fetch row
                let row = row.wrap_err("csv record")?;
                // skip row if doesn't have enough fields/columns
                if row.len() < headers.len() {
                    continue;
                }
                // parse csv row
                let row: DstatRow =
                    row.deserialize(Some(&headers)).wrap_err_with(|| {
                        format!("deserialize dstat row {}", path)
                    })?;
                // only consider the record if within bounds
                if row.epoch >= start && row.epoch <= end {
                    cpu_usr.increment(row.cpu_usr);
                    cpu_sys.increment(row.cpu_sys);
                    cpu_wait.increment(row.cpu_wait);
                    net_recv.increment(row.net_recv);
                    net_send.increment(row.net_send);
                    mem_used.increment(row.mem_used);
                }
            }
        } else {
            println!("missing dstat file: {:?}", path);
        }
        // create self
        let dstat = Self {
            cpu_usr,
            cpu_sys,
            cpu_wait,
            net_recv,
            net_send,
            mem_used,
        };
        Ok(dstat)
    }
}
// Fields generated by dstat (we only keep a subset):
// "time","epoch","usr","sys","idl","wai","stl","read","writ","recv","send"
// ,"used","free","buff","cach","read","writ"
/// A single dstat csv record; columns are matched by header name via the
/// `rename` attributes, and every value is parsed from its float textual
/// form into a rounded `u64`.
#[derive(Debug, Deserialize)]
struct DstatRow {
    // epoch converted to milliseconds (see `parse_epoch`)
    #[serde(deserialize_with = "parse_epoch")]
    epoch: u64,
    // cpu metrics
    #[serde(rename = "usr")]
    #[serde(deserialize_with = "f64_to_u64")]
    cpu_usr: u64,
    #[serde(rename = "sys")]
    #[serde(deserialize_with = "f64_to_u64")]
    cpu_sys: u64,
    #[serde(rename = "wai")]
    #[serde(deserialize_with = "f64_to_u64")]
    cpu_wait: u64,
    // net metrics
    #[serde(rename = "recv")]
    #[serde(deserialize_with = "f64_to_u64")]
    net_recv: u64,
    #[serde(rename = "send")]
    #[serde(deserialize_with = "f64_to_u64")]
    net_send: u64,
    // memory metrics
    #[serde(rename = "used")]
    #[serde(deserialize_with = "f64_to_u64")]
    mem_used: u64,
}
/// Deserializes a dstat epoch (a float number of seconds, as text) into a
/// rounded number of milliseconds.
fn parse_epoch<'de, D>(de: D) -> Result<u64, D::Error>
where
    D: Deserializer<'de>,
{
    let raw = String::deserialize(de)?;
    let seconds = raw
        .parse::<f64>()
        .wrap_err("parse epoch")
        .map_err(serde::de::Error::custom)?;
    // convert epoch from seconds to milliseconds
    Ok((seconds * 1000f64).round() as u64)
}
fn f64_to_u64<'de, D>(de: D) -> Result<u64, D::Error>
where
D: Deserializer<'de>,
{
let n = String::deserialize(de)?;
let n = n.parse::<f64>().expect("dstat value should be a float");
let n = n.round() as u64;
Ok(n)
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/bin/migrate.rs | fantoch_plot/src/bin/migrate.rs | use color_eyre::eyre::WrapErr;
use color_eyre::Report;
use fantoch::client::{KeyGen, Workload};
use fantoch::config::Config;
use fantoch::id::ProcessId;
use fantoch::planet::Planet;
use fantoch_exp::{
ExperimentConfig, FantochFeature, PlacementFlat, Protocol, RunMode,
SerializationFormat, Testbed,
};
use fantoch_plot::ResultsDB;
use serde::{Deserialize, Serialize};
use std::time::Duration;
/// Legacy (pre-migration) serialized form of `fantoch::client::KeyGen`,
/// mirrored here so old `exp_config.json` files can still be deserialized
/// and converted in `main`.
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub enum PreviousKeyGen {
    ConflictPool {
        conflict_rate: usize,
        pool_size: usize,
    },
    Zipf {
        coefficient: f64,
        total_keys_per_shard: usize,
    },
}
/// Legacy (pre-migration) serialized form of `fantoch::client::Workload`;
/// converted to the current `Workload` in `main`.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct PreviousWorkload {
    /// number of shards (stored as `u64` in the old format; cast to `usize`
    /// during migration)
    shard_count: u64,
    /// key generator (old format; converted to the current `KeyGen`)
    key_gen: PreviousKeyGen,
    /// number of keys accessed by the command
    keys_per_command: usize,
    /// number of commands to be submitted in this workload
    commands_per_client: usize,
    /// percentage of read-only commands
    read_only_percentage: usize,
    /// size of payload in command (in bytes)
    payload_size: usize,
    /// number of commands already issued in this workload
    command_count: usize,
}
/// Legacy (pre-migration) serialized form of `fantoch::config::Config`;
/// each field is replayed onto a fresh `Config` via its setters in `main`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub struct PreviousConfig {
    /// number of processes
    n: usize,
    /// number of tolerated faults
    f: usize,
    /// number of shards
    shard_count: usize,
    /// if enabled, then execution is skipped
    execute_at_commit: bool,
    /// defines the interval between executor cleanups
    executor_cleanup_interval: Duration,
    /// defines the interval between between executed notifications sent to
    /// the local worker process
    executor_executed_notification_interval: Duration,
    /// defines whether the executor should monitor pending commands, and if
    /// so, the interval between each monitor
    executor_monitor_pending_interval: Option<Duration>,
    /// defines whether the executor should monitor the execution order of
    /// commands
    executor_monitor_execution_order: bool,
    /// defines the interval between garbage collections
    gc_interval: Option<Duration>,
    /// starting leader process
    leader: Option<ProcessId>,
    /// defines whether protocols (atlas, epaxos and tempo) should employ the
    /// NFR optimization
    nfr: bool,
    /// defines whether tempo should employ tiny quorums or not
    tempo_tiny_quorums: bool,
    /// defines the interval between clock bumps, if any
    tempo_clock_bump_interval: Option<Duration>,
    /// defines the interval the sending of `MDetached` messages in tempo, if
    /// any
    tempo_detached_send_interval: Option<Duration>,
    /// defines whether caesar should employ the wait condition
    caesar_wait_condition: bool,
    /// defines whether protocols should try to bypass the fast quorum process
    /// ack (which is only possible if the fast quorum size is 2)
    skip_fast_ack: bool,
}
/// Legacy (pre-migration) serialized form of `fantoch_exp::ExperimentConfig`:
/// identical to the current struct except for the nested `PreviousConfig`
/// and `PreviousWorkload` fields, which require conversion (see `main`).
#[derive(Deserialize, Serialize)]
pub struct PreviousExperimentConfig {
    pub placement: PlacementFlat,
    pub planet: Option<Planet>,
    pub run_mode: RunMode,
    pub features: Vec<FantochFeature>,
    pub testbed: Testbed,
    pub protocol: Protocol,
    // old-format config; replayed onto a fresh `Config` during migration
    pub config: PreviousConfig,
    pub clients_per_region: usize,
    // old-format workload; converted to the current `Workload`
    pub workload: PreviousWorkload,
    pub batch_max_size: usize,
    pub batch_max_delay: Duration,
    pub process_tcp_nodelay: bool,
    pub tcp_buffer_size: usize,
    pub tcp_flush_interval: Option<Duration>,
    pub process_channel_buffer_size: usize,
    pub cpus: usize,
    pub workers: usize,
    pub executors: usize,
    pub multiplexing: usize,
    pub client_tcp_nodelay: bool,
    pub client_channel_buffer_size: usize,
}
/// Migrates old `exp_config.json` files (in the `Previous*` format) to the
/// current `ExperimentConfig` format, rewriting each file in place.
fn main() -> Result<(), Report> {
    // NOTE: every results directory is currently commented out, so this
    // loop iterates over an empty vec and the migration is effectively
    // disabled; uncomment entries to migrate them
    for results_dir in vec![
        // "/home/vitor.enes/eurosys_results/results_fairness_and_tail_latency",
        // "/home/vitor.enes/eurosys_results/results_increasing_load",
        // "/home/vitor.enes/eurosys_results/results_partial_replication",
        // "/home/vitor.enes/eurosys_results/results_batching",
        // "/home/vitor.enes/thesis_results/results_increasing_sites",
        // "/home/vitor.enes/thesis_results/results_fast_path",
    ] {
        // load results
        let timestamps =
            ResultsDB::list_timestamps(results_dir).wrap_err("load results")?;
        for timestamp in timestamps {
            // read the configuration of this experiment
            let exp_config_path =
                format!("{}/exp_config.json", timestamp.path().display());
            println!("migrating {}", exp_config_path);
            let previous: Result<PreviousExperimentConfig, _> =
                fantoch_exp::deserialize(
                    &exp_config_path,
                    SerializationFormat::Json,
                )
                .wrap_err_with(|| {
                    format!(
                        "deserialize experiment config of {:?}",
                        timestamp.path().display()
                    )
                });
            match previous {
                Ok(previous) => {
                    // convert the old key generator to the current enum
                    // (variants map one-to-one)
                    let key_gen = match previous.workload.key_gen {
                        PreviousKeyGen::ConflictPool {
                            conflict_rate,
                            pool_size,
                        } => KeyGen::ConflictPool {
                            conflict_rate,
                            pool_size,
                        },
                        PreviousKeyGen::Zipf {
                            coefficient,
                            total_keys_per_shard,
                        } => KeyGen::Zipf {
                            coefficient,
                            total_keys_per_shard,
                        },
                    };
                    // create workload
                    let mut workload = Workload::new(
                        previous.workload.shard_count as usize,
                        key_gen,
                        previous.workload.keys_per_command,
                        previous.workload.commands_per_client,
                        previous.workload.payload_size,
                    );
                    // NOTE THAT `set_read_only_percentage` IS REQUIRED!
                    workload.set_read_only_percentage(
                        previous.workload.read_only_percentage,
                    );
                    // replay every old config field onto a fresh `Config`
                    let mut config =
                        Config::new(previous.config.n, previous.config.f);
                    config.set_shard_count(previous.config.shard_count);
                    config.set_execute_at_commit(
                        previous.config.execute_at_commit,
                    );
                    config.set_executor_cleanup_interval(
                        previous.config.executor_cleanup_interval,
                    );
                    config.set_executor_executed_notification_interval(
                        previous.config.executor_executed_notification_interval,
                    );
                    config.set_executor_monitor_pending_interval(
                        previous.config.executor_monitor_pending_interval,
                    );
                    config.set_executor_monitor_execution_order(
                        previous.config.executor_monitor_execution_order,
                    );
                    config.set_gc_interval(previous.config.gc_interval);
                    config.set_leader(previous.config.leader);
                    config.set_nfr(previous.config.nfr);
                    config.set_tempo_tiny_quorums(
                        previous.config.tempo_tiny_quorums,
                    );
                    config.set_tempo_clock_bump_interval(
                        previous.config.tempo_clock_bump_interval,
                    );
                    config.set_tempo_detached_send_interval(
                        previous.config.tempo_detached_send_interval,
                    );
                    config.set_caesar_wait_condition(
                        previous.config.caesar_wait_condition,
                    );
                    config.set_skip_fast_ack(previous.config.skip_fast_ack);
                    let exp_config = ExperimentConfig {
                        placement: previous.placement,
                        planet: previous.planet,
                        run_mode: previous.run_mode,
                        features: previous.features,
                        testbed: previous.testbed,
                        protocol: previous.protocol,
                        config,
                        clients_per_region: previous.clients_per_region,
                        process_tcp_nodelay: previous.process_tcp_nodelay,
                        tcp_buffer_size: previous.tcp_buffer_size,
                        tcp_flush_interval: previous.tcp_flush_interval,
                        process_channel_buffer_size: previous
                            .process_channel_buffer_size,
                        cpus: previous.cpus,
                        workers: previous.workers,
                        executors: previous.executors,
                        multiplexing: previous.multiplexing,
                        workload,
                        batch_max_size: previous.batch_max_size,
                        batch_max_delay: previous.batch_max_delay,
                        client_tcp_nodelay: previous.client_tcp_nodelay,
                        client_channel_buffer_size: previous
                            .client_channel_buffer_size,
                    };
                    // save experiment config (overwrites the old file)
                    fantoch_exp::serialize(
                        exp_config,
                        &exp_config_path,
                        SerializationFormat::Json,
                    )
                    .wrap_err("migrate_exp_config")?;
                }
                Err(e) => {
                    // NOTE(review): matching the OS error by its display
                    // string is fragile (locale/platform dependent);
                    // downcasting the root cause to `std::io::Error` and
                    // checking `ErrorKind::NotFound` would be sturdier
                    let missing_file =
                        String::from("No such file or directory (os error 2)");
                    if e.root_cause().to_string() == missing_file {
                        // if some file was not found, it may be because the
                        // folder is empty; in this case, ignore the
                        // error
                        println!("entry ignored...");
                    } else {
                        // if not, quit
                        return Err(e);
                    }
                }
            }
        }
    }
    Ok(())
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/bin/data_to_json.rs | fantoch_plot/src/bin/data_to_json.rs | use color_eyre::eyre::WrapErr;
use color_eyre::Report;
use fantoch_plot::ResultsDB;
/// Entry point: converts the processed results to json.
fn main() -> Result<(), Report> {
    processed_data_to_json()
}
/// Loads the fairness/tail-latency results and dumps them as json into a
/// sibling `_processed` directory.
#[allow(dead_code)]
fn processed_data_to_json() -> Result<(), Report> {
    // input (raw results) and output (processed json) directories
    let input = "../results_fairness_and_tail_latency";
    let output = "../results_fairness_and_tail_latency_processed";
    ResultsDB::load(input)
        .wrap_err("load results")?
        .data_to_json(output)
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/bin/main.rs | fantoch_plot/src/bin/main.rs | use color_eyre::eyre::WrapErr;
use color_eyre::Report;
use fantoch::client::KeyGen;
use fantoch::planet::{Planet, Region};
use fantoch_exp::Protocol;
use fantoch_plot::{
ErrorBar, ExperimentData, HeatmapMetric, LatencyMetric, LatencyPrecision,
MetricsType, PlotFmt, ResultsDB, Search, Style, ThroughputYAxis,
};
use serde::Deserialize;
use std::collections::HashMap;
use std::fs::File;
use std::io::BufReader;
// directory with the AWS inter-region latency measurements
// (presumably consumed by the distance-matrix helper later in this file —
// confirm; the 2021 dataset is kept commented for reference)
// const LATENCY_AWS: &str = "../latency_aws/2021_02_13";
const LATENCY_AWS: &str = "../latency_aws/2020_06_05";
// folder where all plots will be stored
const PLOT_DIR: Option<&str> = Some("plots");
// if true, dstats per process will be generated
const ALL_DSTATS: bool = true;
/// Entry point: sets the global plotting style and generates the thesis
/// plot suite (plus the distance matrix).
fn main() -> Result<(), Report> {
    // set global style
    // (`newsgott` presumably toggles the Newsgott font — confirm in
    // `set_global_style`)
    let newsgott = true;
    fantoch_plot::set_global_style(newsgott)?;
    // alternative plot suites, currently disabled:
    // partial_replication_all()?;
    // multi_key()?;
    // single_key_all()?;
    show_distance_matrix();
    thesis()?;
    Ok(())
}
/// Generates every plot used in the thesis (a superset of the EuroSys set).
#[allow(dead_code)]
fn thesis() -> Result<(), Report> {
    eurosys()?;
    fast_path_plot()?;
    increasing_sites_plot()?;
    nfr_plot()?;
    recovery_plot()?;
    Ok(())
}
/// Generates the plots used in the EuroSys paper.
#[allow(dead_code)]
fn eurosys() -> Result<(), Report> {
    fairness_plot()?;
    tail_latency_plot()?;
    increasing_load_plot()?;
    batching_plot()?;
    partial_replication_plot()?;
    Ok(())
}
/// Per-region time series parsed from a recovery `.dat` file (one value per
/// record, in file order); see `recovery_data`.
#[derive(Default)]
struct RecoveryData {
    taiwan: Vec<u64>,
    finland: Vec<u64>,
    south_carolina: Vec<u64>,
    total: Vec<u64>,
}
/// Generates the recovery plot comparing atlas and fpaxos, pairing the two
/// protocols' per-region series from the `eurosys20_data` files.
#[allow(dead_code)]
fn recovery_plot() -> Result<(), Report> {
    println!(">>>>>>>> RECOVERY <<<<<<<<");
    let atlas_data = recovery_data("eurosys20_data/recovery/atlas.dat")?;
    let fpaxos_data = recovery_data("eurosys20_data/recovery/fpaxos.dat")?;
    // each tuple is (atlas series, fpaxos series) for one region
    let taiwan = (atlas_data.taiwan, fpaxos_data.taiwan);
    let finland = (atlas_data.finland, fpaxos_data.finland);
    let south_carolina =
        (atlas_data.south_carolina, fpaxos_data.south_carolina);
    let total = (atlas_data.total, fpaxos_data.total);
    let path = String::from("plot_recovery.pdf");
    fantoch_plot::recovery_plot(
        taiwan,
        finland,
        south_carolina,
        total,
        PLOT_DIR,
        &path,
    )?;
    Ok(())
}
/// Parses a space-separated recovery `.dat` file into per-region vectors.
fn recovery_data(path: &str) -> Result<RecoveryData, Report> {
    // row layout of the recovery files; field names double as csv headers
    #[derive(Debug, Deserialize)]
    struct Record {
        #[allow(dead_code)]
        time: u64,
        taiwan: u64,
        finland: u64,
        south_carolina: u64,
        total: u64,
    }
    let reader = BufReader::new(File::open(path)?);
    let mut csv_reader = csv::ReaderBuilder::new()
        .delimiter(b' ')
        .from_reader(reader);
    let mut data = RecoveryData::default();
    for result in csv_reader.deserialize() {
        let record: Record = result?;
        data.taiwan.push(record.taiwan);
        data.finland.push(record.finland);
        data.south_carolina.push(record.south_carolina);
        data.total.push(record.total);
    }
    Ok(data)
}
/// Generates the NFR plots: latency per read-only percentage, for n=7,
/// across tempo/atlas (f=1,2) and epaxos, at 256 clients per region.
#[allow(dead_code)]
fn nfr_plot() -> Result<(), Report> {
    println!(">>>>>>>> NFR <<<<<<<<");
    let results_dir = "/home/vitor.enes/thesis_results/results_nfr";
    // fixed parameters
    let key_gen = KeyGen::Zipf {
        total_keys_per_shard: 1_000_000,
        coefficient: 0.99,
    };
    let payload_size = 100;
    // (protocol, Some(f)); `None` means f is not filtered (epaxos)
    let protocols = vec![
        (Protocol::TempoAtomic, Some(1)),
        (Protocol::AtlasLocked, Some(1)),
        (Protocol::TempoAtomic, Some(2)),
        (Protocol::AtlasLocked, Some(2)),
        (Protocol::EPaxosLocked, None),
    ];
    let legend_order = vec![0, 2, 1, 3, 4];
    let ns = vec![7];
    let read_only_percentages = vec![20, 50, 80, 100];
    let clients_per_region = 256;
    // load results
    let db = ResultsDB::load(results_dir).wrap_err("load results")?;
    for n in ns {
        // generate plot (one pdf per n)
        let path = format!("plot_nfr_n{}.pdf", n);
        let style_fun = None;
        let latency_precision = LatencyPrecision::Millis;
        fantoch_plot::nfr_plot(
            n,
            read_only_percentages.clone(),
            protocols.clone(),
            key_gen,
            clients_per_region,
            payload_size,
            Some(legend_order.clone()),
            style_fun,
            latency_precision,
            PLOT_DIR,
            &path,
            &db,
        )?;
    }
    Ok(())
}
/// Generates the fast-path plots: one pdf per (n, clients_per_region) pair,
/// varying the conflict rate on the x axis.
#[allow(dead_code)]
fn fast_path_plot() -> Result<(), Report> {
    println!(">>>>>>>> FAST PATH <<<<<<<<");
    let results_dir = "/home/vitor.enes/thesis_results/results_fast_path";
    // fixed parameters
    let conflict_rates = vec![0, 5, 10, 20, 40, 60, 80, 100];
    let payload_size = 100;
    let batch_max_size = 1;
    let clients_per_region = vec![1, 8];
    // refines a base search with the per-plot client count and conflict rate
    let search_refine = |search: &mut Search,
                         clients_per_region: usize,
                         conflict_rate: usize| {
        let key_gen = KeyGen::ConflictPool {
            conflict_rate,
            pool_size: 1,
        };
        search
            .clients_per_region(clients_per_region)
            .key_gen(key_gen)
            .payload_size(payload_size)
            .batch_max_size(batch_max_size);
    };
    // tuple with protocol and f
    let protocols_n5 = vec![
        (Protocol::TempoAtomic, 2),
        (Protocol::AtlasLocked, 2),
        (Protocol::EPaxosLocked, 2),
    ];
    let protocols_n7 = vec![
        (Protocol::TempoAtomic, 2),
        (Protocol::TempoAtomic, 3),
        (Protocol::AtlasLocked, 2),
        (Protocol::AtlasLocked, 3),
        (Protocol::EPaxosLocked, 3),
    ];
    // change basic to worst case (label override only)
    let style_fun = |search: &Search| {
        let mut style = HashMap::new();
        if search.protocol == Protocol::Basic {
            style.insert(Style::Label, "worst-case".to_string());
        }
        style
    };
    // load results
    let db = ResultsDB::load(results_dir).wrap_err("load results")?;
    // generate fast path plots
    for (n, protocols) in vec![(5, protocols_n5), (7, protocols_n7)] {
        // create searches
        let searches: Vec<_> = protocols
            .into_iter()
            .map(|(protocol, f)| Search::new(n, f, protocol))
            .collect();
        for clients_per_region in clients_per_region.clone() {
            let path =
                format!("plot_fast_path_n{}_c{}.pdf", n, clients_per_region);
            fantoch_plot::fast_path_plot(
                searches.clone(),
                clients_per_region,
                conflict_rates.clone(),
                search_refine,
                Some(Box::new(style_fun)),
                PLOT_DIR,
                &path,
                &db,
            )?;
        }
    }
    Ok(())
}
/// Generates the increasing-sites plot: latency as n grows (3..11) for
/// tempo/atlas/fpaxos (f=1,2) and epaxos, at 256 clients per region.
#[allow(dead_code)]
fn increasing_sites_plot() -> Result<(), Report> {
    println!(">>>>>>>> INCREASING SITES <<<<<<<<");
    let results_dir =
        "/home/vitor.enes/thesis_results/results_increasing_sites";
    // fixed parameters
    let key_gen = KeyGen::ConflictPool {
        conflict_rate: 2,
        pool_size: 1,
    };
    let payload_size = 100;
    // (protocol, Some(f)); `None` means f is not filtered (epaxos)
    let protocols = vec![
        (Protocol::TempoAtomic, Some(1)),
        (Protocol::AtlasLocked, Some(1)),
        (Protocol::FPaxos, Some(1)),
        (Protocol::TempoAtomic, Some(2)),
        (Protocol::AtlasLocked, Some(2)),
        (Protocol::FPaxos, Some(2)),
        (Protocol::EPaxosLocked, None),
    ];
    let legend_order = vec![0, 2, 4, 1, 3, 5, 6];
    let ns = vec![3, 5, 7, 9, 11];
    let clients_per_region = 256;
    let error_bar = ErrorBar::Without;
    // load results
    let db = ResultsDB::load(results_dir).wrap_err("load results")?;
    // generate plot
    let path = String::from("plot_increasing_sites.pdf");
    let style_fun = None;
    let latency_precision = LatencyPrecision::Millis;
    fantoch_plot::increasing_sites_plot(
        ns,
        protocols,
        key_gen,
        clients_per_region,
        payload_size,
        Some(legend_order),
        style_fun,
        latency_precision,
        error_bar,
        PLOT_DIR,
        &path,
        &db,
    )?;
    Ok(())
}
/// Generates the fairness plot (n=5, 512 clients per region) and prints a
/// per-protocol latency-histogram summary.
#[allow(dead_code)]
fn fairness_plot() -> Result<(), Report> {
    println!(">>>>>>>> FAIRNESS <<<<<<<<");
    let results_dir =
        "/home/vitor.enes/eurosys_results/results_fairness_and_tail_latency";
    // fixed parameters
    let key_gen = KeyGen::ConflictPool {
        conflict_rate: 2,
        pool_size: 1,
    };
    let payload_size = 100;
    let protocols = vec![
        (Protocol::TempoAtomic, 1),
        (Protocol::AtlasLocked, 1),
        (Protocol::FPaxos, 1),
        (Protocol::TempoAtomic, 2),
        (Protocol::AtlasLocked, 2),
        (Protocol::FPaxos, 2),
        (Protocol::CaesarLocked, 2),
    ];
    let legend_order = vec![0, 2, 4, 1, 3, 5, 6];
    let n = 5;
    let clients_per_region = 512;
    let error_bar = ErrorBar::Without;
    // load results
    let db = ResultsDB::load(results_dir).wrap_err("load results")?;
    // create searches
    let searches: Vec<_> = protocols
        .into_iter()
        .map(|(protocol, f)| {
            let mut search = Search::new(n, f, protocol);
            match protocol {
                Protocol::FPaxos => {
                    // if fpaxos, don't filter by key gen as contention does not
                    // affect the results
                }
                Protocol::AtlasLocked
                | Protocol::TempoAtomic
                | Protocol::CaesarLocked => {
                    search.key_gen(key_gen);
                }
                _ => {
                    panic!("unsupported protocol: {:?}", protocol);
                }
            }
            // filter by clients per region and payload size in all protocols
            search
                .clients_per_region(clients_per_region)
                .payload_size(payload_size);
            search
        })
        .collect();
    // generate latency plot
    let path = String::from("plot_fairness.pdf");
    let style_fun = None;
    let latency_precision = LatencyPrecision::Millis;
    let results = fantoch_plot::fairness_plot(
        searches,
        Some(legend_order),
        style_fun,
        latency_precision,
        n,
        error_bar,
        PLOT_DIR,
        &path,
        &db,
        fmt_exp_data,
    )?;
    // print one formatted histogram line per (protocol, f) search
    for (search, histogram_fmt) in results {
        println!(
            "{:<7} f = {} | {}",
            PlotFmt::protocol_name(search.protocol),
            search.f,
            histogram_fmt,
        );
    }
    Ok(())
}
/// Generates the tail-latency cdf plot split in two panels: top at 256 and
/// bottom at 512 clients per region (n=5).
#[allow(dead_code)]
fn tail_latency_plot() -> Result<(), Report> {
    println!(">>>>>>>> TAIL LATENCY <<<<<<<<");
    let results_dir =
        "/home/vitor.enes/eurosys_results/results_fairness_and_tail_latency";
    // fixed parameters
    let key_gen = KeyGen::ConflictPool {
        conflict_rate: 2,
        pool_size: 1,
    };
    let payload_size = 100;
    let protocols = vec![
        (Protocol::TempoAtomic, 1),
        (Protocol::TempoAtomic, 2),
        (Protocol::AtlasLocked, 1),
        (Protocol::AtlasLocked, 2),
        (Protocol::CaesarLocked, 2),
        // (Protocol::FPaxos, 1),
        (Protocol::EPaxosLocked, 2),
    ];
    let n = 5;
    let clients_per_region_top = 256;
    let clients_per_region_bottom = 512;
    // load results
    let db = ResultsDB::load(results_dir).wrap_err("load results")?;
    // create searches: same protocol set, parameterized by client count
    let create_searches = |clients_per_region| {
        protocols
            .clone()
            .into_iter()
            .map(|(protocol, f)| {
                let mut search = Search::new(n, f, protocol);
                search
                    .key_gen(key_gen)
                    .payload_size(payload_size)
                    .clients_per_region(clients_per_region);
                search
            })
            .collect()
    };
    let top_searches = create_searches(clients_per_region_top);
    let bottom_searches = create_searches(clients_per_region_bottom);
    let x_range = Some((100.0, 15_000.0));
    // generate cdf plot
    let path = String::from("plot_tail_latency.pdf");
    let style_fun = None;
    let latency_precision = LatencyPrecision::Millis;
    let y_bbox_to_anchor = Some(1.56);
    // increase height
    let height_adjust = Some(1.5);
    fantoch_plot::cdf_plot_split(
        top_searches,
        bottom_searches,
        x_range,
        style_fun,
        latency_precision,
        y_bbox_to_anchor,
        height_adjust,
        PLOT_DIR,
        &path,
        &db,
    )?;
    Ok(())
}
/// Generates the increasing-load plots (n=5): two heatmaps (one per
/// conflict rate) and a split throughput-latency plot with the low-conflict
/// workload on top and the high-conflict one at the bottom.
#[allow(dead_code)]
fn increasing_load_plot() -> Result<(), Report> {
    println!(">>>>>>>> INCREASING LOAD <<<<<<<<");
    let results_dir =
        "/home/vitor.enes/eurosys_results/results_increasing_load";
    // fixed parameters
    let top_key_gen = KeyGen::ConflictPool {
        conflict_rate: 2,
        pool_size: 1,
    };
    let bottom_key_gen = KeyGen::ConflictPool {
        conflict_rate: 10,
        pool_size: 1,
    };
    let payload_size = 4096;
    let batch_max_size = 1;
    let n = 5;
    let leader = 1;
    // generate throughput-latency plot
    let clients_per_region = vec![
        32,
        512,
        1024,
        1024 * 2,
        1024 * 4,
        1024 * 8,
        1024 * 16,
        1024 * 20,
    ];
    // load results
    let db = ResultsDB::load(results_dir).wrap_err("load results")?;
    // applies the shared filters (key gen except for fpaxos, payload size,
    // batch max size) to a base search
    let search_refine = |search: &mut Search, key_gen: KeyGen| {
        match search.protocol {
            Protocol::FPaxos => {
                // if fpaxos, don't filter by key gen as
                // contention does not affect the results
            }
            Protocol::AtlasLocked
            | Protocol::TempoAtomic
            | Protocol::EPaxosLocked
            | Protocol::CaesarLocked
            | Protocol::Basic => {
                search.key_gen(key_gen);
            }
            _ => {
                panic!("unsupported protocol: {:?}", search.protocol);
            }
        }
        // filter by payload size and batch max size in all protocols
        search
            .payload_size(payload_size)
            .batch_max_size(batch_max_size);
    };
    let protocols = vec![
        (Protocol::TempoAtomic, 1),
        (Protocol::TempoAtomic, 2),
        (Protocol::AtlasLocked, 1),
        (Protocol::AtlasLocked, 2),
        (Protocol::FPaxos, 1),
        (Protocol::FPaxos, 2),
        (Protocol::CaesarLocked, 2),
        /*
        (Protocol::Basic, 1),
        (Protocol::EPaxosLocked, 2),
        */
    ];
    // adjust Caesar name to Caesar*
    let style_fun = |search: &Search| {
        let mut style = HashMap::new();
        if search.protocol == Protocol::CaesarLocked {
            style.insert(
                Style::Label,
                format!("{}*", PlotFmt::protocol_name(search.protocol)),
            );
        }
        style
    };
    // heatmap for the low-conflict (top) workload
    let path = format!("plot_increasing_load_heatmap_{}.pdf", top_key_gen);
    fantoch_plot::heatmap_plot_split(
        n,
        protocols.clone(),
        clients_per_region.clone(),
        top_key_gen,
        search_refine,
        Some(Box::new(style_fun)),
        leader,
        PLOT_DIR,
        &path,
        &db,
    )?;
    // heatmap for the high-conflict (bottom) workload
    let path = format!("plot_increasing_load_heatmap_{}.pdf", bottom_key_gen);
    fantoch_plot::heatmap_plot_split(
        n,
        protocols.clone(),
        clients_per_region.clone(),
        bottom_key_gen,
        search_refine,
        Some(Box::new(style_fun)),
        leader,
        PLOT_DIR,
        &path,
        &db,
    )?;
    let search_gen = |(protocol, f)| Search::new(n, f, protocol);
    let latency_precision = LatencyPrecision::Millis;
    let x_range = None;
    let y_range = Some((100.0, 1500.0));
    let y_axis = ThroughputYAxis::Latency(LatencyMetric::Average);
    let y_log_scale = true;
    let x_bbox_to_anchor = Some(0.46);
    let y_bbox_to_anchor = Some(1.42);
    let legend_column_spacing = Some(1.25);
    let left_margin = None;
    let width_adjust = None;
    let height_adjust = Some(1.0);
    let path = String::from("plot_increasing_load.pdf");
    fantoch_plot::throughput_something_plot_split(
        n,
        protocols,
        search_gen,
        clients_per_region,
        top_key_gen,
        bottom_key_gen,
        search_refine,
        Some(Box::new(style_fun)),
        latency_precision,
        x_range,
        y_range,
        y_axis,
        y_log_scale,
        x_bbox_to_anchor,
        y_bbox_to_anchor,
        legend_column_spacing,
        left_margin,
        width_adjust,
        height_adjust,
        PLOT_DIR,
        &path,
        &db,
    )?;
    Ok(())
}
/// Generates the batching plots (n=5, tempo vs fpaxos): per-setting heatmaps
/// and throughput-latency plots, a printed max-throughput summary, and the
/// final batching comparison plot across all settings.
#[allow(dead_code)]
fn batching_plot() -> Result<(), Report> {
    println!(">>>>>>>> BATCHING <<<<<<<<");
    let results_dir = "/home/vitor.enes/eurosys_results/results_batching";
    // fixed parameters
    let key_gen = KeyGen::ConflictPool {
        conflict_rate: 2,
        pool_size: 1,
    };
    let empty_key_gen = KeyGen::ConflictPool {
        conflict_rate: 0,
        pool_size: 1,
    };
    let n = 5;
    let tempo = (Protocol::TempoAtomic, 1);
    let fpaxos = (Protocol::FPaxos, 1);
    let protocols = vec![tempo, fpaxos];
    let tempo_batch_max_size = 10000;
    let fpaxos_batch_max_size = 10000;
    let search_gen = |(protocol, f)| Search::new(n, f, protocol);
    let settings = vec![
        // (batching, payload_size)
        (false, 256),
        (true, 256),
        (false, 1024),
        (true, 1024),
        (false, 4096),
        (true, 4096),
    ];
    // NOTE(review): `n` is re-declared with the same value here (shadowing
    // the `let n = 5` above) — redundant but harmless
    let n = 5;
    let leader = 1;
    // generate throughput-latency plot
    let clients_per_region = vec![
        32,
        512,
        1024,
        1024 * 2,
        1024 * 4,
        1024 * 8,
        1024 * 16,
        1024 * 20,
        1024 * 24,
        1024 * 28,
        1024 * 32,
        1024 * 36,
        1024 * 40,
        1024 * 44,
        1024 * 48,
        1024 * 52,
        1024 * 56,
        1024 * 60,
        1024 * 64,
    ];
    // load results
    let db = ResultsDB::load(results_dir).wrap_err("load results")?;
    for (batching, payload_size) in settings.clone() {
        let search_refine = |search: &mut Search, key_gen: KeyGen| {
            // filter by key gen payload size and batch max size in all
            // protocols
            search.key_gen(key_gen).payload_size(payload_size);
            // set batch max size if batching
            let batch_max_size = if batching {
                match search.protocol {
                    Protocol::TempoAtomic => tempo_batch_max_size,
                    Protocol::FPaxos => fpaxos_batch_max_size,
                    _ => panic!("unsupported protocol: {:?}", search.protocol),
                }
            } else {
                1
            };
            search.batch_max_size(batch_max_size);
        };
        // per-setting heatmap
        let path =
            format!("plot_batching_heatmap_{}_{}.pdf", batching, payload_size);
        let style_fun = None;
        fantoch_plot::heatmap_plot_split(
            n,
            protocols.clone(),
            clients_per_region.clone(),
            key_gen,
            search_refine,
            style_fun,
            leader,
            PLOT_DIR,
            &path,
            &db,
        )?;
        let style_fun = None;
        let latency_precision = LatencyPrecision::Millis;
        let x_range = None;
        let y_range = Some((100.0, 2000.0));
        let y_axis = ThroughputYAxis::Latency(LatencyMetric::Average);
        let y_log_scale = true;
        let x_bbox_to_anchor = None;
        let y_bbox_to_anchor = None;
        let legend_column_spacing = None;
        let left_margin = None;
        let width_adjust = None;
        let height_adjust = None;
        // per-setting throughput-latency plot; keep the max throughputs to
        // print the summary below
        let path = format!("plot_batching_{}_{}.pdf", batching, payload_size);
        let (max_throughputs, _) =
            fantoch_plot::throughput_something_plot_split(
                n,
                protocols.clone(),
                search_gen,
                clients_per_region.clone(),
                key_gen,
                empty_key_gen,
                search_refine,
                style_fun,
                latency_precision,
                x_range,
                y_range,
                y_axis,
                y_log_scale,
                x_bbox_to_anchor,
                y_bbox_to_anchor,
                legend_column_spacing,
                left_margin,
                width_adjust,
                height_adjust,
                PLOT_DIR,
                &path,
                &db,
            )?;
        for (search, max_throughput) in max_throughputs {
            let name = match search.protocol {
                Protocol::FPaxos => "fpaxos",
                Protocol::TempoAtomic => "tempo ",
                _ => unreachable!(),
            };
            println!(
                "R {} f = {} bms = {:<5} ps = {:<4}: {}",
                name,
                search.f,
                search.batch_max_size.unwrap(),
                search.payload_size.unwrap(),
                max_throughput
            );
        }
    }
    // create searches
    let searches: Vec<_> = vec![
        (tempo, tempo_batch_max_size),
        (fpaxos, fpaxos_batch_max_size),
    ]
    .into_iter()
    .map(|(search_gen_input, batch_max_size)| {
        (search_gen(search_gen_input), batch_max_size)
    })
    .collect();
    let style_fun = None;
    // NOTE(review): `format!` with no placeholders — `String::from` would
    // be the idiomatic form here
    let path = format!("plot_batching.pdf");
    let y_range = Some((0.0, 800.0));
    fantoch_plot::batching_plot(
        searches, style_fun, n, settings, y_range, PLOT_DIR, &path, &db,
    )?;
    Ok(())
}
/// Generates the intra-machine scalability plot: one search per zipf
/// coefficient, plotted across the given CPU counts.
#[allow(dead_code)]
fn scalability_plot() -> Result<(), Report> {
    let results_dir = "../results_scalability";
    // fixed parameters
    let shard_count = 1;
    let n = 3;
    let f = 1;
    let payload_size = 100;
    let keys_per_command = 1;
    let protocol = Protocol::TempoAtomic;
    let coefficients = vec![
        0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 5.0, 6.0,
        7.0, 8.0, 9.0, 10.0,
    ];
    let cpus = vec![2, 4, 6, 8, 12];
    // load results
    let db = ResultsDB::load(results_dir).wrap_err("load results")?;
    // build one search per zipf coefficient
    let mut searches = Vec::with_capacity(coefficients.len());
    for coefficient in coefficients {
        // key generator for this coefficient
        let key_gen = KeyGen::Zipf {
            total_keys_per_shard: 1_000_000,
            coefficient,
        };
        let mut search = Search::new(n, f, protocol);
        search
            .shard_count(shard_count)
            .key_gen(key_gen)
            .keys_per_command(keys_per_command)
            .payload_size(payload_size);
        searches.push(search);
    }
    fantoch_plot::intra_machine_scalability_plot(searches, n, cpus, &db)?;
    Ok(())
}
/// Generates the partial replication plots: throughput-latency curves per
/// (shard count, keys-per-command, latency metric) setting, followed by the
/// inter-machine scalability bar plot.
///
/// Fixes: no-interpolation `format!` replaced with `to_string`, and a
/// redundant closure around `search_gen` removed.
#[allow(dead_code)]
fn partial_replication_plot() -> Result<(), Report> {
    println!(">>>>>>>> PARTIAL REPLICATION <<<<<<<<");
    let results_dir =
        "/home/vitor.enes/eurosys_results/results_partial_replication";
    // fixed parameters
    let top_coefficient = 0.5;
    let bottom_coefficient = 0.7;
    let payload_size = 100;
    let n = 3;
    let f = 1;
    // generate throughput-latency plot
    let clients_per_region = vec![
        256,
        1024,
        1024 * 2,
        1024 * 3,
        1024 * 4,
        1024 * 5,
        1024 * 6,
        1024 * 8,
        1024 * 10,
        1024 * 12,
        1024 * 16,
        1024 * 20,
        1024 * 22,
        1024 * 24,
        1024 * 32,
        1024 * 34,
        1024 * 36,
        1024 * 40,
        1024 * 44,
        1024 * 48,
        1024 * 52,
        1024 * 64,
        1024 * 72,
        1024 * 80,
        1024 * 88,
        1024 * 96,
        1024 * 104,
        1024 * 112,
        1024 * 128,
        1024 * 136,
        1024 * 144,
    ];
    // (protocol, read-only percentage) pairs under comparison
    let protocols = vec![
        (Protocol::TempoAtomic, 0),
        (Protocol::AtlasLocked, 100),
        (Protocol::AtlasLocked, 95),
        (Protocol::AtlasLocked, 50),
    ];
    let search_gen = |(protocol, read_only_percentage)| {
        let mut search = Search::new(n, f, protocol);
        search.read_only_percentage(read_only_percentage);
        search
    };
    // style: AtlasLocked lines are labeled "Janus* w = X%" and colored by a
    // (protocol, f) pair chosen per read-only percentage
    let style_fun = |search: &Search| {
        let mut style = HashMap::new();
        match search.protocol {
            Protocol::TempoAtomic => {
                style.insert(
                    Style::Label,
                    PlotFmt::protocol_name(search.protocol).to_string(),
                );
            }
            Protocol::AtlasLocked => {
                let ro = search
                    .read_only_percentage
                    .expect("read-only percentage should be set in search");
                style.insert(Style::Label, format!("Janus* w = {}%", 100 - ro));
                let (protocol, f) = match ro {
                    100 => (Protocol::Basic, 2),
                    95 => (Protocol::TempoLocked, 1),
                    50 => (Protocol::TempoLocked, 2),
                    _ => panic!("unsupported read-only percentage: {:?}", ro),
                };
                style.insert(Style::Color, PlotFmt::color(protocol, f));
                style.insert(Style::Marker, PlotFmt::marker(protocol, f));
                style.insert(Style::Hatch, PlotFmt::hatch(protocol, f));
            }
            _ => panic!("unsupported protocol: {:?}", search.protocol),
        }
        style
    };
    // load results
    let db = ResultsDB::load(results_dir).wrap_err("load results")?;
    for (y_axis, y_range) in vec![
        (
            ThroughputYAxis::Latency(LatencyMetric::Average),
            Some((150.0, 310.0)),
        ),
        (
            ThroughputYAxis::Latency(LatencyMetric::Percentile(0.99)),
            Some((150.0, 810.0)),
        ),
        (
            ThroughputYAxis::Latency(LatencyMetric::Percentile(0.999)),
            Some((150.0, 810.0)),
        ),
    ] {
        for (shard_count, keys_per_command, x_range) in vec![
            (1, 2, Some((0.0, 400.0))),
            (2, 2, Some((0.0, 400.0))),
            (4, 2, Some((0.0, 700.0))),
            (6, 2, Some((0.0, 1000.0))),
        ] {
            let search_refine = |search: &mut Search, coefficient: f64| {
                let key_gen = KeyGen::Zipf {
                    coefficient,
                    total_keys_per_shard: 1_000_000,
                };
                search
                    .key_gen(key_gen)
                    .shard_count(shard_count)
                    .keys_per_command(keys_per_command)
                    .payload_size(payload_size);
            };
            let latency_precision = LatencyPrecision::Millis;
            let y_log_scale = false;
            let x_bbox_to_anchor = Some(0.45);
            let y_bbox_to_anchor = None;
            let legend_column_spacing = None;
            let left_margin = Some(0.15);
            let width_adjust = Some(-1.75);
            let height_adjust = None;
            let path = format!(
                "plot_partial_replication_{}_{}_k{}.pdf",
                y_axis.name(),
                shard_count,
                keys_per_command
            );
            let style_fun: Option<
                Box<dyn Fn(&Search) -> HashMap<Style, String>>,
            > = Some(Box::new(style_fun));
            fantoch_plot::throughput_something_plot_split(
                n,
                protocols.clone(),
                search_gen,
                clients_per_region.clone(),
                top_coefficient,
                bottom_coefficient,
                search_refine,
                style_fun,
                latency_precision,
                x_range,
                y_range,
                y_axis,
                y_log_scale,
                x_bbox_to_anchor,
                y_bbox_to_anchor,
                legend_column_spacing,
                left_margin,
                width_adjust,
                height_adjust,
                PLOT_DIR,
                &path,
                &db,
            )?;
        }
    }
    // create searches for the final bar plot
    let searches: Vec<_> = protocols.into_iter().map(search_gen).collect();
    let style_fun: Option<Box<dyn Fn(&Search) -> HashMap<Style, String>>> =
        Some(Box::new(style_fun));
    let settings = vec![
        (2, 2, top_coefficient),
        (2, 2, bottom_coefficient),
        (4, 2, top_coefficient),
        (4, 2, bottom_coefficient),
        (6, 2, top_coefficient),
        (6, 2, bottom_coefficient),
    ];
    let y_range = Some((0.0, 1000.0));
    let path = "plot_partial_replication.pdf".to_string();
    fantoch_plot::inter_machine_scalability_plot(
        searches, style_fun, n, settings, y_range, PLOT_DIR, &path, &db,
    )?;
    Ok(())
}
#[allow(dead_code)]
fn partial_replication_all() -> Result<(), Report> {
let results_dir = "../results_partial_replication";
// fixed parameters
let n = 3;
let mut key_gens = Vec::new();
for (coefficient, x_range, y_range) in vec![
(0.5, Some((0.0, 700.0)), Some((150.0, 400.0))),
(0.7, Some((0.0, 700.0)), Some((150.0, 400.0))),
] {
let key_gen = KeyGen::Zipf {
coefficient,
total_keys_per_shard: 1_000_000,
};
key_gens.push((key_gen, x_range, y_range));
}
let payload_size = 100;
let protocols = vec![Protocol::AtlasLocked, Protocol::TempoAtomic];
let latency_precision = LatencyPrecision::Millis;
let shard_combinations = vec![
// shard_count, shards_per_command
// (1, 1),
// (1, 2),
(2, 2),
(4, 2),
(6, 2),
];
// load results
let db = ResultsDB::load(results_dir).wrap_err("load results")?;
let clients_per_region = vec![
256,
1024,
1024 * 2,
1024 * 3,
1024 * 4,
1024 * 5,
1024 * 6,
1024 * 8,
1024 * 10,
1024 * 12,
1024 * 16,
1024 * 20,
1024 * 22,
1024 * 24,
1024 * 32,
1024 * 34,
1024 * 36,
1024 * 40,
1024 * 44,
1024 * 48,
1024 * 52,
1024 * 64,
1024 * 72,
1024 * 80,
1024 * 88,
1024 * 96,
1024 * 104,
1024 * 112,
1024 * 128,
1024 * 136,
1024 * 144,
];
let search_refine = |search: &mut Search, read_only_percentage: usize| {
match search.protocol {
Protocol::TempoAtomic => {
// if tempo atomic, don't filter by read-only percentage as
// reads are not treated in any special way
// there, and thus, it does not affect the
// results
}
Protocol::AtlasLocked | Protocol::TempoLocked => {
search.read_only_percentage(read_only_percentage);
}
_ => {
panic!("unsupported protocol: {:?}", search.protocol);
}
}
};
for read_only_percentage in vec![0, 100, 95, 50] {
for (key_gen, x_range, y_range) in key_gens.clone() {
// generate all-combo throughput-something plot
for y_axis in vec![
ThroughputYAxis::Latency(LatencyMetric::Average),
ThroughputYAxis::Latency(LatencyMetric::Percentile(0.99)),
ThroughputYAxis::Latency(LatencyMetric::Percentile(0.999)),
ThroughputYAxis::CPU,
] {
let path = format!(
"throughput_{}_n{}_{}_r{}.pdf",
y_axis.name(),
n,
key_gen,
read_only_percentage
);
// create searches
let searches = shard_combinations
.clone()
.into_iter()
.flat_map(|(shard_count, keys_per_command)| {
protocol_combinations(n, protocols.clone())
.into_iter()
.map(move |(protocol, f)| {
let mut search = Search::new(n, f, protocol);
search
.shard_count(shard_count)
.key_gen(key_gen)
.keys_per_command(keys_per_command)
.payload_size(payload_size);
search_refine(
&mut search,
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | true |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_plot/src/bin/plot_sim_output.rs | fantoch_plot/src/bin/plot_sim_output.rs | use color_eyre::eyre::WrapErr;
use color_eyre::Report;
use fantoch_plot::plot::pyplot::PyPlot;
use pyo3::prelude::*;
use std::collections::{BTreeMap, HashMap};
use std::fmt;
// file with the output of simulation
const SIM_OUTPUT: &str = "sim.out";
// folder where all plots will be stored
const PLOT_DIR: Option<&str> = Some("plots");
/// Tracks which kind of line was parsed last, driving the small state
/// machine in `main` that decides how to interpret the next line.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
enum LastLine {
    /// No line parsed yet (initial state).
    None,
    /// The previous line was a `POOL_SIZE: …` line.
    PoolSize,
    /// The previous line was a `CONFLICTS: …` line.
    Conflicts,
    /// The previous line was a result entry.
    Result,
}
/// Identifies one simulation configuration; used as the key under which all
/// parsed results are grouped.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
struct Config {
    // from the preceding `POOL_SIZE` line
    pool_size: usize,
    // conflict percentage, from the preceding `CONFLICTS` line
    conflicts: usize,
    // protocol name, e.g. "Caesar"
    protocol: String,
    // `n` from the result header — presumably the number of processes
    n: usize,
    // `f` from the result header — presumably the number of tolerated faults
    f: usize,
    // `c` from the result header: number of clients (see plot legends)
    c: usize,
}
/// Summary statistics extracted from one histogram entry of the simulation
/// output (values are latencies in ms, per the plot axis labels).
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
struct Histogram {
    // average
    avg: usize,
    // 99th percentile
    p99: usize,
    // 99.9th percentile
    p99_9: usize,
}
/// All metrics parsed for a single `Config`; each field is filled in (at
/// most once) by `parse_result_entry` as the matching entry type shows up.
#[derive(Clone, Debug, Default)]
struct Data {
    // `None` when the simulator reported an "(empty)" histogram
    wait_condition_delay: Option<Histogram>,
    commit_latency: Option<Histogram>,
    execution_latency: Option<Histogram>,
    execution_delay: Option<Histogram>,
    fast_path_rate: Option<f64>,
}
/// Which of the parsed metrics a plot shows; its `Debug` impl yields the
/// snake_case name used in output file names.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum PlotType {
    WaitConditionDelay,
    CommitLatency,
    ExecutionLatency,
    ExecutionDelay,
}
impl PlotType {
    /// Human-readable name used as the plot title.
    fn title(&self) -> &str {
        match self {
            PlotType::WaitConditionDelay => "Wait Condition Delay",
            PlotType::CommitLatency => "Commit Latency",
            PlotType::ExecutionLatency => "Execution Latency",
            PlotType::ExecutionDelay => "Execution Delay",
        }
    }
}
impl fmt::Debug for PlotType {
    /// Formats as the snake_case name used in output file names.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self {
            PlotType::WaitConditionDelay => "wait_condition_delay",
            PlotType::CommitLatency => "commit_latency",
            PlotType::ExecutionLatency => "execution_latency",
            PlotType::ExecutionDelay => "execution_delay",
        };
        write!(f, "{}", name)
    }
}
/// Which statistic of a `Histogram` a plot shows; its `Debug` impl yields
/// the short name used in file names and axis labels.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum MetricType {
    Avg,
    P99,
    P99_9,
}
impl fmt::Debug for MetricType {
    /// Formats as the short metric name used in file names and labels.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self {
            MetricType::Avg => "avg",
            MetricType::P99 => "p99",
            MetricType::P99_9 => "p99.9",
        };
        write!(f, "{}", name)
    }
}
/// Parses the simulation output file line by line with a small state
/// machine (`LastLine`) and then generates all plots from the parsed data.
fn main() -> Result<(), Report> {
    let sim_out = std::fs::read_to_string(SIM_OUTPUT)
        .wrap_err("error when reading the simulation output file")?;
    // parser state: the meaning of each line depends on the line before it
    let mut last_line = LastLine::None;
    let mut current_pool_size = None;
    let mut current_conflicts = None;
    let mut all_data = HashMap::new();
    for line in sim_out.lines() {
        // every line is `<header>: <value>`; assumes exactly one ':' per
        // line — TODO confirm the simulator never emits extra colons
        let parts: Vec<_> = line.split(":").collect();
        assert_eq!(parts.len(), 2);
        match last_line {
            // this line should be a `PoolSize`
            LastLine::None => {
                parse_pool_size(parts, &mut current_pool_size, &mut last_line)
            }
            // this line should be a `Conflicts`
            LastLine::PoolSize => {
                parse_conflicts(parts, &mut current_conflicts, &mut last_line)
            }
            // this line should be a `Result`
            LastLine::Conflicts => parse_result(
                parts,
                &mut current_pool_size,
                &mut current_conflicts,
                &mut all_data,
                &mut last_line,
            ),
            // this line could be a new `PoolSize`, `Conflicts`, or another
            // `Result`
            LastLine::Result => match parts[0] {
                "POOL_SIZE" => parse_pool_size(
                    parts,
                    &mut current_pool_size,
                    &mut last_line,
                ),
                "CONFLICTS" => parse_conflicts(
                    parts,
                    &mut current_conflicts,
                    &mut last_line,
                ),
                _ => parse_result(
                    parts,
                    &mut current_pool_size,
                    &mut current_conflicts,
                    &mut all_data,
                    &mut last_line,
                ),
            },
        }
    }
    plot_data(all_data)
}
/// Drives plot generation: for every (pool size, plot type, metric)
/// combination, draws one bar plot across all conflict rates and client
/// counts.
fn plot_data(all_data: HashMap<Config, Data>) -> Result<(), Report> {
    let plot_types = vec![
        PlotType::WaitConditionDelay,
        PlotType::CommitLatency,
        PlotType::ExecutionLatency,
        PlotType::ExecutionDelay,
    ];
    let metric_types =
        vec![MetricType::Avg, MetricType::P99, MetricType::P99_9];
    // parameters of the simulation runs being plotted; these must match the
    // configs present in `all_data` or `plot` will panic on lookup
    let pool_sizes = vec![100, 50, 10, 1];
    let conflicts = vec![0, 2, 10, 30, 50, 100];
    let protocol = String::from("Caesar");
    let n = 5;
    let f = 2;
    let cs = vec![64, 128, 256, 512];
    for pool_size in pool_sizes.clone() {
        for plot_type in plot_types.clone() {
            for metric_type in metric_types.clone() {
                plot(
                    plot_type,
                    metric_type,
                    pool_size,
                    conflicts.clone(),
                    protocol.clone(),
                    n,
                    f,
                    cs.clone(),
                    &all_data,
                )?;
            }
        }
    }
    Ok(())
}
/// Draws one bar plot: for a fixed `pool_size`, the `metric_type` of
/// `plot_type` as a function of the conflict rate, with one bar per client
/// count in `cs`.
///
/// Fixes: passed `all_data` directly instead of a needless `&all_data`
/// double reference, and renamed the closure variable that shadowed the
/// outer `conflicts` vector.
fn plot(
    plot_type: PlotType,
    metric_type: MetricType,
    pool_size: usize,
    conflicts: Vec<usize>,
    protocol: String,
    n: usize,
    f: usize,
    cs: Vec<usize>,
    all_data: &HashMap<Config, Data>,
) -> Result<(), Report> {
    // for each client count, gather one value per conflict rate
    let data: Vec<_> = cs
        .into_iter()
        .map(|c| {
            let values: Vec<_> = conflicts
                .clone()
                .into_iter()
                .map(|conflict_rate| {
                    let config = Config {
                        pool_size,
                        conflicts: conflict_rate,
                        protocol: protocol.clone(),
                        n,
                        f,
                        c,
                    };
                    if let Some(value) = get_plot_value(
                        plot_type,
                        metric_type,
                        &config,
                        all_data,
                    ) {
                        value
                    } else {
                        // there can only be no value if the plot type is wait
                        // condition
                        assert_eq!(plot_type, PlotType::WaitConditionDelay);
                        0
                    }
                })
                .collect();
            (c, values)
        })
        .collect();
    let title = format!("{} (pool size = {:?})", plot_type.title(), pool_size);
    let output_file =
        format!("{}_{:?}_{:?}.pdf", pool_size, metric_type, plot_type);
    latency_plot(title, metric_type, conflicts, data, PLOT_DIR, &output_file)
}
/// Renders a grouped bar plot via matplotlib (through pyo3): one block of
/// bars per conflict rate, one bar per client count, and saves it to
/// `output_dir`/`output_file`.
///
/// `data` must contain exactly `MAX_COMBINATIONS` (client count, values)
/// pairs, each with one value per entry of `conflicts`.
fn latency_plot(
    title: String,
    metric_type: MetricType,
    conflicts: Vec<usize>,
    data: Vec<(usize, Vec<usize>)>,
    output_dir: Option<&str>,
    output_file: &str,
) -> Result<(), Report> {
    const BLOCK_WIDTH: f64 = 10f64;
    const MAX_COMBINATIONS: usize = 4;
    // 80% of `BLOCK_WIDTH ` when `MAX_COMBINATIONS` is reached
    const BAR_WIDTH: f64 = BLOCK_WIDTH * 0.8 / MAX_COMBINATIONS as f64;
    assert_eq!(data.len(), MAX_COMBINATIONS);
    // compute x: one block position per conflict rate
    let x: Vec<_> = (0..conflicts.len())
        .map(|i| i as f64 * BLOCK_WIDTH)
        .collect();
    // we need to shift all to the left by half of the number of combinations
    let cs_count = data.len();
    let shift_left = cs_count as f64 / 2f64;
    // we also need to shift half bar to the right
    let shift_right = 0.5;
    // attach to each combination its horizontal offset within the block
    let data = data.into_iter().enumerate().map(|(index, c)| {
        // compute index according to shifts
        let base = index as f64 - shift_left + shift_right;
        // compute combination's shift
        let shift = base * BAR_WIDTH;
        (shift, c)
    });
    // start python
    let gil = Python::acquire_gil();
    let py = gil.python();
    let plt = PyPlot::new(py)?;
    // start plot
    let (fig, ax) = fantoch_plot::start_plot(py, &plt, None)?;
    // keep track of the number of plotted instances
    let mut plotted = 0;
    let mut legends = BTreeMap::new();
    for (legend_order, (shift, (c, y))) in data.into_iter().enumerate() {
        // compute x: shift all values by `shift`
        let x: Vec<_> = x.iter().map(|&x| x + shift).collect();
        // bar style
        let kwargs = fantoch_plot::pydict!(
            py,
            ("width", BAR_WIDTH),
            ("edgecolor", "black"),
            ("linewidth", 1),
        );
        let line = ax.bar(x, y, Some(kwargs))?;
        plotted += 1;
        // save line with its legend order
        legends.insert(legend_order, (line, format!("clients = {}", c)));
    }
    // set xticks at the (unshifted) block centers
    ax.set_xticks(x, None)?;
    let labels: Vec<_> = conflicts
        .into_iter()
        .map(|conflict| format!("{}%", conflict))
        .collect();
    ax.set_xticklabels(labels, None)?;
    // set labels
    let xlabel = "conflict rate";
    ax.set_xlabel(xlabel, None)?;
    let ylabel = format!("{:?} latency (ms)", metric_type);
    ax.set_ylabel(&ylabel, None)?;
    // set title
    ax.set_title(&title)?;
    // legend
    let y_bbox_to_anchor = Some(1.24);
    fantoch_plot::add_legend(
        plotted,
        Some(legends),
        None,
        y_bbox_to_anchor,
        None,
        py,
        &ax,
    )?;
    // end plot
    fantoch_plot::end_plot(
        plotted > 0,
        output_dir,
        output_file,
        py,
        &plt,
        Some(fig),
    )?;
    Ok(())
}
/// Looks up the `metric_type` statistic of the `plot_type` histogram for
/// `config`, returning `None` when that histogram was reported empty.
///
/// Panics if `config` has no parsed data at all (a bug in the parse phase).
fn get_plot_value(
    plot_type: PlotType,
    metric_type: MetricType,
    config: &Config,
    all_data: &HashMap<Config, Data>,
) -> Option<usize> {
    // idiom: `unwrap_or_else` replaces the manual if-let-else-panic
    let data = all_data
        .get(config)
        .unwrap_or_else(|| panic!("config {:?} should exist", config));
    // select the histogram matching the requested plot type
    let histogram = match plot_type {
        PlotType::WaitConditionDelay => &data.wait_condition_delay,
        PlotType::CommitLatency => &data.commit_latency,
        PlotType::ExecutionLatency => &data.execution_latency,
        PlotType::ExecutionDelay => &data.execution_delay,
    };
    histogram.as_ref().map(|histogram| match metric_type {
        MetricType::Avg => histogram.avg,
        MetricType::P99 => histogram.p99,
        MetricType::P99_9 => histogram.p99_9,
    })
}
/// Parses a `POOL_SIZE: <usize>` line, recording the value and advancing
/// the parser state.
fn parse_pool_size(
    parts: Vec<&str>,
    current_pool_size: &mut Option<usize>,
    last_line: &mut LastLine,
) {
    assert_eq!(parts[0], "POOL_SIZE");
    *current_pool_size = Some(parse_usize(parts[1], "pool size"));
    // remember what kind of line this was for the state machine in `main`
    *last_line = LastLine::PoolSize;
}
/// Parses a `CONFLICTS: <usize>` line, recording the value and advancing
/// the parser state.
fn parse_conflicts(
    parts: Vec<&str>,
    current_conflicts: &mut Option<usize>,
    last_line: &mut LastLine,
) {
    assert_eq!(parts[0], "CONFLICTS");
    *current_conflicts = Some(parse_usize(parts[1], "conflicts"));
    // remember what kind of line this was for the state machine in `main`
    *last_line = LastLine::Conflicts;
}
/// Parses a result line of the form `<config>|<entry type>: <entry>`,
/// storing the parsed entry under its `Config` and advancing the parser
/// state.
fn parse_result(
    parts: Vec<&str>,
    current_pool_size: &mut Option<usize>,
    current_conflicts: &mut Option<usize>,
    all_data: &mut HashMap<Config, Data>,
    last_line: &mut LastLine,
) {
    // the header (before ':') carries the config and the entry type,
    // separated by '|'
    let (header, entry) = (parts[0], parts[1]);
    let header_parts: Vec<_> = header.split("|").collect();
    // parse the config half, combining it with pool size / conflicts
    // remembered from the preceding lines
    let config =
        parse_config(header_parts[0], current_pool_size, current_conflicts);
    // parse and record the entry itself
    parse_result_entry(header_parts[1], entry, config, all_data);
    // remember what kind of line this was for the state machine in `main`
    *last_line = LastLine::Result;
}
/// Parses `to_parse` (after trimming surrounding whitespace) as a `usize`,
/// panicking with a message mentioning `what` when it is not a number.
fn parse_usize(to_parse: &str, what: &str) -> usize {
    to_parse
        .trim()
        .parse()
        .unwrap_or_else(|e| panic!("error parsing {}: {:?}", what, e))
}
/// Parses a config header of the (whitespace-separated) form
/// `<protocol> n = <n> f = <f> c = <c>`, combining it with the pool size
/// and conflicts remembered from the preceding lines.
///
/// Panics if pool size/conflicts have not been seen yet or the header does
/// not match the expected shape.
fn parse_config(
    config: &str,
    current_pool_size: &mut Option<usize>,
    current_conflicts: &mut Option<usize>,
) -> Config {
    let parts: Vec<_> = config.split_whitespace().collect();
    // get pool size and conflicts
    let pool_size = current_pool_size
        .as_ref()
        .cloned()
        .expect("pool size should have been set");
    let conflicts = current_conflicts
        .as_ref()
        .cloned()
        .expect("conflicts should have been set");
    // parse protocol
    let protocol = parts[0].to_string();
    // parse n
    assert_eq!(parts[1], "n");
    assert_eq!(parts[2], "=");
    let n = parse_usize(parts[3], "n");
    // parse f
    assert_eq!(parts[4], "f");
    assert_eq!(parts[5], "=");
    let f = parse_usize(parts[6], "f");
    // parse c
    assert_eq!(parts[7], "c");
    assert_eq!(parts[8], "=");
    let c = parse_usize(parts[9], "c");
    Config {
        pool_size,
        conflicts,
        protocol,
        n,
        f,
        c,
    }
}
/// Fills in the `Data` slot of `config` (creating it on first sight) with
/// the parsed `entry`, dispatching on `entry_type`.
///
/// Panics if the same entry type shows up twice for one config or if the
/// entry type is unknown. Fixes a needless `mut` on the binding returned
/// by the entry API (it is already an `&mut Data`).
fn parse_result_entry(
    entry_type: &str,
    entry: &str,
    config: Config,
    all_data: &mut HashMap<Config, Data>,
) {
    // entry API: single lookup, inserting a default `Data` when missing
    let data = all_data.entry(config).or_default();
    match entry_type.trim() {
        "wait condition delay" => {
            assert!(data.wait_condition_delay.is_none());
            // this histogram may legitimately be "(empty)"
            if let Some(histogram) = parse_histogram(entry) {
                data.wait_condition_delay = Some(histogram);
            }
        }
        "commit latency" => {
            assert!(data.commit_latency.is_none());
            let histogram = parse_histogram(entry)
                .expect("commit latency histogram must exist");
            data.commit_latency = Some(histogram);
        }
        "execution latency" => {
            assert!(data.execution_latency.is_none());
            let histogram = parse_histogram(entry)
                .expect("execution latency histogram must exist");
            data.execution_latency = Some(histogram);
        }
        "execution delay" => {
            assert!(data.execution_delay.is_none());
            let histogram = parse_histogram(entry)
                .expect("execution delay histogram must exist");
            data.execution_delay = Some(histogram);
        }
        "fast path rate" => {
            assert!(data.fast_path_rate.is_none());
            let fast_path_rate = entry
                .trim()
                .parse()
                .expect("fast path rate should be a float");
            data.fast_path_rate = Some(fast_path_rate);
        }
        entry_type => {
            panic!("unsupported entry type: {:?}", entry_type);
        }
    }
}
/// Parses one histogram entry of the simulation output into a `Histogram`,
/// returning `None` for entries reported as "(empty)".
///
/// Assumes whitespace-separated `key=value` pairs where index 0 is `avg`,
/// index 3 is `p99` and index 4 is `p99.9` (two entries between `avg` and
/// `p99` are skipped) — TODO confirm against the simulator's output format.
fn parse_histogram(histogram: &str) -> Option<Histogram> {
    let parts: Vec<_> = histogram.split_whitespace().collect();
    if parts[0] == "(empty)" {
        return None;
    }
    // checks that `entry` is `<entry_type>=<value>` and parses the value
    let parse_histogram_entry = |entry_type: &str, entry: &str| {
        let parts: Vec<_> = entry.split("=").collect();
        assert_eq!(parts[0], entry_type);
        parse_usize(parts[1], entry_type)
    };
    // parse avg, p99 and p99.9
    let avg = parse_histogram_entry("avg", parts[0]);
    let p99 = parse_histogram_entry("p99", parts[3]);
    let p99_9 = parse_histogram_entry("p99.9", parts[4]);
    let histogram = Histogram { avg, p99, p99_9 };
    Some(histogram)
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_bote/src/lib.rs | fantoch_bote/src/lib.rs | #![deny(rust_2018_idioms)]
// This module contains the definition of `Protocol`, `ClientPlacement` and
// `ProtocolStats`.
pub mod protocol;
// This module contains the definition of `Search`.
pub mod search;
// Re-exports.
pub use search::{FTMetric, RankingParams, Search, SearchInput};
use fantoch::metrics::{Histogram, Stats};
use fantoch::planet::{Planet, Region};
/// Back-of-the-envelope latency calculator: estimates client-perceived
/// latencies of leaderless and leader-based protocols using the
/// inter-region ping latencies provided by `Planet`.
#[derive(Debug)]
pub struct Bote {
    planet: Planet,
}
impl Bote {
    /// Creates a `Bote` backed by the default `Planet`.
    #[allow(clippy::new_without_default)]
    pub fn new() -> Self {
        let planet = Planet::new();
        Self::from(planet)
    }
    /// Creates a `Bote` backed by the given `Planet`.
    pub fn from(planet: Planet) -> Self {
        Self { planet }
    }
    /// Computes stats for a leaderless-based protocol with a given
    /// `quorum_size`.
    ///
    /// Takes as input two lists of regions:
    /// - one list being the regions where `servers` are
    /// - one list being the regions where `clients` are
    pub fn leaderless<'a>(
        &self,
        servers: &[Region],
        clients: &'a [Region],
        quorum_size: usize,
    ) -> Vec<(&'a Region, u64)> {
        clients
            .iter()
            .map(|client| {
                // compute the latency from this client to the closest region
                let (client_to_closest, closest) =
                    self.nth_closest(1, client, servers);
                // compute the latency from such region to its closest quorum
                let closest_to_quorum =
                    self.quorum_latency(closest, servers, quorum_size);
                // client perceived latency is the sum of both
                (client, client_to_closest + closest_to_quorum)
            })
            .collect()
    }
    /// Computes stats for a leader-based protocol with a given `quorum_size`
    /// for some `leader`.
    ///
    /// Takes as input two lists of regions:
    /// - one list being the regions where `servers` are
    /// - one list being the regions where `clients` are
    pub fn leader<'a>(
        &self,
        leader: &Region,
        servers: &[Region],
        clients: &'a [Region],
        quorum_size: usize,
    ) -> Vec<(&'a Region, u64)> {
        // compute the latency from leader to its closest quorum
        let leader_to_quorum =
            self.quorum_latency(leader, servers, quorum_size);
        // compute perceived latency for each client
        clients
            .iter()
            .map(|client| {
                // compute the latency from client to leader
                // (`leader` is already a reference; no extra borrow needed)
                let client_to_leader =
                    self.planet.ping_latency(client, leader).unwrap();
                // client perceived latency is the sum of both
                (client, client_to_leader + leader_to_quorum)
            })
            .collect()
    }
    /// Computes the best leader (for some criteria) and its stats for a
    /// leader-based protocol with a given `quorum_size`.
    ///
    /// Takes as input two lists of regions:
    /// - one list being the regions where `servers` are
    /// - one list being the regions where `clients` are
    ///
    /// The best leader is select based on sort criteria `stats_sort_by`.
    pub fn best_leader<'a>(
        &self,
        servers: &'a [Region],
        clients: &[Region],
        quorum_size: usize,
        stats_sort_by: Stats,
    ) -> (&'a Region, Histogram) {
        // compute all stats
        let mut stats = self.all_leaders_stats(servers, clients, quorum_size);
        // select the best leader based on `stats_sort_by`
        stats.sort_unstable_by(|(_la, sa), (_lb, sb)| match stats_sort_by {
            Stats::Mean => sa.mean().cmp(&sb.mean()),
            Stats::COV => sa.cov().cmp(&sb.cov()),
            Stats::MDTM => sa.mdtm().cmp(&sb.mdtm()),
        });
        // get the lowest (in terms of `compare`) stat
        stats
            .into_iter()
            .next()
            .expect("the best leader should exist")
    }
    /// Computes stats for a leader-based protocol with a given `quorum_size`
    /// for each possible leader.
    ///
    /// Takes as input two lists of regions:
    /// - one list being the regions where `servers` are
    /// - one list being the regions where `clients` are
    fn all_leaders_stats<'a>(
        &self,
        servers: &'a [Region],
        clients: &[Region],
        quorum_size: usize,
    ) -> Vec<(&'a Region, Histogram)> {
        // compute stats for each possible leader
        servers
            .iter()
            .map(|leader| {
                // compute stats
                let latency_per_client =
                    self.leader(leader, servers, clients, quorum_size);
                let stats = Histogram::from(
                    latency_per_client
                        .into_iter()
                        .map(|(_client, latency)| latency),
                );
                (leader, stats)
            })
            .collect()
    }
    /// Computes the latency to closest quorum of size `quorum_size`.
    /// It takes as input the considered source region `from` and all available
    /// `regions`.
    fn quorum_latency(
        &self,
        from: &Region,
        regions: &[Region],
        quorum_size: usize,
    ) -> u64 {
        // `from`/`regions` are already references; no extra borrow needed
        let (latency, _) = self.nth_closest(quorum_size, from, regions);
        *latency
    }
    /// Compute the latency to the nth closest region.
    /// This same method can be used to find the:
    /// - latency to the closest quorum
    /// - latency to the closest region
    fn nth_closest(
        &self,
        nth: usize,
        from: &Region,
        regions: &[Region],
    ) -> &(u64, Region) {
        self.planet
            // sort by distance
            .sorted(from)
            .unwrap()
            .iter()
            // keep only the regions in this configuration
            .filter(|(_, to)| regions.contains(to))
            // select the nth region
            .nth(nth - 1)
            .unwrap()
    }
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashMap;
#[test]
// checks `quorum_latency` against known ping latencies between the five
// GCP europe-west regions, for quorum sizes 2 and 3
fn quorum_latencies() {
    // create bote
    let bote = Bote::new();
    // considered regions
    let w1 = Region::new("europe-west1");
    let w2 = Region::new("europe-west2");
    let w3 = Region::new("europe-west3");
    let w4 = Region::new("europe-west4");
    let w6 = Region::new("europe-west6");
    let regions =
        vec![w1.clone(), w2.clone(), w3.clone(), w4.clone(), w6.clone()];
    // quorum size 2
    let quorum_size = 2;
    assert_eq!(bote.quorum_latency(&w1, &regions, quorum_size), 7);
    assert_eq!(bote.quorum_latency(&w2, &regions, quorum_size), 9);
    assert_eq!(bote.quorum_latency(&w3, &regions, quorum_size), 7);
    assert_eq!(bote.quorum_latency(&w4, &regions, quorum_size), 7);
    assert_eq!(bote.quorum_latency(&w6, &regions, quorum_size), 7);
    // quorum size 3
    let quorum_size = 3;
    assert_eq!(bote.quorum_latency(&w1, &regions, quorum_size), 8);
    assert_eq!(bote.quorum_latency(&w2, &regions, quorum_size), 10);
    assert_eq!(bote.quorum_latency(&w3, &regions, quorum_size), 7);
    assert_eq!(bote.quorum_latency(&w4, &regions, quorum_size), 7);
    assert_eq!(bote.quorum_latency(&w6, &regions, quorum_size), 14);
}
#[test]
// checks `leaderless` aggregate stats when every region is both a server
// and a client, for quorum sizes 3 and 4
fn leaderless() {
    // create bote
    let bote = Bote::new();
    // considered regions
    let w1 = Region::new("europe-west1");
    let w2 = Region::new("europe-west2");
    let w3 = Region::new("europe-west3");
    let w4 = Region::new("europe-west4");
    let w6 = Region::new("europe-west6");
    let regions =
        vec![w1.clone(), w2.clone(), w3.clone(), w4.clone(), w6.clone()];
    // quorum size 3
    let quorum_size = 3;
    let stats = bote.leaderless(&regions, &regions, quorum_size);
    let histogram = Histogram::from(
        stats.into_iter().map(|(_client, latency)| latency),
    );
    // w1 -> 9, w2 -> 11, w3 -> 8, w4 -> 8, w6 -> 15
    assert_eq!(histogram.mean().round(), "9.2");
    assert_eq!(histogram.cov().round(), "0.3");
    assert_eq!(histogram.mdtm().round(), "2.2");
    // quorum size 4
    let quorum_size = 4;
    let stats = bote.leaderless(&regions, &regions, quorum_size);
    let histogram = Histogram::from(
        stats.into_iter().map(|(_client, latency)| latency),
    );
    // w1 -> 11, w2 -> 14, w3 -> 9, w4 -> 10, w6 -> 15
    assert_eq!(histogram.mean().round(), "10.8");
    assert_eq!(histogram.cov().round(), "0.2");
    assert_eq!(histogram.mdtm().round(), "2.2");
}
#[test]
// same as `leaderless`, but with clients placed in only a subset of the
// server regions
fn leaderless_clients_subset() {
    // create bote
    let bote = Bote::new();
    // considered regions
    let w1 = Region::new("europe-west1");
    let w2 = Region::new("europe-west2");
    let w3 = Region::new("europe-west3");
    let w4 = Region::new("europe-west4");
    let w6 = Region::new("europe-west6");
    let servers =
        vec![w1.clone(), w2.clone(), w3.clone(), w4.clone(), w6.clone()];
    // subset of clients: w1 w2
    let clients = vec![w1.clone(), w2.clone()];
    // quorum size 3
    let quorum_size = 3;
    let stats = bote.leaderless(&servers, &clients, quorum_size);
    let histogram = Histogram::from(
        stats.into_iter().map(|(_client, latency)| latency),
    );
    // w1 -> 9, w2 -> 11
    assert_eq!(histogram.mean().round(), "9.0");
    assert_eq!(histogram.cov().round(), "0.2");
    assert_eq!(histogram.mdtm().round(), "1.0");
    // quorum size 4
    let quorum_size = 4;
    let stats = bote.leaderless(&servers, &clients, quorum_size);
    let histogram = Histogram::from(
        stats.into_iter().map(|(_client, latency)| latency),
    );
    // w1 -> 11, w2 -> 14
    assert_eq!(histogram.mean().round(), "11.5");
    assert_eq!(histogram.cov().round(), "0.2");
    assert_eq!(histogram.mdtm().round(), "1.5");
    // subset of clients: w1 w3 w6
    let clients = vec![w1.clone(), w3.clone(), w6.clone()];
    // quorum size 3
    let quorum_size = 3;
    let stats = bote.leaderless(&servers, &clients, quorum_size);
    let histogram = Histogram::from(
        stats.into_iter().map(|(_client, latency)| latency),
    );
    // w1 -> 9, w3 -> 8, w6 -> 15
    assert_eq!(histogram.mean().round(), "9.7");
    assert_eq!(histogram.cov().round(), "0.4");
    assert_eq!(histogram.mdtm().round(), "2.9");
    // quorum size 4
    let quorum_size = 4;
    let stats = bote.leaderless(&servers, &clients, quorum_size);
    let histogram = Histogram::from(
        stats.into_iter().map(|(_client, latency)| latency),
    );
    // w1 -> 11, w3 -> 9, w6 -> 15
    assert_eq!(histogram.mean().round(), "10.7");
    assert_eq!(histogram.cov().round(), "0.3");
    assert_eq!(histogram.mdtm().round(), "2.2");
}
#[test]
// checks per-leader stats computed by `all_leaders_stats` when every
// region is both a server and a client
fn leader() {
    // create bote
    let bote = Bote::new();
    // considered regions
    let w1 = Region::new("europe-west1");
    let w2 = Region::new("europe-west2");
    let w3 = Region::new("europe-west3");
    let w4 = Region::new("europe-west4");
    let w6 = Region::new("europe-west6");
    let regions =
        vec![w1.clone(), w2.clone(), w3.clone(), w4.clone(), w6.clone()];
    // quorum size 2:
    let quorum_size = 2;
    let leader_to_stats: HashMap<_, _> = bote
        .all_leaders_stats(&regions, &regions, quorum_size)
        .into_iter()
        .collect();
    // quorum latency for w1 is 7
    // w1 -> 8, w2 -> 17, w3 -> 15, w4 -> 14, w6 -> 21
    let stats = leader_to_stats.get(&w1).unwrap();
    assert_eq!(stats.mean().round(), "14.8");
    assert_eq!(stats.cov().round(), "0.3");
    assert_eq!(stats.mdtm().round(), "3.4");
    // quorum latency for w2 is 9
    // w1 -> 19, w2 -> 10, w3 -> 22, w4 -> 18, w6 -> 28
    let stats = leader_to_stats.get(&w2).unwrap();
    assert_eq!(stats.mean().round(), "19.2");
    assert_eq!(stats.cov().round(), "0.4");
    assert_eq!(stats.mdtm().round(), "4.6");
    // quorum latency for w3 is 7
    // w1 -> 15, w2 -> 20, w3 -> 8, w4 -> 14, w6 -> 14
    let stats = leader_to_stats.get(&w3).unwrap();
    assert_eq!(stats.mean().round(), "14.0");
    assert_eq!(stats.cov().round(), "0.3");
    assert_eq!(stats.mdtm().round(), "2.8");
}
#[test]
// same as `leader`, but with clients placed in only a subset of the server
// regions
fn leader_clients_subset() {
    // create bote
    let bote = Bote::new();
    // considered regions
    let w1 = Region::new("europe-west1");
    let w2 = Region::new("europe-west2");
    let w3 = Region::new("europe-west3");
    let w4 = Region::new("europe-west4");
    let w6 = Region::new("europe-west6");
    let servers =
        vec![w1.clone(), w2.clone(), w3.clone(), w4.clone(), w6.clone()];
    // quorum size 2:
    let quorum_size = 2;
    // subset of clients: w1 w2
    let clients = vec![w1.clone(), w2.clone()];
    let leader_to_stats: HashMap<_, _> = bote
        .all_leaders_stats(&servers, &clients, quorum_size)
        .into_iter()
        .collect();
    // quorum latency for w1 is 7
    // w1 -> 8, w2 -> 17
    let stats = leader_to_stats.get(&w1).unwrap();
    assert_eq!(stats.mean().round(), "12.0");
    assert_eq!(stats.cov().round(), "0.6");
    assert_eq!(stats.mdtm().round(), "5.0");
    // quorum latency for w2 is 9
    // w1 -> 19, w2 -> 10
    let stats = leader_to_stats.get(&w2).unwrap();
    assert_eq!(stats.mean().round(), "14.0");
    assert_eq!(stats.cov().round(), "0.5");
    assert_eq!(stats.mdtm().round(), "5.0");
    // quorum latency for w3 is 7
    // w1 -> 15, w2 -> 20
    let stats = leader_to_stats.get(&w3).unwrap();
    assert_eq!(stats.mean().round(), "17.5");
    assert_eq!(stats.cov().round(), "0.2");
    assert_eq!(stats.mdtm().round(), "2.5");
    // subset of clients: w1 w3 w6
    let clients = vec![w1.clone(), w3.clone(), w6.clone()];
    let leader_to_stats: HashMap<_, _> = bote
        .all_leaders_stats(&servers, &clients, quorum_size)
        .into_iter()
        .collect();
    // quorum latency for w1 is 7
    // w1 -> 8, w3 -> 15, w6 -> 21
    let stats = leader_to_stats.get(&w1).unwrap();
    assert_eq!(stats.mean().round(), "14.3");
    assert_eq!(stats.cov().round(), "0.5");
    assert_eq!(stats.mdtm().round(), "4.9");
    // quorum latency for w2 is 9
    // w1 -> 19, w3 -> 22, w6 -> 28
    let stats = leader_to_stats.get(&w2).unwrap();
    assert_eq!(stats.mean().round(), "23.0");
    assert_eq!(stats.cov().round(), "0.2");
    assert_eq!(stats.mdtm().round(), "3.3");
    // quorum latency for w3 is 7
    // w1 -> 15, w3 -> 8, w6 -> 14
    let stats = leader_to_stats.get(&w3).unwrap();
    assert_eq!(stats.mean().round(), "12.0");
    assert_eq!(stats.cov().round(), "0.4");
    assert_eq!(stats.mdtm().round(), "3.3");
}
    #[test]
    fn best_latency_leader() {
        // `best_leader` with `Stats::Mean` should select the leader with the
        // lowest mean latency; per the `leader` test above, that is w3.
        // create bote
        let bote = Bote::new();
        // considered regions
        let w1 = Region::new("europe-west1");
        let w2 = Region::new("europe-west2");
        let w3 = Region::new("europe-west3");
        let w4 = Region::new("europe-west4");
        let w6 = Region::new("europe-west6");
        let regions =
            vec![w1.clone(), w2.clone(), w3.clone(), w4.clone(), w6.clone()];
        // quorum size 2:
        let quorum_size = 2;
        let (_, stats) =
            bote.best_leader(&regions, &regions, quorum_size, Stats::Mean);
        assert_eq!(stats.mean().round(), "14.0");
        assert_eq!(stats.cov().round(), "0.3");
        assert_eq!(stats.mdtm().round(), "2.8");
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_bote/src/search.rs | fantoch_bote/src/search.rs | use crate::protocol::Protocol::{Atlas, EPaxos, FPaxos};
use crate::protocol::{ClientPlacement, ProtocolStats};
use crate::Bote;
use fantoch::elapsed;
use fantoch::metrics::{Histogram, Stats, F64};
use fantoch::planet::{Planet, Region};
use permutator::Combination;
use rayon::prelude::*;
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, BTreeSet, HashMap};
use std::fmt;
use std::fs::File;
use std::io::{BufReader, BufWriter};
use std::iter::FromIterator;
/// Evaluates expression `$x`, printing the wall-clock time it took (labeled
/// with `$s`) and returning `$x`'s result.
macro_rules! timed {
    ( $s:expr, $x:expr ) => {{
        let (time, result) = elapsed!($x);
        println!("{}: {:?}", $s, time);
        result
    }};
}
// config and stats: a configuration (set of server regions) and its stats
type ConfigAndStats = (BTreeSet<Region>, ProtocolStats);
// configs: mapping from `n` to list of configurations of such size
type Configs = HashMap<usize, Vec<ConfigAndStats>>;
// all configs: mapping from clients to `Configs`
type AllConfigs = Vec<(Vec<Region>, Configs)>;
// ranked: mapping from `n` to list of configurations of such size
// - these configurations are already a subset of all configurations that passed
//   some filter and have a score associated with it
type Ranked<'a> = HashMap<usize, Vec<(F64, &'a ConfigAndStats)>>;
// all ranked: mapping from clients to `Ranked`
type AllRanked<'a> = Vec<(&'a Vec<Region>, Ranked<'a>)>;
/// Pre-computed protocol stats for every candidate configuration, grouped by
/// the set of client regions they were computed against.
#[derive(Debug, PartialEq, Deserialize, Serialize)]
pub struct Search {
    all_configs: AllConfigs,
}
impl Search {
    /// Loads a previously saved `Search` from disk if one exists for these
    /// parameters; otherwise computes stats for all configurations with
    /// sizes in `min_n..=max_n` and, if `save_search` is set, persists the
    /// result. `lat_dir` optionally points to a directory with latency files
    /// used to build the `Planet`.
    pub fn new(
        min_n: usize,
        max_n: usize,
        search_input: SearchInput,
        save_search: bool,
        lat_dir: Option<&str>,
    ) -> Self {
        // get filename
        let filename = Self::filename(min_n, max_n, &search_input);
        timed!("get saved search", Self::get_saved_search(&filename))
            .unwrap_or_else(|| {
                // create planet
                let planet = if let Some(lat_dir) = lat_dir {
                    Planet::from(lat_dir)
                } else {
                    Planet::new()
                };
                // get regions for servers and clients
                let (servers, clients) =
                    search_input.get_inputs(max_n, &planet);
                // create bote
                let bote = Bote::from(planet);
                // create empty config and get all configs
                let all_configs = timed!(
                    "compute all configs",
                    Self::compute_all_configs(
                        min_n, max_n, servers, clients, bote
                    )
                );
                // create a new `Search` instance
                let search = Self { all_configs };
                // save it if `save_search`
                if save_search {
                    timed!(
                        "save search",
                        Self::save_search(&filename, &search)
                    );
                }
                // and return it
                search
            })
    }
    /// Ranks all configurations and returns "evolving" chains of
    /// configurations with n = 3, 5, 7, 9, 11 and 13, where each
    /// configuration is a superset of the previous one, sorted by
    /// descending total score.
    pub fn sorted_evolving_configs(
        &self,
        p: &RankingParams,
    ) -> Vec<(F64, Vec<&ConfigAndStats>, &Vec<Region>)> {
        // the nested chain below is hard-coded for sizes 3..=13, so these
        // bounds are required
        assert_eq!(p.min_n, 3);
        assert_eq!(p.max_n, 13);
        // first we should rank all configs
        let all_ranked = timed!("rank all", self.rank_all(p));
        // show how many ranked configs we have
        let count: usize = all_ranked
            .iter()
            .map(|(_, configs)| {
                configs.iter().map(|(_, css)| css.len()).sum::<usize>()
            })
            .sum();
        println!("config count: {}", count);
        // create result variable
        let mut configs = BTreeMap::new();
        // TODO Transform what's below in an iterator.
        // With access to `p.min_n` and `p.max_n` it should be possible.
        let mut i = 0;
        let count = all_ranked.len();
        all_ranked.into_iter().for_each(|(clients, ranked)| {
            i += 1;
            if i % 100 == 0 {
                println!("{} of {}", i, count);
            }
            // each nesting level extends the chain with a superset config of
            // the next size; the final score is the sum over the chain
            Self::configs(&ranked, 3).for_each(|(score3, cs3)| {
                Self::super_configs(&ranked, 5, cs3, p).for_each(
                    |(score5, cs5)| {
                        Self::super_configs(&ranked, 7, cs5, p).for_each(
                            |(score7, cs7)| {
                                Self::super_configs(&ranked, 9, cs7, p)
                                    .for_each(|(score9, cs9)| {
                                        Self::super_configs(
                                            &ranked, 11, cs9, p,
                                        )
                                        .for_each(|(score11, cs11)| {
                                            Self::super_configs(
                                                &ranked, 13, cs11, p,
                                            )
                                            .for_each(|(score13, cs13)| {
                                                let score = score3
                                                    + score5
                                                    + score7
                                                    + score9
                                                    + score11
                                                    + score13;
                                                let css = vec![
                                                    cs3, cs5, cs7, cs9, cs11,
                                                    cs13,
                                                ];
                                                configs
                                                    .entry(score)
                                                    .or_insert_with(Vec::new)
                                                    .push((
                                                        score, css, clients,
                                                    ));
                                            });
                                        });
                                    });
                            },
                        );
                    },
                );
            });
        });
        // `configs` is sorted ASC
        configs
            .into_iter()
            // sort DESC (highest score first)
            .rev()
            .flat_map(|(_, configs)| configs)
            .collect()
    }
    /// Formats `stats` for every client placement: for each `f` in
    /// `1..=max_f(n)` it shows Atlas and FPaxos, followed by EPaxos (which
    /// always tolerates a minority, hence `f = 0` as a don't-care).
    pub fn stats_fmt(stats: &ProtocolStats, n: usize) -> String {
        ClientPlacement::all()
            .map(|placement| {
                // shows stats for all possible f
                let fmt: String = (1..=Self::max_f(n))
                    .map(|f| {
                        let atlas = stats.fmt(Atlas, f, placement);
                        let fpaxos = stats.fmt(FPaxos, f, placement);
                        format!("{} {} ", atlas, fpaxos)
                    })
                    .collect();
                // add epaxos
                let epaxos = stats.fmt(EPaxos, 0, placement);
                format!("{}{} ", fmt, epaxos)
            })
            .collect()
    }
    /// Computes, for every set of client regions in `all_clients`, the stats
    /// of all configurations with sizes in `min_n..=max_n`. If `servers` is
    /// `None`, candidate server regions default to the client regions.
    /// Client sets are processed in parallel (rayon).
    fn compute_all_configs(
        min_n: usize,
        max_n: usize,
        servers: Option<Vec<Region>>,
        all_clients: Vec<Vec<Region>>,
        bote: Bote,
    ) -> AllConfigs {
        // get the count of client configurations
        let clients_count = all_clients.len();
        all_clients
            // PARALLEL
            .into_par_iter()
            .enumerate()
            .inspect(|(i, _)| {
                // show progress
                if i % 100 == 0 {
                    println!("{} of {}", i, clients_count);
                }
            })
            .map(|(_, clients)| {
                // compute servers: if we have something, use what we got,
                // otherwise use the set of clients
                let servers = servers.as_ref().unwrap_or(&clients);
                // compute `Configs` for this set of clients
                let configs = Self::compute_configs(
                    min_n, max_n, &servers, &clients, &bote,
                );
                (clients, configs)
            })
            .collect()
    }
    /// For every odd `n` in `min_n..=max_n`, computes the stats of every
    /// `n`-combination of `regions` against `clients`.
    fn compute_configs(
        min_n: usize,
        max_n: usize,
        regions: &[Region],
        clients: &[Region],
        bote: &Bote,
    ) -> Configs {
        // step 2 keeps `n` odd (assuming odd `min_n`)
        (min_n..=max_n)
            .step_by(2)
            .map(|n| {
                let configs = regions
                    .combination(n)
                    .map(vec_cloned)
                    .map(|config| {
                        // compute stats
                        let stats = Self::compute_stats(&config, clients, bote);
                        // turn config into a `BTreeSet`
                        let config = BTreeSet::from_iter(config.into_iter());
                        (config, stats)
                    })
                    .collect();
                (n, configs)
            })
            .collect()
    }
    /// Computes latency histograms for Atlas and FPaxos (for each `f` in
    /// `1..=max_f(n)`) and for EPaxos, both for the input clients and for
    /// clients colocated with the servers (`config`).
    pub fn compute_stats(
        config: &[Region],
        all_clients: &[Region],
        bote: &Bote,
    ) -> ProtocolStats {
        // compute n
        let n = config.len();
        let mut stats = ProtocolStats::new();
        // compute best cov fpaxos f=1 leader
        // - this leader will then be used for both f=1 and f=2 stats
        let f = 1;
        let quorum_size = FPaxos.quorum_size(n, f);
        let (leader, _) =
            bote.best_leader(config, all_clients, quorum_size, Stats::COV);
        // compute stats for both `clients` and colocated clients i.e. `config`
        let which_clients = vec![
            (ClientPlacement::Input, all_clients),
            (ClientPlacement::Colocated, config),
        ];
        for (placement, clients) in which_clients {
            for f in 1..=Self::max_f(n) {
                // compute altas quorum size
                let quorum_size = Atlas.quorum_size(n, f);
                // compute atlas stats
                let atlas = bote.leaderless(config, clients, quorum_size);
                let atlas = Histogram::from(
                    atlas.into_iter().map(|(_client, latency)| latency),
                );
                stats.insert(Atlas, f, placement, atlas);
                // compute fpaxos quorum size
                let quorum_size = FPaxos.quorum_size(n, f);
                // compute fpaxos stats using the best-cov leader found above
                let fpaxos = bote.leader(leader, config, clients, quorum_size);
                let fpaxos = Histogram::from(
                    fpaxos.into_iter().map(|(_client, latency)| latency),
                );
                stats.insert(FPaxos, f, placement, fpaxos);
            }
            // compute epaxos quorum size
            let quorum_size = EPaxos.quorum_size(n, 0);
            // compute epaxos stats
            let epaxos = bote.leaderless(config, clients, quorum_size);
            let epaxos = Histogram::from(
                epaxos.into_iter().map(|(_client, latency)| latency),
            );
            stats.insert(EPaxos, 0, placement, epaxos);
        }
        // return all stats
        stats
    }
    /// Ranks the configurations of every client set in parallel (rayon).
    fn rank_all(&self, params: &RankingParams) -> AllRanked<'_> {
        self.all_configs
            // PARALLEL
            .par_iter()
            .map(|(clients, configs)| (clients, Self::rank(configs, params)))
            .collect()
    }
    /// Keeps only configurations with size in `params.min_n..=params.max_n`
    /// that pass `compute_score`'s validity check, attaching their score.
    fn rank<'a>(configs: &'a Configs, params: &RankingParams) -> Ranked<'a> {
        configs
            .iter()
            .filter_map(|(&n, css)| {
                // only keep in the map `n` values between `min_n` and `max_n`
                if n >= params.min_n && n <= params.max_n {
                    let css = css
                        .iter()
                        .filter_map(|cs| {
                            // get stats
                            let stats = &cs.1;
                            // only keep valid configurations
                            match Self::compute_score(n, stats, params) {
                                (true, score) => Some((score, cs)),
                                _ => None,
                            }
                        })
                        .collect();
                    Some((n, css))
                } else {
                    None
                }
            })
            .collect()
    }
    // TODO `configs` and `super_configs` are super similar
    /// Returns the ranked configurations of size `n`; panics if size `n` was
    /// never ranked.
    #[allow(clippy::iter_cloned_collect)]
    fn configs<'a>(
        ranked: &Ranked<'a>,
        n: usize,
    ) -> impl Iterator<Item = (F64, &'a ConfigAndStats)> {
        ranked
            .get(&n)
            .unwrap_or_else(|| {
                panic!("configs for n = {} should be ranked!", n)
            })
            .iter()
            .cloned()
            // TODO can we avoid collecting here?
            // I wasn't able to do it due to lifetime issues
            .collect::<Vec<_>>()
            .into_iter()
    }
    /// return ranked configurations such that:
    /// - their size is `n`
    /// - are a superset of `previous_config`
    /// - growing to them decreases Atlas mean latency by at least
    ///   `params.min_mean_decrease` (see `min_mean_decrease`)
    fn super_configs<'a>(
        ranked: &Ranked<'a>,
        n: usize,
        (prev_config, prev_stats): &ConfigAndStats,
        params: &RankingParams,
    ) -> impl Iterator<Item = (F64, &'a ConfigAndStats)> {
        ranked
            .get(&n)
            .unwrap_or_else(|| {
                panic!("super configs for n = {} should be ranked!", n)
            })
            .iter()
            .filter(|(_, (config, stats))| {
                config.is_superset(prev_config)
                    && Self::min_mean_decrease(stats, prev_stats, n, params)
            })
            .cloned()
            // TODO can we avoid collecting here?
            // I wasn't able to do it due to lifetime issues
            .collect::<Vec<_>>()
            .into_iter()
    }
    /// Compute the mean latency decrease for Atlas f = 1 when the number of
    /// sites increases.
    ///
    /// Returns true if, for every `f` considered by the fault-tolerance
    /// metric, Atlas at size `n` improves on Atlas at the previous size by at
    /// least `params.min_mean_decrease`.
    fn min_mean_decrease(
        stats: &ProtocolStats,
        prev_stats: &ProtocolStats,
        n: usize,
        params: &RankingParams,
    ) -> bool {
        // compare only for the number of faults tolerated by `prev_stats`
        // (sizes grow in steps of 2, so the previous configuration has n - 2)
        let n = n - 2;
        let placement = ClientPlacement::Input;
        params.ft_metric.fs(n).into_iter().all(|f| {
            let atlas = stats.get(Atlas, f, placement);
            let prev_atlas = prev_stats.get(Atlas, f, placement);
            prev_atlas.mean_improv(atlas) >= params.min_mean_decrease
        })
    }
fn compute_score(
n: usize,
stats: &ProtocolStats,
params: &RankingParams,
) -> (bool, F64) {
// compute score and check if it is a valid configuration
let mut valid = true;
let mut score = F64::zero();
// f values accounted for when computing score and config validity
let fs = params.ft_metric.fs(n);
// placement is input
let placement = ClientPlacement::Input;
for f in fs {
// get atlas and fpaxos stats
let atlas = stats.get(Atlas, f, placement);
let fpaxos = stats.get(FPaxos, f, placement);
// compute mean latency improvement of atlas wrto to fpaxos
let fpaxos_mean_improv = fpaxos.mean_improv(atlas);
// compute fairness improvement of atlas wrto to cov fpaxos
let fpaxos_fairness_improv = fpaxos.cov_improv(atlas);
// check if it's a valid config, i.e. there's enough:
// - `min_mean_improv`
// - `min_fairness_improv`
valid = valid
&& fpaxos_mean_improv >= params.min_mean_fpaxos_improv
&& fpaxos_fairness_improv >= params.min_fairness_fpaxos_improv;
// get epaxos stats
let epaxos = stats.get(EPaxos, 0, placement);
// compute mean latency improvement of atlas wrto to epaxos
let epaxos_mean_improv = epaxos.mean_improv(atlas);
// make sure we improve on EPaxos for large n
if n == 11 || n == 13 {
valid = valid
&& epaxos_mean_improv >= params.min_mean_epaxos_improv;
}
// update score: give extra weigth for epaxos improv
let weight = F64::new(30_f64);
score += fpaxos_mean_improv + (weight * epaxos_mean_improv);
}
(valid, score)
}
fn max_f(n: usize) -> usize {
let max_f = 2;
std::cmp::min(n / 2 as usize, max_f)
}
fn filename(
min_n: usize,
max_n: usize,
search_input: &SearchInput,
) -> String {
format!("{}_{}_{}.data", min_n, max_n, search_input)
}
    /// Tries to deserialize a previously saved `Search` from file `name`.
    /// Returns `None` if the file cannot be opened (e.g. it does not exist);
    /// panics if the file exists but cannot be deserialized.
    fn get_saved_search(name: &str) -> Option<Search> {
        // open the file in read-only
        File::open(name)
            .ok()
            // create a buf reader
            .map(BufReader::new)
            // and try to deserialize
            .map(|reader| {
                bincode::deserialize_from(reader)
                    .expect("error deserializing search")
            })
    }
fn save_search(name: &str, search: &Search) {
// if the file does not exist it will be created, otherwise truncated
File::create(name)
.ok()
// create a buf writer
.map(BufWriter::new)
// and try to serialize
.map(|writer| {
bincode::serialize_into(writer, search)
.expect("error serializing search")
})
.unwrap_or_else(|| panic!("couldn't save seach"));
}
}
/// Identifies which regions are considered for the search and where clients
/// are deployed.
pub enum SearchInput {
    /// search within selected 13 regions, clients deployed in the 13 regions
    R13C13,
    /// search within 2018 17 regions, clients deployed in the 17 regions
    R17C17,
    /// search within the 20 regions, clients deployed in the 20 regions
    R20C20,
    /// search within 2018 17 regions, clients deployed in the MAX regions
    /// - e.g. if the max number of regions is 11, clients are deployed in
    ///   those 11 regions
    R17CMaxN,
}
impl fmt::Display for SearchInput {
    /// Formats the input variant with its canonical name (also used as part
    /// of the saved-search file name).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self {
            SearchInput::R13C13 => "R13C13",
            SearchInput::R17C17 => "R17C17",
            SearchInput::R20C20 => "R20C20",
            SearchInput::R17CMaxN => "R17CMaxN",
        };
        write!(f, "{}", name)
    }
}
impl SearchInput {
    /// It returns a tuple where the:
    /// - 1st component is the set of regions where to look for a configuration
    ///   (`None` means: use each client set itself as candidate servers)
    /// - 2nd component is a list of client locations
    fn get_inputs(
        &self,
        max_n: usize,
        planet: &Planet,
    ) -> (Option<Vec<Region>>, Vec<Vec<Region>>) {
        // selected 13-regions
        let regions13 = vec![
            Region::new("asia-southeast1"),
            Region::new("europe-west4"),
            Region::new("southamerica-east1"),
            Region::new("australia-southeast1"),
            Region::new("europe-west2"),
            Region::new("asia-south1"),
            Region::new("us-east1"),
            Region::new("asia-northeast1"),
            Region::new("europe-west1"),
            Region::new("asia-east1"),
            Region::new("us-west1"),
            Region::new("europe-west3"),
            Region::new("us-central1"),
        ];
        // 17-regions available in the end of 2018
        let regions17 = vec![
            Region::new("asia-east1"),
            Region::new("asia-northeast1"),
            Region::new("asia-south1"),
            Region::new("asia-southeast1"),
            Region::new("australia-southeast1"),
            Region::new("europe-north1"),
            Region::new("europe-west1"),
            Region::new("europe-west2"),
            Region::new("europe-west3"),
            Region::new("europe-west4"),
            Region::new("northamerica-northeast1"),
            Region::new("southamerica-east1"),
            Region::new("us-central1"),
            Region::new("us-east1"),
            Region::new("us-east4"),
            Region::new("us-west1"),
            Region::new("us-west2"),
        ];
        // all regions
        let mut all_regions = planet.regions();
        all_regions.sort();
        match self {
            SearchInput::R13C13 => {
                let clients = vec![regions13.clone()];
                (Some(regions13), clients)
            }
            SearchInput::R17C17 => {
                let clients = vec![regions17.clone()];
                (Some(regions17), clients)
            }
            SearchInput::R20C20 => {
                let clients = vec![all_regions.clone()];
                (Some(all_regions), clients)
            }
            SearchInput::R17CMaxN => {
                // one client set per `max_n`-combination of the 17 regions
                let clients =
                    regions17.combination(max_n).map(vec_cloned).collect();
                (None, clients)
            }
        }
    }
}
/// Turns a vector of references into a vector of owned values by cloning
/// each element.
fn vec_cloned<T: Clone>(vec: Vec<&T>) -> Vec<T> {
    let mut owned = Vec::with_capacity(vec.len());
    for item in vec {
        owned.push(item.clone());
    }
    owned
}
/// Thresholds and settings used to rank and filter configurations.
pub struct RankingParams {
    // minimum mean-latency improvement of Atlas over FPaxos
    min_mean_fpaxos_improv: F64,
    // minimum mean-latency improvement of Atlas over EPaxos (large n only)
    min_mean_epaxos_improv: F64,
    // minimum fairness (cov) improvement of Atlas over FPaxos
    min_fairness_fpaxos_improv: F64,
    // minimum mean-latency decrease required when growing a configuration
    min_mean_decrease: F64,
    // minimum and maximum configuration sizes considered
    min_n: usize,
    max_n: usize,
    // which f values are accounted for
    ft_metric: FTMetric,
}
impl RankingParams {
pub fn new(
min_mean_fpaxos_improv: isize,
min_mean_epaxos_improv: isize,
min_fairness_fpaxos_improv: isize,
min_mean_decrease: isize,
min_n: usize,
max_n: usize,
ft_metric: FTMetric,
) -> Self {
Self {
min_mean_fpaxos_improv: F64::new(min_mean_fpaxos_improv as f64),
min_mean_epaxos_improv: F64::new(min_mean_epaxos_improv as f64),
min_fairness_fpaxos_improv: F64::new(
min_fairness_fpaxos_improv as f64,
),
min_mean_decrease: F64::new(min_mean_decrease as f64),
min_n,
max_n,
ft_metric,
}
}
}
/// metric considered for fault tolerance
pub enum FTMetric {
    F1,
    F1F2,
}
impl FTMetric {
    /// Returns the `f` values (number of tolerated faults) this metric
    /// accounts for in a configuration of size `n`: `1..=f_max`, where
    /// `f_max` is capped by a minority of `n`.
    fn fs(&self, n: usize) -> Vec<usize> {
        // (the previous `n / 2 as usize` cast was a no-op: `as` binds
        // tighter than `/`, so it only cast the literal `2`)
        let minority = n / 2;
        let max_f = match self {
            FTMetric::F1 => 1,
            FTMetric::F1F2 => 2,
        };
        (1..=std::cmp::min(minority, max_f)).collect()
    }
}
#[cfg(test)]
mod tests {
use super::*;
    #[test]
    fn search() {
        // End-to-end search over the selected 13 regions: picks the
        // best-scored evolving chain of configurations and checks its score,
        // its stats formatting for n = 5, and the final sorted config.
        // define some search params
        let min_n = 3;
        let max_n = 13;
        // originally `search_input = SearchInput::R17CMaxN`
        let search_input = SearchInput::R13C13;
        let save_search = false;
        // create search
        let search = Search::new(min_n, max_n, search_input, save_search, None);
        // define search params:
        // originally 30 was used for the `min_mean_improv`;
        // here we want the test to be run asap,
        // so we restrict the search the maximum possible
        let min_mean_fpaxos_improv = 110;
        let min_mean_epaxos_improv = 35;
        let min_fairness_fpaxos_improv = 0;
        let min_mean_decrease = 15;
        let ft_metric = FTMetric::F1F2;
        // create ranking params
        let params = RankingParams::new(
            min_mean_fpaxos_improv,
            min_mean_epaxos_improv,
            min_fairness_fpaxos_improv,
            min_mean_decrease,
            min_n,
            max_n,
            ft_metric,
        );
        // select the best config
        let (score, css, _clients) = search
            .sorted_evolving_configs(&params)
            .into_iter()
            .take(1) // take only the best one
            .next()
            .unwrap();
        // the final sorted config
        let mut sorted_config = Vec::new();
        for (config, stats) in css {
            // update sorted config
            for region in config {
                if !sorted_config.contains(region) {
                    sorted_config.push(region.clone())
                }
            }
            // check stats_fmt for n = 5
            let n = 5;
            if config.len() == n {
                let expected = "af1=avg=271 std=54 p95=339 p99=347 p99.9=347 p99.99=347 min=202 max=347 ff1=avg=419 std=99 p95=525 p99=583 p99.9=583 p99.99=583 min=202 max=583 af2=avg=314 std=42 p95=358 p99=391 p99.9=391 p99.99=391 min=265 max=391 ff2=avg=428 std=99 p95=534 p99=592 p99.9=592 p99.99=592 min=211 max=592 e=avg=271 std=54 p95=339 p99=347 p99.9=347 p99.99=347 min=202 max=347 af1C=avg=234 std=36 p95=280 p99=280 p99.9=280 p99.99=280 min=202 max=280 ff1C=avg=410 std=128 p95=525 p99=525 p99.9=525 p99.99=525 min=202 max=525 af2C=avg=280 std=14 p95=302 p99=302 p99.9=302 p99.99=302 min=265 max=302 ff2C=avg=419 std=128 p95=534 p99=534 p99.9=534 p99.99=534 min=211 max=534 eC=avg=234 std=36 p95=280 p99=280 p99.9=280 p99.99=280 min=202 max=280 ";
                assert_eq!(Search::stats_fmt(stats, n), expected);
            }
        }
        // check score
        let expected_score = "10360.3";
        assert_eq!(score.round(), expected_score);
        // check config
        let expected_config = vec![
            Region::new("asia-southeast1"),
            Region::new("europe-west4"),
            Region::new("southamerica-east1"),
            Region::new("australia-southeast1"),
            Region::new("europe-west2"),
            Region::new("asia-south1"),
            Region::new("us-east1"),
            Region::new("asia-northeast1"),
            Region::new("europe-west1"),
            Region::new("asia-east1"),
            Region::new("us-west1"),
            Region::new("europe-west3"),
            Region::new("us-central1"),
        ];
        assert_eq!(sorted_config, expected_config);
    }
#[test]
fn search_save() {
let min_n = 3;
let max_n = 5;
let search_input = SearchInput::R17C17;
let filename = Search::filename(min_n, max_n, &search_input);
// create search and save it
let save_search = true;
let expected =
Search::new(min_n, max_n, search_input, save_search, None);
// get saved search and assert it is the same search
let saved = Search::get_saved_search(&filename);
assert!(saved.is_some());
assert_eq!(saved.unwrap(), expected);
// remove search file
assert!(std::fs::remove_file(filename).is_ok());
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_bote/src/protocol.rs | fantoch_bote/src/protocol.rs | use fantoch::metrics::Histogram;
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
/// Replication protocols whose latency is estimated.
pub enum Protocol {
    FPaxos,
    EPaxos,
    Atlas,
}
impl Protocol {
    /// One-letter protocol tag used when building stats keys.
    pub fn short_name(&self) -> &str {
        match self {
            Protocol::FPaxos => "f",
            Protocol::EPaxos => "e",
            Protocol::Atlas => "a",
        }
    }
    /// Computes the protocol's quorum size for `n` processes tolerating `f`
    /// faults.
    pub fn quorum_size(&self, n: usize, f: usize) -> usize {
        // since EPaxos always tolerates a minority of failures, we ignore the f
        // passed as argument, and compute f to be a minority of n processes
        match self {
            Protocol::FPaxos => f + 1,
            Protocol::EPaxos => {
                let f = Self::minority(n);
                // fast quorum: f + floor((f + 1) / 2); the previous
                // `2 as usize` cast was a no-op (`as` binds tighter than `/`)
                f + ((f + 1) / 2)
            }
            Protocol::Atlas => Self::minority(n) + f,
        }
    }
    /// Largest minority of `n` processes.
    fn minority(n: usize) -> usize {
        n / 2
    }
}
/// Where clients are placed when computing stats: at the input client
/// regions, or colocated with the servers.
#[derive(Clone, Copy)]
pub enum ClientPlacement {
    Input,
    Colocated,
}
impl ClientPlacement {
    /// Suffix appended to stats keys for this placement: empty for input
    /// clients, `"C"` for colocated clients.
    pub fn short_name(&self) -> &str {
        match self {
            ClientPlacement::Colocated => "C",
            ClientPlacement::Input => "",
        }
    }
    /// Returns an iterator over every possible client placement.
    pub fn all() -> impl Iterator<Item = ClientPlacement> {
        let placements =
            vec![ClientPlacement::Input, ClientPlacement::Colocated];
        placements.into_iter()
    }
}
/// Mapping from protocol name to its stats.
#[derive(Debug, Default, PartialEq, Deserialize, Serialize)]
pub struct ProtocolStats(BTreeMap<String, Histogram>);
impl ProtocolStats {
    /// Creates an empty stats map.
    pub fn new() -> ProtocolStats {
        Default::default()
    }
    /// Returns the histogram stored for this (protocol, f, placement);
    /// panics if it was never inserted.
    pub fn get(
        &self,
        protocol: Protocol,
        f: usize,
        placement: ClientPlacement,
    ) -> &Histogram {
        let key = Self::key(protocol, f, placement);
        self.get_and_unwrap(&key)
    }
    /// Stores a histogram for this (protocol, f, placement), replacing any
    /// previous entry with the same key.
    pub fn insert(
        &mut self,
        protocol: Protocol,
        f: usize,
        placement: ClientPlacement,
        stats: Histogram,
    ) {
        let key = Self::key(protocol, f, placement);
        self.0.insert(key, stats);
    }
    /// Formats the stored histogram as `key=stats`; panics if missing.
    pub fn fmt(
        &self,
        protocol: Protocol,
        f: usize,
        placement: ClientPlacement,
    ) -> String {
        let key = Self::key(protocol, f, placement);
        let stats = self.get_and_unwrap(&key);
        format!("{}={:?}", key, stats)
    }
    // Builds the map key, e.g. "af1", "ff2C" or "e" (EPaxos ignores `f`).
    fn key(protocol: Protocol, f: usize, placement: ClientPlacement) -> String {
        let prefix = match protocol {
            Protocol::EPaxos => String::from(protocol.short_name()),
            _ => format!("{}f{}", protocol.short_name(), f),
        };
        let suffix = placement.short_name();
        format!("{}{}", prefix, suffix)
    }
    // Looks up `key`, panicking with a descriptive message when absent.
    fn get_and_unwrap(&self, key: &str) -> &Histogram {
        self.0.get(key).unwrap_or_else(|| {
            panic!("stats with key {} not found", key);
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn quorum_size() {
        // quorum sizes per protocol: FPaxos f+1, EPaxos fast quorum,
        // Atlas minority + f
        assert_eq!(Protocol::FPaxos.quorum_size(3, 1), 2);
        assert_eq!(Protocol::FPaxos.quorum_size(5, 1), 2);
        assert_eq!(Protocol::FPaxos.quorum_size(5, 2), 3);
        assert_eq!(Protocol::EPaxos.quorum_size(3, 0), 2);
        assert_eq!(Protocol::EPaxos.quorum_size(5, 0), 3);
        assert_eq!(Protocol::EPaxos.quorum_size(7, 0), 5);
        assert_eq!(Protocol::EPaxos.quorum_size(9, 0), 6);
        assert_eq!(Protocol::EPaxos.quorum_size(11, 0), 8);
        assert_eq!(Protocol::EPaxos.quorum_size(13, 0), 9);
        assert_eq!(Protocol::EPaxos.quorum_size(15, 0), 11);
        assert_eq!(Protocol::EPaxos.quorum_size(17, 0), 12);
        assert_eq!(Protocol::Atlas.quorum_size(3, 1), 2);
        assert_eq!(Protocol::Atlas.quorum_size(5, 1), 3);
        assert_eq!(Protocol::Atlas.quorum_size(5, 2), 4);
    }
    #[test]
    fn protocol_stats() {
        // insert followed by get with the same key returns the same stats
        let stats = Histogram::from(vec![10, 20, 40, 10]);
        let f = 1;
        let placement = ClientPlacement::Colocated;
        let mut all_stats = ProtocolStats::new();
        all_stats.insert(Protocol::Atlas, f, placement, stats.clone());
        assert_eq!(all_stats.get(Protocol::Atlas, f, placement), &stats);
    }
    #[test]
    #[should_panic]
    fn protocol_stats_panic() {
        // getting a key that was never inserted must panic
        let f = 1;
        let placement = ClientPlacement::Colocated;
        let all_stats = ProtocolStats::new();
        // should panic!
        all_stats.get(Protocol::Atlas, f, placement);
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_bote/src/main.rs | fantoch_bote/src/main.rs | use fantoch::planet::{Planet, Region};
use fantoch_bote::{FTMetric, RankingParams, Search, SearchInput};
/// Prints the distance matrix for the selected regions and then runs the
/// configuration search.
fn main() {
    distance_table();
    search();
}
/// Prints the pairwise latency matrix for the selected 13 GCP regions
/// (silently does nothing if the matrix cannot be computed).
fn distance_table() {
    let planet = Planet::new();
    let regions = vec![
        Region::new("asia-southeast1"),
        Region::new("europe-west4"),
        Region::new("southamerica-east1"),
        Region::new("australia-southeast1"),
        Region::new("europe-west2"),
        Region::new("asia-south1"),
        Region::new("us-east1"),
        Region::new("asia-northeast1"),
        Region::new("europe-west1"),
        Region::new("asia-east1"),
        Region::new("us-west1"),
        Region::new("europe-west3"),
        Region::new("us-central1"),
    ];
    if let Ok(matrix) = planet.distance_matrix(regions) {
        println!("{}", matrix);
    }
}
/// Runs the configuration search over the selected 13 regions and prints the
/// best-scored evolving chain of configurations with its stats.
fn search() {
    // define some search params
    let min_n = 3;
    let max_n = 13;
    // originally `search_input = SearchInput::R17CMaxN`
    let search_input = SearchInput::R13C13;
    let save_search = true;
    // create search
    let search = Search::new(min_n, max_n, search_input, save_search, None);
    // define search params:
    // originally 30 was used for the `min_mean_improv`;
    // here we want the test to be run asap,
    // so we restrict the search the maximum possible
    let min_mean_fpaxos_improv = 110;
    let min_mean_epaxos_improv = 35;
    let min_fairness_fpaxos_improv = 0;
    let min_mean_decrease = 15;
    let ft_metric = FTMetric::F1F2;
    // create ranking params
    let params = RankingParams::new(
        min_mean_fpaxos_improv,
        min_mean_epaxos_improv,
        min_fairness_fpaxos_improv,
        min_mean_decrease,
        min_n,
        max_n,
        ft_metric,
    );
    // select the best config
    let (score, css, _clients) = search
        .sorted_evolving_configs(&params)
        .into_iter()
        .take(1) // take only the best one
        .next()
        .unwrap();
    println!("score: {:?}", score);
    // the final sorted config
    let mut sorted_config = Vec::new();
    for (config, stats) in css {
        // update sorted config: append regions in chain order, skipping
        // regions already present in a smaller configuration
        for region in config {
            if !sorted_config.contains(region) {
                sorted_config.push(region.clone())
            }
        }
        println!("{}", Search::stats_fmt(stats, config.len()));
    }
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/lib.rs | fantoch_ps/src/lib.rs | #![deny(rust_2018_idioms)]
// This module contains the implementation of several `Executor`'s.
pub mod executor;
// This module contains the implementation of several `Protocol`'s.
pub mod protocol;
// This module contains some utilitary functions.
pub mod util;
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/util.rs | fantoch_ps/src/util.rs | #[cfg(test)]
pub use tests::{gen_cmd, vclock};
#[cfg(test)]
mod tests {
use fantoch::command::Command;
use fantoch::id::ProcessId;
use fantoch::id::Rifl;
use fantoch::kvs::KVOp;
use rand::Rng;
use threshold::{Clock, EventSet, MaxSet, VClock};
#[cfg(test)]
/// Returns a new `VClock` setting its frontier with the sequences in the
/// iterator.
pub fn vclock<I: IntoIterator<Item = u64>>(iter: I) -> VClock<ProcessId> {
Clock::from(
iter.into_iter()
.enumerate()
.map(|(actor, seq)| ((actor + 1) as ProcessId, seq)) // make ids 1..=n
.map(|(actor, seq)| (actor, MaxSet::from_event(seq))),
)
}
#[cfg(test)]
// Generates a random `Command` with at most `max_keys_per_command` where
// the number of keys is `keys_number`.
pub fn gen_cmd(
max_keys_per_command: usize,
keys_number: usize,
noop_probability: usize,
) -> Option<Command> {
assert!(noop_probability <= 100);
// get random
let mut rng = rand::thread_rng();
// select keys per command
let key_number = rng.gen_range(1..(max_keys_per_command + 1));
// generate command data
let cmd_data: Vec<_> = (0..key_number)
.map(|_| {
// select random key
let key = format!("{}", rng.gen_range(0..keys_number));
let value = String::from("");
(key, KVOp::Put(value))
})
.collect();
// create fake rifl
let rifl = Rifl::new(0, 0);
// create multi put command
Some(Command::from(rifl, cmd_data))
}
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/bin/simulation.rs | fantoch_ps/src/bin/simulation.rs | use fantoch::client::{KeyGen, Workload};
use fantoch::config::Config;
use fantoch::executor::{ExecutorMetrics, ExecutorMetricsKind};
use fantoch::id::ProcessId;
use fantoch::metrics::Histogram;
use fantoch::planet::{Planet, Region};
use fantoch::protocol::{Protocol, ProtocolMetrics, ProtocolMetricsKind};
use fantoch::sim::Runner;
use fantoch::HashMap;
use fantoch_ps::protocol::{
AtlasSequential, CaesarLocked, EPaxosSequential, FPaxos, TempoSequential,
};
use rayon::prelude::*;
use std::time::Duration;
// latency dir
const LATENCY_AWS: &str = "../latency_aws/2020_06_05";
const STACK_SIZE: usize = 64 * 1024 * 1024; // 64mb
/// Builds a simulation `Config` for `$n` processes tolerating `$f` faults,
/// with the given tempo tiny-quorums flag, optional tempo clock-bump
/// interval, skip-fast-ack flag and caesar wait condition.
macro_rules! config {
    ($n:expr, $f:expr, $tiny_quorums:expr, $clock_bump_interval:expr, $skip_fast_ack:expr, $wait_condition:expr) => {{
        let mut config = Config::new($n, $f);
        config.set_tempo_tiny_quorums($tiny_quorums);
        if let Some(interval) = $clock_bump_interval {
            config.set_tempo_clock_bump_interval::<Option<Duration>>(interval);
        }
        // make sure detached votes are sent
        config.set_tempo_detached_send_interval(Duration::from_millis(5));
        // set caesar's wait condition
        config.set_caesar_wait_condition($wait_condition);
        // make sure stability is running
        config.set_gc_interval(Duration::from_millis(10));
        // make sure executed notification are being sent (which will only
        // affect the protocols that have implemented such functionality)
        config.set_executor_executed_notification_interval(
            Duration::from_millis(10),
        );
        config.set_skip_fast_ack($skip_fast_ack);
        config
    }};
}
/// Builds the global rayon thread pool and runs the tempo simulation.
fn main() {
    // build rayon thread pool:
    // - leave two cpus for the rest of the system, but always keep at least
    //   one worker thread (a plain `num_cpus::get() - spare` underflows on
    //   machines with fewer than `spare` cpus)
    let spare = 2;
    let threads = std::cmp::max(num_cpus::get().saturating_sub(spare), 1);
    rayon::ThreadPoolBuilder::new()
        .num_threads(threads)
        .stack_size(STACK_SIZE)
        .build_global()
        .unwrap();
    let aws = true;
    tempo(aws);
    // fairest_leader();
}
/// Builds a `Planet` from the AWS latency files and returns it together with
/// the five AWS regions used in the simulation.
fn aws_planet() -> (Planet, Vec<Region>) {
    let planet = Planet::from(LATENCY_AWS);
    let regions = vec![
        Region::new("eu-west-1"),
        Region::new("us-west-1"),
        Region::new("ap-southeast-1"),
        Region::new("ca-central-1"),
        Region::new("sa-east-1"),
    ];
    (planet, regions)
}
#[allow(dead_code)]
// average latencies observed during many AWS runs
// (a hard-coded, symmetric-looking latency table between the five regions;
// each row lists the latency from one region to every region, self = 0)
fn aws_runs_planet() -> (Planet, Vec<Region>) {
    let eu = Region::new("EU");
    let us = Region::new("US");
    let ap = Region::new("AP");
    let ca = Region::new("CA");
    let sa = Region::new("SA");
    let regions =
        vec![eu.clone(), us.clone(), ap.clone(), ca.clone(), sa.clone()];
    let latencies = vec![
        (
            eu.clone(),
            vec![
                (eu.clone(), 0),
                (us.clone(), 136),
                (ap.clone(), 180),
                (ca.clone(), 73),
                (sa.clone(), 177),
            ],
        ),
        (
            us.clone(),
            vec![
                (eu.clone(), 136),
                (us.clone(), 0),
                (ap.clone(), 174),
                (ca.clone(), 79),
                (sa.clone(), 174),
            ],
        ),
        (
            ap.clone(),
            vec![
                (eu.clone(), 180),
                (us.clone(), 174),
                (ap.clone(), 0),
                (ca.clone(), 206),
                (sa.clone(), 317),
            ],
        ),
        (
            ca.clone(),
            vec![
                (eu.clone(), 73),
                (us.clone(), 79),
                (ap.clone(), 206),
                (ca.clone(), 0),
                (sa.clone(), 122),
            ],
        ),
        (
            sa.clone(),
            vec![
                (eu.clone(), 177),
                (us.clone(), 174),
                (ap.clone(), 317),
                (ca.clone(), 122),
                (sa.clone(), 0),
            ],
        ),
    ];
    // turn each row's Vec into the map type expected by `from_latencies`
    let latencies = latencies
        .into_iter()
        .map(|(region, region_latencies)| {
            (region, region_latencies.into_iter().collect())
        })
        .collect();
    let planet = Planet::from_latencies(latencies);
    (planet, regions)
}
/// Default planet (GCP latencies) together with the five GCP regions used
/// in the experiments.
fn gcp_planet() -> (Planet, Vec<Region>) {
    let region_names = [
        "asia-south1",
        "europe-north1",
        "southamerica-east1",
        "australia-southeast1",
        "europe-west1",
    ];
    let regions = region_names.iter().map(|name| Region::new(*name)).collect();
    (Planet::new(), regions)
}
/// Main experiment driver: simulates a set of protocols (Atlas, EPaxos,
/// FPaxos, Tempo, Caesar) over AWS or GCP latencies, sweeping the number of
/// clients per region, conflict rate and conflict-pool size, and prints each
/// run's metrics via `handle_run_result`.
#[allow(dead_code)]
fn tempo(aws: bool) {
    let (planet, regions) = if aws { aws_planet() } else { gcp_planet() };
    // show the pairwise distance matrix for the selected regions
    println!("{}", planet.distance_matrix(regions.clone()).unwrap());
    // numbers of processes to simulate (only 3 and 5 are supported below)
    let ns = vec![5];
    // let clients_per_region = vec![64, 128, 256, 512];
    // let pool_sizes = vec![100, 50, 10, 1];
    // let conflicts = vec![0, 2, 10, 30, 50, 100];
    let clients_per_region = vec![
        32,
        512,
        1024,
        1024 * 2,
        1024 * 4,
        1024 * 8,
        1024 * 16,
        1024 * 20,
    ];
    let pool_sizes = vec![1];
    let conflicts = vec![2];
    // one rayon task per value of `n`
    ns.into_par_iter().for_each(|n| {
        // simulate only the first `n` regions
        let regions: Vec<_> = regions.clone().into_iter().take(n).collect();
        let configs = if n == 3 {
            vec![
                // (protocol, (n, f, tiny quorums, clock bump interval, skip
                // fast ack))
                ("Atlas", config!(n, 1, false, None, false, false)),
                // ("EPaxos", config!(n, 1, false, None, false, false)),
                // ("FPaxos", config!(n, 1, false, None, false, false)),
                ("Tempo", config!(n, 1, false, None, false, false)),
            ]
        } else if n == 5 {
            vec![
                // (protocol, (n, f, tiny quorums, clock bump interval, skip
                // fast ack))
                // ("Atlas", config!(n, 1, false, None, false, false)),
                // ("Atlas", config!(n, 2, false, None, false, false)),
                // ("EPaxos", config!(n, 0, false, None, false, false)),
                // ("FPaxos", config!(n, 1, false, None, false, false)),
                // ("FPaxos", config!(n, 2, false, None, false, false)),
                // ("Tempo", config!(n, 1, false, None, false, false)),
                // ("Tempo", config!(n, 2, false, None, false, false)),
                // ("Caesar", config!(n, 2, false, None, false, false)),
                ("Caesar", config!(n, 2, false, None, false, true)),
            ]
        } else {
            panic!("unsupported number of processes {}", n);
        };
        pool_sizes.iter().for_each(|&pool_size| {
            println!("POOL_SIZE: {:?}", pool_size);
            conflicts.iter().for_each(|&conflict_rate| {
                println!("CONFLICTS: {:?}", conflict_rate);
                // client counts and protocol configs are explored in parallel
                clients_per_region.par_iter().for_each(|&clients| {
                    configs.par_iter().for_each(|&(protocol, mut config)| {
                        // TODO check if the protocol is leader-based, and if
                        // yes, run for all possible
                        // leader configurations
                        // set leader if FPaxos
                        if protocol == "FPaxos" {
                            config.set_leader(1);
                        }
                        // clients workload
                        let shard_count = 1;
                        let key_gen = KeyGen::ConflictPool {
                            conflict_rate,
                            pool_size,
                        };
                        let keys_per_command = 1;
                        let commands_per_client = 200;
                        let payload_size = 0;
                        let workload = Workload::new(
                            shard_count,
                            key_gen,
                            keys_per_command,
                            commands_per_client,
                            payload_size,
                        );
                        // process regions, client regions and planet
                        let process_regions = regions.clone();
                        let client_regions = regions.clone();
                        let planet = planet.clone();
                        // dispatch on the protocol name; each arm picks the
                        // matching protocol implementation as `run`'s generic
                        let (metrics, client_latencies) = match protocol {
                            "Atlas" => run::<AtlasSequential>(
                                config,
                                workload,
                                clients,
                                process_regions,
                                client_regions,
                                planet,
                            ),
                            "EPaxos" => run::<EPaxosSequential>(
                                config,
                                workload,
                                clients,
                                process_regions,
                                client_regions,
                                planet,
                            ),
                            "FPaxos" => run::<FPaxos>(
                                config,
                                workload,
                                clients,
                                process_regions,
                                client_regions,
                                planet,
                            ),
                            "Tempo" => run::<TempoSequential>(
                                config,
                                workload,
                                clients,
                                process_regions,
                                client_regions,
                                planet,
                            ),
                            "Caesar" => run::<CaesarLocked>(
                                config,
                                workload,
                                clients,
                                process_regions,
                                client_regions,
                                planet,
                            ),
                            _ => panic!("unsupported protocol {:?}", protocol),
                        };
                        handle_run_result(
                            protocol,
                            config,
                            clients,
                            metrics,
                            client_latencies,
                        );
                    })
                })
            })
        })
    });
}
/// For every possible FPaxos leader placement, runs a simulation and prints
/// the average and standard deviation of the per-region mean latencies, to
/// find the "fairest" leader region.
#[allow(dead_code)]
fn fairest_leader() {
    let (planet, regions) = aws_runs_planet();
    // let (planet, regions) = aws_planet();
    println!("{}", planet.distance_matrix(regions.clone()).unwrap());
    let configs = vec![config!(5, 1, false, None, false, false)];
    let clients_per_region = 1;
    // clients workload
    let shard_count = 1;
    let key_gen = KeyGen::ConflictPool {
        conflict_rate: 2,
        pool_size: 1,
    };
    let keys_per_command = 1;
    let commands_per_client = 500;
    let payload_size = 0;
    let workload = Workload::new(
        shard_count,
        key_gen,
        keys_per_command,
        commands_per_client,
        payload_size,
    );
    for mut config in configs {
        // try every process as the leader (process ids start at 1)
        for leader in 1..=config.n() {
            println!("-----------------------------");
            config.set_leader(leader as ProcessId);
            // process regions, client regions and planet
            let process_regions = regions.clone();
            let client_regions = regions.clone();
            let planet = planet.clone();
            let (_, client_latencies) = run::<FPaxos>(
                config,
                workload,
                clients_per_region,
                process_regions,
                client_regions,
                planet,
            );
            // compute clients stats: fold the per-region mean latencies into
            // a single histogram (one entry per region)
            let histogram = client_latencies.into_iter().fold(
                Histogram::new(),
                |mut histogram_acc, (region, (_issued_commands, histogram))| {
                    println!(
                        "region = {:<14} | {:?}",
                        region.name(),
                        histogram
                    );
                    // add average latency to histogram
                    histogram_acc
                        .increment(histogram.mean().value().round() as u64);
                    histogram_acc
                },
            );
            println!(
                "LEADER: {} in region {:?} | avg = {:<3} | std = {:<3}",
                leader,
                // leader ids are 1-based, `regions` is 0-based
                regions
                    .get(leader - 1)
                    .expect("leader should exist in regions"),
                histogram.mean().value().round() as u64,
                histogram.stddev().value().round() as u64
            );
        }
    }
}
/// Runs protocol `P` on synthetic planets where all regions are equidistant
/// (a fixed `distance` apart), for (n, f) in {(3, 1), (5, 2)}, splitting
/// `total_clients` evenly across the regions.
#[allow(dead_code)]
fn equidistant<P: Protocol>(protocol_name: &str) {
    // intra-region distance
    let distance = 200;
    // number of processes and f
    let configs = vec![(3, 1), (5, 2)];
    // total clients (divided evenly among the `n` regions)
    let total_clients = 1000;
    // clients workload
    let shard_count = 1;
    let key_gen = KeyGen::ConflictPool {
        conflict_rate: 2,
        pool_size: 1,
    };
    let keys_per_command = 1;
    let total_commands = 500;
    let payload_size = 0;
    let workload = Workload::new(
        shard_count,
        key_gen,
        keys_per_command,
        total_commands,
        payload_size,
    );
    for &(n, f) in &configs {
        // create planet and regions
        let (process_regions, planet) = Planet::equidistant(distance, n);
        // client regions (clients are co-located with the processes)
        let client_regions = process_regions.clone();
        let clients_per_region = total_clients / n;
        println!("running processes={} | clients={}", n, clients_per_region);
        println!();
        // config
        let config = Config::new(n, f);
        let (process_metrics, client_latencies) = run::<P>(
            config,
            workload,
            clients_per_region,
            process_regions,
            client_regions,
            planet,
        );
        handle_run_result(
            protocol_name,
            config,
            clients_per_region,
            process_metrics,
            client_latencies,
        );
    }
}
/// Runs protocol `P` with an increasing number of process regions (3 to 13,
/// always f = 1) drawn from a fixed list of 13 GCP regions, while clients
/// are always placed in all 13 regions.
#[allow(dead_code)]
fn increasing_regions<P: Protocol>(protocol_name: &str) {
    let planet = Planet::new();
    let regions13 = vec![
        Region::new("asia-southeast1"),
        Region::new("europe-west4"),
        Region::new("southamerica-east1"),
        Region::new("australia-southeast1"),
        Region::new("europe-west2"),
        Region::new("asia-south1"),
        Region::new("us-east1"),
        Region::new("asia-northeast1"),
        Region::new("europe-west1"),
        Region::new("asia-east1"),
        Region::new("us-west1"),
        Region::new("europe-west3"),
        Region::new("us-central1"),
    ];
    // number of processes and f
    let ns = vec![3, 5, 7, 9, 11, 13];
    let f = 1;
    // clients workload
    let shard_count = 1;
    let key_gen = KeyGen::ConflictPool {
        conflict_rate: 2,
        pool_size: 1,
    };
    let keys_per_command = 1;
    let total_commands = 500;
    let payload_size = 0;
    let workload = Workload::new(
        shard_count,
        key_gen,
        keys_per_command,
        total_commands,
        payload_size,
    );
    // clients per region: ~1000 clients split over the 13 client regions
    // (integer division, hence the sanity check below)
    let clients_per_region = 1000 / 13;
    assert_eq!(clients_per_region, 76);
    for &n in &ns {
        println!("running processes={}", n);
        println!();
        // config
        let config = Config::new(n, f);
        // process regions: the first `n` of the 13
        let process_regions = regions13.clone().into_iter().take(n).collect();
        // client regions: always all 13
        let client_regions = regions13.clone();
        let (process_metrics, client_latencies) = run::<P>(
            config,
            workload,
            clients_per_region,
            process_regions,
            client_regions,
            planet.clone(),
        );
        handle_run_result(
            protocol_name,
            config,
            clients_per_region,
            process_metrics,
            client_latencies,
        );
    }
}
/// Runs one simulation of protocol `P` and returns the per-process
/// protocol/executor metrics and the per-region client latencies.
/// Panics if fewer commands were issued than expected.
fn run<P: Protocol>(
    config: Config,
    workload: Workload,
    clients_per_region: usize,
    process_regions: Vec<Region>,
    client_regions: Vec<Region>,
    planet: Planet,
) -> (
    HashMap<ProcessId, (ProtocolMetrics, ExecutorMetrics)>,
    HashMap<Region, (usize, Histogram)>,
) {
    // every client in every client region issues `commands_per_client`
    // commands
    let expected_commands = workload.commands_per_client()
        * clients_per_region
        * client_regions.len();
    // run simulation and get latencies
    let mut runner: Runner<P> = Runner::new(
        planet,
        config,
        workload,
        clients_per_region,
        process_regions,
        client_regions,
    );
    let (metrics, _executors_monitors, client_latencies) = runner.run(None);
    // sanity check: sum issued commands across all regions
    let issued_commands: usize = client_latencies
        .values()
        .map(|(issued_commands, _histogram)| issued_commands)
        .sum();
    if issued_commands != expected_commands {
        panic!(
            "only issued {} out of {} commands",
            issued_commands, expected_commands,
        );
    }
    (metrics, client_latencies)
}
/// Prints per-process protocol and executor metrics, then the aggregated
/// client-side statistics for one run: wait-condition delay, commit latency,
/// execution latency/delay, and the fast-path rate.
fn handle_run_result(
    protocol_name: &str,
    config: Config,
    clients_per_region: usize,
    metrics: HashMap<ProcessId, (ProtocolMetrics, ExecutorMetrics)>,
    client_latencies: HashMap<Region, (usize, Histogram)>,
) {
    // aggregates across all processes
    let mut fast_paths = 0;
    let mut slow_paths = 0;
    let mut wait_condition_delay = Histogram::new();
    let mut commit_latency = Histogram::new();
    let mut execution_delay = Histogram::new();
    // show processes stats
    metrics.into_iter().for_each(
        |(process_id, (process_metrics, executor_metrics))| {
            println!("{}:", process_id);
            println!(" process metrics:");
            println!("{:?}", process_metrics);
            println!(" executor metrics:");
            println!("{:?}", executor_metrics);
            // fast/slow-path counters may be absent for protocols that do
            // not track them; default to 0
            let process_fast_paths = process_metrics
                .get_aggregated(ProtocolMetricsKind::FastPath)
                .cloned()
                .unwrap_or_default();
            let process_slow_paths = process_metrics
                .get_aggregated(ProtocolMetricsKind::SlowPath)
                .cloned()
                .unwrap_or_default();
            let process_wait_condition_delay = process_metrics
                .get_collected(ProtocolMetricsKind::WaitConditionDelay);
            let process_commit_latency = process_metrics
                .get_collected(ProtocolMetricsKind::CommitLatency);
            let executor_execution_delay = executor_metrics
                .get_collected(ExecutorMetricsKind::ExecutionDelay);
            fast_paths += process_fast_paths;
            slow_paths += process_slow_paths;
            if let Some(h) = process_wait_condition_delay {
                wait_condition_delay.merge(h);
            }
            if let Some(h) = process_commit_latency {
                commit_latency.merge(h);
            }
            if let Some(h) = executor_execution_delay {
                execution_delay.merge(h);
            }
        },
    );
    // compute the percentage of fast paths; guard against division by zero
    // (which would print NaN) when no fast/slow paths were recorded at all
    let total = fast_paths + slow_paths;
    let fp_percentage = if total == 0 {
        0f64
    } else {
        (fast_paths as f64 * 100f64) / total as f64
    };
    // compute clients stats: merge all regions' latency histograms
    let execution_latency = client_latencies.into_iter().fold(
        Histogram::new(),
        |mut histogram_acc, (region, (_issued_commands, histogram))| {
            println!("region = {:<14} | {:?}", region.name(), histogram);
            // merge histograms
            histogram_acc.merge(&histogram);
            histogram_acc
        },
    );
    // distinguish Caesar without the wait condition in the output
    let name = |protocol_name, wait_condition: bool| {
        if protocol_name == "Caesar" && !wait_condition {
            "CaesarNW"
        } else {
            protocol_name
        }
    };
    let prefix = format!(
        "{:<8} n = {} f = {} c = {:<3}",
        name(protocol_name, config.caesar_wait_condition()),
        config.n(),
        config.f(),
        clients_per_region
    );
    println!(
        "{} | wait condition delay: {:?}",
        prefix, wait_condition_delay
    );
    println!("{} | commit latency : {:?}", prefix, commit_latency);
    println!("{} | execution latency : {:?}", prefix, execution_latency);
    println!("{} | execution delay : {:?}", prefix, execution_delay);
    println!("{} | fast path rate : {:<7.1}", prefix, fp_percentage);
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/bin/graph_executor_replay.rs | fantoch_ps/src/bin/graph_executor_replay.rs | mod common;
use clap::{Arg, Command};
use fantoch::config::Config;
use fantoch::executor::Executor;
use fantoch::run::rw::Rw;
use fantoch::time::RunTime;
use fantoch_ps::executor::GraphExecutor;
use tokio::fs::File;
const BUFFER_SIZE: usize = 8 * 1024; // 8KB
/// Replays a graph-executor execution log: every logged execution info is
/// fed into a fresh `GraphExecutor` and the executor state is printed after
/// each step, asserting that no client results are produced (no client is
/// waiting on any rifl during a replay).
#[tokio::main]
async fn main() {
    // fixed identity for the replayed executor
    let process_id = 1;
    let shard_id = 0;
    let (config, execution_log) = parse_args();
    // create graph executor
    let mut executor = GraphExecutor::new(process_id, shard_id, config);
    let time = RunTime;
    // open execution log file
    let file = File::open(execution_log)
        .await
        .expect("execution log should exist");
    // create log parser (read and write buffers of BUFFER_SIZE bytes each)
    let mut rw = Rw::from(BUFFER_SIZE, BUFFER_SIZE, file);
    // replay each execution info in log order
    while let Some(execution_info) = rw.recv().await {
        println!("adding {:?}", execution_info);
        // result should be empty as we're not waiting for any rifl
        executor.handle(execution_info, &time);
        let res: Vec<_> = executor.to_clients_iter().collect();
        assert!(res.is_empty());
        println!("{:?}", executor);
    }
}
/// Parses command-line arguments into a `Config` (from `--processes` and
/// `--faults`) and the execution-log path (from `--execution_log`).
/// Exits with a clap usage error if a required argument is missing.
fn parse_args() -> (Config, String) {
    let matches = Command::new("executor_replay")
        .version("0.1")
        .author("Vitor Enes <vitorenesduarte@gmail.com>")
        .about("Replays an execution log.")
        .arg(
            Arg::new("n")
                .long("processes")
                .value_name("PROCESS_NUMBER")
                .help("total number of processes")
                .required(true)
                .takes_value(true),
        )
        .arg(
            Arg::new("f")
                .long("faults")
                .value_name("FAULT_NUMBER")
                .help("total number of allowed faults")
                .required(true)
                .takes_value(true),
        )
        .arg(
            Arg::new("execution_log")
                .long("execution_log")
                .value_name("EXECUTION_LOG")
                .help("log file with execution infos")
                .required(true)
                .takes_value(true),
        )
        .get_matches();
    // parse arguments (helpers shared with the other protocol binaries)
    let n = common::protocol::parse_n(matches.value_of("n"));
    let f = common::protocol::parse_f(matches.value_of("f"));
    let config = Config::new(n, f);
    let execution_log = common::protocol::parse_execution_log(
        matches.value_of("execution_log"),
    )
    .expect("execution log should be set");
    println!("config: {:?}", config);
    println!("execution log: {:?}", execution_log);
    (config, execution_log)
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
vitorenesduarte/fantoch | https://github.com/vitorenesduarte/fantoch/blob/aea6e324fdce7802976cbafa88a516ed40609ce9/fantoch_ps/src/bin/atlas_locked.rs | fantoch_ps/src/bin/atlas_locked.rs | mod common;
use color_eyre::Report;
use fantoch_ps::protocol::AtlasLocked;
// TODO can we generate all the protocol binaries with a macro?
/// Runs the `AtlasLocked` protocol binary via the shared protocol runner.
fn main() -> Result<(), Report> {
    common::protocol::run::<AtlasLocked>()
}
| rust | Apache-2.0 | aea6e324fdce7802976cbafa88a516ed40609ce9 | 2026-01-04T20:24:46.253513Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.