repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/disseminator.rs | src/disseminator.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Fast block dissemination protocols.
//!
//! This module provides the [`Disseminator`] trait for block dissemination protocols.
//!
//! Also, this module provides implementations of three dissemination protocols:
//! - [`TrivialDisseminator`] implements a leader-to-everyone broadcast protocol.
//! - [`Rotor`] implements Alpenglow's Rotor, which is an evolution of Turbine.
//! - [`Turbine`] implements Solana's basic Turbine protocol.
pub mod rotor;
pub mod trivial;
pub mod turbine;
use async_trait::async_trait;
use mockall::automock;
pub use self::rotor::Rotor;
pub use self::trivial::TrivialDisseminator;
pub use self::turbine::Turbine;
use crate::shredder::Shred;
/// Abstraction of a block dissemination protocol.
#[async_trait]
#[automock]
pub trait Disseminator {
/// Sends the given shred to the network as the original source.
async fn send(&self, shred: &Shred) -> std::io::Result<()>;
/// Performs any necessary forwarding of the given shred.
async fn forward(&self, shred: &Shred) -> std::io::Result<()>;
/// Receives the next shred from the network.
async fn receive(&self) -> std::io::Result<Shred>;
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/lib.rs | src/lib.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Alpenglow: Global High-Performance Proof-of-Stake Blockchain with Erasure Coding
//!
//! Research reference implementation of the Alpenglow consensus protocol.
#![deny(rustdoc::broken_intra_doc_links)]
pub mod all2all;
pub mod consensus;
pub mod crypto;
pub mod disseminator;
pub mod logging;
pub mod network;
pub mod repair;
pub mod shredder;
#[cfg(test)]
pub mod test_utils;
pub mod types;
pub mod validator;
use std::net::SocketAddr;
use std::sync::Arc;
use serde::{Deserialize, Serialize};
use static_assertions::const_assert_eq;
use wincode::{SchemaRead, SchemaWrite};
pub use self::all2all::All2All;
pub use self::consensus::Alpenglow;
pub use self::consensus::votor::VotorEvent;
use self::crypto::{aggsig, signature};
pub use self::disseminator::Disseminator;
use self::types::Slot;
pub use self::validator::Validator;
use crate::all2all::TrivialAll2All;
use crate::consensus::{ConsensusMessage, EpochInfo};
use crate::crypto::merkle::BlockHash;
use crate::crypto::signature::SecretKey;
use crate::disseminator::Rotor;
use crate::disseminator::rotor::StakeWeightedSampler;
use crate::network::{UdpNetwork, localhost_ip_sockaddr};
use crate::repair::{RepairRequest, RepairResponse};
use crate::shredder::Shred;
// NOTE: In many places we assume that `usize` is 64 bits wide.
// So, for now, we only support 64-bit architectures.
const_assert_eq!(std::mem::size_of::<usize>(), 8);
/// Validator ID number type.
pub type ValidatorId = u64;
/// Validator stake type.
pub type Stake = u64;
/// Block identifier type.
pub type BlockId = (Slot, BlockHash);
/// Maximum number of bytes a transaction payload can contain.
const MAX_TRANSACTION_SIZE: usize = 512;
/// Parsed block with information about parent and transactions as payload.
#[derive(Clone, Debug)]
pub struct Block {
// TODO: unused
_slot: Slot,
hash: BlockHash,
parent: Slot,
parent_hash: BlockHash,
// TODO: unused
_transactions: Vec<Transaction>,
}
/// Dummy transaction containing payload bytes.
///
/// A transaction cannot hold more than [`MAX_TRANSACTION_SIZE`] payload bytes.
#[derive(Clone, Debug, SchemaRead, SchemaWrite)]
pub struct Transaction(pub Vec<u8>);
/// Validator information as known about other validators.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ValidatorInfo {
pub id: ValidatorId,
pub stake: Stake,
pub pubkey: signature::PublicKey,
#[serde(deserialize_with = "aggsig::PublicKey::from_array_of_bytes")]
pub voting_pubkey: aggsig::PublicKey,
pub all2all_address: SocketAddr,
pub disseminator_address: SocketAddr,
/// Send [`RepairRequest`] messages to this address to ask the node to repair a block.
pub repair_request_address: SocketAddr,
/// Send [`RepairResponse`] messages to this address when replying to a node's [`RepairRequest`] message.
pub repair_response_address: SocketAddr,
}
type TestNode = Alpenglow<
TrivialAll2All<UdpNetwork<ConsensusMessage, ConsensusMessage>>,
Rotor<UdpNetwork<Shred, Shred>, StakeWeightedSampler>,
UdpNetwork<Transaction, Transaction>,
>;
struct Networks {
all2all: UdpNetwork<ConsensusMessage, ConsensusMessage>,
disseminator: UdpNetwork<Shred, Shred>,
repair: UdpNetwork<RepairRequest, RepairResponse>,
repair_request: UdpNetwork<RepairResponse, RepairRequest>,
txs: UdpNetwork<Transaction, Transaction>,
}
impl Networks {
fn new() -> Self {
Self {
all2all: UdpNetwork::new_with_any_port(),
disseminator: UdpNetwork::new_with_any_port(),
repair: UdpNetwork::new_with_any_port(),
repair_request: UdpNetwork::new_with_any_port(),
txs: UdpNetwork::new_with_any_port(),
}
}
}
/// Creates [`TestNode`] for testing and benchmarking purposes.
///
/// This code lives here to enable sharing between different testing and benchmarking.
/// It should not be used in production code.
#[must_use]
pub fn create_test_nodes(count: u64) -> Vec<TestNode> {
// open sockets with arbitrary ports
let networks = (0..count).map(|_| Networks::new()).collect::<Vec<_>>();
// prepare validator info for all nodes
let mut rng = rand::rng();
let mut sks = Vec::new();
let mut voting_sks = Vec::new();
let mut validators = Vec::new();
for (id, network) in networks.iter().enumerate() {
sks.push(SecretKey::new(&mut rng));
voting_sks.push(aggsig::SecretKey::new(&mut rng));
let all2all_address = localhost_ip_sockaddr(network.all2all.port());
let disseminator_address = localhost_ip_sockaddr(network.disseminator.port());
let repair_response_address = localhost_ip_sockaddr(network.repair.port());
let repair_request_address = localhost_ip_sockaddr(network.repair_request.port());
validators.push(ValidatorInfo {
id: id as u64,
stake: 1,
pubkey: sks[id].to_pk(),
voting_pubkey: voting_sks[id].to_pk(),
all2all_address,
disseminator_address,
repair_request_address,
repair_response_address,
});
}
// turn validator info into actual nodes
networks
.into_iter()
.enumerate()
.map(|(id, network)| {
let epoch_info = Arc::new(EpochInfo::new(id as u64, validators.clone()));
let all2all = TrivialAll2All::new(validators.clone(), network.all2all);
let disseminator = Rotor::new(network.disseminator, epoch_info.clone());
let repair_network = network.repair;
let repair_request_network = network.repair_request;
let txs_receiver = network.txs;
Alpenglow::new(
sks[id].clone(),
voting_sks[id].clone(),
all2all,
disseminator,
repair_network,
repair_request_network,
epoch_info,
txs_receiver,
)
})
.collect()
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/network.rs | src/network.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! An abstraction layer for networking.
//!
//! The [`Network`] trait provides a common interface for networking operations.
//! When implementing [`Network`], the implementor determines the types:
//! - [`Network::Send`]: The type of messages to be sent.
//! - [`Network::Recv`]: The type of messages to be received.
//!
//! Specific implementations for different underlying network stacks are provided:
//! - [`UdpNetwork`] abstracts a simple UDP socket
//! - [`TcpNetwork`] handles TCP connections under the hood
//! - [`SimulatedNetwork`] provides a simulated network for local testing
//!
//! # Examples
//!
//! ```rust
//! use alpenglow::network::{Network, localhost_ip_sockaddr};
//!
//! async fn send_ping_receive_pong(network: impl Network<Send = String, Recv = String>) {
//! let msg = "ping".to_string();
//! network.send(&msg, localhost_ip_sockaddr(1337)).await.unwrap();
//! let received = network.receive().await.unwrap();
//! assert_eq!(&received, "pong");
//! }
//! ```
pub mod simulated;
mod tcp;
mod udp;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use async_trait::async_trait;
pub use self::simulated::SimulatedNetwork;
pub use self::tcp::TcpNetwork;
pub use self::udp::UdpNetwork;
use crate::Transaction;
use crate::consensus::ConsensusMessage;
use crate::repair::{RepairRequest, RepairResponse};
use crate::shredder::Shred;
/// Maximum payload size of a UDP packet.
pub const MTU_BYTES: usize = 1500;
/// Abstraction of a network interface for sending and receiving messages.
#[async_trait]
pub trait Network: Send + Sync {
type Send;
type Recv;
/// Sends the `message` to all the addresses in `addrs`.
///
/// Note that a possible strategy for the implementators is to send to one address after another.
/// In this strategy, it is possible that if sending to one address fails, the implementator gives up sending to the remaining addresses.
/// This means that the function is not atomic, if it fails, some messages may still have been sent.
//
// NOTE: Consider return a `Vec<Result<()>>` to indicate per address failures.
async fn send_to_many(
&self,
message: &Self::Send,
addrs: impl Iterator<Item = SocketAddr> + Send,
) -> std::io::Result<()>;
/// Sends the `message` to `addr`.
async fn send(&self, message: &Self::Send, addr: SocketAddr) -> std::io::Result<()>;
// TODO: implement brodcast at `Network` level?
async fn receive(&self) -> std::io::Result<Self::Recv>;
}
/// A marker trait that constrains [`Network`] to send and receive [`Shred`]
pub trait ShredNetwork: Network<Recv = Shred, Send = Shred> {}
impl<N> ShredNetwork for N where N: Network<Recv = Shred, Send = Shred> {}
/// A marker trait that constrains [`Network`] to receive [`Transaction`]
pub trait TransactionNetwork: Network<Recv = Transaction> {}
impl<N> TransactionNetwork for N where N: Network<Recv = Transaction> {}
/// A marker trait that constrains [`Network`] to send and receive [`ConsensusMessage`]
pub trait ConsensusNetwork: Network<Recv = ConsensusMessage, Send = ConsensusMessage> {}
impl<N> ConsensusNetwork for N where N: Network<Recv = ConsensusMessage, Send = ConsensusMessage> {}
/// A marker trait that constrains [`Network`] to send [`RepairResponse`] and receive [`RepairRequest`]
pub trait RepairRequestNetwork: Network<Recv = RepairRequest, Send = RepairResponse> {}
impl<N> RepairRequestNetwork for N where N: Network<Recv = RepairRequest, Send = RepairResponse> {}
/// A marker trait that constrains [`Network`] to send [`RepairRequest`] and receive [`RepairResponse`]
pub trait RepairNetwork: Network<Recv = RepairResponse, Send = RepairRequest> {}
impl<N> RepairNetwork for N where N: Network<Recv = RepairResponse, Send = RepairRequest> {}
/// Returns a [`SocketAddr`] bound to the localhost IPv4 and given port.
///
/// NOTE: port 0 is generally reserved and used to get the OS to assign a port.
/// Using this function with port=0 on actual networks might lead to unexpected behaviour.
// TODO: prevent being able to call this function with port = 0.
pub fn localhost_ip_sockaddr(port: u16) -> SocketAddr {
SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port)
}
/// Returns a [`SocketAddr`] that could be bound any arbitrary IP and port.
///
/// This is present here to enable sharing of code between testing and benchmarking.
/// This should not be used in production.
pub fn dontcare_sockaddr() -> SocketAddr {
SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 1234)
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/test_utils.rs | src/test_utils.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Utility types and functions for tests and benchmarks.
use std::sync::Arc;
use rand::RngCore;
use wincode::{SchemaRead, SchemaWrite};
use crate::all2all::TrivialAll2All;
use crate::consensus::{ConsensusMessage, EpochInfo};
use crate::crypto::aggsig::SecretKey;
use crate::crypto::merkle::{BlockHash, DoubleMerkleTree};
use crate::crypto::{Hash, signature};
use crate::network::simulated::SimulatedNetworkCore;
use crate::network::{SimulatedNetwork, localhost_ip_sockaddr};
use crate::shredder::{MAX_DATA_PER_SLICE, RegularShredder, Shredder, ValidatedShred};
use crate::types::{Slice, SliceHeader, SliceIndex, SlicePayload};
use crate::{
BlockId, MAX_TRANSACTION_SIZE, Slot, Transaction, ValidatorId, ValidatorInfo, VotorEvent,
};
/// A simple ping network message.
#[derive(Clone, Debug, Default, SchemaRead, SchemaWrite)]
pub struct Ping(pub [u8; 32]);
/// A simple pong network message.
#[derive(Clone, Debug, Default, SchemaRead, SchemaWrite)]
pub struct Pong(pub [u8; 32]);
/// A simple network message that can be either a ping or a pong.
#[derive(Clone, Debug, PartialEq, Eq, SchemaRead, SchemaWrite)]
pub enum PingOrPong {
Ping([u8; 32]),
Pong([u8; 32]),
}
/// Generates [`ValidatorInfo`] for the given number of validators.
///
/// Returns the voting secret keys of all validators and the [`EpochInfo`] of validator 0.
pub fn generate_validators(num_validators: u64) -> (Vec<SecretKey>, Arc<EpochInfo>) {
let mut rng = rand::rng();
let mut sks = Vec::new();
let mut voting_sks = Vec::new();
let mut validators = Vec::new();
for i in 0..num_validators {
sks.push(signature::SecretKey::new(&mut rng));
voting_sks.push(SecretKey::new(&mut rng));
validators.push(ValidatorInfo {
id: i,
stake: 1,
pubkey: sks[i as usize].to_pk(),
voting_pubkey: voting_sks[i as usize].to_pk(),
all2all_address: localhost_ip_sockaddr(0),
disseminator_address: localhost_ip_sockaddr(0),
repair_request_address: localhost_ip_sockaddr(0),
repair_response_address: localhost_ip_sockaddr(0),
});
}
let epoch_info = Arc::new(EpochInfo::new(0, validators));
(voting_sks, epoch_info)
}
/// Creates [`TrivialAll2All`] instances for the given validators.
///
/// These are connected via a [`SimulatedNetworkCore`].
pub async fn generate_all2all_instances(
mut validators: Vec<ValidatorInfo>,
) -> Vec<TrivialAll2All<SimulatedNetwork<ConsensusMessage, ConsensusMessage>>> {
let core = Arc::new(
SimulatedNetworkCore::default()
.with_jitter(0.0)
.with_packet_loss(0.0),
);
for (i, val) in validators.iter_mut().enumerate() {
val.all2all_address = localhost_ip_sockaddr(i.try_into().unwrap());
}
let mut all2all = Vec::new();
for i in 0..validators.len() {
let network = core.join_unlimited(i as ValidatorId).await;
all2all.push(TrivialAll2All::new(validators.clone(), network));
}
all2all
}
/// Creates a random block with the given number of slices and shreds it.
///
/// Returns the block hash, the double-Merkle tree, and all shreds by slice.
pub fn create_random_shredded_block(
slot: Slot,
num_slices: usize,
sk: &signature::SecretKey,
) -> (BlockHash, DoubleMerkleTree, Vec<Vec<ValidatedShred>>) {
let mut shredder = RegularShredder::default();
let mut shreds = Vec::with_capacity(num_slices);
for slice in create_random_block(slot, num_slices) {
shreds.push(shredder.shred(slice.clone(), sk).unwrap().to_vec());
}
let merkle_roots = shreds
.iter()
.map(|slice_shreds| slice_shreds[0].merkle_root.clone())
.collect::<Vec<_>>();
let tree = DoubleMerkleTree::new(&merkle_roots);
let block_hash = tree.get_root();
(block_hash, tree, shreds)
}
/// Creates a random block with the given number of slices.
///
/// In most cases, you should use [`create_random_shredded_block`] instead.
///
/// Returns all slices, as [`Slice`].
pub fn create_random_block(slot: Slot, num_slices: usize) -> Vec<Slice> {
let final_slice_index = SliceIndex::new_unchecked(num_slices - 1);
let parent_slot = Slot::genesis();
assert_ne!(slot, parent_slot);
let mut slices = Vec::new();
for slice_index in final_slice_index.until() {
let parent = if slice_index.is_first() {
Some((parent_slot, Hash::random_for_test().into()))
} else {
None
};
let payload = create_random_slice_payload_valid_txs(parent);
let header = SliceHeader {
slot,
slice_index,
is_last: slice_index == final_slice_index,
};
slices.push(Slice::from_parts(header, payload, None));
}
slices
}
/// Asserts that two [`VotorEvent`]s are equal.
///
/// Panics if they are not equal.
pub fn assert_votor_events_match(ev0: VotorEvent, ev1: VotorEvent) {
match (ev0, ev1) {
(
VotorEvent::ParentReady {
slot: s0,
parent_slot: ps0,
parent_hash: ph0,
},
VotorEvent::ParentReady {
slot: s1,
parent_slot: ps1,
parent_hash: ph1,
},
) => {
assert_eq!(s0, s1);
assert_eq!(ps0, ps1);
assert_eq!(ph0, ph1);
}
(VotorEvent::CertCreated(c0), VotorEvent::CertCreated(c1)) => assert_eq!(c0, c1),
(VotorEvent::Standstill(s0, c0, v0), VotorEvent::Standstill(s1, c1, v1)) => {
assert_eq!(s0, s1);
assert_eq!(c0, c1);
assert_eq!(v0, v1);
}
(VotorEvent::SafeToNotar(s0, h0), VotorEvent::SafeToNotar(s1, h1)) => {
assert_eq!(s0, s1);
assert_eq!(h0, h1);
}
(
VotorEvent::Block {
slot: s0,
block_info: b0,
},
VotorEvent::Block {
slot: s1,
block_info: b1,
},
) => {
assert_eq!(s0, s1);
assert_eq!(b0, b1);
}
(VotorEvent::Timeout(s0), VotorEvent::Timeout(s1))
| (VotorEvent::TimeoutCrashedLeader(s0), VotorEvent::TimeoutCrashedLeader(s1))
| (VotorEvent::SafeToSkip(s0), VotorEvent::SafeToSkip(s1)) => assert_eq!(s0, s1),
(VotorEvent::FirstShred(s0), VotorEvent::FirstShred(s1)) => assert_eq!(s0, s1),
(ev0, ev1) => {
panic!("{ev0:?} does not match {ev1:?}");
}
}
}
/// Creates a valid [`SlicePayload`] which contains valid transactions that can be decoded.
fn create_random_slice_payload_valid_txs(parent: Option<BlockId>) -> SlicePayload {
// HACK: manually picked number of maximally sized transactions that fit in the slice
// without going over the [`MAX_DATA_PER_SLICE`] limit.
const NUM_TXS_PER_SLICE: usize = 61;
let mut data = vec![0; MAX_TRANSACTION_SIZE];
rand::rng().fill_bytes(&mut data);
let tx = Transaction(data);
let tx = wincode::serialize(&tx).expect("serialization should not panic");
let txs = vec![tx; NUM_TXS_PER_SLICE];
let txs = wincode::serialize(&txs).expect("serialization should not panic");
let payload = SlicePayload::new(parent, txs);
let payload: Vec<u8> = payload.into();
assert!(payload.len() <= MAX_DATA_PER_SLICE);
SlicePayload::from(payload.as_slice())
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/types.rs | src/types.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
pub mod slice;
pub mod slice_index;
pub mod slot;
pub use self::slice::Slice;
pub(crate) use self::slice::{SliceHeader, SlicePayload};
pub use self::slice_index::SliceIndex;
pub use self::slot::{SLOTS_PER_EPOCH, SLOTS_PER_WINDOW, Slot};
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/all2all.rs | src/all2all.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Direct all-to-all broadcast protocols.
//!
//! The [`All2All`] trait gives a simple interface for broadcasting messages.
//! It does not impose restrictions on the guarantees that should be provided.
//! However, each implementor should clearly document which guarantees it provides.
//!
//! This module provides two implementations of the [`All2All`] trait:
//! - [`TrivialAll2All`] implements a simple best-effort all-to-all broadcast protocol.
//! - [`RobustAll2All`] is a more robust implementation, handling retransmits.
//!
//! The exact guarantees, however, also depend on the underlying [`Network`],
//! since both implementations are generic over the [`Network`] trait.
//! For example, [`TrivialAll2All`] over a TCP-based network might still give
//! strong reliability guarantess.
//!
//! # Examples
//!
//! ```rust
//! use alpenglow::all2all::All2All;
//! use alpenglow::consensus::ConsensusMessage;
//!
//! async fn broadcast_all(msgs: &[ConsensusMessage], all2all: impl All2All) -> std::io::Result<()> {
//! for msg in msgs {
//! all2all.broadcast(msg).await?;
//! }
//! Ok(())
//! }
//! ```
//!
//! [`Network`]: crate::network::Network
mod robust;
mod trivial;
use async_trait::async_trait;
pub use self::robust::RobustAll2All;
pub use self::trivial::TrivialAll2All;
use crate::consensus::ConsensusMessage;
/// Abstraction for a direct all-to-all network communication protocol.
#[async_trait]
pub trait All2All {
/// Broadcasts the given message to all known nodes.
///
/// Which delivery guarantees are provided depends on the implementor.
/// This is allowed to be best-effort or any stronger set of guarantees.
/// Each implementor should clearly document which guarantees it provides.
///
/// # Errors
///
/// Implementors should return an [`std::io::Error`] iff the underlying network fails.
async fn broadcast(&self, msg: &ConsensusMessage) -> std::io::Result<()>;
/// Receives a message from any of the other nodes.
///
/// Resolves to the next successfully deserialized [`ConsensusMessage`].
/// Does not provide information on which node sent the message.
///
/// # Errors
///
/// Implementors should return an [`std::io::Error`] iff the underlying network fails.
async fn receive(&self) -> std::io::Result<ConsensusMessage>;
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/shredder.rs | src/shredder.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Shredding and deshredding of blocks.
//!
//! This module defines the [`Shredder`] trait for shredding blocks into shreds.
//!
//! It also provides several shredders implementing this trait:
//! - [`RegularShredder`] augments data shreds with coding shreds.
//! - [`CodingOnlyShredder`] only outputs coding shreds.
//! - [`AontShredder`] uses the RAONT-RS all-or-nothing construction.
//! - [`PetsShredder`] uses the PETS all-or-nothing construction.
//!
//! Finally, it defines the relevant low-level data type:
//! - [`Shred`] is a single part of the block that fits into a UDP datagram,
//! that also contains the slice header, Merkle path and leader signature.
//!
//! It also uses the [`Slice`] struct defined in the [`crate::types::slice`] module.
mod pool;
mod reed_solomon;
mod shred_index;
mod validated_shred;
mod validated_shreds;
use aes::Aes128;
use aes::cipher::{Array, KeyIvInit, StreamCipher};
use ctr::Ctr64LE;
use rand::{RngCore, rng};
use thiserror::Error;
use wincode::{SchemaRead, SchemaWrite};
pub use self::pool::{ShredderGuard, ShredderPool};
use self::reed_solomon::{
RawShreds, ReedSolomonCoder, ReedSolomonDeshredError, ReedSolomonShredError,
};
pub use self::shred_index::ShredIndex;
pub use self::validated_shred::{ShredVerifyError, ValidatedShred};
use crate::crypto::merkle::{SliceMerkleTree, SliceProof, SliceRoot};
use crate::crypto::signature::{SecretKey, Signature};
use crate::crypto::{MerkleTree, hash};
use crate::shredder::validated_shreds::ValidatedShreds;
use crate::types::{Slice, SliceHeader, SlicePayload};
/// Number of data shreds the payload of a slice is split into.
pub const DATA_SHREDS: usize = 32;
/// Total number of shreds the shredder outputs for a slice.
///
/// Generally, includes both data and coding shreds.
/// How many are data and coding depends on the specific shredder.
pub const TOTAL_SHREDS: usize = 64;
/// Maximum number of payload bytes a single shred can hold.
pub const MAX_DATA_PER_SHRED: usize = 1024;
/// Maximum number of bytes an entire slice can hold, incl. padding.
pub const MAX_DATA_PER_SLICE_AFTER_PADDING: usize = DATA_SHREDS * MAX_DATA_PER_SHRED;
/// Maximum number of payload bytes a slice can hold.
/// Our padding scheme requires that you leave at least one byte of padding.
pub const MAX_DATA_PER_SLICE: usize = MAX_DATA_PER_SLICE_AFTER_PADDING - 1;
/// Errors that may occur during shredding.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Error)]
pub enum ShredError {
#[error("too much data to fit into slice")]
TooMuchData,
}
impl From<ReedSolomonShredError> for ShredError {
fn from(err: ReedSolomonShredError) -> Self {
match err {
ReedSolomonShredError::TooMuchData => Self::TooMuchData,
}
}
}
/// Errors that may occur during deshredding.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Error)]
pub enum DeshredError {
#[error("could not deshred malformed input")]
BadEncoding,
#[error("too much data to fit into slice")]
TooMuchData,
#[error("not enough shreds to deshred")]
NotEnoughShreds,
#[error("shreds are part of invalid Merkle tree")]
InvalidMerkleTree,
#[error("shreds array contains invalid sequence")]
InvalidLayout,
}
impl From<ReedSolomonDeshredError> for DeshredError {
fn from(err: ReedSolomonDeshredError) -> Self {
match err {
ReedSolomonDeshredError::TooMuchData => Self::TooMuchData,
ReedSolomonDeshredError::NotEnoughShreds => Self::NotEnoughShreds,
ReedSolomonDeshredError::InvalidPadding => Self::BadEncoding,
}
}
}
impl From<ReedSolomonShredError> for DeshredError {
fn from(err: ReedSolomonShredError) -> Self {
match err {
ReedSolomonShredError::TooMuchData => Self::TooMuchData,
}
}
}
#[derive(Clone, Debug, SchemaRead, SchemaWrite)]
pub enum ShredPayloadType {
Data(ShredPayload),
Coding(ShredPayload),
}
/// A shred is the smallest unit of data that is used when disseminating blocks.
/// Shreds are crafted to fit into an MTU size packet.
#[derive(Clone, Debug, SchemaRead, SchemaWrite)]
pub struct Shred {
pub(crate) payload_type: ShredPayloadType,
pub(crate) merkle_root: SliceRoot,
merkle_root_sig: Signature,
merkle_path: SliceProof,
}
impl Shred {
/// Verifies only the Merkle proof of this shred.
///
/// For full verification, see [`ValidatedShred::try_new`].
///
/// Returns `true` iff the Merkle root matches the given root and the proof is valid.
#[must_use]
pub fn verify_path_only(&self, root: &SliceRoot) -> bool {
if &self.merkle_root != root {
return false;
}
SliceMerkleTree::check_proof(
&self.payload().data,
*self.payload().shred_index,
&self.merkle_root,
&self.merkle_path,
)
}
/// References the payload contained in this shred.
pub const fn payload(&self) -> &ShredPayload {
match &self.payload_type {
ShredPayloadType::Coding(p) | ShredPayloadType::Data(p) => p,
}
}
/// Mutably references the payload contained in this shred.
pub const fn payload_mut(&mut self) -> &mut ShredPayload {
match &mut self.payload_type {
ShredPayloadType::Coding(p) | ShredPayloadType::Data(p) => p,
}
}
/// Returns `true` iff this is a data shred.
pub const fn is_data(&self) -> bool {
matches!(self.payload_type, ShredPayloadType::Data(_))
}
/// Returns `true` iff this is a coding shred.
pub const fn is_coding(&self) -> bool {
matches!(self.payload_type, ShredPayloadType::Coding(_))
}
}
/// Base payload of a shred, regardless of its type.
#[derive(Clone, Debug, SchemaRead, SchemaWrite)]
pub struct ShredPayload {
/// Slice header replicated in each shred.
pub(crate) header: SliceHeader,
/// Index of this shred within the slice.
pub(crate) shred_index: ShredIndex,
/// Raw payload bytes of this shred, part of the erasure-coded slice payload.
pub(crate) data: Vec<u8>,
}
impl ShredPayload {
/// Returns the index of this shred within the entire slot.
#[must_use]
pub fn index_in_slot(&self) -> usize {
self.header.slice_index.inner() * TOTAL_SHREDS + *self.shred_index
}
}
/// A trait for shredding and deshredding.
///
/// Abstracts the process of turning a raw payload of bytes for an entire slice
/// into shreds and turning shreds back into the raw payload of a slice.
pub trait Shredder: Default {
/// Maximum number of payload bytes that fit into a slice.
///
/// For the regular shredder, this is [`MAX_DATA_PER_SLICE`].
/// However, this can be less if the specfic shredder adds some overhead.
const MAX_DATA_SIZE: usize;
/// When [`Shredder::shred`] is called, how many data shreds will be produced.
const DATA_OUTPUT_SHREDS: usize;
/// When [`Shredder::shred`] is called, how many coding shreds will be produced.
const CODING_OUTPUT_SHREDS: usize;
/// Splits the given slice into [`TOTAL_SHREDS`] shreds, which depending on
/// the specific implementation can be any combination of data and coding.
///
/// # Errors
///
/// - Implementations may return an error if the input is invalid or if the
/// shredding process fails for any implementation-specific reason.
/// - Should always return [`ShredError::TooMuchData`] if the `slice` is
/// too big, i.e., more than [`Shredder::MAX_DATA_SIZE`] bytes.
fn shred(
&mut self,
slice: Slice,
sk: &SecretKey,
) -> Result<[ValidatedShred; TOTAL_SHREDS], ShredError>;
/// Puts the given shreds back together into a complete slice.
///
/// Additionally, outputs all [`TOTAL_SHREDS`] reconstructed shreds.
/// This includes all (potentially data and coding) shreds sent originally.
///
/// # Errors
///
/// - Implementations may return an error if the input is invalid or if the
/// deshredding process fails for any implementation-specific reason.
/// - Should always return [`DeshredError::TooMuchData`] if the reconstructed
/// slice is too big, i.e., more than [`Shredder::MAX_DATA_SIZE`] bytes.
///
/// - Any implementation of this needs to make sure to:
/// 1. Reconstruct all shreds (data and coding) under the Merkle tree.
/// 2. Verify the entire Merkle tree.
/// 3. Return [`DeshredError::InvalidMerkleTree`] if this fails.
fn deshred(
&mut self,
shreds: &[Option<ValidatedShred>; TOTAL_SHREDS],
) -> Result<(Slice, [ValidatedShred; TOTAL_SHREDS]), DeshredError> {
let shreds =
ValidatedShreds::try_new(shreds, Self::DATA_OUTPUT_SHREDS, Self::CODING_OUTPUT_SHREDS)
.ok_or(DeshredError::InvalidLayout)?;
self.deshred_validated_shreds(shreds)
}
/// The core deshreding implementation that the actual shredders provide.
///
/// NOTE: this is not part of the public API, normally, [`Shredder::deshred()`] should be used.
fn deshred_validated_shreds(
&mut self,
shreds: ValidatedShreds,
) -> Result<(Slice, [ValidatedShred; TOTAL_SHREDS]), DeshredError>;
}
/// A shredder that augments the [`DATA_SHREDS`] data shreds with
/// `TOTAL_SHREDS - DATA_SHREDS` coding shreds and outputs both.
pub struct RegularShredder(ReedSolomonCoder);
impl Shredder for RegularShredder {
    const MAX_DATA_SIZE: usize = MAX_DATA_PER_SLICE;
    const DATA_OUTPUT_SHREDS: usize = DATA_SHREDS;
    const CODING_OUTPUT_SHREDS: usize = TOTAL_SHREDS - DATA_SHREDS;
    /// Reed-Solomon encodes the slice and emits both data and coding shreds,
    /// each carrying the Merkle root, its own proof, and the leader signature.
    fn shred(
        &mut self,
        slice: Slice,
        sk: &SecretKey,
    ) -> Result<[ValidatedShred; TOTAL_SHREDS], ShredError> {
        let (header, payload) = slice.deconstruct();
        let raw_shreds = self.0.shred(&payload.to_bytes())?;
        Ok(data_and_coding_to_output_shreds(header, raw_shreds, sk))
    }
    fn deshred_validated_shreds(
        &mut self,
        shreds: ValidatedShreds,
    ) -> Result<(Slice, [ValidatedShred; TOTAL_SHREDS]), DeshredError> {
        // recover the raw payload bytes via Reed-Solomon decoding
        let payload_bytes = self.0.deshred(shreds)?;
        let payload = SlicePayload::from(payload_bytes.as_slice());
        // deshreding succeeded above, there should be at least one shred in the array so the unwrap() below should be safe
        let any_shred = shreds.to_shreds().iter().find_map(|s| s.as_ref()).unwrap();
        let slice = Slice::from_shreds(payload, any_shred);
        let header = slice.to_header();
        // additional Merkle tree validity check:
        // re-shred the recovered payload and verify the resulting tree has the
        // same root that the received shreds carried
        let merkle_root = any_shred.merkle_root.clone();
        let raw_shreds = self.0.shred(&payload_bytes)?;
        let tree = build_merkle_tree(&raw_shreds);
        if tree.get_root() != merkle_root {
            return Err(DeshredError::InvalidMerkleTree);
        }
        // turn reconstructed shreds into output shreds (with root, path, sig)
        let leader_sig = any_shred.merkle_root_sig;
        let reconstructed_shreds =
            create_output_shreds_for_other_leader(header, raw_shreds, tree, leader_sig);
        assert_eq!(reconstructed_shreds.len(), TOTAL_SHREDS);
        Ok((slice, reconstructed_shreds))
    }
}
impl Default for RegularShredder {
    fn default() -> Self {
        // the Reed-Solomon coder is parameterized by the number of coding shreds
        Self(ReedSolomonCoder::new(Self::CODING_OUTPUT_SHREDS))
    }
}
/// A shredder that only produces [`TOTAL_SHREDS`] coding shreds.
pub struct CodingOnlyShredder(ReedSolomonCoder);
impl Shredder for CodingOnlyShredder {
    const MAX_DATA_SIZE: usize = MAX_DATA_PER_SLICE;
    const DATA_OUTPUT_SHREDS: usize = 0;
    const CODING_OUTPUT_SHREDS: usize = TOTAL_SHREDS;
    /// Reed-Solomon encodes the slice, then discards all data shreds so that
    /// only coding shreds are emitted.
    fn shred(
        &mut self,
        slice: Slice,
        sk: &SecretKey,
    ) -> Result<[ValidatedShred; TOTAL_SHREDS], ShredError> {
        let (header, payload) = slice.deconstruct();
        let mut raw_shreds = self.0.shred(&payload.to_bytes())?;
        // only coding shreds are published
        raw_shreds.data = vec![];
        Ok(data_and_coding_to_output_shreds(header, raw_shreds, sk))
    }
    fn deshred_validated_shreds(
        &mut self,
        shreds: ValidatedShreds,
    ) -> Result<(Slice, [ValidatedShred; TOTAL_SHREDS]), DeshredError> {
        let payload_bytes = self.0.deshred(shreds)?;
        let payload = SlicePayload::from(payload_bytes.as_slice());
        // deshreding succeeded above, there should be at least one shred in the array so the unwrap() below should be safe
        let any_shred = shreds.to_shreds().iter().find_map(|s| s.as_ref()).unwrap();
        let slice = Slice::from_shreds(payload, any_shred);
        // additional Merkle tree validity check
        let merkle_root = any_shred.merkle_root.clone();
        let mut raw_shreds = self.0.shred(&payload_bytes)?;
        // drop the data shreds again, mirroring `shred()`, before rebuilding the tree
        raw_shreds.data = vec![];
        let tree = build_merkle_tree(&raw_shreds);
        if tree.get_root() != merkle_root {
            return Err(DeshredError::InvalidMerkleTree);
        }
        // turn reconstructed shreds into output shreds (with root, path, sig)
        let (header, _payload) = slice.clone().deconstruct();
        let leader_sig = any_shred.merkle_root_sig;
        let reconstructed_shreds =
            create_output_shreds_for_other_leader(header, raw_shreds, tree, leader_sig);
        assert_eq!(reconstructed_shreds.len(), TOTAL_SHREDS);
        Ok((slice, reconstructed_shreds))
    }
}
impl Default for CodingOnlyShredder {
    fn default() -> Self {
        // the Reed-Solomon coder is parameterized by the number of coding shreds
        Self(ReedSolomonCoder::new(Self::CODING_OUTPUT_SHREDS))
    }
}
/// A shredder that uses the PETS all-or-nothing construction.
///
/// It outputs `DATA_SHREDS - 1` encrypted data shreds and
/// `TOTAL_SHREDS - DATA_SHREDS + 1` coding shreds.
///
/// See also: <https://arxiv.org/abs/2502.02774>
pub struct PetsShredder(ReedSolomonCoder);
impl Shredder for PetsShredder {
    // needs 16 bytes for symmetric encryption key
    const MAX_DATA_SIZE: usize = MAX_DATA_PER_SLICE - 16;
    const DATA_OUTPUT_SHREDS: usize = DATA_SHREDS - 1;
    const CODING_OUTPUT_SHREDS: usize = TOTAL_SHREDS - DATA_SHREDS + 1;
    /// Encrypts the payload with a fresh AES-128-CTR key, appends the key,
    /// Reed-Solomon encodes, and then withholds the data shred carrying the key.
    fn shred(
        &mut self,
        slice: Slice,
        sk: &SecretKey,
    ) -> Result<[ValidatedShred; TOTAL_SHREDS], ShredError> {
        let (header, payload) = slice.deconstruct();
        let mut payload: Vec<u8> = payload.into();
        assert!(payload.len() <= Self::MAX_DATA_SIZE);
        // fresh random key per slice; the IV is fixed to all-zero
        let mut rng = rng();
        let mut key = Array::from([0; 16]);
        rng.fill_bytes(&mut key);
        let iv = Array::from([0; 16]);
        let mut cipher = Ctr64LE::<Aes128>::new(&key, &iv);
        cipher.apply_keystream(&mut payload);
        // append the key; it ends up in the last data shred
        payload.extend_from_slice(&key);
        let mut raw_shreds = self.0.shred(&payload)?;
        // delete data shred containing key
        raw_shreds.data.pop();
        Ok(data_and_coding_to_output_shreds(header, raw_shreds, sk))
    }
    fn deshred_validated_shreds(
        &mut self,
        shreds: ValidatedShreds,
    ) -> Result<(Slice, [ValidatedShred; TOTAL_SHREDS]), DeshredError> {
        let mut buffer = self.0.deshred(shreds)?;
        // the last 16 bytes must hold the encryption key
        if buffer.len() < 16 {
            return Err(DeshredError::BadEncoding);
        }
        // deshreding succeeded above, there should be at least one shred in the array so the unwrap() below should be safe
        let any_shred = shreds.to_shreds().iter().find_map(|s| s.as_ref()).unwrap();
        // additional Merkle tree validity check
        let merkle_root = any_shred.merkle_root.clone();
        let header = any_shred.payload().header.clone();
        let mut raw_shreds = self.0.shred(&buffer)?;
        // drop the key-bearing data shred again, mirroring `shred()`
        raw_shreds.data.pop();
        let tree = build_merkle_tree(&raw_shreds);
        if tree.get_root() != merkle_root {
            return Err(DeshredError::InvalidMerkleTree);
        }
        // decrypt slice: split off the key, then decrypt the remaining bytes
        let tail = buffer.split_off(buffer.len() - 16);
        let iv = Array::from([0; 16]);
        let key = Array::try_from(tail.as_slice()).expect("tail should have correct length");
        let mut cipher = Ctr64LE::<Aes128>::new(&key, &iv);
        cipher.apply_keystream(&mut buffer);
        let payload = SlicePayload::from(buffer.as_slice());
        let slice = Slice::from_shreds(payload, any_shred);
        // turn reconstructed shreds into output shreds (with root, path, sig)
        let leader_sig = any_shred.merkle_root_sig;
        let reconstructed_shreds =
            create_output_shreds_for_other_leader(header, raw_shreds, tree, leader_sig);
        assert_eq!(reconstructed_shreds.len(), TOTAL_SHREDS);
        Ok((slice, reconstructed_shreds))
    }
}
impl Default for PetsShredder {
    fn default() -> Self {
        // the Reed-Solomon coder is parameterized by the number of coding shreds
        Self(ReedSolomonCoder::new(Self::CODING_OUTPUT_SHREDS))
    }
}
/// A shredder that uses the RAONT-RS all-or-nothing construction.
///
/// It outputs [`DATA_SHREDS`] encrypted data shreds and
/// `TOTAL_SHREDS - DATA_SHREDS` coding shreds.
///
/// See also: <https://eprint.iacr.org/2016/1014>
pub struct AontShredder(ReedSolomonCoder);
impl Shredder for AontShredder {
    // needs 16 bytes for symmetric encryption key
    const MAX_DATA_SIZE: usize = MAX_DATA_PER_SLICE - 16;
    const DATA_OUTPUT_SHREDS: usize = DATA_SHREDS;
    const CODING_OUTPUT_SHREDS: usize = TOTAL_SHREDS - DATA_SHREDS;
    /// Encrypts the payload with a fresh AES-128-CTR key and appends the key
    /// masked with the ciphertext hash (RAONT-RS), then Reed-Solomon encodes.
    fn shred(
        &mut self,
        slice: Slice,
        sk: &SecretKey,
    ) -> Result<[ValidatedShred; TOTAL_SHREDS], ShredError> {
        let (header, payload) = slice.deconstruct();
        let mut payload: Vec<u8> = payload.into();
        assert!(payload.len() <= Self::MAX_DATA_SIZE);
        // fresh random key per slice; the IV is fixed to all-zero
        let mut rng = rng();
        let mut key = Array::from([0; 16]);
        rng.fill_bytes(&mut key);
        let iv = Array::from([0; 16]);
        let mut cipher = Ctr64LE::<Aes128>::new(&key, &iv);
        cipher.apply_keystream(&mut payload);
        // append key XOR hash(ciphertext); unmasking the key requires the
        // full ciphertext, which makes the encoding all-or-nothing
        let hash = hash(&payload);
        for i in 0..16 {
            payload.push(hash.as_ref()[i] ^ key[i]);
        }
        let raw_shreds = self.0.shred(&payload)?;
        Ok(data_and_coding_to_output_shreds(header, raw_shreds, sk))
    }
    fn deshred_validated_shreds(
        &mut self,
        shreds: ValidatedShreds,
    ) -> Result<(Slice, [ValidatedShred; TOTAL_SHREDS]), DeshredError> {
        let mut buffer = self.0.deshred(shreds)?;
        // the last 16 bytes must hold the masked key
        if buffer.len() < 16 {
            return Err(DeshredError::BadEncoding);
        }
        // deshreding succeeded above, there should be at least one shred in the array so the unwrap() below should be safe
        let any_shred = shreds.to_shreds().iter().find_map(|s| s.as_ref()).unwrap();
        // additional Merkle tree validity check
        let merkle_root = any_shred.merkle_root.clone();
        let header = any_shred.payload().header.clone();
        let raw_shreds = self.0.shred(&buffer)?;
        let tree = build_merkle_tree(&raw_shreds);
        if tree.get_root() != merkle_root {
            return Err(DeshredError::InvalidMerkleTree);
        }
        // decrypt slice: unmask the key with the ciphertext hash, then decrypt
        let tail = buffer.split_off(buffer.len() - 16);
        let hash = hash(&buffer);
        let iv = Array::from([0; 16]);
        // consistent with `PetsShredder`: the tail is exactly 16 bytes here
        let mut key = Array::try_from(tail.as_slice()).expect("tail should have correct length");
        for i in 0..16 {
            key[i] ^= hash.as_ref()[i];
        }
        let mut cipher = Ctr64LE::<Aes128>::new(&key, &iv);
        cipher.apply_keystream(&mut buffer);
        let payload = SlicePayload::from(buffer.as_slice());
        let slice = Slice::from_shreds(payload, any_shred);
        // turn reconstructed shreds into output shreds (with root, path, sig)
        let leader_sig = any_shred.merkle_root_sig;
        let reconstructed_shreds =
            create_output_shreds_for_other_leader(header, raw_shreds, tree, leader_sig);
        assert_eq!(reconstructed_shreds.len(), TOTAL_SHREDS);
        Ok((slice, reconstructed_shreds))
    }
}
impl Default for AontShredder {
    fn default() -> Self {
        // the Reed-Solomon coder is parameterized by the number of coding shreds
        Self(ReedSolomonCoder::new(Self::CODING_OUTPUT_SHREDS))
    }
}
/// Generates the Merkle tree, signs the root, and outputs shreds.
///
/// Each returned shred contains the Merkle root, its own path and the signature.
/// Generates the Merkle tree, signs the root, and outputs shreds.
///
/// Each returned shred contains the Merkle root, its own path and the signature.
fn data_and_coding_to_output_shreds(
    header: SliceHeader,
    raw_shreds: RawShreds,
    sk: &SecretKey,
) -> [ValidatedShred; TOTAL_SHREDS] {
    let tree = build_merkle_tree(&raw_shreds);
    // this node is the leader, so it signs the root itself
    let merkle_root_sig = sk.sign(tree.get_root().as_ref());
    attach_root_path_and_sig(header, raw_shreds, &tree, merkle_root_sig)
}

/// Puts the root, path, and signature of the leader into shreds.
///
/// This is analogous to [`data_and_coding_to_output_shreds`], but for another leader.
/// Instead of signing the root, copies the existing signature from another shred.
/// Also, requires the Merkle tree to already be calculated from reconstructed shreds.
///
/// Each returned shred contains the Merkle root, its own path and the signature.
fn create_output_shreds_for_other_leader(
    header: SliceHeader,
    raw_shreds: RawShreds,
    tree: SliceMerkleTree,
    leader_signature: Signature,
) -> [ValidatedShred; TOTAL_SHREDS] {
    attach_root_path_and_sig(header, raw_shreds, &tree, leader_signature)
}

/// Shared implementation for the two functions above.
///
/// Wraps the raw data and coding shreds into [`ValidatedShred`]s, each
/// carrying the Merkle root, its own proof from `tree`, and `merkle_root_sig`.
/// Data shreds occupy the first indices, coding shreds follow.
fn attach_root_path_and_sig(
    header: SliceHeader,
    raw_shreds: RawShreds,
    tree: &SliceMerkleTree,
    merkle_root_sig: Signature,
) -> [ValidatedShred; TOTAL_SHREDS] {
    let merkle_root = tree.get_root();
    // builds the per-shred proof and payload for one raw shred
    let convert = |shred_index: ShredIndex, data: Vec<u8>| -> (SliceProof, ShredPayload) {
        let merkle_path = tree.create_proof(*shred_index);
        let payload = ShredPayload {
            header: header.clone(),
            shred_index,
            data,
        };
        (merkle_path, payload)
    };
    let num_data = raw_shreds.data.len();
    let data = raw_shreds
        .data
        .into_iter()
        .enumerate()
        .map(|(shred_index, d)| {
            let shred_index = ShredIndex::new(shred_index).unwrap();
            let (merkle_path, payload) = convert(shred_index, d);
            (merkle_path, ShredPayloadType::Data(payload))
        });
    let coding = raw_shreds
        .coding
        .into_iter()
        .enumerate()
        .map(|(offset, c)| {
            // coding shred indices start right after the data shreds
            let shred_index = ShredIndex::new(num_data + offset).unwrap();
            let (merkle_path, payload) = convert(shred_index, c);
            (merkle_path, ShredPayloadType::Coding(payload))
        });
    data.chain(coding)
        .map(|(merkle_path, payload)| {
            ValidatedShred::new_validated(Shred {
                payload_type: payload,
                merkle_root: merkle_root.clone(),
                merkle_root_sig,
                merkle_path,
            })
        })
        .collect::<Vec<_>>()
        .try_into()
        .unwrap()
}
/// Builds the Merkle tree for a slice, where the leaves are the given shreds.
/// Builds the slice's Merkle tree, using the given shreds as leaves.
///
/// Data shreds come first, followed by the coding shreds.
fn build_merkle_tree(raw_shreds: &RawShreds) -> SliceMerkleTree {
    // lazily chain both shred kinds; no intermediate collection is allocated
    MerkleTree::new(raw_shreds.data.iter().chain(raw_shreds.coding.iter()))
}
#[cfg(test)]
mod tests {
use color_eyre::Result;
use super::*;
use crate::types::slice::create_slice_with_invalid_txs;
/// Constructs a valid layout of `Shred`s from the input.
fn into_array(shreds: &[ValidatedShred]) -> [Option<ValidatedShred>; TOTAL_SHREDS] {
assert!(shreds.len() <= TOTAL_SHREDS);
let mut ret = [const { None }; TOTAL_SHREDS];
for shred in shreds {
ret[*shred.payload().shred_index] = Some(shred.clone());
}
ret
}
#[test]
fn regular_shredding() -> Result<()> {
let mut shredder = RegularShredder::default();
let sk = SecretKey::new(&mut rng());
let mut slice = create_slice_with_invalid_txs(MAX_DATA_PER_SLICE);
let shreds = shredder.shred(slice.clone(), &sk)?;
assert_eq!(shreds.len(), TOTAL_SHREDS);
// restore from all shreds
let all = into_array(&shreds);
let (slice_restored, _) = shredder.deshred(&all)?;
slice.merkle_root = slice_restored.merkle_root.clone();
assert_eq!(slice_restored, slice);
// restore only from data shreds
let coding = into_array(&shreds[..DATA_SHREDS]);
let (slice_restored, _) = shredder.deshred(&coding)?;
assert_eq!(slice_restored, slice);
// restore using as many coding shreds as possible
let data = into_array(&shreds[TOTAL_SHREDS - DATA_SHREDS..]);
let (slice_restored, _) = shredder.deshred(&data)?;
assert_eq!(slice_restored, slice);
// restore from non-consecutive shreds
let nc_shreds = [&shreds[..1], &shreds[DATA_SHREDS + 1..]].concat();
let nc_shreds = into_array(&nc_shreds);
let (slice_restored, _) = shredder.deshred(&nc_shreds)?;
assert_eq!(slice_restored, slice);
// restore from half coding / half data shreds
let start = DATA_SHREDS / 2;
let end = DATA_SHREDS / 2 + DATA_SHREDS;
let input = into_array(&shreds[start..end]);
let (slice_restored, _) = shredder.deshred(&input)?;
assert_eq!(slice_restored, slice);
// restore from all but one shred
let input = into_array(&shreds[1..]);
let (slice_restored, _) = shredder.deshred(&input)?;
assert_eq!(slice_restored, slice);
// cannot restore from one shred
let input = into_array(&shreds[..1]);
let result = shredder.deshred(&input);
assert_eq!(result.err(), Some(DeshredError::NotEnoughShreds));
// cannot restore from too few shreds
let input = into_array(&shreds[..DATA_SHREDS - 1]);
let result = shredder.deshred(&input);
assert_eq!(result.err(), Some(DeshredError::NotEnoughShreds));
Ok(())
}
#[test]
fn coding_only_shredding() -> Result<()> {
let mut shredder = CodingOnlyShredder::default();
let sk = SecretKey::new(&mut rng());
let mut slice = create_slice_with_invalid_txs(MAX_DATA_PER_SLICE);
let shreds = shredder.shred(slice.clone(), &sk)?;
assert_eq!(shreds.len(), TOTAL_SHREDS);
// restore from all shreds
let input = into_array(&shreds);
let (slice_restored, _) = shredder.deshred(&input)?;
slice.merkle_root = slice_restored.merkle_root.clone();
assert_eq!(slice_restored, slice);
// restore from just enough shreds
let input = into_array(&shreds[..DATA_SHREDS]);
let (slice_restored, _) = shredder.deshred(&input)?;
assert_eq!(slice_restored, slice);
// restore from non-consecutive shreds
let nc_shreds = [&shreds[..1], &shreds[DATA_SHREDS + 1..]].concat();
let input = into_array(&nc_shreds);
let (slice_restored, _) = shredder.deshred(&input)?;
assert_eq!(slice_restored, slice);
// restore from all but one shred
let input = into_array(&shreds[1..]);
let (slice_restored, _) = shredder.deshred(&input)?;
assert_eq!(slice_restored, slice);
// cannot restore from one shred
let input = into_array(&shreds[..1]);
let result = shredder.deshred(&input);
assert_eq!(result.err(), Some(DeshredError::NotEnoughShreds));
// cannot restore from too few shreds
let input = into_array(&shreds[..DATA_SHREDS - 1]);
let result = shredder.deshred(&input);
assert_eq!(result.err(), Some(DeshredError::NotEnoughShreds));
Ok(())
}
#[test]
fn aont_shredding() -> Result<()> {
let mut shredder = AontShredder::default();
let sk = SecretKey::new(&mut rng());
let mut slice = create_slice_with_invalid_txs(MAX_DATA_PER_SLICE - 16);
let shreds = shredder.shred(slice.clone(), &sk)?;
assert_eq!(shreds.len(), TOTAL_SHREDS);
// restore from all shreds
let input = into_array(&shreds);
let (slice_restored, _) = shredder.deshred(&input)?;
slice.merkle_root = slice_restored.merkle_root.clone();
assert_eq!(slice_restored, slice);
// restore from just enough shreds
let input = into_array(&shreds[..DATA_SHREDS]);
let (slice_restored, _) = shredder.deshred(&input)?;
assert_eq!(slice_restored, slice);
// restore from non-consecutive shreds
let nc_shreds = [&shreds[..1], &shreds[DATA_SHREDS + 1..]].concat();
let input = into_array(&nc_shreds);
let (slice_restored, _) = shredder.deshred(&input)?;
assert_eq!(slice_restored, slice);
// restore from half coding / half data shreds
let start = DATA_SHREDS / 2;
let end = DATA_SHREDS / 2 + DATA_SHREDS;
let input = into_array(&shreds[start..end]);
let (slice_restored, _) = shredder.deshred(&input)?;
assert_eq!(slice_restored, slice);
// restore from all but one shred
let input = into_array(&shreds[1..]);
let (slice_restored, _) = shredder.deshred(&input)?;
assert_eq!(slice_restored, slice);
// cannot restore from one shred
let input = into_array(&shreds[..1]);
let result = shredder.deshred(&input);
assert_eq!(result.err(), Some(DeshredError::NotEnoughShreds));
// cannot restore from too few shreds
let input = into_array(&shreds[..DATA_SHREDS - 1]);
let result = shredder.deshred(&input);
assert_eq!(result.err(), Some(DeshredError::NotEnoughShreds));
Ok(())
}
#[test]
fn pets_shredding() -> Result<()> {
let mut shredder = PetsShredder::default();
let sk = SecretKey::new(&mut rng());
let mut slice = create_slice_with_invalid_txs(MAX_DATA_PER_SLICE - 16);
let shreds = shredder.shred(slice.clone(), &sk)?;
assert_eq!(shreds.len(), TOTAL_SHREDS);
// restore from all shreds
let input = into_array(&shreds);
let (slice_restored, _) = shredder.deshred(&input)?;
slice.merkle_root = slice_restored.merkle_root.clone();
assert_eq!(slice_restored, slice);
// restore from just enough shreds
let input = into_array(&shreds[..DATA_SHREDS]);
let (slice_restored, _) = shredder.deshred(&input)?;
assert_eq!(slice_restored, slice);
// restore from non-consecutive shreds
let nc_shreds = [&shreds[..1], &shreds[DATA_SHREDS + 1..]].concat();
let input = into_array(&nc_shreds);
let (slice_restored, _) = shredder.deshred(&input)?;
assert_eq!(slice_restored, slice);
// restore from half coding / half data shreds
let start = DATA_SHREDS / 2;
let end = DATA_SHREDS / 2 + DATA_SHREDS;
let input = into_array(&shreds[start..end]);
let (slice_restored, _) = shredder.deshred(&input)?;
assert_eq!(slice_restored, slice);
// restore from all but one shred
let input = into_array(&shreds[1..]);
let (slice_restored, _) = shredder.deshred(&input)?;
assert_eq!(slice_restored, slice);
// cannot restore from one shred
let input = into_array(&shreds[..1]);
let result = shredder.deshred(&input);
assert_eq!(result.err(), Some(DeshredError::NotEnoughShreds));
// cannot restore from too few shreds
let input = into_array(&shreds[..DATA_SHREDS - 1]);
let result = shredder.deshred(&input);
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | true |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/validator.rs | src/validator.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
pub struct Validator {}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/main.rs | src/main.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::borrow::Cow;
use alpenglow::{create_test_nodes, logging};
use color_eyre::Result;
use fastrace::collector::Config;
use fastrace::prelude::*;
use fastrace_opentelemetry::OpenTelemetryReporter;
use log::warn;
use opentelemetry::{InstrumentationScope, KeyValue};
use opentelemetry_otlp::{SpanExporter, WithExportConfig};
use opentelemetry_sdk::Resource;
#[tokio::main]
async fn main() -> Result<()> {
    // enable fancy `color_eyre` error messages
    color_eyre::install()?;
    // enable `fastrace` tracing, exporting spans via OTLP/gRPC to a local collector
    let reporter = OpenTelemetryReporter::new(
        SpanExporter::builder()
            .with_tonic()
            .with_endpoint("http://127.0.0.1:4317".to_string())
            .with_protocol(opentelemetry_otlp::Protocol::Grpc)
            .with_timeout(opentelemetry_otlp::OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT)
            .build()
            .expect("initialize oltp exporter"),
        Cow::Owned(
            Resource::builder()
                .with_attributes([KeyValue::new("service.name", "alpenglow-main")])
                .build(),
        ),
        InstrumentationScope::builder("alpenglow")
            .with_version(env!("CARGO_PKG_VERSION"))
            .build(),
    );
    fastrace::set_reporter(reporter, Config::default());
    logging::enable_logforth();
    // inner scope so all spans are dropped before the final flush below
    {
        let parent = SpanContext::random();
        // spawn local cluster
        let nodes = create_test_nodes(2);
        let mut node_tasks = Vec::new();
        let mut cancel_tokens = Vec::new();
        for (i, node) in nodes.into_iter().enumerate() {
            // each node runs in its own task under its own root span
            let span_name = format!("node {i}");
            let span = Span::root(span_name, parent);
            cancel_tokens.push(node.get_cancel_token());
            node_tasks.push(tokio::spawn(node.run().in_span(span)));
        }
        // run until the user interrupts with Ctrl-C
        tokio::signal::ctrl_c().await.unwrap();
        warn!("shutting down all nodes");
        for token in &cancel_tokens {
            token.cancel();
        }
        // wait for all node tasks to finish their shutdown
        futures::future::join_all(node_tasks).await;
    }
    fastrace::flush();
    Ok(())
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/repair.rs | src/repair.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Block repair sub-protocol.
//!
//! This module implements the double-Merkle based block repair protocol.
//! It uses the fact that the block hash is the root of a Merkle tree, where
//! the leaves of this tree are the Merkle roots of each of the block's slices.
//! Each repair response is accompanied by a Merkle proof and can thus be
//! individually verified.
use std::collections::{BTreeMap, BinaryHeap, HashSet};
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::{Duration, Instant};
use log::{debug, trace, warn};
use tokio::sync::RwLock;
use wincode::{SchemaRead, SchemaWrite};
use crate::consensus::{Blockstore, DELTA, EpochInfo, Pool};
use crate::crypto::merkle::{DoubleMerkleProof, DoubleMerkleTree, MerkleRoot, SliceRoot};
use crate::crypto::{Hash, hash};
use crate::disseminator::rotor::{SamplingStrategy, StakeWeightedSampler};
use crate::network::{Network, RepairNetwork, RepairRequestNetwork};
use crate::shredder::{Shred, ShredIndex};
use crate::types::SliceIndex;
use crate::{BlockId, ValidatorId};
/// Maximum time to wait for a response to a repair request.
///
/// After a request times out we retry it from another node.
const REPAIR_TIMEOUT: Duration = DELTA.checked_mul(2).unwrap();
/// Different types of [`RepairRequest`] messages.
#[derive(Clone, Debug, PartialEq, Eq, SchemaRead, SchemaWrite)]
pub enum RepairRequestType {
/// Request for the total number of slices in block with a given hash.
LastSliceRoot(BlockId),
/// Request for the root hash of a slice, identified by block hash and slice index.
SliceRoot(BlockId, SliceIndex),
/// Request for shred, identified by block hash, slice index and shred index.
Shred(BlockId, SliceIndex, ShredIndex),
}
impl RepairRequestType {
    /// Digests the [`RepairRequestType`] into a [`crate::crypto::Hash`].
    ///
    /// The request type is wrapped in a [`RepairRequest`] with a fixed dummy
    /// `sender` of 0 before serializing, so the resulting hash identifies the
    /// request type alone, independent of which validator sent it.
    fn hash(&self) -> Hash {
        let repair = RepairRequest {
            req_type: self.clone(),
            sender: 0,
        };
        // serialization of this in-memory struct is assumed infallible
        let msg_bytes = wincode::serialize(&repair).unwrap();
        hash(&msg_bytes)
    }
}
/// Request messages for the repair sub-protocol.
#[derive(Clone, Debug, SchemaRead, SchemaWrite)]
pub struct RepairRequest {
/// The validator that sent the message.
sender: ValidatorId,
/// The type of repair message sent.
req_type: RepairRequestType,
}
/// Response messages for the repair sub-protocol.
///
/// Each response type corresponds to a specific request message type.
/// Each response contains the request message that it is a response to.
/// If well-formed, it thus contains the corresponding variant of [`RepairRequest`].
#[derive(Clone, Debug, SchemaRead, SchemaWrite)]
pub enum RepairResponse {
/// Response with the last slice's Merkle root hash, plus corresponding proof.
LastSliceRoot(RepairRequestType, SliceIndex, SliceRoot, DoubleMerkleProof),
/// Response with the Merkle root hash of a specific slice, plus corresponding proof.
SliceRoot(RepairRequestType, SliceRoot, DoubleMerkleProof),
/// Response with a specific shred.
Shred(RepairRequestType, Shred),
}
impl RepairResponse {
/// Returns a reference to the [`RepairRequestType`] that this response corresponds to.
#[must_use]
const fn request_type(&self) -> &RepairRequestType {
match self {
Self::LastSliceRoot(req_type, _, _, _)
| Self::SliceRoot(req_type, _, _)
| Self::Shred(req_type, _) => req_type,
}
}
}
/// Handle repair requests from other nodes.
///
/// This is separated from [`Repair`] to handle repair requests and responses on separate sockets and tokio tasks.
/// This allows us to prioritise repairing blocks for ourselves over serving repair requests for other nodes.
pub struct RepairRequestHandler<N: Network> {
epoch_info: Arc<EpochInfo>,
blockstore: Arc<RwLock<Box<dyn Blockstore + Send + Sync>>>,
network: N,
}
impl<N> RepairRequestHandler<N>
where
    N: RepairRequestNetwork,
{
    /// Creates a new repair request handler instance.
    ///
    /// Given `network` instance will be used for receiving repair requests and sending repair responses.
    /// The blockstore will be used to handle the repair requests.
    pub fn new(
        epoch_info: Arc<EpochInfo>,
        blockstore: Arc<RwLock<Box<dyn Blockstore + Send + Sync>>>,
        network: N,
    ) -> Self {
        Self {
            epoch_info,
            blockstore,
            network,
        }
    }
    /// Main loop of the repair request handler.
    ///
    /// Listens for repair requests on `self.network`.
    /// Looks up the corresponding data in `self.blockstore` and sends replies.
    ///
    /// NOTE(review): both `unwrap()`s below turn a network I/O error into a
    /// panic of this task — confirm this is the intended failure mode.
    pub async fn run(&self) {
        loop {
            let request = self.network.receive().await.unwrap();
            self.answer_request(request).await.unwrap();
        }
    }
    /// Tries to answer the given repair request.
    ///
    /// If we do not have the necessary information in blockstore, the request is ignored.
    /// Otherwise, the correct response is sent back to the sender of the request.
    async fn answer_request(&self, request: RepairRequest) -> std::io::Result<()> {
        trace!("answering repair request: {request:?}");
        // each arm silently returns Ok(()) if blockstore lacks the data
        let response = match &request.req_type {
            RepairRequestType::LastSliceRoot(block_id) => {
                let blockstore = self.blockstore.read().await;
                let Some(last_slice) = blockstore.get_last_slice_index(block_id) else {
                    return Ok(());
                };
                let Some(root) = blockstore.get_slice_root(block_id, last_slice) else {
                    return Ok(());
                };
                let Some(proof) = blockstore.create_double_merkle_proof(block_id, last_slice)
                else {
                    return Ok(());
                };
                RepairResponse::LastSliceRoot(request.req_type, last_slice, root.clone(), proof)
            }
            RepairRequestType::SliceRoot(block_id, slice) => {
                let blockstore = self.blockstore.read().await;
                let Some(root) = blockstore.get_slice_root(block_id, *slice) else {
                    return Ok(());
                };
                let Some(proof) = blockstore.create_double_merkle_proof(block_id, *slice) else {
                    return Ok(());
                };
                RepairResponse::SliceRoot(request.req_type, root.clone(), proof)
            }
            RepairRequestType::Shred(block_id, slice, shred) => {
                let blockstore = self.blockstore.read().await;
                let Some(shred) = blockstore.get_shred(block_id, *slice, *shred).cloned() else {
                    return Ok(());
                };
                RepairResponse::Shred(request.req_type, shred.into_shred())
            }
        };
        self.send_response(response, request.sender).await
    }
    /// Sends `response` to the given validator's repair response address.
    async fn send_response(
        &self,
        response: RepairResponse,
        validator: ValidatorId,
    ) -> std::io::Result<()> {
        let to = self.epoch_info.validator(validator).repair_response_address;
        self.network.send(&response, to).await
    }
}
/// Instance of double-Merkle based block repair protocol.
///
/// This is used by the node to repair blocks that it is missing.
/// This does not answer repair requests from other nodes, that is handled by [`RepairRequestHandler`].
pub struct Repair<N: Network> {
    /// Store that repaired shreds are written into.
    blockstore: Arc<RwLock<Box<dyn Blockstore + Send + Sync>>>,
    /// Consensus pool, notified once a block was fully repaired.
    pool: Arc<RwLock<Box<dyn Pool + Send + Sync>>>,
    /// Verified Merkle roots per (block, slice), used to check `Shred` responses.
    slice_roots: BTreeMap<(BlockId, SliceIndex), SliceRoot>,
    /// Requests sent but not yet answered, keyed by request hash.
    outstanding_requests: BTreeMap<Hash, RepairRequestType>,
    /// Expiry times of outstanding requests.
    ///
    /// NOTE(review): `BinaryHeap` is a max-heap, so `peek()`/`pop()` yield the
    /// entry with the *latest* expiry, while `repair_loop()` treats the top as
    /// the next timeout to fire. Consider storing `Reverse((Instant, Hash))`
    /// for min-heap order — TODO confirm intended behavior.
    request_timeouts: BinaryHeap<(Instant, Hash)>,
    /// Network used for sending requests and receiving responses.
    network: N,
    /// Sampler used to pick peers to send repair requests to.
    sampler: StakeWeightedSampler,
    /// Epoch/validator metadata (own id, validator set).
    epoch_info: Arc<EpochInfo>,
}
impl<N> Repair<N>
where
N: RepairNetwork,
{
/// Creates a new repair instance.
///
/// Given `network` will be used for sending repair requests and receiving repair responses.
/// Any repaired shreds will be written into the provided `blockstore`.
    pub fn new(
        blockstore: Arc<RwLock<Box<dyn Blockstore + Send + Sync>>>,
        pool: Arc<RwLock<Box<dyn Pool + Send + Sync>>>,
        network: N,
        epoch_info: Arc<EpochInfo>,
    ) -> Self {
        // peer sampling draws from the same validator set as `epoch_info`
        let validators = epoch_info.validators.clone();
        let sampler = StakeWeightedSampler::new(validators);
        Self {
            blockstore,
            pool,
            slice_roots: BTreeMap::new(),
            outstanding_requests: BTreeMap::new(),
            request_timeouts: BinaryHeap::new(),
            network,
            sampler,
            epoch_info,
        }
    }
/// Main loop of the repair protocol.
///
/// Listens to incoming requests for blocks to repair on `self.repair_channel`.
/// Inititates the corresponding repair process and handles ongoing repairs.
    pub async fn repair_loop(&mut self, mut repair_receiver: tokio::sync::mpsc::Receiver<BlockId>) {
        loop {
            // determine how long to sleep until the next request timeout
            // (forever if no request is pending)
            //
            // NOTE(review): `BinaryHeap` is a max-heap, so `peek()` returns the
            // entry with the *latest* expiry, not the soonest — earlier
            // timeouts only fire after later ones. Storing `Reverse(..)`
            // entries would give min-heap order; confirm intended behavior.
            let next_timeout = self.request_timeouts.peek().map(|(t, _)| t);
            let sleep_duration = match next_timeout {
                None => std::time::Duration::MAX,
                Some(t) => t.duration_since(Instant::now()),
            };
            tokio::select! {
                // handle repair response from network
                res = self.network.receive() => self.handle_response(res.unwrap()).await,
                // handle request for repairing new block
                Some(block_id) = repair_receiver.recv() => {
                    self.repair_block(block_id).await;
                }
                // handle next request timeout
                () = tokio::time::sleep(sleep_duration) => {
                    let Some((_, hash)) = self.request_timeouts.pop() else {
                        continue;
                    };
                    // only retry if still unanswered; answered requests were
                    // already removed from the map in `handle_response()`
                    if let Some(request) = self.outstanding_requests.remove(&hash) {
                        debug!("retrying timed-out repair request {request:?}");
                        self.send_request(request).await.unwrap();
                    }
                }
            }
        }
    }
/// Starts repair process for the block specified by `slot` and `block_hash`.
pub async fn repair_block(&mut self, block_id: BlockId) {
let (slot, block_hash) = &block_id;
let h = &hex::encode(block_hash.as_hash())[..8];
if self.blockstore.read().await.get_block(&block_id).is_some() {
trace!("ignoring repair for block {h} in slot {slot}, already have the block");
return;
}
debug!("repairing block {h} in slot {slot}");
let req = RepairRequestType::LastSliceRoot(block_id);
self.send_request(req).await.unwrap();
}
/// Handles a repair response, storing the received data.
///
/// If the response contains a shred, it will be stored in the [`Blockstore`].
/// Otherwise, metadata is stored in the [`Repair`] struct itself.
/// Does nothing if the provided `response` is not well-formed.
    async fn handle_response(&mut self, response: RepairResponse) {
        trace!("handling repair response: {response:?}");
        let request_hash = response.request_type().hash();
        // check whether we are (still) waiting on response to this request;
        // removing here also deduplicates: only the first response is processed
        let Some(_) = self.outstanding_requests.remove(&request_hash) else {
            warn!("received repair response for unknown request {response:?}");
            return;
        };
        match response {
            RepairResponse::LastSliceRoot(req_type, last_slice, root, proof) => {
                // check validity of response
                let RepairRequestType::LastSliceRoot(block_id) = &req_type else {
                    warn!("repair response (LastSliceRoot) to mismatching request {req_type:?}");
                    return;
                };
                let (_, block_hash) = block_id;
                // the proof must also show this is the block's *last* slice
                if !DoubleMerkleTree::check_proof_last(
                    &root,
                    last_slice.inner(),
                    block_hash,
                    &proof,
                ) {
                    warn!("repair response (LastSliceRoot) with invalid proof");
                    return;
                }
                // store slice Merkle root
                self.slice_roots
                    .insert((block_id.clone(), last_slice), root);
                // issue next requests
                // TODO: do not request last slice root again
                // TODO: already requests shreds for last slice here
                for slice in last_slice.until() {
                    let req_type = RepairRequestType::SliceRoot(block_id.clone(), slice);
                    self.send_request(req_type).await.unwrap();
                }
            }
            RepairResponse::SliceRoot(req_type, root, proof) => {
                // check validity of response
                let RepairRequestType::SliceRoot(ref block_id, slice) = req_type else {
                    warn!("repair response (SliceRoot) to mismatching request {req_type:?}");
                    return;
                };
                let (_, block_hash) = block_id;
                if !DoubleMerkleTree::check_proof(&root, slice.inner(), block_hash, &proof) {
                    warn!("repair response (SliceRoot) with invalid proof");
                    return;
                }
                // store slice Merkle root (needed below to verify shred responses)
                self.slice_roots.insert((block_id.clone(), slice), root);
                // issue next requests
                // HACK: workaround for when other nodes don't have the first `DATA_SHREDS` shreds
                for shred_index in ShredIndex::all() {
                    let req = RepairRequestType::Shred(block_id.clone(), slice, shred_index);
                    self.send_request(req).await.unwrap();
                }
            }
            RepairResponse::Shred(req_type, shred) => {
                // check validity of response
                let RepairRequestType::Shred(ref block_id, slice, index) = req_type else {
                    warn!("repair response (Shred) to mismatching request {req_type:?}");
                    return;
                };
                let (slot, block_hash) = block_id;
                // the shred must be exactly the one that was requested
                if shred.payload().header.slot != *slot
                    || shred.payload().header.slice_index != slice
                    || shred.payload().shred_index != index
                {
                    warn!("repair response (Shred) for mismatching shred index");
                    return;
                }
                // shred requests are only sent after the slice root was stored
                // (see the `SliceRoot` arm above), hence the unreachable!()
                let Some(root) = self.slice_roots.get(&(block_id.clone(), slice)) else {
                    unreachable!("issued repair request (Shred) before knowing slice root");
                };
                if !shred.verify_path_only(root) {
                    warn!("repair response (Shred) with invalid Merkle proof");
                    return;
                }
                // store shred
                let res = self
                    .blockstore
                    .write()
                    .await
                    .add_shred_from_repair(block_hash.clone(), shred)
                    .await;
                // a returned `BlockInfo` means the block is now complete
                if let Ok(Some(block_info)) = res {
                    assert_eq!(block_info.hash, *block_hash);
                    self.pool
                        .write()
                        .await
                        .add_block((*slot, block_info.hash), block_info.parent)
                        .await;
                    debug!(
                        "successfully repaired block {} in slot {}",
                        &hex::encode(block_hash.as_hash())[..8],
                        slot
                    );
                }
            }
        }
    }
async fn send_request(&mut self, req_type: RepairRequestType) -> std::io::Result<()> {
let hash = req_type.hash();
let expiry = Instant::now() + REPAIR_TIMEOUT;
self.outstanding_requests
.insert(hash.clone(), req_type.clone());
self.request_timeouts.retain(|(_, h)| h != &hash);
self.request_timeouts.push((expiry, hash));
let request = RepairRequest {
sender: self.epoch_info.own_id,
req_type,
};
// HACK: magic number to fix high-failure scenarios
let mut to_all = HashSet::new();
for _ in 0..10 {
to_all.insert(self.pick_random_peer());
if to_all.len() == 3 {
break;
}
}
self.network
.send_to_many(&request, to_all.into_iter())
.await?;
Ok(())
}
fn pick_random_peer(&self) -> SocketAddr {
let mut rng = rand::rng();
let mut peer_info = self.sampler.sample_info(&mut rng);
while peer_info.id == self.epoch_info.own_id {
peer_info = self.sampler.sample_info(&mut rng);
}
peer_info.repair_request_address
}
}
#[cfg(test)]
mod tests {
use std::collections::BTreeSet;
use tokio::sync::mpsc::Sender;
use super::*;
use crate::consensus::{BlockstoreImpl, PoolImpl};
use crate::crypto::signature::SecretKey;
use crate::network::simulated::SimulatedNetworkCore;
use crate::network::{SimulatedNetwork, localhost_ip_sockaddr};
use crate::shredder::TOTAL_SHREDS;
use crate::test_utils::{create_random_shredded_block, generate_validators};
use crate::types::Slot;
use crate::types::slice_index::MAX_SLICES_PER_BLOCK;
    /// Creates a small network of 2 validators.
    ///
    /// Validator 0: Is the leader of the genesis window and does not have repair set up.
    /// Validator 1: Has repair set up and is not the leader.
    ///
    /// Returns:
    /// - sender side of the repair channel for validator 1
    /// - blockstore of validator 1
    /// - network interface where validator 0 should accept [`RepairRequest`] messages
    /// - network interface where validator 0 should accept [`RepairResponse`] messages
    /// - leader secret key of validator 0
    async fn create_repair_instance() -> (
        Sender<BlockId>,
        Arc<RwLock<Box<dyn Blockstore + Send + Sync>>>,
        SimulatedNetwork<RepairResponse, RepairRequest>,
        SimulatedNetwork<RepairRequest, RepairResponse>,
        SecretKey,
    ) {
        // create EpochInfo for 2 validators and the corresponding network
        let (_, epoch_info) = generate_validators(2);
        let mut epoch_info = Arc::try_unwrap(epoch_info).unwrap();
        // validator 0 gets a fresh leader key and dedicated repair sockets
        // (simulated ports 0 and 1)
        let leader_key = SecretKey::new(&mut rand::rng());
        let v0 = epoch_info.validators.get_mut(0).unwrap();
        v0.pubkey = leader_key.to_pk();
        v0.repair_request_address = localhost_ip_sockaddr(0);
        v0.repair_response_address = localhost_ip_sockaddr(1);
        let core = Arc::new(SimulatedNetworkCore::new(1, 0.0, 0.0));
        let v0_repair_request_network = core
            .join_unlimited(v0.repair_request_address.port() as u64)
            .await;
        let v0_repair_network = core
            .join_unlimited(v0.repair_response_address.port() as u64)
            .await;
        // validator 1 uses simulated ports 2 and 3; the harness runs from
        // validator 1's perspective (own_id = 1)
        let v1 = epoch_info.validators.get_mut(1).unwrap();
        v1.repair_request_address = localhost_ip_sockaddr(2);
        v1.repair_response_address = localhost_ip_sockaddr(3);
        epoch_info.own_id = 1;
        let v1_repair_request_network = core
            .join_unlimited(v1.repair_request_address.port() as u64)
            .await;
        let v1_repair_network = core
            .join_unlimited(v1.repair_response_address.port() as u64)
            .await;
        let epoch_info = Arc::new(epoch_info);
        // set up blockstore
        let (votor_tx, votor_rx) = tokio::sync::mpsc::channel(100);
        let blockstore: Arc<RwLock<Box<dyn Blockstore + Send + Sync>>> = Arc::new(RwLock::new(
            Box::new(BlockstoreImpl::new(epoch_info.clone(), votor_tx.clone())),
        ));
        // set up pool
        let (repair_tx, repair_rx) = tokio::sync::mpsc::channel(100);
        let pool: Arc<RwLock<Box<dyn Pool + Send + Sync>>> = Arc::new(RwLock::new(Box::new(
            PoolImpl::new(epoch_info.clone(), votor_tx, repair_tx.clone()),
        )));
        // create and start Repair instance
        let mut repair = Repair::new(
            Arc::clone(&blockstore),
            pool,
            v1_repair_network,
            epoch_info.clone(),
        );
        tokio::spawn(async move {
            repair.repair_loop(repair_rx).await;
            // keep votor_rx alive
            drop(votor_rx);
        });
        // validator 1 also answers incoming repair requests in the background
        let repair_request_handler =
            RepairRequestHandler::new(epoch_info, blockstore.clone(), v1_repair_request_network);
        tokio::spawn(async move {
            repair_request_handler.run().await;
        });
        (
            repair_tx,
            blockstore,
            v0_repair_request_network,
            v0_repair_network,
            leader_key,
        )
    }
    // Repairs a minimal block consisting of a single slice.
    #[tokio::test]
    async fn repair_tiny_block() {
        repair_block(1).await;
    }
    // Repairs a regular-sized block of 10 slices.
    #[tokio::test]
    async fn repair_regular_block() {
        repair_block(10).await;
    }
    // Repairs a block of the maximum allowed slice count.
    // This test takes a long time to run in debug mode, so it is ignored for
    // normal runs and executed as part of the sequential test suite instead.
    #[tokio::test]
    #[ignore]
    async fn repair_large_block() {
        repair_block(MAX_SLICES_PER_BLOCK).await;
    }
    /// End-to-end repair scenario: this harness plays the peer side of the
    /// repair protocol for a block of `num_slices` slices and asserts the
    /// repairing node's blockstore eventually contains the full block.
    async fn repair_block(num_slices: usize) {
        let (repair_channel, blockstore, other_network_request, _other_network_reply, sk) =
            create_repair_instance().await;
        // create a block to repair
        let slot = Slot::genesis().next();
        let (block_hash, merkle_tree, shreds) = create_random_shredded_block(slot, num_slices, &sk);
        let block_to_repair = (slot, block_hash);
        // ask repair instance to repair this block
        repair_channel.send(block_to_repair.clone()).await.unwrap();
        // expect LastSliceRoot request first
        let msg = other_network_request.receive().await.unwrap();
        let req_type = RepairRequestType::LastSliceRoot(block_to_repair.clone());
        assert_eq!(msg.req_type, req_type);
        // answer LastSliceRoot request
        let response = RepairResponse::LastSliceRoot(
            req_type,
            SliceIndex::new_unchecked(num_slices - 1),
            shreds.last().unwrap()[0].merkle_root.clone(),
            merkle_tree.create_proof(num_slices - 1),
        );
        // port 3 is validator 1's repair response address (see harness above)
        let port1 = localhost_ip_sockaddr(3);
        other_network_request.send(&response, port1).await.unwrap();
        // expect SliceRoot requests next
        // (requests may arrive in any order, so match each against all candidates)
        let mut slice_roots_requested = BTreeSet::new();
        for _ in 0..num_slices {
            let msg = other_network_request.receive().await.unwrap();
            for slice in SliceIndex::all().take(num_slices) {
                let req_type = RepairRequestType::SliceRoot(block_to_repair.clone(), slice);
                if msg.req_type == req_type {
                    slice_roots_requested.insert(slice);
                    break;
                }
            }
        }
        // assert all other slice roots requested + answer the requests
        for slice in SliceIndex::all().take(num_slices) {
            assert!(slice_roots_requested.contains(&slice));
            let req_type = RepairRequestType::SliceRoot(block_to_repair.clone(), slice);
            let root = shreds[slice.inner()][0].merkle_root.clone();
            let proof = merkle_tree.create_proof(slice.inner());
            let response = RepairResponse::SliceRoot(req_type, root, proof);
            other_network_request.send(&response, port1).await.unwrap();
            // expect Shred requests for this slice next
            let mut shreds_requested = BTreeSet::new();
            for _ in ShredIndex::all() {
                let msg = other_network_request.receive().await.unwrap();
                for shred_index in ShredIndex::all() {
                    let req_type =
                        RepairRequestType::Shred(block_to_repair.clone(), slice, shred_index);
                    if msg.req_type == req_type {
                        shreds_requested.insert(shred_index);
                        break;
                    }
                }
            }
            // assert all shreds requested + answer the requests
            let slice_shreds = shreds[slice.inner()].clone();
            for (shred_index, shred) in slice_shreds.into_iter().take(TOTAL_SHREDS).enumerate() {
                let shred_index = ShredIndex::new(shred_index).unwrap();
                assert!(shreds_requested.contains(&shred_index));
                let req_type =
                    RepairRequestType::Shred(block_to_repair.clone(), slice, shred_index);
                let response = RepairResponse::Shred(req_type, shred.into_shred());
                other_network_request.send(&response, port1).await.unwrap();
            }
        }
        // after some time block should be repaired
        tokio::time::sleep(Duration::from_millis(100)).await;
        assert!(
            blockstore
                .read()
                .await
                .get_block(&block_to_repair)
                .is_some()
        );
    }
    /// Plays the requesting side of the repair protocol against a node that
    /// already holds the block, verifying every response (roots, proofs and
    /// shred payloads) against the locally generated block.
    #[tokio::test]
    async fn answer_requests() {
        const SLICES: usize = 2;
        let (_sender, blockstore, _other_network_request, other_network, sk) =
            create_repair_instance().await;
        // create a block to repair
        let slot = Slot::genesis().next();
        let (block_hash, _, shreds) = create_random_shredded_block(slot, SLICES, &sk);
        let block_to_repair = (slot, block_hash.clone());
        // ingest the block into blockstore
        for slice_shreds in shreds.clone() {
            let mut b = blockstore.write().await;
            for shred in slice_shreds {
                let _ = b.add_shred_from_disseminator(shred.into_shred()).await;
            }
        }
        // sanity check: the blockstore fully reconstructed the block
        assert_eq!(
            blockstore.read().await.disseminated_block_hash(slot),
            Some(&block_hash)
        );
        assert!(
            blockstore
                .read()
                .await
                .get_block(&block_to_repair)
                .is_some()
        );
        // request last slice root to learn how many slices there are
        let request = RepairRequest {
            req_type: RepairRequestType::LastSliceRoot(block_to_repair.clone()),
            sender: 0,
        };
        // port 2 is validator 1's repair request address (see harness above)
        let port1 = localhost_ip_sockaddr(2);
        other_network.send(&request, port1).await.unwrap();
        // verify response
        let msg = other_network.receive().await.unwrap();
        let RepairResponse::LastSliceRoot(req_type, last_slice, root, proof) = msg else {
            panic!("not LastSliceRoot response");
        };
        assert_eq!(req_type, request.req_type);
        assert_eq!(last_slice.inner(), SLICES - 1);
        assert_eq!(root, shreds[last_slice.inner()][0].merkle_root);
        let correct_proof = blockstore
            .read()
            .await
            .create_double_merkle_proof(&block_to_repair, last_slice)
            .unwrap();
        assert_eq!(proof, correct_proof);
        // request slice roots
        for slice in SliceIndex::all().take(SLICES) {
            let request = RepairRequest {
                req_type: RepairRequestType::SliceRoot(block_to_repair.clone(), slice),
                sender: 0,
            };
            other_network.send(&request, port1).await.unwrap();
            // verify response
            let msg = other_network.receive().await.unwrap();
            let RepairResponse::SliceRoot(req_type, root, proof) = msg else {
                panic!("not SliceRoot response");
            };
            assert_eq!(req_type, request.req_type);
            assert_eq!(root, shreds[slice.inner()][0].merkle_root);
            let correct_proof = blockstore
                .read()
                .await
                .create_double_merkle_proof(&block_to_repair, slice)
                .unwrap();
            assert_eq!(proof, correct_proof);
            // request slice shreds
            for shred_index in ShredIndex::all() {
                let request = RepairRequest {
                    req_type: RepairRequestType::Shred(block_to_repair.clone(), slice, shred_index),
                    sender: 0,
                };
                other_network.send(&request, port1).await.unwrap();
                // verify response
                let msg = other_network.receive().await.unwrap();
                let RepairResponse::Shred(req_type, shred) = msg else {
                    panic!("not Shred response");
                };
                assert_eq!(req_type, request.req_type);
                assert_eq!(
                    shred.payload().data,
                    shreds[slice.inner()][*shred_index].payload().data
                );
            }
        }
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/logging.rs | src/logging.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
use colored::{Color, ColoredString, Colorize};
use logforth::filter::env_filter::EnvFilterBuilder;
use logforth::record::Level;
use logforth::{Layout, append};
/// Compact log layout that prints only the colorized level and the message.
#[derive(Clone, Debug)]
struct MinimalLogforthLayout;
impl MinimalLogforthLayout {
    /// Picks the terminal color used to render `level`.
    ///
    /// The numbered variants (e.g. `Fatal2`..`Fatal4`) are grouped together
    /// with their base level, so each severity family shares one color.
    fn colorize_record_level(&self, level: Level) -> ColoredString {
        let color = match level {
            Level::Fatal | Level::Fatal2 | Level::Fatal3 | Level::Fatal4 => Color::BrightRed,
            Level::Error | Level::Error2 | Level::Error3 | Level::Error4 => Color::Red,
            Level::Warn | Level::Warn2 | Level::Warn3 | Level::Warn4 => Color::Yellow,
            Level::Info | Level::Info2 | Level::Info3 | Level::Info4 => Color::Green,
            Level::Debug | Level::Debug2 | Level::Debug3 | Level::Debug4 => Color::Blue,
            Level::Trace | Level::Trace2 | Level::Trace3 | Level::Trace4 => Color::Magenta,
        };
        ColoredString::from(level.to_string()).color(color)
    }
}
impl Layout for MinimalLogforthLayout {
    /// Formats a record as `LEVEL message`, with the level right-aligned in a
    /// 5-character column and colorized; diagnostics are ignored.
    fn format(
        &self,
        record: &logforth::record::Record,
        _diagnostics: &[Box<dyn logforth::Diagnostic>],
    ) -> Result<Vec<u8>, logforth::Error> {
        let level = self.colorize_record_level(record.level());
        let message = record.payload();
        // `{level:>5}` right-aligns the level name so messages line up
        Ok(format!("{level:>5} {message}").into_bytes())
    }
}
/// Installs the global logforth logger writing to stderr with the minimal layout.
pub fn enable_logforth() {
    enable_logforth_append(append::Stderr::default().with_layout(MinimalLogforthLayout));
}
/// Installs the global logforth logger writing to stderr with the default layout.
pub fn enable_logforth_stderr() {
    enable_logforth_append(append::Stderr::default());
}
/// Shared setup: installs `to_append` behind an env-based filter.
///
/// The filter is read from the standard env-filter environment variable and
/// defaults to the "info" level when unset.
fn enable_logforth_append<A: logforth::Append>(to_append: A) {
    let filter = EnvFilterBuilder::from_default_env_or("info").build();
    logforth::starter_log::builder()
        .dispatch(|d| d.filter(filter).append(to_append))
        .apply();
}
#[cfg(test)]
mod tests {
    use log::{Level, debug, error, info, log_enabled, trace, warn};
    use super::*;
    // Smoke test: installing the logger enables exactly the levels implied by
    // the default "info" filter, and emitting at every level does not panic.
    #[test]
    fn basic() {
        enable_logforth();
        // check logger is enabled with default level of "info"
        assert!(log_enabled!(Level::Error));
        assert!(log_enabled!(Level::Warn));
        assert!(log_enabled!(Level::Info));
        assert!(!log_enabled!(Level::Debug));
        assert!(!log_enabled!(Level::Trace));
        // trace/debug are filtered out; the rest should be printed
        trace!("trace");
        debug!("debug");
        info!("info");
        warn!("warn");
        error!("error");
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/crypto.rs | src/crypto.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Cryptographic primitives.
//!
//! This module contains any cryptographic primitives used by the library.
//! These are mostly wrappers around existing libraries.
pub mod aggsig;
pub mod hash;
pub mod merkle;
pub mod signature;
pub use self::aggsig::{AggregateSignature, IndividualSignature};
pub use self::hash::{Hash, ShortHash, hash};
pub use self::merkle::MerkleTree;
pub use self::signature::Signature;
/// A type that can be converted into a byte string to be signed.
///
/// It is important to note that this may well be different from serializing
/// the type to bytes. For example, a type containing a signature can have a
/// `bytes_to_sign` implementation that serializes all fields except the
/// signature. Also, serialization may be implementation-specific (e.g. specific
/// to the storage engine) while `bytes_to_sign` is part of the protocol.
pub trait Signable {
    /// Returns the exact byte string to be signed.
    ///
    /// Signers and verifiers operate on exactly these bytes, so
    /// implementations should be deterministic for a given value.
    fn bytes_to_sign(&self) -> Vec<u8>;
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/consensus.rs | src/consensus.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Core consensus logic and data structures.
//!
//! The central structure of the consensus protocol is [`Alpenglow`].
//! It contains all state for a single consensus instance and also has access
//! to the different necessary network protocols.
//!
//! Most important component data structures defined in this module are:
//! - [`Blockstore`] holds individual shreds and reconstructed blocks for each slot.
//! - [`Pool`] holds votes and certificates for each slot.
//! - [`Votor`] handles the main voting logic.
//!
//! Some other data types for consensus are also defined here:
//! - [`Cert`] represents a certificate of votes of a specific type.
//! - [`Vote`] represents a vote of a specific type.
//! - [`EpochInfo`] holds information about the epoch and all validators.
mod block_producer;
mod blockstore;
mod cert;
mod epoch_info;
mod pool;
mod vote;
pub(crate) mod votor;
use std::marker::{Send, Sync};
use std::sync::Arc;
use std::time::{Duration, Instant};
use color_eyre::Result;
use fastrace::Span;
use fastrace::future::FutureExt;
use log::{trace, warn};
use static_assertions::const_assert;
use tokio::sync::{RwLock, mpsc};
use tokio_util::sync::CancellationToken;
use wincode::{SchemaRead, SchemaWrite};
pub use self::blockstore::{BlockInfo, Blockstore, BlockstoreImpl};
pub use self::cert::{Cert, NotarCert};
pub use self::epoch_info::EpochInfo;
pub use self::pool::{AddVoteError, Pool, PoolImpl};
pub use self::vote::Vote;
use self::votor::Votor;
use crate::consensus::block_producer::BlockProducer;
use crate::crypto::{aggsig, signature};
use crate::network::{RepairNetwork, RepairRequestNetwork, TransactionNetwork};
use crate::repair::{Repair, RepairRequestHandler};
use crate::shredder::Shred;
use crate::{All2All, Disseminator, Slot, ValidatorInfo};
/// Time bound assumed on network transmission delays during periods of synchrony.
pub const DELTA: Duration = Duration::from_millis(250);
/// Time the leader has for producing and sending the block.
const DELTA_BLOCK: Duration = Duration::from_millis(400);
/// Time the leader has for producing and sending the first slice.
const DELTA_FIRST_SLICE: Duration = Duration::from_millis(10);
const_assert!(DELTA_FIRST_SLICE.as_nanos() <= DELTA_BLOCK.as_nanos());
/// Base timeout for when leader's first slice should arrive if they sent it immediately.
const DELTA_TIMEOUT: Duration = DELTA.checked_mul(3).unwrap();
/// Timeout for standstill detection mechanism.
const DELTA_STANDSTILL: Duration = Duration::from_millis(10_000);
/// A consensus message exchanged over the all-to-all broadcast network.
#[derive(Clone, Debug, SchemaRead, SchemaWrite)]
pub enum ConsensusMessage {
    /// A single validator's vote of a specific type.
    Vote(Vote),
    /// A certificate aggregating votes of a specific type.
    Cert(Cert),
}
/// Wraps a [`Vote`] into the corresponding [`ConsensusMessage`] variant.
impl From<Vote> for ConsensusMessage {
    fn from(vote: Vote) -> Self {
        Self::Vote(vote)
    }
}
/// Wraps a [`Cert`] into the corresponding [`ConsensusMessage`] variant.
impl From<Cert> for ConsensusMessage {
    fn from(cert: Cert) -> Self {
        Self::Cert(cert)
    }
}
/// Alpenglow consensus protocol implementation.
///
/// Generic over the all-to-all protocol `A`, the block dissemination
/// protocol `D` and the transaction ingest network `T`.
pub struct Alpenglow<A: All2All, D: Disseminator, T>
where
    T: TransactionNetwork + 'static,
{
    /// Other validators' info.
    epoch_info: Arc<EpochInfo>,
    /// Blockstore for storing raw block data.
    blockstore: Arc<RwLock<Box<dyn Blockstore + Send + Sync>>>,
    /// Pool of votes and certificates.
    pool: Arc<RwLock<Box<dyn Pool + Send + Sync>>>,
    /// Block production (i.e. leader side) component of the consensus protocol.
    block_producer: Arc<BlockProducer<D, T>>,
    /// All-to-all broadcast network protocol for consensus messages.
    all2all: Arc<A>,
    /// Block dissemination network protocol for shreds.
    disseminator: Arc<D>,
    /// Indicates whether the node is shutting down.
    cancel_token: CancellationToken,
    /// Votor task handle.
    votor_handle: tokio::task::JoinHandle<()>,
}
impl<A, D, T> Alpenglow<A, D, T>
where
A: All2All + Send + Sync + 'static,
D: Disseminator + Send + Sync + 'static,
T: TransactionNetwork + 'static,
{
    /// Creates a new Alpenglow consensus node.
    ///
    /// Spawns the background tasks for repair (request handling and the
    /// repair loop) and for voting (Votor) as a side effect.
    ///
    /// `repair_network` - [`RepairNetwork`] for sending requests and receiving responses.
    /// `repair_request_network` - [`RepairRequestNetwork`] for answering incoming requests.
    #[must_use]
    #[allow(clippy::too_many_arguments)]
    pub fn new<RN, RR>(
        secret_key: signature::SecretKey,
        voting_secret_key: aggsig::SecretKey,
        all2all: A,
        disseminator: D,
        repair_network: RN,
        repair_request_network: RR,
        epoch_info: Arc<EpochInfo>,
        txs_receiver: T,
    ) -> Self
    where
        RR: RepairRequestNetwork + 'static,
        RN: RepairNetwork + 'static,
    {
        let cancel_token = CancellationToken::new();
        // channels wiring blockstore/pool events into Votor and Repair
        let (votor_tx, votor_rx) = mpsc::channel(1024);
        let (repair_tx, repair_rx) = mpsc::channel(1024);
        let all2all = Arc::new(all2all);
        let blockstore: Box<dyn Blockstore + Send + Sync> =
            Box::new(BlockstoreImpl::new(epoch_info.clone(), votor_tx.clone()));
        let blockstore = Arc::new(RwLock::new(blockstore));
        let pool: Box<dyn Pool + Send + Sync> = Box::new(PoolImpl::new(
            epoch_info.clone(),
            votor_tx.clone(),
            repair_tx,
        ));
        let pool = Arc::new(RwLock::new(pool));
        // serve other nodes' repair requests in the background
        let repair_request_handler = RepairRequestHandler::new(
            epoch_info.clone(),
            blockstore.clone(),
            repair_request_network,
        );
        let _repair_request_handler =
            tokio::spawn(async move { repair_request_handler.run().await });
        // run our own repair loop, driven by requests from the pool
        let mut repair = Repair::new(
            Arc::clone(&blockstore),
            Arc::clone(&pool),
            repair_network,
            epoch_info.clone(),
        );
        let _repair_handle = tokio::spawn(
            async move { repair.repair_loop(repair_rx).await }
                .in_span(Span::enter_with_local_parent("repair loop")),
        );
        // start the voting loop; its handle is kept for shutdown in `run`
        let mut votor = Votor::new(
            epoch_info.own_id,
            voting_secret_key,
            votor_tx.clone(),
            votor_rx,
            all2all.clone(),
        );
        let votor_handle = tokio::spawn(
            async move { votor.voting_loop().await.unwrap() }
                .in_span(Span::enter_with_local_parent("voting loop")),
        );
        let disseminator = Arc::new(disseminator);
        let block_producer = Arc::new(BlockProducer::new(
            secret_key,
            epoch_info.clone(),
            disseminator.clone(),
            txs_receiver,
            blockstore.clone(),
            pool.clone(),
            cancel_token.clone(),
            DELTA_BLOCK,
            DELTA_FIRST_SLICE,
        ));
        Self {
            epoch_info,
            blockstore,
            pool,
            block_producer,
            all2all,
            disseminator,
            cancel_token,
            votor_handle,
        }
    }
    /// Starts the different tasks of the Alpenglow node.
    ///
    /// Runs the message loop, the standstill detection loop and block
    /// production until the cancellation token fires, then aborts all tasks.
    ///
    /// # Errors
    ///
    /// Returns an error only if any of the tasks panics.
    #[fastrace::trace(short_name = true)]
    pub async fn run(self) -> Result<()> {
        let msg_loop_span = Span::enter_with_local_parent("message loop");
        let node = Arc::new(self);
        let nn = node.clone();
        let msg_loop = tokio::spawn(async move { nn.message_loop().await }.in_span(msg_loop_span));
        let standstill_loop_span = Span::enter_with_local_parent("standstill detection loop");
        let nn = node.clone();
        let standstill_loop =
            tokio::spawn(async move { nn.standstill_loop().await }.in_span(standstill_loop_span));
        let block_production_span = Span::enter_with_local_parent("block production");
        let block_producer = Arc::clone(&node.block_producer);
        let prod_loop = tokio::spawn(
            async move { block_producer.block_production_loop().await }
                .in_span(block_production_span),
        );
        // block until shutdown is requested, then tear down all tasks
        node.cancel_token.cancelled().await;
        node.votor_handle.abort();
        msg_loop.abort();
        standstill_loop.abort();
        prod_loop.abort();
        // NOTE(review): `abort()` can make these joins yield a cancelled
        // JoinError even without a panic, which `?` would surface as an
        // error here — confirm whether that race is intended.
        let (msg_res, prod_res) = tokio::join!(msg_loop, prod_loop);
        msg_res??;
        prod_res??;
        Ok(())
    }
    /// Returns this node's own [`ValidatorInfo`].
    pub fn get_info(&self) -> &ValidatorInfo {
        self.epoch_info.validator(self.epoch_info.own_id)
    }
    /// Returns a shared handle to this node's [`Pool`] of votes and certificates.
    pub fn get_pool(&self) -> Arc<RwLock<Box<dyn Pool + Send + Sync>>> {
        Arc::clone(&self.pool)
    }
    /// Returns a clone of the cancellation token used to shut the node down.
    pub fn get_cancel_token(&self) -> CancellationToken {
        self.cancel_token.clone()
    }
    /// Handles incoming messages on all the different network interfaces.
    ///
    /// [`All2All`]: Handles incoming votes and certificates. Adds them to the [`Pool`].
    /// [`Disseminator`]: Handles incoming shreds. Adds them to the [`Blockstore`].
    ///
    /// Runs until the cancellation token fires or a receive fails.
    async fn message_loop(self: &Arc<Self>) -> Result<()> {
        loop {
            tokio::select! {
                // handle incoming votes and certificates
                res = self.all2all.receive() => self.handle_all2all_message(res?).await,
                // handle shreds received by block dissemination protocol
                res = self.disseminator.receive() => self.handle_disseminator_shred(res?).await?,
                // graceful shutdown
                () = self.cancel_token.cancelled() => return Ok(()),
            };
        }
    }
    /// Handles standstill detection and triggers recovery.
    ///
    /// Keeps track of when consensus progresses, i.e., [`Pool`] finalizes new blocks.
    /// Triggers standstill recovery if no progress was detected for a long time.
    ///
    /// Loops forever (never returns `Ok`); it is stopped by being aborted.
    async fn standstill_loop(self: &Arc<Self>) -> Result<()> {
        let mut finalized_slot = Slot::new(0);
        let mut last_progress = Instant::now();
        loop {
            let slot = self.pool.read().await.finalized_slot();
            if slot > finalized_slot {
                // progress: a newer slot was finalized since the last check
                finalized_slot = slot;
                last_progress = Instant::now();
            } else if last_progress.elapsed() > DELTA_STANDSTILL {
                // no progress for too long: ask the pool to recover
                self.pool.read().await.recover_from_standstill().await;
                last_progress = Instant::now();
            }
            // poll roughly once per block production interval
            tokio::time::sleep(DELTA_BLOCK).await;
        }
    }
#[fastrace::trace(short_name = true)]
async fn handle_all2all_message(&self, msg: ConsensusMessage) {
trace!("received all2all msg: {msg:?}");
match msg {
ConsensusMessage::Vote(v) => match self.pool.write().await.add_vote(v).await {
Ok(()) => {}
Err(AddVoteError::Slashable(offence)) => {
warn!("slashable offence detected: {offence}");
}
Err(err) => trace!("ignoring invalid vote: {err}"),
},
ConsensusMessage::Cert(c) => match self.pool.write().await.add_cert(c).await {
Ok(()) => {}
Err(err) => trace!("ignoring invalid cert: {err}"),
},
}
}
    /// Processes a single shred received via the block dissemination protocol.
    ///
    /// Forwards the shred as the protocol requires, then (unless this node is
    /// the slot's leader, which already holds the data) ingests it into the
    /// [`Blockstore`]. If this completes a block, it is added to the [`Pool`].
    ///
    /// # Errors
    ///
    /// Returns an error if forwarding the shred fails.
    #[fastrace::trace(short_name = true)]
    async fn handle_disseminator_shred(&self, shred: Shred) -> std::io::Result<()> {
        // potentially forward shred
        self.disseminator.forward(&shred).await?;
        // if we are the leader, we already have the shred
        let slot = shred.payload().header.slot;
        if self.epoch_info.leader(slot).id == self.epoch_info.own_id {
            return Ok(());
        }
        // otherwise, ingest into blockstore
        let res = self
            .blockstore
            .write()
            .await
            .add_shred_from_disseminator(shred)
            .await;
        // `Some(block_info)` means the shred completed a block
        if let Ok(Some(block_info)) = res {
            let mut guard = self.pool.write().await;
            let block_id = (slot, block_info.hash);
            guard.add_block(block_id, block_info.parent).await;
        }
        Ok(())
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/shredder/reed_solomon.rs | src/shredder/reed_solomon.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Implements Reed-Solomon shreding and deshreding.
//!
//! This is a low-level module that is used in various shredder implementations.
//! It is mostly a wrapper around the [`reed_solomon_simd`] crate.
use reed_solomon_simd::{ReedSolomonDecoder, ReedSolomonEncoder};
use static_assertions::const_assert;
use thiserror::Error;
use super::{
DATA_SHREDS, MAX_DATA_PER_SLICE, MAX_DATA_PER_SLICE_AFTER_PADDING, ShredPayloadType,
TOTAL_SHREDS,
};
use crate::shredder::MAX_DATA_PER_SHRED;
use crate::shredder::validated_shreds::ValidatedShreds;
/// Errors that may be returned by [`ReedSolomonCoder::shred`].
#[derive(Clone, Copy, Debug, PartialEq, Eq, Error)]
pub(super) enum ReedSolomonShredError {
    #[error("too much data for slice")]
    TooMuchData,
}
/// Errors that may be returned by [`ReedSolomonCoder::deshred`].
#[derive(Clone, Copy, Debug, PartialEq, Eq, Error)]
pub(super) enum ReedSolomonDeshredError {
    #[error("not enough shreds to reconstruct")]
    NotEnoughShreds,
    #[error("too much data for slice")]
    TooMuchData,
    #[error("invalid padding detected")]
    InvalidPadding,
}
/// The data and coding shreds returned from [`ReedSolomonCoder::shred`] on success.
pub(super) struct RawShreds {
    /// A list of data shreds.
    pub(super) data: Vec<Vec<u8>>,
    /// A list of coding shreds.
    pub(super) coding: Vec<Vec<u8>>,
}
/// Reed-Solomon coder for shreds.
///
/// This is a wrapper around both [`ReedSolomonEncoder`] and [`ReedSolomonDecoder`].
/// Therefore, it can be used for both encoding and decoding.
/// Reusing this over multiple slices prevents reallocating working memory.
pub(super) struct ReedSolomonCoder {
    /// Number of coding (recovery) shreds generated per slice.
    num_coding: usize,
    /// Reusable encoder working memory.
    encoder: ReedSolomonEncoder,
    /// Reusable decoder working memory.
    decoder: ReedSolomonDecoder,
}
impl ReedSolomonCoder {
    /// Creates a new Reed-Solomon coder.
    ///
    /// It is initialized for [`DATA_SHREDS`] data shreds and `num_coding` coding shreds.
    /// It is also initialized for up to [`MAX_DATA_PER_SHRED`] bytes per fragment.
    ///
    /// # Panics
    ///
    /// Panics if `num_coding` exceeds [`TOTAL_SHREDS`].
    pub(super) fn new(num_coding: usize) -> ReedSolomonCoder {
        // max shreds supported by RS field
        const_assert!(DATA_SHREDS + TOTAL_SHREDS <= 65536);
        assert!(num_coding <= TOTAL_SHREDS);
        let encoder = ReedSolomonEncoder::new(DATA_SHREDS, num_coding, MAX_DATA_PER_SHRED).unwrap();
        let decoder = ReedSolomonDecoder::new(DATA_SHREDS, num_coding, MAX_DATA_PER_SHRED).unwrap();
        ReedSolomonCoder {
            num_coding,
            encoder,
            decoder,
        }
    }
    /// Reed-Solomon encodes the `payload` into [`RawShreds`].
    ///
    /// For this, it splits the given slice into [`DATA_SHREDS`] data shreds.
    /// Then, it generates and adds `num_coding` additional Reed-Solomon coding shreds.
    ///
    /// First, however, padding is added to the payload to make it a multiple of `2 * DATA_SHREDS`.
    /// Bit padding of one 1bit and as many 0 bits as needed is added.
    /// In the byte representation this looks like `[0x80, 0x00, ..., 0x00]`.
    ///
    /// # Errors
    ///
    /// If the provided payload is larger than [`MAX_DATA_PER_SLICE`] then returns [`ReedSolomonShredError::TooMuchData`].
    pub(super) fn shred(&mut self, payload: &[u8]) -> Result<RawShreds, ReedSolomonShredError> {
        if payload.len() > MAX_DATA_PER_SLICE {
            return Err(ReedSolomonShredError::TooMuchData);
        }
        // determine padding length & configure encoder for shred length
        // (at least one padding byte is always added, for the 0x80 marker)
        let padding_bytes = 2 * DATA_SHREDS - payload.len() % (2 * DATA_SHREDS);
        let shred_bytes = (payload.len() + padding_bytes).div_ceil(DATA_SHREDS);
        self.encoder
            .reset(DATA_SHREDS, self.num_coding, shred_bytes)
            .expect("shred size with padding should be supported");
        // add padding to last shreds
        let last_shreds_bytes = (2 * DATA_SHREDS).next_multiple_of(shred_bytes);
        let boundary = payload.len() - (last_shreds_bytes - padding_bytes);
        let mut last_shreds = Vec::with_capacity(last_shreds_bytes);
        last_shreds.extend_from_slice(&payload[boundary..]);
        last_shreds.push(0x80);
        last_shreds.resize(last_shreds_bytes, 0);
        // chunk data
        let mut data = Vec::with_capacity(DATA_SHREDS);
        payload[..boundary]
            .chunks(shred_bytes)
            .chain(last_shreds.chunks(shred_bytes))
            .for_each(|chunk| {
                self.encoder
                    .add_original_shard(chunk)
                    .expect("adding correct number of chunks of currect size");
                data.push(chunk.to_vec());
            });
        // perform coding
        let result = self
            .encoder
            .encode()
            .expect("we just added enough data shreds");
        let coding = result.recovery_iter().map(<[u8]>::to_vec).collect();
        Ok(RawShreds { data, coding })
    }
    /// Reconstructs the raw data from the given shreds.
    ///
    /// Removes the padding before returning the data.
    /// See [`ReedSolomonCoder::shred`] for details on the padding scheme.
    ///
    /// # Errors
    ///
    /// If fewer than [`DATA_SHREDS`] elements in `shreds` are `Some()` then returns [`ReedSolomonDeshredError::NotEnoughShreds`].
    /// If the restored payload is larger than [`MAX_DATA_PER_SLICE_AFTER_PADDING`] then returns [`ReedSolomonDeshredError::TooMuchData`].
    pub(super) fn deshred(
        &mut self,
        shreds: ValidatedShreds,
    ) -> Result<Vec<u8>, ReedSolomonDeshredError> {
        let shreds = shreds.to_shreds();
        let shreds_cnt = shreds.iter().filter(|s| s.is_some()).count();
        if shreds_cnt < DATA_SHREDS {
            return Err(ReedSolomonDeshredError::NotEnoughShreds);
        }
        // configure decoder for shred size
        let shred_bytes = shreds.iter().flatten().next().unwrap().payload().data.len();
        self.decoder
            .reset(DATA_SHREDS, self.num_coding, shred_bytes)
            .expect("size of validated shred should be supported");
        // shreds below this index are data shreds, at or above are coding shreds
        let coding_offset = TOTAL_SHREDS - self.num_coding;
        // filter to split data and coding shreds
        let data = shreds.iter().take(coding_offset).filter_map(|s| {
            s.as_ref().map(|s| match &s.payload_type {
                ShredPayloadType::Data(d) => (*d.shred_index, &d.data),
                ShredPayloadType::Coding(_) => panic!("should be a data shred"),
            })
        });
        let coding = shreds.iter().skip(coding_offset).filter_map(|s| {
            s.as_ref().map(|s| match &s.payload_type {
                ShredPayloadType::Coding(c) => (*c.shred_index - coding_offset, &c.data),
                ShredPayloadType::Data(_) => panic!("should be a coding shred"),
            })
        });
        for (i, d) in data.clone() {
            self.decoder
                .add_original_shard(i, d)
                .expect("validated shred should have correct index and size");
        }
        for (i, c) in coding {
            self.decoder
                .add_recovery_shard(i, c)
                .expect("validated shred should have correct index and size");
        }
        let restored = self.decoder.decode().expect("just added enough shreds");
        let mut data_shreds = vec![None; DATA_SHREDS];
        for (i, d) in data {
            data_shreds[i] = Some(d);
        }
        // restore data from data shreds (from input and restored)
        let mut restored_payload = Vec::with_capacity(MAX_DATA_PER_SLICE_AFTER_PADDING);
        for (i, d) in data_shreds.into_iter().enumerate() {
            let shred_data = match d {
                Some(data_ref) => data_ref,
                None => restored
                    .restored_original(i)
                    .expect("all non-existing data shreds are restored"),
            };
            if restored_payload.len() + shred_data.len() > MAX_DATA_PER_SLICE_AFTER_PADDING {
                return Err(ReedSolomonDeshredError::TooMuchData);
            }
            restored_payload.extend_from_slice(shred_data);
        }
        // remove padding: trailing zeros plus the single 0x80 marker byte
        // NOTE(review): an all-zero restored payload would make the index
        // below underflow and panic — confirm validation rules this out.
        let padding_bytes = restored_payload
            .iter()
            .rev()
            .take_while(|b| **b == 0)
            .count()
            + 1;
        if restored_payload[restored_payload.len() - padding_bytes] != 0x80 {
            return Err(ReedSolomonDeshredError::InvalidPadding);
        }
        restored_payload.truncate(restored_payload.len().saturating_sub(padding_bytes));
        Ok(restored_payload)
    }
}
#[cfg(test)]
mod tests {
    use static_assertions::const_assert;
    use super::*;
    use crate::Slot;
    use crate::crypto::signature::SecretKey;
    use crate::shredder::{ValidatedShred, data_and_coding_to_output_shreds};
    use crate::types::slice::create_slice_with_invalid_txs;
    use crate::types::{SliceHeader, SliceIndex};
    // Round-trips a maximum-size slice through shred + deshred.
    #[test]
    fn restore_full() {
        let (header, payload) = create_slice_with_invalid_txs(MAX_DATA_PER_SLICE).deconstruct();
        shred_deshred_restore(header, payload.into());
    }
    // Round-trips a payload smaller than the number of data shreds.
    #[test]
    fn restore_tiny() {
        let (header, payload) = create_slice_with_invalid_txs(DATA_SHREDS - 1).deconstruct();
        shred_deshred_restore(header, payload.into());
    }
    // Round-trips a completely empty payload (only padding is encoded).
    #[test]
    fn restore_empty() {
        let header = SliceHeader {
            slot: Slot::new(0),
            slice_index: SliceIndex::first(),
            is_last: true,
        };
        let payload = Vec::new();
        shred_deshred_restore(header, payload);
    }
    // Round-trips payloads of several sizes to exercise different padding lengths.
    #[test]
    fn restore_various() {
        const_assert!(MAX_DATA_PER_SLICE >= 2 * DATA_SHREDS);
        let slice_bytes = MAX_DATA_PER_SLICE / 2;
        for offset in 0..DATA_SHREDS {
            let (header, payload) =
                create_slice_with_invalid_txs(slice_bytes + offset).deconstruct();
            shred_deshred_restore(header, payload.into());
        }
    }
    // Shredding a payload above the size limit must fail with TooMuchData.
    #[test]
    fn shred_too_much_data() {
        let payload = vec![0; MAX_DATA_PER_SLICE + 1];
        let mut rs = ReedSolomonCoder::new(TOTAL_SHREDS - DATA_SHREDS);
        let res = rs.shred(&payload);
        assert!(res.is_err());
        assert_eq!(res.err().unwrap(), ReedSolomonShredError::TooMuchData);
    }
    // Deshredding with fewer than DATA_SHREDS available shreds must fail.
    #[test]
    fn deshred_not_enough_shreds() {
        let (header, payload) = create_slice_with_invalid_txs(MAX_DATA_PER_SLICE).deconstruct();
        let mut rs = ReedSolomonCoder::new(TOTAL_SHREDS - DATA_SHREDS);
        let shreds = rs.shred(&payload.to_bytes()).unwrap();
        let sk = SecretKey::new(&mut rand::rng());
        let mut shreds = data_and_coding_to_output_shreds(header, shreds, &sk).map(Some);
        // keep only DATA_SHREDS - 1 shreds, one too few for reconstruction
        for shred in shreds.iter_mut().skip(DATA_SHREDS - 1) {
            *shred = None;
        }
        let validated_shreds =
            ValidatedShreds::try_new(&shreds, DATA_SHREDS, TOTAL_SHREDS - DATA_SHREDS).unwrap();
        let res = rs.deshred(validated_shreds);
        assert!(res.is_err());
        assert_eq!(res.err().unwrap(), ReedSolomonDeshredError::NotEnoughShreds);
    }
    // Helper: shreds `payload`, drops the coding shreds, and checks that
    // deshredding the remaining data shreds restores the original payload.
    fn shred_deshred_restore(header: SliceHeader, payload: Vec<u8>) {
        let mut rs = ReedSolomonCoder::new(TOTAL_SHREDS - DATA_SHREDS);
        let shreds = rs.shred(&payload).unwrap();
        let shreds = take_and_map_enough_shreds(header, shreds);
        let validated_shreds =
            ValidatedShreds::try_new(&shreds, DATA_SHREDS, TOTAL_SHREDS - DATA_SHREDS).unwrap();
        let restored = rs.deshred(validated_shreds).unwrap();
        assert_eq!(restored, payload);
    }
    // Helper: signs the raw shreds and keeps exactly the first DATA_SHREDS of them.
    fn take_and_map_enough_shreds(
        header: SliceHeader,
        shreds: RawShreds,
    ) -> [Option<ValidatedShred>; TOTAL_SHREDS] {
        let sk = SecretKey::new(&mut rand::rng());
        let mut shreds = data_and_coding_to_output_shreds(header, shreds, &sk).map(Some);
        for shred in shreds.iter_mut().skip(TOTAL_SHREDS - DATA_SHREDS) {
            *shred = None;
        }
        shreds
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/shredder/validated_shreds.rs | src/shredder/validated_shreds.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Defines the [`ValidatedShreds`] type.
use crate::shredder::{TOTAL_SHREDS, ValidatedShred};
/// Validated shreds array type.
///
/// Using strong type to enforce certain constraints:
/// - Shreds are in the correct order.
/// - Shred indices match expected shred type.
/// - Shreds are all the same size.
///
/// This is a cheap borrowed view (hence `Copy`); it can only be constructed
/// via [`ValidatedShreds::try_new`], which checks the invariants above.
#[derive(Clone, Copy)]
pub struct ValidatedShreds<'a>(&'a [Option<ValidatedShred>; TOTAL_SHREDS]);
impl<'a> ValidatedShreds<'a> {
    /// Creates a new [`ValidatedShreds`] after checking structural invariants.
    ///
    /// Returns `None` if the present shreds are not all the same size, or if a
    /// shred's position does not match its type (data shreds must occupy the
    /// first `data_shreds` slots, coding shreds the remaining ones).
    ///
    /// # Panics
    ///
    /// Panics if `data_shreds + coding_shreds` does not equal [`TOTAL_SHREDS`],
    /// or if the input array contains a shred at the wrong index.
    pub(super) fn try_new(
        shreds: &'a [Option<ValidatedShred>; TOTAL_SHREDS],
        data_shreds: usize,
        coding_shreds: usize,
    ) -> Option<Self> {
        assert_eq!(data_shreds + coding_shreds, TOTAL_SHREDS);
        // all present shreds must have the same payload size
        let shred_size = shreds
            .iter()
            .flatten()
            .next()
            .map_or(0, |s| s.payload().data.len());
        if shreds
            .iter()
            .flatten()
            .any(|s| s.payload().data.len() != shred_size)
        {
            return None;
        }
        // each shred's embedded index must match its array position,
        // and its type must match the data/coding partition
        for (i, shred) in shreds.iter().enumerate() {
            if let Some(shred) = shred {
                assert_eq!(*shred.payload().shred_index, i);
                let type_matches = if i < data_shreds {
                    shred.is_data()
                } else {
                    shred.is_coding()
                };
                if !type_matches {
                    return None;
                }
            }
        }
        Some(Self(shreds))
    }

    /// Returns the inner reference to an array of [`ValidatedShred`]s.
    pub(super) fn to_shreds(self) -> &'a [Option<ValidatedShred>; TOTAL_SHREDS] {
        self.0
    }
}
#[cfg(test)]
mod tests {
    use rand::rng;
    use super::*;
    use crate::crypto::signature::SecretKey;
    use crate::shredder::{DATA_SHREDS, MAX_DATA_PER_SLICE, RegularShredder, Shredder};
    use crate::types::slice::create_slice_with_invalid_txs;
    // Exercises the rejection paths of `ValidatedShreds::try_new`.
    #[test]
    fn validity_tests() {
        let mut shredder = RegularShredder::default();
        let sk = SecretKey::new(&mut rng());
        let slice = create_slice_with_invalid_txs(MAX_DATA_PER_SLICE);
        // there are data shreds in coding shred positions in the array
        let shreds = shredder.shred(slice.clone(), &sk).unwrap().map(Some);
        assert!(ValidatedShreds::try_new(&shreds, 1, TOTAL_SHREDS - 1).is_none());
        // there are coding shreds in data shred positions in the array
        let shreds = shredder.shred(slice.clone(), &sk).unwrap().map(Some);
        assert!(ValidatedShreds::try_new(&shreds, TOTAL_SHREDS - 1, 1).is_none());
        // mixing shreds of different sizes
        let small_slice = create_slice_with_invalid_txs(100);
        let small_shreds = shredder.shred(small_slice, &sk).unwrap().map(Some);
        let mut shreds = shreds;
        shreds[0] = small_shreds[0].clone();
        assert!(
            ValidatedShreds::try_new(&shreds, DATA_SHREDS, TOTAL_SHREDS - DATA_SHREDS).is_none()
        );
    }
}
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/shredder/shred_index.rs | src/shredder/shred_index.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Defines the [`ShredIndex`] type.
use std::fmt::Display;
use std::mem::MaybeUninit;
use std::ops::Deref;
use serde::de::{self, Visitor};
use serde::{Deserialize, Serialize};
use wincode::{SchemaRead, SchemaWrite};
use crate::shredder::TOTAL_SHREDS;
/// Shred index type.
///
/// Using strong type to enforce certain constraints, e.g. it is never >= [`TOTAL_SHREDS`].
#[repr(transparent)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, SchemaWrite)]
pub struct ShredIndex(usize);

impl ShredIndex {
    /// Creates a new shred index.
    ///
    /// Returns `None` if `index` is not strictly less than [`TOTAL_SHREDS`].
    pub fn new(index: usize) -> Option<Self> {
        (index < TOTAL_SHREDS).then_some(Self(index))
    }

    /// Returns an iterator over every valid shred index, in ascending order.
    pub(crate) fn all() -> impl Iterator<Item = Self> {
        (0..TOTAL_SHREDS).map(Self)
    }
}
// Deref to the raw `usize` so call sites can use `*index` directly.
impl Deref for ShredIndex {
    type Target = usize;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
// Display renders the index as its plain numeric value.
impl Display for ShredIndex {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}
// Manual `Deserialize` impl (instead of a derive) so that out-of-range
// indices are rejected at deserialization time via `ShredIndexVisitor`.
impl<'de> Deserialize<'de> for ShredIndex {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        deserializer.deserialize_u64(ShredIndexVisitor)
    }
}
struct ShredIndexVisitor;
impl<'de> Visitor<'de> for ShredIndexVisitor {
type Value = ShredIndex;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "expected a usize between 0 and {TOTAL_SHREDS}")
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
ShredIndex::new(v as usize).ok_or(de::Error::custom(
"input {v} is not in the range [0:{TOTAL_SHREDS})",
))
}
}
// Manual wincode deserialization that enforces the `< TOTAL_SHREDS`
// invariant after copying the raw `usize` bytes into place.
impl<'de> SchemaRead<'de> for ShredIndex {
    type Dst = Self;
    fn read(
        reader: &mut impl wincode::io::Reader<'de>,
        dst: &mut MaybeUninit<Self::Dst>,
    ) -> wincode::ReadResult<()> {
        // SAFETY: Any read of `std::mem::size_of(usize)` bytes correctly initializes `usize`.
        // `ShredIndex` is `#[repr(transparent)]` over `usize`, so the same applies to it.
        unsafe {
            reader.copy_into_t(dst)?;
            // reject out-of-range values so the type invariant always holds
            if dst.assume_init_ref().0 >= TOTAL_SHREDS {
                Err(wincode::ReadError::Custom("shred index out of bounds"))
            } else {
                Ok(())
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // In-range values must deserialize successfully via serde.
    #[test]
    fn valid_serde() {
        let vs = [0, 1, TOTAL_SHREDS - 10, TOTAL_SHREDS - 1];
        let vs = vs.into_iter().map(|v| v.to_string());
        for v in vs {
            serde_json::from_str::<ShredIndex>(&v).unwrap();
        }
    }
    // Negative and out-of-range values must be rejected via serde.
    #[test]
    fn invalid_serde() {
        let vs = [
            (-1).to_string(),
            i64::MIN.to_string(),
            TOTAL_SHREDS.to_string(),
            (TOTAL_SHREDS + 1).to_string(),
            (i64::MAX).to_string(),
            (u64::MAX).to_string(),
            (usize::MAX).to_string(),
        ];
        for v in vs {
            serde_json::from_str::<ShredIndex>(&v).unwrap_err();
        }
    }
    // In-range values must round-trip through wincode.
    #[test]
    fn valid_wincode() {
        let vs = [0, 1, TOTAL_SHREDS - 10, TOTAL_SHREDS - 1];
        let vs = vs.iter().map(wincode::serialize);
        for res in vs {
            let v = res.unwrap();
            wincode::deserialize::<ShredIndex>(&v).unwrap();
        }
    }
    // Out-of-range values serialize fine but must fail to deserialize.
    #[test]
    fn invalid_wincode() {
        let vs = [TOTAL_SHREDS, TOTAL_SHREDS + 1, usize::MAX];
        let vs = vs.iter().map(wincode::serialize);
        for res in vs {
            let v = res.unwrap();
            wincode::deserialize::<ShredIndex>(&v).unwrap_err();
        }
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/shredder/validated_shred.rs | src/shredder/validated_shred.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Defines the [`ValidatedShred`] type.
use std::collections::btree_map::Entry;
use std::ops::{Deref, DerefMut};
use crate::crypto::merkle::{SliceMerkleTree, SliceRoot};
use crate::crypto::signature::PublicKey;
use crate::shredder::Shred;
use crate::types::SliceIndex;
/// Different errors returned from [`ValidatedShred::try_new`].
#[derive(Debug)]
pub enum ShredVerifyError {
    /// The shred contained an invalid Merkle proof.
    InvalidProof,
    /// The signature verification failed.
    InvalidSignature,
    /// Leader showed equivocation.
    ///
    /// The Merkle root does not match the root from a previous shred,
    /// while carrying a valid leader signature.
    Equivocation,
}
/// A verified wrapper around a [`Shred`].
///
/// It uses the new type pattern to encode verification in the type system.
/// The encapsulated [`Shred`] has passed all required checks.
///
/// `#[repr(transparent)]` guarantees the same layout as the inner [`Shred`].
#[repr(transparent)]
#[derive(Clone, Debug)]
pub struct ValidatedShred(Shred);
impl ValidatedShred {
    /// Performs various verification checks on the [`Shred`] and if they succeed, returns a shred.
    ///
    /// `cached_merkle_root`: Refers to Merkle root of the slice, if known from earlier shred.
    /// It is used to potentially skip expensive signature verification or detect equivocation.
    ///
    /// # Errors
    ///
    /// Returns [`ShredVerifyError`] if the [`Shred`] does not pass all verification checks.
    pub fn try_new(
        shred: Shred,
        cached_merkle_root: Entry<SliceIndex, SliceRoot>,
        pk: &PublicKey,
    ) -> Result<Self, ShredVerifyError> {
        // the Merkle proof must tie this shred's data to the claimed root
        if !SliceMerkleTree::check_proof(
            &shred.payload().data,
            *shred.payload().shred_index,
            &shred.merkle_root,
            &shred.merkle_path,
        ) {
            return Err(ShredVerifyError::InvalidProof);
        }
        match cached_merkle_root {
            Entry::Occupied(entry) => {
                // matching cached root: the signature was already verified
                // when the root was cached, so the check can be skipped here
                if entry.get() == &shred.merkle_root {
                    return Ok(Self(shred));
                }
                // differing root with a VALID signature means the leader
                // signed two different versions of the slice (equivocation);
                // with an invalid signature it is just a bad shred
                if shred.merkle_root_sig.verify(shred.merkle_root.as_ref(), pk) {
                    Err(ShredVerifyError::Equivocation)
                } else {
                    Err(ShredVerifyError::InvalidSignature)
                }
            }
            Entry::Vacant(entry) => {
                // first shred seen for this slice: verify the signature and
                // cache the root for later shreds of the same slice
                if shred.merkle_root_sig.verify(shred.merkle_root.as_ref(), pk) {
                    entry.insert(shred.merkle_root.clone());
                    Ok(Self(shred))
                } else {
                    Err(ShredVerifyError::InvalidSignature)
                }
            }
        }
    }
    /// Creates a new [`ValidatedShred`] when the inner [`Shred`] does not need to be verified.
    ///
    /// Used only by the parent module to create a validated shred when it is guaranteed that the inner shred comes from verified sources and does not need to be verified.
    pub(super) fn new_validated(shred: Shred) -> Self {
        Self(shred)
    }
    /// Get access to the inner [`Shred`] consuming self.
    pub fn into_shred(self) -> Shred {
        self.0
    }
}
// Deref to the inner `Shred` for read-only access without unwrapping.
impl Deref for ValidatedShred {
    type Target = Shred;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
// Mutable access to the inner `Shred`; callers are trusted not to
// invalidate the already-verified fields.
impl DerefMut for ValidatedShred {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
#[cfg(test)]
mod tests {
    use std::collections::BTreeMap;
    use rand::rng;
    use super::*;
    use crate::crypto::signature::SecretKey;
    use crate::shredder::{MAX_DATA_PER_SLICE, RegularShredder, Shredder};
    use crate::types::slice::create_slice_with_invalid_txs;
    // Helper: produces one signed shred together with the signing key.
    fn create_random_shred() -> (Shred, SecretKey) {
        let mut shredder = RegularShredder::default();
        let sk = SecretKey::new(&mut rng());
        let slice = create_slice_with_invalid_txs(MAX_DATA_PER_SLICE - 16);
        let shreds = shredder.shred(slice, &sk).unwrap();
        let shred = shreds[shreds.len() - 1].clone().into_shred();
        (shred, sk)
    }
    // Exercises signature verification, root caching, and equivocation detection.
    #[test]
    fn shred_verification() {
        let mut map = BTreeMap::new();
        let slice_index = SliceIndex::first();
        let random_pk = SecretKey::new(&mut rng()).to_pk();
        let (shred, sk) = create_random_shred();
        // checking against other public key should fail
        let res = ValidatedShred::try_new(shred.clone(), map.entry(slice_index), &random_pk);
        assert!(matches!(res, Err(ShredVerifyError::InvalidSignature)));
        assert!(!map.contains_key(&slice_index));
        // checking against correct public key should succeed
        let res = ValidatedShred::try_new(shred, map.entry(slice_index), &sk.to_pk());
        assert!(res.is_ok());
        assert!(map.contains_key(&slice_index));
        let (invalid_shred, invalid_shred_sk) = create_random_shred();
        // checking against other public key should fail
        // and should not be considered as equivocation
        let res =
            ValidatedShred::try_new(invalid_shred.clone(), map.entry(slice_index), &random_pk);
        assert!(matches!(res, Err(ShredVerifyError::InvalidSignature)));
        // checking different shred (with different Merkle root and valid sig)
        // against existing map entry should fail and detect equivocation
        let res = ValidatedShred::try_new(
            invalid_shred,
            map.entry(slice_index),
            &invalid_shred_sk.to_pk(),
        );
        assert!(matches!(res, Err(ShredVerifyError::Equivocation)));
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/shredder/pool.rs | src/shredder/pool.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Pool of shredder instances.
//!
//! # Examples
//!
//! Obtain a shredder from the pool, use it, and let it return on drop.
//!
//! ```rust
//! use alpenglow::shredder::{RegularShredder, ShredderPool, Shredder};
//!
//! fn use_shredder<S: Shredder>(shredder: &mut S) {
//! // ...
//! }
//!
//! let shredder_pool = ShredderPool::<RegularShredder>::with_size(1);
//! {
//! let mut shredder = shredder_pool.checkout().unwrap();
//! use_shredder(&mut (*shredder));
//! // shredder is automatically returned to pool when dropped
//! }
//! ```
use std::ops::{Deref, DerefMut};
use std::sync::{Arc, Mutex};
use super::Shredder;
/// A pool of shredders of the same type.
pub struct ShredderPool<S: Shredder> {
    // shared with every checked-out `ShredderGuard`, so that a dropped
    // guard can push its shredder back into the pool
    shredders: Arc<Mutex<Vec<S>>>,
}
impl<S: Shredder> ShredderPool<S> {
    /// Creates a new pool with the provided shredders.
    pub fn new(shredders: Vec<S>) -> Self {
        Self {
            shredders: Arc::new(Mutex::new(shredders)),
        }
    }

    /// Takes a shredder from the pool.
    ///
    /// The shredder is automatically returned to the pool when dropped.
    ///
    /// Returns [`None`] iff the pool is empty.
    pub fn checkout(&self) -> Option<ShredderGuard<S>> {
        // the lock guard is a temporary and is released at the end of this statement
        let shredder = self.shredders.lock().unwrap().pop()?;
        Some(ShredderGuard {
            pool: Arc::clone(&self.shredders),
            shredder: Some(shredder),
        })
    }
}
impl<S: Shredder + Default> ShredderPool<S> {
    /// Creates a new pool with `size` default-constructed shredders.
    pub fn with_size(size: usize) -> Self {
        Self::new(std::iter::repeat_with(S::default).take(size).collect())
    }
}
/// Guard holding a single shredder from a pool.
///
/// The shredder is automatically returned to the pool when dropped.
pub struct ShredderGuard<S: Shredder> {
    // the pool to return the shredder to on drop
    pool: Arc<Mutex<Vec<S>>>,
    // always `Some` until `Drop::drop` takes the shredder out
    shredder: Option<S>,
}
// Deref to the held shredder; `shredder` is only `None` during drop.
impl<S: Shredder> Deref for ShredderGuard<S> {
    type Target = S;
    fn deref(&self) -> &Self::Target {
        self.shredder.as_ref().expect("should exist until dropping")
    }
}
// Mutable access to the held shredder; `shredder` is only `None` during drop.
impl<S: Shredder> DerefMut for ShredderGuard<S> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.shredder.as_mut().expect("should exist until dropping")
    }
}
// Returns the shredder to its pool when the guard goes out of scope.
impl<S: Shredder> Drop for ShredderGuard<S> {
    fn drop(&mut self) {
        let shredder = self.shredder.take().expect("should exist until dropping");
        self.pool.lock().unwrap().push(shredder);
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::shredder::RegularShredder;
    // A size-1 pool can be checked out repeatedly, one guard at a time.
    #[test]
    fn checkout_sequentially() {
        let shredder_pool = ShredderPool::with_size(1);
        for _ in 0..10 {
            // taking one shredder at a time works
            let mut guard = shredder_pool.checkout().unwrap();
            // taking a second shredder should fail
            assert!(shredder_pool.checkout().is_none());
            let _shredder: &mut RegularShredder = &mut guard;
        }
    }
    // A size-2 pool allows exactly two concurrent checkouts.
    #[test]
    fn checkout_concurrently() {
        let shredder_pool = ShredderPool::with_size(2);
        // taking two shredders at a time works
        let mut guard1 = shredder_pool.checkout().unwrap();
        let _shredder1: &mut RegularShredder = &mut guard1;
        let mut guard2 = shredder_pool.checkout().unwrap();
        let _shredder2: &mut RegularShredder = &mut guard2;
        // taking a third shredder should fail
        assert!(shredder_pool.checkout().is_none());
        drop(guard1);
        drop(guard2);
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/disseminator/turbine.rs | src/disseminator/turbine.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Implementation of Solana's Turbine block dissemination protocol.
//!
//! For each slot and shred index, a different Turbine tree is built.
//! Each tree corresponds to a stake-weighted shuffling of the validators.
//!
//! See also: <https://docs.anza.xyz/consensus/turbine-block-propagation>
mod weighted_shuffle;
use async_trait::async_trait;
use moka::future::Cache;
use rand::prelude::*;
pub(crate) use self::weighted_shuffle::WeightedShuffle;
use super::Disseminator;
use crate::network::{Network, ShredNetwork};
use crate::shredder::Shred;
use crate::{Slot, ValidatorId, ValidatorInfo};
/// Default fanout for the Turbine tree.
///
/// Each non-leaf node forwards a shred to at most this many children.
pub const DEFAULT_FANOUT: usize = 200;
/// Maximum number of different Turbine trees cached.
///
/// A [`TurbineTree`] takes roughly 1600 bytes of memory to store.
/// So, caching up to 2^16 trees occupies roughly 100 MiB of memory.
const MAX_CACHED_TREES: u64 = 65536;
/// Implementation of Solana's Turbine block dissemination protocol.
pub struct Turbine<N: Network> {
    // this validator's own identity, used to locate itself in each tree
    validator_id: ValidatorId,
    // contact info and stakes for all validators in the epoch
    validators: Vec<ValidatorInfo>,
    // transport used to send and receive shreds
    network: N,
    // tree fanout; trees are rebuilt (and the cache reset) when it changes
    fanout: usize,
    // per-(slot, shred index) tree cache, bounded by MAX_CACHED_TREES
    tree_cache: Cache<(Slot, usize), TurbineTree>,
}
/// View of the Turbine tree from a specific validator's perspective.
///
/// Only contains the information relevant for sending and receiving shreds.
/// The rest of the tree is not stored, especially to make caching more efficient.
#[derive(Clone, Debug)]
pub(crate) struct TurbineTree {
    // the first validator in the shuffled order; receives shreds from the leader
    root: ValidatorId,
    #[allow(dead_code)]
    parent: Option<ValidatorId>,
    // validators this node forwards shreds to (at most `fanout` of them)
    children: Vec<ValidatorId>,
}
impl<N> Turbine<N>
where
    N: ShredNetwork,
{
    /// Creates a new Turbine instance, configured with the default fanout.
    pub fn new(validator_id: ValidatorId, validators: Vec<ValidatorInfo>, network: N) -> Self {
        Self {
            validator_id,
            validators,
            network,
            fanout: DEFAULT_FANOUT,
            tree_cache: Cache::new(MAX_CACHED_TREES),
        }
    }
    /// Turns this instance into a new instance with a different fanout value.
    ///
    /// This invalidates all cached trees.
    #[must_use]
    pub fn with_fanout(mut self, fanout: usize) -> Self {
        // same fanout: cached trees stay valid, nothing to do
        if fanout == self.fanout {
            return self;
        }
        self.fanout = fanout;
        // trees depend on the fanout, so drop the now-stale cache
        self.tree_cache = Cache::new(MAX_CACHED_TREES);
        self
    }
    /// Sends the shred to the correct Turbine tree's root validator.
    /// Which Turbine tree to use is determined by the slot and shred index.
    ///
    /// # Errors
    ///
    /// Returns an error if the send operation on the underlying network fails.
    pub async fn send_shred_to_root(&self, shred: &Shred) -> std::io::Result<()> {
        let tree = self
            .get_tree(shred.payload().header.slot, shred.payload().index_in_slot())
            .await;
        let root = tree.get_root();
        let addr = self.validators[root as usize].disseminator_address;
        self.network.send(shred, addr).await
    }
    /// Forwards the shred to all our children in the correct Turbine tree.
    /// Which Turbine tree to use is determined by the slot and shred index.
    ///
    /// # Errors
    ///
    /// Returns an error if the send operation on the underlying network fails.
    pub async fn forward_shred(&self, shred: &Shred) -> std::io::Result<()> {
        let tree = self
            .get_tree(shred.payload().header.slot, shred.payload().index_in_slot())
            .await;
        let addrs = tree
            .get_children()
            .iter()
            .map(|child| self.validators[*child as usize].disseminator_address);
        self.network.send_to_many(shred, addrs).await?;
        Ok(())
    }
    /// Returns the correct Turbine tree for the given slot and shred index.
    /// If the tree is cached, it is returned, otherwise it is built and cached.
    async fn get_tree(&self, slot: Slot, shred: usize) -> TurbineTree {
        if let Some(tree) = self.tree_cache.get(&(slot, shred)).await {
            return tree;
        }
        // tree construction is deterministic, so a concurrent get/insert
        // of the same key simply builds an identical tree twice
        let tree = TurbineTree::new(
            &self.validators,
            self.fanout,
            self.validator_id,
            slot,
            shred,
        );
        self.tree_cache.insert((slot, shred), tree.clone()).await;
        tree
    }
}
// `Disseminator` is implemented by delegating to the inherent Turbine methods.
#[async_trait]
impl<N> Disseminator for Turbine<N>
where
    N: ShredNetwork,
{
    async fn send(&self, shred: &Shred) -> std::io::Result<()> {
        self.send_shred_to_root(shred).await
    }
    async fn forward(&self, shred: &Shred) -> std::io::Result<()> {
        self.forward_shred(shred).await
    }
    async fn receive(&self) -> std::io::Result<Shred> {
        self.network.receive().await
    }
}
impl TurbineTree {
    /// Generates a new `TurbineTree` for the given parameters.
    ///
    /// This is deterministic, i.e., same parameters result in the same tree.
    /// Only the neighborhood of the validator given by `own_id` is kept.
    pub fn new(
        validators: &[ValidatorInfo],
        fanout: usize,
        own_id: ValidatorId,
        slot: Slot,
        shred: usize,
    ) -> Self {
        // seed the RNG
        // (16-byte domain tag + 8-byte slot + 8-byte shred index;
        // the assert below checks this adds up to the required 32 bytes,
        // which holds on targets with an 8-byte usize)
        let seed = [
            b"ALPENGLOWTURBINE",
            &slot.inner().to_be_bytes()[..],
            &shred.to_be_bytes()[..],
        ]
        .concat();
        assert_eq!(seed.len(), 32);
        let mut rng = StdRng::from_seed(seed.try_into().unwrap());
        // stake-weighted shuffle
        let mut weighted_shuffle = WeightedShuffle::new(validators.iter().map(|v| v.stake));
        // TODO: remove leader
        let validator_ids: Vec<_> = weighted_shuffle
            .shuffle(&mut rng)
            .map(|i| i as ValidatorId)
            .collect();
        // find root & parent
        // (k-ary heap layout over the shuffled order: the parent of
        // position p > 0 is position (p - 1) / fanout)
        let root = validator_ids[0];
        let own_pos = validator_ids.iter().position(|v| *v == own_id).unwrap();
        let parent_pos = match own_pos {
            0 => None,
            _ => Some((own_pos - 1) / fanout),
        };
        // find children
        // (children of position p occupy positions p * fanout + 1 ..= p * fanout + fanout)
        let offset = own_pos * fanout + 1;
        let children = validator_ids
            .iter()
            .skip(offset)
            .take(fanout)
            .copied()
            .collect();
        Self {
            root,
            parent: parent_pos.map(|p| validator_ids[p]),
            children,
        }
    }
    /// Gives the root validator of this Turbine tree.
    pub const fn get_root(&self) -> ValidatorId {
        self.root
    }
    /// Gives the parent of this validator in the Turbine tree.
    /// Returns `None` iff this validator is the root of the tree.
    #[allow(dead_code)]
    pub const fn get_parent(&self) -> Option<ValidatorId> {
        self.parent
    }
    /// Gives the list of children of this validator in the Turbine tree.
    pub fn get_children(&self) -> &[ValidatorId] {
        &self.children
    }
}
#[cfg(test)]
mod tests {
    use std::collections::HashSet;
    use std::sync::Arc;
    use std::time::Duration;
    use tokio::sync::Mutex;
    use tokio::task;
    use super::*;
    use crate::crypto::aggsig;
    use crate::crypto::signature::SecretKey;
    use crate::network::simulated::SimulatedNetworkCore;
    use crate::network::{SimulatedNetwork, dontcare_sockaddr, localhost_ip_sockaddr};
    use crate::shredder::{MAX_DATA_PER_SLICE, RegularShredder, Shredder, TOTAL_SHREDS};
    use crate::types::slice::create_slice_with_invalid_txs;
    // Helper: builds `count` equal-stake validators with fresh keys.
    fn create_validator_info(count: u64) -> (Vec<SecretKey>, Vec<ValidatorInfo>) {
        let mut sks = Vec::new();
        let mut voting_sks = Vec::new();
        let mut validators = Vec::new();
        for i in 0..count {
            sks.push(SecretKey::new(&mut rand::rng()));
            voting_sks.push(aggsig::SecretKey::new(&mut rand::rng()));
            validators.push(ValidatorInfo {
                id: i,
                stake: 1,
                pubkey: sks[i as usize].to_pk(),
                voting_pubkey: voting_sks[i as usize].to_pk(),
                all2all_address: dontcare_sockaddr(),
                disseminator_address: dontcare_sockaddr(),
                repair_request_address: dontcare_sockaddr(),
                repair_response_address: dontcare_sockaddr(),
            });
        }
        (sks, validators)
    }
    // Helper: one Turbine instance per validator, all joined to a lossless
    // simulated network (disseminator addresses are rewritten to match).
    async fn create_turbine_instances(
        validators: &mut [ValidatorInfo],
    ) -> Vec<Turbine<SimulatedNetwork<Shred, Shred>>> {
        let core = Arc::new(
            SimulatedNetworkCore::default()
                .with_jitter(0.0)
                .with_packet_loss(0.0),
        );
        for (i, v) in validators.iter_mut().enumerate() {
            v.disseminator_address = localhost_ip_sockaddr(i.try_into().unwrap());
        }
        let mut disseminators = Vec::new();
        for i in 0..validators.len() {
            let network = core.join_unlimited(i as ValidatorId).await;
            let turbine = Turbine::new(i as ValidatorId, validators.to_vec(), network);
            disseminators.push(turbine);
        }
        disseminators
    }
    // Checks structural invariants of the tree across all validators' views.
    #[test]
    fn tree() {
        let (_, validators) = create_validator_info(2000);
        let mut trees = Vec::new();
        for v in 0..validators.len() {
            let v = v as ValidatorId;
            let tree = TurbineTree::new(&validators, 200, v, Slot::new(0), 0);
            trees.push((v, tree));
        }
        let root = trees[0].1.get_root();
        for (v, tree) in &trees {
            // all validators know the correct root
            assert_eq!(tree.get_root(), root);
            // validator is never its own parent
            assert!(tree.get_parent().is_none() || tree.get_parent().unwrap() != *v);
            // validator never appears in its own children
            assert!(!tree.get_children().contains(v));
            // no validator appears multiple times in the tree
            let children: HashSet<_> = tree.get_children().iter().collect();
            assert_eq!(children.len(), tree.get_children().len());
            assert!(!tree.get_children().contains(&root));
            if let Some(parent) = tree.get_parent() {
                assert!(!children.contains(&parent));
            }
            // parent-child compatibility
            for child in tree.get_children() {
                let childs_parent = trees[*child as usize].1.get_parent();
                assert_eq!(childs_parent, Some(*v));
            }
            if let Some(parent) = tree.get_parent() {
                let parents_children = trees[parent as usize].1.get_children();
                assert!(parents_children.contains(v));
            }
        }
    }
    // The number of children must never exceed the configured fanout
    // (nor the total number of validators).
    #[test]
    fn tree_fanouts() {
        let (_, validators) = create_validator_info(500);
        for v in 0..validators.len() {
            let v = v as ValidatorId;
            let tree = TurbineTree::new(&validators, 200, v, Slot::new(0), 0);
            assert!(tree.get_children().len() <= 200);
            let tree = TurbineTree::new(&validators, 1, v, Slot::new(0), 0);
            assert!(tree.get_children().len() <= 1);
            let tree = TurbineTree::new(&validators, 2, v, Slot::new(0), 0);
            assert!(tree.get_children().len() <= 2);
            let tree = TurbineTree::new(&validators, 400, v, Slot::new(0), 0);
            assert!(tree.get_children().len() <= 400);
            let tree = TurbineTree::new(&validators, 1000, v, Slot::new(0), 0);
            assert!(tree.get_children().len() <= 500);
        }
    }
    // End-to-end: every non-leader eventually receives every shred.
    #[tokio::test]
    async fn dissemination() {
        let (sks, mut validators) = create_validator_info(10);
        let mut disseminators = create_turbine_instances(&mut validators).await;
        let slice = create_slice_with_invalid_txs(MAX_DATA_PER_SLICE);
        let shreds = RegularShredder::default().shred(slice, &sks[0]).unwrap();
        let shreds_received = Arc::new(Mutex::new(0_usize));
        let mut tasks = Vec::new();
        // forward & receive shreds on "non-leader" disseminator instance
        for _ in 0..disseminators.len() - 1 {
            let sr = shreds_received.clone();
            let diss_non_leader = disseminators.pop().unwrap();
            tasks.push(task::spawn(async move {
                loop {
                    match diss_non_leader.receive().await {
                        Ok(shred) => {
                            diss_non_leader.forward(&shred).await.unwrap();
                            *sr.lock().await += 1;
                        }
                        _ => continue,
                    }
                }
            }));
        }
        tokio::time::sleep(Duration::from_millis(10)).await;
        for shred in shreds {
            disseminators[0].send(&shred).await.unwrap();
        }
        // forward shreds on the "leader" disseminator instance
        assert_eq!(disseminators.len(), 1);
        let leader = disseminators.pop().unwrap();
        let task_leader = task::spawn(async move {
            loop {
                match leader.receive().await {
                    Ok(shred) => {
                        leader.forward(&shred).await.unwrap();
                    }
                    _ => continue,
                }
            }
        });
        // wait for shreds to arrive
        // needs to be longer than the latency of the simulated network
        tokio::time::sleep(Duration::from_millis(500)).await;
        // non-leaders should have received all shreds via Turbine
        assert_eq!(*shreds_received.lock().await, 9 * TOTAL_SHREDS);
        task_leader.abort();
        for task in tasks {
            task.abort();
        }
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/disseminator/rotor.rs | src/disseminator/rotor.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Implementation of Alpenglow's new Rotor block dissemination protocol.
//!
//! This is an evolution of Solana's original Turbine block dissemination protocol.
//! Instead of a multi-layered tree, it always uses a single layer of relayers.
//!
//! Rotor can be instantiated with any quorum sampling strategy.
//! Therefore, this module also provides multiple implementation of such.
//! See also, the [`sampling_strategy`] module and the [`SamplingStrategy`] trait.
//!
//! For an implementation of Turbine, see [`crate::disseminator::turbine::Turbine`].
pub mod sampling_strategy;
use std::sync::Arc;
use async_trait::async_trait;
use rand::prelude::*;
use self::sampling_strategy::PartitionSampler;
pub use self::sampling_strategy::{FaitAccompli1Sampler, SamplingStrategy, StakeWeightedSampler};
use super::Disseminator;
use crate::consensus::EpochInfo;
use crate::network::{Network, ShredNetwork};
use crate::shredder::{Shred, TOTAL_SHREDS};
use crate::{Slot, ValidatorId};
/// Rotor is a new block dissemination protocol presented together with Alpenglow.
pub struct Rotor<N: Network, S: SamplingStrategy> {
    // transport used to send and receive shreds
    network: N,
    // strategy for picking the relay of each (slot, shred index) pair
    sampler: S,
    // validator contact info and leader schedule for the current epoch
    epoch_info: Arc<EpochInfo>,
}
impl<N: Network> Rotor<N, StakeWeightedSampler> {
    /// Creates a new Rotor instance with the default sampling strategy.
    ///
    /// Contact information for all validators is taken from `epoch_info`.
    /// Provided `network` will be used to send and receive shreds.
    pub fn new(network: N, epoch_info: Arc<EpochInfo>) -> Self {
        let validators = epoch_info.validators.clone();
        let sampler = StakeWeightedSampler::new(validators);
        Self {
            network,
            sampler,
            epoch_info,
        }
    }
}
impl<N: Network> Rotor<N, FaitAccompli1Sampler<PartitionSampler>> {
    /// Creates a new Rotor instance with the FA1 sampling strategy.
    ///
    /// Contact information for all validators is taken from `epoch_info`.
    /// Provided `network` will be used to send and receive shreds.
    pub fn new_fa1(network: N, epoch_info: Arc<EpochInfo>) -> Self {
        let validators = epoch_info.validators.clone();
        let sampler =
            FaitAccompli1Sampler::new_with_partition_fallback(validators, TOTAL_SHREDS as u64);
        Self {
            network,
            sampler,
            epoch_info,
        }
    }
}
impl<N, S: SamplingStrategy> Rotor<N, S>
where
    N: ShredNetwork,
{
    /// Turns this instance into a new instance with a different sampling strategy.
    #[must_use]
    pub fn with_sampler(self, sampler: S) -> Self {
        Self { sampler, ..self }
    }

    /// Sends the shred to the correct relay.
    ///
    /// The relay is derived deterministically from the shred's slot and index,
    /// then the shred is unicast to that relay's disseminator address.
    async fn send_as_leader(&self, shred: &Shred) -> std::io::Result<()> {
        let relay = self.sample_relay(shred.payload().header.slot, shred.payload().index_in_slot());
        let v = self.epoch_info.validator(relay);
        self.network.send(shred, v.disseminator_address).await
    }

    /// Broadcasts a shred to all validators except for the leader and itself.
    /// Does nothing if we are not the dedicated relay for this shred.
    async fn broadcast_if_relay(&self, shred: &Shred) -> std::io::Result<()> {
        let leader = self.epoch_info.leader(shred.payload().header.slot).id;
        // do nothing if we are not the relay
        let relay = self.sample_relay(shred.payload().header.slot, shred.payload().index_in_slot());
        if self.epoch_info.own_id != relay {
            return Ok(());
        }
        // otherwise, broadcast to everyone but the leader and ourselves
        let to = self
            .epoch_info
            .validators
            .iter()
            .filter(|v| v.id != leader && v.id != relay)
            .map(|v| v.disseminator_address);
        self.network.send_to_many(shred, to).await?;
        Ok(())
    }

    /// Deterministically samples the relay for the given `(slot, shred)` pair.
    ///
    /// The RNG is seeded purely from the slot number and the shred's index in
    /// the slot, so all validators with the same sampler configuration derive
    /// the same relay.
    fn sample_relay(&self, slot: Slot, shred: usize) -> ValidatorId {
        // Widen `shred` to `u64` so the seed is exactly 32 bytes on every
        // target. `usize::to_be_bytes()` yields only 4 bytes on 32-bit
        // platforms, which would make the `try_into()` below panic.
        let seed = [
            slot.inner().to_be_bytes(),
            (shred as u64).to_be_bytes(),
            [0; 8],
            [0; 8],
        ]
        .concat();
        let mut rng = StdRng::from_seed(seed.try_into().unwrap());
        self.sampler.sample(&mut rng)
    }
}
#[async_trait]
impl<N, S: SamplingStrategy + Send + Sync + 'static> Disseminator for Rotor<N, S>
where
    N: ShredNetwork,
{
    /// Unicasts the shred to its dedicated relay (leader-side entry point).
    async fn send(&self, shred: &Shred) -> std::io::Result<()> {
        self.send_as_leader(shred).await
    }

    /// Rebroadcasts the shred iff this node is the shred's dedicated relay.
    async fn forward(&self, shred: &Shred) -> std::io::Result<()> {
        self.broadcast_if_relay(shred).await
    }

    /// Receives the next shred from the underlying network.
    async fn receive(&self) -> std::io::Result<Shred> {
        self.network.receive().await
    }
}
#[cfg(test)]
mod tests {
    use std::collections::HashSet;
    use std::sync::Arc;
    use std::time::Duration;
    use tokio::sync::Mutex;
    use tokio::task;
    use super::*;
    use crate::ValidatorInfo;
    use crate::crypto::aggsig;
    use crate::crypto::signature::SecretKey;
    use crate::network::{UdpNetwork, dontcare_sockaddr, localhost_ip_sockaddr};
    use crate::shredder::{MAX_DATA_PER_SLICE, RegularShredder, Shredder, TOTAL_SHREDS};
    use crate::types::slice::create_slice_with_invalid_txs;
    type MyRotor = Rotor<UdpNetwork<Shred, Shred>, StakeWeightedSampler>;
    /// Creates `count` Rotor instances bound to consecutive localhost UDP
    /// ports starting at `base_port`, all sharing the same unit-stake
    /// validator set. Returns the per-validator secret keys and the instances.
    fn create_rotor_instances(count: u64, base_port: u16) -> (Vec<SecretKey>, Vec<MyRotor>) {
        let mut sks = Vec::new();
        let mut voting_sks = Vec::new();
        let mut validators = Vec::new();
        for i in 0..count {
            sks.push(SecretKey::new(&mut rand::rng()));
            voting_sks.push(aggsig::SecretKey::new(&mut rand::rng()));
            validators.push(ValidatorInfo {
                id: i,
                stake: 1,
                pubkey: sks[i as usize].to_pk(),
                voting_pubkey: voting_sks[i as usize].to_pk(),
                all2all_address: dontcare_sockaddr(),
                disseminator_address: localhost_ip_sockaddr(base_port + i as u16),
                repair_request_address: dontcare_sockaddr(),
                repair_response_address: dontcare_sockaddr(),
            });
        }
        let mut rotors = Vec::new();
        for i in 0..count {
            let epoch_info = Arc::new(EpochInfo::new(i, validators.clone()));
            let network = UdpNetwork::new(base_port + i as u16);
            rotors.push(Rotor::new(network, epoch_info));
        }
        (sks, rotors)
    }
    /// End-to-end dissemination check: instance 0 acts as the sender of all
    /// shreds of one slice; every other instance receives and forwards.
    /// Asserts that each non-leader instance sees every shred exactly once.
    async fn test_rotor_dissemination(count: u64, base_port: u16) {
        let (sks, mut rotors) = create_rotor_instances(count, base_port);
        let slice = create_slice_with_invalid_txs(MAX_DATA_PER_SLICE);
        let shreds = RegularShredder::default().shred(slice, &sks[0]).unwrap();
        let mut shreds_received = Vec::with_capacity(rotors.len());
        (0..rotors.len()).for_each(|_| shreds_received.push(Arc::new(Mutex::new(HashSet::new()))));
        let mut rotor_tasks = Vec::with_capacity(rotors.len());
        // forward & receive shreds on "non-leader" Rotor instance
        // NOTE(review): `shreds_received[i]` does not correspond to the popped
        // rotor's validator id (instances are popped from the back); only the
        // per-instance set sizes are asserted below, so this is harmless.
        for i in 0..rotors.len() - 1 {
            let shreds_received = shreds_received[i].clone();
            let rotor_non_leader = rotors.pop().unwrap();
            rotor_tasks.push(task::spawn(async move {
                loop {
                    match rotor_non_leader.receive().await {
                        Ok(shred) => {
                            rotor_non_leader.forward(&shred).await.unwrap();
                            let mut guard = shreds_received.lock().await;
                            // each shred index must be seen at most once per instance
                            assert!(!guard.contains(&*shred.payload().shred_index));
                            guard.insert(*shred.payload().shred_index);
                        }
                        _ => continue,
                    }
                }
            }));
        }
        // give the receiver tasks a moment to start before sending
        tokio::time::sleep(Duration::from_millis(10)).await;
        assert_eq!(rotors.len(), 1);
        for shred in shreds {
            rotors[0].send(&shred).await.unwrap();
        }
        // forward shreds on the "leader" Rotor instance
        let rotor_leader = rotors.pop().unwrap();
        let rotor_task_leader = task::spawn(async move {
            loop {
                match rotor_leader.receive().await {
                    Ok(shred) => {
                        rotor_leader.forward(&shred).await.unwrap();
                    }
                    _ => continue,
                }
            }
        });
        // allow time for relays to rebroadcast before checking the counts
        tokio::time::sleep(Duration::from_millis(100)).await;
        // non-leader instances should have received all shreds via Rotor
        for i in 0..(count - 1) {
            assert_eq!(shreds_received[i as usize].lock().await.len(), TOTAL_SHREDS);
        }
        rotor_task_leader.abort();
        for task in rotor_tasks {
            task.abort();
        }
    }
    #[tokio::test]
    async fn two_instances() {
        test_rotor_dissemination(2, 3000).await
    }
    #[tokio::test]
    async fn many_instances() {
        test_rotor_dissemination(10, 3100).await
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/disseminator/trivial.rs | src/disseminator/trivial.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
use async_trait::async_trait;
use super::Disseminator;
use crate::ValidatorInfo;
use crate::network::{Network, ShredNetwork};
use crate::shredder::Shred;
/// A trivial implementation for a block disseminator.
/// The leader just sends each shred directly to every validator.
pub struct TrivialDisseminator<N: Network> {
    /// Full validator set; every entry's disseminator address is a recipient.
    validators: Vec<ValidatorInfo>,
    /// Underlying network used to send and receive shreds.
    network: N,
}
impl<N: Network> TrivialDisseminator<N> {
    /// Creates a new `TrivialDisseminator` for the given set of validators,
    /// using the provided `network` to send and receive shreds.
    pub const fn new(validators: Vec<ValidatorInfo>, network: N) -> Self {
        Self {
            validators,
            network,
        }
    }
}
#[async_trait]
impl<N> Disseminator for TrivialDisseminator<N>
where
    N: ShredNetwork,
{
    /// Broadcasts the shred directly to every known validator.
    async fn send(&self, shred: &Shred) -> std::io::Result<()> {
        let recipients = self.validators.iter().map(|v| v.disseminator_address);
        self.network.send_to_many(shred, recipients).await?;
        Ok(())
    }

    /// No-op: with a direct leader broadcast there is nothing to forward.
    async fn forward(&self, _shred: &Shred) -> std::io::Result<()> {
        Ok(())
    }

    /// Receives the next shred from the underlying network.
    async fn receive(&self) -> std::io::Result<Shred> {
        self.network.receive().await
    }
}
#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use std::time::Duration;
    use tokio::sync::Mutex;
    use tokio::task;
    use super::*;
    use crate::crypto::aggsig;
    use crate::crypto::signature::SecretKey;
    use crate::network::{UdpNetwork, dontcare_sockaddr, localhost_ip_sockaddr};
    use crate::shredder::{MAX_DATA_PER_SLICE, RegularShredder, Shredder, TOTAL_SHREDS};
    use crate::types::slice::create_slice_with_invalid_txs;
    /// Creates `count` disseminator instances on consecutive localhost UDP
    /// ports starting at `base_port`, all sharing the same unit-stake
    /// validator set. Returns the per-validator secret keys and the instances.
    fn create_disseminator_instances(
        count: u64,
        base_port: u16,
    ) -> (
        Vec<SecretKey>,
        Vec<TrivialDisseminator<UdpNetwork<Shred, Shred>>>,
    ) {
        let mut sks = Vec::new();
        let mut voting_sks = Vec::new();
        let mut validators = Vec::new();
        for i in 0..count {
            sks.push(SecretKey::new(&mut rand::rng()));
            voting_sks.push(aggsig::SecretKey::new(&mut rand::rng()));
            validators.push(ValidatorInfo {
                id: i,
                stake: 1,
                pubkey: sks[i as usize].to_pk(),
                voting_pubkey: voting_sks[i as usize].to_pk(),
                all2all_address: dontcare_sockaddr(),
                disseminator_address: localhost_ip_sockaddr(base_port + i as u16),
                repair_request_address: dontcare_sockaddr(),
                repair_response_address: dontcare_sockaddr(),
            });
        }
        let mut disseminators = Vec::new();
        for i in 0..count {
            let network = UdpNetwork::new(base_port + i as u16);
            disseminators.push(TrivialDisseminator::new(validators.clone(), network));
        }
        (sks, disseminators)
    }
    /// End-to-end check: instance 0 broadcasts all shreds of one slice and
    /// all 19 other instances count how many shreds they received in total.
    #[tokio::test]
    async fn dissemination() {
        let (sks, mut disseminators) = create_disseminator_instances(20, 5000);
        let slice = create_slice_with_invalid_txs(MAX_DATA_PER_SLICE);
        let shreds = RegularShredder::default().shred(slice, &sks[0]).unwrap();
        let shreds_received = Arc::new(Mutex::new(0_usize));
        let mut tasks = Vec::new();
        // forward & receive shreds on "non-leader" disseminator instance
        for _ in 0..disseminators.len() - 1 {
            let sr = shreds_received.clone();
            let diss_non_leader = disseminators.pop().unwrap();
            tasks.push(task::spawn(async move {
                loop {
                    match diss_non_leader.receive().await {
                        Ok(shred) => {
                            // forward is a no-op for TrivialDisseminator
                            diss_non_leader.forward(&shred).await.unwrap();
                            *sr.lock().await += 1;
                        }
                        _ => continue,
                    }
                }
            }));
        }
        // give the receiver tasks a moment to start before sending
        tokio::time::sleep(Duration::from_millis(10)).await;
        for shred in shreds {
            disseminators[0].send(&shred).await.unwrap();
        }
        // forward shreds on the "leader" disseminator instance
        assert_eq!(disseminators.len(), 1);
        let rotor_leader = disseminators.pop().unwrap();
        let rotor_task_leader = task::spawn(async move {
            loop {
                match rotor_leader.receive().await {
                    Ok(shred) => {
                        rotor_leader.forward(&shred).await.unwrap();
                    }
                    _ => continue,
                }
            }
        });
        tokio::time::sleep(Duration::from_millis(100)).await;
        // each of the 19 non-leaders should have received all shreds
        // directly from the leader's broadcast
        assert_eq!(*shreds_received.lock().await, 19 * TOTAL_SHREDS);
        rotor_task_leader.abort();
        for task in tasks {
            task.abort();
        }
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/disseminator/rotor/sampling_strategy.rs | src/disseminator/rotor/sampling_strategy.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Different strategies for sampling validators.
//!
//! First, this module provides a trait for randomly sampling validators.
//! To implement a new sampling strategy, you need to implement [`SamplingStrategy`],
//! by implementing [`SamplingStrategy::sample`].
//! The trait provides a default implementation for sampling `k` validators,
//! via [`SamplingStrategy::sample_multiple`].
//! However, samplers might override this for performance reasons.
//!
//! # Sampling strategies
//!
//! This module provides implementations for the following sampling strategies:
//! - [`UniformSampler`] does uniform sampling with replacement.
//! - [`StakeWeightedSampler`] samples validators proportional to their stake.
//! - [`DecayingAcceptanceSampler`] samples validators less often as they approach their maximum.
//! - [`TurbineSampler`] simulates the workload of Turbine.
//! - [`PartitionSampler`] splits validators into bins and samples from each bin.
//! - [`FaitAccompli1Sampler`] uses the FA1-F committee sampling strategy.
//! - [`FaitAccompli2Sampler`] uses the FA2 committee sampling strategy.
use std::sync::Mutex;
use rand::distr::weighted::WeightedIndex;
use rand::prelude::*;
use crate::disseminator::turbine::DEFAULT_FANOUT;
use crate::{Stake, ValidatorId, ValidatorInfo};
/// Sampling strategies involving rejection sampling may panic after rejecting this many samples.
const MAX_TRIES_PER_SAMPLE: usize = 100_000;
/// An abstraction for randomly sampling validators based on some distribution.
pub trait SamplingStrategy {
    /// Samples a validator with this probability distribution.
    ///
    /// Depending on the implementor, this may or may not be stateless.
    ///
    /// # Panics
    ///
    /// Implementations may panic if the sampler has reached an invalid state
    /// or if the sampling process failed [`MAX_TRIES_PER_SAMPLE`] times.
    fn sample<R: RngCore>(&self, rng: &mut R) -> ValidatorId {
        self.sample_info(rng).id
    }
    /// Samples a validator's `ValidatorInfo` with this probability distribution.
    ///
    /// Depending on the implementor, this may or may not be stateless.
    ///
    /// # Panics
    ///
    /// Implementations may panic if the sampler has reached an invalid state
    /// or if the sampling process failed [`MAX_TRIES_PER_SAMPLE`] times.
    fn sample_info<R: RngCore>(&self, rng: &mut R) -> &ValidatorInfo;
    /// Samples `k` validators with this probability distribution.
    ///
    /// The default implementation performs `k` consecutive calls to
    /// [`SamplingStrategy::sample`]; implementors may override this, e.g. to
    /// sample without replacement or in a correlated fashion.
    ///
    /// # Panics
    ///
    /// Panics if any of the `k` calls to [`SamplingStrategy::sample`] panics.
    fn sample_multiple<R: RngCore>(&self, k: usize, rng: &mut R) -> Vec<ValidatorId> {
        (0..k).map(|_| self.sample(rng)).collect()
    }
    /// Returns a printable name of the sampling strategy.
    fn name() -> &'static str {
        std::any::type_name::<Self>()
    }
}
/// A trivial sampler that picks the same validator all the time.
#[derive(Clone)]
pub struct AllSameSampler(pub ValidatorInfo);

impl SamplingStrategy for AllSameSampler {
    /// Always returns the wrapped validator's id; the RNG is never consumed.
    fn sample<R: RngCore>(&self, rng: &mut R) -> ValidatorId {
        self.sample_info(rng).id
    }

    /// Always returns the wrapped validator; the RNG is never consumed.
    fn sample_info<R: RngCore>(&self, _rng: &mut R) -> &ValidatorInfo {
        let Self(info) = self;
        info
    }

    fn name() -> &'static str {
        "all_same"
    }
}
/// A basic sampler that picks all validators with equal probability.
///
/// This sampler is stateless and chooses validators with replacement.
/// Multiple samples from this are thus independent and identically distributed.
#[derive(Clone)]
pub struct UniformSampler {
    validators: Vec<ValidatorInfo>,
}

impl UniformSampler {
    /// Creates a new `UniformSampler` over the given validator set.
    pub const fn new(validators: Vec<ValidatorInfo>) -> Self {
        Self { validators }
    }
}

impl SamplingStrategy for UniformSampler {
    /// Draws a uniformly random index into the validator set.
    fn sample<R: RngCore>(&self, rng: &mut R) -> ValidatorId {
        let n = self.validators.len();
        rng.random_range(0..n) as ValidatorId
    }

    fn sample_info<R: RngCore>(&self, rng: &mut R) -> &ValidatorInfo {
        let i = self.sample(rng) as usize;
        &self.validators[i]
    }

    fn name() -> &'static str {
        "uniform"
    }
}
/// A sampler that picks validators directly proportional to their stake.
///
/// This sampler is stateless and chooses validators with replacement.
/// Multiple samples from this are thus independent and identically distributed.
#[derive(Clone)]
pub struct StakeWeightedSampler {
    validators: Vec<ValidatorInfo>,
    stake_index: WeightedIndex<u64>,
}

impl StakeWeightedSampler {
    /// Creates a new `StakeWeightedSampler` instance.
    ///
    /// # Panics
    ///
    /// Panics if [`WeightedIndex`] rejects the stake distribution
    /// (e.g. an empty validator set or all stakes zero).
    pub fn new(validators: Vec<ValidatorInfo>) -> Self {
        let weights: Vec<Stake> = validators.iter().map(|v| v.stake).collect();
        let stake_index = WeightedIndex::new(&weights).unwrap();
        Self {
            validators,
            stake_index,
        }
    }
}

impl SamplingStrategy for StakeWeightedSampler {
    /// Draws a validator index with probability proportional to its stake.
    fn sample<R: RngCore>(&self, rng: &mut R) -> ValidatorId {
        let index = self.stake_index.sample(rng);
        index as ValidatorId
    }

    fn sample_info<R: RngCore>(&self, rng: &mut R) -> &ValidatorInfo {
        let index = self.sample(rng) as usize;
        &self.validators[index]
    }

    fn name() -> &'static str {
        "stake_weighted"
    }
}
/// A hybrid sampler between weighted sampling with and without replacement.
///
/// Any element is sampled at most `ceil(max_samples)` times.
/// Elements are rejected with probability proportional to `k / max_samples`,
/// where `k` is how often the element has been sampled before.
/// Sampling differs between, e.g., `max_samples = 2` and `max_samples = 2.5`.
///
/// - For `max_samples = 1` it is stake-weighted sampling WITHOUT replacement.
/// - For `max_samples -> inf` it approaches the behavior WITH replacement.
pub struct DecayingAcceptanceSampler {
    // Underlying stake-weighted proposal distribution.
    stake_weighted: StakeWeightedSampler,
    // Soft cap on how often a single validator may be sampled.
    max_samples: f64,
    // Per-validator count of accepted samples; behind a Mutex because
    // sampling mutates state through a shared `&self` reference.
    sample_count: Mutex<Vec<usize>>,
}
impl DecayingAcceptanceSampler {
    /// Creates a new `DecayingAcceptanceSampler` instance.
    pub fn new(validators: Vec<ValidatorInfo>, max_samples: f64) -> Self {
        let sample_count = vec![0; validators.len()];
        Self {
            stake_weighted: StakeWeightedSampler::new(validators),
            max_samples,
            sample_count: Mutex::new(sample_count),
        }
    }
    /// Resets the internal state of this stateful sampler.
    /// After resetting it is just as it was when it was first created.
    pub fn reset(&self) {
        let mut sample_count = self.sample_count.lock().unwrap();
        *sample_count = vec![0; self.stake_weighted.validators.len()];
    }
}
impl SamplingStrategy for DecayingAcceptanceSampler {
    /// Samples a validator with the given probability distribution.
    ///
    /// # Panics
    ///
    /// Panics if after [`MAX_TRIES_PER_SAMPLE`] samples none was valid.
    fn sample<R: RngCore>(&self, rng: &mut R) -> ValidatorId {
        for _ in 0..MAX_TRIES_PER_SAMPLE {
            let sample = self.stake_weighted.sample(rng);
            let mut sample_count = self.sample_count.lock().unwrap();
            // Rejection probability grows linearly with how often this
            // validator has already been accepted, reaching 1 at max_samples.
            let p_reject = sample_count[sample as usize] as f64 / self.max_samples;
            if rng.random::<f64>() >= p_reject {
                sample_count[sample as usize] += 1;
                return sample;
            }
        }
        panic!("rejected all {MAX_TRIES_PER_SAMPLE} samples");
    }
    fn sample_info<R: RngCore>(&self, rng: &mut R) -> &ValidatorInfo {
        // NOTE: this also increments the sample count for the drawn validator.
        let index = self.sample(rng) as usize;
        &self.stake_weighted.validators[index]
    }
    fn sample_multiple<R: RngCore>(&self, k: usize, rng: &mut R) -> Vec<ValidatorId> {
        let samples = (0..k).map(|_| self.sample(rng)).collect();
        // reset the counts so consecutive batches are identically distributed
        self.reset();
        samples
    }
    fn name() -> &'static str {
        "decaying_acceptance"
    }
}
impl Clone for DecayingAcceptanceSampler {
    // Manual impl because `Mutex` is not `Clone`; snapshots the current counts.
    fn clone(&self) -> Self {
        Self {
            stake_weighted: self.stake_weighted.clone(),
            max_samples: self.max_samples,
            sample_count: Mutex::new(self.sample_count.lock().unwrap().clone()),
        }
    }
}
/// A sampler that simulates the probability distribution of Turbine for Rotor.
///
/// The goal is to distribute the required work for validators as in Turbine.
/// Specifically, it should respect the same upper bound on the amount of work,
/// that is, for `v` validators and given `fanout` any validator should
/// be sampled no more than with probability `fanout / v`.
#[derive(Clone)]
pub struct TurbineSampler {
    // Turbine fanout being simulated.
    fanout: usize,
    // Stake-weighted sampler whose "stakes" encode each validator's
    // expected Turbine workload (see `new_with_fanout`).
    stake_weighted: StakeWeightedSampler,
}
impl TurbineSampler {
    /// Creates a new `TurbineSampler` instance simulating the [`DEFAULT_FANOUT`]
    /// from the actual [`Turbine`] implementation.
    ///
    /// [`Turbine`]: crate::disseminator::turbine::Turbine
    pub fn new(validators: Vec<ValidatorInfo>) -> Self {
        Self::new_with_fanout(validators, DEFAULT_FANOUT)
    }
    /// Creates a new `TurbineSampler` instance simulating the given fanout.
    ///
    /// Enumerates (leader, root) pairs weighted by their stake-proportional
    /// selection probability and accumulates each validator's expected
    /// retransmission work, which is then re-used as its sampling weight.
    // TODO: support more than 2 levels of Turbine?
    #[must_use]
    pub fn new_with_fanout(mut validators: Vec<ValidatorInfo>, turbine_fanout: usize) -> Self {
        let total_stake: Stake = validators.iter().map(|v| v.stake).sum();
        // calculate expected work for each validator (only excess over leader work)
        let mut expected_work = vec![0.0; validators.len()];
        let validators_left = validators.len() - 1;
        for leader in &validators {
            // probability that this validator is the leader
            let prob = leader.stake as f64 / total_stake as f64;
            let stake_left = total_stake - leader.stake;
            // NOTE: shadowing — this value only lives for this leader iteration.
            let validators_left = validators_left - 1;
            for root in &validators {
                if root.id == leader.id {
                    continue;
                }
                // joint probability of this (leader, root) configuration
                let prob = prob * root.stake as f64 / stake_left as f64;
                let root_work = (turbine_fanout as f64).min(validators_left as f64);
                expected_work[root.id as usize] += prob * root_work;
                // shadowed again: these tighter values feed the level-1 loop below
                let stake_left = stake_left - root.stake;
                let validators_left = validators_left.saturating_sub(turbine_fanout);
                for maybe_level1 in &validators {
                    if maybe_level1.id == leader.id || maybe_level1.id == root.id {
                        continue;
                    }
                    let select_prob = maybe_level1.stake as f64 / stake_left as f64;
                    let full_level1_slots = validators_left / turbine_fanout;
                    // probability of landing in a full level-1 slot
                    // NOTE(review): treats slot assignments as independent
                    // draws — an approximation of the actual shuffle; confirm.
                    let prob_full =
                        prob * (1.0 - (1.0 - select_prob).powi(full_level1_slots as i32));
                    let full_level1_work = turbine_fanout as f64;
                    expected_work[maybe_level1.id as usize] += prob_full * full_level1_work;
                    // probability of landing in the final, partially-filled slot
                    let prob_partial =
                        prob * (1.0 - select_prob).powi(full_level1_slots as i32) * select_prob;
                    let partial_level1_work = (validators_left % turbine_fanout) as f64;
                    expected_work[maybe_level1.id as usize] += prob_partial * partial_level1_work;
                }
            }
        }
        // turn expected work into stakes
        for (i, w) in expected_work.into_iter().enumerate() {
            validators[i].stake = (w * 1_000_000_000.0) as Stake;
        }
        Self {
            fanout: turbine_fanout,
            stake_weighted: StakeWeightedSampler::new(validators),
        }
    }
}
impl SamplingStrategy for TurbineSampler {
    /// Samples a validator with the given probability distribution.
    ///
    /// # Panics
    ///
    /// Panics if after [`MAX_TRIES_PER_SAMPLE`] samples none was valid.
    fn sample<R: RngCore>(&self, rng: &mut R) -> ValidatorId {
        let n = self.stake_weighted.validators.len();
        let root = self.stake_weighted.sample(rng);
        // with probability fanout/n keep the root itself,
        // otherwise re-sample any other validator
        if rng.random::<f64>() < self.fanout as f64 / n as f64 {
            root
        } else {
            for _ in 0..MAX_TRIES_PER_SAMPLE {
                let sample = self.stake_weighted.sample(rng);
                if sample != root {
                    return sample;
                }
            }
            panic!("rejected all {MAX_TRIES_PER_SAMPLE} samples");
        }
    }
    fn sample_info<R: RngCore>(&self, rng: &mut R) -> &ValidatorInfo {
        let index = self.sample(rng) as usize;
        &self.stake_weighted.validators[index]
    }
    fn name() -> &'static str {
        "turbine"
    }
}
/// A sampler that samples proportional to stake with reduced variance.
///
/// This sampler operates on `k` bins of validators of equal stake.
/// Within each bin a validator is sampled with probability proportional to its stake.
/// To sample `k` validators then, one validator is drawn from each of the `k` bins.
///
/// Given that each validator has less stake than to fill one bin entirely,
/// as is the case if this is used as the fallback sampler in [`FaitAccompli1Sampler`],
/// each validator appears in at most two bins and is thus sampled at most twice.
///
/// In expectation each validator is sampled proportionally to its stake.
/// However, this is done with lower variance than [`StakeWeightedSampler`] would.
#[derive(Clone)]
pub struct PartitionSampler {
    validators: Vec<ValidatorInfo>,
    // One stake-weighted index per bin, aligned with `bin_validators`.
    bins: Vec<WeightedIndex<u64>>,
    // Validator ids contained in each bin.
    pub bin_validators: Vec<Vec<ValidatorId>>,
    // Portion of each validator's stake assigned to each bin.
    pub bin_stakes: Vec<Vec<Stake>>,
}
impl PartitionSampler {
    /// Creates a new `PartitionSampler` instance.
    ///
    /// Partitions the given validators into `num_bins` bins of equal stake.
    /// Partitioning is done randomly by splitting a randomly permuted list of nodes.
    // NOTE(review): uses the thread RNG here, so the partition differs per
    // instance/process — confirm this is acceptable for deterministic relays.
    pub fn new(validators: Vec<ValidatorInfo>, num_bins: usize) -> Self {
        if num_bins == 0 {
            return Self {
                validators,
                bins: Vec::new(),
                bin_validators: Vec::new(),
                bin_stakes: Vec::new(),
            };
        }
        let mut bin_validators = vec![Vec::new(); num_bins];
        let mut bin_stakes = vec![Vec::new(); num_bins];
        let total_stake: Stake = validators.iter().map(|v| v.stake).sum();
        let stake_per_bin = total_stake.div_ceil(num_bins as Stake);
        let mut validators_random = validators.clone();
        validators_random.shuffle(&mut rand::rng());
        // partition into bins; a validator whose stake straddles a bin
        // boundary is split across consecutive bins
        let mut current_bin = 0;
        let mut current_bin_stake = 0;
        for v in validators_random {
            let mut stake = v.stake;
            while stake > 0 {
                bin_validators[current_bin].push(v.id);
                let stake_to_take = stake.min(stake_per_bin - current_bin_stake);
                current_bin_stake += stake_to_take;
                bin_stakes[current_bin].push(stake_to_take);
                stake -= stake_to_take;
                if current_bin < num_bins - 1 && (stake > 0 || current_bin_stake == stake_per_bin) {
                    current_bin += 1;
                    current_bin_stake = 0;
                }
            }
        }
        // generate stake weighted indices for each bin
        let mut bins = Vec::with_capacity(num_bins);
        for stakes in &bin_stakes {
            let bin = WeightedIndex::new(stakes).unwrap();
            bins.push(bin);
        }
        Self {
            validators,
            bins,
            bin_validators,
            bin_stakes,
        }
    }
}
impl SamplingStrategy for PartitionSampler {
    // NOTE(review): single-sample draws are uniform over all validators and
    // ignore the bins; the partition semantics only apply to
    // `sample_multiple` — confirm this asymmetry is intended.
    fn sample<R: RngCore>(&self, rng: &mut R) -> ValidatorId {
        rng.random_range(0..self.validators.len()) as ValidatorId
    }
    fn sample_info<R: RngCore>(&self, rng: &mut R) -> &ValidatorInfo {
        let index = self.sample(rng) as usize;
        &self.validators[index]
    }
    // NOTE(review): ignores `_k` and always returns one sample per bin.
    fn sample_multiple<R: RngCore>(&self, _k: usize, rng: &mut R) -> Vec<ValidatorId> {
        let mut samples = Vec::new();
        for (bin, validators) in self.bins.iter().zip(self.bin_validators.iter()) {
            let i = bin.sample(rng);
            samples.push(validators[i]);
        }
        samples
    }
    fn name() -> &'static str {
        "partition"
    }
}
/// A sampler that uses the FA1-F committee sampling strategy.
///
/// This is a strict improvement over performing IID stake-weighted sampling.
/// It achieves lower variance by deterministically sampling high-stake validators.
///
/// FA1-F is parameterized by a fallback sampler `F` and runs in two phases:
/// 1. Any validator with more than `1/k` fractional stake, is deterministically
///    selected `floor(fractional stake * k)` times.
/// 2. For the remaining `k'` samples, sample each validator from `F`, instantiated
///    with modified stake weights: `S'(v) = S(v) - floor(S(v) * k) / k`
///
/// See also: <https://dl.acm.org/doi/pdf/10.1145/3576915.3623194>
pub struct FaitAccompli1Sampler<F: SamplingStrategy> {
    validators: Vec<ValidatorInfo>,
    // Deterministic (phase 1) seats; one entry per guaranteed sample.
    required_samples: Vec<ValidatorId>,
    // Phase-2 sampler over the truncated stake distribution.
    pub fallback_sampler: F,
}
impl FaitAccompli1Sampler<PartitionSampler> {
    /// Creates a new FA1-F sampler with a variance-reducing partition fallback sampler.
    ///
    /// See [`PartitionSampler`] for more details.
    #[must_use]
    pub fn new_with_partition_fallback(validators: Vec<ValidatorInfo>, k: u64) -> Self {
        let total_stake: Stake = validators.iter().map(|v| v.stake).sum();
        let mut required_samples = Vec::new();
        let mut validators_truncated_stake = validators.clone();
        // phase 1: deterministic seats + stake truncation (integer division)
        for v in &mut validators_truncated_stake {
            let frac_stake = v.stake as f64 / total_stake as f64;
            let samples = (frac_stake * k as f64).floor() as u64;
            v.stake -= samples * total_stake / k;
            required_samples.extend((0..samples).map(|_| v.id));
        }
        let all_zero = validators_truncated_stake.iter().all(|v| v.stake == 0);
        // number of remaining (randomized) seats = number of partition bins
        let k_prime = k as usize - required_samples.len();
        // if truncation consumed all stake, fall back to the original
        // distribution so the fallback sampler stays well-defined
        let fallback_sampler = if all_zero {
            PartitionSampler::new(validators.clone(), k_prime)
        } else {
            PartitionSampler::new(validators_truncated_stake, k_prime)
        };
        Self {
            validators,
            required_samples,
            fallback_sampler,
        }
    }
}
impl FaitAccompli1Sampler<StakeWeightedSampler> {
    /// Creates a new FA1-F sampler with an IID stake-weighted fallback sampler.
    ///
    /// See [`StakeWeightedSampler`] for more details.
    // NOTE(review): phase-1 logic is duplicated from
    // `new_with_partition_fallback` above; keep the two in sync.
    #[must_use]
    pub fn new_with_stake_weighted_fallback(validators: Vec<ValidatorInfo>, k: u64) -> Self {
        let total_stake: Stake = validators.iter().map(|v| v.stake).sum();
        let mut required_samples = Vec::new();
        let mut validators_truncated_stake = validators.clone();
        for v in &mut validators_truncated_stake {
            let frac_stake = v.stake as f64 / total_stake as f64;
            let samples = (frac_stake * k as f64).floor() as u64;
            v.stake -= samples * total_stake / k;
            required_samples.extend((0..samples).map(|_| v.id));
        }
        let all_zero = validators_truncated_stake.iter().all(|v| v.stake == 0);
        let fallback_sampler = if all_zero {
            StakeWeightedSampler::new(validators.clone())
        } else {
            StakeWeightedSampler::new(validators_truncated_stake)
        };
        Self {
            validators,
            required_samples,
            fallback_sampler,
        }
    }
}
impl<F: SamplingStrategy> SamplingStrategy for FaitAccompli1Sampler<F> {
    // NOTE(review): single-sample draws are uniform over all validators; the
    // FA1 distribution only applies to `sample_multiple` — confirm intended.
    fn sample<R: RngCore>(&self, rng: &mut R) -> ValidatorId {
        rng.random_range(0..self.validators.len()) as ValidatorId
    }
    fn sample_info<R: RngCore>(&self, rng: &mut R) -> &ValidatorInfo {
        let index = self.sample(rng) as usize;
        &self.validators[index]
    }
    fn sample_multiple<R: RngCore>(&self, k: usize, rng: &mut R) -> Vec<ValidatorId> {
        // deterministic phase-1 seats first, then fallback-sampled seats
        let mut validators = Vec::with_capacity(k);
        validators.extend_from_slice(&self.required_samples);
        if validators.len() < k {
            let k_prime = k - validators.len();
            let additional_samples = self.fallback_sampler.sample_multiple(k_prime, rng);
            validators.extend_from_slice(&additional_samples);
        }
        validators
    }
    fn name() -> &'static str {
        if F::name() == "stake_weighted" {
            "fa1_iid"
        } else if F::name() == "partition" {
            "fa1_partition"
        } else {
            "fa1"
        }
    }
}
impl<F: SamplingStrategy + Clone> Clone for FaitAccompli1Sampler<F> {
    // Manual impl mirroring what `#[derive(Clone)]` would generate, with an
    // explicit `F: Clone` bound.
    fn clone(&self) -> Self {
        Self {
            validators: self.validators.clone(),
            required_samples: self.required_samples.clone(),
            fallback_sampler: self.fallback_sampler.clone(),
        }
    }
}
/// A sampler that uses the FA2 committee sampling strategy.
///
/// See also: <https://dl.acm.org/doi/pdf/10.1145/3576915.3623194>
pub struct FaitAccompli2Sampler {
    validators: Vec<ValidatorInfo>,
    // Deterministic (FA1 phase) seats; one entry per guaranteed sample.
    required_samples: Vec<ValidatorId>,
    // Validators selected probabilistically with an individual Bernoulli
    // probability each (FA2 phase): (validator id, inclusion probability).
    medium_nodes: Vec<(ValidatorId, f64)>,
    fallback_sampler: StakeWeightedSampler,
}
impl FaitAccompli2Sampler {
    /// Creates a new FA2 sampler instance.
    ///
    /// This is instantiated for a fixed number of samples `k`.
    /// To this end, the FA1 and FA2 pre-processing steps are applied,
    /// and also a stake-weighted IID fallback sampler is generated.
    pub fn new(validators: Vec<ValidatorInfo>, k: u64) -> Self {
        // FA1 step
        let total_stake: Stake = validators.iter().map(|v| v.stake).sum();
        let mut required_samples = Vec::new();
        for v in &validators {
            let frac_stake = v.stake as f64 / total_stake as f64;
            let samples = (frac_stake * k as f64).floor() as u64;
            required_samples.extend((0..samples).map(|_| v.id));
        }
        // FA2 step
        let f = Self::minimize_f(&validators, k);
        let mut medium_nodes = Vec::new();
        for (i, fi) in f.iter().enumerate() {
            let rel_stake = validators[i].stake as f64 / total_stake as f64;
            if *fi > rel_stake {
                // validator rounded up: include it probabilistically so its
                // expected number of seats matches its relative stake
                let p = 1.0 - (fi - rel_stake) * k as f64;
                medium_nodes.push((i as ValidatorId, p));
            }
        }
        // generate stake-weighted IID fallback sampler
        // `r` is the total residual stake fraction not covered by `f`
        let r: f64 = validators
            .iter()
            .enumerate()
            .filter(|(i, v)| v.stake as f64 / total_stake as f64 > f[*i])
            .map(|(i, v)| v.stake as f64 / total_stake as f64 - f[i])
            .sum();
        // renormalize the residual fractions into integer stakes
        let new_stake_distribution: Vec<ValidatorInfo> = validators
            .iter()
            .cloned()
            .enumerate()
            .map(|(i, mut v)| {
                if v.stake as f64 / total_stake as f64 > f[i] {
                    v.stake = ((v.stake as f64 / total_stake as f64 - f[i]) / r
                        * total_stake as f64) as Stake;
                } else {
                    v.stake = 0;
                }
                v
            })
            .collect();
        // no residual stake: keep the original distribution so the
        // fallback sampler stays well-defined
        let fallback_sampler = if r == 0.0 {
            StakeWeightedSampler::new(validators.clone())
        } else {
            StakeWeightedSampler::new(new_stake_distribution)
        };
        Self {
            validators,
            required_samples,
            medium_nodes,
            fallback_sampler,
        }
    }
    // Rounds each validator's relative stake to the nearest multiple of 1/k.
    // NOTE(review): the assert relies on the rounded fractions summing to at
    // most 1.0, which is subject to float rounding — confirm for edge cases.
    fn minimize_f(validators: &[ValidatorInfo], k: u64) -> Vec<f64> {
        let total_stake: Stake = validators.iter().map(|v| v.stake).sum();
        let f: Vec<f64> = validators
            .iter()
            .map(|v| (v.stake as f64 / total_stake as f64 * k as f64).round() / k as f64)
            .collect();
        assert!(f.iter().sum::<f64>() <= 1.0);
        f
    }
}
impl SamplingStrategy for FaitAccompli2Sampler {
    // NOTE(review): single-sample draws are uniform over all validators; the
    // FA2 distribution only applies to `sample_multiple` — confirm intended.
    fn sample<R: RngCore>(&self, rng: &mut R) -> ValidatorId {
        rng.random_range(0..self.validators.len()) as ValidatorId
    }
    fn sample_info<R: RngCore>(&self, rng: &mut R) -> &ValidatorInfo {
        let index = self.sample(rng) as usize;
        &self.validators[index]
    }
    fn sample_multiple<R: RngCore>(&self, k: usize, rng: &mut R) -> Vec<ValidatorId> {
        // add required FA1 samples
        let mut validators = Vec::with_capacity(k);
        validators.extend_from_slice(&self.required_samples);
        // sample medium nodes (FA2 step)
        for (validator, probability) in &self.medium_nodes {
            if rng.random_bool(*probability) {
                validators.push(*validator);
            }
        }
        // sample remaining validators IID stake-weighted
        if validators.len() < k {
            let k_prime = k - validators.len();
            let additional_samples = self.fallback_sampler.sample_multiple(k_prime, rng);
            validators.extend_from_slice(&additional_samples);
        }
        validators
    }
    fn name() -> &'static str {
        "fa2"
    }
}
impl Clone for FaitAccompli2Sampler {
    fn clone(&self) -> Self {
        Self {
            validators: self.validators.clone(),
            required_samples: self.required_samples.clone(),
            medium_nodes: self.medium_nodes.clone(),
            fallback_sampler: self.fallback_sampler.clone(),
        }
    }
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use super::*;
use crate::ValidatorId;
use crate::crypto::aggsig;
use crate::crypto::signature::SecretKey;
use crate::disseminator::turbine::WeightedShuffle;
use crate::network::dontcare_sockaddr;
use crate::network::simulated::stake_distribution::{VALIDATOR_DATA, ValidatorData};
use crate::shredder::TOTAL_SHREDS;
    /// Creates `count` dummy validators with fresh random keys, unit stake,
    /// and don't-care network addresses.
    fn create_validator_info(count: ValidatorId) -> Vec<ValidatorInfo> {
        let mut validators = Vec::new();
        for i in 0..count {
            let sk = SecretKey::new(&mut rand::rng());
            let voting_sk = aggsig::SecretKey::new(&mut rand::rng());
            validators.push(ValidatorInfo {
                id: i,
                stake: 1,
                pubkey: sk.to_pk(),
                voting_pubkey: voting_sk.to_pk(),
                all2all_address: dontcare_sockaddr(),
                disseminator_address: dontcare_sockaddr(),
                repair_request_address: dontcare_sockaddr(),
                repair_response_address: dontcare_sockaddr(),
            });
        }
        validators
    }
#[test]
fn all_same_sampler() {
let validators = create_validator_info(10);
let sampler = AllSameSampler(validators[3].clone());
let mut rng = rand::rng();
for _ in 0..1000 {
assert_eq!(sampler.sample(&mut rng), 3);
assert_eq!(sampler.sample_info(&mut rng).id, 3);
}
for _ in 0..10 {
let sampled_vals = sampler.sample_multiple(TOTAL_SHREDS, &mut rng);
for val in sampled_vals {
assert_eq!(val, 3);
}
}
}
#[test]
fn uniform_sampler() {
// apply Hoeffding's bound to number of different samples
let validators = create_validator_info(1000);
let sampler = UniformSampler::new(validators);
let sampled = sampler.sample_multiple(1000, &mut rand::rng());
let sampled_set: HashSet<_> = sampled.iter().collect();
assert!(sampled_set.len() > 500 && sampled_set.len() < 750);
// apply Chernoff's bound to maximum appearances of any sample
let max_appearances = sampled_set
.iter()
.map(|i| sampled.iter().filter(|v| *v == *i).count())
.max()
.unwrap();
assert!(max_appearances > 1);
assert!(max_appearances < 17);
// bounds should hold even with one high-stake validator
let mut validators = create_validator_info(1000);
validators[0].stake = 1_000_000_000;
let sampler = UniformSampler::new(validators);
let sampled = sampler.sample_multiple(1000, &mut rand::rng());
let sampled_set: HashSet<_> = sampled.iter().collect();
assert!(sampled_set.len() > 500 && sampled_set.len() < 750);
let max_appearances = sampled_set
.iter()
.map(|i| sampled.iter().filter(|v| *v == *i).count())
.max()
.unwrap();
assert!(max_appearances > 1);
assert!(max_appearances < 17);
// bound should hold even with every second validator being high-stake
let mut validators = create_validator_info(1000);
for i in (0..validators.len()).step_by(2) {
validators[i].stake = 1_000_000_000;
}
let sampler = UniformSampler::new(validators);
let sampled = sampler.sample_multiple(1000, &mut rand::rng());
let sampled_set: HashSet<_> = sampled.iter().collect();
assert!(sampled_set.len() > 500 && sampled_set.len() < 750);
let max_appearances = sampled_set
.iter()
.map(|i| sampled.iter().filter(|v| *v == *i).count())
.max()
.unwrap();
assert!(max_appearances > 1);
assert!(max_appearances < 17);
}
#[test]
fn stake_weighted_sampler() {
// with equal stake, bounds from uniform sampling hold
let validators = create_validator_info(1000);
let sampler = StakeWeightedSampler::new(validators);
let sampled = sampler.sample_multiple(1000, &mut rand::rng());
let sampled_set: HashSet<_> = sampled.iter().collect();
assert!(sampled_set.len() > 500 && sampled_set.len() < 750);
let max_appearances = sampled_set
.iter()
.map(|i| sampled.iter().filter(|v| *v == *i).count())
.max()
.unwrap();
assert!(max_appearances > 1);
assert!(max_appearances < 17);
// sampling is done by stake and with replacement
let mut validators = create_validator_info(100);
validators[0].stake = 1_000_000_000;
let sampler = StakeWeightedSampler::new(validators);
assert_eq!(sampler.sample(&mut rand::rng()), 0);
let sampled = sampler.sample_multiple(100, &mut rand::rng());
let sampled0 = sampled.into_iter().filter(|v| *v == 0).count();
assert!(sampled0 == 100);
}
#[test]
fn decaying_acceptance_sampler() {
// max_samples = 1 equivalent to sampling w/o replacement
let validators = create_validator_info(100);
let sampler = DecayingAcceptanceSampler::new(validators, 1.0);
let sampled = sampler.sample_multiple(100, &mut rand::rng());
let sampled_set: HashSet<_> = sampled.iter().copied().collect();
assert_eq!(sampled_set.len(), 100);
// heavy node sampled at most max_samples times
let mut validators = create_validator_info(100);
validators[0].stake = 10_000;
let sampler = DecayingAcceptanceSampler::new(validators, 5.0);
let sampled = sampler.sample_multiple(100, &mut rand::rng());
let sampled0 = sampled.into_iter().filter(|v| *v == 0).count();
assert!(sampled0 <= 5);
// max_samples = inf equivalent to sampling with replacement
let mut validators = create_validator_info(100);
validators[0].stake = 1_000_000_000;
let sampler = DecayingAcceptanceSampler::new(validators, f64::INFINITY);
assert_eq!(sampler.sample(&mut rand::rng()), 0);
let sampled = sampler.sample_multiple(100, &mut rand::rng());
let sampled0 = sampled.into_iter().filter(|v| *v == 0).count();
assert_eq!(sampled0, 100);
// test `clone` and `reset`
// resetting after each iteration should behave the same as `max_samples = inf`
let mut sampler = sampler.clone();
sampler.max_samples = 5.0;
for _ in 0..100 {
sampler.reset();
let id = sampler.sample(&mut rand::rng());
assert_eq!(id, 0);
}
}
#[test]
#[ignore]
fn turbine_sampler() {
const SLICES: usize = 100_000;
let mut rng = rand::rng();
let mut validators = create_validator_info(1000);
// two large nodes with roughly 5% of the stake each
validators[0].stake = 55;
validators[1].stake = 55;
let total_stake = validators.len() as u64 - 2 + validators[0].stake + validators[1].stake;
// calculate work expected with `TurbineSampler`
let sampler = TurbineSampler::new(validators.clone());
let sampled = sampler.sample_multiple(TOTAL_SHREDS * SLICES, &mut rng);
let appearances0 = sampled.iter().filter(|v| **v == 0).count();
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | true |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/disseminator/turbine/weighted_shuffle.rs | src/disseminator/turbine/weighted_shuffle.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//
// Source: https://github.com/anza-xyz/agave (with modifications)
//! The `weighted_shuffle` module provides an iterator over shuffled weights.
use std::borrow::Borrow;
use std::ops::{AddAssign, SubAssign};
use rand::distr::uniform::{SampleUniform, UniformSampler};
use rand::prelude::*;
use crate::Stake;
// Each internal tree node has FANOUT many child nodes with indices:
// `(index << BIT_SHIFT) + 1 ..= (index << BIT_SHIFT) + FANOUT`
// Conversely, for each node, the parent node is obtained by:
// `parent: (index - 1) >> BIT_SHIFT`
// and the subtree weight is stored at
// `offset: (index - 1) & BIT_MASK`
// of its parent node.
const BIT_SHIFT: usize = 4;
const FANOUT: usize = 1 << BIT_SHIFT;
const BIT_MASK: usize = FANOUT - 1;
/// Implements an iterator where indices are shuffled according to their
/// weights:
/// - Returned indices are unique in the range `[0, weights.len())`.
/// - Higher weighted indices tend to appear earlier proportional to their
/// weight.
/// - Zero weighted indices are shuffled and appear only at the end, after
/// non-zero weighted indices.
#[derive(Clone)]
pub struct WeightedShuffle {
    /// Number of "internal" nodes of the tree.
    num_nodes: usize,
    /// Underlying array implementing the tree.
    /// Nodes without children are never accessed and don't need to be
    /// allocated, so `tree.len() <= num_nodes` (equality is possible, e.g.
    /// when the number of weights is an exact multiple of `FANOUT`).
    /// `tree[i][j]` is the sum of all weights in the `j`-th sub-tree of node `i`.
    tree: Vec<[Stake; FANOUT]>,
    /// Current sum of all weights, excluding already sampled ones.
    weight: Stake,
    /// Indices of zero weighted entries.
    zeros: Vec<usize>,
}
impl WeightedShuffle {
    /// If weights are negative or overflow the total sum
    /// they are treated as zero.
    pub fn new<I>(weights: I) -> Self
    where
        I: IntoIterator<Item: Borrow<Stake>>,
        <I as IntoIterator>::IntoIter: ExactSizeIterator,
    {
        let weights = weights.into_iter();
        let (num_nodes, size) = get_num_nodes_and_tree_size(weights.len());
        debug_assert!(size <= num_nodes);
        let mut tree = vec![[0; FANOUT]; size];
        let mut sum: Stake = 0;
        let mut zeros = Vec::default();
        for (k, weight) in weights.enumerate() {
            let weight = *weight.borrow();
            if weight == 0 {
                zeros.push(k);
                continue;
            }
            // A weight that would overflow the running sum is treated as zero.
            sum = if let Some(val) = sum.checked_add(weight) {
                val
            } else {
                zeros.push(k);
                continue;
            };
            // Traverse the tree from the leaf node upwards to the root,
            // updating the sub-tree sums along the way.
            let mut index = num_nodes + k; // leaf node
            while index != 0 {
                let offset = (index - 1) & BIT_MASK;
                index = (index - 1) >> BIT_SHIFT; // parent node
                debug_assert!(index < tree.len());
                // SAFETY: Index is updated to a lesser value towards zero.
                // The bitwise AND operation with `BIT_MASK` ensures that offset
                // is always less than `FANOUT`, which is the size of the inner
                // arrays. As a result, `tree[index][offset]` never goes out of
                // bounds.
                unsafe { tree.get_unchecked_mut(index).get_unchecked_mut(offset) }
                    .add_assign(weight);
            }
        }
        Self {
            num_nodes,
            tree,
            weight: sum,
            zeros,
        }
    }
    // Removes given weight at index k.
    fn remove(&mut self, k: usize, weight: Stake) {
        debug_assert!(self.weight >= weight);
        self.weight -= weight;
        // Traverse the tree from the leaf node upwards to the root,
        // updating the sub-tree sums along the way.
        let mut index = self.num_nodes + k; // leaf node
        while index != 0 {
            let offset = (index - 1) & BIT_MASK;
            index = (index - 1) >> BIT_SHIFT; // parent node
            debug_assert!(self.tree[index][offset] >= weight);
            // SAFETY: Index is updated to a lesser value towards zero. The
            // bitwise AND operation with BIT_MASK ensures that offset is
            // always less than FANOUT, which is the size of the inner arrays.
            // As a result, tree[index][offset] never goes out of bounds.
            unsafe { self.tree.get_unchecked_mut(index).get_unchecked_mut(offset) }
                .sub_assign(weight);
        }
    }
    // Returns smallest index such that sum of weights[..=k] > val,
    // along with its respective weight.
    fn search(&self, mut val: Stake) -> (/*index:*/ usize, /*weight:*/ Stake) {
        debug_assert!(val < self.weight);
        debug_assert!(!self.tree.is_empty());
        // Traverse the tree downwards from the root to the target leaf node.
        let mut index = 0; // root
        loop {
            // Find the child sub-tree containing the `val`-th unit of weight,
            // subtracting the weight of every sub-tree skipped over.
            // SAFETY: function returns if index goes out of bounds.
            let (offset, &node) = unsafe { self.tree.get_unchecked(index) }
                .iter()
                .enumerate()
                .find(|&(_, &node)| {
                    if val < node {
                        true
                    } else {
                        val -= node;
                        false
                    }
                })
                .unwrap();
            // Traverse to the subtree of self.tree[index].
            index = (index << BIT_SHIFT) + offset + 1;
            if self.tree.len() <= index {
                return (index - self.num_nodes, node);
            }
        }
    }
    /// Returns an iterator lazily yielding the weighted shuffle.
    ///
    /// Each `next` call draws one index proportionally to its remaining
    /// weight and removes it from the tree; once all non-zero weights are
    /// exhausted, the zero-weighted indices are yielded in uniformly random
    /// order.
    pub fn shuffle<'a, R: Rng>(&'a mut self, rng: &'a mut R) -> impl Iterator<Item = usize> + 'a {
        std::iter::from_fn(move || {
            if self.weight > 0 {
                let sample =
                    <Stake as SampleUniform>::Sampler::sample_single(0, self.weight, rng).unwrap();
                let (index, weight) = self.search(sample);
                self.remove(index, weight);
                return Some(index);
            }
            if self.zeros.is_empty() {
                return None;
            }
            // Only zero-weighted indices remain: pick one uniformly at random.
            let index =
                <usize as SampleUniform>::Sampler::sample_single(0usize, self.zeros.len(), rng)
                    .unwrap();
            Some(self.zeros.swap_remove(index))
        })
    }
}
// Maps number of items to the number of "internal" nodes of the tree
// which "implicitly" holds those items on the leaves, along with the
// number of internal nodes that actually need to be allocated.
// Nodes without children are never accessed, so the allocated tree size
// (second value) is at most the number of internal nodes (first value).
fn get_num_nodes_and_tree_size(count: usize) -> (/*num_nodes:*/ usize, /*tree_size:*/ usize) {
    // Total number of nodes in all full levels above the last internal level.
    let mut upper_levels: usize = 0;
    // Width (node count) of the current internal level; grows by FANOUT.
    let mut level_width: usize = 1;
    while level_width * FANOUT < count {
        upper_levels += level_width;
        level_width *= FANOUT;
    }
    let num_nodes = upper_levels + level_width;
    // Only the first ceil(count / FANOUT) nodes of the last internal level
    // ever have children, so only those need to be allocated.
    let tree_size = upper_levels + count.div_ceil(FANOUT);
    (num_nodes, tree_size)
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/node.rs | src/bin/node.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::borrow::Cow;
use std::fs::File;
use std::io::Read;
use std::net::SocketAddr;
use std::sync::Arc;
use alpenglow::all2all::TrivialAll2All;
use alpenglow::consensus::{Alpenglow, ConsensusMessage, EpochInfo};
use alpenglow::crypto::aggsig;
use alpenglow::crypto::signature::SecretKey;
use alpenglow::disseminator::Rotor;
use alpenglow::disseminator::rotor::StakeWeightedSampler;
use alpenglow::network::UdpNetwork;
use alpenglow::shredder::Shred;
use alpenglow::{Transaction, ValidatorInfo, logging};
use clap::Parser;
use color_eyre::Result;
use color_eyre::eyre::Context;
use fastrace::collector::Config;
use fastrace::prelude::*;
use fastrace_opentelemetry::OpenTelemetryReporter;
use log::warn;
use opentelemetry::{InstrumentationScope, KeyValue};
use opentelemetry_otlp::{SpanExporter, WithExportConfig};
use opentelemetry_sdk::Resource;
use rand::rng;
use serde::{Deserialize, Serialize};
use tokio::io::{AsyncBufReadExt, AsyncWriteExt};
/// On-disk node configuration (TOML).
#[derive(Clone, Debug, Serialize, Deserialize)]
struct ConfigFile {
    // Validator ID of this node.
    id: u64,
    // Identity secret key of this node.
    identity_key: SecretKey,
    // Aggregate-signature voting key; serialized as an array of bytes.
    #[serde(deserialize_with = "aggsig::SecretKey::from_array_of_bytes")]
    voting_key: aggsig::SecretKey,
    // Base UDP port; `create_node` binds further sockets on consecutive
    // ports above it.
    port: u16,
    // Validator info for the whole cluster (including this node).
    gossip: Vec<ValidatorInfo>,
}
/// Standalone Alpenglow node.
#[derive(Clone, Debug, Parser)]
#[command(version, about, long_about = None)]
struct Args {
    /// Generates configs for a cluster from a file with IPs (one per line).
    /// When set, the node does not start; config files are written and the
    /// program exits.
    #[arg(long)]
    generate_config_files: Option<String>,
    /// Config file name to use.
    #[arg(long)]
    config_name: String,
}
#[tokio::main]
async fn main() -> Result<()> {
    // enable fancy `color_eyre` error messages
    color_eyre::install()?;
    // parse args & load config from file
    let args = Args::parse();
    // config-generation mode: write per-node TOML files and exit
    if let Some(ip_list) = args.generate_config_files {
        create_node_configs(ip_list, args.config_name).await?;
        return Ok(());
    }
    let mut config = File::open(&args.config_name).context("Config file is required")?;
    let mut config_string = String::new();
    config.read_to_string(&mut config_string)?;
    let config: ConfigFile = toml::from_str(&config_string).context("Can not parse config")?;
    // enable `fastrace` tracing, exported via OTLP/gRPC to a local collector
    let reporter = OpenTelemetryReporter::new(
        SpanExporter::builder()
            .with_tonic()
            .with_endpoint("http://127.0.0.1:4317".to_string())
            .with_protocol(opentelemetry_otlp::Protocol::Grpc)
            .with_timeout(opentelemetry_otlp::OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT)
            .build()
            .expect("initialize oltp exporter"),
        Cow::Owned(
            Resource::builder()
                .with_attributes([KeyValue::new("service.name", "alpenglow-main")])
                .build(),
        ),
        InstrumentationScope::builder("alpenglow")
            .with_version(env!("CARGO_PKG_VERSION"))
            .build(),
    );
    fastrace::set_reporter(reporter, Config::default());
    logging::enable_logforth();
    let span_context = SpanContext::random();
    let root_span = Span::root(format!("Alpenglow node {}", config.id), span_context);
    // start the node with the provided config
    let node = create_node(config);
    let cancel_token = node.get_cancel_token();
    let node_task = tokio::spawn(node.run().in_span(root_span));
    // wait for shutdown signal (Ctrl + C), then shut down gracefully
    tokio::signal::ctrl_c().await?;
    warn!("shutting down node");
    cancel_token.cancel();
    node_task.await??;
    // make sure all pending trace spans are exported before exiting
    fastrace::flush();
    Ok(())
}
/// Concrete node type run by this binary: trivial all2all broadcast and
/// Rotor dissemination, all over real UDP sockets.
type Node = Alpenglow<
    TrivialAll2All<UdpNetwork<ConsensusMessage, ConsensusMessage>>,
    Rotor<UdpNetwork<Shred, Shred>, StakeWeightedSampler>,
    UdpNetwork<Transaction, Transaction>,
>;
/// Builds the [`Node`] described by `config`.
///
/// Binds five consecutive UDP ports starting at `config.port`: all2all,
/// disseminator, repair, repair requests, and transaction ingestion.
///
/// NOTE(review): `create_node_configs` only advertises four ports per node
/// (`repair_request_address` at +2, `repair_response_address` at +3), while
/// here the repair network is bound at +2 and the repair *request* network
/// at +3 — confirm the request/response port mapping is consistent with
/// what `Alpenglow::new` expects.
fn create_node(config: ConfigFile) -> Node {
    // turn ConfigFile into an actual node
    let epoch_info = Arc::new(EpochInfo::new(config.id, config.gossip.clone()));
    let start_port = config.port;
    let network = UdpNetwork::new(start_port);
    let all2all = TrivialAll2All::new(config.gossip, network);
    let network = UdpNetwork::new(start_port + 1);
    let disseminator = Rotor::new(network, epoch_info.clone());
    let repair_network = UdpNetwork::new(start_port + 2);
    let repair_request_network = UdpNetwork::new(start_port + 3);
    let txs_receiver = UdpNetwork::new(start_port + 4);
    Alpenglow::new(
        config.identity_key,
        config.voting_key,
        all2all,
        disseminator,
        repair_network,
        repair_request_network,
        epoch_info,
        txs_receiver,
    )
}
/// Generates one TOML config file per line of `socket_list_filename`.
///
/// Each line must parse as a `SocketAddr`; validator IDs are assigned in
/// file order. Fresh identity and voting keys are generated for every node,
/// and the complete validator set is embedded in every config file — so all
/// files must be regenerated together.
async fn create_node_configs(
    socket_list_filename: String,
    config_base_filename: String,
) -> color_eyre::Result<()> {
    // prepare ValidatorInfo for all nodes
    let mut rng = rng();
    let mut sks = Vec::new();
    let mut voting_sks = Vec::new();
    let mut ports = Vec::new();
    let mut validators = Vec::new();
    let mut socket_list =
        tokio::io::BufReader::new(tokio::fs::File::open(socket_list_filename).await?).lines();
    for id in 0.. {
        let Some(line) = socket_list.next_line().await? else {
            break;
        };
        let sockaddr = line
            .parse::<SocketAddr>()
            .context("Can not parse socket list")?;
        sks.push(SecretKey::new(&mut rng));
        ports.push(sockaddr.port());
        voting_sks.push(aggsig::SecretKey::new(&mut rng));
        // Address layout: all2all at the base port, one port per service
        // above it.
        // NOTE(review): `create_node` binds repair at +2, repair requests at
        // +3 and transactions at +4 — verify this advertised layout matches
        // what each service actually listens on.
        validators.push(ValidatorInfo {
            id,
            stake: 1,
            pubkey: sks[id as usize].to_pk(),
            voting_pubkey: voting_sks[id as usize].to_pk(),
            all2all_address: sockaddr,
            disseminator_address: SocketAddr::new(sockaddr.ip(), sockaddr.port() + 1),
            repair_request_address: SocketAddr::new(sockaddr.ip(), sockaddr.port() + 2),
            repair_response_address: SocketAddr::new(sockaddr.ip(), sockaddr.port() + 3),
        });
    }
    // write config files
    for id in 0..sks.len() as u64 {
        let mut file = tokio::fs::File::create(format!("{config_base_filename}_{id}.toml")).await?;
        let conf = ConfigFile {
            id,
            port: ports[id as usize],
            identity_key: sks[id as usize].clone(),
            voting_key: voting_sks[id as usize].clone(),
            gossip: validators.clone(),
        };
        let serialized = toml::to_string(&conf)?;
        file.write_all(serialized.as_bytes()).await?;
        file.sync_data().await?;
    }
    Ok(())
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/performance_test.rs | src/bin/performance_test.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::VecDeque;
use std::sync::Arc;
use std::time::{Duration, Instant};
use alpenglow::all2all::TrivialAll2All;
use alpenglow::consensus::{ConsensusMessage, EpochInfo};
use alpenglow::crypto::aggsig;
use alpenglow::crypto::signature::SecretKey;
use alpenglow::disseminator::Rotor;
use alpenglow::disseminator::rotor::StakeWeightedSampler;
use alpenglow::network::simulated::SimulatedNetworkCore;
use alpenglow::network::{SimulatedNetwork, UdpNetwork, localhost_ip_sockaddr};
use alpenglow::shredder::Shred;
use alpenglow::types::Slot;
use alpenglow::{Alpenglow, Transaction, ValidatorInfo, logging};
use color_eyre::Result;
use log::info;
#[tokio::main]
async fn main() -> Result<()> {
    // enable fancy `color_eyre` error messages
    color_eyre::install()?;
    logging::enable_logforth_stderr();
    // run an 11-node simulated-latency liveness test
    latency_test(11).await;
    Ok(())
}
/// Node type under test: simulated networks for consensus traffic,
/// real UDP sockets for transaction ingestion.
type TestNode = Alpenglow<
    TrivialAll2All<SimulatedNetwork<ConsensusMessage, ConsensusMessage>>,
    Rotor<SimulatedNetwork<Shred, Shred>, StakeWeightedSampler>,
    UdpNetwork<Transaction, Transaction>,
>;
/// Spins up `count` test nodes connected through a simulated network core.
///
/// Simulated pairwise latencies model three regions: nodes 0..6 (20 ms),
/// 6..10 (60 ms between each other), everything else 100 ms. Transaction
/// and repair traffic goes over real UDP sockets on arbitrary ports.
async fn create_test_nodes(count: u64) -> Vec<TestNode> {
    // open sockets with arbitrary ports
    let mut tx_receivers = (0..count)
        .map(|_| UdpNetwork::new_with_any_port())
        .collect::<VecDeque<_>>();
    let mut repair_networks = (0..count)
        .map(|_| UdpNetwork::new_with_any_port())
        .collect::<VecDeque<_>>();
    let mut repair_request_networks = (0..count)
        .map(|_| UdpNetwork::new_with_any_port())
        .collect::<VecDeque<_>>();
    // first `count` networks are for all2all and the next `count` networks are for disseminator
    let core = Arc::new(SimulatedNetworkCore::default().with_packet_loss(0.0));
    let mut all2all_networks = VecDeque::new();
    let mut disseminator_networks = VecDeque::new();
    for i in 0..count {
        all2all_networks.push_back(core.join_unlimited(i).await);
        disseminator_networks.push_back(core.join_unlimited(i + count).await);
    }
    // configure pairwise latencies for both the all2all channel (a, b) and
    // the disseminator channel (a + count, b + count)
    for a in 0..count {
        for b in 0..count {
            if a < 6 && b < 6 {
                core.set_latency(a, b, Duration::from_millis(20)).await;
                core.set_latency(a + count, b + count, Duration::from_millis(20))
                    .await;
            } else if (6..10).contains(&a) && (6..10).contains(&b) {
                core.set_latency(a, b, Duration::from_millis(60)).await;
                core.set_latency(a + count, b + count, Duration::from_millis(60))
                    .await;
            } else {
                core.set_latency(a, b, Duration::from_millis(100)).await;
                core.set_latency(a + count, b + count, Duration::from_millis(100))
                    .await;
            }
        }
    }
    // prepare validator info for all nodes
    let mut rng = rand::rng();
    let mut sks = Vec::new();
    let mut voting_sks = Vec::new();
    let mut validators = Vec::new();
    for id in 0..count {
        sks.push(SecretKey::new(&mut rng));
        voting_sks.push(aggsig::SecretKey::new(&mut rng));
        let all2all_address = localhost_ip_sockaddr((id).try_into().unwrap());
        let disseminator_address = localhost_ip_sockaddr((id + count).try_into().unwrap());
        // NOTE(review): both repair addresses use the port of
        // `repair_networks`, and the ports of `repair_request_networks` are
        // never advertised — one of the two lines below should probably use
        // `repair_request_networks[id as usize].port()`; confirm against
        // `Alpenglow::new`'s expectations.
        let repair_request_address = localhost_ip_sockaddr(repair_networks[id as usize].port());
        let repair_response_address = localhost_ip_sockaddr(repair_networks[id as usize].port());
        validators.push(ValidatorInfo {
            id,
            stake: 1,
            pubkey: sks[id as usize].to_pk(),
            voting_pubkey: voting_sks[id as usize].to_pk(),
            all2all_address,
            disseminator_address,
            repair_request_address,
            repair_response_address,
        });
    }
    // turn validator info into actual nodes
    validators
        .iter()
        .map(|v| {
            let epoch_info = Arc::new(EpochInfo::new(v.id, validators.clone()));
            let all2all =
                TrivialAll2All::new(validators.clone(), all2all_networks.pop_front().unwrap());
            let disseminator = Rotor::new(
                disseminator_networks.pop_front().unwrap(),
                epoch_info.clone(),
            );
            let repair_network = repair_networks.pop_front().unwrap();
            let repair_request_network = repair_request_networks.pop_front().unwrap();
            let txs_receiver = tx_receivers.pop_front().unwrap();
            Alpenglow::new(
                sks[v.id as usize].clone(),
                voting_sks[v.id as usize].clone(),
                all2all,
                disseminator,
                repair_network,
                repair_request_network,
                epoch_info,
                txs_receiver,
            )
        })
        .collect()
}
/// Runs `num_nodes` nodes for 60 seconds and logs finalization progress.
///
/// A watcher task polls every node's pool each millisecond and logs the
/// time between consecutive finalized slots. After the run all nodes are
/// cancelled; there is no hard assertion on progress beyond the logs.
async fn latency_test(num_nodes: usize) {
    // start `num_nodes` nodes
    let nodes = create_test_nodes(num_nodes as u64).await;
    let mut node_cancel_tokens = Vec::new();
    let mut pools = Vec::new();
    for node in nodes {
        pools.push(node.get_pool());
        node_cancel_tokens.push(node.get_cancel_token());
        tokio::spawn(node.run());
    }
    // spawn a thread checking pool for progress
    let cancel_tokens = node_cancel_tokens.clone();
    let liveness_tester = tokio::spawn(async move {
        // highest finalized slot and time of last progress, per node
        let mut finalized = vec![Slot::new(0); pools.len()];
        let mut times = vec![Instant::now(); pools.len()];
        loop {
            tokio::time::sleep(tokio::time::Duration::from_millis(1)).await;
            for (i, pool) in pools.iter().enumerate() {
                if cancel_tokens[i].is_cancelled() {
                    continue;
                }
                let new_finalized = pool.read().await.finalized_slot();
                if new_finalized > finalized[i] {
                    info!(
                        "node {} finalized new block {} after {:.2} ms",
                        i,
                        new_finalized,
                        times[i].elapsed().as_secs_f64() * 1000.0
                    );
                    finalized[i] = new_finalized;
                    times[i] = Instant::now();
                }
            }
        }
    });
    // let it run for a while
    let delay = tokio::time::Duration::from_secs(60);
    tokio::time::sleep(delay).await;
    liveness_tester.abort();
    assert!(liveness_tester.await.unwrap_err().is_cancelled());
    // kill other nodes
    for token in node_cancel_tokens {
        token.cancel();
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/all2all_test.rs | src/bin/all2all_test.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
use core::f64;
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use std::time::Duration;
use alpenglow::logging;
use clap::Parser;
use color_eyre::Result;
use log::{debug, info};
use time::OffsetDateTime;
use tokio::net::UdpSocket;
use tokio::sync::{Mutex, RwLock};
use tokio::task::JoinSet;
use wincode::{SchemaRead, SchemaWrite};
// TODO: allow for different leader per round
const LEADER: usize = 0;
// First data port; node `i` on a machine binds `BASE_PORT + i`, and the
// ping test uses `BASE_PORT - 1`.
const BASE_PORT: u16 = 8000;
// Number of block-production rounds the leader runs.
const ROUNDS: usize = 1000;
const NODES_PER_MACHINE: usize = 1;
const MACHINES: usize = 30;
// Public IPs of the test machines, indexed by machine id.
const MACHINE_IPS: [&str; MACHINES] = [
    "155.138.138.58",
    "209.250.243.9",
    "45.63.40.210",
    "95.179.181.37",
    "78.141.219.197",
    "209.250.255.181",
    "199.247.24.163",
    "45.63.42.105",
    "45.77.138.184",
    "185.92.222.84",
    "95.179.129.189",
    "95.179.158.247",
    "185.92.223.86",
    "108.61.117.221",
    "95.179.135.249",
    "95.179.156.58",
    "209.250.240.143",
    "95.179.187.221",
    "108.61.165.164",
    "95.179.144.106",
    "136.244.99.214",
    "95.179.130.38",
    "108.61.166.146",
    "209.250.244.52",
    "95.179.176.12",
    "45.32.237.105",
    "199.247.30.136",
    "45.32.236.104",
    "136.244.106.65",
    "136.244.99.112",
];
const TOTAL_NODES: usize = MACHINES * NODES_PER_MACHINE;
// Receive buffer size; fits a typical MTU-sized UDP datagram.
const MSG_BUFFER_BYTES: usize = 1500;
/// All-to-all vote dissemination benchmark node.
#[derive(Debug, Parser)]
#[command(version, about, long_about = None)]
struct Args {
    /// Index of this machine in `MACHINE_IPS`.
    #[arg(long)]
    id: usize,
}
/// Wire messages exchanged between test nodes.
#[derive(SchemaRead, SchemaWrite)]
enum Message {
    Ping(PingMsg),
    Pong(PongMsg),
    Block(BlockMsg),
    Vote(VoteMsg),
}
/// Latency probe; also used for UDP hole punching.
#[derive(SchemaRead, SchemaWrite)]
struct PingMsg {
    machine: usize,
    timestamp_nanos: i128,
}
/// Reply to a [`PingMsg`]; echoes the original ping timestamp.
#[derive(SchemaRead, SchemaWrite)]
struct PongMsg {
    machine: usize,
    timestamp_nanos: i128,
    timestamp_ping_nanos: i128,
}
/// Block announcement broadcast by the leader once per round.
#[derive(SchemaRead, SchemaWrite)]
struct BlockMsg {
    machine: usize,
    timestamp_nanos: i128,
    round: usize,
}
/// Vote broadcast by every node in response to a block; carries dummy
/// signature/hash bytes so the message has a realistic wire size.
#[derive(SchemaRead, SchemaWrite)]
struct VoteMsg {
    machine: usize,
    timestamp_nanos: i128,
    block_timestamp: i128,
    round: usize,
    signature: [u8; 64],
    hash: [u8; 32],
}
#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();
    // enable fancy `color_eyre` error messages
    color_eyre::install()?;
    logging::enable_logforth_stderr();
    // run this machine's test node until all its tasks finish
    let machine = Machine::new(args.id);
    machine.run().await?;
    Ok(())
}
/// One physical test machine, identified by its index into `MACHINE_IPS`.
struct Machine {
    id: usize,
}
impl Machine {
    /// Creates a handle for the machine with the given index.
    const fn new(id: usize) -> Self {
        Self { id }
    }
    /// Main event loop of this test node.
    ///
    /// Binds `NODES_PER_MACHINE` UDP sockets, punches NAT holes by pinging
    /// every peer for ~60 s, then for every socket spawns a receive loop:
    /// on every `Block` it floods 100 copies of a `Vote` to all peers and
    /// records vote-propagation delays; node 0 also prints per-round stats.
    /// The machine hosting `LEADER` additionally spawns the block-production
    /// loop that broadcasts one `Block` every 400 ms for `ROUNDS` rounds.
    async fn run(&self) -> std::io::Result<()> {
        // shared stats: packet count, worst-case and summed vote delay (ms)
        let packets_received = Arc::new(AtomicUsize::new(0));
        let wc_vote_delay = Arc::new(Mutex::new(0.0));
        let sum_vote_delay = Arc::new(Mutex::new(0.0));
        let mut tasks = JoinSet::new();
        let mut sockets = Vec::new();
        let _ = self.ping_test().await;
        // open UDP sockets
        for node in 0..NODES_PER_MACHINE {
            let port = BASE_PORT + u16::try_from(node).unwrap();
            let addr = format!("0.0.0.0:{port}");
            let socket = Arc::new(UdpSocket::bind(&addr).await?);
            sockets.push(socket.clone());
            info!("Node started on {}", socket.local_addr()?);
        }
        // UDP hole punching
        info!("UDP hole punching...");
        for _ in 0..60 {
            tokio::time::sleep(Duration::from_millis(1000)).await;
            for socket in &sockets {
                for id in 0..MACHINES {
                    for d_port in 0..NODES_PER_MACHINE {
                        let ip = get_machine_ip(self.id, id);
                        let port = BASE_PORT + u16::try_from(d_port).unwrap();
                        let rcv_addr = format!("{ip}:{port}");
                        let time = OffsetDateTime::now_utc();
                        let timestamp_nanos = time.unix_timestamp_nanos();
                        let msg = Message::Ping(PingMsg {
                            machine: self.id,
                            timestamp_nanos,
                        });
                        let bytes = wincode::serialize(&msg).unwrap();
                        let _ = socket.send_to(&bytes, rcv_addr).await.unwrap();
                    }
                }
            }
        }
        for (node, socket) in sockets.iter().cloned().enumerate() {
            // listen for and respond to incoming messages
            let pr = packets_received.clone();
            let wcvd = wc_vote_delay.clone();
            let svd = sum_vote_delay.clone();
            let self_id = self.id;
            tasks.spawn(async move {
                let mut round = 0;
                let mut buf = [0; MSG_BUFFER_BYTES];
                loop {
                    let (len, addr) = socket.recv_from(&mut buf).await.unwrap();
                    let msg: Message = wincode::deserialize(&buf[..len]).unwrap();
                    match msg {
                        Message::Vote(vote) => {
                            // track delay since the vote was sent...
                            let timestamp = vote.timestamp_nanos;
                            let vote_time =
                                OffsetDateTime::from_unix_timestamp_nanos(timestamp).unwrap();
                            let rcv_time = OffsetDateTime::now_utc();
                            let delay = (rcv_time - vote_time).as_seconds_f64() * 1000.0;
                            debug!("vote arrived with delay {delay:.1} ms");
                            debug!("{len} bytes received from {addr}");
                            pr.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                            // ...and since the corresponding block was produced
                            let block_timestamp = vote.block_timestamp;
                            let block_time =
                                OffsetDateTime::from_unix_timestamp_nanos(block_timestamp).unwrap();
                            let delay = (rcv_time - block_time).as_seconds_f64() * 1000.0;
                            debug!("vote seen {delay:.1} ms after block production");
                            let mut wcvd_guard = wcvd.lock().await;
                            if delay > *wcvd_guard {
                                *wcvd_guard = delay;
                            }
                            drop(wcvd_guard);
                            let mut svd_guard = svd.lock().await;
                            *svd_guard += delay;
                        }
                        Message::Block(block) => {
                            // assert_eq!(block.round, round);
                            debug!("received block for round {round}");
                            // print stats for this round
                            if node == 0 {
                                let packets_expected =
                                    100 * (round + 1) * NODES_PER_MACHINE * TOTAL_NODES;
                                let packets_received = pr.load(std::sync::atomic::Ordering::SeqCst);
                                let packets_lost =
                                    packets_expected.saturating_sub(packets_received);
                                let packet_loss =
                                    100.0 * packets_lost as f64 / packets_expected as f64;
                                info!(
                                    "Packet loss in round {}/{}: {} ({:.1}%)",
                                    round + 1,
                                    ROUNDS,
                                    packets_lost,
                                    packet_loss
                                );
                                info!("WC vote delay: {:.1} ms", *wcvd.lock().await);
                                info!(
                                    "Avg. vote delay: {:.1} ms",
                                    *svd.lock().await / packets_received as f64
                                );
                            }
                            // broadcast vote
                            for _ in 0..100 {
                                for id in 0..MACHINES {
                                    for d_port in 0..NODES_PER_MACHINE {
                                        let port = BASE_PORT + u16::try_from(d_port).unwrap();
                                        let ip = get_machine_ip(self_id, id);
                                        let rcv_addr = format!("{ip}:{port}");
                                        let time = OffsetDateTime::now_utc();
                                        let timestamp_nanos = time.unix_timestamp_nanos();
                                        // dummy signature/hash payload gives the
                                        // vote a realistic wire size
                                        let msg = Message::Vote(VoteMsg {
                                            machine: self_id,
                                            timestamp_nanos,
                                            block_timestamp: block.timestamp_nanos,
                                            round,
                                            signature: [
                                                1, 6, 9, 15, 18, 29, 30, 33, 37, 47, 51, 53, 54,
                                                56, 73, 80, 81, 93, 94, 95, 102, 104, 106, 107,
                                                109, 111, 115, 117, 120, 134, 136, 141, 148, 150,
                                                152, 153, 165, 173, 181, 182, 183, 186, 203, 207,
                                                208, 210, 211, 218, 219, 221, 223, 224, 225, 228,
                                                229, 231, 232, 242, 246, 248, 251, 252, 254, 255,
                                            ],
                                            hash: [
                                                8, 22, 26, 34, 38, 42, 46, 56, 74, 75, 76, 96, 106,
                                                110, 114, 133, 154, 163, 164, 170, 179, 190, 207,
                                                208, 210, 214, 217, 230, 234, 238, 241, 250,
                                            ],
                                        });
                                        let bytes = wincode::serialize(&msg).unwrap();
                                        let _ = socket.send_to(&bytes, rcv_addr).await.unwrap();
                                        // NOTE(review): `len` is the size of the
                                        // last *received* packet, not of the vote
                                        // just sent — `bytes.len()` is probably
                                        // what was meant here.
                                        debug!("vote of {len} bytes sent");
                                    }
                                }
                                tokio::time::sleep(Duration::from_millis(1)).await;
                            }
                            round += 1;
                        }
                        _ => {}
                    }
                }
            });
            // leader block production loop
            if self.id * NODES_PER_MACHINE + node == LEADER {
                let self_id = self.id;
                let socket = sockets[node].clone();
                tasks.spawn(async move {
                    info!("Leader waiting for 30s...");
                    tokio::time::sleep(Duration::from_secs(30)).await;
                    info!("Leader block production started!");
                    for round in 0..ROUNDS {
                        tokio::time::sleep(Duration::from_millis(400)).await;
                        let time = OffsetDateTime::now_utc();
                        let timestamp_nanos = time.unix_timestamp_nanos();
                        let msg = Message::Block(BlockMsg {
                            machine: self_id,
                            timestamp_nanos,
                            round,
                        });
                        let bytes = wincode::serialize(&msg).unwrap();
                        for id in 0..MACHINES {
                            for d_port in 0..NODES_PER_MACHINE {
                                let port = BASE_PORT + u16::try_from(d_port).unwrap();
                                let ip = get_machine_ip(self_id, id);
                                let rcv_addr = format!("{ip}:{port}");
                                debug!("sending block to {rcv_addr}");
                                let _ = socket.send_to(&bytes, rcv_addr).await.unwrap();
                            }
                        }
                    }
                });
            }
        }
        let _ = tasks.join_all().await;
        Ok(())
    }
/// Determine minimum ping between self and other machines.
async fn ping_test(&self) -> Vec<f64> {
info!("Performing ping test...");
let pings = Arc::new(RwLock::new(vec![f64::INFINITY; MACHINES]));
let addr = format!("0.0.0.0:{}", BASE_PORT - 1);
let socket = Arc::new(UdpSocket::bind(&addr).await.unwrap());
let listener = {
let socket = socket.clone();
let pings = pings.clone();
let self_id = self.id;
tokio::task::spawn(async move {
let mut buf = [0; MSG_BUFFER_BYTES];
while let Ok((len, addr)) = socket.recv_from(&mut buf).await {
let msg: Message = wincode::deserialize(&buf[..len]).unwrap();
match msg {
Message::Ping(ping) => {
let ip = MACHINE_IPS[ping.machine];
let ping_addr = format!("{}:{}", ip, BASE_PORT - 1);
let time = OffsetDateTime::now_utc();
let timestamp_nanos = time.unix_timestamp_nanos();
let response = Message::Pong(PongMsg {
machine: self_id,
timestamp_nanos,
timestamp_ping_nanos: ping.timestamp_nanos,
});
let bytes = wincode::serialize(&response).unwrap();
let _ = socket.send_to(&bytes, ping_addr).await.unwrap();
}
Message::Pong(pong) => {
let timestamp1 = pong.timestamp_ping_nanos;
let time1 =
OffsetDateTime::from_unix_timestamp_nanos(timestamp1).unwrap();
let timestamp2 = pong.timestamp_nanos;
let time2 =
OffsetDateTime::from_unix_timestamp_nanos(timestamp2).unwrap();
let now = OffsetDateTime::now_utc();
let rtt = (now - time1).as_seconds_f64() * 1000.0;
let ping_time = rtt / 2.0;
let p1 = (time2 - time1).as_seconds_f64() * 1000.0;
let p2 = (now - time2).as_seconds_f64() * 1000.0;
debug!(
"ping of {ping_time:.1} ms ({p1:.2} + {p2:.2})observed for {addr}"
);
if ping_time < pings.read().await[pong.machine] {
pings.write().await[pong.machine] = ping_time;
}
}
_ => {}
}
}
})
};
let sender = {
let pings = pings.clone();
let self_id = self.id;
tokio::task::spawn(async move {
for round in 0.. {
tokio::time::sleep(Duration::from_millis(500)).await;
for id in 0..MACHINES {
for _ in 0..3 {
tokio::time::sleep(Duration::from_millis(10)).await;
let ip = get_machine_ip(self_id, id);
let rcv_addr = format!("{}:{}", ip, BASE_PORT - 1);
let time = OffsetDateTime::now_utc();
let timestamp_nanos = time.unix_timestamp_nanos();
let msg = Message::Ping(PingMsg {
machine: self_id,
timestamp_nanos,
});
let bytes = wincode::serialize(&msg).unwrap();
let _ = socket.send_to(&bytes, rcv_addr).await.unwrap();
}
}
let machines_missing = pings
.read()
.await
.iter()
.filter(|p| p.is_infinite())
.count();
if round > 10 && machines_missing == 0 {
break;
}
info!("{machines_missing} machines missing");
}
tokio::time::sleep(Duration::from_millis(2000)).await;
})
};
tokio::select! {
_ = sender => {},
_ = listener => unreachable!(),
}
// print determined pings
for id in 0..MACHINES {
let ping = pings.read().await[id];
let ip = get_machine_ip(self.id, id);
info!("Ping to {ip}: {ping:.1} ms");
}
pings.read().await.clone()
}
}
/// Returns the IP address to use when machine `self_id` contacts `other_id`.
///
/// Talking to ourselves goes via loopback; any other machine is reached
/// through its entry in `MACHINE_IPS`.
const fn get_machine_ip(self_id: usize, other_id: usize) -> &'static str {
    match other_id == self_id {
        true => "127.0.0.1",
        false => MACHINE_IPS[other_id],
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/workload_generator.rs | src/bin/workload_generator.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::net::{SocketAddr, UdpSocket};
use std::thread::sleep;
use std::time::{Duration, Instant};
use alpenglow::{Transaction, logging};
use clap::Parser;
use color_eyre::Result;
use log::info;
use rand::RngCore;
/// Workload generator for benchmarks.
///
/// Sends randomly-filled transactions to a single validator at a
/// configurable rate and size.
#[derive(Debug, Clone, Parser)]
#[command(version, about, long_about = None)]
struct Args {
    /// Validator address for receiving transactions.
    #[arg(long)]
    validator: SocketAddr,
    /// Initial wait time in seconds, before sending any transactions.
    #[arg(long)]
    initial_delay_secs: Option<u64>,
    /// Target throughput in transactions per second (default: 100).
    #[arg(long)]
    transactions_per_second: Option<u64>,
    /// Bytes per transaction (default: 512).
    #[arg(long)]
    transaction_size: Option<u64>,
}
/// Entry point: floods the configured validator with random transactions.
///
/// Runs forever, pacing sends so the average rate matches the target TPS,
/// and logging throughput statistics roughly once per second.
fn main() -> Result<()> {
    // enable fancy `color_eyre` error messages
    color_eyre::install()?;
    logging::enable_logforth();
    // parse args
    let args = Args::parse();
    let validator_addr = args.validator;
    let target_tps = args.transactions_per_second.unwrap_or(100);
    let bytes_per_tx = args.transaction_size.unwrap_or(512);
    // create socket on arbitrary port
    let socket = UdpSocket::bind("0.0.0.0:0")?;
    // prevent blocking forever
    socket.set_write_timeout(Some(Duration::from_secs(1)))?;
    // initial delay
    sleep(Duration::from_secs(args.initial_delay_secs.unwrap_or(0)));
    let start_time = Instant::now();
    let mut last_report = Instant::now();
    let mut txs_sent = 0;
    let mut rng = rand::rng();
    let mut buf = vec![0; bytes_per_tx as usize];
    loop {
        rng.fill_bytes(&mut buf);
        let tx = Transaction(buf.clone());
        let msg_bytes = wincode::serialize(&tx)?;
        // propagate send errors via `?` instead of panicking: `main` already
        // returns `Result`, so e.g. a write timeout surfaces as a proper error
        socket.send_to(&msg_bytes, validator_addr)?;
        txs_sent += 1;
        // pace sends: sleep if we are ahead of the target rate
        let elapsed = start_time.elapsed().as_secs_f64();
        let expected_elapsed = txs_sent as f64 / target_tps as f64;
        if elapsed < expected_elapsed {
            let delta = expected_elapsed - elapsed;
            sleep(Duration::from_secs_f64(delta));
        }
        // report throughput roughly once per second
        if last_report.elapsed().as_secs() > 1 {
            last_report = Instant::now();
            let tps = txs_sent as f64 / elapsed;
            let total_data = bytes_per_tx as f64 * txs_sent as f64 / 1e6;
            let data_rate = total_data / elapsed * 8.0;
            info!(
                "sent {txs_sent} txs (TPS: {tps:.1}) | data: {total_data:.1} MB ({data_rate:.1} Mb/s)",
            );
        }
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/alpenglow.rs | src/bin/simulations/alpenglow.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Simulations for the Alpenglow protocol.
//!
//! This implements the following simulations:
//! - Latency simulation for the entire happy path of the protocol.
//! - Bandwidth simulation calculating required bandwidth for voting and block dissemination.
mod bandwidth;
mod latency;
pub use bandwidth::*;
pub use latency::*;
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/discrete_event_simulator.rs | src/bin/simulations/discrete_event_simulator.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! General discrete-event simulator.
//!
//! This module provides a generic discrete-event simulator.
//! It can be used to simulate different protocols and configurations.
mod resources;
mod timings;
use std::cmp::Reverse;
use std::fmt::Debug;
use std::hash::Hash;
use std::sync::{RwLock, RwLockReadGuard};
use alpenglow::network::simulated::ping_data::{PingServer, get_ping};
use alpenglow::{Stake, ValidatorId, ValidatorInfo};
use rand::prelude::*;
use rayon::prelude::*;
pub use self::resources::Resources;
pub use self::timings::{SimTime, TimingStats, Timings};
/// Wrapper trait for a specific protocol simulation.
pub trait Protocol {
    /// Event type driving the simulation; shares `Params`/`Instance` with this protocol.
    type Event: Event<Params = Self::Params, Instance = Self::Instance>;
    /// Stage type grouping events into sequential phases.
    type Stage: Stage<Event = Self::Event, Params = Self::Params>;
    /// Parameters shared by all instances of the protocol.
    type Params;
    /// One concrete sampled configuration of the protocol.
    type Instance;
    /// Factory that samples `Instance`s for a given set of `Params`.
    type Builder: Builder<Params = Self::Params, Instance = Self::Instance>;
}
/// Builder for instances of a protocol with a specific set of parameters.
pub trait Builder {
    /// Parameters the builder was configured with.
    type Params;
    /// Concrete protocol instance type this builder produces.
    type Instance;
    /// Samples a new instance from the builder.
    ///
    /// Randomness comes from the supplied `rng`, so repeated calls may yield
    /// different instances.
    fn build(&self, rng: &mut impl Rng) -> Self::Instance;
    /// Returns the parameters used by this builder.
    fn params(&self) -> &Self::Params;
}
/// Events that can occur in a protocol simulation.
///
/// Each event has a name, a list of dependencies, and a calculation function.
/// The simulation engine will pass the timings of its dependencies to the calculation function.
/// The calculation function returns the timings of this event at each validator.
pub trait Event: Clone + Copy + Debug + Eq + Hash {
    /// Parameters of the protocol this event belongs to.
    type Params;
    /// Sampled protocol instance this event is evaluated against.
    type Instance;
    /// Returns a printable name for the event.
    ///
    /// This will be used as a column label in the output CSV file.
    fn name(&self) -> String;
    /// Returns `true` iff the event should be tracked for timing stats.
    fn should_track_stats(&self) -> bool;
    /// Returns a list of dependency event IDs.
    fn dependencies(&self, params: &Self::Params) -> Vec<Self>;
    /// Calculates timing vector given dependencies.
    ///
    /// `dep_timings` holds one slice per entry of [`Event::dependencies`], in the
    /// same order; each slice contains one [`SimTime`] per validator.
    /// Returns the time at which this event occurs at each validator.
    fn calculate_timing(
        &self,
        start_time: SimTime,
        dep_timings: &[&[SimTime]],
        instance: &Self::Instance,
        resources: &mut Resources,
        environment: &SimulationEnvironment,
    ) -> Vec<SimTime>;
}
/// Sequential stages of a protocol simulation.
///
/// Each stage contains one or more events.
/// Events in later stages can only depend on events from earlier stages.
pub trait Stage: Clone + Copy + Debug + Eq + Hash {
    type Event: Event;
    type Params;

    /// Returns a list of all stages, in order.
    ///
    /// Follows the chain [`Stage::first`] -> [`Stage::next`] until it ends.
    fn all() -> Vec<Self> {
        let mut stages = vec![Self::first()];
        while let Some(next) = stages.last().expect("stages is non-empty").next() {
            stages.push(next);
        }
        stages
    }

    /// Returns the first stage.
    fn first() -> Self;

    /// Returns the next stage, if any.
    fn next(&self) -> Option<Self>;

    /// Returns a list of all events within the stage.
    fn events(&self, params: &Self::Params) -> Vec<Self::Event>;
}
/// Matrix-based discrete-event simulation engine.
pub struct SimulationEngine<P: Protocol> {
    // factory producing one protocol instance per simulation iteration
    builder: P::Builder,
    // validators, ping data, and optional bandwidth limits
    environment: SimulationEnvironment,
    // timing statistics aggregated across runs; lock allows parallel `run` calls
    stats: RwLock<TimingStats<P>>,
}
impl<P: Protocol> SimulationEngine<P> {
    /// Creates a new simulation engine.
    ///
    /// The `environment` holds the validators, network parameters, etc.
    pub fn new(builder: P::Builder, environment: SimulationEnvironment) -> Self {
        Self {
            builder,
            environment,
            stats: RwLock::new(TimingStats::default()),
        }
    }
    /// Runs the simulation `iterations` times.
    ///
    /// Samples a new [`Protocol::Instance`] from the [`Protocol::Builder`] for each iteration.
    /// Timing stats accumulate into [`Self::stats`] via each [`Self::run`] call.
    pub fn run_many_sequential(&self, iterations: u64) {
        let mut rng = rand::rng();
        // the `Timings` buffer is reused across iterations; `run` clears it
        let mut timings = Timings::default();
        for _ in 0..iterations {
            let instance = self.builder.build(&mut rng);
            self.run(&instance, &mut timings);
        }
    }
    /// Runs one iteration of the simulation.
    ///
    /// Walks all stages in order. Within a stage, every event's timing vector
    /// is initialized before any event is calculated; per the [`Stage`] docs,
    /// dependencies only come from earlier stages, so they see final values.
    pub fn run(&self, instance: &P::Instance, timings: &mut Timings<P::Event>) {
        // setup & initialization
        let num_val = self.environment.num_validators();
        timings.clear();
        let mut resources = Resources::new(num_val);
        // simulation loop
        for stage in P::Stage::all() {
            for event in stage.events(self.builder.params()) {
                timings.initialize(event, num_val);
            }
            for event in stage.events(self.builder.params()) {
                // gather the per-validator timings of all dependencies, in order
                let dep_timings = event
                    .dependencies(self.builder.params())
                    .into_iter()
                    .map(|dep| timings.get(dep).unwrap())
                    .collect::<Vec<_>>();
                let latencies = event.calculate_timing(
                    timings.start_time(),
                    &dep_timings,
                    instance,
                    &mut resources,
                    &self.environment,
                );
                // record this event's timing for every validator
                for (validator, latency) in latencies.iter().enumerate() {
                    timings.record(event, *latency, validator as ValidatorId);
                }
            }
        }
        // commit timings to stats
        let mut stats_map = self.stats.write().unwrap();
        stats_map.record_latencies(timings, &self.environment);
    }
    /// References the timing stats.
    pub fn stats(&'_ self) -> RwLockReadGuard<'_, TimingStats<P>> {
        self.stats.read().unwrap()
    }
}
impl<P: Protocol> SimulationEngine<P>
where
    P::Builder: Send + Sync,
    P::Event: Send + Sync,
{
    /// Runs the simulation `iterations` times in parallel.
    ///
    /// Each iteration samples a fresh [`Protocol::Instance`] from the
    /// [`Protocol::Builder`] and runs it with its own timing buffer.
    /// Parallelism is provided by the [`rayon`] thread pool.
    pub fn run_many_parallel(&self, iterations: u64) {
        (0..iterations).into_par_iter().for_each(|_| {
            let instance = self.builder.build(&mut rand::rng());
            let mut iteration_timings = Timings::default();
            self.run(&instance, &mut iteration_timings);
        });
    }
}
/// Information about the environment in which the simulation is running.
///
/// This includes the validators, their stakes, bandwidths, ping data, etc.
#[derive(Clone, Debug)]
pub struct SimulationEnvironment {
    // core setup of the latency test
    pub(crate) validators: Vec<ValidatorInfo>,
    // one ping server per validator (index-aligned), used for pairwise latencies
    ping_servers: Vec<&'static PingServer>,
    // cached sum of all validator stakes
    pub(crate) total_stake: Stake,
    // optional bandwidth information
    // if provided, these will be used to simulate transmission delays
    // otherwise, transmission delay is ignored
    // NOTE(review): `leader_bandwidth` is stored but not read in this module --
    // presumably consumed elsewhere; confirm
    leader_bandwidth: Option<u64>,
    // per-validator bandwidth in bits per second, indexed by `ValidatorId`
    bandwidths: Option<Vec<u64>>,
}
impl SimulationEnvironment {
    /// Creates a new simulation environment.
    ///
    /// `validators` and `ping_servers` must be index-aligned.
    /// The total stake is computed and cached; no bandwidth limits are set.
    pub fn new(validators: Vec<ValidatorInfo>, ping_servers: Vec<&'static PingServer>) -> Self {
        let total_stake = validators.iter().fold(0, |acc, v| acc + v.stake);
        Self {
            total_stake,
            validators,
            ping_servers,
            leader_bandwidth: None,
            bandwidths: None,
        }
    }

    /// Creates a new simulation environment from a list of validators with ping data.
    ///
    /// Validators are ordered by descending stake and re-assigned contiguous
    /// IDs in that order before the environment is built.
    pub fn from_validators_with_ping_data(
        validators_with_ping_data: &[(ValidatorInfo, &'static PingServer)],
    ) -> Self {
        // sort by stake (from highest to lowest)
        let mut sorted = validators_with_ping_data.to_vec();
        sorted.sort_by_key(|(v, _)| Reverse(v.stake));
        // re-assign IDs to match the new order
        for (new_id, (v, _)) in sorted.iter_mut().enumerate() {
            v.id = new_id as ValidatorId;
        }
        // split into parallel vectors and build the environment
        let (vals, servers): (Vec<_>, Vec<_>) = sorted.into_iter().unzip();
        Self::new(vals, servers)
    }

    /// Sets the bandwidths for all validators for simulating transmission delays.
    pub fn with_bandwidths(mut self, leader_bandwidth: u64, bandwidths: Vec<u64>) -> Self {
        self.leader_bandwidth = Some(leader_bandwidth);
        self.bandwidths = Some(bandwidths);
        self
    }

    /// Returns the number of validators.
    pub fn num_validators(&self) -> usize {
        self.validators.len()
    }

    /// Calculates how long it takes the `validator` to serialize `bytes` onto the wire.
    ///
    /// Without configured bandwidths, transmission is modeled as instantaneous.
    pub fn transmission_delay(&self, bytes: usize, validator: ValidatorId) -> SimTime {
        match self.bandwidths.as_deref() {
            None => SimTime::ZERO,
            Some(bw) => SimTime::from_secs(bytes as f64 * 8.0 / bw[validator as usize] as f64),
        }
    }

    /// Finds the latency between the `sender` and `receiver` validators.
    ///
    /// One-way delay is modeled as half the measured RTT between the
    /// validators' ping servers.
    pub fn propagation_delay(&self, sender: ValidatorId, receiver: ValidatorId) -> SimTime {
        let src = self.ping_servers[sender as usize].id;
        let dst = self.ping_servers[receiver as usize].id;
        let rtt_ms = get_ping(src, dst).unwrap();
        SimTime::from_secs(rtt_ms / 2.0 / 1e3)
    }
}
/// Calculates the column-wise minimum.
///
/// Requires that all rows have the same non-zero length.
/// Returns a vector of the same length, containing the minimum over all rows in each column.
///
/// # Panics
///
/// - Panics if `rows` is empty.
/// - Panics if not all rows have same length.
pub fn column_min<T: Copy + Ord>(rows: &[&[T]]) -> Vec<T> {
    assert!(!rows.is_empty());
    // seed with the first row, then fold the remaining rows in
    let mut mins = rows[0].to_vec();
    for row in rows.iter().skip(1) {
        assert_eq!(row.len(), mins.len(), "all rows must have same length");
        for (acc, &candidate) in mins.iter_mut().zip(row.iter()) {
            *acc = std::cmp::min(*acc, candidate);
        }
    }
    mins
}
/// Calculates the column-wise maximum.
///
/// Requires that all rows have the same non-zero length.
/// Returns a vector of the same length, containing the maximum over all rows in each column.
///
/// # Panics
///
/// - Panics if `rows` is empty.
/// - Panics if not all rows have same length.
pub fn column_max<T: Copy + Ord>(rows: &[&[T]]) -> Vec<T> {
    assert!(!rows.is_empty());
    // seed with the first row, then fold the remaining rows in
    let mut maxima = rows[0].to_vec();
    for row in rows.iter().skip(1) {
        assert_eq!(row.len(), maxima.len(), "all rows must have same length");
        for (acc, &candidate) in maxima.iter_mut().zip(row.iter()) {
            *acc = std::cmp::max(*acc, candidate);
        }
    }
    maxima
}
/// Simulates a round of broadcast of proofs that an event has occurred.
///
/// The `start_times` vector indicates when each validator locally triggers the event.
/// We then use [`broadcast`] to simulate broadcasting the proofs as soon as possible.
/// Each validator actually triggers the event at the earlier of two times:
/// - The time at which the validator locally triggers the event.
/// - The time at which the validator received the first proof message.
///
/// Returns the time at which each validator triggers the event.
pub fn broadcast_first_arrival_or_dep(
    start_times: &[SimTime],
    resources: &mut Resources,
    environment: &SimulationEnvironment,
    message_size: usize,
) -> Vec<SimTime> {
    let send_times = broadcast(start_times, resources, environment, message_size);
    let mut trigger_times = start_times.to_vec();
    for (recipient, trigger) in trigger_times.iter_mut().enumerate() {
        // earliest arrival of any sender's proof message at this recipient
        let earliest_msg = send_times
            .iter()
            .enumerate()
            .map(|(sender_idx, send_time)| {
                let sender = sender_idx as ValidatorId;
                let propagation = environment.propagation_delay(sender, recipient as ValidatorId);
                // recipient `i` waits for `i + 1` messages' worth of serialization
                let serialized_bytes = (recipient + 1) * message_size;
                let transmission = environment.transmission_delay(serialized_bytes, sender);
                *send_time + propagation + transmission
            })
            .min()
            .unwrap();
        // trigger at whichever comes first: local trigger or first message
        *trigger = (*trigger).min(earliest_msg);
    }
    trigger_times
}
/// Simulates a round of broadcast, where votes from `threshold` fraction of stake must be seen.
///
/// The `start_times` vector indicates when each validator locally triggers the vote.
/// We then use [`broadcast`] to simulate broadcasting the vote message as soon as possible.
///
/// Returns the time at which each validator saw the required threshold of vote messages.
pub fn broadcast_stake_threshold(
    start_times: &[SimTime],
    resources: &mut Resources,
    environment: &SimulationEnvironment,
    message_size: usize,
    threshold: f64,
) -> Vec<SimTime> {
    let mut timings = start_times.to_vec();
    let start_send_times = broadcast(start_times, resources, environment, message_size);
    for (recipient, recipient_timing) in timings.iter_mut().enumerate() {
        // calculate message arrival timings
        let mut arrival_timings = start_send_times
            .iter()
            .enumerate()
            .map(|(sender, start_send)| {
                let sender = sender as ValidatorId;
                let prop_delay = environment.propagation_delay(sender, recipient as ValidatorId);
                // recipient `i` waits for `i + 1` messages' worth of serialization
                let tx_offset_bytes = (recipient + 1) * message_size;
                let tx_delay = environment.transmission_delay(tx_offset_bytes, sender);
                (*start_send + prop_delay + tx_delay, sender)
            })
            .collect::<Vec<_>>();
        // find time the stake threshold is first reached
        // (accumulate stake in arrival order until the threshold is crossed;
        // NOTE(review): if the threshold is never reached, the timing ends up
        // at the last arrival -- presumably thresholds <= 1.0 are always reachable)
        arrival_timings.sort_unstable();
        let mut stake_so_far = 0;
        for (arrival_timing, sender) in arrival_timings {
            *recipient_timing = arrival_timing;
            stake_so_far += environment.validators[sender as usize].stake;
            if stake_so_far as f64 >= threshold * environment.total_stake as f64 {
                break;
            }
        }
    }
    timings
}
/// Simulates a round of broadcast.
///
/// `start_times[v]` is when validator `v` has met the conditions for sending.
/// Every validator sends a message of `message_size` bytes to every other
/// validator, starting as soon as its network resource is free after that.
/// The network resource of each sender is reserved for the whole broadcast.
///
/// Returns, per validator, the time at which it starts sending its messages.
pub fn broadcast(
    start_times: &[SimTime],
    resources: &mut Resources,
    environment: &SimulationEnvironment,
    message_size: usize,
) -> Vec<SimTime> {
    // phase 1: read-only lookup of when each sender's link is next free
    let network = &resources.network;
    let send_times: Vec<SimTime> = start_times
        .iter()
        .enumerate()
        .map(|(sender, ready)| network.time_next_free_after(sender as ValidatorId, *ready))
        .collect();
    // phase 2: reserve each sender's link for the full broadcast duration
    let broadcast_bytes = environment.num_validators() * message_size;
    for (sender, &ready) in start_times.iter().enumerate() {
        let sender = sender as ValidatorId;
        let busy_for = environment.transmission_delay(broadcast_bytes, sender);
        resources.network.schedule(sender, ready, busy_for);
    }
    send_times
}
#[cfg(test)]
mod tests {
    use alpenglow::network::simulated::stake_distribution::{
        VALIDATOR_DATA, validators_from_validator_data,
    };
    use super::*;
    // test constants
    const TIME_PER_EVENT: SimTime = SimTime::from_secs(60.0);
    const NUM_EVENTS: u64 = 20;
    const NUM_SIMULATION_ITERATIONS: u64 = 100;
    // simple test protocol: a single stage of NUM_EVENTS events forming a
    // linear chain, where event k depends on event k-1 and each event adds
    // TIME_PER_EVENT at every validator
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
    struct TestEvent(u64);
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
    struct TestStage;
    // the single parameter is the number of events in the chain
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
    struct TestParams(u64);
    #[derive(Clone, Copy, Debug)]
    struct TestInstance;
    #[derive(Clone, Copy, Debug)]
    struct TestBuilder(TestParams);
    #[derive(Clone, Copy, Debug)]
    struct TestProtocol;
    impl Event for TestEvent {
        type Params = TestParams;
        type Instance = TestInstance;
        fn name(&self) -> String {
            format!("test_event_{}", self.0)
        }
        fn should_track_stats(&self) -> bool {
            true
        }
        // linear dependency chain: event k depends on event k-1
        fn dependencies(&self, _params: &Self::Params) -> Vec<Self> {
            if self.0 > 0 {
                vec![TestEvent(self.0 - 1)]
            } else {
                vec![]
            }
        }
        // every event happens TIME_PER_EVENT after its dependency
        // (or after the start time, for event 0) at all validators
        fn calculate_timing(
            &self,
            start_time: SimTime,
            dep_timings: &[&[SimTime]],
            _instance: &TestInstance,
            _resources: &mut Resources,
            environment: &SimulationEnvironment,
        ) -> Vec<SimTime> {
            let mut timings = if self.0 == 0 {
                vec![start_time; environment.num_validators()]
            } else {
                dep_timings[0].to_vec()
            };
            for timing in timings.iter_mut() {
                *timing += TIME_PER_EVENT;
            }
            timings
        }
    }
    impl Stage for TestStage {
        type Event = TestEvent;
        type Params = TestParams;
        fn first() -> Self {
            TestStage
        }
        fn next(&self) -> Option<Self> {
            None
        }
        fn events(&self, params: &TestParams) -> Vec<TestEvent> {
            (0..params.0).map(TestEvent).collect()
        }
    }
    impl Builder for TestBuilder {
        type Params = TestParams;
        type Instance = TestInstance;
        fn build(&self, _rng: &mut impl Rng) -> TestInstance {
            TestInstance
        }
        fn params(&self) -> &TestParams {
            &self.0
        }
    }
    impl Protocol for TestProtocol {
        type Event = TestEvent;
        type Stage = TestStage;
        type Params = TestParams;
        type Instance = TestInstance;
        type Builder = TestBuilder;
    }
    // builds an engine over the Solana mainnet stake distribution
    fn setup() -> (SimulationEngine<TestProtocol>, TestBuilder) {
        let (_, vals_with_ping) = validators_from_validator_data(&VALIDATOR_DATA);
        let environment = SimulationEnvironment::from_validators_with_ping_data(&vals_with_ping);
        let builder = TestBuilder(TestParams(NUM_EVENTS));
        let engine = SimulationEngine::new(builder, environment);
        (engine, builder)
    }
    #[test]
    fn single() {
        let (engine, builder) = setup();
        let instance = builder.build(&mut rand::rng());
        let mut timings = Timings::default();
        engine.run(&instance, &mut timings);
        // check that the timings are correct
        // (event k should occur at (k + 1) * TIME_PER_EVENT everywhere)
        for event_id in 0..NUM_EVENTS {
            for time in timings.get(TestEvent(event_id)).unwrap() {
                let expected_time_secs = TIME_PER_EVENT.as_secs() * (event_id + 1) as f64;
                let delta = (time.as_secs() - expected_time_secs).abs();
                assert!(delta < f64::EPSILON);
            }
        }
    }
    const CUSTOM_EPSILON: f64 = 1e-6;
    #[test]
    fn many_parallel() {
        let (engine, _builder) = setup();
        engine.run_many_parallel(NUM_SIMULATION_ITERATIONS);
        // check that the timings are correct
        for event_id in 0..NUM_EVENTS {
            let stats = engine.stats();
            let event_stats = stats.get(&TestEvent(event_id)).unwrap();
            // timings should be the same for all validators, thus also for all percentiles
            for percentile in 1..=100 {
                let avg_timing_ms = event_stats.get_avg_percentile_latency(percentile);
                let expected_time_ms = TIME_PER_EVENT.as_millis() * (event_id + 1) as f64;
                let delta = (avg_timing_ms - expected_time_ms).abs();
                assert!(delta < CUSTOM_EPSILON);
            }
        }
    }
    #[test]
    fn many_sequential() {
        let (engine, _builder) = setup();
        engine.run_many_sequential(NUM_SIMULATION_ITERATIONS);
        // check that the timings are correct
        for event_id in 0..NUM_EVENTS {
            let stats = engine.stats();
            let event_stats = stats.get(&TestEvent(event_id)).unwrap();
            // timings should be the same for all validators, thus also for all percentiles
            for percentile in 1..=100 {
                let avg_timing_ms = event_stats.get_avg_percentile_latency(percentile);
                let expected_time_ms = TIME_PER_EVENT.as_millis() * (event_id + 1) as f64;
                let delta = (avg_timing_ms - expected_time_ms).abs();
                assert!(delta < CUSTOM_EPSILON);
            }
        }
    }
    #[test]
    fn start_broadcast() {
        const MESSAGE_BYTES: usize = 100;
        let (_, vals_with_ping) = validators_from_validator_data(&VALIDATOR_DATA);
        let num_val = vals_with_ping.len();
        let start_times = vec![SimTime::new(0); num_val];
        let mut resources = Resources::new(num_val);
        let environment = SimulationEnvironment::from_validators_with_ping_data(&vals_with_ping);
        // without bandwidth limits, all broadcasts start at time 0
        let send_times = broadcast(&start_times, &mut resources, &environment, MESSAGE_BYTES);
        assert_eq!(send_times, vec![SimTime::new(0); num_val]);
        let send_times = broadcast(&start_times, &mut resources, &environment, MESSAGE_BYTES);
        assert_eq!(send_times, vec![SimTime::new(0); num_val]);
        // set bandwidth limits to 1 msg/sec for simplicity
        let bandwidths = vec![8 * MESSAGE_BYTES as u64; num_val];
        let environment = environment.with_bandwidths(8 * MESSAGE_BYTES as u64, bandwidths);
        // first broadcast starts at time 0
        let send_times = broadcast(&start_times, &mut resources, &environment, MESSAGE_BYTES);
        assert_eq!(send_times, vec![SimTime::new(0); num_val]);
        // with bandwidth limits, second broadcast should start after transmission delay
        // (one full broadcast = num_val messages = num_val seconds at 1 msg/sec)
        let send_times = broadcast(&start_times, &mut resources, &environment, MESSAGE_BYTES);
        assert_eq!(
            send_times,
            vec![SimTime::from_secs(num_val as f64); num_val]
        );
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/ryse.rs | src/bin/simulations/ryse.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Simulations for Ryse, the MCP proposal.
//!
//! Ryse is one instantiation of a multiple-concurrent proposers (MCP) consensus protocol.
//! As such, it provides the following economic properties:
//! - Selective-censorship resistance:
//! A malicious consensus leader can not unilaterally exclude an honest proposer's proposal.
//! - Weak hiding:
//! A malicious proposer cannot condition the contents of their block on the contents of another.
//!
//! Compared to Pyjama, Ryse aims to be a simple modification of Alpenglow.
//! That is, it is not a general gadget that can be combined with any consensus protocol.
//! Instead, it specifically modifies Alpenglow's Rotor block dissemination protocol.
//! In Ryse, each relay would release shreds from all leaders simultaneously.
mod latency;
mod parameters;
mod robustness;
pub use self::latency::{LatencySimInstanceBuilder, LatencySimParams, RyseLatencySimulation};
pub use self::parameters::{RyseInstanceBuilder, RyseParameters};
pub use self::robustness::{run_robustness_tests, run_ryse_robustness_test};
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/pyjama.rs | src/bin/simulations/pyjama.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Simulations for Pyjama, the MCP protocol.
//!
//! Pyjama is one instantiation of a multiple-concurrent proposers (MCP) consensus protocol.
//! As such, it provides the following economic properties:
//! - Selective-censorship resistance:
//! A malicious consensus leader can not unilaterally exclude an honest proposer's proposal.
//! - Weak hiding:
//! A malicious proposer cannot condition the contents of their block on the contents of another.
//!
//! Additionally, Pyjama provides the following stronger economic property:
//! - Strong hiding:
//! A malicious proposer cannot condition inclusion of their block on the contents of another.
//!
//! Compared to Ryse, Pyjama aims to be a general gadget that works with any consensus protocol.
//! It provides a wrapper, where any consensus protocol like Alpenglow can be plugged in.
mod latency;
mod parameters;
mod robustness;
pub use latency::PyjamaLatencySimulation;
pub use parameters::{PyjamaInstance, PyjamaInstanceBuilder, PyjamaParameters as PyjamaParams};
pub use robustness::{run_pyjama_robustness_test, run_robustness_tests};
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/rotor.rs | src/bin/simulations/rotor.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Simulations for the Rotor protocol.
//!
//! This implements the following simulations:
//! - Latency simulation for block dissemination via Rotor.
//! - Robustness simulation against liveness and safety failures.
pub mod latency;
pub mod robustness;
use alpenglow::ValidatorId;
use alpenglow::disseminator::rotor::SamplingStrategy;
use rand::prelude::*;
pub use self::latency::{LatencyEvent, RotorLatencySimulation};
pub use self::robustness::run_rotor_robustness_test;
use crate::discrete_event_simulator::Builder;
/// Parameters for the Rotor protocol.
///
/// Used by [`RotorInstanceBuilder`] to size relay sampling per slice.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RotorParams {
    /// Number of shreds needed to recover the data.
    pub data_shreds: usize,
    /// Number of shreds that make up a slice.
    pub shreds: usize,
    /// Number of slices that make up a block.
    pub slices: usize,
}
impl RotorParams {
    /// Creates a new set of Rotor parameters.
    ///
    /// `data_shreds` of the `shreds` per slice suffice to recover the data,
    /// and a block consists of `slices` slices.
    pub fn new(data_shreds: usize, shreds: usize, slices: usize) -> Self {
        Self {
            data_shreds,
            shreds,
            slices,
        }
    }
}
/// Builder for Rotor instances with a specific set of parameters.
#[derive(Debug)]
pub struct RotorInstanceBuilder<L: SamplingStrategy, R: SamplingStrategy> {
    /// Strategy used to sample the leader.
    pub leader_sampler: L,
    /// Strategy used to sample the per-slice relays.
    pub rotor_sampler: R,
    /// Protocol parameters shared by all built instances.
    pub params: RotorParams,
}
impl<L: SamplingStrategy, R: SamplingStrategy> RotorInstanceBuilder<L, R> {
    /// Creates a new builder instance, with the provided sampling strategies.
    ///
    /// `leader_sampler` picks the leader, `rotor_sampler` picks the relays.
    pub fn new(leader_sampler: L, rotor_sampler: R, params: RotorParams) -> Self {
        Self {
            leader_sampler,
            rotor_sampler,
            params,
        }
    }
}
impl<L: SamplingStrategy, R: SamplingStrategy> Builder for RotorInstanceBuilder<L, R> {
    type Params = RotorParams;
    type Instance = RotorInstance;

    /// Samples a leader, then one relay assignment of `shreds` relays per slice.
    fn build(&self, rng: &mut impl Rng) -> RotorInstance {
        let leader = self.leader_sampler.sample(rng);
        let mut relays = Vec::with_capacity(self.params.slices);
        for _ in 0..self.params.slices {
            relays.push(self.rotor_sampler.sample_multiple(self.params.shreds, rng));
        }
        RotorInstance {
            leader,
            relays,
            params: self.params,
        }
    }

    fn params(&self) -> &Self::Params {
        &self.params
    }
}
/// Specific instance of the Rotor protocol.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RotorInstance {
    /// Leader validator.
    pub leader: ValidatorId,
    /// Relays for each slice, and each shred within a slice.
    /// Indexed as `relays[slice][shred]`.
    pub relays: Vec<Vec<ValidatorId>>,
    /// Parameters this instance corresponds to.
    pub params: RotorParams,
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/main.rs | src/bin/simulations/main.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Simulations for Rotor and Alpenglow.
//!
//! This module provides a binary for simulating Rotor and Alpenglow.
//!
//! Currently three major simulations are implemented:
//! 1. Bandwidth (Rotor, workload per node, maximum supported throughput)
//! 2. Latency (all stages of Alpenglow)
//! 3. Rotor robustness (Rotor, 40% crash, 20% byzantine)
//!
//! For parallelization of these simulations, [`rayon`] is used in most places.
//!
//! All of these simulations are evaluated on different stake distributions, namely:
//! - Solana mainnet (real-world data)
//! - Sui mainnet (real-world data)
//! - 5 hubs (five major cities, globally distributed, equal stake each)
//! - Stock exchanges (top 10 global stock exchange locations)
//!
//! Also, all simulations are evaluated for different sampling strategies, namely:
//! - Uniform
//! - Stake-weighted
//! - Fait Accompli 1 + Stake-weighted
//! - Fait Accompli 1 + Bin-packing
//! - Decaying acceptance (with 3.0 max samples)
//! - Turbine
//!
//! The global constants [`RUN_BANDWIDTH_TESTS`], [`RUN_LATENCY_TESTS`], and
//! [`RUN_ROTOR_ROBUSTNESS_TESTS`] control which tests to run.
//! Further, the global constants [`SAMPLING_STRATEGIES`], [`MAX_BANDWIDTHS`],
//! and [`SHRED_COMBINATIONS`] control the parameters for some tests.
#![deny(rustdoc::broken_intra_doc_links)]
mod alpenglow;
mod discrete_event_simulator;
mod pyjama;
mod quorum_robustness;
mod rotor;
mod ryse;
use std::cmp::Reverse;
use std::fs::File;
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
use ::alpenglow::disseminator::rotor::sampling_strategy::{
AllSameSampler, DecayingAcceptanceSampler, FaitAccompli1Sampler, FaitAccompli2Sampler,
TurbineSampler, UniformSampler,
};
use ::alpenglow::disseminator::rotor::{SamplingStrategy, StakeWeightedSampler};
use ::alpenglow::network::simulated::ping_data::PingServer;
use ::alpenglow::network::simulated::stake_distribution::{
VALIDATOR_DATA, ValidatorData, validators_from_validator_data,
};
use ::alpenglow::{Stake, ValidatorId, ValidatorInfo, logging};
use color_eyre::Result;
use log::info;
use rayon::prelude::*;
use crate::alpenglow::{
AlpenglowLatencySimulation, BandwidthTest, LatencySimInstanceBuilder, LatencySimParams,
};
use crate::discrete_event_simulator::{SimulationEngine, SimulationEnvironment};
use crate::pyjama::{
PyjamaInstanceBuilder, PyjamaLatencySimulation, PyjamaParams, run_pyjama_robustness_test,
};
use crate::rotor::{
RotorInstanceBuilder, RotorLatencySimulation, RotorParams, run_rotor_robustness_test,
};
use crate::ryse::{
RyseInstanceBuilder, RyseLatencySimulation, RyseParameters, run_ryse_robustness_test,
};
/// Whether to run the bandwidth experiments.
const RUN_BANDWIDTH_TESTS: bool = false;
/// Whether to run the latency simulations.
const RUN_LATENCY_TESTS: bool = true;
/// Whether to run the Rotor robustness experiments.
const RUN_ROTOR_ROBUSTNESS_TESTS: bool = true;
/// Sampling strategies to evaluate; commented-out entries are disabled.
const SAMPLING_STRATEGIES: [&str; 1] = [
    // "uniform",
    "stake_weighted",
    // "fa1_iid",
    // "fa2",
    // "fa1_partition",
    // "decaying_acceptance",
    // "turbine",
];
/// Per-validator bandwidth caps evaluated in the bandwidth experiments.
const MAX_BANDWIDTHS: [u64; 4] = [
    100_000_000, // 100 Mbps
    1_000_000_000, // 1 Gbps
    10_000_000_000, // 10 Gbps
    100_000_000_000, // 100 Gbps
];
/// Total number of shreds used by the Fait Accompli 1 samplers.
const TOTAL_SHREDS_FA1: u64 = 64;
/// `(num_data_shreds, num_total_shreds)` combinations to evaluate;
/// commented-out entries are disabled.
const SHRED_COMBINATIONS: [(usize, usize); 1] = [
    // (32, 54),
    (32, 64),
    // (32, 80),
    // (32, 96),
    // (32, 320),
    // (64, 128),
    // (128, 256),
    // (256, 512),
];
/// Entry point: sets up error reporting and logging, runs the Ryse/Pyjama
/// robustness sweeps, truncates the output CSV files for the enabled test
/// suites, and then runs all tests per configured stake distribution.
fn main() -> Result<()> {
    // enable fancy `color_eyre` error messages
    color_eyre::install()?;
    logging::enable_logforth();
    // Ryse/Pyjama robustness sweeps (independent of the stake distributions below)
    crate::ryse::run_robustness_tests();
    crate::pyjama::run_robustness_tests();
    for k in [64, 128, 256, 512] {
        run_ryse_robustness_test(k)?;
        run_pyjama_robustness_test(k)?;
    }
    if RUN_BANDWIDTH_TESTS {
        // create bandwidth evaluation files
        // NOTE: `File::create` truncates any existing results
        let filename = PathBuf::from("data")
            .join("output")
            .join("simulations")
            .join("bandwidth")
            .join("bandwidth_supported")
            .with_extension("csv");
        if let Some(parent) = filename.parent() {
            std::fs::create_dir_all(parent)?;
        }
        let _ = File::create(filename)?;
        let filename = PathBuf::from("data")
            .join("output")
            .join("simulations")
            .join("bandwidth")
            .join("bandwidth_usage")
            .with_extension("csv");
        if let Some(parent) = filename.parent() {
            std::fs::create_dir_all(parent)?;
        }
        let _ = File::create(filename)?;
    }
    if RUN_ROTOR_ROBUSTNESS_TESTS {
        // create safety evaluation file
        let filename = PathBuf::from("data")
            .join("output")
            .join("simulations")
            .join("rotor_robustness")
            .join("rotor_robustness")
            .with_extension("csv");
        if let Some(parent) = filename.parent() {
            std::fs::create_dir_all(parent)?;
        }
        let _ = File::create(filename)?;
    }
    // run tests for different stake distributions
    run_tests_for_stake_distribution("solana", &VALIDATOR_DATA)?;
    // run_tests_for_stake_distribution("sui", &SUI_VALIDATOR_DATA);
    // run_tests_for_stake_distribution("5hubs", &FIVE_HUBS_VALIDATOR_DATA);
    // run_tests_for_stake_distribution("stock_exchanges", &STOCK_EXCHANGES_VALIDATOR_DATA);
    Ok(())
}
/// Runs all enabled test suites for one stake distribution.
///
/// For each entry in [`SAMPLING_STRATEGIES`] this constructs the matching
/// leader/rotor samplers (one pair over all validators, one pair over the
/// subset with ping data) and calls [`run_tests`].
///
/// NOTE(review): strategy names not matched by any branch below are silently
/// skipped — consider a trailing `else { unimplemented!() }` to catch typos.
fn run_tests_for_stake_distribution(
    distribution_name: &str,
    validator_data: &[ValidatorData],
) -> Result<()> {
    // load validator and ping data
    let (validators, mut validators_and_ping_servers) =
        validators_from_validator_data(validator_data);
    // sort by stake (from highest to lowest)
    validators_and_ping_servers.sort_by_key(|(v, _)| Reverse(v.stake));
    // re-assign IDs so they match the new stake-sorted order
    for (i, (v, _)) in validators_and_ping_servers.iter_mut().enumerate() {
        v.id = i as ValidatorId;
    }
    // extract the validators only
    let validators_with_pings: Vec<ValidatorInfo> = validators_and_ping_servers
        .iter()
        .map(|(v, _)| v.clone())
        .collect();
    // TODO: indicatif progress bar
    // run all tests for the different sampling strategies
    for sampling_strat in SAMPLING_STRATEGIES {
        let test_name = format!("{distribution_name}-{sampling_strat}");
        if sampling_strat == "uniform" {
            let leader_sampler = UniformSampler::new(validators.clone());
            let ping_leader_sampler = UniformSampler::new(validators_with_pings.clone());
            let rotor_sampler = UniformSampler::new(validators.clone());
            let ping_rotor_sampler = UniformSampler::new(validators_with_pings.clone());
            run_tests(
                &test_name,
                &validators,
                &validators_and_ping_servers,
                &leader_sampler,
                &rotor_sampler,
                &ping_leader_sampler,
                &ping_rotor_sampler,
            )?;
        } else if sampling_strat == "stake_weighted" {
            let leader_sampler = StakeWeightedSampler::new(validators.clone());
            let ping_leader_sampler = StakeWeightedSampler::new(validators_with_pings.clone());
            let rotor_sampler = StakeWeightedSampler::new(validators.clone());
            let ping_rotor_sampler = StakeWeightedSampler::new(validators_with_pings.clone());
            run_tests(
                &test_name,
                &validators,
                &validators_and_ping_servers,
                &leader_sampler,
                &rotor_sampler,
                &ping_leader_sampler,
                &ping_rotor_sampler,
            )?;
        } else if sampling_strat == "fa1_iid" {
            // leaders stay stake-weighted; only the rotor sampler uses FA1
            let leader_sampler = StakeWeightedSampler::new(validators.clone());
            let ping_leader_sampler = StakeWeightedSampler::new(validators_with_pings.clone());
            let rotor_sampler = FaitAccompli1Sampler::new_with_stake_weighted_fallback(
                validators.clone(),
                TOTAL_SHREDS_FA1,
            );
            let ping_rotor_sampler = FaitAccompli1Sampler::new_with_stake_weighted_fallback(
                validators_with_pings.clone(),
                TOTAL_SHREDS_FA1,
            );
            run_tests(
                &test_name,
                &validators,
                &validators_and_ping_servers,
                &leader_sampler,
                &rotor_sampler,
                &ping_leader_sampler,
                &ping_rotor_sampler,
            )?;
        } else if sampling_strat == "fa2" {
            let leader_sampler = StakeWeightedSampler::new(validators.clone());
            let ping_leader_sampler = StakeWeightedSampler::new(validators_with_pings.clone());
            let rotor_sampler = FaitAccompli2Sampler::new(validators.clone(), TOTAL_SHREDS_FA1);
            let ping_rotor_sampler =
                FaitAccompli2Sampler::new(validators_with_pings.clone(), TOTAL_SHREDS_FA1);
            run_tests(
                &test_name,
                &validators,
                &validators_and_ping_servers,
                &leader_sampler,
                &rotor_sampler,
                &ping_leader_sampler,
                &ping_rotor_sampler,
            )?;
        } else if sampling_strat == "fa1_partition" {
            let leader_sampler = StakeWeightedSampler::new(validators.clone());
            let ping_leader_sampler = StakeWeightedSampler::new(validators_with_pings.clone());
            let rotor_sampler = FaitAccompli1Sampler::new_with_partition_fallback(
                validators.clone(),
                TOTAL_SHREDS_FA1,
            );
            let ping_rotor_sampler = FaitAccompli1Sampler::new_with_partition_fallback(
                validators_with_pings.clone(),
                TOTAL_SHREDS_FA1,
            );
            run_tests(
                &test_name,
                &validators,
                &validators_and_ping_servers,
                &leader_sampler,
                &rotor_sampler,
                &ping_leader_sampler,
                &ping_rotor_sampler,
            )?;
        } else if sampling_strat == "decaying_acceptance" {
            let leader_sampler = StakeWeightedSampler::new(validators.clone());
            let ping_leader_sampler = StakeWeightedSampler::new(validators_with_pings.clone());
            let rotor_sampler = DecayingAcceptanceSampler::new(validators.clone(), 3.0);
            let ping_rotor_sampler =
                DecayingAcceptanceSampler::new(validators_with_pings.clone(), 3.0);
            run_tests(
                &test_name,
                &validators,
                &validators_and_ping_servers,
                &leader_sampler,
                &rotor_sampler,
                &ping_leader_sampler,
                &ping_rotor_sampler,
            )?;
        } else if sampling_strat == "turbine" {
            let leader_sampler = TurbineSampler::new(validators.clone());
            let ping_leader_sampler = TurbineSampler::new(validators_with_pings.clone());
            let rotor_sampler = TurbineSampler::new(validators.clone());
            let ping_rotor_sampler = TurbineSampler::new(validators_with_pings.clone());
            run_tests(
                &test_name,
                &validators,
                &validators_and_ping_servers,
                &leader_sampler,
                &rotor_sampler,
                &ping_leader_sampler,
                &ping_rotor_sampler,
            )?;
        }
    }
    Ok(())
}
fn run_tests<
L: SamplingStrategy + Send + Sync + Clone,
R: SamplingStrategy + Send + Sync + Clone,
>(
test_name: &str,
validators: &[ValidatorInfo],
validators_with_ping_data: &[(ValidatorInfo, &'static PingServer)],
leader_sampler: &L,
rotor_sampler: &R,
ping_leader_sampler: &L,
ping_rotor_sampler: &R,
) -> Result<()> {
if RUN_BANDWIDTH_TESTS {
// TODO: clean up code
let filename = PathBuf::from("data")
.join("output")
.join("simulations")
.join("bandwidth")
.join("bandwidth_supported")
.with_extension("csv");
let file = File::options().append(true).open(filename)?;
let writer = csv::Writer::from_writer(file);
let writer = Arc::new(Mutex::new(writer));
let supported_writer_ref = &writer;
let filename = PathBuf::from("data")
.join("output")
.join("simulations")
.join("bandwidth")
.join("bandwidth_usage")
.with_extension("csv");
let file = File::options().append(true).open(filename)?;
let writer = csv::Writer::from_writer(file);
let writer = Arc::new(Mutex::new(writer));
let usage_writer_ref = &writer;
// bandwidth experiment
MAX_BANDWIDTHS.into_par_iter().for_each(|max_bandwidth| {
let bandwidths = vec![max_bandwidth; validators.len()];
let mut tester = BandwidthTest::new(
validators,
max_bandwidth,
bandwidths,
leader_sampler.clone(),
rotor_sampler.clone(),
64,
);
for shreds in [64, 128, 256, 512] {
info!(
"{test_name} bandwidth test ({:.1} Gbps, {shreds} shreds)",
max_bandwidth as f64 / 1e9,
);
tester.set_num_shreds(shreds);
tester.reset();
tester.run_multiple(1_000_000);
tester.evaluate_supported(test_name, supported_writer_ref);
tester.evaluate_usage(test_name, usage_writer_ref.clone());
}
});
}
if RUN_LATENCY_TESTS {
let validators_with_pings = validators_with_ping_data
.iter()
.map(|(v, _)| v.clone())
.collect::<Vec<_>>();
let total_stake: Stake = validators_with_pings.iter().map(|v| v.stake).sum();
let leader_bandwidth = 10_000_000_000; // 10 Gbps
let min_bandwidth = 1_000_000_000; // 1 Gbps
let bandwidths = validators_with_pings
.iter()
.map(|v| {
((v.stake as f64 / total_stake as f64
* (validators_with_pings.len() as u64 * leader_bandwidth) as f64)
.round() as u64)
.max(min_bandwidth)
})
.collect();
let environment =
SimulationEnvironment::from_validators_with_ping_data(validators_with_ping_data)
.with_bandwidths(leader_bandwidth, bandwidths);
// Rotor
let params = RotorParams::new(32, 64, 40);
let builder = RotorInstanceBuilder::new(
ping_leader_sampler.clone(),
ping_rotor_sampler.clone(),
params,
);
let engine =
SimulationEngine::<RotorLatencySimulation<_, _>>::new(builder, environment.clone());
info!("rotor latency sim (sequential)");
engine.run_many_sequential(10);
engine
.stats()
.write_to_csv("data/output/rotor_10.csv", ¶ms)?;
info!("rotor latency sim (parallel)");
engine.run_many_parallel(1000);
engine
.stats()
.write_to_csv("data/output/rotor_1000.csv", ¶ms)?;
// Ryse
let ryse_params = RyseParameters::new(8, 320);
let ryse_builder = RyseInstanceBuilder::new(
ping_leader_sampler.clone(),
ping_rotor_sampler.clone(),
ryse_params,
);
let params = ryse::LatencySimParams::new(ryse_params, 4, 1);
let builder = ryse::LatencySimInstanceBuilder::new(ryse_builder, params.clone());
let engine =
SimulationEngine::<RyseLatencySimulation<_, _>>::new(builder, environment.clone());
info!("ryse latency sim (parallel)");
engine.run_many_parallel(1000);
engine
.stats()
.write_to_csv("data/output/ryse_1000.csv", ¶ms)?;
// Pyjama
let params = PyjamaParams::new(8, 640);
let builder = PyjamaInstanceBuilder::new(
ping_leader_sampler.clone(),
ping_leader_sampler.clone(),
ping_rotor_sampler.clone(),
params,
);
let engine =
SimulationEngine::<PyjamaLatencySimulation<_, _, _>>::new(builder, environment.clone());
info!("pyjama latency sim (parallel)");
engine.run_many_parallel(1000);
engine
.stats()
.write_to_csv("data/output/pyjama_1000.csv", ¶ms)?;
// Alpenglow
// latency experiments with random leaders
for (n, k) in SHRED_COMBINATIONS {
info!("{test_name} latency tests (random leaders, n={n}, k={k})");
let rotor_params = RotorParams::new(n, k, 40);
let rotor_builder = RotorInstanceBuilder::new(
ping_leader_sampler.clone(),
ping_rotor_sampler.clone(),
rotor_params,
);
let params = LatencySimParams::new(rotor_params, 4, 1);
let builder = LatencySimInstanceBuilder::new(rotor_builder, params.clone());
let engine = SimulationEngine::<AlpenglowLatencySimulation<_, _>>::new(
builder,
environment.clone(),
);
engine.run_many_parallel(1000);
engine
.stats()
.write_to_csv("data/output/alpenglow_1000.csv", ¶ms)?;
}
// latency experiments with fixed leaders
let cities = if test_name.starts_with("solana") {
vec![
"Westpoort", // Amsterdam
"Frankfurt",
"London",
"Basel",
"Secaucus", // NYC/NJ
"Los Angeles",
"Tokyo",
"Singapore",
"Cape Town",
"Buenos Aires",
]
} else if test_name.starts_with("sui") {
vec![
"Los Angeles",
"Secaucus", // NYC/NJ
"Dublin",
"London",
"Paris",
"Frankfurt",
"Singapore",
"Tokyo",
]
} else if test_name.starts_with("5hubs") {
vec![
"San Francisco",
"Secaucus", // NYC/NJ
"London",
"Shanghai",
"Tokyo",
]
} else if test_name.starts_with("stock_exchanges") {
vec![
"Toronto",
"Secaucus", // NYC/NJ
"Westpoort",
"Taipei",
"Pune",
"Shanghai",
"Hong Kong",
"Tokyo",
]
} else {
unimplemented!()
};
for (n, k) in SHRED_COMBINATIONS {
cities.par_iter().try_for_each(|city| {
info!("{test_name} latency tests (fixed leader in {city}, n={n}, k={k})");
let leader = find_leader_in_city(validators_with_ping_data, city);
let rotor_params = RotorParams::new(n, k, 40);
let rotor_builder = RotorInstanceBuilder::new(
AllSameSampler(leader),
ping_rotor_sampler.clone(),
rotor_params,
);
let params = LatencySimParams::new(rotor_params, 4, 1);
let builder = LatencySimInstanceBuilder::new(rotor_builder, params.clone());
let engine = SimulationEngine::<AlpenglowLatencySimulation<_, _>>::new(
builder,
environment.clone(),
);
engine.run_many_sequential(1000);
let filename = format!("data/output/alpenglow_{city}_1000.csv");
engine.stats().write_to_csv(filename, ¶ms)
})?;
}
}
if RUN_ROTOR_ROBUSTNESS_TESTS {
for &(n, k) in &SHRED_COMBINATIONS {
run_rotor_robustness_test(n, k)?;
}
}
Ok(())
}
/// Returns the first validator whose ping server is located in `city`.
///
/// Panics if no validator in `validators_with_ping_data` is located there.
fn find_leader_in_city(
    validators_with_ping_data: &[(ValidatorInfo, &'static PingServer)],
    city: &str,
) -> ValidatorInfo {
    validators_with_ping_data
        .iter()
        .find(|(_, ping_server)| ping_server.location == city)
        .map(|(validator, _)| validator.clone())
        .unwrap_or_else(|| panic!("leader not found in {city}"))
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/quorum_robustness.rs | src/bin/simulations/quorum_robustness.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Monte-Carlo simulations to evaluate robustness of random quorums.
//!
//! This implements two main attack scenarios:
//! - Equivocation attack: Less than 20% of stake is Byzantine.
//! - Censorship attack: Up to 40% of stake is crashed.
//!
//! For each attack scenario multiple adversary strategies are simulated:
//! - Random: Corrupt a random subset of validators.
//! - Small: Corrupt as many of the smallest validators as possible.
//! - Large: Corrupt as many of the largest validators as possible.
use std::cmp::Reverse;
use std::fs::File;
use std::sync::RwLock;
use alpenglow::disseminator::rotor::{FaitAccompli1Sampler, SamplingStrategy};
use alpenglow::{Stake, ValidatorInfo};
use color_eyre::Result;
use log::debug;
use rand::prelude::*;
use rayon::prelude::*;
use static_assertions::const_assert_eq;
/// Parallelism level for rayon.
const PARALLELISM: usize = 1000;
/// Interval to take write locks on `tests` and `failures`.
const WRITE_BATCH: usize = 1000;
/// Maximum number of total iterations per attack scenario.
const TOTAL_ITERATIONS: usize = 100_000_000_000;
// ensure the iteration budget divides evenly into per-worker batches
const_assert_eq!(TOTAL_ITERATIONS % (PARALLELISM * WRITE_BATCH), 0);
/// Simulations stop early if the number of failures is greater than this.
const MAX_FAILURES: usize = 100;
/// Adversary strength, expressed as fractions of total stake.
#[derive(Clone, Copy, Debug)]
pub struct AdversaryStrength {
    /// Fraction of stake that may crash.
    pub crashed: f64,
    /// Fraction of stake that may be arbitrarily controlled by the adversary.
    pub byzantine: f64,
}
/// Test harness for quorum robustness testing.
pub struct QuorumRobustnessTest<S: SamplingStrategy> {
    // available sampling strategies
    samplers: Vec<S>,
    // per quorum: index into `samplers` selecting its sampling strategy
    quorum_samplers: Vec<usize>,
    // per quorum: number of validators sampled
    quorum_sizes: Vec<usize>,
    // attack scenarios evaluated against every sample
    attacks: Vec<QuorumAttack>,
    // total number of samples drawn so far (shared across rayon workers)
    tests: RwLock<usize>,
    // per-attack count of successful attacks (same indexing as `attacks`)
    failures: RwLock<Vec<usize>>,
    validators: Vec<ValidatorInfo>,
    total_stake: Stake,
    // name of the stake distribution, used in the CSV output
    stake_distribution: String,
}
impl<S: SamplingStrategy + Send + Sync> QuorumRobustnessTest<S> {
    /// Creates a new instance of the test harness.
    ///
    /// `quorum_samplers[i]` is an index into `samplers` selecting the sampling
    /// strategy used for the quorum of size `quorum_sizes[i]`.
    pub fn new(
        validators: Vec<ValidatorInfo>,
        stake_distribution: String,
        samplers: Vec<S>,
        quorum_samplers: Vec<usize>,
        quorum_sizes: Vec<usize>,
        attacks: Vec<QuorumAttack>,
    ) -> Self {
        let total_stake: Stake = validators.iter().map(|v| v.stake).sum();
        // shared counters, written to by all rayon worker threads
        let tests = RwLock::new(0);
        let failures = RwLock::new(vec![0; attacks.len()]);
        Self {
            samplers,
            quorum_samplers,
            quorum_sizes,
            attacks,
            tests,
            failures,
            validators,
            total_stake,
            stake_distribution,
        }
    }
    /// Runs the robustness test for the given `adversary_strength`.
    ///
    /// This runs the various strategies of choosing validators to corrupt
    /// (random, smallest-first, largest-first) and keeps, per attack scenario,
    /// the highest observed failure probability across strategies.
    ///
    /// Results are written as a single line into `csv_file` (log2 of each
    /// attack's failure probability).
    pub fn run(
        &self,
        adversary_strength: AdversaryStrength,
        csv_file: &mut csv::Writer<File>,
    ) -> Result<()> {
        let mut attack_probs = vec![0.0; self.attacks.len()];
        // try three different adversary strategies
        // let partition_attack_probs = self.run_bin_packing(adversary_strength, attack_probs);
        // debug!("bin-packing failure rates:");
        // for (attack, prob) in self.attacks.iter().zip(partition_attack_probs.iter()) {
        //     debug!("  - {}: {:.2}", attack.name, prob.log10());
        // }
        // self.reset();
        // vec_max(&mut attack_probs, &partition_attack_probs);
        let random_attack_probs = self.run_random(adversary_strength, &attack_probs);
        debug!("random failure rates:");
        for (attack, prob) in self.attacks.iter().zip(random_attack_probs.iter()) {
            debug!("  - {}: {:.2}", attack.name, prob.log10());
        }
        self.reset();
        vec_max(&mut attack_probs, &random_attack_probs);
        let small_attack_probs = self.run_small(adversary_strength, &attack_probs);
        debug!("small failure rates:");
        for (attack, prob) in self.attacks.iter().zip(small_attack_probs.iter()) {
            debug!("  - {}: {:.2}", attack.name, prob.log10());
        }
        self.reset();
        vec_max(&mut attack_probs, &small_attack_probs);
        let large_attack_probs = self.run_large(adversary_strength, &attack_probs);
        debug!("large failure rate:");
        for (attack, prob) in self.attacks.iter().zip(large_attack_probs.iter()) {
            debug!("  - {}: {:.2}", attack.name, prob.log10());
        }
        self.reset();
        vec_max(&mut attack_probs, &large_attack_probs);
        // write results to CSV
        let sampling_strategy = S::name();
        let mut row = vec![
            self.stake_distribution.clone(),
            sampling_strategy.to_string(),
            adversary_strength.byzantine.to_string(),
            adversary_strength.crashed.to_string(),
            // self.params().num_data_shreds.to_string(),
            // self.params().num_shreds.to_string(),
        ];
        for attack_prob in &attack_probs {
            row.push(attack_prob.log2().to_string());
        }
        csv_file.write_record(&row)?;
        csv_file.flush()?;
        Ok(())
    }
    /// Adversary strategy: corrupt as many of the smallest validators as the
    /// budget allows, then run the Monte-Carlo sampling.
    ///
    /// Returns the per-attack failure probabilities.
    fn run_small(
        &self,
        adversary_strength: AdversaryStrength,
        known_attack_probs: &[f64],
    ) -> Vec<f64> {
        debug!("running attack with small nodes corrupted");
        let mut byzantine = vec![false; self.validators.len()];
        let mut crashed = vec![false; self.validators.len()];
        let mut validators_to_corrupt = self.validators.clone();
        validators_to_corrupt.sort_by_key(|v| v.stake);
        // corrupt smallest validators (first byzantine, then crashed)
        let mut byzantine_stake = 0.0;
        let mut crashed_stake = 0.0;
        for v in &validators_to_corrupt {
            let rel_stake = v.stake as f64 / self.total_stake as f64;
            if byzantine_stake + rel_stake < adversary_strength.byzantine {
                byzantine[v.id as usize] = true;
                byzantine_stake += rel_stake;
            } else if crashed_stake + rel_stake < adversary_strength.crashed {
                crashed[v.id as usize] = true;
                crashed_stake += rel_stake;
            } else {
                // ascending stake order: if this one doesn't fit, no later one will
                break;
            }
        }
        // run tests, aborting early on enough failures or a known stronger attack
        (0..PARALLELISM).into_par_iter().for_each(|_| {
            for _ in 0..TOTAL_ITERATIONS / PARALLELISM / WRITE_BATCH {
                let (tests, hit_max_failures) =
                    self.run_with_corrupted(WRITE_BATCH, &byzantine, &crashed);
                *self.tests.write().unwrap() += tests;
                if hit_max_failures || self.is_better_attack_known(known_attack_probs) {
                    break;
                }
            }
        });
        self.attack_probabilities()
    }
    /// Adversary strategy: corrupt as many of the largest validators as the
    /// budget allows, then run the Monte-Carlo sampling.
    ///
    /// Returns the per-attack failure probabilities.
    fn run_large(
        &self,
        adversary_strength: AdversaryStrength,
        known_attack_probs: &[f64],
    ) -> Vec<f64> {
        debug!("running attack with large nodes corrupted");
        let mut byzantine = vec![false; self.validators.len()];
        let mut crashed = vec![false; self.validators.len()];
        let mut validators_to_corrupt = self.validators.clone();
        validators_to_corrupt.sort_by_key(|v| Reverse(v.stake));
        // corrupt largest validators (first byzantine, then crashed)
        let mut byzantine_stake = 0.0;
        let mut crashed_stake = 0.0;
        for v in &validators_to_corrupt {
            let rel_stake = v.stake as f64 / self.total_stake as f64;
            if byzantine_stake + rel_stake < adversary_strength.byzantine {
                byzantine[v.id as usize] = true;
                byzantine_stake += rel_stake;
            } else if crashed_stake + rel_stake < adversary_strength.crashed {
                crashed[v.id as usize] = true;
                crashed_stake += rel_stake;
            } else {
                // NOTE(review): descending stake order, so a later (smaller)
                // validator might still fit the budget; breaking here leaves
                // some budget unused — confirm this is intended.
                break;
            }
        }
        // run tests, aborting early on enough failures or a known stronger attack
        (0..PARALLELISM).into_par_iter().for_each(|_| {
            for _ in 0..TOTAL_ITERATIONS / PARALLELISM / WRITE_BATCH {
                let (tests, hit_max_failures) =
                    self.run_with_corrupted(WRITE_BATCH, &byzantine, &crashed);
                *self.tests.write().unwrap() += tests;
                if hit_max_failures || self.is_better_attack_known(known_attack_probs) {
                    break;
                }
            }
        });
        self.attack_probabilities()
    }
    /// Adversary strategy: corrupt a random subset of validators (each rayon
    /// worker picks its own random subset), then run the Monte-Carlo sampling.
    ///
    /// Returns the per-attack failure probabilities.
    fn run_random(
        &self,
        adversary_strength: AdversaryStrength,
        known_attack_probs: &[f64],
    ) -> Vec<f64> {
        debug!("running attack with random nodes corrupted");
        (0..PARALLELISM).into_par_iter().for_each(|_| {
            let mut byzantine = vec![false; self.validators.len()];
            let mut crashed = vec![false; self.validators.len()];
            let mut validators_to_corrupt = self.validators.clone();
            validators_to_corrupt.shuffle(&mut rand::rng());
            // greedily corrupt validators (prioritizing byzantine);
            // unlike run_small/run_large, all validators are considered
            let mut byzantine_stake = 0.0;
            let mut crashed_stake = 0.0;
            for v in &validators_to_corrupt {
                let rel_stake = v.stake as f64 / self.total_stake as f64;
                if byzantine_stake + rel_stake < adversary_strength.byzantine {
                    byzantine[v.id as usize] = true;
                    byzantine_stake += rel_stake;
                } else if crashed_stake + rel_stake < adversary_strength.crashed {
                    crashed[v.id as usize] = true;
                    crashed_stake += rel_stake;
                }
            }
            // run tests, aborting early on enough failures or a known stronger attack
            for _ in 0..TOTAL_ITERATIONS / PARALLELISM / WRITE_BATCH {
                let (tests, hit_max_failures) =
                    self.run_with_corrupted(WRITE_BATCH, &byzantine, &crashed);
                *self.tests.write().unwrap() += tests;
                if hit_max_failures || self.is_better_attack_known(known_attack_probs) {
                    break;
                }
            }
        });
        self.attack_probabilities()
    }
    // TODO: extend to multiple quorums and crash failures
    /// Adversary strategy (currently disabled, see `run`): spread the Byzantine
    /// budget evenly over the bins of a Fait Accompli 1 partition-fallback
    /// sampler, targeting the first quorum only.
    ///
    /// NOTE(review): this passes `&[]` as the `crashed` slice to
    /// `run_with_corrupted`, which indexes it unconditionally — re-enabling
    /// this as-is would panic; verify before use.
    fn _run_bin_packing(
        &self,
        adversary_strength: AdversaryStrength,
        known_attack_probs: &[f64],
    ) -> Vec<f64> {
        debug!("running attack with bin-packing attack");
        let fa1_sampler = FaitAccompli1Sampler::new_with_partition_fallback(
            self.validators.clone(),
            self.quorum_sizes[0] as u64,
        );
        let bin_sampler = fa1_sampler.fallback_sampler;
        let vals = &bin_sampler.bin_validators;
        let stakes = &bin_sampler.bin_stakes;
        let byzantine_bins =
            adversary_strength.byzantine / (vals.len() as f64 / self.quorum_sizes[0] as f64);
        let _crashed_bins =
            adversary_strength.crashed / (vals.len() as f64 / self.quorum_sizes[0] as f64);
        let stake_per_bin = self.total_stake as f64 / self.quorum_sizes[0] as f64;
        (0..PARALLELISM).into_par_iter().for_each(|_| {
            // greedily corrupt less than `attack_frac` of validators
            // evenly spread over the bins!
            let mut corrupted = vec![false; self.validators.len()];
            let mut total_corrupted_stake = 0.0;
            for bin in 0..vals.len() {
                let mut corrupted_stake = 0.0;
                let mut entries: Vec<_> = stakes[bin].iter().zip(vals[bin].iter()).collect();
                entries.sort_by_key(|(s, _)| **s);
                // validators may appear in multiple bins; count already-corrupted stake
                for (stake, id) in &entries {
                    if corrupted[**id as usize] {
                        corrupted_stake += **stake as f64;
                    }
                }
                for (stake, id) in entries {
                    let val_stake = self.validators[*id as usize].stake as f64;
                    if corrupted[*id as usize] {
                        continue;
                    }
                    if corrupted_stake + (*stake as f64)
                        < stake_per_bin * byzantine_bins
                        // && val_stake < stake_per_bin
                        && total_corrupted_stake + val_stake < self.total_stake as f64 * adversary_strength.byzantine
                    {
                        corrupted[*id as usize] = true;
                        corrupted_stake += *stake as f64;
                        total_corrupted_stake += val_stake;
                    }
                }
            }
            assert!(total_corrupted_stake < self.total_stake as f64 * adversary_strength.byzantine);
            for _ in 0..TOTAL_ITERATIONS / PARALLELISM / WRITE_BATCH {
                let (tests, hit_max_failures) =
                    self.run_with_corrupted(WRITE_BATCH, &corrupted, &[]);
                *self.tests.write().unwrap() += tests;
                if hit_max_failures || self.is_better_attack_known(known_attack_probs) {
                    break;
                }
            }
        });
        self.attack_probabilities()
    }
    /// Draws `iterations` quorum samples and evaluates every attack scenario
    /// against each, updating the shared `failures` counters.
    ///
    /// Returns `(samples_drawn, hit_max_failures)`, where the flag is `true`
    /// iff EVERY attack has reached [`MAX_FAILURES`] (min over all counters).
    fn run_with_corrupted(
        &self,
        iterations: usize,
        byzantine: &[bool],
        crashed: &[bool],
    ) -> (usize, bool) {
        // thread-local fast RNG, seeded from the global RNG
        let mut rng = SmallRng::from_rng(&mut rand::rng());
        let mut tests = 0;
        for _ in 0..iterations {
            tests += 1;
            // per quorum: how many sampled validators are byzantine / crashed
            let corrupted = self
                .quorum_sizes
                .iter()
                .copied()
                .enumerate()
                .map(|(quorum_index, quorum_size)| {
                    let sampler = &self.samplers[self.quorum_samplers[quorum_index]];
                    let sampled = sampler.sample_multiple(quorum_size, &mut rng);
                    let byzantine_samples =
                        sampled.iter().filter(|v| byzantine[**v as usize]).count();
                    let crashed_samples = sampled.iter().filter(|v| crashed[**v as usize]).count();
                    (byzantine_samples, crashed_samples)
                })
                .collect::<Vec<_>>();
            for (attack_index, attack) in self.attacks.iter().enumerate() {
                if attack.evaluate(&corrupted) {
                    self.failures.write().unwrap()[attack_index] += 1;
                    // stop early once all attacks have enough recorded failures
                    if *self.failures.read().unwrap().iter().min().unwrap() >= MAX_FAILURES {
                        return (tests, true);
                    }
                }
            }
        }
        (tests, false)
    }
fn attack_probabilities(&self) -> Vec<f64> {
let tests = *self.tests.read().unwrap();
let failures = self.failures.read().unwrap();
failures.iter().map(|f| *f as f64 / tests as f64).collect()
}
    /// Returns `true` if, for every attack, the current strategy's empirical
    /// failure rate is already at least `MARGIN`× below the best known one —
    /// i.e. continuing this strategy is unlikely to improve on the known attack.
    ///
    /// NOTE(review): a known probability of 0.0 makes the comparison
    /// `tests > MARGIN * failures / 0.0` false (division yields inf/NaN),
    /// so simulations never abort early in that case — confirm intended.
    fn is_better_attack_known(&self, known_attack_probs: &[f64]) -> bool {
        const MARGIN: f64 = 3.0;
        let tests = *self.tests.read().unwrap();
        let failures = self.failures.read().unwrap();
        known_attack_probs
            .iter()
            .enumerate()
            .all(|(i, p)| tests as f64 > MARGIN * failures[i] as f64 / *p)
    }
    /// Resets the shared test and failure counters between adversary strategies.
    fn reset(&self) {
        *self.tests.write().unwrap() = 0;
        *self.failures.write().unwrap() = vec![0; self.attacks.len()];
    }
}
/// Named wrapper for a [`QuorumThreshold`].
#[derive(Clone, Debug)]
pub struct QuorumAttack {
    // human-readable name of the attack scenario (used in log output)
    name: String,
    // threshold condition under which the attack succeeds
    quorum: QuorumThreshold,
}
impl QuorumAttack {
    /// Returns `true` if the attack succeeds given the per-quorum
    /// `(byzantine, crashed)` sample counts.
    fn evaluate(&self, corrupted: &[(usize, usize)]) -> bool {
        self.quorum.evaluate(corrupted)
    }
}
/// Represents a threshold for one or more quorums.
///
/// This is used to model different attack scenarios in [`QuorumRobustnessTest`].
#[derive(Clone, Debug)]
pub enum QuorumThreshold {
    /// This threshold is reached if the `quorum` contains at least `threshold` corrupted validators.
    ///
    /// Where "corrupted" means Byzantine (plus crashed if `is_crash_enough` is true).
    Simple {
        // index of the quorum in the per-sample corruption counts
        quorum: usize,
        // minimum number of corrupted samples to trigger
        threshold: usize,
        // whether crashed validators count towards the threshold as well
        is_crash_enough: bool,
    },
    /// This threshold is reached if all of the contained thresholds are reached.
    #[allow(dead_code)] // currently unused
    All(Vec<Self>),
    /// This threshold is reached if at least one of the contained thresholds are reached.
    Any(Vec<Self>),
}
impl QuorumThreshold {
    /// Returns a [`QuorumThreshold`] that is the logical OR of `self` and `other`.
    ///
    /// If `self` is already an `Any`, `other` is appended to it instead of
    /// introducing another nesting level.
    pub fn or(self, other: Self) -> Self {
        match self {
            Self::Any(mut inner) => {
                inner.push(other);
                Self::Any(inner)
            }
            non_any => Self::Any(vec![non_any, other]),
        }
    }
    /// Turns this [`QuorumThreshold`] into a [`QuorumAttack`] with the given name.
    pub fn into_attack(self, name: &str) -> QuorumAttack {
        QuorumAttack {
            quorum: self,
            name: name.to_owned(),
        }
    }
    /// Evaluates this threshold against the per-quorum
    /// `(byzantine, crashed)` sample counts.
    fn evaluate(&self, corrupted: &[(usize, usize)]) -> bool {
        match self {
            Self::Simple {
                quorum,
                threshold,
                is_crash_enough,
            } => {
                let (byzantine, crashed) = corrupted[*quorum];
                let relevant = if *is_crash_enough {
                    byzantine + crashed
                } else {
                    byzantine
                };
                relevant >= *threshold
            }
            Self::All(inner) => inner.iter().all(|t| t.evaluate(corrupted)),
            Self::Any(inner) => inner.iter().any(|t| t.evaluate(corrupted)),
        }
    }
}
/// Updates `old_vec` in place to the element-wise maximum of both slices.
///
/// Only the overlapping prefix is updated; extra elements of either slice
/// are left untouched. NaN handling follows [`f64::max`].
fn vec_max(old_vec: &mut [f64], new_vec: &[f64]) {
    let overlap = old_vec.len().min(new_vec.len());
    for i in 0..overlap {
        old_vec[i] = old_vec[i].max(new_vec[i]);
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Exercises `Simple`, `All`, and `Any` thresholds against a range of
    /// `(byzantine, crashed)` count combinations, including the
    /// `is_crash_enough` distinction.
    #[test]
    fn quorum_threshold() {
        let threshold1 = QuorumThreshold::Simple {
            quorum: 0,
            threshold: 1,
            is_crash_enough: false,
        };
        let threshold2 = QuorumThreshold::Simple {
            quorum: 1,
            threshold: 2,
            is_crash_enough: true,
        };
        let threshold_both = QuorumThreshold::All(vec![threshold1.clone(), threshold2.clone()]);
        let threshold_either = QuorumThreshold::Any(vec![threshold1.clone(), threshold2.clone()]);
        // nothing corrupted: no threshold reached
        let corrupted = [(0, 0), (0, 0)];
        assert!(!threshold1.evaluate(&corrupted));
        assert!(!threshold2.evaluate(&corrupted));
        assert!(!threshold_both.evaluate(&corrupted));
        assert!(!threshold_either.evaluate(&corrupted));
        // crash in quorum 0 does not count (is_crash_enough: false)
        let corrupted = [(0, 1), (0, 0)];
        assert!(!threshold1.evaluate(&corrupted));
        assert!(!threshold2.evaluate(&corrupted));
        assert!(!threshold_both.evaluate(&corrupted));
        assert!(!threshold_either.evaluate(&corrupted));
        let corrupted = [(1, 0), (0, 0)];
        assert!(threshold1.evaluate(&corrupted));
        assert!(!threshold2.evaluate(&corrupted));
        assert!(!threshold_both.evaluate(&corrupted));
        assert!(threshold_either.evaluate(&corrupted));
        let corrupted = [(1, 0), (2, 0)];
        assert!(threshold1.evaluate(&corrupted));
        assert!(threshold2.evaluate(&corrupted));
        assert!(threshold_both.evaluate(&corrupted));
        assert!(threshold_either.evaluate(&corrupted));
        // crashes count towards threshold2 (is_crash_enough: true)
        let corrupted = [(1, 0), (0, 2)];
        assert!(threshold1.evaluate(&corrupted));
        assert!(threshold2.evaluate(&corrupted));
        assert!(threshold_both.evaluate(&corrupted));
        assert!(threshold_either.evaluate(&corrupted));
        let corrupted = [(1, 0), (1, 1)];
        assert!(threshold1.evaluate(&corrupted));
        assert!(threshold2.evaluate(&corrupted));
        assert!(threshold_both.evaluate(&corrupted));
        assert!(threshold_either.evaluate(&corrupted));
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/pyjama/parameters.rs | src/bin/simulations/pyjama/parameters.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Parameters for Pyjama, the MCP protocol.
//!
//!
use alpenglow::ValidatorId;
use alpenglow::disseminator::rotor::SamplingStrategy;
use log::info;
use rand::prelude::*;
use statrs::distribution::{Binomial, DiscreteCDF};
use crate::discrete_event_simulator::Builder;
/// Parameters for the Pyjama MCP protocol.
#[derive(Clone, Copy, Debug)]
pub struct PyjamaParameters {
    /// Number of proposers sampled per instance.
    pub num_proposers: u64,
    /// Number of relays sampled per instance.
    pub num_relays: u64,
    /// Number of relay shares with which decoding becomes possible
    /// (used as the adversary's decryption bound in `break_hiding_probability`).
    pub can_decode_threshold: u64,
    /// Number of relay shares at which decoding should happen.
    pub should_decode_threshold: u64,
    /// Number of relay attestations required.
    pub attestations_threshold: u64,
    /// Number of slices the block is split into.
    pub num_slices: u64,
}
/// Specific instance of the Pyjama protocol.
pub struct PyjamaInstance {
    // leader sampled via the builder's leader sampler
    pub leader: ValidatorId,
    // `num_proposers` validators sampled via the proposer sampler
    pub proposers: Vec<ValidatorId>,
    // `num_relays` validators sampled via the relay sampler
    pub relays: Vec<ValidatorId>,
    pub params: PyjamaParameters,
}
/// Builder for Pyjama instances with a specific set of parameters.
pub struct PyjamaInstanceBuilder<L: SamplingStrategy, P: SamplingStrategy, R: SamplingStrategy> {
    leader_sampler: L,
    proposer_sampler: P,
    relay_sampler: R,
    params: PyjamaParameters,
}
impl<L, P, R> PyjamaInstanceBuilder<L, P, R>
where
    L: SamplingStrategy,
    P: SamplingStrategy,
    R: SamplingStrategy,
{
    /// Creates a new builder instance, with the provided sampling strategies.
    pub fn new(
        leader_sampler: L,
        proposer_sampler: P,
        relay_sampler: R,
        params: PyjamaParameters,
    ) -> Self {
        Self {
            leader_sampler,
            proposer_sampler,
            relay_sampler,
            params,
        }
    }
}
impl<L, P, R> Builder for PyjamaInstanceBuilder<L, P, R>
where
    L: SamplingStrategy,
    P: SamplingStrategy,
    R: SamplingStrategy,
{
    type Params = PyjamaParameters;
    type Instance = PyjamaInstance;
    /// Samples a fresh instance: one leader, `num_proposers` proposers and
    /// `num_relays` relays, each via its configured sampling strategy.
    fn build(&self, rng: &mut impl Rng) -> PyjamaInstance {
        PyjamaInstance {
            leader: self.leader_sampler.sample(rng),
            proposers: self
                .proposer_sampler
                .sample_multiple(self.params.num_proposers as usize, rng),
            relays: self
                .relay_sampler
                .sample_multiple(self.params.num_relays as usize, rng),
            params: self.params,
        }
    }
    /// Returns the parameters this builder constructs instances with.
    fn params(&self) -> &Self::Params {
        &self.params
    }
}
/// Adversary strength, expressed as fractions of total stake.
#[derive(Clone, Copy, Debug)]
pub struct AdversaryStrength {
    /// Fraction of stake that may crash.
    pub crashed: f64,
    /// Fraction of stake that may be Byzantine.
    pub byzantine: f64,
}
impl PyjamaParameters {
    /// Generates a new balanced parameter set, equally resistant against all attacks.
    ///
    /// Thresholds: 25% / 50% / 75% of `num_relays` (rounded up).
    pub fn new(num_proposers: u64, num_relays: u64) -> Self {
        Self {
            num_proposers,
            num_relays,
            can_decode_threshold: (num_relays * 25).div_ceil(100),
            should_decode_threshold: (num_relays * 50).div_ceil(100),
            attestations_threshold: (num_relays * 75).div_ceil(100),
            num_slices: 1,
        }
    }
    /// Generates a new parameter set based on the first ones proposed in the PJM paper.
    ///
    /// Thresholds: 40% / 60% / 80% of `num_relays` (rounded up).
    pub fn new_paper1(num_proposers: u64, num_relays: u64) -> Self {
        Self {
            num_proposers,
            num_relays,
            can_decode_threshold: (num_relays * 40).div_ceil(100),
            should_decode_threshold: (num_relays * 60).div_ceil(100),
            attestations_threshold: (num_relays * 80).div_ceil(100),
            num_slices: 1,
        }
    }
    /// Generates a new parameter set based on the second ones proposed in the PJM paper.
    ///
    /// Thresholds: 30% / 60% / 80% of `num_relays` (rounded up).
    pub fn new_paper2(num_proposers: u64, num_relays: u64) -> Self {
        Self {
            num_proposers,
            num_relays,
            can_decode_threshold: (num_relays * 30).div_ceil(100),
            should_decode_threshold: (num_relays * 60).div_ceil(100),
            attestations_threshold: (num_relays * 80).div_ceil(100),
            num_slices: 1,
        }
    }
    /// Generates a new parameter set prioritizing hiding over liveness.
    ///
    /// Thresholds: 40% / 60% / 80% of `num_relays` (rounded up).
    /// NOTE(review): currently identical to `new_paper1` — confirm intended.
    pub fn new_hiding(num_proposers: u64, num_relays: u64) -> Self {
        Self {
            num_proposers,
            num_relays,
            can_decode_threshold: (num_relays * 40).div_ceil(100),
            should_decode_threshold: (num_relays * 60).div_ceil(100),
            attestations_threshold: (num_relays * 80).div_ceil(100),
            num_slices: 1,
        }
    }
    /// Generates a new parameter set prioritizing liveness over hiding.
    ///
    /// Thresholds: 20% / 47% / 73% of `num_relays` (rounded up).
    pub fn new_liveness(num_proposers: u64, num_relays: u64) -> Self {
        Self {
            num_proposers,
            num_relays,
            can_decode_threshold: (num_relays * 20).div_ceil(100),
            should_decode_threshold: (num_relays * 47).div_ceil(100),
            attestations_threshold: (num_relays * 73).div_ceil(100),
            num_slices: 1,
        }
    }
    /// Generates a new parameter set prioritizing permanent liveness failures over all others.
    ///
    /// Thresholds: 23% / 53% / 76% of `num_relays` (rounded up).
    pub fn new_permanent_liveness(num_proposers: u64, num_relays: u64) -> Self {
        Self {
            num_proposers,
            num_relays,
            can_decode_threshold: (num_relays * 23).div_ceil(100),
            should_decode_threshold: (num_relays * 53).div_ceil(100),
            attestations_threshold: (num_relays * 76).div_ceil(100),
            num_slices: 1,
        }
    }
/// Proobability that the adversary can break the hiding property in a slot.
pub fn break_hiding_probability(&self, adv_strength: AdversaryStrength) -> f64 {
// probability that the adversary controls enough relays to decrypt
let byzantine = adv_strength.byzantine;
let relays_dist = Binomial::new(byzantine, self.num_relays).unwrap();
let relays_needed = self.can_decode_threshold;
1.0 - relays_dist.cdf(relays_needed - 1)
}
/// Probability that the adversary can selectively censor proposers in a slot.
//
// just as hard with `num_relays - attestations_threshold` crashed nodes
pub fn selective_censorship_probability(&self, adv_strength: AdversaryStrength) -> f64 {
// probability that only the adversary proposes
let failed = adv_strength.crashed + adv_strength.byzantine;
let proposers_dist = Binomial::new(failed, self.num_proposers).unwrap();
let prob_all_proposers = 1.0 - proposers_dist.cdf(self.num_proposers - 1);
// probability that the adversary can exclude all proposers
let byzantine = adv_strength.byzantine;
let relays_dist = Binomial::new(byzantine, self.num_relays).unwrap();
let relays_needed = self.attestations_threshold - self.should_decode_threshold;
let prob_censor_relays = 1.0 - relays_dist.cdf(relays_needed - 1);
// probability that either attack works
1.0 - (1.0 - prob_all_proposers) * (1.0 - prob_censor_relays)
}
/// Probability that the adversary can cause a temporary liveness failure in a slot.
pub fn temporary_liveness_failure_probability(&self, adv_strength: AdversaryStrength) -> f64 {
// probability that only the adversary proposes
let failed = adv_strength.crashed + adv_strength.byzantine;
let proposers_dist = Binomial::new(failed, self.num_proposers).unwrap();
let prob_no_proposals = 1.0 - proposers_dist.cdf(self.num_proposers - 1);
// probability that the adversary can prevent the leader from producing a non-empty block
let relays_dist = Binomial::new(failed, self.num_relays).unwrap();
let relays_to_hold_protocol = self.should_decode_threshold - self.can_decode_threshold;
let relays_to_censor_proposers = self.attestations_threshold - self.should_decode_threshold;
let relays_to_censor_leader = self.num_relays - self.attestations_threshold;
let relays_needed = relays_to_hold_protocol
.min(relays_to_censor_proposers)
.min(relays_to_censor_leader);
let prob_censor_relays = 1.0 - relays_dist.cdf(relays_needed - 1);
// probability that either attack works
1.0 - (1.0 - prob_no_proposals) * (1.0 - prob_censor_relays)
}
/// Probability that the adversary can cause a permanent liveness failure.
///
/// The adversary can achieve this by withholding enough shreds that should be revealed.
/// This analyzes the worst case where a batch got `self.should_decode_threshold` attestations.
pub fn permanent_liveness_failure_probability(&self, adv_stength: AdversaryStrength) -> f64 {
// probability that the adversary can withhold enough shreds
let byzantine = adv_stength.byzantine;
let relays_dist = Binomial::new(byzantine, self.num_relays).unwrap();
let relays_needed = self.should_decode_threshold - self.can_decode_threshold;
1.0 - relays_dist.cdf(relays_needed - 1)
}
/// Calculates and prints attack sucess probabilities.
///
/// Capabilities of the adversary are specified in the `adv_strength` parameter.
pub fn print_failure_probabilities(&self, adv_strength: AdversaryStrength) {
info!(
"Pyjama parameters: proposers={}, relays={}, {:.2}/{:.2}/{:.2}",
self.num_proposers,
self.num_relays,
self.can_decode_threshold as f64 / self.num_relays as f64 * 100.0,
self.should_decode_threshold as f64 / self.num_relays as f64 * 100.0,
self.attestations_threshold as f64 / self.num_relays as f64 * 100.0
);
info!(
"successful attack probabilities (crashed={}, byzantine={}):",
adv_strength.crashed, adv_strength.byzantine
);
info!(
"break hiding: {:.2}",
self.break_hiding_probability(adv_strength).log10()
);
info!(
"selective censorship: {:.2}",
self.selective_censorship_probability(adv_strength).log10()
);
info!(
"temporary liveness failure: {:.2}",
self.temporary_liveness_failure_probability(adv_strength)
.log10()
);
info!(
"permanent liveness failure: {:.2}",
self.permanent_liveness_failure_probability(adv_strength)
.log10()
);
}
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_mcp_parameters() {
        // Balanced parameters use 25%/50%/75% thresholds, rounded up.
        let params = PyjamaParameters::new(2, 5);
        assert_eq!((params.num_proposers, params.num_relays), (2, 5));
        assert_eq!(params.can_decode_threshold, 2); // ceil(5 * 0.25)
        assert_eq!(params.should_decode_threshold, 3); // ceil(5 * 0.50)
        assert_eq!(params.attestations_threshold, 4); // ceil(5 * 0.75)
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/pyjama/latency.rs | src/bin/simulations/pyjama/latency.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Latency simulation for Pyjama, the MCP protocol.
//!
//! So far, this test can only simulate the happy path.
use std::marker::PhantomData;
use alpenglow::ValidatorId;
use alpenglow::disseminator::rotor::{SamplingStrategy, StakeWeightedSampler};
use alpenglow::shredder::{DATA_SHREDS, MAX_DATA_PER_SHRED, TOTAL_SHREDS};
use super::{PyjamaInstance, PyjamaInstanceBuilder, PyjamaParams};
use crate::alpenglow::AlpenglowLatencySimulation;
use crate::discrete_event_simulator::{
Builder, Event, Protocol, Resources, SimTime, SimulationEngine, SimulationEnvironment, Stage,
Timings, column_max,
};
use crate::rotor::RotorParams;
/// Wrapper type for the Pyjama latency simulation.
///
/// This type implements the `Protocol` trait and can be passed to the simulation engine.
/// There is probably never a need to construct this type directly.
pub struct PyjamaLatencySimulation<L: SamplingStrategy, P: SamplingStrategy, R: SamplingStrategy> {
    // Zero-sized markers tying the otherwise-unused type parameters to the
    // struct; the type is only used as a `Protocol` marker.
    _leader_sampler: PhantomData<L>,
    _proposer_sampler: PhantomData<P>,
    // NOTE(review): field says "rotor" but `R` is the relay sampler in the
    // rest of this module -- consider renaming for consistency.
    _rotor_sampler: PhantomData<R>,
}
impl<L, P, R> Protocol for PyjamaLatencySimulation<L, P, R>
where
    L: SamplingStrategy,
    P: SamplingStrategy,
    R: SamplingStrategy,
{
    // Associated types wiring the Pyjama events, stages, parameters and
    // instance builder into the generic discrete-event simulation engine.
    type Event = LatencyEvent;
    type Stage = LatencyTestStage;
    type Params = PyjamaParams;
    type Instance = PyjamaInstance;
    type Builder = PyjamaInstanceBuilder<L, P, R>;
}
/// Stages of the Pyjama latency simulation.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum LatencyTestStage {
    /// Proposers send their shreds.
    Propose,
    /// Relays collect their shred from every proposer.
    Relay,
    /// The leader collects relay attestations.
    Attestation,
    /// Consensus (simulated via Alpenglow) runs on the attested batch.
    Consensus,
    /// Relays release shreds and validators reconstruct the block.
    Reconstruct,
}
impl Stage for LatencyTestStage {
    type Event = LatencyEvent;
    type Params = PyjamaParams;

    /// The pipeline starts with the proposers sending their shreds.
    fn first() -> Self {
        Self::Propose
    }

    /// Advances to the following stage; `Reconstruct` is terminal.
    fn next(&self) -> Option<Self> {
        Some(match self {
            Self::Propose => Self::Relay,
            Self::Relay => Self::Attestation,
            Self::Attestation => Self::Consensus,
            Self::Consensus => Self::Reconstruct,
            Self::Reconstruct => return None,
        })
    }

    /// The events simulated within each stage.
    fn events(&self, _params: &Self::Params) -> Vec<LatencyEvent> {
        match self {
            Self::Propose => vec![LatencyEvent::Propose],
            Self::Relay => vec![LatencyEvent::Relay],
            Self::Attestation => vec![LatencyEvent::Attestation],
            Self::Consensus => vec![LatencyEvent::Consensus],
            Self::Reconstruct => vec![
                LatencyEvent::Release,
                LatencyEvent::Reconstruct,
                LatencyEvent::Final,
            ],
        }
    }
}
/// Events that can occur at each validator during the Pyjama latency simulation.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum LatencyEvent {
    /// A proposer starts sending its block.
    Propose,
    /// A relay has received its shred from every proposer.
    Relay,
    /// The leader has received attestations from enough relays.
    Attestation,
    /// The nested consensus simulation has finalized at a validator.
    Consensus,
    /// A relay starts releasing the shreds it holds.
    Release,
    /// A validator holds enough shreds to reconstruct the block.
    Reconstruct,
    /// Both consensus and reconstruction are done at a validator.
    Final,
}
impl Event for LatencyEvent {
    type Params = PyjamaParams;
    type Instance = PyjamaInstance;

    /// Human-readable name used when reporting statistics.
    fn name(&self) -> String {
        match self {
            Self::Propose => "propose",
            Self::Relay => "relay",
            Self::Attestation => "attestation",
            Self::Consensus => "consensus",
            Self::Release => "release",
            Self::Reconstruct => "reconstruct",
            Self::Final => "final",
        }
        .to_owned()
    }

    /// All Pyjama events are included in the timing statistics.
    fn should_track_stats(&self) -> bool {
        true
    }

    /// Event dependency graph: a linear chain, except that `Final` depends
    /// on both `Consensus` and `Reconstruct`.
    fn dependencies(&self, _params: &PyjamaParams) -> Vec<Self> {
        match self {
            Self::Propose => vec![],
            Self::Relay => vec![Self::Propose],
            Self::Attestation => vec![Self::Relay],
            Self::Consensus => vec![Self::Attestation],
            Self::Release => vec![Self::Consensus],
            Self::Reconstruct => vec![Self::Release],
            Self::Final => vec![Self::Consensus, Self::Reconstruct],
        }
    }

    /// Computes, per validator, the simulated time at which this event occurs.
    ///
    /// `dependency_timings` holds one row per dependency (in the order given
    /// by [`Self::dependencies`]), each row indexed by validator id.
    fn calculate_timing(
        &self,
        start_time: SimTime,
        dependency_timings: &[&[SimTime]],
        instance: &PyjamaInstance,
        resources: &mut Resources,
        environment: &SimulationEnvironment,
    ) -> Vec<SimTime> {
        match self {
            Self::Propose => {
                // Each proposer starts sending its block as soon as its
                // uplink is free after `start_time`.
                let mut timings = vec![start_time; environment.num_validators()];
                for &proposer in &instance.proposers {
                    // One shred per relay, per slice.
                    let block_bytes = instance.params.num_slices as usize
                        * instance.params.num_relays as usize
                        * MAX_DATA_PER_SHRED;
                    let tx_time = environment.transmission_delay(block_bytes, proposer);
                    let start_sending_time =
                        resources.network.time_next_free_after(proposer, start_time);
                    // NOTE(review): the transmission is scheduled at
                    // `start_time` rather than `start_sending_time` --
                    // confirm this matches the `network.schedule` contract.
                    resources.network.schedule(proposer, start_time, tx_time);
                    timings[proposer as usize] = start_sending_time;
                }
                timings
            }
            Self::Relay => {
                let mut timings = vec![SimTime::ZERO; environment.num_validators()];
                // TODO: actually run for more than 1 slot
                for (relay_offset, &relay) in instance.relays.iter().enumerate() {
                    // A relay is done once it holds its shred from EVERY
                    // proposer; shreds are sent sequentially, so relay `i`'s
                    // shred leaves after `i + 1` shred transmission times.
                    let shreds_from_all_proposers = instance
                        .proposers
                        .iter()
                        .map(|proposer| {
                            let start_send_time = dependency_timings[0][*proposer as usize];
                            let prop_delay = environment.propagation_delay(*proposer, relay);
                            let shred_send_index = relay_offset + 1;
                            let tx_delay = environment.transmission_delay(
                                shred_send_index * MAX_DATA_PER_SHRED,
                                *proposer,
                            );
                            start_send_time + prop_delay + tx_delay
                        })
                        .max()
                        .unwrap();
                    timings[relay as usize] =
                        timings[relay as usize].max(shreds_from_all_proposers);
                }
                timings
            }
            Self::Attestation => {
                // Only the leader gets a timing here: the arrival of the
                // `attestations_threshold`-th fastest relay shred.
                let mut timings = vec![SimTime::NEVER; environment.num_validators()];
                let mut shred_timings = vec![SimTime::NEVER; instance.params.num_relays as usize];
                for (i, relay) in instance.relays.iter().enumerate() {
                    shred_timings[i] = dependency_timings[0][*relay as usize]
                        + environment.propagation_delay(*relay, instance.leader)
                        + environment.transmission_delay(MAX_DATA_PER_SHRED, *relay);
                }
                shred_timings.sort_unstable();
                timings[instance.leader as usize] =
                    shred_timings[instance.params.attestations_threshold as usize - 1];
                timings
            }
            Self::Consensus => {
                // Consensus starts once the leader has enough attestations.
                let consensus_start_time = dependency_timings[0][instance.leader as usize];
                // TODO: find better way of integrating sub-protocol
                let slices_required = 3;
                let rotor_params = RotorParams {
                    data_shreds: DATA_SHREDS,
                    shreds: TOTAL_SHREDS,
                    slices: slices_required,
                };
                let rotor_builder = crate::rotor::RotorInstanceBuilder::new(
                    StakeWeightedSampler::new(environment.validators.clone()),
                    StakeWeightedSampler::new(environment.validators.clone()),
                    rotor_params,
                );
                let builder = crate::alpenglow::LatencySimInstanceBuilder::new(
                    rotor_builder,
                    crate::alpenglow::LatencySimParams::new(rotor_params, 4, 1),
                );
                let consensus_instance = builder.build(&mut rand::rng());
                let engine = SimulationEngine::<AlpenglowLatencySimulation<_, _>>::new(
                    builder,
                    environment.clone(),
                );
                let mut timings = Timings::new(consensus_start_time);
                engine.run(&consensus_instance, &mut timings);
                // Per-validator finalization times of the nested simulation.
                timings
                    .get(crate::alpenglow::LatencyEvent::Final)
                    .unwrap()
                    .to_vec()
            }
            Self::Release => {
                let mut timings = vec![SimTime::NEVER; environment.num_validators()];
                for relay in &instance.relays {
                    let dep_time = dependency_timings[0][*relay as usize];
                    // Each relay sends one shred per proposer to every validator.
                    let block_bytes = environment.num_validators()
                        * instance.params.num_proposers as usize
                        * MAX_DATA_PER_SHRED;
                    let tx_time = environment.transmission_delay(block_bytes, *relay);
                    let start_sending_time =
                        resources.network.time_next_free_after(*relay, dep_time);
                    // NOTE(review): scheduled at `dep_time` rather than
                    // `start_sending_time` -- same pattern as in `Propose`,
                    // confirm `network.schedule` semantics.
                    resources.network.schedule(*relay, dep_time, tx_time);
                    timings[*relay as usize] = start_sending_time;
                }
                timings
            }
            Self::Reconstruct => {
                let mut timings = vec![SimTime::NEVER; environment.num_validators()];
                let mut shred_timings = vec![SimTime::NEVER; instance.params.num_relays as usize];
                for (recipient, timing) in timings.iter_mut().enumerate() {
                    for (i, relay) in instance.relays.iter().enumerate() {
                        // Relays send to recipients in id order, so recipient
                        // `r` waits for `r + 1` per-recipient transmissions.
                        shred_timings[i] = dependency_timings[0][*relay as usize]
                            + environment.propagation_delay(*relay, recipient as ValidatorId)
                            + environment.transmission_delay(
                                (recipient + 1)
                                    * instance.params.num_proposers as usize
                                    * MAX_DATA_PER_SHRED,
                                *relay,
                            );
                    }
                    shred_timings.sort_unstable();
                    // Reconstruction succeeds with the `can_decode_threshold`
                    // fastest shreds.
                    *timing = shred_timings[instance.params.can_decode_threshold as usize - 1];
                }
                timings
            }
            // Final = later of consensus and reconstruction, per validator.
            Self::Final => column_max(dependency_timings),
        }
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/pyjama/robustness.rs | src/bin/simulations/pyjama/robustness.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Calculations about the robustness of the Pyjama MCP protocol.
//!
//! Currently, this just runs some static calculations on the set of parameters.
//!
//! In the future, this would also simulate attack scenarios for a specific stake distribution.
//! This is analogous to what is done for Rotor in [`crate::rotor::robustness`];
use std::fs::File;
use alpenglow::disseminator::rotor::FaitAccompli1Sampler;
use alpenglow::network::simulated::stake_distribution::{
VALIDATOR_DATA, validators_from_validator_data,
};
use color_eyre::Result;
use super::parameters::{AdversaryStrength, PyjamaParameters};
use crate::quorum_robustness::{QuorumRobustnessTest, QuorumThreshold};
/// Number of concurrent proposers used in all robustness calculations.
const NUM_PROPOSERS: u64 = 16;
/// Number of relays used in all robustness calculations.
const NUM_RELAYS: u64 = 512;
/// Adversary assumed for the static calculations: no crashed nodes,
/// byzantine fraction of 0.18.
const ADVERSARY_STRENGTH: AdversaryStrength = AdversaryStrength {
    crashed: 0.0,
    byzantine: 0.18,
};
pub fn run_robustness_tests() {
PyjamaParameters::new(NUM_PROPOSERS, NUM_RELAYS)
.print_failure_probabilities(ADVERSARY_STRENGTH);
PyjamaParameters::new_paper1(NUM_PROPOSERS, NUM_RELAYS)
.print_failure_probabilities(ADVERSARY_STRENGTH);
PyjamaParameters::new_paper2(NUM_PROPOSERS, NUM_RELAYS)
.print_failure_probabilities(ADVERSARY_STRENGTH);
PyjamaParameters::new_hiding(NUM_PROPOSERS, NUM_RELAYS)
.print_failure_probabilities(ADVERSARY_STRENGTH);
PyjamaParameters::new_liveness(NUM_PROPOSERS, NUM_RELAYS)
.print_failure_probabilities(ADVERSARY_STRENGTH);
PyjamaParameters::new_permanent_liveness(NUM_PROPOSERS, NUM_RELAYS)
.print_failure_probabilities(ADVERSARY_STRENGTH);
}
pub fn run_pyjama_robustness_test(total_shreds: u64) -> Result<()> {
    // Sample committees from the recorded Solana stake distribution.
    let (validators, _with_pings) = validators_from_validator_data(&VALIDATOR_DATA);
    let leader_sampler =
        FaitAccompli1Sampler::new_with_stake_weighted_fallback(validators.clone(), 1);
    let proposer_sampler =
        FaitAccompli1Sampler::new_with_stake_weighted_fallback(validators.clone(), NUM_PROPOSERS);
    let relay_sampler =
        FaitAccompli1Sampler::new_with_stake_weighted_fallback(validators.clone(), total_shreds);
    // `total_shreds` doubles as the number of relays.
    let params = PyjamaParameters::new(NUM_PROPOSERS, total_shreds);
    // Hiding breaks if byzantine relays (quorum index 2) alone reach the
    // decode threshold; crashes are not enough to decrypt.
    let hiding_threshold = QuorumThreshold::Simple {
        quorum: 2,
        threshold: params.can_decode_threshold as usize,
        is_crash_enough: false,
    };
    let hiding_attack = hiding_threshold.into_attack("hiding");
    // Censorship path 1: every proposer (quorum index 1) has failed.
    let all_proposers_threshold = QuorumThreshold::Simple {
        quorum: 1,
        threshold: params.num_proposers as usize,
        is_crash_enough: true,
    };
    // Censorship path 2: enough relays withhold attestations to push the
    // batch below the attestation threshold.
    let relays_to_censor_proposers_threshold = QuorumThreshold::Simple {
        quorum: 2,
        threshold: (params.attestations_threshold - params.should_decode_threshold) as usize,
        is_crash_enough: true,
    };
    let censorship_attack = all_proposers_threshold
        .clone()
        .or(relays_to_censor_proposers_threshold.clone())
        .into_attack("censorship");
    // Relays withholding shreds between the can/should-decode thresholds
    // stall the protocol.
    let relays_to_hold_protocol_threshold = QuorumThreshold::Simple {
        quorum: 2,
        threshold: (params.should_decode_threshold - params.can_decode_threshold) as usize,
        is_crash_enough: true,
    };
    // Relays refusing to attest can keep the leader below the threshold.
    let relays_to_censor_leader_threshold = QuorumThreshold::Simple {
        quorum: 2,
        threshold: (params.num_relays - params.attestations_threshold) as usize,
        is_crash_enough: true,
    };
    // Temporary liveness fails if ANY of the four sub-attacks succeeds.
    let temporary_liveness_attack = QuorumThreshold::Any(vec![
        all_proposers_threshold,
        relays_to_hold_protocol_threshold,
        relays_to_censor_proposers_threshold,
        relays_to_censor_leader_threshold,
    ])
    .into_attack("temporary_liveness");
    // Permanent liveness requires actively byzantine relays (withholding),
    // crashes are not enough here.
    let permanent_liveness_threshold = QuorumThreshold::Simple {
        quorum: 2,
        threshold: (params.should_decode_threshold - params.can_decode_threshold) as usize,
        is_crash_enough: false,
    };
    let permanent_liveness_attack =
        QuorumThreshold::Any(vec![permanent_liveness_threshold]).into_attack("permanent_liveness");
    let test = QuorumRobustnessTest::new(
        validators,
        "solana".to_string(),
        vec![leader_sampler, proposer_sampler, relay_sampler],
        vec![0, 1, 2],
        vec![1, params.num_proposers as usize, params.num_relays as usize],
        vec![
            hiding_attack,
            censorship_attack,
            temporary_liveness_attack,
            permanent_liveness_attack,
        ],
    );
    let adversary_strength = crate::quorum_robustness::AdversaryStrength {
        crashed: 0.05,
        byzantine: 0.2,
    };
    // Results go to data/output/pyjama_robustness_<proposers>_<shreds>.csv.
    let filename = format!(
        "pyjama_robustness_{}_{}",
        params.num_proposers, total_shreds
    );
    let path = std::path::Path::new("data")
        .join("output")
        .join(filename)
        .with_extension("csv");
    let file = File::create(path).unwrap();
    let mut csv_file = csv::Writer::from_writer(file);
    test.run(adversary_strength, &mut csv_file)
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/alpenglow/bandwidth.rs | src/bin/simulations/alpenglow/bandwidth.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Rotor simulated workload and bandwidth testing.
//!
//! This module provides simulations about Rotor bandwidth usage.
//! It simulates the dissemination of multiple slices via Rotor.
//! It tracks the workload (number of shreds/datagrams sent) for each validator.
//! The workload is then used to estimate the bandwidth requirements for each validator.
//!
//! Specifically, it provides the following:
//! - An analysis of Rotor workload distribution per validator,
//! that is the number of shreds/datagrams sent by each validator.
//! - An analysis of Rotor bandwidth usage (in bits/s) per validator to send
//! a given number of slices.
//! - The maximum goodput that can be achieved for a given bandwidth distribution.
use std::fs::File;
use std::sync::{Arc, Mutex};
use alpenglow::disseminator::rotor::SamplingStrategy;
use alpenglow::shredder::MAX_DATA_PER_SHRED;
use alpenglow::{ValidatorId, ValidatorInfo};
use rand::prelude::*;
/// Instance of a bandwidth requirements test.
///
/// This is a wrapper around [`WorkloadTest`].
/// It augments the workload test with bandwidth information.
pub struct BandwidthTest<L: SamplingStrategy, R: SamplingStrategy> {
    /// Outgoing bandwidth (bits/s) assumed for the leader.
    leader_bandwidth: u64,
    /// Per-validator outgoing bandwidth (bits/s), indexed by validator id.
    bandwidths: Vec<u64>,
    /// Underlying workload simulation whose shred counts are converted into
    /// bandwidth figures.
    workload_test: WorkloadTest<L, R>,
}
/// Instance of a bandwidth workload test.
///
/// This simulates the distribution of shreds via Rotor.
/// It tracks the workload (number of shreds/datagrams sent) for each validator.
pub struct WorkloadTest<L: SamplingStrategy, R: SamplingStrategy> {
    /// All validators participating in the simulated network.
    validators: Vec<ValidatorInfo>,
    /// Strategy for sampling the per-slice leader.
    leader_sampler: L,
    /// Strategy for sampling the per-slice relays.
    rotor_sampler: R,
    /// Number of shreds (and thus relays) per slice.
    num_shreds: usize,
    /// Running total of shreds sent by the (rotating) leader role.
    leader_workload: u64,
    /// Running total of shreds sent per validator, indexed by validator id.
    workload: Vec<u64>,
}
impl<L: SamplingStrategy, R: SamplingStrategy> BandwidthTest<L, R> {
    /// Creates a new instance with the given stake and bandwidth distribution.
    ///
    /// `bandwidths` must contain one entry (bits/s) per validator.
    pub fn new(
        validators: &[ValidatorInfo],
        leader_bandwidth: u64,
        bandwidths: Vec<u64>,
        leader_sampler: L,
        rotor_sampler: R,
        num_shreds: usize,
    ) -> Self {
        let workload_test =
            WorkloadTest::new(validators, leader_sampler, rotor_sampler, num_shreds);
        Self::from_workload_test(validators, leader_bandwidth, bandwidths, workload_test)
    }
    // Internal constructor wrapping an already-built workload test.
    fn from_workload_test(
        validators: &[ValidatorInfo],
        leader_bandwidth: u64,
        bandwidths: Vec<u64>,
        workload_test: WorkloadTest<L, R>,
    ) -> Self {
        assert_eq!(validators.len(), bandwidths.len());
        Self {
            leader_bandwidth,
            bandwidths,
            workload_test,
        }
    }
    /// Sets the number of shreds per slice to `num_shreds`.
    ///
    /// Should usually call [`BandwidthTest::reset`] after this.
    pub fn set_num_shreds(&mut self, num_shreds: usize) {
        self.workload_test.set_num_shreds(num_shreds);
    }
    /// Runs multiple iterations of the workload test.
    ///
    /// Each iteration corresponds to distributing one slice, sampling leader
    /// and relays. This only modifies the internal state of the workload test.
    /// Calling `evaluate_supported` or `evaluate_usage` will output the results.
    pub fn run_multiple(&mut self, slices: usize) {
        self.workload_test.run_multiple(slices);
    }
    /// Evaluates the maximum supported goodput.
    ///
    /// Writes the results to the given CSV file.
    /// This is only meaningful after `run_multiple` has been called.
    pub fn evaluate_supported(&self, test_name: &str, csv_file: &Arc<Mutex<csv::Writer<File>>>) {
        let (leader_workload, workload) = self.workload_test.get_workload();
        // Wall-clock time the leader needs to push all of its shreds.
        let seconds =
            (8 * MAX_DATA_PER_SHRED as u64 * leader_workload) as f64 / self.leader_bandwidth as f64;
        // Scale the leader bandwidth down by the most-overloaded validator
        // (highest required-to-available bandwidth ratio).
        let mut min_supported_bandwidth = self.leader_bandwidth as f64;
        for (i, shreds) in workload.iter().enumerate() {
            let bytes = MAX_DATA_PER_SHRED as u64 * shreds;
            let required_bandwidth = (bytes * 8) as f64 / seconds;
            let ratio = required_bandwidth / self.bandwidths[i] as f64;
            if self.leader_bandwidth as f64 / ratio < min_supported_bandwidth {
                min_supported_bandwidth = self.leader_bandwidth as f64 / ratio;
            }
        }
        // Test names are expected as "<stake_distribution>-<sampling_strategy>".
        let parts = test_name.split('-').collect::<Vec<_>>();
        let stake_distribution = parts[0];
        let sampling_strategy = parts[1];
        let mut csv_file = csv_file.lock().unwrap();
        csv_file
            .write_record(&[
                stake_distribution.to_string(),
                sampling_strategy.to_string(),
                self.leader_bandwidth.to_string(),
                self.workload_test.num_shreds.to_string(),
                // NOTE(review): the factor of 2 is undocumented -- confirm
                // what it accounts for (e.g. safety margin).
                (min_supported_bandwidth / 2.0).to_string(),
            ])
            .unwrap();
        csv_file.flush().unwrap();
    }
    /// Evaluates the bandwidth usage.
    ///
    /// Writes the results to the given CSV file.
    /// This is only meaningful after `run_multiple` has been called.
    pub fn evaluate_usage(&self, test_name: &str, csv_file: Arc<Mutex<csv::Writer<File>>>) {
        let (leader_workload, workload) = self.workload_test.get_workload();
        // Per-validator usage relative to the leader's workload.
        let mut bandwidth_usage = vec![(0.0, 0); workload.len()];
        for (i, shreds) in workload.iter().enumerate() {
            let ratio = *shreds as f64 / leader_workload as f64;
            bandwidth_usage[i] = (self.leader_bandwidth as f64 * ratio, i as ValidatorId);
        }
        bandwidth_usage.sort_unstable_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
        // Average sorted usage into 99 bins of 13 validators each.
        // NOTE(review): the 99/13 binning assumes a fixed validator-set size
        // (99 * 13 = 1287) -- confirm this matches the input data.
        let mut binned_bandwidth_usage = vec![(0.0, 0, 0); 99];
        for (i, (bandwidth, _)) in bandwidth_usage.into_iter().enumerate() {
            let bin = i / 13;
            let (b, mut v, c) = binned_bandwidth_usage[bin];
            // Remember the rank of the first validator in each bin.
            if i % 13 == 0 {
                v = i;
            }
            // Running mean of the bin's bandwidth values.
            let new_b = (b * c as f64 + bandwidth) / (c + 1) as f64;
            binned_bandwidth_usage[bin] = (new_b, v, c + 1);
        }
        // Test names are expected as "<stake_distribution>-<sampling_strategy>".
        let parts = test_name.split('-').collect::<Vec<_>>();
        let stake_distribution = parts[0];
        let sampling_strategy = parts[1];
        let mut csv_file = csv_file.lock().unwrap();
        for (bandwidth, validator, _) in binned_bandwidth_usage {
            csv_file
                .write_record(&[
                    stake_distribution.to_string(),
                    sampling_strategy.to_string(),
                    self.leader_bandwidth.to_string(),
                    self.workload_test.num_shreds.to_string(),
                    validator.to_string(),
                    // NOTE(review): hard-coded reference value -- document
                    // what 32.27 Mbit/s represents.
                    32_270_000.0.to_string(),
                    bandwidth.to_string(),
                ])
                .unwrap();
        }
        csv_file.flush().unwrap();
    }
    /// Resets the internal state.
    ///
    /// This is useful for running multiple independent tests.
    pub fn reset(&mut self) {
        self.workload_test.reset();
    }
}
impl<L: SamplingStrategy, R: SamplingStrategy> WorkloadTest<L, R> {
    /// Creates a new instance with the given stake distribution.
    pub fn new(
        validators: &[ValidatorInfo],
        leader_sampler: L,
        rotor_sampler: R,
        num_shreds: usize,
    ) -> Self {
        let num_val = validators.len();
        Self {
            validators: validators.to_vec(),
            leader_sampler,
            rotor_sampler,
            num_shreds,
            leader_workload: 0,
            workload: vec![0; num_val],
        }
    }
    /// Sets the number of shreds per slice to `num_shreds`.
    ///
    /// Should usually call [`WorkloadTest::reset`] after this.
    pub fn set_num_shreds(&mut self, num_shreds: usize) {
        self.num_shreds = num_shreds;
    }
    /// Simulates distribution of `slices` slices via Rotor.
    ///
    /// Adds the workload from these iterations to the running totals.
    pub fn run_multiple(&mut self, slices: usize) {
        let mut rng = SmallRng::from_rng(&mut rand::rng());
        for _ in 0..slices {
            self.run_one(&mut rng);
        }
    }
    /// Simulates distribution of one slice via Rotor.
    ///
    /// Adds the workload from this iteration to the running totals.
    pub fn run_one(&mut self, rng: &mut impl Rng) {
        // The leader sends each shred once, to the respective relay.
        let leader = self.leader_sampler.sample(rng);
        self.leader_workload += self.num_shreds as u64;
        self.workload[leader as usize] += self.num_shreds as u64;
        // Each relay re-broadcasts its shred to everyone except itself and
        // (if different) the leader, who already has it.
        let relays = self.rotor_sampler.sample_multiple(self.num_shreds, rng);
        for relay in relays {
            if leader == relay {
                self.workload[relay as usize] += self.validators.len() as u64 - 1;
            } else {
                self.workload[relay as usize] += self.validators.len() as u64 - 2;
            }
        }
    }
    /// Resets the internal state.
    ///
    /// This is useful for running multiple independent tests.
    pub fn reset(&mut self) {
        // Reset BOTH running totals. Previously `leader_workload` was
        // carried over across resets, which skewed every ratio derived from
        // it in the bandwidth evaluation.
        self.leader_workload = 0;
        self.workload = vec![0; self.validators.len()];
    }
    /// Returns the workload for the leader and the workload per validator.
    ///
    /// Workload is defined as the total number of shreds sent by each.
    pub fn get_workload(&self) -> (u64, &[u64]) {
        (self.leader_workload, &self.workload)
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/alpenglow/latency.rs | src/bin/simulations/alpenglow/latency.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Simulated latency test for the Alpenglow protocol.
//!
//! So far, this test can only simulate the happy path.
use std::hash::Hash;
use std::marker::PhantomData;
use alpenglow::disseminator::rotor::{SamplingStrategy, StakeWeightedSampler};
use rand::prelude::*;
use crate::discrete_event_simulator::{
Builder, Event, Protocol, Resources, SimTime, SimulationEngine, SimulationEnvironment, Stage,
Timings, broadcast_first_arrival_or_dep, broadcast_stake_threshold, column_min,
};
use crate::rotor::{RotorInstance, RotorInstanceBuilder, RotorLatencySimulation, RotorParams};
/// Size (in bytes) assumed per vote in the simulation.
/// Used when charging transmission delays for vote broadcasts.
const VOTE_SIZE: usize = 128 /* sig */ + 64 /* slot, hash, flags */;
/// Size (in bytes) assumed per certificate in the simulation.
/// Used when charging transmission delays for certificate broadcasts.
const CERT_SIZE: usize = 128 /* sig */ + 256 /* bitmap */ + 64 /* slot, hash, flags */;
/// Marker type for the Alpenglow latency simulation.
pub struct AlpenglowLatencySimulation<L: SamplingStrategy, R: SamplingStrategy> {
    // Zero-sized markers tying the otherwise-unused sampler type parameters
    // to the struct; the type is only used as a `Protocol` marker.
    _leader_sampler: PhantomData<L>,
    _rotor_sampler: PhantomData<R>,
}
impl<L: SamplingStrategy, R: SamplingStrategy> Protocol for AlpenglowLatencySimulation<L, R> {
    // Associated types wiring Alpenglow's events, stages, parameters and
    // instance builder into the generic discrete-event simulation engine.
    type Event = LatencyEvent;
    type Stage = LatencyTestStage;
    type Params = LatencySimParams;
    type Instance = LatencySimInstance;
    type Builder = LatencySimInstanceBuilder<L, R>;
}
/// The sequential stages of the latency test.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum LatencyTestStage {
    /// Block dissemination via Rotor.
    Rotor,
    /// Notarization voting and certificate propagation.
    Notar,
    /// Local finalization (fast and slow path).
    Final1,
    /// Network-wide finalization.
    Final2,
}
impl Stage for LatencyTestStage {
type Event = LatencyEvent;
type Params = LatencySimParams;
fn first() -> Self {
Self::Rotor
}
fn next(&self) -> Option<Self> {
match self {
Self::Rotor => Some(Self::Notar),
Self::Notar => Some(Self::Final1),
Self::Final1 => Some(Self::Final2),
Self::Final2 => None,
}
}
fn events(&self, _params: &Self::Params) -> Vec<LatencyEvent> {
match self {
Self::Rotor => vec![LatencyEvent::Block],
Self::Notar => vec![LatencyEvent::LocalNotar, LatencyEvent::Notar],
Self::Final1 => vec![LatencyEvent::LocalFastFinal, LatencyEvent::LocalSlowFinal],
Self::Final2 => vec![LatencyEvent::LocalFinal, LatencyEvent::Final],
}
}
}
/// Events that can occur at each validator.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum LatencyEvent {
    /// A validator has received the block via Rotor.
    Block,
    /// A validator has locally observed 60% stake of votes on the block.
    LocalNotar,
    /// Notarized: own observation or first received certificate.
    Notar,
    /// Fast path: 80% stake of votes observed locally.
    LocalFastFinal,
    /// Slow path: 60% stake of (second-round) votes observed locally.
    LocalSlowFinal,
    /// The earlier of the fast and slow finalization paths.
    LocalFinal,
    /// Finalized: own observation or first received certificate.
    Final,
}
impl Event for LatencyEvent {
    type Params = LatencySimParams;
    type Instance = LatencySimInstance;

    /// Human-readable name used when reporting statistics.
    fn name(&self) -> String {
        match self {
            Self::Block => "block",
            Self::LocalNotar => "local_notar",
            Self::Notar => "notar",
            Self::LocalFastFinal => "local_fast_final",
            Self::LocalSlowFinal => "local_slow_final",
            Self::LocalFinal => "local_final",
            Self::Final => "final",
        }
        .to_owned()
    }

    /// All Alpenglow events are included in the timing statistics.
    fn should_track_stats(&self) -> bool {
        true
    }

    // TODO: simulate actual circular dependency (of certs and status)
    /// Event dependency graph: which events must be timed before this one.
    fn dependencies(&self, _params: &LatencySimParams) -> Vec<Self> {
        match self {
            Self::Block => vec![],
            Self::LocalNotar => vec![Self::Block],
            Self::Notar => vec![Self::LocalNotar],
            Self::LocalFastFinal => vec![Self::Block],
            Self::LocalSlowFinal => vec![Self::Notar],
            Self::LocalFinal => vec![Self::LocalFastFinal, Self::LocalSlowFinal],
            Self::Final => vec![Self::LocalFinal],
        }
    }

    /// Computes, per validator, the simulated time at which this event occurs.
    fn calculate_timing(
        &self,
        start_time: SimTime,
        dependency_timings: &[&[SimTime]],
        instance: &LatencySimInstance,
        resources: &mut Resources,
        environment: &SimulationEnvironment,
    ) -> Vec<SimTime> {
        // Time at which each validator has received votes worth `threshold`
        // of total stake, given everyone broadcasts at its dependency time.
        let broadcast_vote_threshold =
            |resources: &mut Resources, threshold: f64| -> Vec<SimTime> {
                broadcast_stake_threshold(
                    dependency_timings[0],
                    resources,
                    environment,
                    VOTE_SIZE,
                    threshold,
                )
            };
        // The earlier of: own observation, or first received certificate.
        let local_or_cert = |resources: &mut Resources| -> Vec<SimTime> {
            broadcast_first_arrival_or_dep(dependency_timings[0], resources, environment, CERT_SIZE)
        };
        match self {
            Self::Block => {
                // Block arrival comes from a nested Rotor simulation.
                // TODO: find better way of integrating sub-protocol
                let builder = RotorInstanceBuilder::new(
                    StakeWeightedSampler::new(environment.validators.clone()),
                    StakeWeightedSampler::new(environment.validators.clone()),
                    instance.params.rotor_params,
                );
                let engine = SimulationEngine::<RotorLatencySimulation<_, _>>::new(
                    builder,
                    environment.clone(),
                );
                let mut timings = Timings::new(start_time);
                // TODO: actually simulate more than one slot
                engine.run(&instance.rotor_instances[0], &mut timings);
                timings
                    .get(crate::rotor::LatencyEvent::Block)
                    .unwrap()
                    .to_vec()
            }
            // 60% stake of notarization votes observed locally.
            Self::LocalNotar => broadcast_vote_threshold(resources, 0.6),
            Self::Notar => local_or_cert(resources),
            // Fast path: 80% stake of notarization votes observed.
            Self::LocalFastFinal => broadcast_vote_threshold(resources, 0.8),
            // Slow path: 60% stake of votes after notarization.
            Self::LocalSlowFinal => broadcast_vote_threshold(resources, 0.6),
            // Whichever of the two finalization paths completes first.
            Self::LocalFinal => column_min(dependency_timings),
            // NOTE: when sending final cert, final vote is already scheduled
            // TODO: this is not always optimal, handle this properly
            Self::Final => local_or_cert(resources),
        }
    }
}
/// Parameters for the Alpenglow latency simulation.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct LatencySimParams {
    // parameters for the underlying Rotor dissemination sub-protocol
    rotor_params: RotorParams,
    // slots per leader window — not read in the code visible here; TODO confirm use
    num_slots_per_window: usize,
    // total number of slots to simulate (one Rotor instance is sampled per slot)
    num_slots: usize,
}
impl LatencySimParams {
    /// Bundles the Rotor sub-protocol parameters with the simulation's slot counts.
    pub fn new(rotor_params: RotorParams, num_slots_per_window: usize, num_slots: usize) -> Self {
        Self {
            num_slots,
            num_slots_per_window,
            rotor_params,
        }
    }
}
/// A builder for Alpenglow latency simulation instances.
pub struct LatencySimInstanceBuilder<L: SamplingStrategy, R: SamplingStrategy> {
    // sub-builder producing one sampled Rotor instance per slot
    rotor_builder: RotorInstanceBuilder<L, R>,
    params: LatencySimParams,
}
impl<L: SamplingStrategy, R: SamplingStrategy> LatencySimInstanceBuilder<L, R> {
    /// Wraps a Rotor instance builder together with the simulation parameters.
    pub fn new(rotor_builder: RotorInstanceBuilder<L, R>, params: LatencySimParams) -> Self {
        Self {
            params,
            rotor_builder,
        }
    }
}
impl<L: SamplingStrategy, R: SamplingStrategy> Builder for LatencySimInstanceBuilder<L, R> {
    type Params = LatencySimParams;
    type Instance = LatencySimInstance;

    /// Samples one fresh Rotor instance for each simulated slot.
    fn build(&self, rng: &mut impl Rng) -> LatencySimInstance {
        let mut rotor_instances = Vec::with_capacity(self.params.num_slots);
        for _ in 0..self.params.num_slots {
            rotor_instances.push(self.rotor_builder.build(rng));
        }
        LatencySimInstance {
            rotor_instances,
            params: self.params.clone(),
        }
    }

    /// Read access to the simulation parameters.
    fn params(&self) -> &Self::Params {
        &self.params
    }
}
/// A specific instance of the Alpenglow latency simulation.
///
/// Contains one instance of the Rotor latency simulation, [`RotorInstance`], per slot.
pub struct LatencySimInstance {
    // one sampled Rotor instance per slot (indexed by slot)
    rotor_instances: Vec<RotorInstance>,
    params: LatencySimParams,
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/ryse/parameters.rs | src/bin/simulations/ryse/parameters.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Parameters for Ryse, the MCP protocol.
use alpenglow::ValidatorId;
use alpenglow::disseminator::rotor::SamplingStrategy;
use log::info;
use rand::prelude::*;
use statrs::distribution::{Binomial, DiscreteCDF};
use crate::discrete_event_simulator::Builder;
/// Parameters for the Ryse MCP protocol.
///
/// NOTE(review): the optimization code assumes
/// `decode_threshold < relay_notar_threshold <= num_relays` — consider
/// enforcing this invariant at construction time.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct RyseParameters {
    /// Number of slices in a block.
    pub num_slices: u64,
    /// Number of leaders concurrently proposing in each slot.
    pub num_leaders: u64,
    /// Number of relays to use in the modified Rotor disseminator.
    pub num_relays: u64,
    /// Number of shreds required to successfully decode a block.
    pub decode_threshold: u64,
    /// Number of relays' signatures required for a block to become notarized.
    pub relay_notar_threshold: u64,
}
/// Specific instance of the Ryse protocol.
#[derive(Clone, Debug)]
pub struct RyseInstance {
    // the concurrent leaders proposing in this slot
    pub leaders: Vec<ValidatorId>,
    // per-slice relay committees (outer index: slice, inner index: relay offset)
    pub relays: Vec<Vec<ValidatorId>>,
}
/// Builder for Ryse instances with a specific set of parameters.
pub struct RyseInstanceBuilder<L: SamplingStrategy, R: SamplingStrategy> {
    // sampler for the concurrent leaders of a slot
    leader_sampler: L,
    // sampler for each slice's relay committee
    relay_sampler: R,
    params: RyseParameters,
}
impl<L: SamplingStrategy, R: SamplingStrategy> RyseInstanceBuilder<L, R> {
    /// Bundles the two sampling strategies with the protocol parameters.
    pub fn new(leader_sampler: L, relay_sampler: R, params: RyseParameters) -> Self {
        Self {
            params,
            leader_sampler,
            relay_sampler,
        }
    }
}
impl<L: SamplingStrategy, R: SamplingStrategy> Builder for RyseInstanceBuilder<L, R> {
    type Params = RyseParameters;
    type Instance = RyseInstance;

    /// Samples the leader set, then one relay committee per slice.
    fn build(&self, rng: &mut impl Rng) -> RyseInstance {
        let leaders = self
            .leader_sampler
            .sample_multiple(self.params.num_leaders as usize, rng);
        let mut relays = Vec::with_capacity(self.params.num_slices as usize);
        for _ in 0..self.params.num_slices {
            relays.push(
                self.relay_sampler
                    .sample_multiple(self.params.num_relays as usize, rng),
            );
        }
        RyseInstance { leaders, relays }
    }

    /// Read access to the protocol parameters.
    fn params(&self) -> &Self::Params {
        &self.params
    }
}
/// Adversary strength.
#[derive(Clone, Copy, Debug)]
pub struct AdversaryStrength {
    // fraction of nodes that are crashed; used below as a Binomial success
    // probability, so it must lie in [0, 1]
    pub crashed: f64,
    // fraction of nodes that are Byzantine (also used as a probability)
    pub byzantine: f64,
}
impl RyseParameters {
/// Generates a new balanced parameter set, equally resistant against all attacks.
pub fn new(num_leaders: u64, num_relays: u64) -> Self {
Self {
num_leaders,
num_relays,
num_slices: 1,
decode_threshold: (num_relays * 50).div_ceil(100),
relay_notar_threshold: (num_relays * 60).div_ceil(100),
}
}
/// Creates a new builder instance, with the provided sampling strategies.
pub fn optmize(&self, adv_strength: AdversaryStrength) -> Self {
let mut optimal_params = *self;
let mut optimal_attack_prob = self.any_attack_probability(adv_strength);
for relay_notar_threshold in 1..self.num_relays {
for decode_threshold in 1..relay_notar_threshold {
let new_params = RyseParameters {
num_leaders: self.num_leaders,
num_relays: self.num_relays,
num_slices: self.num_slices,
decode_threshold,
relay_notar_threshold,
};
let attack_prob = new_params.any_attack_probability(adv_strength);
if attack_prob < optimal_attack_prob {
optimal_params = new_params;
optimal_attack_prob = attack_prob;
}
}
}
optimal_params
}
/// Returns the probability that the adversary can make any attack in a slot.
pub fn any_attack_probability(&self, adv_strength: AdversaryStrength) -> f64 {
self.break_hiding_probability(adv_strength)
.max(self.selective_censorship_probability(adv_strength))
.max(self.temporary_liveness_failure_probability(adv_strength))
}
/// Probability that the adversary can break the hiding property in a slot.
///
/// This attack is easier for the adversary if no nodes are crashed.
/// So, this is the case that we consider here to get a worst-case analysis.
pub fn break_hiding_probability(&self, adv_strength: AdversaryStrength) -> f64 {
// probability that the adversary controls enough relays to decrypt before proposing
let byzantine = adv_strength.byzantine;
let relays_dist = Binomial::new(byzantine, self.num_relays).unwrap();
let relays_needed =
(self.relay_notar_threshold + self.decode_threshold).saturating_sub(self.num_relays);
1.0 - relays_dist.cdf(relays_needed.saturating_sub(1))
}
/// Probability that the adversary can selectively censor leaders in a slot.
pub fn selective_censorship_probability(&self, adv_strength: AdversaryStrength) -> f64 {
// probability that only the adversary proposes
let failed = adv_strength.crashed + adv_strength.byzantine;
let leaders_dist = Binomial::new(failed, self.num_leaders).unwrap();
let prob_all_leaders = 1.0 - leaders_dist.cdf(self.num_leaders - 1);
// probability that the adversary can exclude all leaders
let relays_dist = Binomial::new(failed, self.num_relays).unwrap();
let relays_needed = self.num_relays - self.relay_notar_threshold;
let prob_censor_relays = 1.0 - relays_dist.cdf(relays_needed - 1);
// probability that either attack works
1.0 - (1.0 - prob_all_leaders) * (1.0 - prob_censor_relays)
}
/// Probability that the adversary can cause a temporary liveness failure in a slot.
pub fn temporary_liveness_failure_probability(&self, adv_strength: AdversaryStrength) -> f64 {
// there is no better liveness attack than the selective-censorship attack
self.selective_censorship_probability(adv_strength)
}
/// Calculates the attack probabilities and prints them.
///
/// Capabilities of the adversary are specified in the `adv_strength` parameter.
pub fn print_failure_probabilities(&self, adv_strength: AdversaryStrength) {
info!(
"Ryse parameters: leaders={}, relays={}, {:.2}/{:.2}",
self.num_leaders,
self.num_relays,
self.decode_threshold as f64 / self.num_relays as f64 * 100.0,
self.relay_notar_threshold as f64 / self.num_relays as f64 * 100.0,
);
info!(
"successful attack probabilities (crashed={}, byzantine={}):",
adv_strength.crashed, adv_strength.byzantine
);
info!(
"break hiding: {:.2}",
self.break_hiding_probability(adv_strength).log10()
);
info!(
"selective censorship: {:.2}",
self.selective_censorship_probability(adv_strength).log10()
);
info!(
"temporary liveness failure: {:.2}",
self.temporary_liveness_failure_probability(adv_strength)
.log10()
);
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Sanity-checks the default thresholds chosen by `RyseParameters::new`.
    #[test]
    fn test_mcp_parameters() {
        let params = RyseParameters::new(2, 10);
        assert_eq!(params.num_leaders, 2);
        assert_eq!(params.num_relays, 10);
        // 50% of 10 relays, rounded up
        assert_eq!(params.decode_threshold, 5);
        // 60% of 10 relays, rounded up
        assert_eq!(params.relay_notar_threshold, 6);
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/ryse/latency.rs | src/bin/simulations/ryse/latency.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Simulated latency test for Ryse, the MCP protocol.
//!
//! So far, this test can only simulate the happy path.
// TODO: lots of shared code with `rotor/latency` and `alpenglow/latency`
use std::hash::Hash;
use std::marker::PhantomData;
use alpenglow::ValidatorId;
use alpenglow::disseminator::rotor::SamplingStrategy;
use alpenglow::shredder::MAX_DATA_PER_SHRED;
use log::debug;
use rand::prelude::*;
use crate::discrete_event_simulator::{
Builder, Event, Protocol, Resources, SimTime, SimulationEnvironment, Stage,
broadcast_first_arrival_or_dep, broadcast_stake_threshold, column_max, column_min,
};
use crate::ryse::parameters::{RyseInstance, RyseInstanceBuilder, RyseParameters};
/// Size (in bytes) assumed per vote in the simulation.
const VOTE_SIZE: usize = 128 /* sig */ + 64 /* slot, hash, flags */;
/// Size (in bytes) assumed per certificate in the simulation.
///
/// NOTE(review): the 256-byte bitmap presumably encodes the set of signers —
/// confirm it covers the intended validator count.
const CERT_SIZE: usize = 128 /* sig */ + 256 /* bitmap */ + 64 /* slot, hash, flags */;
/// Marker type for the Ryse latency simulation.
///
/// Carries the two sampling strategies only at the type level; no values are stored.
pub struct RyseLatencySimulation<L: SamplingStrategy, R: SamplingStrategy> {
    // zero-sized markers for the leader / relay sampler types
    _leader_sampler: PhantomData<L>,
    _rotor_sampler: PhantomData<R>,
}
// Wires the Ryse event, stage, and parameter types into the generic
// discrete-event simulation engine.
impl<L: SamplingStrategy, R: SamplingStrategy> Protocol for RyseLatencySimulation<L, R> {
    type Event = LatencyEvent;
    type Stage = LatencyTestStage;
    type Params = LatencySimParams;
    type Instance = LatencySimInstance;
    type Builder = LatencySimInstanceBuilder<L, R>;
}
/// The sequential stages of the latency test.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum LatencyTestStage {
    // leaders transmit shreds directly to the relays
    Direct,
    // relays forward shreds to all validators
    Rotor,
    // validators reconstruct the full block
    Block,
    // notarization votes and certificates
    Notar,
    // fast / slow finalization votes
    Final1,
    // local and global finality
    Final2,
}
impl Stage for LatencyTestStage {
    type Event = LatencyEvent;
    type Params = LatencySimParams;

    /// The simulation starts with the leaders' direct transmissions.
    fn first() -> Self {
        Self::Direct
    }

    /// Advances to the following stage, or `None` after the last one.
    fn next(&self) -> Option<Self> {
        let next = match self {
            Self::Direct => Self::Rotor,
            Self::Rotor => Self::Block,
            Self::Block => Self::Notar,
            Self::Notar => Self::Final1,
            Self::Final1 => Self::Final2,
            Self::Final2 => return None,
        };
        Some(next)
    }

    /// Lists the events evaluated in this stage, per slice where applicable.
    fn events(&self, params: &LatencySimParams) -> Vec<LatencyEvent> {
        let num_slices = params.ryse_params.num_slices;
        match self {
            Self::Direct => std::iter::once(LatencyEvent::BlockSent)
                .chain((0..num_slices).map(LatencyEvent::Direct))
                .collect(),
            Self::Rotor => (0..num_slices)
                .flat_map(|slice| {
                    [
                        LatencyEvent::StartForwarding(slice),
                        LatencyEvent::FirstShredInSlice(slice),
                        LatencyEvent::Rotor(slice),
                    ]
                })
                .collect(),
            Self::Block => vec![LatencyEvent::FirstShred, LatencyEvent::Block],
            Self::Notar => vec![LatencyEvent::LocalNotar, LatencyEvent::Notar],
            Self::Final1 => vec![LatencyEvent::LocalFastFinal, LatencyEvent::LocalSlowFinal],
            Self::Final2 => vec![LatencyEvent::LocalFinal, LatencyEvent::Final],
        }
    }
}
/// Events that can occur at each validator.
///
/// The `u64` payloads are slice indices.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum LatencyEvent {
    // proposal dissemination
    BlockSent,
    Direct(u64),
    StartForwarding(u64),
    FirstShredInSlice(u64),
    Rotor(u64),
    FirstShred,
    Block,
    // consensus
    LocalNotar,
    Notar,
    LocalFastFinal,
    LocalSlowFinal,
    LocalFinal,
    Final,
}
impl Event for LatencyEvent {
    type Params = LatencySimParams;
    type Instance = LatencySimInstance;

    /// Human-readable identifier used when reporting timings.
    fn name(&self) -> String {
        match self {
            Self::BlockSent => "block_sent".to_owned(),
            Self::Direct(slice) => format!("direct_{slice}"),
            Self::StartForwarding(_) => "start_forwarding".to_owned(),
            Self::FirstShredInSlice(_) => "first_shred_in_slice".to_owned(),
            Self::Rotor(slice) => format!("rotor_{slice}"),
            Self::FirstShred => "first_shred".to_owned(),
            Self::Block => "block".to_owned(),
            Self::LocalNotar => "local_notar".to_owned(),
            Self::Notar => "notar".to_owned(),
            Self::LocalFastFinal => "local_fast_final".to_owned(),
            Self::LocalSlowFinal => "local_slow_final".to_owned(),
            Self::LocalFinal => "local_final".to_owned(),
            Self::Final => "final".to_owned(),
        }
    }

    /// Per-slice events only contribute statistics for slice 0; pure helper
    /// events (forwarding start, first shred per slice) are never tracked.
    fn should_track_stats(&self) -> bool {
        match self {
            Self::BlockSent => true,
            Self::Direct(slice) => *slice == 0,
            Self::StartForwarding(_) => false,
            Self::FirstShredInSlice(_) => false,
            Self::Rotor(slice) => *slice == 0,
            Self::FirstShred => true,
            Self::Block => true,
            _ => true,
        }
    }

    /// Events that must have completed (per validator) before this one.
    fn dependencies(&self, params: &LatencySimParams) -> Vec<Self> {
        match self {
            Self::BlockSent => vec![],
            // slices leave the leaders in order; slice n waits for slice n-1
            Self::Direct(slice) => {
                if *slice == 0 {
                    vec![]
                } else {
                    vec![Self::Direct(*slice - 1)]
                }
            }
            Self::StartForwarding(slice) => vec![Self::Direct(*slice)],
            Self::FirstShredInSlice(slice) => {
                vec![Self::StartForwarding(*slice)]
            }
            Self::Rotor(slice) => vec![Self::StartForwarding(*slice)],
            Self::FirstShred => (0..params.ryse_params.num_slices)
                .map(Self::FirstShredInSlice)
                .collect(),
            Self::Block => (0..params.ryse_params.num_slices)
                .map(Self::Rotor)
                .collect(),
            // consensus: block -> notarization -> (fast | slow) finalization
            Self::LocalNotar => vec![Self::Block],
            Self::Notar => vec![Self::LocalNotar],
            Self::LocalFastFinal => vec![Self::Block],
            Self::LocalSlowFinal => vec![Self::Notar],
            Self::LocalFinal => vec![Self::LocalFastFinal, Self::LocalSlowFinal],
            Self::Final => vec![Self::LocalFinal],
        }
    }

    /// Computes, for every validator, the time at which this event occurs,
    /// given the per-validator timings of its dependencies.
    ///
    /// Returns one `SimTime` per validator.
    fn calculate_timing(
        &self,
        start_time: SimTime,
        dependency_timings: &[&[SimTime]],
        instance: &LatencySimInstance,
        resources: &mut Resources,
        environment: &SimulationEnvironment,
    ) -> Vec<SimTime> {
        // time at which each validator has seen votes worth `threshold` of stake
        let broadcast_vote_threshold =
            |resources: &mut Resources, threshold: f64| -> Vec<SimTime> {
                broadcast_stake_threshold(
                    dependency_timings[0],
                    resources,
                    environment,
                    VOTE_SIZE,
                    threshold,
                )
            };
        // earlier of: own observation (the dependency) or receiving a broadcast cert
        let local_or_cert = |resources: &mut Resources| -> Vec<SimTime> {
            broadcast_first_arrival_or_dep(dependency_timings[0], resources, environment, CERT_SIZE)
        };
        match self {
            Self::BlockSent => {
                let mut timings = vec![start_time; environment.num_validators()];
                // TODO: actually run for more than 1 slot
                for &leader in instance.ryse_instances[0].leaders.iter() {
                    // every leader transmits a full block's worth of shreds
                    let block_bytes = instance.params.ryse_params.num_slices as usize
                        * instance.params.ryse_params.num_relays as usize
                        * MAX_DATA_PER_SHRED;
                    let tx_time = environment.transmission_delay(block_bytes, leader);
                    let finished_sending_time =
                        resources.network.schedule(leader, start_time, tx_time);
                    // NOTE(review): `timings[leader]` already holds `start_time`; confirm
                    // that adding `finished_sending_time` does not double-count it.
                    timings[leader as usize] += finished_sending_time;
                }
                timings
            }
            Self::Direct(slice) => {
                let mut timings = vec![SimTime::ZERO; environment.num_validators()];
                // TODO: actually run for more than 1 slot
                let slice_relays = &instance.ryse_instances[0].relays[*slice as usize];
                for (relay_offset, &relay) in slice_relays.iter().enumerate() {
                    // TODO: correctly handle validators that are relays more than once
                    // a relay can only act once it holds its shred from every leader
                    let shreds_from_all_leaders = instance.ryse_instances[0]
                        .leaders
                        .iter()
                        .map(|leader| {
                            let prop_delay = environment.propagation_delay(*leader, relay);
                            let shred_send_index = slice * instance.params.ryse_params.num_relays
                                + (relay_offset + 1) as u64;
                            let tx_delay = environment.transmission_delay(
                                shred_send_index as usize * MAX_DATA_PER_SHRED,
                                *leader,
                            );
                            start_time + prop_delay + tx_delay
                        })
                        .max()
                        .unwrap();
                    timings[relay as usize] = timings[relay as usize].max(shreds_from_all_leaders);
                }
                // TODO: remove this again
                let mut relay_timings = slice_relays
                    .iter()
                    .map(|&relay| timings[relay as usize])
                    .collect::<Vec<_>>();
                relay_timings.sort_unstable();
                debug!(
                    "p50 relay received proposals at: {}",
                    relay_timings[relay_timings.len() / 2]
                );
                timings
            }
            Self::StartForwarding(slice) => {
                let mut timings = dependency_timings[0].to_vec();
                // TODO: actually run for more than 1 slot
                for &relay in &instance.ryse_instances[0].relays[*slice as usize] {
                    let timing = &mut timings[relay as usize];
                    // a relay forwards one shred per leader to every validator
                    let total_bytes = instance.params.ryse_params.num_leaders as usize
                        * environment.num_validators()
                        * MAX_DATA_PER_SHRED;
                    let total_tx_delay = environment.transmission_delay(total_bytes, relay);
                    let start_time = resources.network.time_next_free_after(relay, *timing);
                    resources.network.schedule(relay, *timing, total_tx_delay);
                    *timing = start_time;
                }
                timings
            }
            Self::FirstShredInSlice(slice) => {
                let mut timings = vec![SimTime::NEVER; environment.num_validators()];
                for (recipient, timing) in timings.iter_mut().enumerate() {
                    // TODO: actually run for more than 1 slot
                    // earliest arrival from any relay of this slice
                    let first_shred_time = instance.ryse_instances[0].relays[*slice as usize]
                        .iter()
                        .map(|relay| {
                            let prop_delay =
                                environment.propagation_delay(*relay, recipient as ValidatorId);
                            let tx_delay = environment.transmission_delay(
                                (recipient + 1)
                                    * instance.params.ryse_params.num_leaders as usize
                                    * MAX_DATA_PER_SHRED,
                                *relay,
                            );
                            dependency_timings[0][*relay as usize] + prop_delay + tx_delay
                        })
                        .min()
                        .unwrap();
                    *timing = first_shred_time;
                }
                timings
            }
            Self::Rotor(slice) => {
                let mut timings = vec![SimTime::NEVER; environment.num_validators()];
                let mut shred_timings =
                    vec![SimTime::NEVER; instance.params.ryse_params.num_relays as usize];
                for (recipient, timing) in timings.iter_mut().enumerate() {
                    // TODO: actually run for more than 1 slot
                    let slice_relays = &instance.ryse_instances[0].relays[*slice as usize];
                    for (i, relay) in slice_relays.iter().enumerate() {
                        shred_timings[i] = dependency_timings[0][*relay as usize]
                            + environment.propagation_delay(*relay, recipient as ValidatorId)
                            + environment.transmission_delay(
                                (recipient + 1)
                                    * instance.params.ryse_params.num_leaders as usize
                                    * MAX_DATA_PER_SHRED,
                                *relay,
                            );
                    }
                    // a slice is decodable once `decode_threshold` shreds have arrived
                    shred_timings.sort_unstable();
                    *timing =
                        shred_timings[instance.params.ryse_params.decode_threshold as usize - 1];
                }
                timings
            }
            Self::FirstShred => column_min(dependency_timings),
            Self::Block => column_max(dependency_timings),
            // 60% of stake in notarization votes observed locally
            Self::LocalNotar => broadcast_vote_threshold(resources, 0.6),
            Self::Notar => local_or_cert(resources),
            // 80% of stake enables the fast-finalization path
            Self::LocalFastFinal => broadcast_vote_threshold(resources, 0.8),
            Self::LocalSlowFinal => broadcast_vote_threshold(resources, 0.6),
            Self::LocalFinal => column_min(dependency_timings),
            // NOTE: when sending final cert, final vote is already scheduled
            // TODO: this is not always optimal, handle this properly
            Self::Final => local_or_cert(resources),
        }
    }
}
/// Parameters for the Ryse latency simulation.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct LatencySimParams {
    // parameters of the underlying Ryse MCP protocol
    ryse_params: RyseParameters,
    // slots per leader window — not read in the code visible here; TODO confirm use
    num_slots_per_window: usize,
    // total number of slots to simulate (one Ryse instance is sampled per slot)
    num_slots: usize,
}
impl LatencySimParams {
    /// Bundles the Ryse protocol parameters with the simulation's slot counts.
    pub fn new(ryse_params: RyseParameters, num_slots_per_window: usize, num_slots: usize) -> Self {
        Self {
            num_slots,
            num_slots_per_window,
            ryse_params,
        }
    }
}
/// A builder for Ryse latency simulation instances.
pub struct LatencySimInstanceBuilder<L: SamplingStrategy, R: SamplingStrategy> {
    // sub-builder producing one sampled Ryse instance per slot
    ryse_builder: RyseInstanceBuilder<L, R>,
    params: LatencySimParams,
}
impl<L: SamplingStrategy, R: SamplingStrategy> LatencySimInstanceBuilder<L, R> {
    /// Wraps a Ryse instance builder together with the simulation parameters.
    pub fn new(ryse_builder: RyseInstanceBuilder<L, R>, params: LatencySimParams) -> Self {
        Self {
            params,
            ryse_builder,
        }
    }
}
impl<L: SamplingStrategy, R: SamplingStrategy> Builder for LatencySimInstanceBuilder<L, R> {
    type Params = LatencySimParams;
    type Instance = LatencySimInstance;

    /// Samples one fresh Ryse instance for each simulated slot.
    fn build(&self, rng: &mut impl Rng) -> LatencySimInstance {
        let mut ryse_instances = Vec::with_capacity(self.params.num_slots);
        for _ in 0..self.params.num_slots {
            ryse_instances.push(self.ryse_builder.build(rng));
        }
        LatencySimInstance {
            ryse_instances,
            params: self.params.clone(),
        }
    }

    /// Read access to the simulation parameters.
    fn params(&self) -> &Self::Params {
        &self.params
    }
}
/// A specific instance of the Ryse latency simulation.
///
/// Contains one instance of the Ryse protocol, [`RyseInstance`], per slot.
pub struct LatencySimInstance {
    // one sampled Ryse instance per slot (indexed by slot)
    ryse_instances: Vec<RyseInstance>,
    params: LatencySimParams,
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/ryse/robustness.rs | src/bin/simulations/ryse/robustness.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Calculations about the robustness of the Ryse MCP protocol.
//!
//! Currently, this just runs some static calculations on the set of parameters.
//!
//! In the future, this would also simulate attack scenarios for a specific stake distribution.
//! This is analogous to what is done for Rotor in [`crate::rotor::robustness`];
use std::fs::File;
use alpenglow::disseminator::rotor::FaitAccompli1Sampler;
use alpenglow::network::simulated::stake_distribution::{
VALIDATOR_DATA, validators_from_validator_data,
};
use color_eyre::Result;
use super::parameters::{AdversaryStrength, RyseParameters};
use crate::quorum_robustness::{QuorumRobustnessTest, QuorumThreshold};
/// Number of concurrent block proposers assumed in the robustness tests.
const NUM_PROPOSERS: u64 = 16;
/// Number of relays assumed in the robustness tests.
const NUM_RELAYS: u64 = 512;
/// Adversary assumed for the static calculations: 5% crashed, 20% Byzantine.
const ADVERSARY_STRENGTH: AdversaryStrength = AdversaryStrength {
    crashed: 0.05,
    byzantine: 0.2,
};
/// Runs the static robustness calculations and logs the attack probabilities,
/// both for the balanced parameter set and for the optimized one.
pub fn run_robustness_tests() {
    let balanced = RyseParameters::new(NUM_PROPOSERS, NUM_RELAYS);
    balanced.print_failure_probabilities(ADVERSARY_STRENGTH);
    let optimized = balanced.optmize(ADVERSARY_STRENGTH);
    optimized.print_failure_probabilities(ADVERSARY_STRENGTH);
}
/// Runs the sampling-based robustness test for Ryse with `total_shreds` relays.
///
/// Uses the Solana mainnet stake distribution and FA1 samplers for both the
/// proposer set and the relay committees, evaluates the hiding and censorship
/// attacks, and writes the results to a CSV file under `data/output/`.
///
/// # Errors
///
/// Returns an error if the output file cannot be created or the test run fails.
pub fn run_ryse_robustness_test(total_shreds: u64) -> Result<()> {
    let (validators, _with_pings) = validators_from_validator_data(&VALIDATOR_DATA);
    let proposer_sampler =
        FaitAccompli1Sampler::new_with_stake_weighted_fallback(validators.clone(), NUM_PROPOSERS);
    let relay_sampler =
        FaitAccompli1Sampler::new_with_stake_weighted_fallback(validators.clone(), total_shreds);
    let params = RyseParameters::new(NUM_PROPOSERS, total_shreds);

    // hiding breaks if the adversary holds enough relays to decode early
    let hiding_attack = QuorumThreshold::Simple {
        quorum: 1,
        threshold: (params.relay_notar_threshold + params.decode_threshold)
            .saturating_sub(params.num_relays) as usize,
        is_crash_enough: false,
    }
    .into_attack("hiding");

    // censorship succeeds if the adversary holds every proposer slot...
    let all_proposers = QuorumThreshold::Simple {
        quorum: 0,
        threshold: params.num_leaders as usize,
        is_crash_enough: true,
    };
    // ...or enough relays to block the notarization threshold
    let enough_relays = QuorumThreshold::Simple {
        quorum: 1,
        threshold: (params.num_relays - params.relay_notar_threshold) as usize,
        is_crash_enough: true,
    };
    let censorship_attack = all_proposers.or(enough_relays).into_attack("censorship");

    let test = QuorumRobustnessTest::new(
        validators,
        "solana".to_string(),
        vec![proposer_sampler, relay_sampler],
        vec![0, 1],
        vec![params.num_leaders as usize, params.num_relays as usize],
        vec![hiding_attack, censorship_attack],
    );
    let adversary_strength = crate::quorum_robustness::AdversaryStrength {
        crashed: 0.05,
        byzantine: 0.2,
    };

    let filename = format!("ryse_robustness_{}_{}", params.num_leaders, total_shreds);
    let path = std::path::Path::new("data")
        .join("output")
        .join(filename)
        .with_extension("csv");
    let mut csv_file = csv::Writer::from_writer(File::create(path)?);
    test.run(adversary_strength, &mut csv_file)
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/rotor/latency.rs | src/bin/simulations/rotor/latency.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Latency simulation for the Rotor block dissemination protocol.
use std::marker::PhantomData;
use alpenglow::ValidatorId;
use alpenglow::disseminator::rotor::SamplingStrategy;
use alpenglow::shredder::MAX_DATA_PER_SHRED;
use super::{RotorInstance, RotorInstanceBuilder, RotorParams};
use crate::discrete_event_simulator::{
Event, Protocol, Resources, SimTime, SimulationEnvironment, Stage, column_max, column_min,
};
/// Wrapper type for the Rotor latency simulation.
///
/// This type implements the `Protocol` trait and can be passed to the simulation engine.
/// There is probably never a need to construct this type directly.
pub struct RotorLatencySimulation<L: SamplingStrategy, R: SamplingStrategy> {
    // zero-sized markers carrying the sampler types at the type level
    _leader_sampler: PhantomData<L>,
    _rotor_sampler: PhantomData<R>,
}
// Wires the Rotor event, stage, and parameter types into the generic
// discrete-event simulation engine.
impl<L: SamplingStrategy, R: SamplingStrategy> Protocol for RotorLatencySimulation<L, R> {
    type Event = LatencyEvent;
    type Stage = LatencyTestStage;
    type Params = RotorParams;
    type Instance = RotorInstance;
    type Builder = RotorInstanceBuilder<L, R>;
}
/// Stages of the Rotor latency simulation.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum LatencyTestStage {
    // the leader transmits shreds directly to the relays
    Direct,
    // relays forward shreds to all validators
    Rotor,
    // validators reconstruct the full block
    Block,
}
impl Stage for LatencyTestStage {
    type Event = LatencyEvent;
    type Params = RotorParams;

    /// Dissemination starts with the leader's direct transmissions.
    fn first() -> Self {
        Self::Direct
    }

    /// Advances to the following stage, or `None` after the last one.
    fn next(&self) -> Option<Self> {
        let next = match self {
            Self::Direct => Self::Rotor,
            Self::Rotor => Self::Block,
            Self::Block => return None,
        };
        Some(next)
    }

    /// Lists the events evaluated in this stage, per slice where applicable.
    fn events(&self, params: &RotorParams) -> Vec<LatencyEvent> {
        match self {
            Self::Direct => std::iter::once(LatencyEvent::BlockSent)
                .chain((0..params.slices).map(LatencyEvent::Direct))
                .collect(),
            Self::Rotor => (0..params.slices)
                .flat_map(|slice| {
                    [
                        LatencyEvent::StartForwarding(slice),
                        LatencyEvent::FirstShredInSlice(slice),
                        LatencyEvent::Rotor(slice),
                    ]
                })
                .collect(),
            Self::Block => vec![LatencyEvent::FirstShred, LatencyEvent::Block],
        }
    }
}
/// Events that can occur at each validator during the Rotor latency simulation.
///
/// The `usize` payloads are slice indices.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum LatencyEvent {
    BlockSent,
    Direct(usize),
    StartForwarding(usize),
    FirstShredInSlice(usize),
    Rotor(usize),
    FirstShred,
    Block,
}
impl Event for LatencyEvent {
    type Params = RotorParams;
    type Instance = RotorInstance;

    /// Human-readable identifier used when reporting timings.
    fn name(&self) -> String {
        match self {
            Self::BlockSent => "block_sent".to_owned(),
            Self::Direct(slice) => format!("direct_{slice}"),
            Self::StartForwarding(_) => "start_forwarding".to_owned(),
            Self::FirstShredInSlice(_) => "first_shred_in_slice".to_owned(),
            Self::Rotor(slice) => format!("rotor_{slice}"),
            Self::FirstShred => "first_shred".to_owned(),
            Self::Block => "block".to_owned(),
        }
    }

    /// Per-slice events only contribute statistics for slice 0; pure helper
    /// events are never tracked.
    fn should_track_stats(&self) -> bool {
        match self {
            Self::BlockSent => true,
            Self::Direct(slice) => *slice == 0,
            Self::StartForwarding(_) => false,
            Self::FirstShredInSlice(_) => false,
            Self::Rotor(slice) => *slice == 0,
            Self::FirstShred => true,
            Self::Block => true,
        }
    }

    /// Events that must have completed (per validator) before this one.
    fn dependencies(&self, params: &RotorParams) -> Vec<Self> {
        match self {
            Self::BlockSent => vec![],
            // slices leave the leader in order; slice n waits for slice n-1
            Self::Direct(slice) => {
                if *slice == 0 {
                    vec![]
                } else {
                    vec![Self::Direct(*slice - 1)]
                }
            }
            Self::StartForwarding(slice) => vec![Self::Direct(*slice)],
            Self::FirstShredInSlice(slice) => {
                vec![Self::StartForwarding(*slice)]
            }
            Self::Rotor(slice) => vec![Self::StartForwarding(*slice)],
            Self::FirstShred => (0..params.slices).map(Self::FirstShredInSlice).collect(),
            Self::Block => (0..params.slices).map(Self::Rotor).collect(),
        }
    }

    /// Computes, for every validator, the time at which this event occurs,
    /// given the per-validator timings of its dependencies.
    ///
    /// Returns one `SimTime` per validator.
    fn calculate_timing(
        &self,
        start_time: SimTime,
        dependency_timings: &[&[SimTime]],
        instance: &RotorInstance,
        resources: &mut Resources,
        environment: &SimulationEnvironment,
    ) -> Vec<SimTime> {
        match self {
            Self::BlockSent => {
                let mut timings = vec![start_time; environment.num_validators()];
                // the leader transmits the entire block (all slices, all shreds)
                let block_bytes =
                    instance.params.slices * instance.params.shreds * MAX_DATA_PER_SHRED;
                let tx_time = environment.transmission_delay(block_bytes, instance.leader);
                let finished_sending_time =
                    resources
                        .network
                        .schedule(instance.leader, SimTime::ZERO, tx_time);
                // NOTE(review): `timings[leader]` already holds `start_time`; confirm
                // that adding `finished_sending_time` does not double-count.
                timings[instance.leader as usize] += finished_sending_time;
                timings
            }
            Self::Direct(slice) => {
                let mut timings = vec![start_time; environment.num_validators()];
                for (recipient, timing) in timings.iter_mut().enumerate() {
                    *timing +=
                        environment.propagation_delay(instance.leader, recipient as ValidatorId);
                }
                // relays additionally wait until the leader has sent their shred
                for (relay_offset, &relay) in instance.relays[*slice].iter().enumerate() {
                    let shred_send_index = slice * instance.params.shreds + relay_offset + 1;
                    let tx_delay = environment
                        .transmission_delay(shred_send_index * MAX_DATA_PER_SHRED, instance.leader);
                    timings[relay as usize] += tx_delay;
                }
                timings
            }
            Self::StartForwarding(slice) => {
                let mut timings = dependency_timings[0].to_vec();
                for &relay in &instance.relays[*slice] {
                    let timing = &mut timings[relay as usize];
                    // each relay forwards its shred to every validator
                    let total_bytes = environment.num_validators() * MAX_DATA_PER_SHRED;
                    let total_tx_delay = environment.transmission_delay(total_bytes, relay);
                    let start_time = resources.network.time_next_free_after(relay, *timing);
                    resources.network.schedule(relay, *timing, total_tx_delay);
                    *timing = start_time;
                }
                timings
            }
            Self::FirstShredInSlice(slice) => {
                let mut timings = vec![SimTime::NEVER; environment.num_validators()];
                for (recipient, timing) in timings.iter_mut().enumerate() {
                    // earliest arrival from any relay of this slice
                    let first_shred_time = instance.relays[*slice]
                        .iter()
                        .map(|relay| {
                            let prop_delay =
                                environment.propagation_delay(*relay, recipient as ValidatorId);
                            let tx_delay = environment
                                .transmission_delay((recipient + 1) * MAX_DATA_PER_SHRED, *relay);
                            dependency_timings[0][*relay as usize] + prop_delay + tx_delay
                        })
                        .min()
                        .unwrap();
                    *timing = first_shred_time;
                }
                timings
            }
            Self::Rotor(slice) => {
                let mut timings = vec![SimTime::NEVER; environment.num_validators()];
                let mut shred_timings = vec![SimTime::NEVER; instance.params.shreds];
                for (recipient, timing) in timings.iter_mut().enumerate() {
                    for (i, relay) in instance.relays[*slice].iter().enumerate() {
                        shred_timings[i] = dependency_timings[0][*relay as usize]
                            + environment.propagation_delay(*relay, recipient as ValidatorId)
                            + environment
                                .transmission_delay((recipient + 1) * MAX_DATA_PER_SHRED, *relay);
                    }
                    // a slice is decodable once `data_shreds` shreds have arrived
                    shred_timings.sort_unstable();
                    *timing = shred_timings[instance.params.data_shreds - 1];
                }
                timings
            }
            Self::FirstShred => column_min(dependency_timings),
            Self::Block => column_max(dependency_timings),
        }
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/rotor/robustness.rs | src/bin/simulations/rotor/robustness.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Calculations about the robustness of the Rotor block dissemination protocol.
//!
//! This implements two main attack scenarios:
//! - Equivocation attack: Less than 20% of stake is Byzantine.
//! - Censorship attack: Up to 40% of stake is crashed.
//!
//! For each attack scenario multiple adversary strategies are simulated:
//! - Random: Corrupt a random subset of validators.
//! - Small: Corrupt as many of the smallest validators as possible.
//! - Large: Corrupt as many of the largest validators as possible.
use std::fs::File;
use alpenglow::disseminator::rotor::StakeWeightedSampler;
use alpenglow::network::simulated::stake_distribution::{
VALIDATOR_DATA, validators_from_validator_data,
};
use color_eyre::Result;
use super::RotorParams;
use crate::quorum_robustness::{AdversaryStrength, QuorumRobustnessTest, QuorumThreshold};
// TODO: support different: stake distributions, sampling strategies, Rotor params
/// Runs the sampling-based robustness test for Rotor with the given coding rate.
///
/// Uses the Solana mainnet stake distribution with stake-weighted leader and
/// relay sampling, evaluates the equivocation and censorship attacks across all
/// slices, and writes the results to a CSV file under `data/output/`.
///
/// # Errors
///
/// Returns an error if the output file cannot be created or the test run fails.
pub fn run_rotor_robustness_test(data_shreds: usize, total_shreds: usize) -> Result<()> {
    let (validators, _with_pings) = validators_from_validator_data(&VALIDATOR_DATA);
    let leader_sampler = StakeWeightedSampler::new(validators.clone());
    let rotor_sampler = StakeWeightedSampler::new(validators.clone());
    let params = RotorParams {
        data_shreds,
        shreds: total_shreds,
        slices: 40,
    };
    // equivocation: Byzantine relays reach the decode threshold in some slice
    let equivocation_thresholds = (0..params.slices)
        .map(|slice| QuorumThreshold::Simple {
            quorum: slice,
            threshold: params.data_shreds,
            is_crash_enough: false,
        })
        .collect::<Vec<_>>();
    let equivocation_attack =
        QuorumThreshold::Any(equivocation_thresholds).into_attack("equivocation");
    // censorship: failed relays control `shreds - data_shreds` of some slice
    let censorship_thresholds = (0..params.slices)
        .map(|slice| QuorumThreshold::Simple {
            quorum: slice,
            threshold: params.shreds - params.data_shreds,
            is_crash_enough: true,
        })
        .collect::<Vec<_>>();
    let censorship_attack = QuorumThreshold::Any(censorship_thresholds).into_attack("censorship");
    let test = QuorumRobustnessTest::new(
        validators,
        "solana".to_string(),
        vec![leader_sampler, rotor_sampler],
        vec![1; params.slices],
        vec![params.shreds; params.slices],
        vec![equivocation_attack, censorship_attack],
    );
    let adversary_strength = AdversaryStrength {
        crashed: 0.2,
        byzantine: 0.2,
    };
    let filename = format!("rotor_robustness_{data_shreds}_{total_shreds}");
    let path = std::path::Path::new("data")
        .join("output")
        .join(filename)
        .with_extension("csv");
    // propagate I/O errors instead of panicking (consistent with the Ryse variant)
    let file = File::create(path)?;
    let mut csv_file = csv::Writer::from_writer(file);
    test.run(adversary_strength, &mut csv_file)
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/discrete_event_simulator/timings.rs | src/bin/simulations/discrete_event_simulator/timings.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Data structures for timing measurements.
//!
//! Most importantly the [`Timings`] struct, which is a map from events to timing vectors.
//! This is what the discrete-event simulator uses to record timings of events.
//! It can be thought of as a `#events x #validators` matrix of latencies.
//! Although, it is actually backed by a [`HashMap`] of [`Vec<SimTime>`],
//! so the rows are only initialized as needed.
use std::collections::HashMap;
use std::fmt::Display;
use std::fs::File;
use std::hash::Hash;
use std::io::{BufWriter, Write};
use std::ops::{Add, AddAssign};
use std::path::Path;
use alpenglow::ValidatorId;
use crate::discrete_event_simulator::{Event, Protocol, SimulationEnvironment, Stage};
/// Simulated time in nanoseconds.
// TODO: maybe split into a duration and an instant type?
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct SimTime(u64);
impl SimTime {
    /// Start of the simulation.
    pub const ZERO: Self = Self(0);
    /// Infinite time, used to represent a point in time that is never reached.
    pub const NEVER: Self = Self(u64::MAX);
    /// Constructs a new [`SimTime`] from the given number of nanoseconds.
    pub const fn new(time_ns: u64) -> Self {
        Self(time_ns)
    }
    /// Constructs a new [`SimTime`] from the given number of seconds.
    ///
    /// The time is rounded to the nearest nanosecond.
    ///
    /// # Panics
    ///
    /// Panics if `time_secs` is negative.
    pub const fn from_secs(time_secs: f64) -> Self {
        assert!(time_secs >= 0.0);
        let time_ns = (time_secs * 1e9).round() as u64;
        Self::new(time_ns)
    }
    /// Returns the exact number of nanoseconds the [`SimTime`] represents.
    ///
    /// Returns `None` for [`SimTime::NEVER`].
    pub const fn nanos(self) -> Option<u64> {
        match self {
            Self::NEVER => None,
            Self(t) => Some(t),
        }
    }
    /// Converts the [`SimTime`] to (fractional) microseconds.
    ///
    /// [`SimTime::NEVER`] maps to `f64::INFINITY`.
    pub fn as_micros(self) -> f64 {
        self.nanos()
            .map_or(f64::INFINITY, |nanos| nanos as f64 / 1e3)
    }
    /// Converts the [`SimTime`] to (fractional) milliseconds.
    ///
    /// [`SimTime::NEVER`] maps to `f64::INFINITY`.
    pub fn as_millis(self) -> f64 {
        self.nanos()
            .map_or(f64::INFINITY, |nanos| nanos as f64 / 1e6)
    }
    /// Converts the [`SimTime`] to (fractional) seconds.
    ///
    /// [`SimTime::NEVER`] maps to `f64::INFINITY`.
    pub fn as_secs(self) -> f64 {
        self.nanos()
            .map_or(f64::INFINITY, |nanos| nanos as f64 / 1e9)
    }
}
impl Add<SimTime> for SimTime {
    type Output = Self;
    /// Adds two times, saturating at [`SimTime::NEVER`].
    ///
    /// Saturation keeps `NEVER` absorbing: an event scheduled relative to a
    /// time that is never reached is itself never reached. Plain `+` would
    /// overflow here because `NEVER` is `u64::MAX` (panic in debug builds,
    /// wrap around to a tiny time in release builds).
    fn add(self, other: SimTime) -> Self {
        Self(self.0.saturating_add(other.0))
    }
}
impl AddAssign<SimTime> for SimTime {
    /// In-place saturating addition, consistent with the `Add` impl above.
    fn add_assign(&mut self, other: SimTime) {
        *self = *self + other;
    }
}
impl Display for SimTime {
    /// Human-readable rendering with an auto-selected unit (ns/us/ms/s);
    /// [`SimTime::NEVER`] renders as `"never"`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if self == &SimTime::NEVER {
            write!(f, "never")
        } else if self.0 < 1_000 {
            write!(f, "{} ns", self.0)
        } else if self.0 < 1_000_000 {
            write!(f, "{:.0} us", self.as_micros())
        } else if self.0 < 1_000_000_000 {
            write!(f, "{:.0} ms", self.as_millis())
        } else {
            write!(f, "{:.0} s", self.as_secs())
        }
    }
}
/// The timing matrix, implemented as a map from events to timing vectors.
///
/// Conceptually a `#events x #validators` matrix of latencies; rows are
/// created lazily via [`Timings::initialize`].
pub struct Timings<E: Event> {
    // Simulated time at which the measured run started.
    start_time: SimTime,
    // One row per event; each row holds one timing per validator,
    // initialized to `SimTime::NEVER`.
    event_timings: HashMap<E, Vec<SimTime>>,
}
impl<E: Event> Timings<E> {
    /// Constructs a new [`Timings`] from the given start time.
    pub fn new(start_time: SimTime) -> Self {
        Self {
            start_time,
            event_timings: HashMap::new(),
        }
    }
    /// Initializes the timing vector for the given event to infinity.
    ///
    /// Must be called before [`Timings::record`] for the same event;
    /// calling it again replaces any previously recorded row.
    pub fn initialize(&mut self, event: E, num_val: usize) {
        self.event_timings
            .insert(event, vec![SimTime::NEVER; num_val]);
    }
    /// Deletes all the rows from the [`HashMap`].
    pub fn clear(&mut self) {
        self.event_timings.clear();
    }
    /// Records the latency for the given event and validator.
    ///
    /// Keeps the minimum: a recorded timing that is not smaller than the
    /// current cell value is ignored.
    ///
    /// # Panics
    ///
    /// Panics if the event was not initialized via [`Timings::initialize`],
    /// or if `validator` is out of range for the initialized row.
    pub fn record(&mut self, event: E, timing: SimTime, validator: ValidatorId) {
        let vec = self.event_timings.get_mut(&event).unwrap();
        let entry = vec.get_mut(validator as usize).unwrap();
        if timing < *entry {
            *entry = timing;
        }
    }
    /// Returns the start time.
    pub fn start_time(&self) -> SimTime {
        self.start_time
    }
    /// Returns the timing vector for the given event.
    ///
    /// Returns `None` if the event was never initialized.
    pub fn get(&self, event: E) -> Option<&[SimTime]> {
        self.event_timings.get(&event).map(Vec::as_slice)
    }
    /// Iterates over timing vectors for all events.
    ///
    /// Iteration order is unspecified (backed by a `HashMap`).
    pub fn iter(&self) -> impl Iterator<Item = (&E, &[SimTime])> {
        self.event_timings.iter().map(|(k, v)| (k, v.as_slice()))
    }
}
impl<E: Event> Default for Timings<E> {
    /// Creates an empty timing matrix starting at `SimTime::ZERO`.
    fn default() -> Self {
        Self::new(SimTime::ZERO)
    }
}
/// Stats tracker for timings across all events and multiple simulation runs.
pub struct TimingStats<P: Protocol>(HashMap<P::Event, EventTimingStats>);
impl<P: Protocol> TimingStats<P> {
    /// Records the timing statistics for all events.
    ///
    /// Updates the [`EventTimingStats`] corresponding to each event.
    /// Events whose `should_track_stats()` returns `false` are skipped.
    pub fn record_latencies(
        &mut self,
        timings: &mut Timings<P::Event>,
        environment: &SimulationEnvironment,
    ) {
        for (event, timing_vec) in timings.iter() {
            if !event.should_track_stats() {
                continue;
            }
            // Lazily create the per-event stats entry on first sighting.
            self.0
                .entry(*event)
                .or_default()
                .record_latencies(timing_vec, environment);
        }
    }
    /// References the [`EventTimingStats`] for the given event, if it exists.
    pub fn get(&self, event: &P::Event) -> Option<&EventTimingStats> {
        self.0.get(event)
    }
    /// Writes percentiles to a CSV file.
    ///
    /// Layout: one column per tracked event, one row per percentile (1..=100);
    /// each cell is the average latency (in ms) of that percentile across runs.
    ///
    /// # Errors
    ///
    /// Returns an error if the file cannot be created or written to.
    ///
    /// # Panics
    ///
    /// Panics if a tracked event has no recorded stats, i.e. if
    /// [`TimingStats::record_latencies`] was never called for it.
    pub fn write_to_csv(
        &self,
        filename: impl AsRef<Path>,
        params: &P::Params,
    ) -> std::io::Result<()> {
        let file = File::create(filename)?;
        // NOTE(review): relies on `BufWriter`'s `Drop` to flush; flush errors
        // are silently dropped there — consider an explicit `writer.flush()?`.
        let mut writer = BufWriter::new(file);
        // collect all events
        let events = P::Stage::all()
            .iter()
            .flat_map(|stage| {
                stage
                    .events(params)
                    .into_iter()
                    .filter(Event::should_track_stats)
                    .map(|event| (event.name(), event))
            })
            .collect::<Vec<_>>();
        // write header row
        let columns = events
            .iter()
            .map(|(name, _event)| name.to_string())
            .collect::<Vec<_>>();
        let column_str = columns.join(",");
        writeln!(writer, "percentile,{column_str}")?;
        // write data rows
        for percentile in 1..=100 {
            let event_timings = events
                .iter()
                .map(|(_name, event)| {
                    let event_stats = self.get(event).unwrap();
                    event_stats
                        .get_avg_percentile_latency(percentile)
                        .to_string()
                })
                .collect::<Vec<_>>();
            let event_timings_str = event_timings.join(",");
            writeln!(writer, "{percentile},{event_timings_str}")?;
        }
        Ok(())
    }
}
impl<P: Protocol> Default for TimingStats<P> {
    /// Creates an empty stats tracker with no per-event entries.
    fn default() -> Self {
        Self(HashMap::new())
    }
}
/// Stats tracker for timings of a single event across multiple simulation runs.
pub struct EventTimingStats {
    // Running sum (over runs) of the stake-weighted latency (in ms) for each
    // of the 100 percentile buckets; divided by `count` when averaging.
    sum_percentile_latencies: [f64; 100],
    // Per percentile bucket: how much stake each geographic location
    // contributed. NOTE(review): written below but never read in this module
    // — possibly consumed elsewhere or dead; verify before removing.
    percentile_location: Vec<HashMap<String, f64>>,
    // Number of runs recorded so far.
    count: u64,
}
impl EventTimingStats {
    /// Updates the aggregate stats based on the timing vector from a single run.
    ///
    /// Percentiles are stake-weighted: validators are sorted by latency and
    /// each contributes its stake to consecutive percentile buckets of size
    /// `total_stake / 100`. A validator straddling a bucket boundary is split
    /// proportionally across the adjacent buckets.
    pub fn record_latencies(&mut self, latencies: &[SimTime], environment: &SimulationEnvironment) {
        // Pair each latency with its validator index, then sort by latency
        // (ascending), ties broken by validator index.
        let mut latencies = latencies
            .iter()
            .enumerate()
            .map(|(v, l)| (*l, v))
            .collect::<Vec<_>>();
        latencies.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
        let percentile_stake = environment.total_stake as f64 / 100.0;
        let mut percentile = 1;
        let mut stake_so_far = 0.0;
        for (latency, v) in latencies {
            let mut validator_stake = environment.validators[v].stake as f64;
            // Spread this validator's stake over at most 100 percentile buckets.
            for _ in 0..100 {
                // Stake still missing to fill the current percentile bucket.
                let percentile_stake_left = percentile as f64 * percentile_stake - stake_so_far;
                let abs_stake_contrib = validator_stake.min(percentile_stake_left);
                let rel_stake_contrib = abs_stake_contrib / percentile_stake;
                let latency_contrib = rel_stake_contrib * latency.as_millis();
                self.sum_percentile_latencies[percentile as usize - 1] += latency_contrib;
                let count = self.percentile_location[percentile as usize - 1]
                    .entry(environment.ping_servers[v].location.clone())
                    .or_default();
                *count += abs_stake_contrib;
                stake_so_far += abs_stake_contrib;
                validator_stake -= abs_stake_contrib;
                if percentile < 100 && stake_so_far >= percentile as f64 * percentile_stake {
                    // Current bucket is full; keep distributing the remaining
                    // stake of this validator into the next bucket.
                    percentile += 1;
                } else {
                    // Bucket not full (validator's stake exhausted) or we are
                    // already in the last bucket; move to the next validator.
                    break;
                }
            }
        }
        // Sanity checks: (almost) all stake was distributed, modulo float
        // rounding error, and we ended up in the final percentile bucket.
        assert!((stake_so_far - environment.total_stake as f64).abs() < 5000.0);
        assert!(percentile >= 100);
        self.count += 1;
    }
    /// Returns the average timing for a given percentile in milliseconds.
    ///
    /// # Panics
    ///
    /// Panics if `percentile` is not in `1..=100`.
    pub fn get_avg_percentile_latency(&self, percentile: u8) -> f64 {
        assert!(percentile > 0 && percentile <= 100);
        let sum = self.sum_percentile_latencies[percentile as usize - 1];
        sum / self.count as f64
    }
}
impl Default for EventTimingStats {
    /// All-zero sums, empty location maps, zero recorded runs.
    fn default() -> Self {
        Self {
            sum_percentile_latencies: [0.0; 100],
            percentile_location: vec![HashMap::new(); 100],
            count: 0,
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::rotor::LatencyEvent;
#[test]
fn basic() {
let mut time = SimTime::new(1_000_000);
assert!((time.as_secs() - 1e-3).abs() < f64::EPSILON);
assert!((time.as_millis() - 1.0).abs() < f64::EPSILON);
time += SimTime::new(1_000_000);
assert!((time.as_secs() - 2e-3).abs() < f64::EPSILON);
assert!((time.as_millis() - 2.0).abs() < f64::EPSILON);
let time = SimTime::from_secs(0.1);
assert!((time.as_secs() - 0.1).abs() < f64::EPSILON);
assert!((time.as_millis() - 100.0).abs() < f64::EPSILON);
}
#[test]
fn timings() {
let mut timings = Timings::<LatencyEvent>::default();
let event = LatencyEvent::BlockSent;
timings.initialize(event, 2);
timings.record(event, SimTime::new(10), 0);
assert_eq!(timings.get(event).unwrap()[0], SimTime::new(10));
assert_eq!(timings.get(event).unwrap()[1], SimTime::NEVER);
}
// #[test]
// fn stats() {
// let mut stats = TimingStats::default();
// let mut stats = EventTimingStats::default();
// stats.record_latencies(&[], &SimulationEnvironment::new());
// assert_eq!(stats.get_avg_percentile_latency(1), 0.0);
// }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/bin/simulations/discrete_event_simulator/resources.rs | src/bin/simulations/discrete_event_simulator/resources.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Data structures for resource utilization.
//!
//! For now, all these resources exist at each validator individually.
// TODO: introduce notion of a shared resource?
use alpenglow::ValidatorId;
use crate::discrete_event_simulator::SimTime;
/// Tracks resource utilization across all resources and validators.
// TODO: add other resources
#[derive(Clone, Debug)]
pub struct Resources {
    pub network: Resource,
    // pub cpu: Resource,
}
impl Resources {
    /// Creates a new resource utilization tracker.
    ///
    /// Initially, every one of the `num_validators` validators has all of
    /// its resources available.
    pub fn new(num_validators: usize) -> Self {
        Self {
            network: Resource::new(num_validators),
            // cpu: Resource::new(num_validators),
        }
    }
}
/// Tracks resource utilization for a single resource.
#[derive(Clone, Debug)]
pub struct Resource {
    next_free: Vec<SimTime>,
}
impl Resource {
    /// Creates a new resource.
    ///
    /// Initially, the resource is available on every validator.
    pub fn new(num_validators: usize) -> Self {
        let next_free = vec![SimTime::ZERO; num_validators];
        Self { next_free }
    }
    /// Returns the next time this resource will be free.
    pub fn time_next_free(&self, validator: ValidatorId) -> SimTime {
        let index = validator as usize;
        self.next_free[index]
    }
    /// Returns the next time this resource will be free, after `time`.
    pub fn time_next_free_after(&self, validator: ValidatorId, time: SimTime) -> SimTime {
        let free_at = self.time_next_free(validator);
        if free_at > time { free_at } else { time }
    }
    /// Schedules the resource to be used.
    ///
    /// First, finds the next time this resource will be free after
    /// `target_start_time`. Then, reserves the resource for `duration`
    /// starting at that time. Returns when the reservation ends.
    pub fn schedule(
        &mut self,
        validator: ValidatorId,
        target_start_time: SimTime,
        duration: SimTime,
    ) -> SimTime {
        let start = self.time_next_free_after(validator, target_start_time);
        self.reserve(validator, start, duration)
    }
    /// Reserves the resource for `duration` starting at `start_time`.
    ///
    /// Returns the time at which the reservation ends.
    fn reserve(
        &mut self,
        validator: ValidatorId,
        start_time: SimTime,
        duration: SimTime,
    ) -> SimTime {
        let ends_at = start_time + duration;
        self.next_free[validator as usize] = ends_at;
        ends_at
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn basic() {
let mut resource = Resource::new(2);
assert_eq!(resource.time_next_free(0), SimTime::ZERO);
assert_eq!(resource.time_next_free(1), SimTime::ZERO);
// schedule resource on validator 0 for time 1-11
assert_eq!(
resource.schedule(0, SimTime::new(1), SimTime::new(10)),
SimTime::new(11)
);
// next free works
assert_eq!(resource.time_next_free(0), SimTime::new(11));
assert_eq!(
resource.time_next_free_after(0, SimTime::new(10)),
SimTime::new(11)
);
assert_eq!(
resource.time_next_free_after(0, SimTime::new(20)),
SimTime::new(20)
);
// resource still free on other validator
assert_eq!(resource.time_next_free(1), SimTime::ZERO);
}
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/types/slice.rs | src/types/slice.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Defines the [`Slice`] and related data structures.
use rand::{RngCore, rng};
use wincode::{SchemaRead, SchemaWrite};
use crate::crypto::merkle::{BlockHash, SliceRoot};
use crate::shredder::{MAX_DATA_PER_SLICE, ValidatedShred};
use crate::types::SliceIndex;
use crate::{BlockId, Slot};
/// A slice is the unit of data between block and shred.
///
/// It corresponds to a single batch of data that is disseminated by the leader.
/// During shredding, a slice is turned into multiple shreds.
/// During deshredding, multiple shreds are turned into a slice.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Slice {
/// Slot number this slice is part of.
pub slot: Slot,
/// Index of the slice within its slot.
pub slice_index: SliceIndex,
/// Indicates whether this is the last slice in the slot.
pub is_last: bool,
/// Merkle root hash over all shreds in this slice.
pub merkle_root: Option<SliceRoot>,
/// If first slice in the block or parent changed due to optimistic handover,
/// then indicates which block is the parent of the block this slice is part of.
pub parent: Option<(Slot, BlockHash)>,
/// Payload bytes.
pub data: Vec<u8>,
}
impl Slice {
    /// Constructs a [`Slice`] from its component parts.
    pub(crate) fn from_parts(
        header: SliceHeader,
        payload: SlicePayload,
        merkle_root: Option<SliceRoot>,
    ) -> Self {
        // Destructure explicitly so that adding a field to either part
        // triggers a compile error here instead of being silently dropped.
        let SliceHeader {
            slot,
            slice_index,
            is_last,
        } = header;
        let SlicePayload { parent, data } = payload;
        Self {
            slot,
            slice_index,
            is_last,
            merkle_root,
            parent,
            data,
        }
    }
    /// Creates a [`Slice`] from raw payload bytes and the metadata extracted from a shred.
    ///
    /// Header and Merkle root are copied from `any_shred`; the parameter name
    /// suggests any shred of the slice carries the same values — TODO confirm.
    #[must_use]
    pub(crate) fn from_shreds(payload: SlicePayload, any_shred: &ValidatedShred) -> Self {
        let header = any_shred.payload().header.clone();
        let merkle_root = Some(any_shred.merkle_root.clone());
        Self::from_parts(header, payload, merkle_root)
    }
    /// Deconstructs a [`Slice`] into its components: [`SliceHeader`] and [`SlicePayload`].
    ///
    /// The Merkle root is dropped in the process.
    pub(crate) fn deconstruct(self) -> (SliceHeader, SlicePayload) {
        let Slice {
            slot,
            slice_index,
            is_last,
            merkle_root: _,
            parent,
            data,
        } = self;
        (
            SliceHeader {
                slot,
                slice_index,
                is_last,
            },
            SlicePayload { parent, data },
        )
    }
    /// Extracts the [`SliceHeader`] from a [`Slice`].
    pub(crate) fn to_header(&self) -> SliceHeader {
        SliceHeader {
            slot: self.slot,
            slice_index: self.slice_index,
            is_last: self.is_last,
        }
    }
}
/// Struct to hold all the header payload of a [`Slice`].
///
/// This information is included in each shred after shredding.
#[derive(Clone, Debug, SchemaRead, SchemaWrite)]
pub(crate) struct SliceHeader {
/// Same as [`Slice::slot`].
pub(crate) slot: Slot,
/// Same as [`Slice::slice_index`].
pub(crate) slice_index: SliceIndex,
/// Same as [`Slice::is_last`].
pub(crate) is_last: bool,
}
/// Struct to hold all the actual payload of a [`Slice`].
///
/// This is what actually gets "shredded" into different shreds.
#[derive(Clone, Debug, PartialEq, Eq, SchemaRead, SchemaWrite)]
pub(crate) struct SlicePayload {
/// Same as [`Slice::parent`].
pub(crate) parent: Option<(Slot, BlockHash)>,
/// Same as [`Slice::data`].
pub(crate) data: Vec<u8>,
}
impl SlicePayload {
    /// Constructs a new [`SlicePayload`] from its component parts.
    pub(crate) fn new(parent: Option<(Slot, BlockHash)>, data: Vec<u8>) -> Self {
        Self { parent, data }
    }
    /// Serializes the payload into bytes.
    ///
    /// # Panics
    ///
    /// Panics if wincode serialization fails.
    pub(crate) fn to_bytes(&self) -> Vec<u8> {
        wincode::serialize(self).unwrap()
    }
}
impl From<SlicePayload> for Vec<u8> {
    /// Serializes the payload into bytes (same encoding as
    /// [`SlicePayload::to_bytes`]); panics if serialization fails.
    fn from(payload: SlicePayload) -> Self {
        wincode::serialize(&payload).unwrap()
    }
}
impl From<&[u8]> for SlicePayload {
    /// Deserializes a payload from bytes.
    ///
    /// # Panics
    ///
    /// Panics if the input is longer than [`MAX_DATA_PER_SLICE`]
    /// or fails to deserialize.
    fn from(payload: &[u8]) -> Self {
        assert!(
            payload.len() <= MAX_DATA_PER_SLICE,
            "payload.len()={} {MAX_DATA_PER_SLICE}",
            payload.len()
        );
        wincode::deserialize(payload).unwrap()
    }
}
/// Creates a [`SlicePayload`] with a random payload of desired size (in bytes).
///
/// The payload does not contain valid transactions.
/// This function should only be used for testing and benchmarking.
//
// XXX: This is only used in test and benchmarking code.
// Ensure it is only compiled when we are testing or benchmarking.
pub(crate) fn create_slice_payload_with_invalid_txs(
    parent: Option<BlockId>,
    desired_size: usize,
) -> SlicePayload {
    // Size of the serialized `parent` field; subtracted below so the total
    // serialized payload (parent + length prefix + data) is `desired_size`.
    let parent_bytes = <Option<BlockId> as wincode::SchemaWrite>::size_of(&parent).unwrap();
    // 8 bytes for data length (usize), since wincode uses fixed-length integer encoding
    let data_len_bytes = 8;
    // Panics if `desired_size` is smaller than the fixed serialization
    // overhead — acceptable for a test-only helper.
    let size = desired_size
        .checked_sub(parent_bytes + data_len_bytes)
        .unwrap();
    let mut data = vec![0; size];
    let mut rng = rng();
    rng.fill_bytes(&mut data);
    SlicePayload { parent, data }
}
/// Creates a [`Slice`] with a random payload of desired size (in bytes).
///
/// The slice does not contain valid transactions.
/// This function should only be used for testing and benchmarking.
//
// XXX: This is only used in test and benchmarking code. Ensure it is only compiled when we are testing or benchmarking.
pub fn create_slice_with_invalid_txs(desired_size: usize) -> Slice {
    let payload = create_slice_payload_with_invalid_txs(None, desired_size);
    // Single-slice block in slot 0: the first slice is also the last.
    let header = SliceHeader {
        slot: Slot::new(0),
        slice_index: SliceIndex::first(),
        is_last: true,
    };
    Slice::from_parts(header, payload, None)
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/types/slot.rs | src/types/slot.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Defines the [`Slot`] type.
use std::fmt::Display;
use wincode::{SchemaRead, SchemaWrite};
/// Number of slots in each leader window.
// NOTE: this is public to support testing and one additional function.
// Consider hiding it.
pub const SLOTS_PER_WINDOW: u64 = 4;
/// Number of slots in each epoch.
// NOTE: consider hiding this definition.
pub const SLOTS_PER_EPOCH: u64 = 18_000;
/// Slot number type.
#[repr(transparent)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, SchemaRead, SchemaWrite)]
pub struct Slot(u64);
impl Slot {
    /// Creates a new slot with the given number.
    pub fn new(slot: u64) -> Self {
        Self(slot)
    }
    /// Returns the genesis slot.
    pub fn genesis() -> Self {
        Self(0)
    }
    /// Returns the inner `u64`.
    pub fn inner(self) -> u64 {
        self.0
    }
    /// Returns an infinite iterator that yields the first slot in each window.
    pub fn windows() -> impl Iterator<Item = Self> {
        (0..).step_by(SLOTS_PER_WINDOW as usize).map(Self)
    }
    /// Returns a double-ended iterator that yields all the slots in the window `self` is in.
    pub fn slots_in_window(self) -> impl DoubleEndedIterator<Item = Slot> {
        let start = self.first_slot_in_window();
        (start.0..start.0 + SLOTS_PER_WINDOW).map(Self)
    }
    /// Returns an infinite iterator that yields all the slots after `self`.
    ///
    /// Starts at `self.next()`; `self` itself is not included.
    pub fn future_slots(&self) -> impl Iterator<Item = Self> {
        (self.0 + 1..).map(Self)
    }
    /// Returns the first slot in the window this slot belongs to.
    pub fn first_slot_in_window(&self) -> Slot {
        let window = self.0 / SLOTS_PER_WINDOW;
        Self(window * SLOTS_PER_WINDOW)
    }
    /// Returns the last slot in the window this slot belongs to.
    pub fn last_slot_in_window(&self) -> Slot {
        let window = self.0 / SLOTS_PER_WINDOW;
        let next_window = window + 1;
        Self(next_window * SLOTS_PER_WINDOW - 1)
    }
    /// Returns true if `self` is the first slot in the window.
    pub fn is_start_of_window(&self) -> bool {
        self.0.is_multiple_of(SLOTS_PER_WINDOW)
    }
    /// Returns the next slot after `self`.
    pub fn next(&self) -> Self {
        Self(self.0 + 1)
    }
    /// Returns the previous slot before `self`.
    ///
    /// # Panics
    ///
    /// Panics if `self` is the genesis slot (there is no previous slot).
    pub fn prev(&self) -> Self {
        Self(self.0.checked_sub(1).unwrap())
    }
    /// Returns `true` iff this slot is part of the genesis window.
    pub fn is_genesis_window(&self) -> bool {
        let window = self.0 / SLOTS_PER_WINDOW;
        window == 0
    }
    /// Returns `true` iff this slot is the genesis slot.
    pub fn is_genesis(&self) -> bool {
        self.0 == 0
    }
}
impl Default for Slot {
    /// Defaults to the genesis slot (slot 0).
    fn default() -> Self {
        Self::genesis()
    }
}
impl Display for Slot {
    /// Renders the slot as its plain decimal number.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn basic() {
let window_slots = Slot::windows().take(10).collect::<Vec<_>>();
for (window, first_slot) in window_slots.iter().take(9).enumerate() {
assert!(first_slot.is_start_of_window());
assert_eq!(*first_slot, first_slot.first_slot_in_window());
let last_slot = first_slot.last_slot_in_window();
assert_eq!(last_slot.next(), window_slots[window + 1]);
assert_eq!(last_slot, window_slots[window + 1].prev());
}
}
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/types/slice_index.rs | src/types/slice_index.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Defines the [`SliceIndex`] type.
use std::fmt::Display;
use std::mem::MaybeUninit;
use serde::de::{self, Visitor};
use serde::{Deserialize, Serialize};
use wincode::{SchemaRead, SchemaWrite};
/// Maximum number of slices a leader may produce per block.
pub const MAX_SLICES_PER_BLOCK: usize = 1024;
/// Slice index type.
///
/// Using strong type to enforce certain constraints, e.g. it is never >= [`MAX_SLICES_PER_BLOCK`].
#[repr(transparent)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, SchemaWrite)]
pub struct SliceIndex(usize);
impl SliceIndex {
    /// Creates a new slice index for testing purposes.
    ///
    /// Panics if `index` is not in the valid range.
    #[cfg(test)]
    pub(crate) fn new_unchecked(index: usize) -> Self {
        Self::new(index).unwrap()
    }
    /// Creates a new slice index.
    ///
    /// Returns `None` if `index` is not below [`MAX_SLICES_PER_BLOCK`],
    /// which enforces the type's range invariant at the only construction site.
    fn new(index: usize) -> Option<Self> {
        if index >= MAX_SLICES_PER_BLOCK {
            None
        } else {
            Some(Self(index))
        }
    }
    /// Returns the inner `usize`.
    pub(crate) fn inner(self) -> usize {
        self.0
    }
    /// Returns the first, i.e. smallest, slice index.
    pub(crate) fn first() -> Self {
        Self(0)
    }
    /// Returns `true` if `self` is the first, i.e. smallest, slice index.
    pub(crate) fn is_first(self) -> bool {
        self.0 == 0
    }
    /// Returns `true` if `self` is the max possible slice index, i.e. `MAX_SLICES_PER_BLOCK - 1`.
    pub(crate) fn is_max(self) -> bool {
        self.0 == MAX_SLICES_PER_BLOCK - 1
    }
    /// Returns an iterator that iterates over all the valid slice indices.
    ///
    /// Yields indices in increasing order, starting at 0.
    pub(crate) fn all() -> impl Iterator<Item = Self> {
        (0..MAX_SLICES_PER_BLOCK).map(Self)
    }
    /// Returns an iterator that iterates over slice indices starting from the first to self inclusive.
    pub(crate) fn until(self) -> impl Iterator<Item = Self> {
        (0..=self.0).map(Self)
    }
}
impl Display for SliceIndex {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.0)
}
}
impl<'de> Deserialize<'de> for SliceIndex {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
deserializer.deserialize_u64(SliceIndexVisitor)
}
}
struct SliceIndexVisitor;
impl<'de> Visitor<'de> for SliceIndexVisitor {
type Value = SliceIndex;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
formatter,
"expected a usize between 0 and {MAX_SLICES_PER_BLOCK}"
)
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
SliceIndex::new(v as usize).ok_or(de::Error::custom(
"input {v} is not in the range [0:{MAX_SLICES_PER_BLOCK})",
))
}
}
impl<'de> SchemaRead<'de> for SliceIndex {
    type Dst = Self;
    /// Reads a `SliceIndex` and validates its range invariant.
    ///
    /// Rejects any value `>= MAX_SLICES_PER_BLOCK`, so deserialized indices
    /// uphold the same invariant as ones constructed via `SliceIndex::new`.
    fn read(
        reader: &mut impl wincode::io::Reader<'de>,
        dst: &mut MaybeUninit<Self::Dst>,
    ) -> wincode::ReadResult<()> {
        // SAFETY: Any read of `std::mem::size_of(usize)` bytes correctly initializes `usize`.
        unsafe {
            reader.copy_into_t(dst)?;
            // Validate after the raw copy; on failure the (initialized but
            // out-of-range) value is simply discarded along with `dst`.
            if dst.assume_init_ref().0 >= MAX_SLICES_PER_BLOCK {
                Err(wincode::ReadError::Custom("slice index out of bounds"))
            } else {
                Ok(())
            }
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn valid_serde() {
let vs = [0, 1, MAX_SLICES_PER_BLOCK - 10, MAX_SLICES_PER_BLOCK - 1];
let vs = vs.into_iter().map(|v| v.to_string());
for v in vs {
serde_json::from_str::<SliceIndex>(&v).unwrap();
}
}
#[test]
fn invalid_serde() {
let vs = [
(-1).to_string(),
i64::MIN.to_string().to_string(),
MAX_SLICES_PER_BLOCK.to_string(),
(MAX_SLICES_PER_BLOCK + 1).to_string(),
(i64::MAX).to_string(),
(u64::MAX).to_string(),
(usize::MAX).to_string(),
];
for v in vs {
serde_json::from_str::<SliceIndex>(&v).unwrap_err();
}
}
#[test]
fn valid_wincode() {
let vs = [0, 1, MAX_SLICES_PER_BLOCK - 10, MAX_SLICES_PER_BLOCK - 1];
let vs = vs.iter().map(wincode::serialize);
for res in vs {
let v = res.unwrap();
wincode::deserialize::<SliceIndex>(&v).unwrap();
}
}
#[test]
fn invalid_wincode() {
let vs = [MAX_SLICES_PER_BLOCK, MAX_SLICES_PER_BLOCK + 1, usize::MAX];
let vs = vs.iter().map(wincode::serialize);
for res in vs {
let v = res.unwrap();
wincode::deserialize::<SliceIndex>(&v).unwrap_err();
}
}
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/all2all/robust.rs | src/all2all/robust.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! A more robust implementation of an all-to-all broadcast protocol.
//!
//! Broadcasts each message over the underlying instance of [`Network`].
//! The message may be retransmitted multiple times.
use std::iter::repeat_n;
use async_trait::async_trait;
use super::All2All;
use crate::ValidatorInfo;
use crate::consensus::ConsensusMessage;
use crate::network::{ConsensusNetwork, Network};
/// Instance of the robust all-to-all broadcast protocol.
// TODO: actually make more robust (retransmits, ...)
pub struct RobustAll2All<N: Network> {
    // Recipients of every broadcast.
    validators: Vec<ValidatorInfo>,
    // Underlying network used for sending and receiving.
    network: N,
}
impl<N: Network> RobustAll2All<N> {
    /// Creates a new `RobustAll2All` instance.
    ///
    /// Messages will be broadcast to all `validators` over the provided `network`.
    /// Potential retransmits will be handled automatically, also over the `network`.
    pub fn new(validators: Vec<ValidatorInfo>, network: N) -> Self {
        Self {
            validators,
            network,
        }
    }
    /// Handles pending retransmits.
    ///
    /// Currently a no-op; retransmit tracking is not implemented yet
    /// (see the TODO on the struct above).
    pub fn handle_retransmits(&self) {}
}
#[async_trait]
impl<N: Network> All2All for RobustAll2All<N>
where
    N: ConsensusNetwork,
{
    /// Broadcasts the message to all validators.
    ///
    /// # Errors
    ///
    /// Returns an error if the underlying network send fails.
    async fn broadcast(&self, msg: &ConsensusMessage) -> std::io::Result<()> {
        // HACK: stupidly expensive retransmits
        // Sends 1000 copies of the message to every validator to compensate
        // for packet loss, instead of proper ACK-driven retransmission.
        let addrs = self
            .validators
            .iter()
            .flat_map(|v| repeat_n(v.all2all_address, 1000));
        self.network.send_to_many(msg, addrs).await
    }
    /// Receives the next message from the underlying network.
    async fn receive(&self) -> std::io::Result<ConsensusMessage> {
        self.network.receive().await
        // Sketch of the planned ACK-based protocol (not implemented yet):
        // loop {
        //     let msg = self.network.receive().await;
        //     match msg {
        //         // TODO: handle ACK
        //         NetworkMessage::Ack(nonce) => {}
        //         // TODO: send ACK
        //         _ => return msg,
        //     }
        // }
    }
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use std::time::Duration;
use tokio::task::JoinSet;
use tokio::time::timeout;
use super::*;
use crate::consensus::Vote;
use crate::crypto::aggsig;
use crate::crypto::signature::SecretKey;
use crate::network::simulated::SimulatedNetworkCore;
use crate::network::{dontcare_sockaddr, localhost_ip_sockaddr};
use crate::types::Slot;
async fn broadcast_test(packet_loss: f64) {
// set up network and nodes
let core = Arc::new(
SimulatedNetworkCore::default()
.with_default_latency(Duration::from_millis(10))
.with_packet_loss(packet_loss),
);
let net_sender = core.join_unlimited(0).await;
let mut net_others = Vec::new();
let mut validators = Vec::new();
for i in 0..20 {
if i > 0 {
net_others.push(core.join_unlimited(i).await);
}
let sk = SecretKey::new(&mut rand::rng());
let voting_sk = aggsig::SecretKey::new(&mut rand::rng());
validators.push(ValidatorInfo {
id: i,
stake: 1,
pubkey: sk.to_pk(),
voting_pubkey: voting_sk.to_pk(),
all2all_address: localhost_ip_sockaddr(i.try_into().unwrap()),
disseminator_address: dontcare_sockaddr(),
repair_request_address: dontcare_sockaddr(),
repair_response_address: dontcare_sockaddr(),
});
}
// set up all-to-all instances
let mut all2all_others = Vec::new();
for net in net_others {
all2all_others.push(RobustAll2All::new(validators.clone(), net));
}
let all2all_sender = RobustAll2All::new(validators, net_sender);
// run sender and receivers
let mut tasks = JoinSet::new();
tasks.spawn(async move {
let voting_sk = aggsig::SecretKey::new(&mut rand::rng());
let vote = Vote::new_skip(Slot::genesis(), &voting_sk, 0);
let msg = ConsensusMessage::Vote(vote);
all2all_sender.broadcast(&msg).await.unwrap();
while let Ok(Ok(_)) =
timeout(Duration::from_millis(1000), all2all_sender.receive()).await
{
// do nothing
}
});
for all2all in all2all_others {
tasks.spawn(async move {
let received = all2all.receive().await.unwrap();
assert!(matches!(received, ConsensusMessage::Vote(_)));
while let Ok(Ok(_)) = timeout(Duration::from_millis(1000), all2all.receive()).await
{
// do nothing
}
});
}
tasks.join_all().await;
}
#[tokio::test]
async fn simple_broadcast() {
// run broadcast test with simulated network w/o any packet loss
broadcast_test(0.0).await;
}
#[tokio::test]
async fn packet_loss() {
// run broadcast test with simulated network with 20% packet loss
broadcast_test(0.2).await;
}
#[tokio::test]
async fn extreme_packet_loss() {
// run broadcast test with simulated network with 90% packet loss
broadcast_test(0.9).await;
}
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/all2all/trivial.rs | src/all2all/trivial.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! A trivial implementation of an all-to-all broadcast protocol.
//!
//! Broadcasts each message once over the underlying [`Network`].
//! After that, the message is forgotten. The protocol is completely stateless.
//! If the underlying [`Network`] is not reliable, the message might thus be lost.
use async_trait::async_trait;
use super::All2All;
use crate::ValidatorInfo;
use crate::consensus::ConsensusMessage;
use crate::network::{ConsensusNetwork, Network};
/// Instance of the trivial all-to-all broadcast protocol.
pub struct TrivialAll2All<N: Network> {
    /// All validators to broadcast to; each one's `all2all_address` is used as recipient.
    validators: Vec<ValidatorInfo>,
    /// Underlying network used for sending and receiving messages.
    network: N,
}
impl<N: Network> TrivialAll2All<N> {
    /// Constructs a trivial all-to-all instance on top of `network`.
    ///
    /// Every broadcast is sent once to each entry in `validators`,
    /// addressed via its [`ValidatorInfo::all2all_address`].
    pub const fn new(validators: Vec<ValidatorInfo>, network: N) -> Self {
        Self { network, validators }
    }
}
#[async_trait]
impl<N: Network> All2All for TrivialAll2All<N>
where
    N: ConsensusNetwork,
{
    /// Sends `msg` once to every known validator's all-to-all address.
    async fn broadcast(&self, msg: &ConsensusMessage) -> std::io::Result<()> {
        let recipients = self.validators.iter().map(|v| v.all2all_address);
        self.network.send_to_many(msg, recipients).await?;
        Ok(())
    }

    /// Receives the next message from the underlying network.
    async fn receive(&self) -> std::io::Result<ConsensusMessage> {
        self.network.receive().await
    }
}
#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use std::time::Duration;

    use tokio::task::JoinSet;

    use super::*;
    use crate::consensus::Vote;
    use crate::crypto::aggsig;
    use crate::crypto::signature::SecretKey;
    use crate::network::simulated::SimulatedNetworkCore;
    use crate::network::{SimulatedNetwork, dontcare_sockaddr, localhost_ip_sockaddr};
    use crate::types::Slot;

    /// One sender (validator 0) broadcasts a skip vote over a lossless
    /// simulated network; all 19 other validators must receive it.
    #[tokio::test]
    async fn simple_broadcast() {
        // set up network and nodes
        let core = Arc::new(
            SimulatedNetworkCore::default()
                .with_default_latency(Duration::from_millis(10))
                .with_packet_loss(0.0),
        );
        let net_sender: SimulatedNetwork<ConsensusMessage, ConsensusMessage> =
            core.join_unlimited(0).await;
        let mut net_others = Vec::new();
        let mut validators = Vec::new();
        for i in 0..20 {
            // validator 0 is the sender; only the others get receive-side networks
            if i > 0 {
                let net: SimulatedNetwork<ConsensusMessage, ConsensusMessage> =
                    core.join_unlimited(i).await;
                net_others.push(net);
            }
            let sk = SecretKey::new(&mut rand::rng());
            let voting_sk = aggsig::SecretKey::new(&mut rand::rng());
            validators.push(ValidatorInfo {
                id: i,
                stake: 1,
                pubkey: sk.to_pk(),
                voting_pubkey: voting_sk.to_pk(),
                all2all_address: localhost_ip_sockaddr(i.try_into().unwrap()),
                disseminator_address: dontcare_sockaddr(),
                repair_request_address: dontcare_sockaddr(),
                repair_response_address: dontcare_sockaddr(),
            });
        }
        // set up all-to-all instances
        let mut all2all_others = Vec::new();
        for net in net_others {
            all2all_others.push(TrivialAll2All::new(validators.clone(), net));
        }
        let all2all_sender = TrivialAll2All::new(validators, net_sender);
        // run sender and receivers
        let mut tasks = JoinSet::new();
        tasks.spawn(async move {
            let voting_sk = aggsig::SecretKey::new(&mut rand::rng());
            let vote = Vote::new_skip(Slot::genesis(), &voting_sk, 0);
            let msg = ConsensusMessage::Vote(vote);
            all2all_sender.broadcast(&msg).await.unwrap();
        });
        for all2all in all2all_others {
            tasks.spawn(async move {
                // each receiver must see exactly the broadcast vote
                let received = all2all.receive().await.unwrap();
                assert!(matches!(received, ConsensusMessage::Vote(_)));
            });
        }
        tasks.join_all().await;
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/consensus/epoch_info.rs | src/consensus/epoch_info.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
use crate::types::SLOTS_PER_WINDOW;
use crate::{Slot, Stake, ValidatorId, ValidatorInfo};
/// Epoch-specific validator information.
#[derive(Clone, Debug)]
pub struct EpochInfo {
    // This node's own validator ID (an index into `validators`).
    pub(crate) own_id: ValidatorId,
    // Info about all validators of the epoch, indexed by `ValidatorId`.
    pub(crate) validators: Vec<ValidatorInfo>,
}
impl EpochInfo {
    /// Creates a new `EpochInfo` for the given validator set.
    pub const fn new(own_id: ValidatorId, validators: Vec<ValidatorInfo>) -> Self {
        Self { own_id, validators }
    }

    /// Returns the [`ValidatorInfo`] of validator `id`.
    ///
    /// # Panics
    ///
    /// Panics if `id` is not a valid index into the validator set.
    #[must_use]
    pub fn validator(&self, id: ValidatorId) -> &ValidatorInfo {
        &self.validators[id as usize]
    }

    /// Returns the [`ValidatorInfo`] of the leader assigned to `slot`.
    ///
    /// Leaders rotate round-robin, one leader per window of slots.
    #[must_use]
    pub fn leader(&self, slot: Slot) -> &ValidatorInfo {
        let window_index = slot.inner() / SLOTS_PER_WINDOW;
        let num_validators = self.validators.len() as u64;
        self.validator(window_index % num_validators)
    }

    /// Returns the sum of all validators' stakes.
    #[must_use]
    pub fn total_stake(&self) -> Stake {
        let mut total: Stake = 0;
        for validator in &self.validators {
            total += validator.stake;
        }
        total
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/consensus/vote.rs | src/consensus/vote.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Vote types used for the consensus protocol.
//!
//! Defines the signed [`Vote`] wrapper and the [`VoteKind`] payload variants
//! (notarization, notar-fallback, skip, skip-fallback, and finalization votes).
use wincode::{SchemaRead, SchemaWrite};
use crate::crypto::aggsig::{PublicKey, SecretKey};
use crate::crypto::merkle::BlockHash;
use crate::crypto::{IndividualSignature, Signable};
use crate::{Slot, ValidatorId};
/// A signed vote used in consensus.
///
/// `Vote` wraps a [`VoteKind`] with the signer's identity and signature,
/// allowing type-specific data to be authenticated and verified.
///
/// This struct is produced by signing the bytes of a `VoteKind` instance.
#[derive(Clone, Debug, PartialEq, Eq, SchemaRead, SchemaWrite)]
pub struct Vote {
    // Type-specific vote payload; its `bytes_to_sign()` output is what gets signed.
    kind: VoteKind,
    // Signature over the serialized `kind`.
    sig: IndividualSignature,
    // ID of the validator that produced the signature.
    signer: ValidatorId,
}
/// Represents the type-specific vote payload as per the protocol.
#[derive(Clone, Debug, PartialEq, Eq, SchemaRead, SchemaWrite)]
pub enum VoteKind {
    /// A notarization vote for a given block hash in a given slot.
    Notar(Slot, BlockHash),
    /// A notar-fallback vote for a given block hash in a given slot.
    NotarFallback(Slot, BlockHash),
    /// A skip vote for a given slot.
    Skip(Slot),
    /// A skip-fallback vote for a given slot.
    SkipFallback(Slot),
    /// A finalization vote for a given slot.
    Final(Slot),
}
impl Vote {
/// Creates a new vote directly from its [`VoteKind`].
#[must_use]
pub fn new(kind: VoteKind, sk: &SecretKey, signer: ValidatorId) -> Self {
let sig = sk.sign(&kind.bytes_to_sign());
Self { kind, sig, signer }
}
/// Creates a new notarization vote.
/// That is, a vote corresponding to the [`VoteKind::Notar`] variant.
#[must_use]
pub fn new_notar(
slot: Slot,
block_hash: BlockHash,
sk: &SecretKey,
signer: ValidatorId,
) -> Self {
let kind = VoteKind::Notar(slot, block_hash);
Self::new(kind, sk, signer)
}
/// Creates a new notar-fallback vote.
/// That is, a vote corresponding to the [`VoteKind::NotarFallback`] variant.
#[must_use]
pub fn new_notar_fallback(
slot: Slot,
block_hash: BlockHash,
sk: &SecretKey,
signer: ValidatorId,
) -> Self {
let kind = VoteKind::NotarFallback(slot, block_hash);
Self::new(kind, sk, signer)
}
/// Creates a new skip vote.
/// That is, a vote corresponding to the [`VoteKind::Skip`] variant.
#[must_use]
pub fn new_skip(slot: Slot, sk: &SecretKey, signer: ValidatorId) -> Self {
let kind = VoteKind::Skip(slot);
Self::new(kind, sk, signer)
}
/// Creates a new skip-fallback vote.
/// That is, a vote corresponding to the [`VoteKind::SkipFallback`] variant.
#[must_use]
pub fn new_skip_fallback(slot: Slot, sk: &SecretKey, signer: ValidatorId) -> Self {
let kind = VoteKind::SkipFallback(slot);
Self::new(kind, sk, signer)
}
/// Creates a new finalization vote.
/// That is, a vote corresponding to the [`VoteKind::Final`] variant.
#[must_use]
pub fn new_final(slot: Slot, sk: &SecretKey, signer: ValidatorId) -> Self {
let kind = VoteKind::Final(slot);
Self::new(kind, sk, signer)
}
/// Checks whether this vote's signature is valid under the given public key.
#[must_use]
pub fn check_sig(&self, pk: &PublicKey) -> bool {
let msg = self.kind.bytes_to_sign();
self.sig.verify(&msg, pk)
}
/// Returns the [`VoteKind`] of this vote.
#[must_use]
pub const fn kind(&self) -> &VoteKind {
&self.kind
}
/// Returns `true` iff this is a notarization vote.
#[must_use]
pub const fn is_notar(&self) -> bool {
matches!(self.kind, VoteKind::Notar(_, _))
}
/// Returns `true` iff this is a notar-fallback vote.
#[must_use]
pub const fn is_notar_fallback(&self) -> bool {
matches!(self.kind, VoteKind::NotarFallback(_, _))
}
/// Returns `true` iff this is a skip vote.
#[must_use]
pub const fn is_skip(&self) -> bool {
matches!(self.kind, VoteKind::Skip(_))
}
/// Returns `true` iff this is a skip-fallback vote.
#[must_use]
pub const fn is_skip_fallback(&self) -> bool {
matches!(self.kind, VoteKind::SkipFallback(_))
}
/// Returns `true` iff this is a finalization vote.
#[must_use]
pub const fn is_final(&self) -> bool {
matches!(self.kind, VoteKind::Final(_))
}
/// Returns the slot number this vote corresponds to.
#[must_use]
pub const fn slot(&self) -> Slot {
self.kind.slot()
}
/// Returns the block hash this vote corresponds to, if any.
///
/// Returns `None` if the vote is a skip(-fallback) or finalization vote.
#[must_use]
pub const fn block_hash(&self) -> Option<&BlockHash> {
self.kind.block_hash()
}
/// Returns the signature of this vote.
#[must_use]
pub const fn sig(&self) -> &IndividualSignature {
&self.sig
}
/// Returns the signer of this vote.
#[must_use]
pub const fn signer(&self) -> ValidatorId {
self.signer
}
}
impl VoteKind {
    /// Returns the slot number this vote corresponds to.
    #[must_use]
    pub const fn slot(&self) -> Slot {
        match self {
            Self::Notar(slot, _) => *slot,
            Self::NotarFallback(slot, _) => *slot,
            Self::Skip(slot) => *slot,
            Self::SkipFallback(slot) => *slot,
            Self::Final(slot) => *slot,
        }
    }

    /// Returns the block hash this vote corresponds to, if any.
    ///
    /// Returns `None` if the vote is a skip(-fallback) or finalization vote.
    #[must_use]
    pub const fn block_hash(&self) -> Option<&BlockHash> {
        match self {
            Self::Notar(_, hash) => Some(hash),
            Self::NotarFallback(_, hash) => Some(hash),
            Self::Skip(_) | Self::SkipFallback(_) | Self::Final(_) => None,
        }
    }
}
impl Signable for VoteKind {
    /// Serializes the vote payload into the canonical byte representation
    /// that is signed and verified.
    fn bytes_to_sign(&self) -> Vec<u8> {
        // in-memory serialization of a plain enum is expected to be infallible
        wincode::serialize(self).expect("serialization should not panic")
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::crypto::merkle::GENESIS_BLOCK_HASH;

    /// Every vote constructor must yield the matching kind and a valid signature.
    #[test]
    fn basic() {
        let sk = SecretKey::new(&mut rand::rng());
        let pk = sk.to_pk();
        let slot = Slot::new(0);
        // pair each constructed vote with the predicate asserting its kind
        let votes: Vec<(Vote, fn(&Vote) -> bool)> = vec![
            (
                Vote::new_notar(slot, GENESIS_BLOCK_HASH, &sk, 0),
                Vote::is_notar,
            ),
            (
                Vote::new_notar_fallback(slot, GENESIS_BLOCK_HASH, &sk, 0),
                Vote::is_notar_fallback,
            ),
            (Vote::new_skip(slot, &sk, 0), Vote::is_skip),
            (Vote::new_skip_fallback(slot, &sk, 0), Vote::is_skip_fallback),
            (Vote::new_final(slot, &sk, 0), Vote::is_final),
        ];
        for (vote, has_expected_kind) in votes {
            assert!(has_expected_kind(&vote));
            assert!(vote.check_sig(&pk));
        }
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/consensus/votor.rs | src/consensus/votor.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Main voting logic for the consensus protocol.
//!
//! Besides [`super::Pool`], [`Votor`] is the other main internal component Alpenglow.
//! It handles the main voting decisions for the consensus protocol. As input it
//! receives events of type [`VotorEvent`] over a channel, depending on the event
//! type these were emitted by [`super::Pool`], [`super::Blockstore`] and itself.
//! Votor keeps its own internal state for each slot based on previous events and votes.
//!
//! Votor has access to an instance of [`All2All`] for broadcasting votes.
use std::collections::{BTreeMap, BTreeSet};
use std::sync::Arc;
use color_eyre::Result;
use log::{debug, trace, warn};
use tokio::sync::mpsc::{Receiver, Sender};
use super::blockstore::BlockInfo;
use super::{Cert, DELTA_BLOCK, DELTA_TIMEOUT, Vote};
use crate::consensus::DELTA_FIRST_SLICE;
use crate::crypto::aggsig::SecretKey;
use crate::crypto::merkle::{BlockHash, GENESIS_BLOCK_HASH, MerkleRoot};
use crate::{All2All, Slot, ValidatorId};
/// Events that Votor is interested in.
///
/// These are emitted by [`super::Pool`], [`super::Blockstore`] and [`Votor`] itself.
/// They are the inputs that drive the voting loop of Votor.
#[derive(Clone, Debug)]
pub enum VotorEvent {
    /// The pool has newly marked the given block as a ready parent for `slot`.
    ///
    /// This event is only emitted once per window; `slot` is always the
    /// window's first slot.
    /// The parent block is identified by `parent_slot` and `parent_hash`.
    ParentReady {
        slot: Slot,
        parent_slot: Slot,
        parent_hash: BlockHash,
    },
    /// The given block has reached the safe-to-notar status.
    SafeToNotar(Slot, BlockHash),
    /// The given slot has reached the safe-to-skip status.
    SafeToSkip(Slot),
    /// New certificate created in pool (should then be broadcast by Votor).
    CertCreated(Box<Cert>),
    /// Standstill timeout has fired.
    ///
    /// The provided slot indicates the highest finalized slot as seen by Pool.
    /// The provided certificates and votes should be re-broadcast.
    Standstill(Slot, Vec<Cert>, Vec<Vote>),
    /// First valid shred of the leader's block was received for the slot.
    FirstShred(Slot),
    /// New (complete) block was received in blockstore.
    Block { slot: Slot, block_info: BlockInfo },
    /// Regular timeout for the given slot has fired.
    Timeout(Slot),
    /// Early timeout for a crashed leader (nothing was received) has fired.
    TimeoutCrashedLeader(Slot),
}
/// Votor implements the decision process of which votes to cast.
///
/// It keeps some state for each slot and checks the conditions for voting.
/// On [`Votor::event_receiver`], it receives events from [`super::Pool`],
/// [`super::Blockstore`] and itself.
/// Informed by these events Votor updates its state and generates votes.
/// Votes are signed with [`Votor::voting_key`] and broadcast using [`Votor::all2all`].
pub struct Votor<A: All2All> {
    // TODO: merge all of these into `SlotState` struct?
    /// Indicates for which slots we already voted notar or skip.
    voted: BTreeSet<Slot>,
    /// Indicates for which slots we already voted notar and for what hash.
    voted_notar: BTreeMap<Slot, BlockHash>,
    /// Indicates for which slots we set the 'bad window' flag.
    bad_window: BTreeSet<Slot>,
    /// Blocks that have a notarization certificate (not notar-fallback).
    block_notarized: BTreeMap<Slot, BlockHash>,
    /// Set of `(slot, parent_slot, parent_hash)` triples; the block given by
    /// `(parent_slot, parent_hash)` is a valid parent for blocks in `slot`.
    parents_ready: BTreeSet<(Slot, Slot, BlockHash)>,
    /// Indicates for which slots we received at least one shred.
    received_shred: BTreeSet<Slot>,
    /// Blocks that are waiting for previous slots to be notarized.
    pending_blocks: BTreeMap<Slot, BlockInfo>,
    /// Slots that Votor is done with (no further votes will be cast for them).
    retired_slots: BTreeSet<Slot>,
    /// Own validator ID.
    validator_id: ValidatorId,
    /// Secret key used to sign votes.
    voting_key: SecretKey,
    /// Channel for receiving events from pool, blockstore and Votor itself.
    event_receiver: Receiver<VotorEvent>,
    /// Sender side of event channel. Used for sending events to self.
    event_sender: Sender<VotorEvent>,
    /// [`All2All`] instance used to broadcast votes.
    all2all: Arc<A>,
}
impl<A: All2All> Votor<A> {
    /// Creates a new Votor instance with empty state.
    ///
    /// The genesis slot is pre-seeded as voted, notarized and retired, so
    /// that the first real slot can immediately build on the genesis block.
    pub fn new(
        validator_id: ValidatorId,
        voting_key: SecretKey,
        event_sender: Sender<VotorEvent>,
        event_receiver: Receiver<VotorEvent>,
        all2all: Arc<A>,
    ) -> Self {
        // add dummy genesis block to some of the data structures
        let voted = [Slot::genesis()].into_iter().collect();
        let voted_notar = [(Slot::genesis(), GENESIS_BLOCK_HASH)]
            .into_iter()
            .collect();
        let block_notarized = [(Slot::genesis(), GENESIS_BLOCK_HASH)]
            .into_iter()
            .collect();
        let parents_ready = [(Slot::genesis(), Slot::genesis(), GENESIS_BLOCK_HASH)]
            .into_iter()
            .collect();
        let retired_slots = [Slot::genesis()].into_iter().collect();
        let votor = Self {
            voted,
            voted_notar,
            bad_window: BTreeSet::new(),
            block_notarized,
            parents_ready,
            received_shred: BTreeSet::new(),
            pending_blocks: BTreeMap::new(),
            retired_slots,
            validator_id,
            voting_key,
            event_receiver,
            event_sender,
            all2all,
        };
        // NOTE(review): `Slot::new(0)` is presumably the genesis slot; consider
        // `Slot::genesis()` for consistency with the initializers above — confirm.
        votor.set_timeouts(Slot::new(0));
        votor
    }

    /// Handles the voting (leader and non-leader) side of consensus protocol.
    ///
    /// Checks consensus conditions and broadcasts new votes.
    #[fastrace::trace]
    pub async fn voting_loop(&mut self) -> Result<()> {
        while let Some(event) = self.event_receiver.recv().await {
            // drop events for slots we already cast a finalization vote for
            if self.retired_slots.contains(&event.slot()) {
                trace!("ignoring event for retired slot {}", event.slot());
                continue;
            }
            trace!("votor event: {event:?}");
            match event {
                // events from Pool
                VotorEvent::ParentReady {
                    slot,
                    parent_slot,
                    parent_hash,
                } => {
                    let h = &hex::encode(parent_hash.as_hash())[..8];
                    trace!("slot {slot} has new valid parent {h} in slot {parent_slot}");
                    self.parents_ready.insert((slot, parent_slot, parent_hash));
                    // a new valid parent may unblock blocks we could not vote on yet
                    self.check_pending_blocks().await;
                    self.set_timeouts(slot);
                }
                VotorEvent::SafeToNotar(slot, hash) => {
                    debug!("voted notar-fallback in slot {slot}");
                    let vote =
                        Vote::new_notar_fallback(slot, hash, &self.voting_key, self.validator_id);
                    self.all2all.broadcast(&vote.into()).await.unwrap();
                    self.try_skip_window(slot).await;
                    self.bad_window.insert(slot);
                }
                VotorEvent::SafeToSkip(slot) => {
                    debug!("voted skip-fallback in slot {slot}");
                    let vote = Vote::new_skip_fallback(slot, &self.voting_key, self.validator_id);
                    self.all2all.broadcast(&vote.into()).await.unwrap();
                    self.try_skip_window(slot).await;
                    self.bad_window.insert(slot);
                }
                VotorEvent::CertCreated(cert) => {
                    match cert.as_ref() {
                        Cert::Notar(_) => {
                            // unwrap: notar certs presumably always carry a block hash — confirm
                            self.block_notarized
                                .insert(cert.slot(), cert.block_hash().cloned().unwrap());
                            self.try_final(cert.slot(), cert.block_hash().cloned().unwrap())
                                .await;
                        }
                        Cert::Final(_) | Cert::FastFinal(_) => {
                            // restart timeouts for the window of the finalized slot
                            let first_slot_in_window = cert.slot().first_slot_in_window();
                            self.set_timeouts(first_slot_in_window);
                        }
                        _ => {}
                    }
                    self.all2all.broadcast(&(*cert).into()).await.unwrap();
                }
                VotorEvent::Standstill(_, certs, votes) => {
                    // re-broadcast the given certs and votes to break the standstill
                    for cert in certs {
                        self.all2all.broadcast(&cert.into()).await.unwrap();
                    }
                    for vote in votes {
                        self.all2all.broadcast(&vote.into()).await.unwrap();
                    }
                }
                // events from Blockstore
                VotorEvent::FirstShred(slot) => {
                    self.received_shred.insert(slot);
                }
                VotorEvent::Block { slot, block_info } => {
                    if self.voted.contains(&slot) {
                        let h = &hex::encode(block_info.hash.as_hash())[..8];
                        warn!("not voting for block {h} in slot {slot}, already voted");
                        continue;
                    }
                    if self.try_notar(slot, block_info.clone()).await {
                        self.check_pending_blocks().await;
                    } else {
                        // cannot vote yet; park the block until its parent resolves
                        self.pending_blocks.insert(slot, block_info);
                    }
                }
                // events from Votor itself
                VotorEvent::Timeout(slot) => {
                    trace!("timeout for slot {slot}");
                    if !self.voted.contains(&slot) {
                        self.try_skip_window(slot).await;
                    }
                }
                VotorEvent::TimeoutCrashedLeader(slot) => {
                    trace!("timeout (crashed leader) for slot {slot}");
                    if !self.received_shred.contains(&slot) && !self.voted.contains(&slot) {
                        self.try_skip_window(slot).await;
                    }
                }
            }
        }
        Ok(())
    }

    /// Sets timeouts for the leader window starting at the given `slot`.
    ///
    /// Spawns a task that first fires a single crashed-leader timeout, then
    /// one regular timeout per slot of the window.
    ///
    /// # Panics
    ///
    /// Panics if `slot` is not the first slot of a window.
    fn set_timeouts(&self, slot: Slot) {
        assert!(slot.is_start_of_window());
        trace!(
            "setting timeouts for slots {slot}-{}",
            slot.last_slot_in_window()
        );
        let sender = self.event_sender.clone();
        tokio::spawn(async move {
            tokio::time::sleep(DELTA_TIMEOUT + DELTA_FIRST_SLICE).await;
            // HACK: ignoring errors to prevent panic when shutting down votor
            let event = VotorEvent::TimeoutCrashedLeader(slot);
            let _ = sender.send(event).await;
            for s in slot.slots_in_window() {
                // the first slot's deadline already accounts for DELTA_FIRST_SLICE above
                if s.is_start_of_window() {
                    tokio::time::sleep(DELTA_BLOCK - DELTA_FIRST_SLICE).await;
                } else {
                    tokio::time::sleep(DELTA_BLOCK).await;
                }
                let event = VotorEvent::Timeout(s);
                let _ = sender.send(event).await;
            }
        });
    }

    /// Sends a notarization vote for the given block if the conditions are met.
    ///
    /// Returns `true` iff we decided to send a notarization vote for the block.
    async fn try_notar(&mut self, slot: Slot, block_info: BlockInfo) -> bool {
        let BlockInfo {
            hash,
            parent: (parent_slot, parent_hash),
        } = block_info;
        let first_slot_in_window = slot.first_slot_in_window();
        if slot == first_slot_in_window {
            // first slot of a window: parent must be marked ready by the pool
            let valid_parent =
                self.parents_ready
                    .contains(&(slot, parent_slot, parent_hash.clone()));
            let h = &hex::encode(parent_hash.as_hash())[..8];
            trace!(
                "try notar slot {slot} with parent {h} in slot {parent_slot} (valid {valid_parent})"
            );
            if !valid_parent {
                return false;
            }
        } else if parent_slot != slot.prev()
            || self.voted_notar.get(&parent_slot) != Some(&parent_hash)
        {
            // mid-window: parent must be the previous slot's block we voted notar for
            return false;
        }
        debug!("voted notar for slot {slot}");
        let vote = Vote::new_notar(slot, hash.clone(), &self.voting_key, self.validator_id);
        self.all2all.broadcast(&vote.into()).await.unwrap();
        self.voted.insert(slot);
        self.voted_notar.insert(slot, hash.clone());
        self.pending_blocks.remove(&slot);
        self.try_final(slot, hash).await;
        true
    }

    /// Sends a finalization vote for the given block if the conditions are met.
    ///
    /// Requires that the block has a notarization certificate, that we
    /// ourselves voted notar for it, and that the slot is not in a bad window.
    async fn try_final(&mut self, slot: Slot, hash: BlockHash) {
        let notarized = self.block_notarized.get(&slot) == Some(&hash);
        let voted_notar = self.voted_notar.get(&slot) == Some(&hash);
        let not_bad = !self.bad_window.contains(&slot);
        if notarized && voted_notar && not_bad {
            let vote = Vote::new_final(slot, &self.voting_key, self.validator_id);
            self.all2all.broadcast(&vote.into()).await.unwrap();
            // after the finalization vote there is nothing left to do for the slot
            self.retired_slots.insert(slot);
        }
    }

    /// Sends skip votes for all unvoted slots in the window that `slot` belongs to.
    async fn try_skip_window(&mut self, slot: Slot) {
        trace!("try skip window of slot {slot}");
        for s in slot.slots_in_window() {
            // `insert` returns true only if we had not voted in slot `s` yet
            if self.voted.insert(s) {
                let vote = Vote::new_skip(s, &self.voting_key, self.validator_id);
                self.all2all.broadcast(&vote.into()).await.unwrap();
                self.bad_window.insert(s);
                debug!("voted skip for slot {s}");
            }
        }
    }

    /// Checks if we can vote on any of the pending blocks by now.
    ///
    /// Blocks we end up voting notar for are removed from `pending_blocks`
    /// by `try_notar` itself.
    async fn check_pending_blocks(&mut self) {
        let slots: Vec<_> = self.pending_blocks.keys().copied().collect();
        for slot in &slots {
            if let Some(block_info) = self.pending_blocks.get(slot) {
                self.try_notar(*slot, block_info.clone()).await;
            }
        }
    }
}
impl VotorEvent {
    /// Returns the slot this event refers to.
    const fn slot(&self) -> Slot {
        match self {
            Self::CertCreated(cert) => cert.slot(),
            Self::ParentReady { slot, .. } => *slot,
            Self::SafeToNotar(slot, _) => *slot,
            Self::SafeToSkip(slot) => *slot,
            Self::Standstill(slot, _, _) => *slot,
            Self::FirstShred(slot) => *slot,
            Self::Block { slot, .. } => *slot,
            Self::Timeout(slot) => *slot,
            Self::TimeoutCrashedLeader(slot) => *slot,
        }
    }
}
#[cfg(test)]
mod tests {
    use std::time::Duration;

    use tokio::sync::mpsc;

    use super::*;
    use crate::all2all::TrivialAll2All;
    use crate::consensus::cert::NotarCert;
    use crate::consensus::{ConsensusMessage, EpochInfo};
    use crate::crypto::Hash;
    use crate::network::SimulatedNetwork;
    use crate::test_utils::{generate_all2all_instances, generate_validators};

    type A2A = TrivialAll2All<SimulatedNetwork<ConsensusMessage, ConsensusMessage>>;

    /// Spawns a Votor for validator 0 (of 2) in a background task.
    ///
    /// Returns the second validator's all-to-all handle (used to observe the
    /// Votor's broadcasts), the event sender feeding the Votor, and the epoch info.
    async fn start_votor() -> (A2A, mpsc::Sender<VotorEvent>, Arc<EpochInfo>) {
        let (sks, epoch_info) = generate_validators(2);
        let mut a2a = generate_all2all_instances(epoch_info.validators.clone()).await;
        let (tx, rx) = mpsc::channel(100);
        let other_a2a = a2a.pop().unwrap();
        let votor_a2a = a2a.pop().unwrap();
        let mut votor = Votor::new(0, sks[0].clone(), tx.clone(), rx, Arc::new(votor_a2a));
        tokio::spawn(async move {
            votor.voting_loop().await.unwrap();
        });
        (other_a2a, tx, epoch_info)
    }

    /// Without any blocks, timeouts must trigger skip votes for the window.
    #[tokio::test]
    async fn timeouts() {
        let (other_a2a, _, _) = start_votor().await;
        // should vote skip for all slots
        let mut skipped_slots = Vec::new();
        let mut slots = Slot::genesis().slots_in_window().collect::<Vec<_>>();
        // genesis itself is pre-voted, so no skip vote is expected for it
        slots.remove(0);
        for _ in slots.clone() {
            if let Ok(msg) = other_a2a.receive().await {
                match msg {
                    ConsensusMessage::Vote(v) => {
                        assert!(v.is_skip());
                        skipped_slots.push(v.slot());
                    }
                    m => panic!("other msg: {m:?}"),
                }
            }
        }
        assert_eq!(skipped_slots, slots);
    }

    /// A complete block triggers a notar vote; a notar cert then triggers finalization.
    #[tokio::test]
    async fn notar_and_final() {
        let (other_a2a, tx, epoch_info) = start_votor().await;
        // vote notar after seeing block
        let slot = Slot::genesis().next();
        let event = VotorEvent::FirstShred(slot);
        tx.send(event).await.unwrap();
        let block_info = BlockInfo {
            hash: Hash::random_for_test().into(),
            parent: (Slot::genesis(), GENESIS_BLOCK_HASH),
        };
        let event = VotorEvent::Block { slot, block_info };
        tx.send(event).await.unwrap();
        let vote = match other_a2a.receive().await.unwrap() {
            ConsensusMessage::Vote(v) => v,
            m => panic!("other msg: {m:?}"),
        };
        assert!(vote.is_notar());
        assert_eq!(vote.slot(), slot);
        // vote finalize after seeing branch-certified
        let cert = Cert::Notar(NotarCert::new_unchecked(&[vote], &epoch_info.validators));
        let event = VotorEvent::CertCreated(Box::new(cert));
        tx.send(event).await.unwrap();
        match other_a2a.receive().await.unwrap() {
            ConsensusMessage::Vote(v) => {
                assert!(v.is_final());
                assert_eq!(v.slot(), slot);
            }
            m => panic!("other msg: {m:?}"),
        }
    }

    /// A block whose parent has not been voted on yet stays pending until
    /// the parent block arrives; then both get notar votes.
    #[tokio::test]
    async fn notar_out_of_order() {
        let (other_a2a, tx, _) = start_votor().await;
        let (slot1, hash1) = (Slot::genesis().next(), Hash::random_for_test());
        let (slot2, hash2) = (slot1.next(), Hash::random_for_test());
        // give later block to votor first
        let event = VotorEvent::FirstShred(slot2);
        tx.send(event).await.unwrap();
        let block_info = BlockInfo {
            hash: hash2.into(),
            parent: (slot1, hash1.clone().into()),
        };
        let event = VotorEvent::Block {
            slot: slot2,
            block_info,
        };
        tx.send(event).await.unwrap();
        // should not vote yet
        assert!(
            tokio::time::timeout(Duration::from_secs(1), other_a2a.receive())
                .await
                .is_err()
        );
        // now notify votor of earlier block
        let event = VotorEvent::FirstShred(slot1);
        tx.send(event).await.unwrap();
        let block_info = BlockInfo {
            hash: hash1.into(),
            parent: (Slot::genesis(), GENESIS_BLOCK_HASH),
        };
        let event = VotorEvent::Block {
            slot: slot1,
            block_info,
        };
        tx.send(event).await.unwrap();
        // should now see notar votes
        for _ in 0..2 {
            match other_a2a.receive().await.unwrap() {
                ConsensusMessage::Vote(vote) => {
                    assert!(vote.is_notar());
                    assert!(vote.slot() == slot1 || vote.slot() == slot2);
                }
                m => panic!("other msg: {m:?}"),
            };
        }
    }

    /// A safe-to-notar event must trigger a notar-fallback vote.
    #[tokio::test]
    async fn safe_to_notar() {
        let (other_a2a, tx, _) = start_votor().await;
        let slot = Slot::genesis().next();
        // wait for skip votes (loop variable shadows the outer `slot`)
        for slot in slot.slots_in_window() {
            if slot.is_genesis() {
                continue;
            }
            if let Ok(msg) = other_a2a.receive().await {
                match msg {
                    ConsensusMessage::Vote(v) => assert!(v.is_skip()),
                    m => panic!("other msg: {m:?}"),
                }
            }
        }
        // vote notar-fallback after safe-to-notar
        let hash = Hash::random_for_test();
        let event = VotorEvent::SafeToNotar(slot, hash.clone().into());
        tx.send(event).await.unwrap();
        match other_a2a.receive().await.unwrap() {
            ConsensusMessage::Vote(v) => {
                assert!(v.is_notar_fallback());
                assert_eq!(v.slot(), slot);
                assert_eq!(v.block_hash(), Some(&hash.into()));
            }
            m => panic!("other msg: {m:?}"),
        }
    }

    /// A safe-to-skip event must trigger a skip-fallback vote even after a notar vote.
    #[tokio::test]
    async fn safe_to_skip() {
        let (other_a2a, tx, _) = start_votor().await;
        let slot = Slot::genesis().next();
        // vote notar after seeing block
        let event = VotorEvent::FirstShred(slot);
        tx.send(event).await.unwrap();
        let block_info = BlockInfo {
            hash: Hash::random_for_test().into(),
            parent: (Slot::genesis(), GENESIS_BLOCK_HASH),
        };
        let event = VotorEvent::Block { slot, block_info };
        tx.send(event).await.unwrap();
        let vote = match other_a2a.receive().await.unwrap() {
            ConsensusMessage::Vote(v) => v,
            m => panic!("other msg: {m:?}"),
        };
        assert!(vote.is_notar());
        assert_eq!(vote.slot(), slot);
        // vote skip-fallback after safe-to-skip
        let event = VotorEvent::SafeToSkip(slot);
        tx.send(event).await.unwrap();
        match other_a2a.receive().await.unwrap() {
            ConsensusMessage::Vote(v) => {
                assert!(v.is_skip_fallback());
                assert_eq!(v.slot(), slot);
            }
            m => panic!("other msg: {m:?}"),
        }
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/consensus/block_producer.rs | src/consensus/block_producer.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Block production, leader-side of the consensus protocol.
use std::sync::Arc;
use std::time::{Duration, Instant};
use color_eyre::Result;
use either::Either;
use fastrace::Span;
use log::{debug, info, warn};
use static_assertions::const_assert;
use tokio::pin;
use tokio::sync::{RwLock, oneshot};
use tokio::time::sleep;
use tokio_util::sync::CancellationToken;
use crate::consensus::{Blockstore, EpochInfo, Pool};
use crate::crypto::merkle::{BlockHash, GENESIS_BLOCK_HASH, MerkleRoot};
use crate::crypto::signature;
use crate::network::{Network, TransactionNetwork};
use crate::shredder::{MAX_DATA_PER_SLICE, RegularShredder, Shredder};
use crate::types::{Slice, SliceHeader, SliceIndex, SlicePayload, Slot};
use crate::{BlockId, Disseminator, MAX_TRANSACTION_SIZE};
/// Produces blocks from transactions and disseminates them.
///
/// This is the leader's side of the consensus protocol.
/// Produces blocks in accordance with the consensus protocol's timeouts.
/// Receives transactions from clients via a [`Network`] instance and packs them into blocks.
/// Finished blocks are shredded and disseminated via a [`Disseminator`] instance.
pub(super) struct BlockProducer<D: Disseminator, T: Network> {
    /// Own validator's secret key (used e.g. for block production).
    /// This is not the same as the voting secret key, which is held by [`super::Votor`].
    secret_key: signature::SecretKey,
    /// Other validators' info.
    epoch_info: Arc<EpochInfo>,
    /// Blockstore for storing raw block data.
    blockstore: Arc<RwLock<Box<dyn Blockstore + Send + Sync>>>,
    /// Pool of votes and certificates.
    pool: Arc<RwLock<Box<dyn Pool + Send + Sync>>>,
    /// Block dissemination network protocol for shreds.
    disseminator: Arc<D>,
    /// Network connection to receive transactions from clients.
    txs_receiver: T,
    /// Indicates whether the node is shutting down.
    cancel_token: CancellationToken,
    /// Should be set to [`super::DELTA_BLOCK`] in production.
    /// Stored as a field to aid in testing.
    delta_block: Duration,
    /// Should be set to [`super::DELTA_FIRST_SLICE`] in production.
    /// Stored as a field to aid in testing.
    delta_first_slice: Duration,
}
impl<D, T> BlockProducer<D, T>
where
D: Disseminator,
T: TransactionNetwork,
{
    /// Creates a new `BlockProducer`.
    ///
    /// In production, `delta_block` and `delta_first_slice` should be set to
    /// [`super::DELTA_BLOCK`] and [`super::DELTA_FIRST_SLICE`], respectively;
    /// they are parameters only to aid in testing.
    ///
    /// # Panics
    ///
    /// Panics if `delta_block` is smaller than `delta_first_slice`.
    #[allow(clippy::too_many_arguments)]
    pub(super) fn new(
        secret_key: signature::SecretKey,
        epoch_info: Arc<EpochInfo>,
        disseminator: Arc<D>,
        txs_receiver: T,
        blockstore: Arc<RwLock<Box<dyn Blockstore + Send + Sync>>>,
        pool: Arc<RwLock<Box<dyn Pool + Send + Sync>>>,
        cancel_token: CancellationToken,
        delta_block: Duration,
        delta_first_slice: Duration,
    ) -> Self {
        // the first-slice deadline must fit within the whole-block deadline
        assert!(delta_block >= delta_first_slice);
        Self {
            secret_key,
            epoch_info,
            blockstore,
            pool,
            disseminator,
            txs_receiver,
            cancel_token,
            delta_block,
            delta_first_slice,
        }
    }
    /// Handles the leader side of the consensus protocol.
    ///
    /// Once all previous blocks have been notarized or skipped and the next
    /// slot belongs to our leader window, we will produce a block.
    pub(super) async fn block_production_loop(&self) -> Result<()> {
        for first_slot_in_window in Slot::windows() {
            if self.cancel_token.is_cancelled() {
                break;
            }
            let last_slot_in_window = first_slot_in_window.last_slot_in_window();
            // don't do anything if we are not the leader
            let leader = self.epoch_info.leader(first_slot_in_window);
            if leader.id != self.epoch_info.own_id {
                debug!(
                    "[val {}] not producing in window {first_slot_in_window}..{last_slot_in_window}, not leader",
                    self.epoch_info.own_id
                );
                continue;
            }
            // wait for ParentReady or block in previous slot
            let slot_ready = wait_for_first_slot(
                self.pool.clone(),
                self.blockstore.clone(),
                first_slot_in_window,
            )
            .await;
            // produce first block
            let start = Instant::now();
            let mut block_id = match slot_ready {
                SlotReady::Skip => {
                    warn!(
                        "not producing in window {first_slot_in_window}..{last_slot_in_window}, saw later finalization"
                    );
                    continue;
                }
                SlotReady::Ready(parent) => {
                    if first_slot_in_window.is_genesis() {
                        // genesis block is already produced so skip it
                        (first_slot_in_window, GENESIS_BLOCK_HASH)
                    } else {
                        self.produce_block_parent_ready(first_slot_in_window, parent)
                            .await?
                    }
                }
                SlotReady::ParentReadyNotSeen(parent, channel) => {
                    // parent not certified yet: start producing optimistically
                    self.produce_block_parent_not_ready(first_slot_in_window, parent, channel)
                        .await?
                }
            };
            debug!(
                "produced block {} in {} ms",
                first_slot_in_window,
                start.elapsed().as_millis()
            );
            // produce remaining blocks; each builds directly on the previous one
            for slot in first_slot_in_window.slots_in_window().skip(1) {
                let start = Instant::now();
                block_id = self.produce_block_parent_ready(slot, block_id).await?;
                debug!(
                    "produced block {} in {} ms",
                    slot,
                    start.elapsed().as_millis()
                );
            }
        }
        Ok(())
    }
/// Produces a block in the situation where we have not yet seen the `ParentReady` event.
///
/// The `parent_block_id` refers to the block of the previous slot which may end up not being the actualy parent of the block.
pub(super) async fn produce_block_parent_not_ready(
&self,
slot: Slot,
parent_block_id: BlockId,
mut parent_ready_receiver: oneshot::Receiver<BlockId>,
) -> Result<BlockId> {
let _slot_span = Span::enter_with_local_parent(format!("slot {slot}"));
let (parent_slot, parent_hash) = &parent_block_id;
assert_eq!(*parent_slot, slot.prev());
assert!(slot.is_start_of_window());
info!(
"optimistically producing block in slot {} with parent {} in slot {}",
slot,
&hex::encode(parent_hash.as_hash())[..8],
*parent_slot,
);
// only start the DELTA_BLOCK timer once the ParentReady event is seen
let mut duration_left = Duration::MAX;
for slice_index in SliceIndex::all() {
let parent = if slice_index.is_first() {
Some(parent_block_id.clone())
} else {
None
};
let time_for_slice = if slice_index.is_first() {
// make sure first slice is produced on time
// TODO: this can be made more accurate, only needed if production of first slice
// still takes more than delta_first_slice after we saw ParentReady, not if:
// 1. first slice is produced before ParentReady is seen, OR
// 2. first slice finishes at most delta_first_slice after ParentReady is seen
duration_left.min(self.delta_first_slice)
} else {
// cap timeout for each slice to `DELTA_BLOCK`
// makes sure optimistic block production yields before timeout would expire
duration_left.min(self.delta_block)
};
let produce_slice_future =
produce_slice_payload(&self.txs_receiver, parent, time_for_slice);
// If we have not yet received the ParentReady event, wait for it concurrently while producing the next slice.
let (mut payload, new_duration_left) = if parent_ready_receiver.is_terminated() {
produce_slice_future.await
} else {
pin!(produce_slice_future);
tokio::select! {
res = &mut produce_slice_future => {
let (payload, _new_duration_left) = res;
// ParentReady event still not seen, do not start DELTA_BLOCK timer yet
(payload, Duration::MAX)
}
res = &mut parent_ready_receiver => {
// Got ParentReady event while producing slice.
// It's a NOP if we have been using the same parent as before.
let start = Instant::now();
let (new_slot, new_hash) = res.unwrap();
let (mut payload, _maybe_duration) = produce_slice_future.await;
if new_hash == *parent_hash {
debug!("parent is ready, continuing with same parent");
} else {
assert_ne!(new_slot, *parent_slot);
debug!(
"changed parent from {} in slot {} to {} in slot {}",
&hex::encode(parent_hash.as_hash())[..8],
parent_slot,
&hex::encode(new_hash.as_hash())[..8],
new_slot
);
payload.parent = Some((new_slot, new_hash));
}
// ParentReady was seen, start the DELTA_BLOCK timer
// account for the time it took to finish producing the slice
debug!("starting blocktime timer");
let duration = self.delta_block.saturating_sub(start.elapsed());
(payload, duration)
}
}
};
let is_last = slice_index.is_max() || new_duration_left.is_zero();
if is_last && !parent_ready_receiver.is_terminated() {
let (new_slot, new_hash) = (&mut parent_ready_receiver).await.unwrap();
if new_hash != *parent_hash {
assert_ne!(new_slot, *parent_slot);
debug!(
"changed parent from {} in slot {} to {} in slot {}",
&hex::encode(parent_hash.as_hash())[..8],
parent_slot,
&hex::encode(new_hash.as_hash())[..8],
new_slot
);
payload.parent = Some((new_slot, new_hash));
} else {
debug!("parent is ready, continuing with same parent");
}
}
let header = SliceHeader {
slot,
slice_index,
is_last,
};
match self.shred_and_disseminate(header, payload).await? {
Some(block_hash) => return Ok((slot, block_hash)),
None => {
assert!(!new_duration_left.is_zero());
duration_left = new_duration_left;
}
}
}
unreachable!()
}
/// Produces a block in the situation where we have already seen the `ParentReady` event.
///
/// The `parent_block_id` refers to the block that is the ready parent.
pub(crate) async fn produce_block_parent_ready(
&self,
slot: Slot,
parent_block_id: BlockId,
) -> Result<BlockId> {
let _slot_span = Span::enter_with_local_parent(format!("slot {slot}"));
let (parent_slot, parent_hash) = &parent_block_id;
info!(
"producing block in slot {} with ready parent {} in slot {}",
slot,
&hex::encode(parent_hash.as_hash())[..8],
parent_slot,
);
let mut duration_left = self.delta_block;
for slice_index in SliceIndex::all() {
let (payload, new_duration_left) = if slice_index.is_first() {
// make sure first slice is produced quickly enough so that other nodes do not generate the [`TimeoutCrashedLeader`] event
let time_for_slice = self.delta_first_slice;
let (payload, slice_duration_left) = produce_slice_payload(
&self.txs_receiver,
Some(parent_block_id.clone()),
time_for_slice,
)
.await;
let elapsed = self.delta_first_slice - slice_duration_left;
let left = duration_left.saturating_sub(elapsed);
(payload, left)
} else {
produce_slice_payload(&self.txs_receiver, None, duration_left).await
};
let is_last = slice_index.is_max() || new_duration_left.is_zero();
let header = SliceHeader {
slot,
slice_index,
is_last,
};
if let Some(block_hash) = self.shred_and_disseminate(header, payload).await? {
return Ok((slot, block_hash));
} else {
assert!(!new_duration_left.is_zero());
duration_left = new_duration_left;
}
}
unreachable!()
}
/// Shreds and disseminates the slice payload.
///
/// Returns Ok(Some(hash of the block)) if this is the last slice.
/// Returns Ok(None) otherwise.
async fn shred_and_disseminate(
&self,
header: SliceHeader,
payload: SlicePayload,
) -> Result<Option<BlockHash>> {
let slot = header.slot;
let is_last = header.is_last;
let slice = Slice::from_parts(header, payload, None);
let mut maybe_block_hash = None;
// PERF: new shredder every time!
let shreds = RegularShredder::default()
.shred(slice, &self.secret_key)
.expect("shredding of valid slice should never fail");
for s in shreds {
self.disseminator.send(&s).await?;
// PERF: move expensive add_shred() call out of block production
let block = self
.blockstore
.write()
.await
.add_shred_from_disseminator(s.into_shred())
.await;
if let Ok(Some(block_info)) = block {
assert!(maybe_block_hash.is_none());
maybe_block_hash = Some(block_info.hash.clone());
let block_id = (slot, block_info.hash.clone());
self.pool
.write()
.await
.add_block(block_id, block_info.parent)
.await;
}
}
if is_last {
Ok(Some(maybe_block_hash.unwrap()))
} else {
assert!(maybe_block_hash.is_none());
Ok(None)
}
}
}
/// Produces a single slice payload by collecting transactions from `txs_receiver`.
///
/// Transactions are accepted until either `duration_left` has elapsed or the
/// slice has no room left for another maximum-size transaction.
///
/// Returns the payload together with the portion of `duration_left` that is
/// still unused ([`Duration::ZERO`] if the timeout fired).
async fn produce_slice_payload<T>(
    txs_receiver: &T,
    parent: Option<BlockId>,
    duration_left: Duration,
) -> (SlicePayload, Duration)
where
    T: TransactionNetwork,
{
    let start_time = Instant::now();
    // each slice should be able to hold at least 1 transaction
    // need 8 bytes to encode number of txs + 8 bytes to encode the length of the tx payload
    const_assert!(MAX_DATA_PER_SLICE >= MAX_TRANSACTION_SIZE + 8 + 8);
    // reserve space for parent and 8 bytes to encode number of txs
    let parent_encoded_len = <Option<BlockId> as wincode::SchemaWrite>::size_of(&parent).unwrap();
    let mut slice_capacity_left = MAX_DATA_PER_SLICE
        .checked_sub(parent_encoded_len + 8)
        .unwrap();
    let mut txs = Vec::new();
    let ret = loop {
        // remaining time budget for receiving the next transaction
        let sleep_duration = duration_left.saturating_sub(start_time.elapsed());
        let res = tokio::select! {
            () = tokio::time::sleep(sleep_duration) => {
                break Duration::ZERO;
            }
            res = txs_receiver.receive() => {
                res
            }
        };
        let tx = res.expect("receiving tx");
        let tx = wincode::serialize(&tx).expect("serialization should not panic");
        slice_capacity_left = slice_capacity_left.checked_sub(tx.len()).unwrap();
        txs.push(tx);
        // if there is not enough space for another tx, break
        // this needs to account for the 8 bytes to encode the length of the tx payload
        if slice_capacity_left < MAX_TRANSACTION_SIZE + 8 {
            break duration_left.saturating_sub(start_time.elapsed());
        }
    };
    // TODO: not accounting for this potentially expensive operation in duration_left calculation above.
    let txs = wincode::serialize(&txs).expect("serialization should not panic");
    let payload = SlicePayload::new(parent, txs);
    (payload, ret)
}
/// Enum to capture the different scenarios that can be returned from [`wait_for_first_slot`].
#[derive(Debug)]
enum SlotReady {
    /// Window was already skipped.
    ///
    /// A slot at or past the window's first slot was already finalized.
    Skip,
    /// Slot is ready and the Pool emitted a `ParentReady` for given `BlockId`.
    Ready(BlockId),
    /// Slot is ready as a block for the previous slot was seen but the Pool has not emitted `ParentReady` yet.
    ///
    /// The contained channel resolves once the Pool does emit `ParentReady`.
    ParentReadyNotSeen(BlockId, oneshot::Receiver<BlockId>),
}
/// Waits for first slot in the given window to become ready for block production.
///
/// Ready here can mean:
/// - Pool emitted the `ParentReady` event for it, OR
/// - the blockstore has stored a block for the previous slot.
///
/// See [`SlotReady`] for what is returned.
async fn wait_for_first_slot(
pool: Arc<RwLock<Box<dyn Pool + Send + Sync>>>,
blockstore: Arc<RwLock<Box<dyn Blockstore + Send + Sync>>>,
first_slot_in_window: Slot,
) -> SlotReady {
assert!(first_slot_in_window.is_start_of_window());
if first_slot_in_window.is_genesis_window() {
return SlotReady::Ready((Slot::genesis(), GENESIS_BLOCK_HASH));
}
// if already have parent ready, return it, otherwise get a channel to await on
let mut rx = {
let mut guard = pool.write().await;
match guard.wait_for_parent_ready(first_slot_in_window) {
Either::Left(parent) => {
return SlotReady::Ready(parent);
}
Either::Right(rx) => rx,
}
};
// Concurrently wait for:
// - `ParentReady` event,
// - block reconstruction in blockstore, OR
// - notification that a later slot was finalized.
tokio::select! {
res = &mut rx => {
let parent = res.expect("sender dropped channel");
SlotReady::Ready(parent)
}
res = async {
let handle = tokio::spawn(async move {
// PERF: These are burning a CPU. Can we use async here?
loop {
let last_slot_in_prev_window = first_slot_in_window.prev();
if let Some(hash) = blockstore.read().await
.disseminated_block_hash(last_slot_in_prev_window)
{
return Some((last_slot_in_prev_window, hash.clone()));
}
if pool.read().await.finalized_slot() >= first_slot_in_window {
return None;
}
sleep(Duration::from_millis(1)).await;
}
});
handle.await.expect("error in task")
} => {
match res {
None => SlotReady::Skip,
Some((slot, hash)) => SlotReady::ParentReadyNotSeen((slot, hash.clone()), rx),
}
}
}
}
#[cfg(test)]
mod tests {
    use std::time::Duration;
    use mockall::{Sequence, predicate};
    use super::*;
    use crate::Transaction;
    use crate::consensus::BlockInfo;
    use crate::consensus::blockstore::MockBlockstore;
    use crate::consensus::pool::MockPool;
    use crate::crypto::Hash;
    use crate::disseminator::MockDisseminator;
    use crate::network::{UdpNetwork, localhost_ip_sockaddr};
    use crate::shredder::TOTAL_SHREDS;
    use crate::test_utils::generate_validators;
    // With a zero time budget, `produce_slice_payload` returns an empty
    // payload immediately, both with and without a parent reference.
    #[tokio::test]
    async fn produce_slice_empty_slices() {
        let txs_receiver: UdpNetwork<Transaction, Transaction> = UdpNetwork::new_with_any_port();
        let duration_left = Duration::from_micros(0);
        let parent = None;
        let (payload, maybe_duration) =
            produce_slice_payload(&txs_receiver, parent.clone(), duration_left).await;
        assert_eq!(maybe_duration, Duration::ZERO);
        assert_eq!(payload.parent, parent);
        // bin encoding an empty Vec takes 8 bytes
        assert_eq!(payload.data.len(), 8);
        let parent = Some((Slot::genesis(), GENESIS_BLOCK_HASH));
        let (payload, maybe_duration) =
            produce_slice_payload(&txs_receiver, parent.clone(), duration_left).await;
        assert_eq!(maybe_duration, Duration::ZERO);
        assert_eq!(payload.parent, parent);
        // bin encoding an empty Vec takes 8 bytes
        assert_eq!(payload.data.len(), 8);
    }
    // Sends enough max-size transactions to fill a slice and checks that the
    // payload fills up close to `MAX_DATA_PER_SLICE` before the timeout.
    #[tokio::test]
    async fn produce_slice_full_slices() {
        let txs_receiver: UdpNetwork<Transaction, Transaction> = UdpNetwork::new_with_any_port();
        let addr = localhost_ip_sockaddr(txs_receiver.port());
        let txs_sender: UdpNetwork<Transaction, Transaction> = UdpNetwork::new_with_any_port();
        // long enough duration so hopefully doesn't fire while collecting txs
        let duration_left = Duration::from_secs(100);
        tokio::spawn(async move {
            for i in 0..255 {
                let data = vec![i; MAX_TRANSACTION_SIZE];
                let msg = Transaction(data);
                txs_sender.send(&msg, addr).await.unwrap();
            }
        });
        let parent = None;
        let (payload, maybe_duration) =
            produce_slice_payload(&txs_receiver, parent.clone(), duration_left).await;
        assert!(maybe_duration > Duration::ZERO);
        assert_eq!(payload.parent, parent);
        assert!(payload.data.len() <= MAX_DATA_PER_SLICE);
        assert!(payload.data.len() > MAX_DATA_PER_SLICE - MAX_TRANSACTION_SIZE);
    }
    // The genesis window is ready immediately, without touching pool/blockstore.
    #[tokio::test]
    async fn wait_for_first_slot_genesis() {
        let pool: Box<dyn Pool + Send + Sync> = Box::new(MockPool::new());
        let pool = Arc::new(RwLock::new(pool));
        let blockstore: Box<dyn Blockstore + Send + Sync> = Box::new(MockBlockstore::new());
        let blockstore = Arc::new(RwLock::new(blockstore));
        let status = wait_for_first_slot(pool, blockstore, Slot::genesis()).await;
        assert!(matches!(status, SlotReady::Ready(_)));
    }
    // If the pool already knows the ready parent, it is returned directly.
    #[tokio::test]
    async fn wait_for_first_slot_parent_already_ready() {
        let blockstore: Box<dyn Blockstore + Send + Sync> = Box::new(MockBlockstore::new());
        let blockstore = Arc::new(RwLock::new(blockstore));
        let slot = Slot::windows().nth(10).unwrap();
        let parent = (slot.prev(), GENESIS_BLOCK_HASH);
        let mut pool = MockPool::new();
        let p = parent.clone();
        pool.expect_wait_for_parent_ready()
            .with(predicate::eq(slot))
            .return_once(move |_slot| Either::Left(p));
        let pool: Box<dyn Pool + Send + Sync> = Box::new(pool);
        let pool = Arc::new(RwLock::new(pool));
        let status = wait_for_first_slot(pool, blockstore, slot).await;
        match status {
            SlotReady::Ready(p) => assert_eq!(p, parent),
            other => panic!("unexpected {other:?}"),
        }
    }
    // If the pool hands out a channel instead, the parent delivered on that
    // channel is returned.
    #[tokio::test]
    async fn wait_for_first_slot_parent_ready_later() {
        let blockstore: Box<dyn Blockstore + Send + Sync> = Box::new(MockBlockstore::new());
        let blockstore = Arc::new(RwLock::new(blockstore));
        let slot = Slot::windows().nth(10).unwrap();
        let parent = (slot.prev(), GENESIS_BLOCK_HASH);
        let (tx, rx) = oneshot::channel();
        tx.send(parent.clone()).unwrap();
        let mut pool = MockPool::new();
        pool.expect_wait_for_parent_ready()
            .with(predicate::eq(slot))
            .return_once(move |_slot| Either::Right(rx));
        let pool: Box<dyn Pool + Send + Sync> = Box::new(pool);
        let pool = Arc::new(RwLock::new(pool));
        let status = wait_for_first_slot(pool, blockstore, slot).await;
        match status {
            SlotReady::Ready(p) => assert_eq!(p, parent),
            other => panic!("unexpected {other:?}"),
        }
    }
    /// A bunch of boilerplate to initialize and return a [`BlockProducer`].
    fn setup(
        blockstore: MockBlockstore,
        pool: MockPool,
        disseminator: MockDisseminator,
        delta_block: Duration,
        delta_first_slice: Duration,
    ) -> BlockProducer<MockDisseminator, UdpNetwork<Transaction, Transaction>> {
        let secret_key = signature::SecretKey::new(&mut rand::rng());
        let (_, epoch_info) = generate_validators(11);
        let blockstore: Box<dyn Blockstore + Send + Sync> = Box::new(blockstore);
        let blockstore = Arc::new(RwLock::new(blockstore));
        let pool: Box<dyn Pool + Send + Sync> = Box::new(pool);
        let pool = Arc::new(RwLock::new(pool));
        let disseminator = Arc::new(disseminator);
        let txs_receiver = UdpNetwork::new_with_any_port();
        let cancel_token = CancellationToken::new();
        BlockProducer::new(
            secret_key,
            epoch_info,
            disseminator,
            txs_receiver,
            blockstore,
            pool,
            cancel_token,
            delta_block,
            delta_first_slice,
        )
    }
    // Produces a block with a ready parent and checks the returned block id
    // matches the block the (mocked) blockstore reconstructed.
    #[tokio::test]
    async fn verify_produce_block_parent_ready() {
        let slot = Slot::windows().nth(10).unwrap();
        let hash: BlockHash = Hash::random_for_test().into();
        let hash_prev: BlockHash = Hash::random_for_test().into();
        let block_info = BlockInfo {
            hash: hash.clone(),
            parent: (slot.prev(), hash_prev.clone()),
        };
        // Handles TOTAL_SHRED number of calls.
        // The first TOTAL_SHRED - 1 calls return None.
        // The last call returns Some.
        let mut seq = Sequence::new();
        let mut blockstore = MockBlockstore::new();
        blockstore
            .expect_add_shred_from_disseminator()
            .times(TOTAL_SHREDS - 1)
            .in_sequence(&mut seq)
            .returning(move |_| Box::pin(async move { Ok(None) }));
        let bi = block_info.clone();
        blockstore
            .expect_add_shred_from_disseminator()
            .times(1)
            .in_sequence(&mut seq)
            .returning(move |_| {
                let bi = bi.clone();
                Box::pin(async move { Ok(Some(bi)) })
            });
        let mut pool = MockPool::new();
        let bi = block_info.clone();
        pool.expect_add_block()
            .returning(move |ret_block_id, ret_parent_block_id| {
                assert_eq!(ret_block_id, (slot, bi.hash.clone()));
                assert_eq!(bi.parent, ret_parent_block_id);
                Box::pin(async {})
            });
        let mut disseminator = MockDisseminator::new();
        disseminator
            .expect_send()
            .returning(|_| Box::pin(async { Ok(()) }));
        let block_producer = setup(
            blockstore,
            pool,
            disseminator,
            Duration::from_micros(0),
            Duration::from_micros(0),
        );
        let ret = block_producer
            .produce_block_parent_ready(slot, block_info.parent)
            .await
            .unwrap();
        assert_eq!(slot, ret.0);
        assert_eq!(block_info.hash, ret.1);
    }
    // Produces a block optimistically; `ParentReady` with a DIFFERENT parent
    // arrives while the first slice is being disseminated, so the block must
    // end up with the new parent.
    #[tokio::test]
    async fn verify_produce_block_parent_not_ready() {
        let slot = Slot::windows().nth(10).unwrap();
        let slot_hash: BlockHash = Hash::random_for_test().into();
        let old_parent = (slot.prev(), Hash::random_for_test().into());
        let new_parent = (slot.prev().prev(), Hash::random_for_test().into());
        let old_block_info = BlockInfo {
            hash: slot_hash.clone(),
            parent: old_parent,
        };
        let new_block_info = BlockInfo {
            hash: slot_hash,
            parent: new_parent.clone(),
        };
        let (first_slice_finished_tx, first_slice_finished_rx) = oneshot::channel();
        let (start_second_slice_tx, start_second_slice_rx) = oneshot::channel();
        let mut seq = Sequence::new();
        let mut blockstore = MockBlockstore::new();
        // handle first slice
        blockstore
            .expect_add_shred_from_disseminator()
            .times(TOTAL_SHREDS - 1)
            .in_sequence(&mut seq)
            .returning(move |_| Box::pin(async move { Ok(None) }));
        blockstore
            .expect_add_shred_from_disseminator()
            .times(1)
            .in_sequence(&mut seq)
            .return_once(move |_| {
                Box::pin(async move {
                    // last shred; wait for the parent ready event to be sent before continuing
                    first_slice_finished_tx.send(()).unwrap();
                    let () = start_second_slice_rx.await.unwrap();
                    Ok(None)
                })
            });
        // handle second slice
        blockstore
            .expect_add_shred_from_disseminator()
            .times(TOTAL_SHREDS - 1)
            .in_sequence(&mut seq)
            .returning(move |_| Box::pin(async move { Ok(None) }));
        let nbi = new_block_info.clone();
        blockstore
            .expect_add_shred_from_disseminator()
            .times(1)
            .in_sequence(&mut seq)
            .returning(move |_| {
                let nbi = nbi.clone();
                Box::pin(async {
                    // final shred of second slice
                    // block is constructed with the new parent
                    Ok(Some(nbi))
                })
            });
        let mut pool = MockPool::new();
        let nbi = new_block_info.clone();
        pool.expect_add_block()
            .returning(move |ret_block_id, ret_parent_block_id| {
                assert_eq!(ret_block_id, (slot, nbi.hash.clone()));
                assert_eq!(nbi.parent, ret_parent_block_id);
                Box::pin(async {})
            });
        let mut disseminator = MockDisseminator::new();
        disseminator
            .expect_send()
            .returning(|_| Box::pin(async { Ok(()) }));
        let block_producer = setup(
            blockstore,
            pool,
            disseminator,
            Duration::from_micros(0),
            Duration::from_millis(0),
        );
        let (parent_ready_tx, parent_ready_rx) = oneshot::channel();
        let np = new_parent.clone();
        tokio::spawn(async move {
            // deliver ParentReady only after the first slice went out
            let () = first_slice_finished_rx.await.unwrap();
            parent_ready_tx.send(np).unwrap();
            start_second_slice_tx.send(()).unwrap();
        });
        let ret = block_producer
            .produce_block_parent_not_ready(slot, old_block_info.parent, parent_ready_rx)
            .await
            .unwrap();
        assert_eq!(slot, ret.0);
        assert_eq!(new_block_info.hash, ret.1);
        assert_eq!(new_block_info.parent, new_parent);
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/consensus/cert.rs | src/consensus/cert.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Certificate types used for the consensus protocol.
//!
//! A certificate aggregates a quorum of [`Vote`]s of compatible type, slot
//! and (where applicable) block hash into one or two aggregate signatures.
use thiserror::Error;
use wincode::{SchemaRead, SchemaWrite};
use super::Vote;
use super::vote::VoteKind;
use crate::crypto::merkle::BlockHash;
use crate::crypto::{AggregateSignature, Signable};
use crate::{Slot, Stake, ValidatorId, ValidatorInfo};
/// Errors that can occur during certificate aggregation.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Error)]
pub enum CertError {
    /// At least one vote has a type that is not valid for the certificate being built.
    #[error("wrong vote type found during aggregation")]
    WrongVoteType,
    /// Not all votes refer to the same slot.
    #[error("votes for different slots found during aggregation")]
    SlotMismatch,
    /// Not all votes refer to the same block hash.
    #[error("votes for different block hashes found during aggregation")]
    BlockHashMismatch,
}
/// Certificate types used for the consensus protocol.
#[derive(Clone, Debug, PartialEq, Eq, SchemaRead, SchemaWrite)]
pub enum Cert {
    /// Notarization certificate, see [`NotarCert`].
    Notar(NotarCert),
    /// Notar-fallback certificate, see [`NotarFallbackCert`].
    NotarFallback(NotarFallbackCert),
    /// Skip certificate, see [`SkipCert`].
    Skip(SkipCert),
    /// Fast finalization certificate, see [`FastFinalCert`].
    FastFinal(FastFinalCert),
    /// Finalization certificate, see [`FinalCert`].
    Final(FinalCert),
}
impl Cert {
    /// Checks that the aggregated signatures are valid.
    #[must_use]
    pub fn check_sig(&self, validators: &[ValidatorInfo]) -> bool {
        // delegate to the concrete certificate type
        match self {
            Self::Notar(cert) => cert.check_sig(validators),
            Self::NotarFallback(cert) => cert.check_sig(validators),
            Self::Skip(cert) => cert.check_sig(validators),
            Self::FastFinal(cert) => cert.check_sig(validators),
            Self::Final(cert) => cert.check_sig(validators),
        }
    }
    /// Gives the slot number this certificate is for.
    #[must_use]
    pub const fn slot(&self) -> Slot {
        match self {
            Self::Notar(cert) => cert.slot,
            Self::NotarFallback(cert) => cert.slot,
            Self::Skip(cert) => cert.slot,
            Self::FastFinal(cert) => cert.slot,
            Self::Final(cert) => cert.slot,
        }
    }
    /// Returns the block hash this certificate corresponds to, if any.
    ///
    /// Returns `None` if this is a skip or finalization certificates.
    #[must_use]
    pub const fn block_hash(&self) -> Option<&BlockHash> {
        match self {
            Self::Notar(cert) => Some(&cert.block_hash),
            Self::NotarFallback(cert) => Some(&cert.block_hash),
            Self::FastFinal(cert) => Some(&cert.block_hash),
            // skip and finalization certificates do not name a block
            Self::Skip(_) | Self::Final(_) => None,
        }
    }
    /// Checks if the given validator is a signer of this certificate.
    #[must_use]
    pub fn is_signer(&self, v: ValidatorId) -> bool {
        match self {
            Self::Notar(cert) => cert.agg_sig.is_signer(v),
            Self::FastFinal(cert) => cert.agg_sig.is_signer(v),
            Self::Final(cert) => cert.agg_sig.is_signer(v),
            // two-signature certificates: a signer of either aggregate counts
            Self::NotarFallback(cert) => cert
                .agg_sig_notar
                .iter()
                .chain(cert.agg_sig_notar_fallback.iter())
                .any(|sig| sig.is_signer(v)),
            Self::Skip(cert) => cert
                .agg_sig_skip
                .iter()
                .chain(cert.agg_sig_skip_fallback.iter())
                .any(|sig| sig.is_signer(v)),
        }
    }
    /// Iterates over the signers of this certificate, yielding their IDs.
    #[must_use]
    pub fn signers(&self) -> Box<dyn Iterator<Item = ValidatorId> + '_> {
        match self {
            Self::Notar(cert) => Box::new(cert.agg_sig.signers()),
            Self::FastFinal(cert) => Box::new(cert.agg_sig.signers()),
            Self::Final(cert) => Box::new(cert.agg_sig.signers()),
            // two-signature certificates: chain the signers of both aggregates
            Self::NotarFallback(cert) => Box::new(
                cert.agg_sig_notar
                    .iter()
                    .chain(cert.agg_sig_notar_fallback.iter())
                    .flat_map(AggregateSignature::signers),
            ),
            Self::Skip(cert) => Box::new(
                cert.agg_sig_skip
                    .iter()
                    .chain(cert.agg_sig_skip_fallback.iter())
                    .flat_map(AggregateSignature::signers),
            ),
        }
    }
    /// Gives the combined stake of the validators who signed this certificate.
    #[must_use]
    pub const fn stake(&self) -> Stake {
        match self {
            Self::Notar(cert) => cert.stake,
            Self::NotarFallback(cert) => cert.stake,
            Self::Skip(cert) => cert.stake,
            Self::FastFinal(cert) => cert.stake,
            Self::Final(cert) => cert.stake,
        }
    }
}
/// A notarization certificate is an aggregate of a quorum of notar votes.
#[derive(Clone, Debug, PartialEq, Eq, SchemaRead, SchemaWrite)]
pub struct NotarCert {
    // slot the aggregated notar votes refer to
    slot: Slot,
    // hash of the notarized block
    block_hash: BlockHash,
    // aggregate signature over all notar votes
    agg_sig: AggregateSignature,
    // combined stake of all signers
    stake: Stake,
}
impl NotarCert {
    /// Tries to create a new notarization certificate.
    ///
    /// # Errors
    ///
    /// - [`CertError::WrongVoteType`] if any of the votes is not a notarization vote.
    /// - [`CertError::SlotMismatch`] if the votes have different slots.
    /// - [`CertError::BlockHashMismatch`] if the votes have different block hashes.
    ///
    /// # Panics
    ///
    /// Panics if `votes` is empty.
    pub fn try_new(votes: &[Vote], validators: &[ValidatorInfo]) -> Result<Self, CertError> {
        if !votes[0].is_notar() {
            return Err(CertError::WrongVoteType);
        }
        // slot and block hash of the first vote serve as the reference values
        let slot = votes[0].slot();
        let block_hash = votes[0].block_hash().unwrap().clone();
        for vote in votes {
            if vote.slot() != slot {
                return Err(CertError::SlotMismatch);
            } else if !vote.is_notar() {
                return Err(CertError::WrongVoteType);
            } else if vote.block_hash() != Some(&block_hash) {
                return Err(CertError::BlockHashMismatch);
            }
        }
        let agg_sig = aggsig_from_votes(votes, validators);
        // combined stake of all signers
        let stake: Stake = votes
            .iter()
            .map(|v| validators[v.signer() as usize].stake)
            .sum();
        Ok(Self {
            slot,
            block_hash,
            agg_sig,
            stake,
        })
    }
    /// Creates a new notarization certificate.
    ///
    /// # Panics
    ///
    /// Panics if `try_new` returns an error.
    pub fn new_unchecked(votes: &[Vote], validators: &[ValidatorInfo]) -> Self {
        Self::try_new(votes, validators).unwrap()
    }
    /// Checks that the aggregated signature is valid.
    #[must_use]
    pub fn check_sig(&self, validators: &[ValidatorInfo]) -> bool {
        let pks: Vec<_> = validators.iter().map(|v| v.voting_pubkey).collect();
        // verify against the byte representation of the corresponding notar vote
        let vote_bytes = VoteKind::Notar(self.slot, self.block_hash.clone()).bytes_to_sign();
        self.agg_sig.verify(&vote_bytes, &pks)
    }
    /// Returns the block hash of the notarized block.
    pub const fn block_hash(&self) -> &BlockHash {
        &self.block_hash
    }
}
/// A notar-fallback certificate is an aggregate of a quorum of notar(-fallback) votes.
#[derive(Clone, Debug, PartialEq, Eq, SchemaRead, SchemaWrite)]
pub struct NotarFallbackCert {
    // slot the aggregated votes refer to
    slot: Slot,
    // hash of the notarized-fallback block
    block_hash: BlockHash,
    // aggregate signature over the notar votes, if any were included
    agg_sig_notar: Option<AggregateSignature>,
    // aggregate signature over the notar-fallback votes, if any were included
    agg_sig_notar_fallback: Option<AggregateSignature>,
    // combined stake of all signers
    stake: Stake,
}
impl NotarFallbackCert {
    /// Tries to create a new notar-fallback certificate.
    ///
    /// # Errors
    ///
    /// - [`CertError::WrongVoteType`] if any of the votes is not a notar(-fallback) vote.
    /// - [`CertError::SlotMismatch`] if the votes have different slots.
    /// - [`CertError::BlockHashMismatch`] if the votes have different block hashes.
    ///
    /// # Panics
    ///
    /// Panics if `votes` is empty.
    pub fn try_new(votes: &[Vote], validators: &[ValidatorInfo]) -> Result<Self, CertError> {
        if !votes[0].is_notar() && !votes[0].is_notar_fallback() {
            return Err(CertError::WrongVoteType);
        }
        // slot and block hash of the first vote serve as the reference values
        let slot = votes[0].slot();
        let block_hash = votes[0].block_hash().unwrap().clone();
        for vote in votes {
            if vote.slot() != slot {
                return Err(CertError::SlotMismatch);
            } else if !vote.is_notar() && !vote.is_notar_fallback() {
                return Err(CertError::WrongVoteType);
            } else if vote.block_hash() != Some(&block_hash) {
                return Err(CertError::BlockHashMismatch);
            }
        }
        let stake: Stake = votes
            .iter()
            .map(|v| validators[v.signer() as usize].stake)
            .sum();
        // aggregate notar and notar-fallback votes separately, omitting empty groups
        let mut notar_votes = votes.iter().filter(|v| v.is_notar()).peekable();
        let mut nf_votes = votes.iter().filter(|v| v.is_notar_fallback()).peekable();
        let agg_sig_notar = if notar_votes.peek().is_none() {
            None
        } else {
            Some(aggsig_from_votes_iter(notar_votes, validators))
        };
        let agg_sig_notar_fallback = if nf_votes.peek().is_none() {
            None
        } else {
            Some(aggsig_from_votes_iter(nf_votes, validators))
        };
        Ok(Self {
            slot,
            block_hash,
            agg_sig_notar,
            agg_sig_notar_fallback,
            stake,
        })
    }
    /// Creates a new notar-fallback certificate.
    ///
    /// # Panics
    ///
    /// Panics if `try_new` returns an error.
    pub fn new_unchecked(votes: &[Vote], validators: &[ValidatorInfo]) -> Self {
        Self::try_new(votes, validators).unwrap()
    }
    /// Checks that the aggregated signatures are valid.
    #[must_use]
    pub fn check_sig(&self, validators: &[ValidatorInfo]) -> bool {
        let pks: Vec<_> = validators.iter().map(|v| v.voting_pubkey).collect();
        let vote_bytes = VoteKind::Notar(self.slot, self.block_hash.clone()).bytes_to_sign();
        // a missing aggregate trivially counts as valid
        let sig1_valid = self
            .agg_sig_notar
            .as_ref()
            .is_none_or(|s| s.verify(&vote_bytes, &pks));
        let vote_bytes =
            VoteKind::NotarFallback(self.slot, self.block_hash.clone()).bytes_to_sign();
        let sig2_valid = self
            .agg_sig_notar_fallback
            .as_ref()
            .is_none_or(|s| s.verify(&vote_bytes, &pks));
        sig1_valid && sig2_valid
    }
    /// Returns the block hash of the notarized-fallback block.
    pub const fn block_hash(&self) -> &BlockHash {
        &self.block_hash
    }
}
/// A skip certificate is an aggregate of a quorum of skip(-fallback) votes.
#[derive(Clone, Debug, PartialEq, Eq, SchemaRead, SchemaWrite)]
pub struct SkipCert {
    // slot the aggregated votes refer to
    slot: Slot,
    // aggregate signature over the skip votes, if any were included
    agg_sig_skip: Option<AggregateSignature>,
    // aggregate signature over the skip-fallback votes, if any were included
    agg_sig_skip_fallback: Option<AggregateSignature>,
    // combined stake of all signers
    stake: Stake,
}
impl SkipCert {
    /// Tries to create a new skip certificate.
    ///
    /// # Errors
    ///
    /// - [`CertError::WrongVoteType`] if any of the votes is not a skip(-fallback) vote.
    /// - [`CertError::SlotMismatch`] if the votes have different slots.
    ///
    /// # Panics
    ///
    /// Panics if `votes` is empty.
    pub fn try_new(votes: &[Vote], validators: &[ValidatorInfo]) -> Result<Self, CertError> {
        let first = &votes[0];
        if !(first.is_skip() || first.is_skip_fallback()) {
            return Err(CertError::WrongVoteType);
        }
        // the first vote's slot serves as the reference value
        let slot = first.slot();
        for vote in votes {
            // slot consistency is checked before the vote type
            if vote.slot() != slot {
                return Err(CertError::SlotMismatch);
            }
            if !(vote.is_skip() || vote.is_skip_fallback()) {
                return Err(CertError::WrongVoteType);
            }
        }
        // combined stake of all signers
        let stake: Stake = votes
            .iter()
            .map(|vote| validators[vote.signer() as usize].stake)
            .sum();
        // aggregate skip and skip-fallback votes separately, omitting empty groups
        let mut skip_iter = votes.iter().filter(|v| v.is_skip()).peekable();
        let agg_sig_skip = skip_iter
            .peek()
            .is_some()
            .then(|| aggsig_from_votes_iter(skip_iter, validators));
        let mut fallback_iter = votes.iter().filter(|v| v.is_skip_fallback()).peekable();
        let agg_sig_skip_fallback = fallback_iter
            .peek()
            .is_some()
            .then(|| aggsig_from_votes_iter(fallback_iter, validators));
        Ok(Self {
            slot,
            agg_sig_skip,
            agg_sig_skip_fallback,
            stake,
        })
    }
    /// Creates a new skip certificate.
    ///
    /// # Panics
    ///
    /// Panics if `try_new` returns an error.
    pub fn new_unchecked(votes: &[Vote], validators: &[ValidatorInfo]) -> Self {
        Self::try_new(votes, validators).unwrap()
    }
    /// Checks that the aggregated signatures are valid.
    #[must_use]
    pub fn check_sig(&self, validators: &[ValidatorInfo]) -> bool {
        let pks: Vec<_> = validators.iter().map(|v| v.voting_pubkey).collect();
        let skip_bytes = VoteKind::Skip(self.slot).bytes_to_sign();
        let fallback_bytes = VoteKind::SkipFallback(self.slot).bytes_to_sign();
        // a missing aggregate trivially counts as valid
        let skip_ok = self
            .agg_sig_skip
            .as_ref()
            .is_none_or(|sig| sig.verify(&skip_bytes, &pks));
        let fallback_ok = self
            .agg_sig_skip_fallback
            .as_ref()
            .is_none_or(|sig| sig.verify(&fallback_bytes, &pks));
        skip_ok && fallback_ok
    }
}
/// A fast finalization certificate is an aggregate of a strong quorum of notar votes.
#[derive(Clone, Debug, PartialEq, Eq, SchemaRead, SchemaWrite)]
pub struct FastFinalCert {
    // slot the aggregated notar votes refer to
    slot: Slot,
    // hash of the fast-finalized block
    block_hash: BlockHash,
    // aggregate signature over all notar votes
    agg_sig: AggregateSignature,
    // combined stake of all signers
    stake: Stake,
}
impl FastFinalCert {
    /// Tries to create a new fast finalization certificate.
    ///
    /// # Errors
    ///
    /// - [`CertError::WrongVoteType`] if any of the votes is not a notarization vote.
    /// - [`CertError::SlotMismatch`] if the votes have different slots.
    /// - [`CertError::BlockHashMismatch`] if the votes have different block hashes.
    ///
    /// # Panics
    ///
    /// Panics if `votes` is empty.
    pub fn try_new(votes: &[Vote], validators: &[ValidatorInfo]) -> Result<Self, CertError> {
        if !votes[0].is_notar() {
            return Err(CertError::WrongVoteType);
        }
        // slot and block hash of the first vote serve as the reference values
        let slot = votes[0].slot();
        let block_hash = votes[0].block_hash().unwrap().clone();
        for vote in votes {
            if vote.slot() != slot {
                return Err(CertError::SlotMismatch);
            } else if !vote.is_notar() {
                return Err(CertError::WrongVoteType);
            } else if vote.block_hash() != Some(&block_hash) {
                return Err(CertError::BlockHashMismatch);
            }
        }
        let agg_sig = aggsig_from_votes(votes, validators);
        // combined stake of all signers
        let stake: Stake = votes
            .iter()
            .map(|v| validators[v.signer() as usize].stake)
            .sum();
        Ok(Self {
            slot,
            block_hash,
            agg_sig,
            stake,
        })
    }
    /// Creates a new fast finalization certificate.
    ///
    /// # Panics
    ///
    /// Panics if `try_new` returns an error.
    pub fn new_unchecked(votes: &[Vote], validators: &[ValidatorInfo]) -> Self {
        Self::try_new(votes, validators).unwrap()
    }
    /// Checks that the aggregated signatures are valid.
    #[must_use]
    pub fn check_sig(&self, validators: &[ValidatorInfo]) -> bool {
        let pks: Vec<_> = validators.iter().map(|v| v.voting_pubkey).collect();
        // fast finalization aggregates notar votes, so verify notar vote bytes
        let vote_bytes = VoteKind::Notar(self.slot, self.block_hash.clone()).bytes_to_sign();
        self.agg_sig.verify(&vote_bytes, &pks)
    }
    /// Returns the block hash of the fast-finalized block.
    pub const fn block_hash(&self) -> &BlockHash {
        &self.block_hash
    }
}
/// A finalization certificate is an aggregate of a quorum of finalization votes.
#[derive(Clone, Debug, PartialEq, Eq, SchemaRead, SchemaWrite)]
pub struct FinalCert {
    /// Slot the aggregated finalization votes refer to.
    slot: Slot,
    /// Aggregate of the individual finalization vote signatures.
    agg_sig: AggregateSignature,
    /// Total stake of the validators whose votes are aggregated here.
    stake: Stake,
}
impl FinalCert {
    /// Tries to create a new finalization certificate.
    ///
    /// # Errors
    ///
    /// - [`CertError::WrongVoteType`] if any of the votes is not a finalization vote.
    /// - [`CertError::SlotMismatch`] if the votes have different slots.
    ///
    /// # Panics
    ///
    /// Panics if `votes` is empty.
    pub fn try_new(votes: &[Vote], validators: &[ValidatorInfo]) -> Result<Self, CertError> {
        if !votes[0].is_final() {
            return Err(CertError::WrongVoteType);
        }
        let slot = votes[0].slot();
        // All votes must agree on slot and type.
        for vote in votes {
            if vote.slot() != slot {
                return Err(CertError::SlotMismatch);
            }
            if !vote.is_final() {
                return Err(CertError::WrongVoteType);
            }
        }
        let stake = votes
            .iter()
            .map(|v| validators[v.signer() as usize].stake)
            .sum::<Stake>();
        Ok(Self {
            slot,
            agg_sig: aggsig_from_votes(votes, validators),
            stake,
        })
    }

    /// Creates a new finalization certificate.
    ///
    /// Convenience wrapper around [`Self::try_new`].
    ///
    /// # Panics
    ///
    /// Panics if `try_new` returns an error.
    pub fn new_unchecked(votes: &[Vote], validators: &[ValidatorInfo]) -> Self {
        let cert = Self::try_new(votes, validators);
        cert.unwrap()
    }

    /// Checks that the aggregated signature is valid.
    #[must_use]
    pub fn check_sig(&self, validators: &[ValidatorInfo]) -> bool {
        let msg = VoteKind::Final(self.slot).bytes_to_sign();
        let pks: Vec<_> = validators.iter().map(|v| v.voting_pubkey).collect();
        self.agg_sig.verify(&msg, &pks)
    }
}
/// Aggregates the signatures of the given `votes` into one [`AggregateSignature`].
fn aggsig_from_votes(votes: &[Vote], validators: &[ValidatorInfo]) -> AggregateSignature {
    AggregateSignature::new(
        votes.iter().map(Vote::sig),
        votes.iter().map(Vote::signer),
        validators.len(),
    )
}
/// Aggregates the signatures of the given `votes` into one [`AggregateSignature`].
///
/// Unlike [`aggsig_from_votes`], accepts any iterator of vote references.
fn aggsig_from_votes_iter<'a>(
    votes: impl IntoIterator<Item = &'a Vote>,
    validators: &[ValidatorInfo],
) -> AggregateSignature {
    // Collect in a single pass so the iterator is not traversed twice and no
    // longer needs to be `Clone` (relaxing the bound is backward compatible).
    let (sigs, indices): (Vec<_>, Vec<_>) = votes
        .into_iter()
        .map(|vote| (vote.sig(), vote.signer()))
        .unzip();
    AggregateSignature::new(sigs.into_iter(), indices.into_iter(), validators.len())
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use super::*;
use crate::crypto::aggsig::SecretKey;
use crate::crypto::merkle::GENESIS_BLOCK_HASH;
use crate::crypto::{Hash, signature};
use crate::network::dontcare_sockaddr;
/// Generates `signers` fresh validators with unit stake, returning their
/// voting secret keys alongside the matching [`ValidatorInfo`] entries.
fn create_signers(signers: u64) -> (Vec<SecretKey>, Vec<ValidatorInfo>) {
    let mut voting_keys = Vec::with_capacity(signers as usize);
    let mut infos = Vec::with_capacity(signers as usize);
    for id in 0..signers {
        let sk = signature::SecretKey::new(&mut rand::rng());
        let voting_sk = SecretKey::new(&mut rand::rng());
        infos.push(ValidatorInfo {
            id,
            stake: 1,
            pubkey: sk.to_pk(),
            voting_pubkey: voting_sk.to_pk(),
            all2all_address: dontcare_sockaddr(),
            disseminator_address: dontcare_sockaddr(),
            repair_request_address: dontcare_sockaddr(),
            repair_response_address: dontcare_sockaddr(),
        });
        voting_keys.push(voting_sk);
    }
    (voting_keys, infos)
}
/// Signs `kind` with every given secret key, producing one vote per key.
fn create_votes(kind: VoteKind, sks: &[SecretKey]) -> Vec<Vote> {
    let mut votes = Vec::with_capacity(sks.len());
    for (index, sk) in sks.iter().enumerate() {
        votes.push(Vote::new(kind.clone(), sk, index as ValidatorId));
    }
    votes
}
/// Asserts that `cert` is validly signed by ALL validators in `info`
/// and accounts for their entire stake.
fn check_full_cert(cert: Cert, info: &[ValidatorInfo]) {
    assert!(cert.check_sig(info));
    let total_stake = info.iter().map(|v| v.stake).sum::<Stake>();
    assert_eq!(cert.stake(), total_stake);
    let signers: HashSet<_> = cert.signers().collect();
    for validator in info {
        assert!(cert.is_signer(validator.id));
        assert!(signers.contains(&validator.id));
    }
}
/// Builds each certificate type from a full set of matching votes and
/// checks it verifies with the full validator stake.
#[test]
fn create() {
    let (sks, info) = create_signers(100);

    // notar cert
    let votes = create_votes(VoteKind::Notar(Slot::genesis(), GENESIS_BLOCK_HASH), &sks);
    let notar = NotarCert::try_new(&votes, &info);
    assert!(notar.is_ok());
    check_full_cert(Cert::Notar(notar.unwrap()), &info);

    // notar-fallback cert
    let votes = create_votes(
        VoteKind::NotarFallback(Slot::genesis(), GENESIS_BLOCK_HASH),
        &sks,
    );
    let notar_fallback = NotarFallbackCert::try_new(&votes, &info);
    assert!(notar_fallback.is_ok());
    check_full_cert(Cert::NotarFallback(notar_fallback.unwrap()), &info);

    // skip cert
    let votes = create_votes(VoteKind::Skip(Slot::genesis()), &sks);
    let skip = SkipCert::try_new(&votes, &info);
    assert!(skip.is_ok());
    check_full_cert(Cert::Skip(skip.unwrap()), &info);

    // fast finalization cert
    let votes = create_votes(VoteKind::Notar(Slot::genesis(), GENESIS_BLOCK_HASH), &sks);
    let fast_final = FastFinalCert::try_new(&votes, &info);
    assert!(fast_final.is_ok());
    check_full_cert(Cert::FastFinal(fast_final.unwrap()), &info);

    // finalization cert
    let votes = create_votes(VoteKind::Final(Slot::genesis()), &sks);
    let final_cert = FinalCert::try_new(&votes, &info);
    assert!(final_cert.is_ok());
    check_full_cert(Cert::Final(final_cert.unwrap()), &info);
}
/// A notar-fallback certificate may mix notar and notar-fallback votes,
/// in either order.
#[test]
fn mixed_notar_fallback() {
    let (sks, info) = create_signers(2);

    // notar + notar-fallback
    let votes = [
        Vote::new_notar(Slot::genesis(), GENESIS_BLOCK_HASH, &sks[0], 0),
        Vote::new_notar_fallback(Slot::genesis(), GENESIS_BLOCK_HASH, &sks[1], 1),
    ];
    let res = NotarFallbackCert::try_new(&votes, &info);
    assert!(res.is_ok());
    check_full_cert(Cert::NotarFallback(res.unwrap()), &info);

    // notar-fallback + notar
    let votes = [
        Vote::new_notar_fallback(Slot::genesis(), GENESIS_BLOCK_HASH, &sks[0], 0),
        Vote::new_notar(Slot::genesis(), GENESIS_BLOCK_HASH, &sks[1], 1),
    ];
    let res = NotarFallbackCert::try_new(&votes, &info);
    assert!(res.is_ok());
    check_full_cert(Cert::NotarFallback(res.unwrap()), &info);
}
/// A skip certificate may mix skip and skip-fallback votes, in either order.
#[test]
fn mixed_skip() {
    let (sks, info) = create_signers(2);

    // skip + skip-fallback
    let votes = [
        Vote::new_skip(Slot::genesis(), &sks[0], 0),
        Vote::new_skip_fallback(Slot::genesis(), &sks[1], 1),
    ];
    let res = SkipCert::try_new(&votes, &info);
    assert!(res.is_ok());
    check_full_cert(Cert::Skip(res.unwrap()), &info);

    // skip-fallback + skip
    let votes = [
        Vote::new_skip_fallback(Slot::genesis(), &sks[0], 0),
        Vote::new_skip(Slot::genesis(), &sks[1], 1),
    ];
    let res = SkipCert::try_new(&votes, &info);
    assert!(res.is_ok());
    check_full_cert(Cert::Skip(res.unwrap()), &info);
}
/// Invalid vote combinations must be rejected by [`NotarCert::try_new`].
#[test]
fn notar_failure_cases() {
    let (sks, info) = create_signers(2);
    let hash: BlockHash = Hash::random_for_test().into();

    // slot mismatch
    let votes = [
        Vote::new_notar(Slot::new(1), hash.clone(), &sks[0], 0),
        Vote::new_notar(Slot::new(2), hash.clone(), &sks[1], 1),
    ];
    assert_eq!(
        NotarCert::try_new(&votes, &info).err(),
        Some(CertError::SlotMismatch)
    );

    // block hash mismatch
    let votes = [
        Vote::new_notar(Slot::new(1), hash.clone(), &sks[0], 0),
        Vote::new_notar(Slot::new(1), Hash::random_for_test().into(), &sks[1], 1),
    ];
    assert_eq!(
        NotarCert::try_new(&votes, &info).err(),
        Some(CertError::BlockHashMismatch)
    );

    // different vote types
    let votes = [
        Vote::new_notar(Slot::new(1), hash.clone(), &sks[0], 0),
        Vote::new_notar_fallback(Slot::new(1), hash.clone(), &sks[1], 1),
    ];
    assert_eq!(
        NotarCert::try_new(&votes, &info).err(),
        Some(CertError::WrongVoteType)
    );

    // wrong vote type for cert
    let votes = [
        Vote::new_notar_fallback(Slot::new(1), hash.clone(), &sks[0], 0),
        Vote::new_notar_fallback(Slot::new(1), hash.clone(), &sks[1], 1),
    ];
    assert_eq!(
        NotarCert::try_new(&votes, &info).err(),
        Some(CertError::WrongVoteType)
    );
}
/// Invalid vote combinations must be rejected by [`NotarFallbackCert::try_new`].
#[test]
fn notar_fallback_failure_cases() {
    let (sks, info) = create_signers(2);
    let hash: BlockHash = Hash::random_for_test().into();

    // slot mismatch
    let votes = [
        Vote::new_notar_fallback(Slot::new(1), hash.clone(), &sks[0], 0),
        Vote::new_notar_fallback(Slot::new(2), hash.clone(), &sks[1], 1),
    ];
    assert_eq!(
        NotarFallbackCert::try_new(&votes, &info).err(),
        Some(CertError::SlotMismatch)
    );

    // block hash mismatch
    let votes = [
        Vote::new_notar_fallback(Slot::new(1), hash.clone(), &sks[0], 0),
        Vote::new_notar_fallback(Slot::new(1), Hash::random_for_test().into(), &sks[1], 1),
    ];
    assert_eq!(
        NotarFallbackCert::try_new(&votes, &info).err(),
        Some(CertError::BlockHashMismatch)
    );

    // wrong vote types for cert
    let votes = [
        Vote::new_notar(Slot::new(1), hash.clone(), &sks[0], 0),
        Vote::new_final(Slot::new(1), &sks[1], 1),
    ];
    assert_eq!(
        NotarFallbackCert::try_new(&votes, &info).err(),
        Some(CertError::WrongVoteType)
    );

    // wrong vote type for cert
    let votes = [
        Vote::new_final(Slot::new(1), &sks[0], 0),
        Vote::new_final(Slot::new(1), &sks[1], 1),
    ];
    assert_eq!(
        NotarFallbackCert::try_new(&votes, &info).err(),
        Some(CertError::WrongVoteType)
    );
}
/// Invalid vote combinations must be rejected by [`SkipCert::try_new`].
#[test]
fn skip_failure_cases() {
    let (sks, info) = create_signers(2);

    // slot mismatch
    let votes = [
        Vote::new_skip(Slot::new(1), &sks[0], 0),
        Vote::new_skip(Slot::new(2), &sks[1], 1),
    ];
    assert_eq!(
        SkipCert::try_new(&votes, &info).err(),
        Some(CertError::SlotMismatch)
    );

    // wrong vote type for cert
    let votes = [
        Vote::new_skip(Slot::new(1), &sks[0], 0),
        Vote::new_final(Slot::new(1), &sks[1], 1),
    ];
    assert_eq!(
        SkipCert::try_new(&votes, &info).err(),
        Some(CertError::WrongVoteType)
    );

    // wrong vote type for cert
    let votes = [
        Vote::new_final(Slot::new(1), &sks[0], 0),
        Vote::new_final(Slot::new(1), &sks[1], 1),
    ];
    assert_eq!(
        SkipCert::try_new(&votes, &info).err(),
        Some(CertError::WrongVoteType)
    );
}
/// Invalid vote combinations must be rejected by [`FastFinalCert::try_new`].
#[test]
fn fast_final_failure_cases() {
    let (sks, info) = create_signers(2);
    let hash: BlockHash = Hash::random_for_test().into();

    // slot mismatch
    let votes = [
        Vote::new_notar(Slot::new(1), hash.clone(), &sks[0], 0),
        Vote::new_notar(Slot::new(2), hash.clone(), &sks[1], 1),
    ];
    assert_eq!(
        FastFinalCert::try_new(&votes, &info).err(),
        Some(CertError::SlotMismatch)
    );

    // block hash mismatch
    let votes = [
        Vote::new_notar(Slot::new(1), hash.clone(), &sks[0], 0),
        Vote::new_notar(Slot::new(1), Hash::random_for_test().into(), &sks[1], 1),
    ];
    assert_eq!(
        FastFinalCert::try_new(&votes, &info).err(),
        Some(CertError::BlockHashMismatch)
    );

    // wrong vote type for cert
    let votes = [
        Vote::new_notar(Slot::new(1), hash.clone(), &sks[0], 0),
        Vote::new_notar_fallback(Slot::new(1), hash.clone(), &sks[1], 1),
    ];
    assert_eq!(
        FastFinalCert::try_new(&votes, &info).err(),
        Some(CertError::WrongVoteType)
    );

    // wrong vote type for cert
    let votes = [
        Vote::new_notar_fallback(Slot::new(1), hash.clone(), &sks[0], 0),
        Vote::new_notar_fallback(Slot::new(1), hash.clone(), &sks[1], 1),
    ];
    assert_eq!(
        FastFinalCert::try_new(&votes, &info).err(),
        Some(CertError::WrongVoteType)
    );
}
/// Invalid vote combinations must be rejected by [`FinalCert::try_new`].
#[test]
fn final_failure_cases() {
    let (sks, info) = create_signers(2);

    // slot mismatch
    let votes = [
        Vote::new_final(Slot::new(1), &sks[0], 0),
        Vote::new_final(Slot::new(2), &sks[1], 1),
    ];
    assert_eq!(
        FinalCert::try_new(&votes, &info).err(),
        Some(CertError::SlotMismatch)
    );

    // wrong vote type for cert
    let votes = [
        Vote::new_final(Slot::new(1), &sks[0], 0),
        Vote::new_skip(Slot::new(1), &sks[1], 1),
    ];
    assert_eq!(
        FinalCert::try_new(&votes, &info).err(),
        Some(CertError::WrongVoteType)
    );

    // wrong vote type for cert
    let votes = [
        Vote::new_skip(Slot::new(1), &sks[0], 0),
        Vote::new_skip(Slot::new(1), &sks[1], 1),
    ];
    assert_eq!(
        FinalCert::try_new(&votes, &info).err(),
        Some(CertError::WrongVoteType)
    );
}
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/consensus/blockstore.rs | src/consensus/blockstore.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Data structure holding blocks for each slot.
mod slot_block_data;
use std::collections::BTreeMap;
use std::sync::Arc;
use async_trait::async_trait;
use log::debug;
use mockall::automock;
use tokio::sync::mpsc::Sender;
use self::slot_block_data::{AddShredError, SlotBlockData};
use super::epoch_info::EpochInfo;
use super::votor::VotorEvent;
use crate::consensus::blockstore::slot_block_data::BlockData;
use crate::crypto::merkle::{BlockHash, DoubleMerkleProof, MerkleRoot, SliceRoot};
use crate::shredder::{RegularShredder, Shred, ShredIndex, ShredderPool, ValidatedShred};
use crate::types::SliceIndex;
use crate::{Block, BlockId, Slot};
/// Information about a block within a slot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct BlockInfo {
    /// Hash of the block itself.
    pub(crate) hash: BlockHash,
    /// Identifier (slot and hash) of the block's parent.
    pub(crate) parent: BlockId,
}
impl From<&Block> for BlockInfo {
fn from(block: &Block) -> Self {
BlockInfo {
hash: block.hash.clone(),
parent: (block.parent, block.parent_hash.clone()),
}
}
}
/// Interface for the blockstore.
///
/// This is only used for mocking of [`BlockstoreImpl`].
#[async_trait]
#[automock]
pub trait Blockstore {
    /// Stores a shred received via block dissemination; checks for leader equivocation.
    ///
    /// Returns `Some(block_info)` if the shred completed a block, `None` otherwise.
    async fn add_shred_from_disseminator(
        &mut self,
        shred: Shred,
    ) -> Result<Option<BlockInfo>, AddShredError>;
    /// Stores a shred obtained via repair for the block with the given `hash`.
    ///
    /// Returns `Some(block_info)` if the shred completed a block, `None` otherwise.
    async fn add_shred_from_repair(
        &mut self,
        hash: BlockHash,
        shred: Shred,
    ) -> Result<Option<BlockInfo>, AddShredError>;
    /// Gives the hash of the completed disseminated block for `slot`, if any.
    // NOTE(review): the explicit lifetime is presumably needed for `automock` — confirm.
    #[allow(clippy::needless_lifetimes)]
    fn disseminated_block_hash<'a>(&'a self, slot: Slot) -> Option<&'a BlockHash>;
    /// Gives the completed block stored under `block_id`, if any.
    #[allow(clippy::needless_lifetimes)]
    fn get_block<'a>(&'a self, block_id: &BlockId) -> Option<&'a Block>;
    /// Gives the last slice index of the block, if already known.
    fn get_last_slice_index(&self, block_id: &BlockId) -> Option<SliceIndex>;
    /// Gives the Merkle root of the given `slice` of the block, if any shred is stored.
    fn get_slice_root<'a>(&'a self, block_id: &BlockId, slice: SliceIndex)
        -> Option<&'a SliceRoot>;
    /// Gives the stored shred at (`slice_index`, `shred_index`) of the block, if present.
    #[allow(clippy::needless_lifetimes)]
    fn get_shred<'a>(
        &'a self,
        block_id: &BlockId,
        slice_index: SliceIndex,
        shred_index: ShredIndex,
    ) -> Option<&'a ValidatedShred>;
    /// Generates a double-Merkle proof for the given `slice_index` of the block.
    fn create_double_merkle_proof(
        &self,
        block_id: &BlockId,
        slice_index: SliceIndex,
    ) -> Option<DoubleMerkleProof>;
}
/// Blockstore is the fundamental data structure holding block data per slot.
pub struct BlockstoreImpl {
    /// Data structure holding the actual block data per slot.
    ///
    /// Kept in a `BTreeMap` so entries stay ordered by slot, which allows
    /// pruning all data before a slot with a single `split_off`.
    block_data: BTreeMap<Slot, SlotBlockData>,
    /// Shredders used for reconstructing blocks.
    shredders: ShredderPool<RegularShredder>,
    /// Event channel for sending notifications to Votor.
    votor_channel: Sender<VotorEvent>,
    /// Information about all active validators.
    epoch_info: Arc<EpochInfo>,
}
impl BlockstoreImpl {
    /// Initializes a new empty blockstore.
    ///
    /// Blockstore will send the following [`VotorEvent`]s to the provided `votor_channel`:
    /// - [`VotorEvent::FirstShred`] when receiving the first shred for a slot
    ///   from the block dissemination protocol
    /// - [`VotorEvent::Block`] for any reconstructed block
    pub fn new(epoch_info: Arc<EpochInfo>, votor_channel: Sender<VotorEvent>) -> Self {
        let block_data = BTreeMap::new();
        // A single shredder suffices since all access goes through `&mut self`.
        let shredders = ShredderPool::with_size(1);
        Self {
            block_data,
            shredders,
            votor_channel,
            epoch_info,
        }
    }

    /// Deletes everything before the given `slot` from the blockstore.
    pub fn prune(&mut self, slot: Slot) {
        // `split_off` retains all entries at or after `slot`; the rest is dropped.
        self.block_data = self.block_data.split_off(&slot);
    }

    /// Forwards `event` to Votor, returning the [`BlockInfo`] for block events.
    ///
    /// # Panics
    ///
    /// Panics if the Votor channel is closed, or if `event` is neither a
    /// [`VotorEvent::FirstShred`] nor a [`VotorEvent::Block`].
    async fn send_votor_event(&self, event: VotorEvent) -> Option<BlockInfo> {
        match &event {
            VotorEvent::FirstShred(_) => {
                self.votor_channel.send(event).await.unwrap();
                None
            }
            VotorEvent::Block { slot, block_info } => {
                let info = block_info.clone();
                debug!(
                    "reconstructed block {} in slot {} with parent {} in slot {}",
                    &hex::encode(info.hash.as_hash())[..8],
                    slot,
                    &hex::encode(info.parent.1.as_hash())[..8],
                    info.parent.0,
                );
                self.votor_channel.send(event).await.unwrap();
                Some(info)
            }
            ev => panic!("unexpected event {ev:?}"),
        }
    }

    /// Gives reference to stored block data for the given `block_id`.
    ///
    /// Considers both the disseminated block and any repaired blocks.
    ///
    /// Returns [`None`] if blockstore does not know about this block yet.
    fn get_block_data(&self, block_id: &BlockId) -> Option<&BlockData> {
        let (slot, hash) = block_id;
        let slot_data = self.slot_data(*slot)?;
        match &slot_data.disseminated.completed {
            // Prefer the disseminated block if its hash matches.
            Some((h, _)) if h == hash => Some(&slot_data.disseminated),
            _ => slot_data.repaired.get(hash),
        }
    }

    /// Reads slot data for the given `slot`.
    fn slot_data(&self, slot: Slot) -> Option<&SlotBlockData> {
        self.block_data.get(&slot)
    }

    /// Writes slot data for the given `slot`, initializing it if necessary.
    fn slot_data_mut(&mut self, slot: Slot) -> &mut SlotBlockData {
        self.block_data
            .entry(slot)
            .or_insert_with(|| SlotBlockData::new(slot))
    }

    /// Gives the shred for the given `slot`, `slice` and `shred_index`.
    ///
    /// Considers only the disseminated block.
    ///
    /// Only used for testing.
    #[cfg(test)]
    fn get_disseminated_shred(
        &self,
        slot: Slot,
        slice: SliceIndex,
        shred_index: ShredIndex,
    ) -> Option<&ValidatedShred> {
        let slot_data = self.slot_data(slot)?;
        let slice_shreds = slot_data.disseminated.shreds.get(&slice)?;
        slice_shreds[*shred_index].as_ref()
    }

    /// Gives the number of stored shreds for a given `slot` (across all slices).
    ///
    /// Only used for testing.
    #[cfg(test)]
    fn stored_shreds_for_slot(&self, slot: Slot) -> usize {
        self.slot_data(slot).map_or(0, |s| {
            s.disseminated
                .shreds
                .values()
                .map(|shreds| shreds.iter().filter(|s| s.is_some()).count())
                .sum()
        })
    }

    /// Gives the number of stored slices for a given `slot`.
    ///
    /// Only used for testing.
    #[cfg(test)]
    pub(crate) fn stored_slices_for_slot(&self, slot: Slot) -> usize {
        match self.slot_data(slot) {
            Some(s) => s.disseminated.slices.len(),
            None => 0,
        }
    }
}
#[async_trait]
impl Blockstore for BlockstoreImpl {
    /// Stores a new shred in the blockstore.
    ///
    /// This shred is stored in the default spot without a known block hash.
    /// For shreds obtained through repair, `add_shred_from_repair` should be used instead.
    /// Compared to that function, this one checks for leader equivocation.
    ///
    /// Reconstructs the corresponding slice and block if possible and necessary.
    /// If the added shred belongs to the last slice, all later shreds are deleted.
    ///
    /// Returns `Some(block_info)` if a block was reconstructed, `None` otherwise.
    /// In the `Some`-case, `block_info` is the [`BlockInfo`] of the reconstructed block.
    #[fastrace::trace(short_name = true)]
    async fn add_shred_from_disseminator(
        &mut self,
        shred: Shred,
    ) -> Result<Option<BlockInfo>, AddShredError> {
        let slot = shred.payload().header.slot;
        let leader_pk = self.epoch_info.leader(slot).pubkey;
        // The pool was created with size 1 and `&mut self` rules out
        // concurrent checkouts, so this cannot fail.
        let mut shredder = self
            .shredders
            .checkout()
            .expect("should have a shredder because of exclusive access");
        match self.slot_data_mut(slot).add_shred_from_disseminator(
            shred,
            leader_pk,
            &mut shredder,
        )? {
            Some(event) => Ok(self.send_votor_event(event).await),
            None => Ok(None),
        }
    }

    /// Stores a new shred from repair in the blockstore.
    ///
    /// This shred is stored in a spot associated with the given block`hash`.
    /// For shreds obtained through block dissemination, `add_shred_from_disseminator`
    /// should be used instead.
    /// Compared to that function, this one does not check for leader equivocation.
    ///
    /// Reconstructs the corresponding slice and block if possible and necessary.
    /// If the added shred belongs to last slice, deletes later slices and their shreds.
    ///
    /// Returns `Some(block_info)` if a block was reconstructed, `None` otherwise.
    /// In the `Some`-case, `block_info` is the [`BlockInfo`] of the reconstructed block.
    #[fastrace::trace(short_name = true)]
    async fn add_shred_from_repair(
        &mut self,
        hash: BlockHash,
        shred: Shred,
    ) -> Result<Option<BlockInfo>, AddShredError> {
        let slot = shred.payload().header.slot;
        let leader_pk = self.epoch_info.leader(slot).pubkey;
        // See `add_shred_from_disseminator` — checkout cannot fail here.
        let mut shredder = self
            .shredders
            .checkout()
            .expect("should have a shredder because of exclusive access");
        match self.slot_data_mut(slot).add_shred_from_repair(
            hash,
            shred,
            leader_pk,
            &mut shredder,
        )? {
            Some(event) => Ok(self.send_votor_event(event).await),
            None => Ok(None),
        }
    }

    /// Gives the disseminated block hash for a given `slot`, if any.
    ///
    /// This refers to the block we received from block dissemination.
    ///
    /// Returns `None` if we have no block or only blocks from repair.
    fn disseminated_block_hash(&self, slot: Slot) -> Option<&BlockHash> {
        self.slot_data(slot)?
            .disseminated
            .completed
            .as_ref()
            .map(|c| &c.0)
    }

    /// Gives reference to stored block for the given `block_id`.
    ///
    /// Considers both, the disseminated block and any repaired blocks.
    /// However, the dissminated block can only be considered if it's complete.
    ///
    /// Returns `None` if blockstore does not know a block for that hash.
    fn get_block(&self, block_id: &BlockId) -> Option<&Block> {
        let block_data = self.get_block_data(block_id)?;
        if let Some((hash, block)) = block_data.completed.as_ref() {
            // `get_block_data` already matched on the hash, so this must hold.
            debug_assert_eq!(*hash, block_id.1);
            Some(block)
        } else {
            None
        }
    }

    /// Gives the last slice index for the given `block_id`.
    ///
    /// Returns `None` if blockstore does not know the last slice yet.
    fn get_last_slice_index(&self, block_id: &BlockId) -> Option<SliceIndex> {
        let block_data = self.get_block_data(block_id)?;
        block_data.last_slice
    }

    /// Gives the Merkle root for the given `slice_index` of the given `block_id`.
    ///
    /// Returns `None` if blockstore does not hold any shred for that slice.
    fn get_slice_root(&self, block_id: &BlockId, slice_index: SliceIndex) -> Option<&SliceRoot> {
        let block_data = self.get_block_data(block_id)?;
        let slice_shreds = block_data.shreds.get(&slice_index)?;
        // Any stored shred of the slice carries the slice's Merkle root.
        slice_shreds.iter().flatten().next().map(|s| &s.merkle_root)
    }

    /// Gives reference to stored shred for given `block_id`, `slice_index` and `shred_index`.
    ///
    /// Returns `None` if blockstore does not hold that shred.
    fn get_shred(
        &self,
        block_id: &BlockId,
        slice_index: SliceIndex,
        shred_index: ShredIndex,
    ) -> Option<&ValidatedShred> {
        let block_data = self.get_block_data(block_id)?;
        let slice_shreds = block_data.shreds.get(&slice_index)?;
        slice_shreds[*shred_index].as_ref()
    }

    /// Generates a Merkle proof for the given `slice_index` of the given `block_id`.
    ///
    /// Returns `None` if blockstore does not hold that block yet.
    fn create_double_merkle_proof(
        &self,
        block_id: &BlockId,
        slice_index: SliceIndex,
    ) -> Option<DoubleMerkleProof> {
        let block_data = self.get_block_data(block_id)?;
        let tree = block_data.double_merkle_tree.as_ref()?;
        Some(tree.create_proof(slice_index.inner()))
    }
}
#[cfg(test)]
mod tests {
use color_eyre::Result;
use tokio::sync::mpsc;
use super::*;
use crate::ValidatorInfo;
use crate::crypto::merkle::DoubleMerkleTree;
use crate::crypto::signature::SecretKey;
use crate::crypto::{Hash, aggsig};
use crate::network::dontcare_sockaddr;
use crate::shredder::{DATA_SHREDS, TOTAL_SHREDS};
use crate::test_utils::create_random_shredded_block;
use crate::types::SliceIndex;
/// Builds a single-validator blockstore plus the leader's secret key.
fn test_setup(tx: Sender<VotorEvent>) -> (SecretKey, BlockstoreImpl) {
    let sk = SecretKey::new(&mut rand::rng());
    let voting_sk = aggsig::SecretKey::new(&mut rand::rng());
    let validators = vec![ValidatorInfo {
        id: 0,
        stake: 1,
        pubkey: sk.to_pk(),
        voting_pubkey: voting_sk.to_pk(),
        all2all_address: dontcare_sockaddr(),
        disseminator_address: dontcare_sockaddr(),
        repair_request_address: dontcare_sockaddr(),
        repair_response_address: dontcare_sockaddr(),
    }];
    let epoch_info = Arc::new(EpochInfo::new(0, validators));
    (sk, BlockstoreImpl::new(epoch_info, tx))
}
/// Inserts `shred` via the disseminator path, treating duplicates as `Ok(None)`.
async fn add_shred_ignore_duplicate(
    blockstore: &mut BlockstoreImpl,
    shred: Shred,
) -> Result<Option<BlockInfo>, AddShredError> {
    match blockstore.add_shred_from_disseminator(shred).await {
        Err(AddShredError::Duplicate) => Ok(None),
        other => other,
    }
}
/// Stores a full single-slice block shred by shred and verifies retrieval
/// plus double-Merkle proof creation.
#[tokio::test]
async fn store_one_slice_block() -> Result<()> {
    let slot = Slot::genesis().next();
    let (tx, _rx) = mpsc::channel(100);
    let (sk, mut blockstore) = test_setup(tx);
    assert!(blockstore.slot_data(slot).is_none());

    // generate single-slice block
    let (block_hash, _, shreds) = create_random_shredded_block(slot, 1, &sk);
    let block_id = (slot, block_hash);
    let slice_hash = &shreds[0][0].merkle_root;
    for shred in &shreds[0] {
        // store the shred, then verify it can be read back unchanged
        add_shred_ignore_duplicate(&mut blockstore, shred.clone().into_shred()).await?;
        let stored = blockstore
            .get_disseminated_shred(slot, SliceIndex::first(), shred.payload().shred_index)
            .expect("shred not stored");
        assert_eq!(stored.payload().data, shred.payload().data);
    }

    // create and check double-Merkle proof
    let proof = blockstore
        .create_double_merkle_proof(&block_id, SliceIndex::first())
        .unwrap();
    let slot_data = blockstore.slot_data(slot).unwrap();
    let tree = slot_data.disseminated.double_merkle_tree.as_ref().unwrap();
    let root = tree.get_root();
    assert!(DoubleMerkleTree::check_proof(slice_hash, 0, &root, &proof));
    Ok(())
}
/// A two-slice block only completes once both slices are stored.
#[tokio::test]
async fn store_two_slice_block() -> Result<()> {
    let slot = Slot::genesis().next();
    let (tx, _rx) = mpsc::channel(100);
    let (sk, mut blockstore) = test_setup(tx);
    assert!(blockstore.slot_data(slot).is_none());

    // generate two-slice block
    let (_hash, _tree, slices) = create_random_shredded_block(slot, 2, &sk);

    // the first slice alone must not complete the block
    for shred in slices[0].iter().cloned() {
        add_shred_ignore_duplicate(&mut blockstore, shred.into_shred()).await?;
    }
    assert!(blockstore.disseminated_block_hash(slot).is_none());

    // the second slice completes the block
    for shred in slices[1].iter().cloned() {
        add_shred_ignore_duplicate(&mut blockstore, shred.into_shred()).await?;
    }
    assert!(blockstore.disseminated_block_hash(slot).is_some());
    Ok(())
}
/// A block assembled purely from repaired shreds becomes retrievable
/// once enough shreds of every slice are stored.
#[tokio::test]
async fn store_block_from_repair() -> Result<()> {
    let slot = Slot::genesis().next();
    let (tx, _rx) = mpsc::channel(100);
    let (sk, mut blockstore) = test_setup(tx);
    assert!(blockstore.slot_data(slot).is_none());

    // generate and shred a two-slice block
    let (block_hash, _tree, slices) = create_random_shredded_block(slot, 2, &sk);
    let block_id = (slot, block_hash.clone());

    // the first slice alone is not enough
    for shred in slices[0].iter().take(DATA_SHREDS).cloned() {
        blockstore
            .add_shred_from_repair(block_hash.clone(), shred.into_shred())
            .await?;
    }
    assert!(blockstore.get_block(&block_id).is_none());

    // the second slice completes the block
    for shred in slices[1].iter().take(DATA_SHREDS).cloned() {
        blockstore
            .add_shred_from_repair(block_hash.clone(), shred.into_shred())
            .await?;
    }
    assert!(blockstore.get_block(&block_id).is_some());
    Ok(())
}
/// Shreds inserted in reverse order must still reconstruct the block.
#[tokio::test]
async fn out_of_order_shreds() -> Result<()> {
    let slot = Slot::genesis().next();
    let (tx, _rx) = mpsc::channel(100);
    let (sk, mut blockstore) = test_setup(tx);
    assert!(blockstore.disseminated_block_hash(slot).is_none());

    // generate a single-slice block
    let (_hash, _tree, slices) = create_random_shredded_block(slot, 1, &sk);

    // insert shreds in reverse order
    for shred in slices[0].iter().rev().cloned() {
        add_shred_ignore_duplicate(&mut blockstore, shred.into_shred()).await?;
    }
    assert!(blockstore.disseminated_block_hash(slot).is_some());
    Ok(())
}
#[tokio::test]
async fn just_enough_shreds() -> Result<()> {
    let slot = Slot::genesis().next();
    let (tx, _rx) = mpsc::channel(100);
    let (sk, mut blockstore) = test_setup(tx);
    assert!(blockstore.disseminated_block_hash(slot).is_none());
    // generate a larger (four-slice) block
    let (_hash, _tree, slices) = create_random_shredded_block(slot, 4, &sk);
    assert_eq!(blockstore.stored_slices_for_slot(slot), 0);
    // insert just enough shreds to reconstruct slice 0 (from beginning)
    // i.e. the first DATA_SHREDS shreds: indices [0, DATA_SHREDS)
    for shred in slices[0].clone().into_iter().take(DATA_SHREDS) {
        blockstore
            .add_shred_from_disseminator(shred.into_shred())
            .await?;
    }
    assert_eq!(blockstore.stored_slices_for_slot(slot), 1);
    // insert just enough shreds to reconstruct slice 1 (from end)
    // i.e. the last DATA_SHREDS shreds: indices [TOTAL_SHREDS - DATA_SHREDS, TOTAL_SHREDS)
    for shred in slices[1]
        .clone()
        .into_iter()
        .skip(TOTAL_SHREDS - DATA_SHREDS)
    {
        blockstore
            .add_shred_from_disseminator(shred.into_shred())
            .await?;
    }
    assert_eq!(blockstore.stored_slices_for_slot(slot), 2);
    // insert just enough shreds to reconstruct slice 2 (from middle)
    // i.e. a run of DATA_SHREDS shreds centered in the index range
    for shred in slices[2]
        .clone()
        .into_iter()
        .skip((TOTAL_SHREDS - DATA_SHREDS) / 2)
        .take(DATA_SHREDS)
    {
        blockstore
            .add_shred_from_disseminator(shred.into_shred())
            .await?;
    }
    assert_eq!(blockstore.stored_slices_for_slot(slot), 3);
    // insert just enough shreds to reconstruct slice 3 (split)
    // i.e. half from the very beginning plus half from the very end
    for (_, shred) in slices[3]
        .clone()
        .into_iter()
        .enumerate()
        .filter(|(i, _)| *i < DATA_SHREDS / 2 || *i >= TOTAL_SHREDS - DATA_SHREDS / 2)
    {
        blockstore
            .add_shred_from_disseminator(shred.into_shred())
            .await?;
    }
    assert!(blockstore.disseminated_block_hash(slot).is_some());
    // slices are deleted after reconstruction
    assert_eq!(blockstore.stored_slices_for_slot(slot), 0);
    Ok(())
}
/// Stores a two-slice block slice by slice and checks that shred counts
/// accumulate and the block completes after the second slice.
// NOTE(review): despite the test name, slices are inserted in order here
// (slice 0, then slice 1) — confirm whether reversed insertion was intended.
#[tokio::test]
async fn out_of_order_slices() -> Result<()> {
    let slot = Slot::genesis().next();
    let (tx, _rx) = mpsc::channel(100);
    let (sk, mut blockstore) = test_setup(tx);
    assert!(blockstore.disseminated_block_hash(slot).is_none());

    // generate a two-slice block
    let (_hash, _tree, slices) = create_random_shredded_block(slot, 2, &sk);

    // the first slice alone is not enough to complete the block
    for shred in slices[0].iter().cloned() {
        add_shred_ignore_duplicate(&mut blockstore, shred.into_shred()).await?;
    }
    assert!(blockstore.disseminated_block_hash(slot).is_none());
    // all of the slice's shreds are retained
    assert_eq!(blockstore.stored_shreds_for_slot(slot), TOTAL_SHREDS);

    // the second slice completes the block
    for shred in slices[1].iter().cloned() {
        add_shred_ignore_duplicate(&mut blockstore, shred.into_shred()).await?;
    }
    assert!(blockstore.disseminated_block_hash(slot).is_some());
    // all shreds of both slices are retained
    assert_eq!(blockstore.stored_shreds_for_slot(slot), 2 * TOTAL_SHREDS);
    Ok(())
}
/// Inserting the same shred twice is rejected and stored only once.
#[tokio::test]
async fn duplicate_shreds() -> Result<()> {
    let slot = Slot::genesis().next();
    let (tx, _rx) = mpsc::channel(100);
    let (sk, mut blockstore) = test_setup(tx);
    let (_hash, _tree, slices) = create_random_shredded_block(slot, 1, &sk);
    let shred = slices[0][0].clone();

    // first insertion succeeds
    let res = blockstore
        .add_shred_from_disseminator(shred.clone().into_shred())
        .await;
    assert!(res.is_ok());

    // second insertion of the same shred is flagged as a duplicate
    let res = blockstore
        .add_shred_from_disseminator(shred.into_shred())
        .await;
    assert_eq!(res, Err(AddShredError::Duplicate));

    // only one copy is stored
    assert_eq!(blockstore.stored_shreds_for_slot(slot), 1);
    Ok(())
}
/// Shreds with a corrupted Merkle root must fail signature validation.
#[tokio::test]
async fn invalid_shreds() -> Result<()> {
    let slot = Slot::genesis().next();
    let (tx, _rx) = mpsc::channel(100);
    let (sk, mut blockstore) = test_setup(tx);
    let (_hash, _tree, slices) = create_random_shredded_block(slot, 1, &sk);

    // corrupting the Merkle root invalidates the leader's signature
    for shred in slices[0].iter().cloned() {
        let mut corrupted = shred.into_shred();
        corrupted.merkle_root = Hash::random_for_test().into();
        let res = add_shred_ignore_duplicate(&mut blockstore, corrupted).await;
        assert_eq!(res.err(), Some(AddShredError::InvalidSignature));
    }
    Ok(())
}
#[tokio::test]
async fn pruning() -> Result<()> {
    // Pruning up to a slot drops data for all strictly older slots only.
    let block0_slot = Slot::genesis().next();
    let block1_slot = block0_slot.next();
    let block2_slot = block1_slot.next();
    let block3_slot = block2_slot.next();
    let future_slot = block3_slot.next();
    let (tx, _rx) = mpsc::channel(1000);
    let (sk, mut blockstore) = test_setup(tx);
    // shred three consecutive single-slice blocks and insert all their shreds
    let slots = [block0_slot, block1_slot, block2_slot];
    for &slot in &slots {
        let (_hash, _tree, slices) = create_random_shredded_block(slot, 1, &sk);
        for shred in slices.into_iter().flatten() {
            add_shred_ignore_duplicate(&mut blockstore, shred.into_shred()).await?;
        }
    }
    // all three blocks are complete and fully stored
    for &slot in &slots {
        assert!(blockstore.disseminated_block_hash(slot).is_some());
        assert_eq!(blockstore.stored_shreds_for_slot(slot), TOTAL_SHREDS);
    }
    // partial pruning drops only the slot before the prune point
    blockstore.prune(block1_slot);
    assert_eq!(blockstore.stored_shreds_for_slot(block0_slot), 0);
    assert_eq!(blockstore.stored_shreds_for_slot(block1_slot), TOTAL_SHREDS);
    assert_eq!(blockstore.stored_shreds_for_slot(block2_slot), TOTAL_SHREDS);
    // pruning past all blocks leaves nothing behind
    blockstore.prune(future_slot);
    for &slot in &slots {
        assert_eq!(blockstore.stored_shreds_for_slot(slot), 0);
    }
    // double-check via the raw storage that no shred survived
    let shred_count = blockstore
        .block_data
        .values()
        .map(|d| {
            d.disseminated
                .shreds
                .values()
                .map(|s| s.len())
                .sum::<usize>()
        })
        .sum::<usize>();
    assert_eq!(shred_count, 0);
    Ok(())
}
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/consensus/pool.rs | src/consensus/pool.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Data structure handling votes and certificates.
//!
//! Any received votes or certificates are placed into the pool.
//! The pool then tracks status for each slot and sends notification to votor.
mod finality_tracker;
mod parent_ready_tracker;
mod slot_state;
use std::collections::BTreeMap;
use std::ops::RangeBounds;
use std::sync::Arc;
use async_trait::async_trait;
use either::Either;
use log::{debug, info, trace, warn};
use mockall::automock;
use thiserror::Error;
use tokio::sync::mpsc::Sender;
use tokio::sync::oneshot;
use self::finality_tracker::FinalityTracker;
use self::parent_ready_tracker::ParentReadyTracker;
use self::slot_state::SlotState;
use super::votor::VotorEvent;
use super::{Cert, EpochInfo, Vote};
use crate::consensus::cert::NotarCert;
use crate::consensus::pool::finality_tracker::FinalizationEvent;
use crate::crypto::merkle::{BlockHash, MerkleRoot};
use crate::types::SLOTS_PER_EPOCH;
use crate::{BlockId, Slot, ValidatorId};
/// Errors the Pool may return when adding a vote.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Error)]
pub enum AddVoteError {
    /// The vote's slot is below the finalized slot or too far ahead of it.
    #[error("slot is either too old or too far in the future")]
    SlotOutOfBounds,
    /// The vote's signature does not verify against the signer's voting key.
    #[error("invalid signature on the vote")]
    InvalidSignature,
    /// An equivalent vote from the same validator was already counted.
    #[error("duplicate vote")]
    Duplicate,
    /// The vote conflicts with an earlier vote by the same validator.
    #[error("vote constitutes a slashable offence")]
    Slashable(SlashableOffence),
}
/// Errors the Pool may return when adding a certificate.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Error)]
pub enum AddCertError {
    /// The certificate's slot is below the finalized slot or too far ahead of it.
    #[error("slot is either too old or too far in the future")]
    SlotOutOfBounds,
    /// The certificate's aggregate signature does not verify against the validator set.
    #[error("invalid signature on the cert")]
    InvalidSignature,
    /// An equivalent certificate is already stored in the pool.
    #[error("duplicate cert")]
    Duplicate,
}
/// Slashable offences that may be detected by the Pool.
///
/// Each variant carries the offending validator and the slot in question.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Error)]
pub enum SlashableOffence {
    /// Two notarization votes by the same validator for different block hashes.
    #[error("Validator {0} already voted notar on slot {1} for a different hash")]
    NotarDifferentHash(ValidatorId, Slot),
    /// Conflicting skip and notarize votes for the same slot.
    #[error("Validator {0} voted both skip and notarize on slot {1}")]
    SkipAndNotarize(ValidatorId, Slot),
    /// Conflicting skip(-fallback) and finalize votes for the same slot.
    #[error("Validator {0} voted both skip(-fallback) and finalize on slot {1}")]
    SkipAndFinalize(ValidatorId, Slot),
    /// Conflicting notar-fallback and finalize votes for the same slot.
    #[error("Validator {0} voted both notar-fallback and finalize on slot {1}")]
    NotarFallbackAndFinalize(ValidatorId, Slot),
}
/// Interface for the Pool.
///
/// This is only used for mocking of [`PoolImpl`].
#[async_trait]
#[automock]
pub trait Pool {
    /// Adds a new certificate to the pool, after checking its validity.
    async fn add_cert(&mut self, cert: Cert) -> Result<(), AddCertError>;
    /// Adds a new vote to the pool, after checking its validity.
    async fn add_vote(&mut self, vote: Vote) -> Result<(), AddVoteError>;
    /// Registers a valid block together with its parent.
    async fn add_block(&mut self, block_id: BlockId, parent_id: BlockId);
    /// Re-broadcasts relevant certificates and own votes after a standstill.
    async fn recover_from_standstill(&self);
    /// Returns the currently highest finalized (fast or slow) slot.
    fn finalized_slot(&self) -> Slot;
    /// Returns all blocks that are ready to serve as parent for `slot`.
    fn parents_ready(&self, slot: Slot) -> &[BlockId];
    /// Returns a ready parent directly, or a receiver on which one will be delivered.
    fn wait_for_parent_ready(&mut self, slot: Slot) -> Either<BlockId, oneshot::Receiver<BlockId>>;
}
/// Pool is the central consensus data structure.
///
/// It holds votes and certificates for each slot.
///
/// This is the main implementation to use when you require the [`Pool`] trait.
pub struct PoolImpl {
    /// State for each slot. Stores all votes and certificates.
    slot_states: BTreeMap<Slot, SlotState>,
    /// Keeps track of which slots have a parent ready.
    parent_ready_tracker: ParentReadyTracker,
    /// Keeps track of which slots are finalized.
    finality_tracker: FinalityTracker,
    /// Keeps track of safe-to-notar blocks waiting for a parent certificate.
    /// Maps the awaited parent's block ID to the waiting child's block ID.
    s2n_waiting_parent_cert: BTreeMap<BlockId, BlockId>,
    /// Information about all active validators.
    epoch_info: Arc<EpochInfo>,
    /// Channel for sending events related to voting logic to Votor.
    votor_event_channel: Sender<VotorEvent>,
    /// Channel for sending repair requests to the repair loop.
    repair_channel: Sender<BlockId>,
}
impl PoolImpl {
    /// Creates a new empty pool containing no votes or certificates.
    ///
    /// Any later emitted events will be sent on provided `votor_event_channel`.
    /// Repair requests for missing blocks will be sent on `repair_channel`.
    pub fn new(
        epoch_info: Arc<EpochInfo>,
        votor_event_channel: Sender<VotorEvent>,
        repair_channel: Sender<BlockId>,
    ) -> Self {
        Self {
            slot_states: BTreeMap::new(),
            parent_ready_tracker: ParentReadyTracker::default(),
            finality_tracker: FinalityTracker::default(),
            s2n_waiting_parent_cert: BTreeMap::new(),
            epoch_info,
            votor_event_channel,
            repair_channel,
        }
    }
    /// Adds a new certificate to the pool. Certificate is assumed to be valid.
    ///
    /// Caller needs to ensure that the certificate passes all validity checks:
    /// - slot is not too old or too far in the future
    /// - signature is valid
    /// - certificate is not a duplicate
    async fn add_valid_cert(&mut self, cert: Cert) {
        let slot = cert.slot();
        // actually add certificate
        trace!("adding cert to pool: {cert:?}");
        self.slot_state(slot).add_cert(cert.clone());
        // handle resulting state updates
        match &cert {
            Cert::Notar(_) | Cert::NotarFallback(_) => {
                // notar(-fallback) certificates always carry a block hash
                let block_hash = cert.block_hash().cloned().unwrap();
                let block_id = (slot, block_hash.clone());
                info!(
                    "notarized(-fallback) block {} in slot {}",
                    &hex::encode(block_hash.as_hash())[..8],
                    slot
                );
                if matches!(cert, Cert::Notar(_)) {
                    // a full notarization may contribute to (slow) finalization
                    let finalization_event = self
                        .finality_tracker
                        .mark_notarized(slot, block_hash.clone());
                    self.handle_finalization(finalization_event).await;
                }
                // potentially notify child waiting for safe-to-notar
                if let Some((child_slot, child_hash)) =
                    self.s2n_waiting_parent_cert.remove(&block_id)
                    && let Some(output) = self
                        .slot_state(child_slot)
                        .notify_parent_certified(child_hash)
                {
                    match output {
                        // child became safe-to-notar: tell Votor
                        Either::Left(event) => {
                            self.votor_event_channel.send(event).await.unwrap();
                        }
                        // child block is missing: request repair
                        Either::Right((slot, hash)) => {
                            self.repair_channel.send((slot, hash)).await.unwrap();
                        }
                    }
                }
                // add block to parent-ready tracker, send any new parents to Votor.
                let new_parents_ready = self.parent_ready_tracker.mark_notar_fallback(&block_id);
                self.send_parent_ready_events(new_parents_ready).await;
                // repair this block, if necessary
                self.repair_channel.send((slot, block_hash)).await.unwrap();
            }
            Cert::Skip(_) => {
                warn!("skipped slot {slot}");
                let new_parents_ready = self.parent_ready_tracker.mark_skipped(slot);
                self.send_parent_ready_events(new_parents_ready).await;
            }
            Cert::FastFinal(ff_cert) => {
                info!("fast finalized slot {slot}");
                let hash = ff_cert.block_hash().clone();
                let finalization_event = self.finality_tracker.mark_fast_finalized(slot, hash);
                self.handle_finalization(finalization_event).await;
                // finalization advanced: drop state for old slots
                self.prune();
            }
            Cert::Final(_) => {
                info!("slow finalized slot {slot}");
                let finalization_event = self.finality_tracker.mark_finalized(slot);
                self.handle_finalization(finalization_event).await;
                // finalization advanced: drop state for old slots
                self.prune();
            }
        }
        // send to votor for broadcasting
        let event = VotorEvent::CertCreated(Box::new(cert));
        self.votor_event_channel.send(event).await.unwrap();
    }
    /// Mutably accesses the [`SlotState`] for the given `slot`.
    ///
    /// Creates a new [`SlotState`] if none exists yet.
    fn slot_state(&mut self, slot: Slot) -> &mut SlotState {
        self.slot_states
            .entry(slot)
            .or_insert_with(|| SlotState::new(slot, Arc::clone(&self.epoch_info)))
    }
    /// Fetches all certificates for the provided range of `slots`.
    fn get_certs(&self, slots: impl RangeBounds<Slot>) -> Vec<Cert> {
        let mut certs = Vec::new();
        for (_, slot_state) in self.slot_states.range(slots) {
            if let Some(cert) = slot_state.certificates.finalize.clone() {
                certs.push(Cert::Final(cert));
            }
            if let Some(cert) = slot_state.certificates.fast_finalize.clone() {
                certs.push(Cert::FastFinal(cert));
            }
            if let Some(cert) = slot_state.certificates.notar.clone() {
                certs.push(Cert::Notar(cert));
            }
            for cert in slot_state.certificates.notar_fallback.iter().cloned() {
                certs.push(Cert::NotarFallback(cert));
            }
            if let Some(cert) = slot_state.certificates.skip.clone() {
                certs.push(Cert::Skip(cert));
            }
        }
        certs
    }
    /// Fetches finalization certificates for given `slot`, if any.
    ///
    /// Prefers fast-finalization over slow-finalization, if it's available.
    /// In that case this returns only the fast-finalization certificate.
    /// Otherwise, returns the finalization and notarization certificates.
    /// Returns an empty `Vec` if the slot is not finalized (or unknown).
    fn get_final_certs(&self, slot: Slot) -> Vec<Cert> {
        let Some(slot_state) = self.slot_states.get(&slot) else {
            return Vec::new();
        };
        if let Some(ff_cert) = &slot_state.certificates.fast_finalize {
            return vec![Cert::FastFinal(ff_cert.clone())];
        }
        // slow finalization needs both the finalize and notar certificates
        if let Some(final_cert) = &slot_state.certificates.finalize
            && let Some(notar_cert) = &slot_state.certificates.notar
        {
            return vec![
                Cert::Final(final_cert.clone()),
                Cert::Notar(notar_cert.clone()),
            ];
        }
        Vec::new()
    }
    /// Fetches all votes cast by myself for the provided range of `slots`.
    fn get_own_votes(&self, slots: impl RangeBounds<Slot>) -> Vec<Vote> {
        let mut votes = Vec::new();
        let own_id = self.epoch_info.own_id;
        for (_, slot_state) in self.slot_states.range(slots) {
            if let Some(vote) = &slot_state.votes.finalize[own_id as usize] {
                votes.push(vote.clone());
            }
            if let Some(vote) = &slot_state.votes.notar[own_id as usize] {
                votes.push(vote.clone());
            }
            // there may be multiple notar-fallback votes (one per block hash)
            for vote in slot_state.votes.notar_fallback[own_id as usize].values() {
                votes.push(vote.clone());
            }
            if let Some(vote) = &slot_state.votes.skip[own_id as usize] {
                votes.push(vote.clone());
            }
            if let Some(vote) = &slot_state.votes.skip_fallback[own_id as usize] {
                votes.push(vote.clone());
            }
        }
        votes
    }
    /// Cleans up old finalized slots from the pool.
    ///
    /// After this, [`Self::slot_states`] will only contain entries for slots
    /// >= [`Self::finalized_slot`].
    fn prune(&mut self) {
        let last_slot = self.finalized_slot();
        // `split_off` keeps everything >= `last_slot`, dropping older state
        self.slot_states = self.slot_states.split_off(&last_slot);
    }
    /// Returns `true` iff the given parent is ready for the given slot.
    ///
    /// This requires that the parent is at least notarized-fallback.
    /// Also, if the parent is in a slot before `slot-1`, then all slots in
    /// `parent+1..slot-1` (inclusive) must be skip-certified.
    pub fn is_parent_ready(&self, slot: Slot, parent: &BlockId) -> bool {
        self.parent_ready_tracker
            .parents_ready(slot)
            .contains(parent)
    }
    /// Returns `true` iff the pool contains a notar(-fallback) certificate for the slot.
    pub fn has_notar_or_fallback_cert(&self, slot: Slot) -> bool {
        self.slot_states.get(&slot).is_some_and(|state| {
            state.certificates.notar.is_some() || !state.certificates.notar_fallback.is_empty()
        })
    }
    /// Returns the hash of the notarized block for the given slot, if any.
    pub fn get_notarized_block(&self, slot: Slot) -> Option<&BlockHash> {
        self.slot_states
            .get(&slot)
            .and_then(|state| state.certificates.notar.as_ref().map(NotarCert::block_hash))
    }
    /// Returns `true` iff the pool contains a (fast) finalization certificate for the slot.
    pub fn has_final_cert(&self, slot: Slot) -> bool {
        self.slot_states.get(&slot).is_some_and(|state| {
            state.certificates.fast_finalize.is_some() || state.certificates.finalize.is_some()
        })
    }
    /// Returns `true` iff the pool contains a notarization certificate for the slot.
    pub fn has_notar_cert(&self, slot: Slot) -> bool {
        self.slot_states
            .get(&slot)
            .is_some_and(|state| state.certificates.notar.is_some())
    }
    /// Returns `true` iff the pool contains a skip certificate for the slot.
    pub fn has_skip_cert(&self, slot: Slot) -> bool {
        self.slot_states
            .get(&slot)
            .is_some_and(|state| state.certificates.skip.is_some())
    }
    /// Forwards a finalization event to the parent-ready tracker and
    /// notifies Votor of any parents that became ready as a result.
    async fn handle_finalization(&mut self, event: FinalizationEvent) {
        let new_parents_ready = self.parent_ready_tracker.handle_finalization(event);
        self.send_parent_ready_events(new_parents_ready).await;
    }
    /// Emits a [`VotorEvent::ParentReady`] for each newly ready parent.
    async fn send_parent_ready_events(&self, parents: impl IntoIterator<Item = (Slot, BlockId)>) {
        for (slot, (parent_slot, parent_hash)) in parents {
            // parent-ready only applies to the first slot of a leader window
            debug_assert!(slot.is_start_of_window());
            let event = VotorEvent::ParentReady {
                slot,
                parent_slot,
                parent_hash,
            };
            self.votor_event_channel.send(event).await.unwrap();
        }
    }
}
#[async_trait]
impl Pool for PoolImpl {
    /// Adds a new certificate to the pool. Checks validity of the certificate.
    ///
    /// Returns an error if the certificate's slot is out of bounds, its
    /// signature is invalid, or an equivalent certificate is already stored.
    async fn add_cert(&mut self, cert: Cert) -> Result<(), AddCertError> {
        // ignore old and far-in-the-future certificates
        let slot = cert.slot();
        // TODO: set bounds exactly correctly,
        // use correct validator set & stake distribution
        let slot_far_in_future = Slot::new(self.finalized_slot().inner() + 2 * SLOTS_PER_EPOCH);
        // NOTE: This needs to be `< finalize_slot` to allow for later notarization.
        if slot < self.finalized_slot() || slot >= slot_far_in_future {
            return Err(AddCertError::SlotOutOfBounds);
        }
        // verify signature
        if !cert.check_sig(&self.epoch_info.validators) {
            return Err(AddCertError::InvalidSignature);
        }
        // get `SlotCertificates`, initialize if it doesn't exist yet
        let certs = &mut self.slot_state(slot).certificates;
        // check if the certificate is a duplicate
        let duplicate = match cert {
            Cert::Notar(_) => certs.notar.is_some(),
            // notar-fallback certs are per block hash, so compare hashes
            Cert::NotarFallback(_) => certs
                .notar_fallback
                .iter()
                .any(|nf| nf.block_hash() == cert.block_hash().unwrap()),
            Cert::Skip(_) => certs.skip.is_some(),
            Cert::FastFinal(_) => certs.fast_finalize.is_some(),
            Cert::Final(_) => certs.finalize.is_some(),
        };
        if duplicate {
            return Err(AddCertError::Duplicate);
        }
        self.add_valid_cert(cert).await;
        Ok(())
    }
    /// Adds a new vote to the pool. Checks validity of the vote.
    ///
    /// On success, any certificates completed by this vote are added as well;
    /// resulting events are forwarded to Votor and the repair loop.
    async fn add_vote(&mut self, vote: Vote) -> Result<(), AddVoteError> {
        // ignore old and far-in-the-future votes
        let slot = vote.slot();
        // TODO: set bounds exactly correctly,
        // use correct validator set & stake distribution
        let slot_far_in_future = Slot::new(self.finalized_slot().inner() + 2 * SLOTS_PER_EPOCH);
        if slot < self.finalized_slot() || slot >= slot_far_in_future {
            return Err(AddVoteError::SlotOutOfBounds);
        }
        // verify signature
        let pk = &self.epoch_info.validator(vote.signer()).voting_pubkey;
        if !vote.check_sig(pk) {
            return Err(AddVoteError::InvalidSignature);
        }
        // check if vote is valid and should be counted
        let voter = vote.signer();
        let voter_stake = self.epoch_info.validator(voter).stake;
        if let Some(offence) = self.slot_state(slot).check_slashable_offence(&vote) {
            return Err(AddVoteError::Slashable(offence));
        } else if self.slot_state(slot).should_ignore_vote(&vote) {
            return Err(AddVoteError::Duplicate);
        }
        // actually add the vote
        trace!("adding vote to pool: {vote:?}");
        let (new_certs, votor_events, blocks_to_repair) =
            self.slot_state(slot).add_vote(vote, voter_stake);
        // handle any resulting events
        for cert in new_certs {
            self.add_valid_cert(cert).await;
        }
        for event in votor_events {
            self.votor_event_channel.send(event).await.unwrap();
        }
        for (slot, block_hash) in blocks_to_repair {
            self.repair_channel.send((slot, block_hash)).await.unwrap();
        }
        Ok(())
    }
    /// Registers a new block with its respective parent in the pool.
    ///
    /// This should be called once for every valid block (e.g. directly by blockstore).
    /// Ensures that the parent information is available for safe-to-notar checks.
    ///
    /// Panics if the block's slot is not strictly greater than its parent's.
    async fn add_block(&mut self, block_id: BlockId, parent_id: BlockId) {
        assert!(block_id.0 > parent_id.0);
        let (slot, block_hash) = &block_id;
        let (parent_slot, parent_hash) = &parent_id;
        // let the finality and parent-ready trackers know about the new link
        let finalization_event = self
            .finality_tracker
            .add_parent(block_id.clone(), parent_id.clone());
        let new_parents_ready = self
            .parent_ready_tracker
            .handle_finalization(finalization_event);
        self.send_parent_ready_events(new_parents_ready).await;
        self.slot_state(*slot)
            .notify_parent_known(block_hash.clone());
        // if the parent is already notarized(-fallback), resolve safe-to-notar now
        if let Some(parent_state) = self.slot_states.get(parent_slot)
            && parent_state.is_notar_fallback(parent_hash)
            && let Some(output) = self
                .slot_state(*slot)
                .notify_parent_certified(block_hash.clone())
        {
            match output {
                Either::Left(event) => {
                    self.votor_event_channel.send(event).await.unwrap();
                }
                Either::Right((slot, hash)) => {
                    self.repair_channel.send((slot, hash)).await.unwrap();
                }
            }
            return;
        }
        // otherwise remember the child until the parent's certificate arrives
        // NOTE(review): also reached when the parent is certified but
        // `notify_parent_certified` produced no output — confirm this is intended
        self.s2n_waiting_parent_cert.insert(parent_id, block_id);
    }
    /// Triggers a recovery from a standstill.
    ///
    /// Determines which certificates and votes need to be re-broadcast.
    /// Emits the corresponding [`VotorEvent::Standstill`] event for Votor.
    /// Should be called after not seeing any progress for the standstill duration.
    async fn recover_from_standstill(&self) {
        let slot = self.finalized_slot();
        let mut certs = self.get_final_certs(slot);
        assert!(!certs.is_empty(), "no final cert");
        // re-broadcast everything after the last finalized slot
        certs.extend(self.get_certs(slot.next()..));
        let votes = self.get_own_votes(slot.next()..);
        warn!("recovering from standstill at slot {slot}");
        debug!(
            "re-broadcasting {} certificates and {} votes",
            certs.len(),
            votes.len()
        );
        // NOTE: This event corresponds to the slot after the last finalized one.
        // This way it is ignored by `Votor` iff a new slot was finalized.
        let event = VotorEvent::Standstill(slot.next(), certs, votes);
        // send to votor for broadcasting
        self.votor_event_channel.send(event).await.unwrap();
    }
    /// Gives the currently highest finalized (fast or slow) slot.
    fn finalized_slot(&self) -> Slot {
        self.finality_tracker.highest_finalized_slot()
    }
    /// Returns all possible parents for the given slot that are ready.
    fn parents_ready(&self, slot: Slot) -> &[BlockId] {
        self.parent_ready_tracker.parents_ready(slot)
    }
    /// Returns a ready parent directly (`Left`), or a receiver (`Right`)
    /// on which a parent will be delivered once one becomes ready.
    fn wait_for_parent_ready(&mut self, slot: Slot) -> Either<BlockId, oneshot::Receiver<BlockId>> {
        self.parent_ready_tracker.wait_for_parent_ready(slot)
    }
}
#[cfg(test)]
mod tests {
use tokio::sync::mpsc;
use super::*;
use crate::consensus::cert::{FastFinalCert, NotarCert, SkipCert};
use crate::consensus::vote::VoteKind;
use crate::crypto::Hash;
use crate::crypto::aggsig::SecretKey;
use crate::crypto::merkle::GENESIS_BLOCK_HASH;
use crate::test_utils::generate_validators;
use crate::types::SLOTS_PER_WINDOW;
#[tokio::test]
async fn handle_invalid_votes() {
    // A vote signed with a key not belonging to the validator must be rejected.
    let (_, epoch_info) = generate_validators(11);
    let (votor_tx, _votor_rx) = mpsc::channel(1024);
    let (repair_tx, _repair_rx) = mpsc::channel(1024);
    let mut pool = PoolImpl::new(epoch_info, votor_tx, repair_tx);
    let wrong_sk = SecretKey::new(&mut rand::rng());
    let vote = Vote::new_notar(Slot::new(0), GENESIS_BLOCK_HASH, &wrong_sk, 0);
    let res = pool.add_vote(vote).await;
    assert_eq!(res, Err(AddVoteError::InvalidSignature));
}
#[tokio::test]
async fn notarize_block() {
    let (sks, epoch_info) = generate_validators(11);
    let (votor_tx, _votor_rx) = mpsc::channel(1024);
    let (repair_tx, _repair_rx) = mpsc::channel(1024);
    let mut pool = PoolImpl::new(epoch_info, votor_tx, repair_tx);
    // (slot number, notar voters, expect a notarization certificate?)
    // slot 0: everyone votes; slot 1: exactly the 60% threshold;
    // slot 2: one vote short of the threshold
    let cases = [(0, 11, true), (1, 7, true), (2, 6, false)];
    for (slot_num, voters, expect_cert) in cases {
        let slot = Slot::new(slot_num);
        assert!(!pool.has_notar_cert(slot));
        for v in 0..voters {
            let vote = Vote::new_notar(slot, GENESIS_BLOCK_HASH, &sks[v as usize], v);
            assert_eq!(pool.add_vote(vote).await, Ok(()));
        }
        assert_eq!(pool.has_notar_cert(slot), expect_cert);
    }
}
#[tokio::test]
async fn skip_block() {
    let (sks, epoch_info) = generate_validators(11);
    let (votor_tx, _votor_rx) = mpsc::channel(1024);
    let (repair_tx, _repair_rx) = mpsc::channel(1024);
    let mut pool = PoolImpl::new(epoch_info, votor_tx, repair_tx);
    // (slot number, skip voters, expect a skip certificate?)
    // slot 0: everyone votes; slot 1: exactly the threshold;
    // slot 2: one vote short of the threshold
    let cases = [(0, 11, true), (1, 7, true), (2, 6, false)];
    for (slot_num, voters, expect_cert) in cases {
        let slot = Slot::new(slot_num);
        assert!(!pool.has_skip_cert(slot));
        for v in 0..voters {
            let vote = Vote::new_skip(slot, &sks[v as usize], v);
            assert_eq!(pool.add_vote(vote).await, Ok(()));
        }
        assert_eq!(pool.has_skip_cert(slot), expect_cert);
    }
}
#[tokio::test]
async fn finalize_block() {
    // Slow finalization needs BOTH a notarization and a finalization
    // certificate, in either order.
    let (sks, epoch_info) = generate_validators(11);
    let (votor_tx, _votor_rx) = mpsc::channel(1024);
    let (repair_tx, _repair_rx) = mpsc::channel(1024);
    let mut pool = PoolImpl::new(epoch_info, votor_tx, repair_tx);

    // slot 1: notar votes first, then finalize votes
    let slot1 = Slot::genesis().next();
    let block1_hash: BlockHash = Hash::random_for_test().into();
    for v in 0..7 {
        let vote = Vote::new_notar(slot1, block1_hash.clone(), &sks[v as usize], v);
        assert_eq!(pool.add_vote(vote).await, Ok(()));
    }
    // notarization alone is NOT enough to finalize
    assert!(!pool.has_final_cert(slot1));
    assert_eq!(pool.finalized_slot(), Slot::genesis());
    for v in 0..7 {
        let vote = Vote::new_final(slot1, &sks[v as usize], v);
        assert_eq!(pool.add_vote(vote).await, Ok(()));
    }
    // NOW slot 1 is finalized
    assert!(pool.has_final_cert(slot1));
    assert_eq!(pool.finalized_slot(), slot1);

    // slot 2: finalize votes first, then notar votes
    let slot2 = slot1.next();
    for v in 0..7 {
        let vote = Vote::new_final(slot2, &sks[v as usize], v);
        assert_eq!(pool.add_vote(vote).await, Ok(()));
    }
    // finalization cert alone is NOT enough to advance the finalized slot
    assert!(pool.has_final_cert(slot2));
    assert_eq!(pool.finalized_slot(), slot1);
    let block2_hash: BlockHash = Hash::random_for_test().into();
    for v in 0..7 {
        let vote = Vote::new_notar(slot2, block2_hash.clone(), &sks[v as usize], v);
        assert_eq!(pool.add_vote(vote).await, Ok(()));
    }
    // NOW slot 2 is finalized
    assert!(pool.has_final_cert(slot2));
    assert_eq!(pool.finalized_slot(), slot2);

    // slot 3: one vote short of the threshold for both vote types
    let slot3 = slot2.next();
    let block3_hash: BlockHash = Hash::random_for_test().into();
    for v in 0..6 {
        let vote = Vote::new_notar(slot3, block3_hash.clone(), &sks[v as usize], v);
        assert_eq!(pool.add_vote(vote).await, Ok(()));
        let vote = Vote::new_final(slot3, &sks[v as usize], v);
        assert_eq!(pool.add_vote(vote).await, Ok(()));
    }
    assert!(!pool.has_final_cert(slot3));
    assert_eq!(pool.finalized_slot(), slot2);
}
#[tokio::test]
async fn fast_finalize_block() {
    let (sks, epoch_info) = generate_validators(11);
    let (votor_tx, _votor_rx) = mpsc::channel(1024);
    let (repair_tx, _repair_rx) = mpsc::channel(1024);
    let mut pool = PoolImpl::new(epoch_info, votor_tx, repair_tx);
    // (slot, notar voters, expect fast-finalization?, expected finalized slot)
    // slot 0: everyone; slot 1: exactly the fast threshold;
    // slot 2: one vote short of the fast threshold
    let cases = [(0, 11, true, 0), (1, 9, true, 1), (2, 8, false, 1)];
    for (slot_num, voters, expect_final, expect_finalized) in cases {
        let slot = Slot::new(slot_num);
        assert!(!pool.has_final_cert(slot));
        for v in 0..voters {
            let vote = Vote::new_notar(slot, GENESIS_BLOCK_HASH, &sks[v as usize], v);
            assert_eq!(pool.add_vote(vote).await, Ok(()));
        }
        assert_eq!(pool.has_final_cert(slot), expect_final);
        assert_eq!(pool.finalized_slot(), Slot::new(expect_finalized));
    }
}
#[tokio::test]
async fn simple_branch_certified() {
let (sks, epoch_info) = generate_validators(11);
let (votor_tx, _votor_rx) = mpsc::channel(1024);
let (repair_tx, _repair_rx) = mpsc::channel(1024);
let mut pool = PoolImpl::new(epoch_info, votor_tx, repair_tx);
let window = Slot::genesis().slots_in_window().collect::<Vec<_>>();
let hashes: Vec<BlockHash> = window
.iter()
.map(|_| Hash::random_for_test().into())
.collect();
for slot in window.iter().skip(1) {
for v in 0..7 {
let vote = Vote::new_notar(
*slot,
hashes[slot.inner() as usize].clone(),
&sks[v as usize],
v,
);
assert_eq!(pool.add_vote(vote).await, Ok(()));
}
}
let slot = *window.last().unwrap();
let next = slot.next();
assert!(pool.is_parent_ready(next, &(slot, hashes[next.inner() as usize - 1].clone())));
}
#[tokio::test]
async fn branch_certified_notar_fallback() {
let (sks, epoch_info) = generate_validators(11);
let (votor_tx, _votor_rx) = mpsc::channel(1024);
let (repair_tx, _repair_rx) = mpsc::channel(1024);
let mut pool = PoolImpl::new(epoch_info, votor_tx, repair_tx);
// receive mixed notar & notar-fallback votes
let window = Slot::genesis().slots_in_window().collect::<Vec<_>>();
let hashes: Vec<BlockHash> = window
.iter()
.map(|_| Hash::random_for_test().into())
.collect();
for slot in window.iter().skip(1) {
let hash = &hashes[slot.inner() as usize];
assert!(!pool.is_parent_ready(slot.next(), &(*slot, hash.clone())));
for v in 0..4 {
let vote = Vote::new_notar(*slot, hash.clone(), &sks[v as usize], v);
assert_eq!(pool.add_vote(vote).await, Ok(()));
}
for v in 4..7 {
let vote = Vote::new_notar_fallback(*slot, hash.clone(), &sks[v as usize], v);
assert_eq!(pool.add_vote(vote).await, Ok(()));
}
}
let slot = *window.last().unwrap();
let next = slot.next();
let hash = hashes[next.inner() as usize - 1].clone();
assert!(pool.is_parent_ready(next, &(slot, hash)));
}
#[tokio::test]
async fn branch_certified_out_of_order() {
let (sks, epoch_info) = generate_validators(11);
let (votor_tx, _votor_rx) = mpsc::channel(1024);
let (repair_tx, _repair_rx) = mpsc::channel(1024);
let mut pool = PoolImpl::new(epoch_info, votor_tx, repair_tx);
// first see skip votes for later slots
let mut window = Slot::new(0).slots_in_window().collect::<Vec<_>>();
assert!(window.len() > 2);
window.remove(0);
window.remove(0);
for slot in window.iter() {
for v in 0..7 {
let vote = Vote::new_skip(*slot, &sks[v as usize], v);
assert_eq!(pool.add_vote(vote).await, Ok(()));
}
}
let next = window.last().unwrap().next();
// no blocks are valid parents yet
assert!(pool.parents_ready(next).is_empty());
// then see notarization votes for slot 1
let slot1 = Slot::new(1);
let hash1: BlockHash = Hash::random_for_test().into();
for v in 0..7 {
let vote = Vote::new_notar(slot1, hash1.clone(), &sks[v as usize], v);
assert_eq!(pool.add_vote(vote).await, Ok(()));
}
// branch can only be certified once we saw votes other slots in window
assert!(pool.is_parent_ready(next, &(slot1, hash1)));
// no other blocks are valid parents
assert_eq!(pool.parents_ready(next).len(), 1);
}
#[tokio::test]
async fn branch_certified_late_cert() {
let (sks, epoch_info) = generate_validators(11);
let (votor_tx, _votor_rx) = mpsc::channel(1024);
let (repair_tx, _repair_rx) = mpsc::channel(1024);
let mut pool = PoolImpl::new(epoch_info.clone(), votor_tx, repair_tx);
// first see skip votes for later slots
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | true |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/consensus/pool/slot_state.rs | src/consensus/pool/slot_state.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Data structures handling votes and certificates for a single slot.
//!
//! The main data structure defined here is [`SlotState`], which has components:
//! - [`SlotVotes`] for all votes in a single slot.
//! - [`SlotVotedStake`] for all running stake totals in a single slot.
//! - [`SlotCertificates`] for all certificates in a single slot.
use std::collections::btree_map::Entry;
use std::collections::{BTreeMap, BTreeSet};
use std::sync::Arc;
use either::Either;
use smallvec::SmallVec;
use super::SlashableOffence;
use crate::consensus::cert::{FastFinalCert, FinalCert, NotarCert, NotarFallbackCert, SkipCert};
use crate::consensus::vote::VoteKind;
use crate::consensus::votor::VotorEvent;
use crate::consensus::{Cert, EpochInfo, Vote};
use crate::crypto::merkle::BlockHash;
use crate::{BlockId, Slot, Stake};
/// Data structure holding pool state for a single slot.
pub struct SlotState {
    /// Votes for this slot, contains all vote types and validators.
    pub(super) votes: SlotVotes,
    /// Running stake totals for different types of votes.
    pub(super) voted_stakes: SlotVotedStake,
    /// Certificates for this slot, contains all certificate types and validators.
    pub(super) certificates: SlotCertificates,
    /// Indicates blocks for which we already know their parents.
    parents: BTreeMap<BlockHash, ParentStatus>,
    /// Hashes of blocks that have reached the necessary votes for safe-to-notar
    /// and are only waiting for our own vote to arrive.
    pending_safe_to_notar: BTreeSet<BlockHash>,
    /// Hashes of blocks for which safe-to-notar has already been reached.
    sent_safe_to_notar: BTreeSet<BlockHash>,
    /// Indicates if safe-to-skip has already been sent for this slot.
    sent_safe_to_skip: bool,
    /// The slot this state is for.
    slot: Slot,
    /// Information about all validators active in this slot.
    pub(super) epoch_info: Arc<EpochInfo>,
}
// PERF: replace storing Votes (50% size overhead) with storing only signatures?
/// All votes received for a single slot, grouped by vote type.
pub struct SlotVotes {
    /// Notarization votes for all validators (indexed by `ValidatorId`).
    pub(super) notar: Vec<Option<Vote>>,
    /// Notar-fallback votes for all validators (indexed by `ValidatorId`),
    /// keyed by block hash, since a validator may cast one per block.
    pub(super) notar_fallback: Vec<BTreeMap<BlockHash, Vote>>,
    /// Skip votes for all validators (indexed by `ValidatorId`).
    pub(super) skip: Vec<Option<Vote>>,
    /// Skip-fallback votes for all validators (indexed by `ValidatorId`).
    pub(super) skip_fallback: Vec<Option<Vote>>,
    /// Finalization votes for all validators (indexed by `ValidatorId`).
    pub(super) finalize: Vec<Option<Vote>>,
}
/// Running stake totals per vote type for a single slot.
#[derive(Default)]
pub struct SlotVotedStake {
    /// Amount of stake for each block hash for which we have a notarization vote.
    pub(super) notar: BTreeMap<BlockHash, Stake>,
    /// Amount of stake for each block hash for which we have a notar-fallback vote.
    pub(super) notar_fallback: BTreeMap<BlockHash, Stake>,
    /// Amount of stake for which we have a skip vote.
    pub(super) skip: Stake,
    /// Amount of stake for which we have a skip-fallback vote.
    pub(super) skip_fallback: Stake,
    /// Amount of stake for which we have a finalization vote.
    pub(super) finalize: Stake,
    /// Amount of stake for which we have either notar or skip vote.
    pub(super) notar_or_skip: Stake,
    /// Maximum amount of stake that voted notar on the same block.
    pub(super) top_notar: Stake,
}
/// Certificates held for a single slot, covering all certificate types.
#[derive(Default)]
pub struct SlotCertificates {
    /// Notarization certificate for this slot, if it exists.
    pub(super) notar: Option<NotarCert>,
    /// Notar-fallback certificates for this slot, if any.
    ///
    /// There can be several, at most one per block hash.
    pub(super) notar_fallback: Vec<NotarFallbackCert>,
    /// Skip certificate for this slot, if it exists.
    pub(super) skip: Option<SkipCert>,
    /// Fast finalization certificate for this slot, if it exists.
    pub(super) fast_finalize: Option<FastFinalCert>,
    /// Finalization certificate for this slot, if it exists.
    pub(super) finalize: Option<FinalCert>,
}
/// Status of a block's parent, as tracked for the safe-to-notar check.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ParentStatus {
    /// The parent is known (see [`SlotState::notify_parent_known`]) but not
    /// yet certified.
    Known,
    /// The parent is known and notarized(-fallback)
    /// (see [`SlotState::notify_parent_certified`]).
    Certified,
}
/// Possible states for the safe-to-notar check.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SafeToNotarStatus {
    /// All conditions are met; the safe-to-notar event can be emitted.
    SafeToNotar,
    /// The block is not known yet (no parent recorded), so it needs repair.
    MissingBlock,
    /// Some required votes (possibly our own) are still missing.
    AwaitingVotes,
}
/// Outputs produced when ingesting a vote into a [`SlotState`]:
/// newly created certificates, newly emitted votor events, and
/// blocks that should be repaired.
type SlotStateOutputs = (
    SmallVec<[Cert; 2]>,
    SmallVec<[VotorEvent; 2]>,
    SmallVec<[(Slot, BlockHash); 1]>,
);
impl SlotState {
/// Creates a new container for votes and certificates for a single slot.
///
/// Initially, it is completely empty.
pub fn new(slot: Slot, epoch_info: Arc<EpochInfo>) -> Self {
Self {
votes: SlotVotes::new(epoch_info.validators.len()),
voted_stakes: SlotVotedStake::default(),
certificates: SlotCertificates::default(),
parents: BTreeMap::new(),
pending_safe_to_notar: BTreeSet::new(),
sent_safe_to_notar: BTreeSet::new(),
sent_safe_to_skip: false,
slot,
epoch_info,
}
}
/// Adds a certificate to this slot.
pub fn add_cert(&mut self, cert: Cert) {
match cert {
Cert::Notar(n) => self.certificates.notar = Some(n),
Cert::NotarFallback(n) => {
if !self.is_notar_fallback(n.block_hash()) {
self.certificates.notar_fallback.push(n);
}
}
Cert::Skip(s) => self.certificates.skip = Some(s),
Cert::FastFinal(s) => self.certificates.fast_finalize = Some(s),
Cert::Final(f) => self.certificates.finalize = Some(f),
}
}
    /// Adds a vote to this slot.
    ///
    /// Handles updating the corresponding running stake totals, creating any
    /// new certificates and checking other conditions, like safe-to-notar.
    ///
    /// Callers must have filtered duplicates via [`SlotState::should_ignore_vote`]
    /// first, otherwise stake could be counted twice.
    ///
    /// Returns potentially created certificates and newly emitted votor events.
    pub fn add_vote(&mut self, vote: Vote, voter_stake: Stake) -> SlotStateOutputs {
        let slot = vote.slot();
        let voter = vote.signer();
        let v = voter as usize;
        let (certs_created, mut votor_events, mut blocks_to_repair) = match vote.kind() {
            VoteKind::Notar(_, block_hash) => {
                // count stake first (borrows the hash from the vote), then store the vote
                let outputs = self.count_notar_stake(slot, block_hash, voter_stake);
                self.votes.notar[v] = Some(vote);
                outputs
            }
            VoteKind::NotarFallback(_, block_hash) => {
                let outputs = self.count_notar_fallback_stake(block_hash, voter_stake);
                let res = self.votes.notar_fallback[v].insert(block_hash.clone(), vote);
                // duplicates were filtered via `should_ignore_vote`, so no
                // notar-fallback vote for this hash may exist yet
                assert!(res.is_none());
                outputs
            }
            VoteKind::Skip(_) => {
                self.votes.skip[v] = Some(vote);
                // skip votes also count towards the combined notar-or-skip
                // total used by the safe-to-skip check
                self.voted_stakes.notar_or_skip += voter_stake;
                self.count_skip_stake(slot, voter_stake, false)
            }
            VoteKind::SkipFallback(_) => {
                self.votes.skip_fallback[v] = Some(vote);
                self.count_skip_stake(slot, voter_stake, true)
            }
            VoteKind::Final(_) => {
                self.votes.finalize[v] = Some(vote);
                self.count_finalize_stake(voter_stake)
            }
        };
        // own vote might have made a block safe-to-notar
        if voter == self.epoch_info.own_id {
            // clone since `check_safe_to_notar` mutates the pending set
            for hash in self.pending_safe_to_notar.clone() {
                if self.sent_safe_to_notar.contains(&hash) {
                    continue;
                }
                match self.check_safe_to_notar(hash.clone()) {
                    SafeToNotarStatus::SafeToNotar => {
                        votor_events.push(VotorEvent::SafeToNotar(slot, hash));
                    }
                    SafeToNotarStatus::MissingBlock => blocks_to_repair.push((slot, hash)),
                    SafeToNotarStatus::AwaitingVotes => {}
                }
            }
        }
        (certs_created, votor_events, blocks_to_repair)
    }
/// Mark the parent of the block given by `hash` as known (in Blokstor).
pub fn notify_parent_known(&mut self, hash: BlockHash) {
self.parents.entry(hash).or_insert(ParentStatus::Known);
}
    /// Mark the parent of the block given by `hash` as notarized-fallback.
    ///
    /// May trigger safe-to-notar for the block: returns the votor event if it
    /// does, the block ID to repair if the block is still missing, or `None`
    /// otherwise.
    ///
    /// # Panics
    ///
    /// If [`SlotState::notify_parent_known`] has not yet been called for this block.
    pub fn notify_parent_certified(
        &mut self,
        hash: BlockHash,
    ) -> Option<Either<VotorEvent, BlockId>> {
        let Some(parent_info) = self.parents.get_mut(&hash) else {
            panic!("parent not known")
        };
        *parent_info = ParentStatus::Certified;
        // potentially emit safe-to-notar (at most once per block)
        if self.sent_safe_to_notar.contains(&hash) {
            return None;
        }
        match self.check_safe_to_notar(hash.clone()) {
            SafeToNotarStatus::SafeToNotar => {
                Some(Either::Left(VotorEvent::SafeToNotar(self.slot, hash)))
            }
            SafeToNotarStatus::MissingBlock => Some(Either::Right((self.slot, hash))),
            SafeToNotarStatus::AwaitingVotes => None,
        }
    }
fn is_weakest_quorum(&self, stake: Stake) -> bool {
stake >= (self.epoch_info.total_stake()).div_ceil(5)
}
fn is_weak_quorum(&self, stake: Stake) -> bool {
stake >= (self.epoch_info.total_stake() * 2).div_ceil(5)
}
fn is_quorum(&self, stake: Stake) -> bool {
stake >= (self.epoch_info.total_stake() * 3).div_ceil(5)
}
fn is_strong_quorum(&self, stake: Stake) -> bool {
stake >= (self.epoch_info.total_stake() * 4).div_ceil(5)
}
    /// Adds a given amount of `stake` to notarization counter for `block_hash`.
    /// Then, checks if a new notarization certificate can be created.
    ///
    /// Also re-evaluates safe-to-notar for this block, safe-to-skip for the
    /// slot, and whether a notar-fallback or fast-finalization certificate
    /// can now be formed.
    ///
    /// Returns potentially created certificates and newly emitted votor events.
    fn count_notar_stake(
        &mut self,
        slot: Slot,
        block_hash: &BlockHash,
        stake: Stake,
    ) -> SlotStateOutputs {
        let mut new_certs = SmallVec::new();
        let mut votor_events = SmallVec::new();
        let mut blocks_to_repair = SmallVec::new();
        // increment stake
        let notar_stake = self
            .voted_stakes
            .notar
            .entry(block_hash.clone())
            .or_insert(0);
        *notar_stake += stake;
        self.voted_stakes.notar_or_skip += stake;
        let notar_stake = *notar_stake;
        // track the best-notarized block, needed by the safe-to-skip check
        self.voted_stakes.top_notar = notar_stake.max(self.voted_stakes.top_notar);
        // check quorums
        if !self.sent_safe_to_notar.contains(block_hash) {
            match self.check_safe_to_notar(block_hash.clone()) {
                SafeToNotarStatus::SafeToNotar => {
                    votor_events.push(VotorEvent::SafeToNotar(slot, block_hash.clone()));
                }
                SafeToNotarStatus::MissingBlock => {
                    blocks_to_repair.push((slot, block_hash.clone()));
                }
                SafeToNotarStatus::AwaitingVotes => {}
            }
        }
        // safe-to-skip: 40% of stake voted skip or notar (excluding the
        // best-notarized block), and we already cast our own notar vote
        if !self.sent_safe_to_skip
            && self.is_weak_quorum(self.voted_stakes.notar_or_skip - self.voted_stakes.top_notar)
            && self.votes.notar[self.epoch_info.own_id as usize].is_some()
        {
            votor_events.push(VotorEvent::SafeToSkip(slot));
            self.sent_safe_to_skip = true;
        }
        // notar-fallback cert: 60% voted notar or notar-fallback on this block
        let nf_stake = *self
            .voted_stakes
            .notar_fallback
            .get(block_hash)
            .unwrap_or(&0);
        if self.is_quorum(nf_stake + notar_stake) && !self.is_notar_fallback(block_hash) {
            let mut votes = self.votes.notar_votes(block_hash);
            votes.extend(self.votes.notar_fallback_votes(block_hash));
            let cert = NotarFallbackCert::new_unchecked(&votes, &self.epoch_info.validators);
            new_certs.push(Cert::NotarFallback(cert));
        }
        // notarization cert: 60% voted notar on this block
        if self.is_quorum(notar_stake) && self.certificates.notar.is_none() {
            let votes = self.votes.notar_votes(block_hash);
            let cert = NotarCert::new_unchecked(&votes, &self.epoch_info.validators);
            new_certs.push(Cert::Notar(cert));
        }
        // fast-finalization cert: 80% voted notar on this block
        if self.is_strong_quorum(notar_stake) && self.certificates.fast_finalize.is_none() {
            let votes = self.votes.notar_votes(block_hash);
            let cert = FastFinalCert::new_unchecked(&votes, &self.epoch_info.validators);
            new_certs.push(Cert::FastFinal(cert));
        }
        (new_certs, votor_events, blocks_to_repair)
    }
    /// Adds a given amount of `stake` to notar-fallback counter for `block_hash`.
    /// Then, checks if a new notar-fallback certificate can be created.
    ///
    /// Returns potentially created certificates and newly emitted votor events.
    fn count_notar_fallback_stake(
        &mut self,
        block_hash: &BlockHash,
        stake: Stake,
    ) -> SlotStateOutputs {
        let mut new_certs = SmallVec::new();
        let nf_stakes = &mut self.voted_stakes.notar_fallback;
        let nf_stake = nf_stakes.entry(block_hash.clone()).or_insert(0);
        *nf_stake += stake;
        let nf_stake = *nf_stake;
        let notar_stake = *self.voted_stakes.notar.get(block_hash).unwrap_or(&0);
        // notar-fallback cert: 60% of stake voted notar or notar-fallback on
        // this block, and no such cert exists yet
        if self.is_quorum(nf_stake + notar_stake) && !self.is_notar_fallback(block_hash) {
            let mut votes = self.votes.notar_votes(block_hash);
            votes.extend(self.votes.notar_fallback_votes(block_hash));
            let cert = NotarFallbackCert::new_unchecked(&votes, &self.epoch_info.validators);
            new_certs.push(Cert::NotarFallback(cert));
        }
        // notar-fallback votes never trigger votor events or repair requests
        (new_certs, SmallVec::new(), SmallVec::new())
    }
    /// Adds a given amount of `stake` to skip counter for `slot`.
    /// Then, checks if a new skip certificate can be created.
    ///
    /// If `fallback` is `true`, the stake counts towards skip-fallback instead.
    /// Also re-evaluates safe-to-notar for pending blocks and safe-to-skip.
    ///
    /// Returns potentially created certificates and newly emitted votor events.
    fn count_skip_stake(&mut self, slot: Slot, stake: Stake, fallback: bool) -> SlotStateOutputs {
        let mut new_certs = SmallVec::new();
        let mut votor_events = SmallVec::new();
        let mut blocks_to_repair = SmallVec::new();
        if fallback {
            self.voted_stakes.skip_fallback += stake;
        } else {
            self.voted_stakes.skip += stake;
        }
        // additional skip stake may complete the mixed notar+skip quorum of
        // safe-to-notar for a pending block
        // PERF: clone on every skip vote
        for hash in self.pending_safe_to_notar.clone() {
            if self.sent_safe_to_notar.contains(&hash) {
                continue;
            }
            match self.check_safe_to_notar(hash.clone()) {
                SafeToNotarStatus::SafeToNotar => {
                    votor_events.push(VotorEvent::SafeToNotar(slot, hash));
                }
                SafeToNotarStatus::MissingBlock => blocks_to_repair.push((slot, hash)),
                SafeToNotarStatus::AwaitingVotes => {}
            }
        }
        // skip cert: 60% of stake voted skip or skip-fallback
        let total_skip_stake = self.voted_stakes.skip + self.voted_stakes.skip_fallback;
        if self.is_quorum(total_skip_stake) && self.certificates.skip.is_none() {
            let mut votes = self.votes.skip_votes();
            votes.extend(self.votes.skip_fallback_votes());
            let cert = SkipCert::new_unchecked(&votes, &self.epoch_info.validators);
            new_certs.push(Cert::Skip(cert));
        }
        // safe-to-skip: 40% of stake voted skip or notar (excluding the
        // best-notarized block), and we already cast our own notar vote
        if !self.sent_safe_to_skip
            && self.is_weak_quorum(self.voted_stakes.notar_or_skip - self.voted_stakes.top_notar)
            && self.votes.notar[self.epoch_info.own_id as usize].is_some()
        {
            votor_events.push(VotorEvent::SafeToSkip(slot));
            self.sent_safe_to_skip = true;
        }
        (new_certs, votor_events, blocks_to_repair)
    }
/// Adds a given amount of `stake` to finalization counter for `slot`.
/// Then, checks if a new finalization certificate can be created.
///
/// Returns potentially created certificates and newly emitted votor events.
fn count_finalize_stake(&mut self, stake: Stake) -> SlotStateOutputs {
let mut new_certs = SmallVec::new();
self.voted_stakes.finalize += stake;
if self.is_quorum(self.voted_stakes.finalize) && self.certificates.finalize.is_none() {
let votes: Vec<_> = self.votes.final_votes();
let cert = FinalCert::new_unchecked(&votes, &self.epoch_info.validators);
new_certs.push(Cert::Final(cert));
}
(new_certs, SmallVec::new(), SmallVec::new())
}
    /// Checks whether the given vote constitutes a slashable offence.
    ///
    /// A vote is slashable if the same validator previously cast a vote that
    /// the protocol forbids combining with it in the same slot.
    ///
    /// This has to be called before dismissing potential duplicates, as
    /// according to `should_ignore_vote()`.
    pub fn check_slashable_offence(&self, vote: &Vote) -> Option<SlashableOffence> {
        let slot = vote.slot();
        let voter = vote.signer();
        let v = voter as usize;
        match vote.kind() {
            // notar conflicts with: skip, or notar for a different hash
            VoteKind::Notar(_, block_hash) => {
                if self.votes.skip[v].is_some() {
                    return Some(SlashableOffence::SkipAndNotarize(voter, slot));
                }
                if let Some(notar_vote) = &self.votes.notar[v]
                    && block_hash != notar_vote.block_hash().unwrap()
                {
                    return Some(SlashableOffence::NotarDifferentHash(voter, slot));
                }
            }
            // notar-fallback conflicts with: finalize
            VoteKind::NotarFallback(_, _) => {
                if self.votes.finalize[v].is_some() {
                    return Some(SlashableOffence::NotarFallbackAndFinalize(voter, slot));
                }
            }
            // skip conflicts with: finalize, or notar
            VoteKind::Skip(_) => {
                if self.votes.finalize[v].is_some() {
                    return Some(SlashableOffence::SkipAndFinalize(voter, slot));
                } else if self.votes.notar[v].is_some() {
                    return Some(SlashableOffence::SkipAndNotarize(voter, slot));
                }
            }
            // skip-fallback conflicts with: finalize
            VoteKind::SkipFallback(_) => {
                if self.votes.finalize[v].is_some() {
                    return Some(SlashableOffence::SkipAndFinalize(voter, slot));
                }
            }
            // finalize conflicts with: skip(-fallback), or any notar-fallback
            VoteKind::Final(_) => {
                if self.votes.skip[v].is_some() || self.votes.skip_fallback[v].is_some() {
                    return Some(SlashableOffence::SkipAndFinalize(voter, slot));
                } else if !self.votes.notar_fallback[v].is_empty() {
                    return Some(SlashableOffence::NotarFallbackAndFinalize(voter, slot));
                }
            }
        }
        None
    }
/// Checks whether the given vote should be ignored as a duplicate.
///
/// Votes for which this returns `true` should never be counted.
/// Doing so could lead to double counting.
pub fn should_ignore_vote(&self, vote: &Vote) -> bool {
let v = vote.signer() as usize;
match vote.kind() {
VoteKind::Notar(_, _) => self.votes.notar[v].is_some(),
VoteKind::NotarFallback(_, block_hash) => {
self.votes.notar_fallback[v].contains_key(block_hash)
}
VoteKind::Skip(_) | VoteKind::SkipFallback(_) => {
self.votes.skip[v].is_some() || self.votes.skip_fallback[v].is_some()
}
VoteKind::Final(_) => self.votes.finalize[v].is_some(),
}
}
    /// Checks whether the given block is safe-to-notar.
    ///
    /// Safe-to-notar requires all of the following:
    /// - at least 20% of stake voted notar on the block, AND
    /// - 40% voted notar on it, or 60% voted notar on it or skip, AND
    /// - the block's parent is known and certified, AND
    /// - we ourselves voted skip or notarized a *different* block.
    ///
    /// Blocks that only miss votes are remembered in `pending_safe_to_notar`
    /// for later re-evaluation; successful checks are recorded in
    /// `sent_safe_to_notar` so the event is only emitted once.
    fn check_safe_to_notar(&mut self, block_hash: BlockHash) -> SafeToNotarStatus {
        // check general voted stake conditions
        let notar_stake = *self.voted_stakes.notar.get(&block_hash).unwrap_or(&0);
        let skip_stake = self.voted_stakes.skip;
        // below 20% notar stake the block is not even tracked as pending
        if !self.is_weakest_quorum(notar_stake) {
            return SafeToNotarStatus::AwaitingVotes;
        }
        // needs 40% notar alone, or 60% notar + skip combined
        if !self.is_weak_quorum(notar_stake) && !self.is_quorum(notar_stake + skip_stake) {
            self.pending_safe_to_notar.insert(block_hash);
            return SafeToNotarStatus::AwaitingVotes;
        }
        // check parent condition
        match self.parents.entry(block_hash.clone()) {
            // no parent recorded means the block itself is unknown: repair it
            Entry::Vacant(_) => return SafeToNotarStatus::MissingBlock,
            Entry::Occupied(entry) => {
                if entry.get() != &ParentStatus::Certified {
                    return SafeToNotarStatus::AwaitingVotes;
                }
            }
        }
        // check own vote
        let own_id = self.epoch_info.own_id;
        let skip = &self.votes.skip[own_id as usize];
        let notar = &self.votes.notar[own_id as usize];
        match (skip, notar) {
            // we voted skip: safe-to-notar holds
            (Some(_), _) => {
                self.pending_safe_to_notar.remove(&block_hash);
                self.sent_safe_to_notar.insert(block_hash);
                SafeToNotarStatus::SafeToNotar
            }
            (_, Some(n)) => {
                // only a notar vote for a different block qualifies; if we
                // notarized this very block, no fallback is needed
                if n.block_hash().unwrap() != &block_hash {
                    self.pending_safe_to_notar.remove(&block_hash);
                    self.sent_safe_to_notar.insert(block_hash);
                    SafeToNotarStatus::SafeToNotar
                } else {
                    SafeToNotarStatus::AwaitingVotes
                }
            }
            // we have not voted yet: wait for our own vote to arrive
            (None, None) => {
                self.pending_safe_to_notar.insert(block_hash);
                SafeToNotarStatus::AwaitingVotes
            }
        }
    }
/// Checks whether the given block hash has a notar-fallback cert in this slot.
pub fn is_notar_fallback(&self, block_hash: &BlockHash) -> bool {
self.certificates
.notar_fallback
.iter()
.any(|n| n.block_hash() == block_hash)
}
}
impl SlotVotes {
    /// Creates a new container for votes for the given number of validators.
    ///
    /// Initially, it contains no votes.
    pub fn new(num_validators: usize) -> Self {
        // one empty slot per validator for every single-vote type
        let no_votes = vec![None; num_validators];
        Self {
            notar: no_votes.clone(),
            notar_fallback: vec![BTreeMap::new(); num_validators],
            skip: no_votes.clone(),
            skip_fallback: no_votes.clone(),
            finalize: no_votes,
        }
    }
    /// Returns all notarization votes for the given block hash.
    // PERF: return iterators here (to avoid memory allocation)?
    pub fn notar_votes(&self, block_hash: &BlockHash) -> Vec<Vote> {
        self.notar
            .iter()
            .flatten()
            .filter(|vote| vote.block_hash().unwrap() == block_hash)
            .cloned()
            .collect()
    }
    /// Returns all notar-fallback votes for the given block hash.
    // PERF: return iterators here (to avoid memory allocation)?
    pub fn notar_fallback_votes(&self, block_hash: &BlockHash) -> Vec<Vote> {
        let mut votes = Vec::new();
        for per_validator in &self.notar_fallback {
            if let Some(vote) = per_validator.get(block_hash) {
                votes.push(vote.clone());
            }
        }
        votes
    }
    /// Returns all skip votes for this slot.
    // PERF: return iterators here (to avoid memory allocation)?
    pub fn skip_votes(&self) -> Vec<Vote> {
        self.skip.iter().flatten().cloned().collect()
    }
    /// Returns all skip-fallback votes for this slot.
    // PERF: return iterators here (to avoid memory allocation)?
    pub fn skip_fallback_votes(&self) -> Vec<Vote> {
        self.skip_fallback.iter().flatten().cloned().collect()
    }
    /// Returns all finalization votes for this slot.
    // PERF: return iterators here (to avoid memory allocation)?
    pub fn final_votes(&self) -> Vec<Vote> {
        self.finalize.iter().flatten().cloned().collect()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::ValidatorId;
    use crate::crypto::Hash;
    use crate::test_utils::generate_validators;
    /// Checks the 40/60/80% quorum thresholds against small validator sets.
    #[test]
    fn quorums() {
        let (_, epoch_info) = generate_validators(6);
        let slot_state = SlotState::new(Slot::new(0), epoch_info);
        // total stake 6: weak = ceil(12/5) = 3, quorum = ceil(18/5) = 4,
        // strong = ceil(24/5) = 5
        assert!(slot_state.is_weak_quorum(3));
        assert!(!slot_state.is_quorum(3));
        assert!(slot_state.is_quorum(4));
        assert!(!slot_state.is_strong_quorum(4));
        assert!(slot_state.is_strong_quorum(5));
        let (_, epoch_info) = generate_validators(11);
        let slot_state = SlotState::new(Slot::new(0), epoch_info);
        // total stake 11: weak = ceil(22/5) = 5, quorum = ceil(33/5) = 7,
        // strong = ceil(44/5) = 9
        assert!(slot_state.is_weak_quorum(5));
        assert!(!slot_state.is_quorum(5));
        assert!(slot_state.is_quorum(7));
        assert!(!slot_state.is_strong_quorum(7));
        assert!(slot_state.is_strong_quorum(9));
    }
    /// A stored notarization certificate should be visible afterwards.
    #[test]
    fn add_cert() {
        let (sks, epoch_info) = generate_validators(11);
        let (slot, hash): BlockId = (Slot::new(1), Hash::random_for_test().into());
        let mut slot_state = SlotState::new(slot, epoch_info.clone());
        // build a notar cert from all validators' notar votes
        let votes: Vec<_> = sks
            .iter()
            .enumerate()
            .map(|(i, sk)| Vote::new_notar(slot, hash.clone(), sk, i as ValidatorId))
            .collect();
        let cert = NotarCert::try_new(&votes, &epoch_info.validators).unwrap();
        assert!(slot_state.certificates.notar.is_none());
        slot_state.add_cert(Cert::Notar(cert));
        assert!(slot_state.certificates.notar.is_some());
    }
    /// Each added notar vote should be stored and increase the stake totals.
    #[test]
    fn add_vote() {
        let (sks, epoch_info) = generate_validators(11);
        let (slot, hash): BlockId = (Slot::new(1), Hash::random_for_test().into());
        let mut slot_state = SlotState::new(slot, epoch_info.clone());
        for (i, sk) in sks.iter().enumerate() {
            let vote = Vote::new_notar(slot, hash.clone(), sk, i as ValidatorId);
            let voter_stake = epoch_info.validator(i as ValidatorId).stake;
            assert!(slot_state.votes.notar[i].is_none());
            slot_state.add_vote(vote.clone(), voter_stake);
            let notar_vote = &slot_state.votes.notar[i];
            assert!(notar_vote.is_some());
            // running totals grow by one (unit) stake per added vote
            assert_eq!(
                slot_state.voted_stakes.notar.get(&hash),
                Some(&((i + 1) as Stake))
            );
            assert_eq!(slot_state.voted_stakes.notar_or_skip, (i + 1) as Stake);
        }
    }
    /// 33% notar + 33% skip (incl. our own) should trigger safe-to-notar.
    #[test]
    fn safe_to_notar() {
        let (sks, epoch_info) = generate_validators(3);
        let (slot, hash): BlockId = (Slot::new(1), Hash::random_for_test().into());
        let mut slot_state = SlotState::new(slot, epoch_info.clone());
        // mark parent as notarized(-fallback)
        slot_state.notify_parent_known(hash.clone());
        slot_state.notify_parent_certified(hash.clone());
        // 33% notar alone has no effect
        let vote = Vote::new_notar(slot, hash.clone(), &sks[1], 1);
        let voter_stake = epoch_info.validator(1).stake;
        let (certs, events, blocks) = slot_state.add_vote(vote.clone(), voter_stake);
        assert!(certs.is_empty());
        assert!(events.is_empty());
        assert!(blocks.is_empty());
        // additional 33% skip should lead to safe-to-notar
        let vote = Vote::new_skip(slot, &sks[0], 0);
        let voter_stake = epoch_info.validator(0).stake;
        let (certs, events, blocks) = slot_state.add_vote(vote.clone(), voter_stake);
        assert!(certs.is_empty());
        assert_eq!(events.len(), 1);
        assert!(blocks.is_empty());
        match &events[0] {
            VotorEvent::SafeToNotar(s, h) => {
                assert_eq!(*s, slot);
                assert_eq!(*h, hash);
            }
            _ => unreachable!(),
        }
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/consensus/pool/parent_ready_tracker.rs | src/consensus/pool/parent_ready_tracker.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Tracks the parent-ready condition.
//!
//! The parent-ready condition pertains to a slot `s` and a block hash `hash(b)`,
//! where `s` is the first slot of a leader window and `s > slot(b)`.
//! Specifically, it is defined as the following:
//! - Block `b` is notarized or notarized-fallback, AND
//! - slots `slot(b) + 1` (inclusive) to `s` (non-inclusive) are skip-certified.
//!
//! Additional restrictions on notarization votes ensure that the parent-ready
//! condition holds for a block `b` only if it also holds for all ancestors of `b`.
//! Together this ensures that the block `b` is a valid parent for block
//! production, i.e., under good network conditions an honest leader proposing
//! a block with parent `b` in slot `s` will have their block finalized.
mod parent_ready_state;
use std::collections::HashMap;
use either::Either;
use smallvec::SmallVec;
use tokio::sync::oneshot;
use self::parent_ready_state::ParentReadyState;
use crate::consensus::pool::finality_tracker::FinalizationEvent;
use crate::{BlockId, Slot};
/// Keeps track of the parent-ready condition across slots.
///
/// Maps each slot to its [`ParentReadyState`]; entries are created lazily on
/// first access.
pub struct ParentReadyTracker(HashMap<Slot, ParentReadyState>);
impl ParentReadyTracker {
    /// Marks the given block as notarized-fallback.
    ///
    /// Returns a list of any newly connected parents.
    /// All of these will have the given block ID as the parent.
    /// Returns an empty list if the block was already marked before.
    pub fn mark_notar_fallback(&mut self, id: &BlockId) -> SmallVec<[(Slot, BlockId); 1]> {
        let (slot, hash) = id.clone();
        let state = self.slot_state(slot);
        // `mark_notar_fallback` reports whether this is new information;
        // nothing to propagate for a repeated notification
        if !state.mark_notar_fallback(hash) {
            return SmallVec::new();
        }
        // add this block as valid parent to any skip-connected future windows
        let mut newly_certified = SmallVec::new();
        for slot in slot.future_slots() {
            let state = self.slot_state(slot);
            // only the first slot of a window accepts new ready parents
            if slot.is_start_of_window() {
                state.add_to_ready(id.clone());
                newly_certified.push((slot, id.clone()));
            }
            // parents propagate only across contiguous skip-certified slots,
            // so stop at the first slot that is not skip-certified
            if !state.is_skip_certified() {
                break;
            }
        }
        newly_certified
    }
    /// Marks the given slot as skipped.
    ///
    /// Returns a list of any newly connected parents.
    /// Returns an empty list if the slot was already marked before.
    pub fn mark_skipped(&mut self, marked_slot: Slot) -> SmallVec<[(Slot, BlockId); 1]> {
        let state = self.slot_state(marked_slot);
        // `mark_skip` reports whether this is new information
        if !state.mark_skip() {
            return SmallVec::new();
        }
        // find possible parents for future windows
        let mut potential_parents = SmallVec::<[BlockId; 1]>::new();
        let window_slots = marked_slot.slots_in_window();
        // going back from `marked_slot` find any skip-connected parents
        for slot in window_slots.filter(|s| *s <= marked_slot).rev() {
            let state = self.slot_state(slot);
            // add any notarized-fallback blocks from this slot
            // (blocks in `marked_slot` itself are excluded: they were already
            // propagated by `mark_notar_fallback`, see the
            // `no_double_counting_notar_and_skip` test)
            if slot != marked_slot {
                for nf in state.notar_fallback_blocks() {
                    potential_parents.push((slot, nf));
                }
            }
            // stop as soon as we see any non-skipped slot
            if !state.is_skip_certified() {
                break;
            }
            // if the slot is skipped, add its parents as well
            potential_parents.extend(state.ready_block_ids().iter().cloned());
        }
        // add these as valid parents to any skip-connected future windows
        let mut newly_certified = SmallVec::new();
        for slot in marked_slot.future_slots() {
            let state = self.slot_state(slot);
            // add parents to this window
            if slot.is_start_of_window() {
                for parent in &potential_parents {
                    state.add_to_ready(parent.clone());
                    newly_certified.push((slot, parent.clone()));
                }
            }
            // stop as soon as we see any non-skipped slot
            if !state.is_skip_certified() {
                break;
            }
        }
        newly_certified
    }
/// Handles the given finalization event.
///
/// Marks blocks as notarized-fallback and slots as skipped as appropriate.
///
/// Returns at most one newly ready parent (for the highest slot).
/// For consistency with other functions it still returns a `Vec`.
pub fn handle_finalization(
&mut self,
event: FinalizationEvent,
) -> SmallVec<[(Slot, BlockId); 1]> {
let mut parents_ready = SmallVec::<[(Slot, BlockId); 1]>::new();
if let Some(finalized) = &event.finalized {
parents_ready.extend(self.mark_notar_fallback(finalized));
}
for block_id in &event.implicitly_finalized {
parents_ready.extend(self.mark_notar_fallback(block_id));
}
for slot in event.implicitly_skipped {
parents_ready.extend(self.mark_skipped(slot));
}
// keep only highest slot ParentReady
let maybe_parent = parents_ready.iter().max_by_key(|(slot, _)| slot);
maybe_parent.into_iter().cloned().collect()
}
/// Returns list of all valid parents for the given slot, as of now.
///
/// The list can be empty if there are no valid parents yet.
pub fn parents_ready(&self, slot: Slot) -> &[BlockId] {
self.0
.get(&slot)
.map_or(&[], |state| state.ready_block_ids())
}
/// Returns a ready parent if available, otherwise returns a oneshot channel.
///
/// The oneshot channel will receive the first ready parent once it becomes available.
pub fn wait_for_parent_ready(
&mut self,
slot: Slot,
) -> Either<BlockId, oneshot::Receiver<BlockId>> {
let state = self.0.entry(slot).or_default();
state.wait_for_parent_ready()
}
/// Mutably accesses the [`ParentReadyState`] for the given `slot`.
///
/// Initializes the state with [`Default`] if necessary.
fn slot_state(&mut self, slot: Slot) -> &mut ParentReadyState {
self.0.entry(slot).or_default()
}
}
impl Default for ParentReadyTracker {
    /// Creates a new empty tracker.
    ///
    /// Initially, only the genesis block is considered notarized-fallback.
    fn default() -> Self {
        Self(HashMap::from([(
            Slot::genesis(),
            ParentReadyState::genesis(),
        )]))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::crypto::Hash;
    use crate::crypto::merkle::GENESIS_BLOCK_HASH;
    use crate::types::SLOTS_PER_WINDOW;
    /// A notarized block at the end of a window becomes a ready parent for
    /// the next window's first slot.
    #[test]
    fn basic() {
        let mut tracker = ParentReadyTracker::default();
        for s in Slot::genesis()
            .future_slots()
            .take(2 * SLOTS_PER_WINDOW as usize)
        {
            let block = (s, Hash::random_for_test().into());
            let new_valid_parents = tracker.mark_notar_fallback(&block);
            if s == s.last_slot_in_window() {
                assert!(new_valid_parents.contains(&(s.next(), block)));
            } else {
                assert!(new_valid_parents.is_empty());
            }
        }
    }
    /// Skipping the whole genesis window makes genesis a ready parent for
    /// the following window.
    #[test]
    fn genesis() {
        let genesis = (Slot::genesis(), GENESIS_BLOCK_HASH);
        let mut tracker = ParentReadyTracker::default();
        for slot in genesis.0.slots_in_window() {
            let new_valid_parents = tracker.mark_skipped(slot);
            if slot == slot.last_slot_in_window() {
                assert!(new_valid_parents.contains(&(slot.next(), genesis.clone())));
            } else {
                assert!(new_valid_parents.is_empty());
            }
        }
    }
    /// Skipping all slots connects both genesis and a notarized block as
    /// ready parents of the next window.
    #[test]
    fn skips() {
        let genesis = (Slot::genesis(), GENESIS_BLOCK_HASH);
        let slot = Slot::genesis().next();
        let block = (slot, Hash::random_for_test().into());
        let mut tracker = ParentReadyTracker::default();
        assert!(tracker.mark_notar_fallback(&block).is_empty());
        for s in slot.slots_in_window() {
            let new_valid_parents = tracker.mark_skipped(s);
            if s == s.last_slot_in_window() {
                assert!(new_valid_parents.contains(&(s.next(), block.clone())));
                assert!(new_valid_parents.contains(&(s.next(), genesis.clone())));
            } else {
                assert!(new_valid_parents.is_empty());
            }
        }
    }
    /// Skip certificates arriving out of order still connect parents once
    /// the chain of skips becomes contiguous.
    #[test]
    fn out_of_order_skips() {
        let genesis = (Slot::genesis(), GENESIS_BLOCK_HASH);
        let slot = Slot::genesis().next();
        let block = (slot, Hash::random_for_test().into());
        let mut tracker = ParentReadyTracker::default();
        assert_eq!(slot.slots_in_window().count(), 4);
        assert!(tracker.mark_skipped(Slot::new(3)).is_empty());
        assert!(tracker.mark_skipped(Slot::new(2)).is_empty());
        assert_eq!(
            tracker.mark_notar_fallback(&block).to_vec(),
            vec![(Slot::new(4), block)]
        );
        assert_eq!(
            tracker.mark_skipped(slot).to_vec(),
            vec![(Slot::new(4), genesis)]
        );
    }
    /// Notarizations arriving out of order only connect the block adjacent
    /// to the window boundary.
    #[test]
    fn out_of_order_notars() {
        assert_eq!(Slot::genesis().slots_in_window().count(), 4);
        let block1 = (Slot::new(1), Hash::random_for_test().into());
        let block2 = (Slot::new(2), Hash::random_for_test().into());
        let block3 = (Slot::new(3), Hash::random_for_test().into());
        let mut tracker = ParentReadyTracker::default();
        assert!(tracker.mark_notar_fallback(&block2).is_empty());
        assert_eq!(
            tracker.mark_notar_fallback(&block3).to_vec(),
            vec![(Slot::new(4), block3)]
        );
        assert!(tracker.mark_notar_fallback(&block1).is_empty());
    }
    /// A parent already reported for one window must not be reported again
    /// when further skips extend the chain within the same propagation.
    #[test]
    fn no_double_counting_skip_chain() {
        assert_eq!(Slot::genesis().slots_in_window().count(), 4);
        let slot = Slot::genesis().next();
        let block = (slot, Hash::random_for_test().into());
        let mut tracker = ParentReadyTracker::default();
        assert!(tracker.mark_notar_fallback(&block).is_empty());
        assert!(tracker.mark_skipped(Slot::new(2)).is_empty());
        assert_eq!(
            tracker.mark_skipped(Slot::new(3)).to_vec(),
            vec![(Slot::new(4), block.clone())]
        );
        assert!(tracker.mark_skipped(Slot::new(4)).is_empty());
        assert!(tracker.mark_skipped(Slot::new(5)).is_empty());
        assert!(tracker.mark_skipped(Slot::new(6)).is_empty());
        assert_eq!(
            tracker.mark_skipped(Slot::new(7)).to_vec(),
            vec![(Slot::new(8), block)]
        );
    }
    /// Skipping a slot must not re-report its own notarized-fallback block.
    #[test]
    fn no_double_counting_notar_and_skip() {
        assert_eq!(Slot::genesis().slots_in_window().count(), 4);
        let slot = Slot::genesis().next();
        let block = (slot, Hash::random_for_test().into());
        let mut tracker = ParentReadyTracker::default();
        assert!(tracker.mark_notar_fallback(&block).is_empty());
        assert!(tracker.mark_skipped(Slot::new(2)).is_empty());
        assert_eq!(
            tracker.mark_skipped(Slot::new(3)).to_vec(),
            vec![(Slot::new(4), block)]
        );
        // notably this does not re-issue a ParentReady for `block`
        assert_eq!(
            tracker.mark_skipped(Slot::new(1)).to_vec(),
            vec![(Slot::new(4), (Slot::genesis(), GENESIS_BLOCK_HASH))]
        );
    }
    /// The oneshot channel fires once the awaited window gains a ready parent.
    #[test]
    fn wait_for_parent_ready() {
        let genesis = (Slot::genesis(), GENESIS_BLOCK_HASH);
        let mut windows = Slot::windows();
        let window1 = windows.next().unwrap();
        let window2 = windows.next().unwrap();
        let window3 = windows.next().unwrap();
        let mut tracker = ParentReadyTracker::default();
        // skip slots in first window
        for slot in window1.slots_in_window() {
            if slot.is_genesis() {
                continue;
            }
            tracker.mark_skipped(slot);
        }
        // genesis should be valid parent for 2nd window
        let res = tracker.wait_for_parent_ready(window2);
        let Either::Left((slot, hash)) = res else {
            panic!("unexpected result {res:?}");
        };
        assert_eq!((slot, hash), genesis);
        // parent should not yet be ready
        let res = tracker.wait_for_parent_ready(window3);
        let Either::Right(mut rx) = res else {
            panic!("unexpected result {res:?}");
        };
        let Err(oneshot::error::TryRecvError::Empty) = rx.try_recv() else {
            panic!("parent should not yet be ready");
        };
        // skip slots in second window
        for slot in window2.slots_in_window() {
            tracker.mark_skipped(slot);
        }
        // now we should be notified of genesis as valid parent
        assert_eq!(rx.try_recv(), Ok(genesis));
    }
    /// Finalization events yield exactly one ParentReady (highest slot).
    #[test]
    fn parent_ready_finalized() {
        let mut windows = Slot::windows();
        let window2 = windows.nth(1).unwrap();
        let window3 = windows.next().unwrap();
        let window4 = windows.next().unwrap();
        let window5 = windows.next().unwrap();
        let mut tracker = ParentReadyTracker::default();
        // basic case where finalized slot is first in its window
        let block = (
            window2.first_slot_in_window(),
            Hash::random_for_test().into(),
        );
        let parent = (block.0.prev(), Hash::random_for_test().into());
        let event = FinalizationEvent {
            finalized: Some(block.clone()),
            implicitly_finalized: vec![parent.clone()],
            implicitly_skipped: vec![],
        };
        let parents = tracker.handle_finalization(event);
        assert_eq!(parents.len(), 1);
        let parent_ready = &parents[0];
        assert_eq!(parent_ready.0, block.0);
        assert_eq!(parent_ready.1, parent);
        // case where an entire window is skipped between parent and finalized block
        let block = (
            window4.first_slot_in_window(),
            Hash::random_for_test().into(),
        );
        let parent = (
            window3.first_slot_in_window().prev(),
            Hash::random_for_test().into(),
        );
        let event = FinalizationEvent {
            finalized: Some(block.clone()),
            implicitly_finalized: vec![parent.clone()],
            implicitly_skipped: window3.slots_in_window().collect(),
        };
        let parents = tracker.handle_finalization(event);
        assert_eq!(parents.len(), 1);
        let parent_ready = &parents[0];
        assert_eq!(parent_ready.0, block.0);
        assert_eq!(parent_ready.1, parent);
        // case where finalized slot is NOT first in its window
        let block = (
            window5.first_slot_in_window().next(),
            Hash::random_for_test().into(),
        );
        let parent = (block.0.prev(), Hash::random_for_test().into());
        let parent_parent = (parent.0.prev(), Hash::random_for_test().into());
        let event = FinalizationEvent {
            finalized: Some(block.clone()),
            implicitly_finalized: vec![parent.clone(), parent_parent.clone()],
            implicitly_skipped: vec![],
        };
        let parents = tracker.handle_finalization(event);
        assert_eq!(parents.len(), 1);
        let parent_ready = &parents[0];
        assert_eq!(parent_ready.0, parent.0);
        assert_eq!(parent_ready.1, parent_parent);
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/consensus/pool/finality_tracker.rs | src/consensus/pool/finality_tracker.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Tracks finality of blocks.
//!
//! This is used internally as part of [`PoolImpl`].
//!
//! Keeps track of:
//! - Direct finalization of blocks,
//! - resulting indirect finalizations of blocks, AND
//! - resulting implicit skipping of slots
//!
//! It does this based on:
//! - Notarization of blocks,
//! - finalization of slots, AND
//! - availability of blocks and knowledge of their parents.
//!
//! [`PoolImpl`]: crate::consensus::pool::PoolImpl
use std::collections::BTreeMap;
use std::collections::btree_map::Entry;
use crate::BlockId;
use crate::crypto::merkle::{BlockHash, GENESIS_BLOCK_HASH};
use crate::types::Slot;
/// Tracks finality of blocks.
///
/// Driven by [`Self::mark_notarized`], [`Self::mark_finalized`],
/// [`Self::mark_fast_finalized`] and [`Self::add_parent`]; each reports
/// newly finalized/skipped slots via a [`FinalizationEvent`].
pub struct FinalityTracker {
    /// Current finalization status for each slot.
    ///
    /// Slots without an entry have had no finality-relevant information recorded yet.
    status: BTreeMap<Slot, FinalizationStatus>,
    /// Maps blocks to their parents, as learned via [`Self::add_parent`].
    parents: BTreeMap<BlockId, BlockId>,
    /// The highest finalized slot so far.
    ///
    /// This means that slot has a fast finalization OR finalization + notarization.
    /// Also, all prior slots are finalized (directly or implicitly) OR implicitly skipped.
    highest_finalized_slot: Slot,
}
/// Possible states a slot can be in regarding finality.
///
/// Stored per slot by [`FinalityTracker`] and updated as notarizations,
/// finalizations and parent information arrive.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum FinalizationStatus {
    /// Block with given hash is notarized, but slot is not yet (known to be) finalized.
    Notarized(BlockHash),
    /// Slot is known to be finalized, but we are missing the notarization certificate.
    ///
    /// Upgraded to `Finalized` once the notarized block hash becomes known.
    FinalPendingNotar,
    /// Slot is finalized, and notarized block is known to have the given hash.
    Finalized(BlockHash),
    /// Block with given hash was implicitly finalized through later finalization.
    ImplicitlyFinalized(BlockHash),
    /// Slot was implicitly skipped through later finalization.
    ImplicitlySkipped,
}
/// Information about newly finalized slots.
///
/// Returned by every [`FinalityTracker`] method that can advance finality.
/// An all-empty event (the [`Default`] value) means nothing new was finalized.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct FinalizationEvent {
    // TODO: instead use `Option<FinalizationEvent>`?
    /// Directly finalized block, if any.
    pub(super) finalized: Option<BlockId>,
    /// Any implicitly finalized blocks.
    ///
    /// Pushed newest-first, as ancestors are discovered recursively.
    pub(super) implicitly_finalized: Vec<BlockId>,
    /// Any implicitly skipped slots.
    pub(super) implicitly_skipped: Vec<Slot>,
}
impl FinalityTracker {
    /// Adds the given `parent` for the given `block`.
    ///
    /// Handles possibly resulting implicit finalizations.
    ///
    /// Returns a [`FinalizationEvent`] that contains information about newly finalized slots.
    ///
    /// # Panics
    ///
    /// Panics if `parent` is not from an earlier slot than `block`, or if a
    /// *different* parent was previously registered for the same block.
    pub fn add_parent(&mut self, block: BlockId, parent: BlockId) -> FinalizationEvent {
        assert!(block.0 > parent.0);
        match self.parents.entry(block.clone()) {
            Entry::Occupied(e) => {
                // re-adding the same parent is a no-op; a different parent is a bug
                assert!(e.get() == &parent);
                return FinalizationEvent::default();
            }
            Entry::Vacant(e) => {
                e.insert(parent.clone());
            }
        }
        let (slot, block_hash) = block;
        let Some(status) = self.status.get(&slot) else {
            // no finality information for this slot yet, nothing to propagate
            return FinalizationEvent::default();
        };
        match status {
            FinalizationStatus::Finalized(hash) | FinalizationStatus::ImplicitlyFinalized(hash) => {
                let mut event = FinalizationEvent::default();
                // the block is already finalized, so the newly learned parent
                // (and transitively its ancestors) become implicitly finalized
                if &block_hash == hash {
                    self.handle_implicitly_finalized(block.0, parent, &mut event);
                }
                event
            }
            FinalizationStatus::Notarized(_)
            | FinalizationStatus::FinalPendingNotar
            | FinalizationStatus::ImplicitlySkipped => FinalizationEvent::default(),
        }
    }
    /// Marks the given block as fast finalized.
    ///
    /// If the block was newly finalized, handles resulting implicit finalizations.
    ///
    /// Returns a [`FinalizationEvent`] that contains information about newly finalized slots.
    ///
    /// # Panics
    ///
    /// Panics on evidence of a consensus safety violation, e.g. a conflicting
    /// block hash already recorded for the slot, or the slot already skipped.
    pub fn mark_fast_finalized(&mut self, slot: Slot, block_hash: BlockHash) -> FinalizationEvent {
        let old = self
            .status
            .insert(slot, FinalizationStatus::Finalized(block_hash.clone()));
        if let Some(status) = old {
            match status {
                FinalizationStatus::Finalized(hash)
                | FinalizationStatus::ImplicitlyFinalized(hash) => {
                    // already finalized before; nothing new to report
                    assert_eq!(hash, block_hash, "consensus safety violation");
                    return FinalizationEvent::default();
                }
                FinalizationStatus::Notarized(hash) => {
                    assert_eq!(hash, block_hash, "consensus safety violation");
                }
                FinalizationStatus::FinalPendingNotar => {}
                FinalizationStatus::ImplicitlySkipped => unreachable!("consensus safety violation"),
            }
        }
        let mut event = FinalizationEvent::default();
        self.handle_finalized_block((slot, block_hash), &mut event);
        event
    }
    /// Marks the given block as notarized.
    ///
    /// Handles possibly resulting direct finalization of the block.
    /// Further, also handles any possibly resulting implicit finalizations.
    ///
    /// Returns a [`FinalizationEvent`] that contains information about newly finalized slots.
    ///
    /// # Panics
    ///
    /// Panics if a different block hash was already recorded for this slot.
    pub fn mark_notarized(&mut self, slot: Slot, block_hash: BlockHash) -> FinalizationEvent {
        let old = self
            .status
            .insert(slot, FinalizationStatus::Notarized(block_hash.clone()));
        let Some(status) = old else {
            return FinalizationEvent::default();
        };
        match status {
            FinalizationStatus::Notarized(hash)
            | FinalizationStatus::Finalized(hash)
            | FinalizationStatus::ImplicitlyFinalized(hash) => {
                assert_eq!(hash, block_hash, "consensus safety violation");
                // NOTE(review): if the previous status was `(Implicitly)Finalized`,
                // the insert above downgraded it to `Notarized` and it is not
                // restored here (unlike in `handle_implicitly_finalized`).
                // Confirm a later `mark_finalized` cannot thereby re-emit a
                // duplicate finalization event.
                FinalizationEvent::default()
            }
            // NOTE(review): this likewise leaves an `ImplicitlySkipped` slot
            // downgraded to `Notarized` — verify this is intended.
            FinalizationStatus::ImplicitlySkipped => FinalizationEvent::default(),
            FinalizationStatus::FinalPendingNotar => {
                // the missing notarization arrived: the slot is now fully finalized
                let mut event = FinalizationEvent::default();
                self.status
                    .insert(slot, FinalizationStatus::Finalized(block_hash.clone()));
                self.handle_finalized_block((slot, block_hash), &mut event);
                event
            }
        }
    }
    /// Marks the given slot as finalized.
    ///
    /// Handles possibly resulting direct finalization of a block in this slot.
    /// Further, also handles any possibly resulting implicit finalizations.
    ///
    /// Returns a [`FinalizationEvent`] that contains information about newly finalized slots.
    ///
    /// # Panics
    ///
    /// Panics if the slot was already implicitly skipped (safety violation).
    pub fn mark_finalized(&mut self, slot: Slot) -> FinalizationEvent {
        let old = self
            .status
            .insert(slot, FinalizationStatus::FinalPendingNotar);
        let Some(status) = old else {
            return FinalizationEvent::default();
        };
        match status {
            // NOTE(review): if the slot was already `Finalized`, the insert
            // above downgraded its status to `FinalPendingNotar`; a later
            // `mark_notarized` would then re-run `handle_finalized_block` —
            // confirm this is harmless.
            FinalizationStatus::FinalPendingNotar
            | FinalizationStatus::Finalized(_)
            | FinalizationStatus::ImplicitlyFinalized(_) => FinalizationEvent::default(),
            FinalizationStatus::Notarized(block_hash) => {
                // notarization was already known: finalize directly
                let mut event = FinalizationEvent::default();
                self.status
                    .insert(slot, FinalizationStatus::Finalized(block_hash.clone()));
                self.handle_finalized_block((slot, block_hash), &mut event);
                event
            }
            FinalizationStatus::ImplicitlySkipped => unreachable!("consensus safety violation"),
        }
    }
    /// Returns the highest finalized slot.
    ///
    /// This means that slot has a fast finalization OR finalization + notarization.
    /// Also, all prior slots are finalized (directly or implicitly) OR implicitly skipped.
    pub fn highest_finalized_slot(&self) -> Slot {
        self.highest_finalized_slot
    }
    /// Handles the direct finalization of the given block.
    ///
    /// Recurses through ancestors, potentially implicitly finalizing them.
    ///
    /// Updates the `event` all along the way with:
    /// - The finalized block,
    /// - any potentially implicitly finalized blocks, AND
    /// - any implicitly skipped slots.
    fn handle_finalized_block(&mut self, finalized: BlockId, event: &mut FinalizationEvent) {
        let (slot, _) = finalized;
        // record the direct finalization and advance the watermark
        event.finalized = Some(finalized.clone());
        self.highest_finalized_slot = slot.max(self.highest_finalized_slot);
        // if the parent is known, it (and its ancestors) become implicitly finalized
        if let Some(parent) = self.parents.get(&finalized).cloned() {
            self.handle_implicitly_finalized(slot, parent, event);
        }
    }
    /// Handles the indirect finalization of the given block.
    ///
    /// Recurses through ancestors, potentially implicitly finalizing them as well.
    ///
    /// Updates the `event` all along the way with:
    /// - Any potentially implicitly finalized blocks, AND
    /// - any implicitly skipped slots.
    fn handle_implicitly_finalized(
        &mut self,
        source_slot: Slot,
        implicitly_finalized: BlockId,
        event: &mut FinalizationEvent,
    ) {
        assert!(source_slot > implicitly_finalized.0);
        // implicitly skip slots in between: every slot strictly between the
        // implicitly finalized ancestor and the finalizing descendant is empty
        for slot in implicitly_finalized.0.future_slots() {
            if slot == source_slot {
                break;
            }
            let old = self
                .status
                .insert(slot, FinalizationStatus::ImplicitlySkipped);
            if let Some(status) = old {
                match status {
                    FinalizationStatus::ImplicitlySkipped => {
                        // this slot (and thus everything older) was already
                        // handled by an earlier finalization; stop here
                        return;
                    }
                    FinalizationStatus::Notarized(_) => {}
                    FinalizationStatus::FinalPendingNotar
                    | FinalizationStatus::Finalized(_)
                    | FinalizationStatus::ImplicitlyFinalized(_) => {
                        unreachable!("consensus safety violation")
                    }
                }
            }
            event.implicitly_skipped.push(slot);
        }
        // mark block as implicitly finalized
        let (slot, block_hash) = implicitly_finalized.clone();
        let old = self.status.insert(
            slot,
            FinalizationStatus::ImplicitlyFinalized(block_hash.clone()),
        );
        if let Some(status) = old {
            match &status {
                FinalizationStatus::Finalized(hash)
                | FinalizationStatus::ImplicitlyFinalized(hash) => {
                    assert_eq!(*hash, block_hash, "consensus safety violation");
                    // do not downgrade a direct finalization to an implicit
                    // one: restore the previous status and stop recursing
                    self.status.insert(slot, status);
                    return;
                }
                FinalizationStatus::Notarized(_) | FinalizationStatus::FinalPendingNotar => {}
                FinalizationStatus::ImplicitlySkipped => {
                    unreachable!("consensus safety violation")
                }
            }
        }
        event
            .implicitly_finalized
            .push(implicitly_finalized.clone());
        // recurse through ancestors
        if let Some(parent) = self.parents.get(&implicitly_finalized).cloned() {
            self.handle_implicitly_finalized(implicitly_finalized.0, parent, event);
        }
    }
}
impl Default for FinalityTracker {
    /// Creates a new empty tracker.
    ///
    /// The genesis block starts out recorded as notarized, and the genesis
    /// slot is the initial highest finalized slot.
    fn default() -> Self {
        Self {
            status: BTreeMap::from([(
                Slot::genesis(),
                FinalizationStatus::Notarized(GENESIS_BLOCK_HASH),
            )]),
            parents: BTreeMap::new(),
            highest_finalized_slot: Slot::genesis(),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::crypto::Hash;
    // Covers slow/fast direct finalization and implicit finalization,
    // both with and without implicitly skipped slots in between.
    #[test]
    fn basic() {
        let mut tracker = FinalityTracker::default();
        // slow finalize a block
        let slot1 = Slot::genesis().next();
        let hash1: BlockHash = Hash::random_for_test().into();
        let event = tracker.mark_notarized(slot1, hash1.clone());
        assert_eq!(event, FinalizationEvent::default());
        let event = tracker.mark_finalized(slot1);
        assert_eq!(event.finalized, Some((slot1, hash1)));
        assert_eq!(event.implicitly_finalized, vec![]);
        assert_eq!(event.implicitly_skipped, vec![]);
        // fast finalize a block
        let slot2 = slot1.next();
        let hash2: BlockHash = Hash::random_for_test().into();
        let event = tracker.mark_fast_finalized(slot2, hash2.clone());
        assert_eq!(event.finalized, Some((slot2, hash2)));
        assert_eq!(event.implicitly_finalized, vec![]);
        assert_eq!(event.implicitly_skipped, vec![]);
        // implicitly finalize a block WITHOUT skips
        let slot4 = slot2.next().next();
        let hash3: BlockHash = Hash::random_for_test().into();
        let hash4: BlockHash = Hash::random_for_test().into();
        let event = tracker.add_parent((slot4, hash4.clone()), (slot4.prev(), hash3.clone()));
        assert_eq!(event, FinalizationEvent::default());
        let event = tracker.mark_fast_finalized(slot4, hash4.clone());
        assert_eq!(event.finalized, Some((slot4, hash4)));
        assert_eq!(event.implicitly_finalized, vec![(slot4.prev(), hash3)]);
        assert_eq!(event.implicitly_skipped, vec![]);
        // implicitly finalize a block WITH skips (slot6 has no block)
        let slot7 = slot4.next().next().next();
        let hash5: BlockHash = Hash::random_for_test().into();
        let hash7: BlockHash = Hash::random_for_test().into();
        let event =
            tracker.add_parent((slot7, hash7.clone()), (slot7.prev().prev(), hash5.clone()));
        assert_eq!(event, FinalizationEvent::default());
        let event = tracker.mark_fast_finalized(slot7, hash7.clone());
        assert_eq!(event.finalized, Some((slot7, hash7.clone())));
        assert_eq!(
            event.implicitly_finalized,
            vec![(slot7.prev().prev(), hash5)]
        );
        assert_eq!(event.implicitly_skipped, vec![slot7.prev()]);
    }
    // Covers that repeated notifications about already-finalized slots/blocks
    // do not produce duplicate finalization events.
    #[test]
    fn no_duplicates() {
        let mut tracker = FinalityTracker::default();
        // slow finalize + fast finalize a block
        let slot1 = Slot::genesis().next();
        let hash1: BlockHash = Hash::random_for_test().into();
        let event = tracker.mark_finalized(slot1);
        assert_eq!(event, FinalizationEvent::default());
        let event = tracker.mark_notarized(slot1, hash1.clone());
        assert_eq!(event.finalized, Some((slot1, hash1.clone())));
        assert_eq!(event.implicitly_finalized, vec![]);
        assert_eq!(event.implicitly_skipped, vec![]);
        let event = tracker.mark_fast_finalized(slot1, hash1.clone());
        assert_eq!(event, FinalizationEvent::default());
        // do NOT implicitly finalize parent, that is already directly finalized
        let slot2 = slot1.next();
        let hash2: BlockHash = Hash::random_for_test().into();
        let event = tracker.add_parent((slot2, hash2.clone()), (slot2.prev(), hash1.clone()));
        assert_eq!(event, FinalizationEvent::default());
        let event = tracker.mark_fast_finalized(slot2, hash2.clone());
        assert_eq!(event.finalized, Some((slot2, hash2.clone())));
        assert_eq!(event.implicitly_finalized, vec![]);
        assert_eq!(event.implicitly_skipped, vec![]);
        // implicitly finalize a block WITHOUT skips
        let slot4 = slot2.next().next();
        let hash3: BlockHash = Hash::random_for_test().into();
        let hash4: BlockHash = Hash::random_for_test().into();
        let event = tracker.add_parent((slot4, hash4.clone()), (slot4.prev(), hash3.clone()));
        assert_eq!(event, FinalizationEvent::default());
        let event = tracker.mark_fast_finalized(slot4, hash4.clone());
        assert_eq!(event.finalized, Some((slot4, hash4.clone())));
        assert_eq!(
            event.implicitly_finalized,
            vec![(slot4.prev(), hash3.clone())]
        );
        assert_eq!(event.implicitly_skipped, vec![]);
        // do NOT implicitly finalize parent again when adding parent again
        let event = tracker.add_parent((slot4, hash4.clone()), (slot4.prev(), hash3.clone()));
        assert_eq!(event, FinalizationEvent::default());
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/consensus/pool/parent_ready_tracker/parent_ready_state.rs | src/consensus/pool/parent_ready_tracker/parent_ready_state.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Implements the [`ParentReadyState`] data structure.
//!
//! It holds the necessary state for a given slot to track the parent-ready condition.
//! This is used by the [`super::ParentReadyTracker`].
use either::Either;
use log::warn;
use smallvec::{SmallVec, smallvec};
use tokio::sync::oneshot;
use crate::BlockId;
use crate::crypto::merkle::{BlockHash, GENESIS_BLOCK_HASH};
/// Status of whether an individual slot has a parent ready.
enum IsReady {
    /// Do not have a parent ready for this slot yet.
    ///
    /// Might have someone waiting to hear when the slot does become ready.
    /// At most one waiter is supported per slot
    /// (see [`ParentReadyState::wait_for_parent_ready`]).
    NotReady(Option<oneshot::Sender<BlockId>>),
    /// Have at least one parent ready for this slot.
    ///
    /// We can potentially have multiple parents ready per slot, but we
    /// optimize for the common case where there will only be one.
    ///
    /// Invariant: the list is non-empty and contains no duplicates.
    Ready(SmallVec<[BlockId; 1]>),
}
impl Default for IsReady {
fn default() -> Self {
IsReady::NotReady(None)
}
}
/// Holds the relevant state for a single slot.
///
/// Tracks the certificates observed for the slot (skip and notar-fallback)
/// as well as the current parent-ready status.
#[derive(Default)]
pub(super) struct ParentReadyState {
    /// Whether this slot is skip-certified.
    skip: bool,
    /// Blocks that are notarized-fallback for this slot, if any.
    ///
    /// We can potentially have multiple notar fallbacks per slot,
    /// but we optimize for the common case where there will only be one.
    notar_fallbacks: SmallVec<[BlockHash; 1]>,
    /// Current status of the parent-ready condition for this slot.
    // NOTE: Do not make this field more visible.
    // Updating it must sometimes produce additional actions.
    is_ready: IsReady,
}
impl ParentReadyState {
    /// Creates a new [`ParentReadyState`] for the genesis block.
    ///
    /// The genesis block hash counts as notarized-fallback from the start.
    pub(super) fn genesis() -> Self {
        Self {
            skip: false,
            notar_fallbacks: smallvec![GENESIS_BLOCK_HASH],
            is_ready: IsReady::default(),
        }
    }
    /// Marks this slot as skip-certified.
    ///
    /// Returns `true` iff this slot was not already skip-certified.
    pub(super) fn mark_skip(&mut self) -> bool {
        let newly_skipped = !self.skip;
        self.skip = true;
        newly_skipped
    }
    /// Returns `true` iff this slot is skip-certified.
    pub(super) fn is_skip_certified(&self) -> bool {
        self.skip
    }
    /// Marks the given block as notarized-fallback.
    ///
    /// Returns `true` iff this block was not already marked as notarized-fallback.
    pub(super) fn mark_notar_fallback(&mut self, hash: BlockHash) -> bool {
        let already_known = self.notar_fallbacks.contains(&hash);
        if !already_known {
            self.notar_fallbacks.push(hash);
        }
        !already_known
    }
    /// Returns an iterator over the notarized-fallback block hashes for this slot.
    pub(super) fn notar_fallback_blocks(&self) -> impl Iterator<Item = BlockHash> {
        self.notar_fallbacks.iter().cloned()
    }
    /// Adds a [`BlockId`] to the parents ready list.
    ///
    /// Additionally, will inform any waiters.
    ///
    /// # Panics
    ///
    /// If the specific parent is already marked ready for this slot.
    pub(super) fn add_to_ready(&mut self, id: BlockId) {
        match &mut self.is_ready {
            IsReady::Ready(ready_ids) => {
                assert!(!ready_ids.contains(&id));
                ready_ids.push(id);
            }
            IsReady::NotReady(maybe_waiter) => {
                // first ready parent: notify anyone waiting on this slot
                if let Some(waiter) = maybe_waiter.take() {
                    if let Err(id) = waiter.send(id.clone()) {
                        warn!("sending {id:?} failed, receiver deallocated");
                    }
                }
                self.is_ready = IsReady::Ready(smallvec![id]);
            }
        }
    }
    /// Returns the list of currently valid parents for this slot.
    pub(super) fn ready_block_ids(&self) -> &[BlockId] {
        if let IsReady::Ready(block_ids) = &self.is_ready {
            block_ids
        } else {
            &[]
        }
    }
    /// Requests to know a valid parent for this slot.
    ///
    /// Returns either:
    /// - The block ID of a parent if at least one parent is already ready.
    ///   Always returns a parent with minimal slot number if multiple parents are ready.
    /// - A receiver of a oneshot channel that will receive the first parent's block ID.
    ///
    /// # Panics
    ///
    /// If someone is already waiting on this slot.
    pub(super) fn wait_for_parent_ready(&mut self) -> Either<BlockId, oneshot::Receiver<BlockId>> {
        match &mut self.is_ready {
            IsReady::NotReady(maybe_waiter) => {
                // only a single waiter is supported per slot
                assert!(maybe_waiter.is_none());
                let (tx, rx) = oneshot::channel();
                *maybe_waiter = Some(tx);
                Either::Right(rx)
            }
            IsReady::Ready(block_ids) => {
                assert!(!block_ids.is_empty());
                // sort so the minimal entry comes first
                block_ids.sort();
                Either::Left(block_ids[0].clone())
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::Slot;
    use crate::crypto::Hash;
    /// With a parent already registered, waiting must resolve immediately.
    #[test]
    fn wait_for_parent_ready_no_blocking() {
        let mut state = ParentReadyState::default();
        assert!(state.ready_block_ids().is_empty());
        let expected = (Slot::new(1), Hash::random_for_test().into());
        state.add_to_ready(expected.clone());
        match state.wait_for_parent_ready() {
            Either::Left(got) => assert_eq!(got, expected),
            res => panic!("unexpected result {res:?}"),
        }
        assert_eq!(state.ready_block_ids().len(), 1);
    }
    /// With no parent yet, waiting hands back a channel that fires later.
    #[tokio::test]
    async fn wait_for_parent_ready_blocking() {
        let mut state = ParentReadyState::default();
        assert!(state.ready_block_ids().is_empty());
        let rx = match state.wait_for_parent_ready() {
            Either::Right(rx) => rx,
            res => panic!("unexpected result {res:?}"),
        };
        let expected = (Slot::new(1), Hash::random_for_test().into());
        state.add_to_ready(expected.clone());
        assert_eq!(rx.await.unwrap(), expected);
        assert_eq!(state.ready_block_ids().len(), 1);
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/consensus/blockstore/slot_block_data.rs | src/consensus/blockstore/slot_block_data.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Data structure holding shreds, slices and blocks for a specific slot.
//!
//!
use std::collections::BTreeMap;
use std::collections::btree_map::Entry;
use log::{debug, trace, warn};
use thiserror::Error;
use super::BlockInfo;
use crate::consensus::votor::VotorEvent;
use crate::crypto::merkle::{BlockHash, DoubleMerkleTree, SliceRoot};
use crate::crypto::signature::PublicKey;
use crate::shredder::{
DeshredError, RegularShredder, Shred, ShredVerifyError, Shredder, TOTAL_SHREDS, ValidatedShred,
};
use crate::types::{Slice, SliceIndex};
use crate::{Block, Slot};
/// Errors that may be encountered when adding a shred.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Error)]
pub enum AddShredError {
    /// The shred's leader signature or Merkle proof did not verify.
    #[error("shred has invalid signature")]
    InvalidSignature,
    /// An identical shred is already stored; the new copy is dropped.
    #[error("shred is already stored")]
    Duplicate,
    /// The shred proves the leader produced conflicting data for the slot.
    #[error("shred shows leader equivocation")]
    Equivocation,
    /// The shred was invalid for another reason (e.g. inconsistent slice metadata).
    #[error("shred was invalid and leader did not equivocate")]
    InvalidShred,
}
impl From<ShredVerifyError> for AddShredError {
    /// Maps shred verification failures onto the corresponding add-shred error.
    fn from(src: ShredVerifyError) -> Self {
        match src {
            ShredVerifyError::Equivocation => Self::Equivocation,
            ShredVerifyError::InvalidProof | ShredVerifyError::InvalidSignature => {
                Self::InvalidSignature
            }
        }
    }
}
/// Holds all data corresponding to any blocks for a single slot.
pub struct SlotBlockData {
    /// Slot number this data corresponds to.
    slot: Slot,
    /// Spot for storing the block that was received via block dissemination.
    pub(super) disseminated: BlockData,
    /// Spot for storing blocks that might later be received via repair.
    ///
    /// Keyed by the block hash the repair shreds belong to.
    pub(super) repaired: BTreeMap<BlockHash, BlockData>,
    /// Tracks whether we observed the leader misbehaving.
    /// Once misbehavior is observed, we stop accepting additional [`Shred`]s through dissemination.
    leader_misbehaved: bool,
}
impl SlotBlockData {
    /// Creates a new empty structure for a slot's block data.
    pub fn new(slot: Slot) -> Self {
        Self {
            slot,
            disseminated: BlockData::new(slot),
            repaired: BTreeMap::new(),
            leader_misbehaved: false,
        }
    }
    /// Adds a shred received via block dissemination in the corresponding spot.
    ///
    /// Performs the necessary validity checks, including checks for leader equivocation.
    /// Once the leader has been observed misbehaving, all further disseminated
    /// shreds for this slot are rejected with [`AddShredError::InvalidShred`].
    ///
    /// # Panics
    ///
    /// Panics if the shred does not belong to this slot.
    pub fn add_shred_from_disseminator(
        &mut self,
        shred: Shred,
        leader_pk: PublicKey,
        shredder: &mut RegularShredder,
    ) -> Result<Option<VotorEvent>, AddShredError> {
        assert_eq!(shred.payload().header.slot, self.slot);
        if self.leader_misbehaved {
            debug!("received shred from misbehaving leader, not adding to blockstore");
            return Err(AddShredError::InvalidShred);
        }
        let res = self.disseminated.add_shred(shred, leader_pk, shredder);
        self.record_leader_misbehavior(&res);
        res
    }
    /// Adds a shred received via repair to the spot given by block hash.
    ///
    /// Performs the necessary validity checks, all but leader equivocation.
    ///
    /// # Panics
    ///
    /// Panics if the shred does not belong to this slot.
    pub fn add_shred_from_repair(
        &mut self,
        hash: BlockHash,
        shred: Shred,
        leader_pk: PublicKey,
        shredder: &mut RegularShredder,
    ) -> Result<Option<VotorEvent>, AddShredError> {
        assert_eq!(shred.payload().header.slot, self.slot);
        let block_data = self
            .repaired
            .entry(hash)
            .or_insert_with(|| BlockData::new(self.slot));
        let res = block_data.add_shred(shred, leader_pk, shredder);
        self.record_leader_misbehavior(&res);
        res
    }
    /// Remembers leader misbehavior if adding a shred failed with an error
    /// that proves it (equivocation or an otherwise invalid shred).
    fn record_leader_misbehavior(&mut self, res: &Result<Option<VotorEvent>, AddShredError>) {
        if matches!(
            res,
            Err(AddShredError::Equivocation | AddShredError::InvalidShred)
        ) {
            self.leader_misbehaved = true;
        }
    }
}
/// Returned value from [`BlockData::try_reconstruct_slice`]
enum ReconstructSliceResult {
    /// Nothing happened: the slice (or the whole block) was already
    /// reconstructed, or not enough shreds are available yet.
    NoAction,
    /// Encountered an error reconstructing the slice,
    /// i.e. the data is invalid (e.g. a first slice missing its parent).
    Error,
    /// Slice successfully reconstructed.
    Complete,
}
/// Returned value from [`BlockData::try_reconstruct_block`]
enum ReconstructBlockResult {
    /// Nothing happened: the block was already reconstructed
    /// or some slices are still missing.
    NoAction,
    /// Encountered an error reconstructing the block,
    /// e.g. undecodable transaction data or an invalid parent handover.
    Error,
    /// Block successfully reconstructed.
    /// [`BlockInfo`] describing the block is returned.
    Complete(BlockInfo),
}
/// Holds all data corresponding to a single block.
pub struct BlockData {
    /// Slot number this block is in.
    slot: Slot,
    /// Potentially completely restored block.
    ///
    /// Once set, no further slice/block reconstruction is attempted.
    pub(super) completed: Option<(BlockHash, Block)>,
    /// Any shreds of this block stored so far, indexed by slice index.
    ///
    /// The inner array is indexed by the shred's index within the slice.
    pub(super) shreds: BTreeMap<SliceIndex, [Option<ValidatedShred>; TOTAL_SHREDS]>,
    /// Any already reconstructed slices of this block.
    ///
    /// Drained again once the full block has been reconstructed.
    pub(super) slices: BTreeMap<SliceIndex, Slice>,
    /// Index of the slice marked as last, if any.
    pub(super) last_slice: Option<SliceIndex>,
    /// Double merkle tree of this block, only known if block has been reconstructed.
    pub(super) double_merkle_tree: Option<DoubleMerkleTree>,
    /// Cache of Merkle roots for which the leader signature has been verified.
    pub(super) merkle_root_cache: BTreeMap<SliceIndex, SliceRoot>,
}
impl BlockData {
    /// Create a new spot for storing data of a single block.
    pub fn new(slot: Slot) -> Self {
        Self {
            slot,
            completed: None,
            shreds: BTreeMap::new(),
            slices: BTreeMap::new(),
            last_slice: None,
            double_merkle_tree: None,
            merkle_root_cache: BTreeMap::new(),
        }
    }
    /// Validates the given shred and, if valid, stores it.
    ///
    /// Validation happens in [`ValidatedShred::try_new`] against the cached
    /// per-slice Merkle root (which also allows detecting equivocation);
    /// storage is handled by [`Self::add_validated_shred`].
    fn add_shred(
        &mut self,
        shred: Shred,
        leader_pk: PublicKey,
        shredder: &mut RegularShredder,
    ) -> Result<Option<VotorEvent>, AddShredError> {
        assert!(shred.payload().header.slot == self.slot);
        let slice_index = shred.payload().header.slice_index;
        let cached_merkle_root = self.merkle_root_cache.entry(slice_index);
        let validated_shred = ValidatedShred::try_new(shred, cached_merkle_root, &leader_pk)?;
        self.add_validated_shred(validated_shred, shredder)
    }
    /// Stores an already-validated shred and attempts reconstruction of the
    /// corresponding slice (and then the whole block).
    ///
    /// Returns a [`VotorEvent`] when this was the first shred stored for the
    /// block, or when the full block was just reconstructed.
    fn add_validated_shred(
        &mut self,
        validated_shred: ValidatedShred,
        shredder: &mut RegularShredder,
    ) -> Result<Option<VotorEvent>, AddShredError> {
        let header = &validated_shred.payload().header;
        assert!(header.slot == self.slot);
        let slice_index = header.slice_index;
        match (header.is_last, self.last_slice) {
            (true, None) => {
                // learned the last slice index: drop anything beyond it
                self.last_slice = Some(slice_index);
                self.slices.retain(|&ind, _| ind <= slice_index);
                self.shreds.retain(|&ind, _| ind <= slice_index);
            }
            (true, Some(l)) => {
                // two different "last" slices cannot both be valid
                if slice_index != l {
                    return Err(AddShredError::InvalidShred);
                }
            }
            (false, None) => (),
            (false, Some(l)) => {
                // a non-last slice at or after the known last slice is invalid
                if slice_index >= l {
                    return Err(AddShredError::InvalidShred);
                }
            }
        }
        let is_first_shred = self.shreds.is_empty();
        let shred_index = validated_shred.payload().shred_index;
        let slice_shreds = self
            .shreds
            .entry(slice_index)
            .or_insert([const { None }; TOTAL_SHREDS]);
        if slice_shreds[*shred_index].is_some() {
            debug!(
                "dropping duplicate shred {}-{} in slot {}",
                slice_index, shred_index, self.slot
            );
            return Err(AddShredError::Duplicate);
        }
        slice_shreds[*shred_index] = Some(validated_shred);
        if is_first_shred {
            // NOTE(review): the reconstruction attempt is skipped for the very
            // first shred — assumes a single shred can never complete a slice
            // (i.e. more than one data shred is required); confirm.
            return Ok(Some(VotorEvent::FirstShred(self.slot)));
        }
        match self.try_reconstruct_slice(slice_index, shredder) {
            ReconstructSliceResult::NoAction => Ok(None),
            ReconstructSliceResult::Error => Err(AddShredError::InvalidShred),
            ReconstructSliceResult::Complete => match self.try_reconstruct_block() {
                ReconstructBlockResult::NoAction => Ok(None),
                ReconstructBlockResult::Error => Err(AddShredError::InvalidShred),
                ReconstructBlockResult::Complete(block_info) => Ok(Some(VotorEvent::Block {
                    slot: self.slot,
                    block_info,
                })),
            },
        }
    }
    /// Reconstructs the slice if the blockstore contains enough shreds.
    ///
    /// See [`ReconstructSliceResult`] for more info on what the function returns.
    fn try_reconstruct_slice(
        &mut self,
        index: SliceIndex,
        shredder: &mut RegularShredder,
    ) -> ReconstructSliceResult {
        if self.completed.is_some() {
            trace!("already have block for slot {}", self.slot);
            return ReconstructSliceResult::NoAction;
        }
        let entry = match self.slices.entry(index) {
            Entry::Occupied(_) => return ReconstructSliceResult::NoAction,
            Entry::Vacant(entry) => entry,
        };
        // assuming caller has inserted at least one valid shred so unwrap() should be safe
        let slice_shreds = self.shreds.get_mut(&index).unwrap();
        let (reconstructed_slice, reconstructed_shreds) = match shredder.deshred(slice_shreds) {
            Ok(output) => output,
            Err(DeshredError::NotEnoughShreds) => return ReconstructSliceResult::NoAction,
            rest => {
                warn!("deshreding failed with {rest:?}");
                return ReconstructSliceResult::Error;
            }
        };
        // the first slice of a block must carry the parent reference
        if reconstructed_slice.parent.is_none() && reconstructed_slice.slice_index.is_first() {
            warn!(
                "reconstructed slice {} in slot {} expected to contain parent",
                index, self.slot
            );
            return ReconstructSliceResult::Error;
        }
        // insert reconstructed slice and shreds
        entry.insert(reconstructed_slice);
        let mut reconstructed_shreds = reconstructed_shreds.map(Some);
        std::mem::swap(slice_shreds, &mut reconstructed_shreds);
        trace!("reconstructed slice {} in slot {}", index, self.slot);
        ReconstructSliceResult::Complete
    }
    /// Reconstructs the block if the blockstore contains all slices.
    ///
    /// See [`ReconstructBlockResult`] for more info on what the function returns.
    fn try_reconstruct_block(&mut self) -> ReconstructBlockResult {
        if self.completed.is_some() {
            trace!("already have block for slot {}", self.slot);
            return ReconstructBlockResult::NoAction;
        }
        let Some(last_slice) = self.last_slice else {
            return ReconstructBlockResult::NoAction;
        };
        // slices beyond `last_slice` are pruned on insert, so a full count
        // means every slice from the first through the last is present
        if self.slices.len() != last_slice.inner() + 1 {
            trace!("don't have all slices for slot {} yet", self.slot);
            return ReconstructBlockResult::NoAction;
        }
        // calculate double-Merkle tree & block hash
        // NOTE(review): assumes every reconstructed slice carries a Merkle
        // root (set during deshredding) — otherwise this unwrap panics.
        let merkle_roots = self
            .slices
            .values()
            .map(|s| s.merkle_root.as_ref().unwrap());
        let tree = DoubleMerkleTree::new(merkle_roots);
        let block_hash = tree.get_root();
        self.double_merkle_tree = Some(tree);
        // reconstruct block header
        let first_slice = self.slices.get(&SliceIndex::first()).unwrap();
        // based on the logic in `try_reconstruct_slice`, first_slice should be valid i.e. it must contain a parent.
        let mut parent = first_slice.parent.clone().unwrap();
        let mut parent_switched = false;
        let mut transactions = vec![];
        for (ind, slice) in &self.slices {
            // handle optimistic handover
            // (a later slice may switch to a different parent, at most once)
            if !ind.is_first()
                && let Some(new_parent) = slice.parent.clone()
            {
                if new_parent == parent {
                    warn!("parent switched to same value");
                    return ReconstructBlockResult::Error;
                }
                if parent_switched {
                    warn!("parent switched more than once");
                    return ReconstructBlockResult::Error;
                }
                parent_switched = true;
                parent = new_parent;
            }
            let mut txs = match wincode::deserialize(&slice.data) {
                Ok(r) => r,
                Err(err) => {
                    warn!("decoding slice {ind} failed with {err:?}");
                    return ReconstructBlockResult::Error;
                }
            };
            transactions.append(&mut txs);
        }
        let block = Block {
            _slot: self.slot,
            hash: block_hash.clone(),
            parent: parent.0,
            parent_hash: parent.1,
            _transactions: transactions,
        };
        let block_info = BlockInfo::from(&block);
        self.completed = Some((block_hash, block));
        // clean up raw slices
        // NOTE(review): assumes `until()` covers all stored slice indices —
        // confirm the last slice itself is (or need not be) removed here.
        for slice_index in last_slice.until() {
            self.slices.remove(&slice_index);
        }
        ReconstructBlockResult::Complete(block_info)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::crypto::signature::SecretKey;
    use crate::shredder::{DATA_SHREDS, ShredIndex, TOTAL_SHREDS};
    use crate::test_utils::{assert_votor_events_match, create_random_block};

    /// Shreds `slice` with `sk` and feeds every resulting shred into `block_data`.
    ///
    /// Returns all emitted [`VotorEvent`]s together with `Ok(())` if all shreds
    /// were accepted (duplicates are tolerated), or the first fatal error.
    fn handle_slice(
        block_data: &mut BlockData,
        slice: Slice,
        sk: &SecretKey,
    ) -> (Vec<VotorEvent>, Result<(), AddShredError>) {
        let mut shredder = RegularShredder::default();
        let pk = sk.to_pk();
        let shreds = shredder.shred(slice, sk).unwrap();
        let mut events = vec![];
        for shred in shreds {
            match block_data.add_shred(shred.into_shred(), pk, &mut shredder) {
                Ok(Some(event)) => {
                    events.push(event);
                }
                // no event, or shred already seen: keep going
                Ok(None) | Err(AddShredError::Duplicate) => (),
                // fatal error: stop early, returning events gathered so far
                Err(err) => return (events, Err(err)),
            }
        }
        (events, Ok(()))
    }

    /// Extracts the hash from a [`VotorEvent::Block`]; panics on any other variant.
    fn get_block_hash_from_votor_event(event: &VotorEvent) -> BlockHash {
        match event {
            VotorEvent::Block {
                slot: _,
                block_info: BlockInfo { hash, parent: _ },
            } => hash.clone(),
            _ => panic!(),
        }
    }

    // A slice is reconstructable from a DATA_SHREDS-sized subset of its shreds,
    // and reconstruction restores the complete shred set.
    #[test]
    fn reconstruct_slice_and_shreds() {
        let sk = SecretKey::new(&mut rand::rng());
        let pk = sk.to_pk();
        let slot = Slot::new(123);
        // manage to construct block from just enough shreds
        let slices = create_random_block(slot, 1);
        let mut block_data = BlockData::new(slot);
        let mut shredder = RegularShredder::default();
        let shreds = shredder.shred(slices[0].clone(), &sk).unwrap();
        let mut events = vec![];
        // deliver only the last DATA_SHREDS shreds of the slice
        for shred in shreds.into_iter().skip(TOTAL_SHREDS - DATA_SHREDS) {
            if let Some(event) = block_data
                .add_shred(shred.into_shred(), pk, &mut shredder)
                .unwrap()
            {
                events.push(event);
            }
        }
        assert!(block_data.completed.is_some());
        // all shreds should have been reconstructed
        let slice_shreds = block_data.shreds.get(&SliceIndex::first()).unwrap();
        assert_eq!(slice_shreds.len(), TOTAL_SHREDS);
        for shred_index in ShredIndex::all() {
            assert!(slice_shreds[*shred_index].is_some());
        }
    }

    // A first (and only) slice with no parent must be rejected as invalid.
    #[test]
    fn reconstruct_slice_invalid_parent() {
        let sk = SecretKey::new(&mut rand::rng());
        let slot = Slot::new(123);
        // manage to construct a valid block
        let slices = create_random_block(slot, 1);
        let (events, res) =
            handle_slice(&mut BlockData::new(slices[0].slot), slices[0].clone(), &sk);
        let () = res.unwrap();
        assert_eq!(events.len(), 2);
        let first_shred_event = VotorEvent::FirstShred(slot);
        assert_votor_events_match(events[0].clone(), first_shred_event);
        let hash = get_block_hash_from_votor_event(&events[1]);
        let block_event = VotorEvent::Block {
            slot,
            block_info: BlockInfo {
                hash,
                parent: slices[0].parent.clone().unwrap(),
            },
        };
        assert_votor_events_match(events[1].clone(), block_event);
        // do not construct a valid block when slice is invalid
        let mut slices = create_random_block(slot, 1);
        slices[0].parent = None;
        let (events, res) =
            handle_slice(&mut BlockData::new(slices[0].slot), slices[0].clone(), &sk);
        assert_eq!(res.unwrap_err(), AddShredError::InvalidShred);
        assert_eq!(events.len(), 1);
        let first_shred_event = VotorEvent::FirstShred(slot);
        assert_votor_events_match(events[0].clone(), first_shred_event);
    }

    // If a subsequent slice switches parent to the original, the block is not reconstructed.
    #[test]
    fn reconstruct_block_optimistic_handover_duplicate_parent() {
        let sk = SecretKey::new(&mut rand::rng());
        let slot = Slot::new(123);
        let mut slices = create_random_block(slot, 3);
        // third slice "switches" back to the parent already declared by slice 0
        slices[2].parent = slices[0].parent.clone();
        let mut block_data = BlockData::new(slot);
        let mut events = vec![];
        for (ind, slice) in slices.into_iter().enumerate() {
            let (mut evs, res) = handle_slice(&mut block_data, slice, &sk);
            events.append(&mut evs);
            if ind == 0 || ind == 1 {
                let () = res.unwrap();
            } else {
                assert_eq!(res.unwrap_err(), AddShredError::InvalidShred);
            }
        }
        // only the first-shred event fires; no block event
        assert_eq!(events.len(), 1);
        match events[0] {
            VotorEvent::FirstShred(s) => assert_eq!(slot, s),
            _ => panic!(),
        }
    }

    // Two switches of parents do not reconstruct block.
    #[test]
    fn reconstruct_block_optimistic_handover_two_switches() {
        let sk = SecretKey::new(&mut rand::rng());
        let slot = Slot::new(123);
        let mut slices = create_random_block(slot, 3);
        let parent = slices[0].parent.clone().unwrap();
        let slice_1_parent = (parent.0.next(), parent.1.clone());
        assert!(slice_1_parent.0 < slot);
        let slice_2_parent = (parent.0.next().next(), parent.1);
        assert!(slice_2_parent.0 < slot);
        slices[1].parent = Some(slice_1_parent);
        slices[2].parent = Some(slice_2_parent);
        let mut block_data = BlockData::new(slot);
        let mut events = vec![];
        for (ind, slice) in slices.into_iter().enumerate() {
            let (mut evs, res) = handle_slice(&mut block_data, slice, &sk);
            events.append(&mut evs);
            // first switch (slice 1) is tolerated, second (slice 2) is invalid
            if ind == 0 || ind == 1 {
                let () = res.unwrap();
            } else {
                assert_eq!(res.unwrap_err(), AddShredError::InvalidShred);
            }
        }
        assert_eq!(events.len(), 1);
        match events[0] {
            VotorEvent::FirstShred(s) => assert_eq!(slot, s),
            _ => panic!(),
        }
    }

    // Optimistic handover works.
    #[test]
    fn reconstruct_block_optimistic_handover_works() {
        let sk = SecretKey::new(&mut rand::rng());
        let slot = Slot::new(123);
        let mut slices = create_random_block(slot, 3);
        let parent = slices[0].parent.clone().unwrap();
        // a single parent switch (on slice 1) is allowed
        let slice_1_parent = (parent.0.next(), parent.1);
        assert!(slice_1_parent.0 < slot);
        slices[1].parent = Some(slice_1_parent.clone());
        let mut block_data = BlockData::new(slot);
        let mut events = vec![];
        for slice in slices {
            let (mut evs, res) = handle_slice(&mut block_data, slice, &sk);
            events.append(&mut evs);
            let () = res.unwrap();
        }
        assert_eq!(events.len(), 2);
        match events[0] {
            VotorEvent::FirstShred(s) => assert_eq!(slot, s),
            _ => panic!(),
        }
        // the block event must report the switched-to parent
        match &events[1] {
            VotorEvent::Block {
                slot: ret_slot,
                block_info,
            } => {
                assert_eq!(*ret_slot, slot);
                assert_eq!(block_info.parent, slice_1_parent);
            }
            _ => panic!(),
        }
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/network/udp.rs | src/network/udp.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! UDP network interface.
//!
//! This module provides an implementation of the [`Network`] trait for UDP sockets.
//! It is essentially a wrapper around [`tokio::net::UdpSocket`].
use std::marker::PhantomData;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use async_trait::async_trait;
use futures::future::join_all;
use log::warn;
use tokio::net::UdpSocket;
use wincode::{SchemaRead, SchemaWrite};
use super::MTU_BYTES;
use crate::network::Network;
/// Number of bytes used as buffer for any incoming packet.
///
/// This should be enough to receive and deserialize any `NetworkMessage`,
/// since messages we send in our protocols should fit in one MTU size packet.
const RECEIVE_BUFFER_SIZE: usize = MTU_BYTES;
/// Implementation of network abstraction over a simple UDP socket.
pub struct UdpNetwork<S, R> {
    /// Underlying UDP socket, used for both sending and receiving.
    socket: UdpSocket,
    /// Binds the generic send (`S`) and receive (`R`) message types without storing them.
    _msg_types: PhantomData<(S, R)>,
}
impl<S, R> UdpNetwork<S, R> {
    /// Creates a new `UdpNetwork` instance bound to the given `port`.
    ///
    /// # Panics
    ///
    /// Panics if the UDP `port` is already in use.
    #[must_use]
    pub fn new(port: u16) -> Self {
        let bind_addr = SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, port);
        let socket = futures::executor::block_on(UdpSocket::bind(bind_addr)).unwrap();
        Self {
            socket,
            _msg_types: PhantomData,
        }
    }

    /// Creates a new `UdpNetwork` instance bound to an arbitrary port.
    /// The port is assigned by the OS.
    #[must_use]
    pub fn new_with_any_port() -> Self {
        Self::new(0)
    }

    /// Returns the UDP port number the network is bound to.
    pub fn port(&self) -> u16 {
        self.socket.local_addr().unwrap().port()
    }

    /// Sends pre-serialized `bytes` to `addr` in a single datagram.
    async fn send_serialized(&self, bytes: &[u8], addr: SocketAddr) -> std::io::Result<()> {
        assert!(bytes.len() <= MTU_BYTES, "each message should fit in MTU");
        let sent = self.socket.send_to(bytes, addr).await?;
        // UDP either sends the whole datagram or errors out
        assert_eq!(sent, bytes.len());
        Ok(())
    }
}
#[async_trait]
impl<S, R> Network for UdpNetwork<S, R>
where
    S: SchemaWrite<Src = S> + Send + Sync,
    R: for<'de> SchemaRead<'de, Dst = R> + Send + Sync,
{
    type Recv = R;
    type Send = S;

    /// Serializes `msg` once and sends it to every address in `addrs`.
    async fn send_to_many(
        &self,
        msg: &S,
        addrs: impl Iterator<Item = SocketAddr> + Send,
    ) -> std::io::Result<()> {
        let bytes = &wincode::serialize(msg).unwrap();
        let tasks = addrs.map(async move |addr| self.send_serialized(bytes, addr).await);
        for res in join_all(tasks).await {
            let () = res?;
        }
        Ok(())
    }

    /// Serializes `msg` and sends it to `addr`.
    async fn send(&self, msg: &Self::Send, addr: SocketAddr) -> std::io::Result<()> {
        let bytes = &wincode::serialize(msg).unwrap();
        self.send_serialized(bytes, addr).await
    }

    /// Receives the next deserializable message, skipping malformed packets.
    async fn receive(&self) -> std::io::Result<R> {
        let mut buf = [0; RECEIVE_BUFFER_SIZE];
        loop {
            let bytes_recved = self.socket.recv(&mut buf).await?;
            // Only feed the bytes actually received into the deserializer.
            // Previously the whole fixed-size buffer was passed, so trailing
            // zeros (or stale bytes from an earlier, longer packet) could be
            // misinterpreted as part of the message.
            let msg = match wincode::deserialize(&buf[..bytes_recved]) {
                Ok(r) => r,
                Err(err) => {
                    warn!("deserializing failed with {err:?}");
                    continue;
                }
            };
            return Ok(msg);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::network::localhost_ip_sockaddr;
    use crate::test_utils::{Ping, Pong};

    // One-directional send/receive between two localhost sockets.
    #[tokio::test]
    async fn ping() {
        let socket1: UdpNetwork<Ping, Ping> = UdpNetwork::new_with_any_port();
        let socket2: UdpNetwork<Ping, Ping> = UdpNetwork::new_with_any_port();
        let addr1 = localhost_ip_sockaddr(socket1.port());
        // regular send()
        socket2.send(&Ping::default(), addr1).await.unwrap();
        let msg = socket1.receive().await.unwrap();
        assert_eq!(msg.0, Ping::default().0);
    }

    // Full round trip: Ping one way, Pong echoed back the other way.
    #[tokio::test]
    async fn ping_pong() {
        let socket1 = UdpNetwork::new_with_any_port();
        let socket2 = UdpNetwork::new_with_any_port();
        let addr1 = localhost_ip_sockaddr(socket1.port());
        let addr2 = localhost_ip_sockaddr(socket2.port());
        socket1.send(&Ping::default(), addr2).await.unwrap();
        let msg: Ping = socket2.receive().await.unwrap();
        assert_eq!(msg.0, Ping::default().0);
        socket2.send(&Pong(msg.0), addr1).await.unwrap();
        let msg: Pong = socket1.receive().await.unwrap();
        assert_eq!(msg.0, Ping::default().0);
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/network/simulated.rs | src/network/simulated.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Simulated network interface.
//!
//! This module provides an implementation of a simulated network interface,
//! which may be used for local testing and simulations.
//! It works by having [`SimulatedNetwork`] instances communicate with each other
//! via an underlying [`SimulatedNetworkCore`], which links them together.
//! The network core handles channels for sending packets from one node to another.
//! These channels are artificially limited in bandwidth through token buckets.
//! The core also delays delivery of packets, simulating network latency, and
//! supports jitter as well as packet loss.
//!
//! Further, this module exposes real-world data via its sub-modules:
//! - [`ping_data`] for latencies between Solana mainnet validators.
//! - [`stake_distribution`] for working with the Solana mainnet stake distribution.
mod core;
pub mod ping_data;
pub mod stake_distribution;
mod token_bucket;
use std::marker::PhantomData;
use std::net::SocketAddr;
use std::sync::Arc;
use async_trait::async_trait;
use futures::future::join_all;
use log::warn;
use tokio::sync::{Mutex, RwLock, mpsc};
use wincode::{SchemaRead, SchemaWrite};
pub use self::core::SimulatedNetworkCore;
use self::token_bucket::TokenBucket;
use super::Network;
use crate::ValidatorId;
use crate::network::MTU_BYTES;
/// A simulated network interface for local testing and simulations.
pub struct SimulatedNetwork<S, R> {
    /// ID of the validator this network interface belongs to.
    id: ValidatorId,
    /// Reference to the simulated network core this interface is attached to.
    network_core: Arc<SimulatedNetworkCore>,
    /// Receiver for incoming messages.
    receiver: Mutex<mpsc::Receiver<Vec<u8>>>,
    /// Optional rate limiter.
    ///
    /// When set, outgoing sends wait on this token bucket (one token per byte).
    limiter: Option<RwLock<TokenBucket>>,
    /// Binds the generic send (`S`) and receive (`R`) message types without storing them.
    _msg_types: PhantomData<(S, R)>,
}
impl<S, R> SimulatedNetwork<S, R> {
    /// Hands `bytes` to the network core for delivery to validator `to`,
    /// first waiting on the rate limiter (if one is configured).
    async fn send_byte_vec(&self, bytes: Vec<u8>, to: ValidatorId) -> std::io::Result<()> {
        match &self.limiter {
            Some(limiter) => limiter.write().await.wait_for(bytes.len()).await,
            None => {}
        }
        self.network_core.send(bytes, self.id, to).await;
        Ok(())
    }

    /// Sends an already-serialized message; the destination validator ID
    /// is encoded in the port of `addr`.
    async fn send_serialized(&self, bytes: Vec<u8>, addr: SocketAddr) -> std::io::Result<()> {
        assert!(bytes.len() <= MTU_BYTES, "each message should fit in MTU");
        let recipient = addr.port().into();
        self.send_byte_vec(bytes, recipient).await
    }
}
#[async_trait]
impl<S, R> Network for SimulatedNetwork<S, R>
where
    S: SchemaWrite<Src = S> + Send + Sync,
    R: for<'de> SchemaRead<'de, Dst = R> + Send + Sync,
{
    type Recv = R;
    type Send = S;

    /// Serializes `msg` once and sends a copy to every address in `addrs`.
    async fn send_to_many(
        &self,
        msg: &S,
        addrs: impl Iterator<Item = SocketAddr> + Send,
    ) -> std::io::Result<()> {
        let serialized = wincode::serialize(msg).unwrap();
        let sends = addrs.map(|addr| {
            let payload = serialized.clone();
            async move { self.send_serialized(payload, addr).await }
        });
        for outcome in join_all(sends).await {
            outcome?;
        }
        Ok(())
    }

    /// Serializes `msg` and sends it to `addr`.
    async fn send(&self, msg: &S, addr: SocketAddr) -> std::io::Result<()> {
        let serialized = wincode::serialize(msg).unwrap();
        self.send_serialized(serialized, addr).await
    }

    /// Receives the next message, skipping any that fail to deserialize.
    /// Errors once the underlying channel is closed.
    async fn receive(&self) -> std::io::Result<R> {
        loop {
            let bytes = match self.receiver.lock().await.recv().await {
                Some(b) => b,
                None => return Err(std::io::Error::other("channel closed")),
            };
            match wincode::deserialize(&bytes) {
                Ok(msg) => return Ok(msg),
                Err(err) => warn!("deserializing failed with {err:?}"),
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use std::time::Instant;
    use super::*;
    use crate::Slot;
    use crate::crypto::signature::SecretKey;
    use crate::network::localhost_ip_sockaddr;
    use crate::shredder::{
        DATA_SHREDS, MAX_DATA_PER_SLICE, RegularShredder, Shred, Shredder, TOTAL_SHREDS,
    };
    use crate::test_utils::Ping;
    use crate::types::slice::create_slice_payload_with_invalid_txs;
    use crate::types::{Slice, SliceHeader, SliceIndex};

    // Two nodes can exchange messages in both directions through the core.
    #[tokio::test]
    async fn basic() {
        // set up network with two nodes
        let core = Arc::new(SimulatedNetworkCore::default().with_packet_loss(0.0));
        let net1 = core.join(0, 8192, 8192).await;
        let net2 = core.join(1, 8192, 8192).await;
        let msg = Ping::default();
        // one direction
        net1.send(&msg, localhost_ip_sockaddr(1)).await.unwrap();
        let received: Ping = net2.receive().await.expect("didn't receive message");
        if received.0 != msg.0 {
            panic!("received wrong message");
        }
        // other direction
        net2.send(&msg, localhost_ip_sockaddr(0)).await.unwrap();
        let received: Ping = net1.receive().await.expect("didn't receive message");
        if received.0 != msg.0 {
            panic!("received wrong message");
        }
    }

    // End-to-end transfer time over a 32 KiB/s link must match the configured bandwidth.
    #[tokio::test]
    async fn low_bandwidth() {
        // set up network with two nodes
        let core = Arc::new(
            SimulatedNetworkCore::default()
                .with_jitter(0.0)
                .with_packet_loss(0.0),
        );
        let net1: SimulatedNetwork<Shred, Shred> = core.join(0, 32_768, 32_768).await; // 32 KiB/s
        let net2: SimulatedNetwork<Shred, Shred> = core.join(1, 32_768, 32_768).await; // 32 KiB/s
        // create 2 slices
        let mut shredder = RegularShredder::default();
        let mut rng = rand::rng();
        let sk = SecretKey::new(&mut rng);
        let mut shreds = Vec::new();
        let final_slice_index = SliceIndex::new_unchecked(1);
        for slice_index in final_slice_index.until() {
            let payload = create_slice_payload_with_invalid_txs(None, MAX_DATA_PER_SLICE);
            let header = SliceHeader {
                slot: Slot::new(0),
                slice_index,
                is_last: slice_index == final_slice_index,
            };
            let slice = Slice::from_parts(header, payload, None);
            let slice_shreds = shredder.shred(slice, &sk).unwrap();
            shreds.extend(slice_shreds);
        }
        // theoretical transfer time: 2 slices over a 32 KiB/s link
        let t_latency = 2.0 * MAX_DATA_PER_SLICE as f64 / 32_768.0;
        let p_latency = 0.1;
        let expansion_ratio = (TOTAL_SHREDS as f64) / (DATA_SHREDS as f64);
        let min = p_latency + t_latency * expansion_ratio; // account for erasure coding
        let max = p_latency + t_latency * expansion_ratio * 1.41; // +36% metadata overhead, +5% margin
        // background task: receive shreds and measure latency
        let receiver = tokio::spawn(async move {
            let mut shreds_received = 0;
            let now = Instant::now();
            while let Ok(_shred) = net2.receive().await {
                shreds_received += 1;
                if shreds_received == 2 * TOTAL_SHREDS {
                    return now.elapsed().as_secs_f64();
                }
            }
            panic!("not all shreds received");
        });
        for shred in shreds {
            net1.send(&shred, localhost_ip_sockaddr(1)).await.unwrap();
        }
        let latency = tokio::join!(receiver).0.unwrap();
        assert!(latency > min);
        assert!(latency < max);
    }

    // Same experiment at 100 MiB/s with a full 1024-slice block.
    #[tokio::test]
    #[ignore]
    async fn high_bandwidth() {
        // set up network with two nodes
        let core = Arc::new(
            SimulatedNetworkCore::default()
                .with_jitter(0.0)
                .with_packet_loss(0.0),
        );
        let net1: SimulatedNetwork<Shred, Shred> = core.join(0, 104_857_600, 104_857_600).await; // 100 MiB/s
        let net2: SimulatedNetwork<Shred, Shred> = core.join(1, 104_857_600, 104_857_600).await; // 100 MiB/s
        // create a full block (1024 slices)
        let mut shredder = RegularShredder::default();
        let mut rng = rand::rng();
        let sk = SecretKey::new(&mut rng);
        let mut shreds = Vec::new();
        let final_slice_index = SliceIndex::new_unchecked(1023);
        for slice_index in final_slice_index.until() {
            let payload = create_slice_payload_with_invalid_txs(None, MAX_DATA_PER_SLICE);
            let header = SliceHeader {
                slot: Slot::new(0),
                slice_index,
                is_last: slice_index == final_slice_index,
            };
            let slice = Slice::from_parts(header, payload, None);
            let slice_shreds = shredder.shred(slice, &sk).unwrap();
            shreds.extend(slice_shreds);
        }
        let t_latency = 1024.0 * MAX_DATA_PER_SLICE as f64 / 100.0 / 1024.0 / 1024.0;
        let p_latency = 0.1;
        let expansion_ratio = (TOTAL_SHREDS as f64) / (DATA_SHREDS as f64);
        let min = p_latency + t_latency * expansion_ratio; // account for erasure coding
        let max = p_latency + t_latency * expansion_ratio * 1.41; // +36% metadata overhead, +5% margin
        // background task: receive shreds and measure latency
        let receiver = tokio::spawn(async move {
            let mut shreds_received = 0;
            let now = Instant::now();
            while let Ok(_shred) = net2.receive().await {
                shreds_received += 1;
                if shreds_received == 1024 * TOTAL_SHREDS {
                    return now.elapsed().as_secs_f64();
                }
            }
            panic!("not all shreds received");
        });
        for shred in shreds {
            net1.send(&shred, localhost_ip_sockaddr(1)).await.unwrap();
        }
        let latency = tokio::join!(receiver).0.unwrap();
        assert!(latency > min);
        assert!(latency < max);
    }

    // Without rate limiting the simulated link must sustain at least 256 MiB/s.
    #[tokio::test]
    #[ignore]
    async fn unlimited_bandwidth() {
        // set up network with two nodes
        let core = Arc::new(
            SimulatedNetworkCore::default()
                .with_jitter(0.0)
                .with_packet_loss(0.0),
        );
        let net1: SimulatedNetwork<Shred, Shred> = core.join_unlimited(0).await;
        let net2: SimulatedNetwork<Shred, Shred> = core.join_unlimited(1).await;
        // create a full block (1024 slices)
        let mut shredder = RegularShredder::default();
        let mut rng = rand::rng();
        let sk = SecretKey::new(&mut rng);
        let mut shreds = Vec::new();
        let final_slice_index = SliceIndex::new_unchecked(1023);
        for slice_index in final_slice_index.until() {
            let payload = create_slice_payload_with_invalid_txs(None, MAX_DATA_PER_SLICE);
            let header = SliceHeader {
                slot: Slot::new(0),
                slice_index,
                is_last: slice_index == final_slice_index,
            };
            let slice = Slice::from_parts(header, payload, None);
            let slice_shreds = shredder.shred(slice, &sk).unwrap();
            shreds.extend(slice_shreds);
        }
        // achieving at least 256 MiB/s
        let t_latency = 1024.0 * MAX_DATA_PER_SLICE as f64 / 256.0 / 1024.0 / 1024.0;
        let p_latency = 0.1;
        let expansion_ratio = (TOTAL_SHREDS as f64) / (DATA_SHREDS as f64);
        let max = p_latency + t_latency * expansion_ratio * 1.41; // account for erasure coding + 36% metadata overhead + 5% margin
        // background task: receive shreds and measure latency
        let receiver = tokio::spawn(async move {
            let mut shreds_received = 0;
            let now = Instant::now();
            while let Ok(_shred) = net2.receive().await {
                shreds_received += 1;
                if shreds_received == 1024 * TOTAL_SHREDS {
                    return now.elapsed().as_secs_f64();
                }
            }
            panic!("not all shreds received");
        });
        for shred in shreds {
            net1.send(&shred, localhost_ip_sockaddr(1)).await.unwrap();
        }
        let latency = tokio::join!(receiver).0.unwrap();
        assert!(latency < max);
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/network/tcp.rs | src/network/tcp.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! TCP network interface.
//!
//! This module provides an implementation of the [`Network`] trait for TCP.
//! It uses [`tokio::net::TcpListener`] and [`tokio::net::TcpStream`] under the hood.
use std::marker::PhantomData;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use async_trait::async_trait;
use futures::SinkExt;
use futures::future::join_all;
use tokio::net::TcpListener;
use tokio::net::tcp::{OwnedReadHalf, OwnedWriteHalf};
use tokio::sync::{Mutex, RwLock, mpsc};
use tokio_util::codec::{FramedRead, FramedWrite, LengthDelimitedCodec};
use wincode::{SchemaRead, SchemaWrite};
use super::Network;
use crate::network::MTU_BYTES;
type StreamReader = FramedRead<OwnedReadHalf, LengthDelimitedCodec>;
type StreamWriter = FramedWrite<OwnedWriteHalf, LengthDelimitedCodec>;
/// Implementation of network abstraction over TCP connections.
// WARN: this is incomplete!
pub struct TcpNetwork<S, R> {
    /// Listener accepting new incoming TCP connections.
    listener: TcpListener,
    /// Read halves of all accepted connections, each with length-delimited framing.
    readers: RwLock<Vec<Mutex<StreamReader>>>,
    /// Write halves of all accepted connections, each with length-delimited framing.
    writers: RwLock<Vec<Mutex<StreamWriter>>>,
    /// Binds the generic send (`S`) and receive (`R`) message types without storing them.
    _msg_types: PhantomData<(S, R)>,
}
// Internal tag distinguishing outgoing (`S`) from incoming (`R`) messages.
// NOTE(review): currently unused in any live code path.
#[allow(dead_code)]
enum TcpMessage<S, R> {
    Sender(S),
    Receiver(R),
}
impl<S, R> TcpNetwork<S, R> {
    /// Creates a new `TcpNetwork` instance bound to the given `port`.
    ///
    /// # Panics
    ///
    /// Panics if the TCP `port` is already in use.
    #[must_use]
    pub fn new(port: u16) -> Self {
        // Bind the wildcard address, consistent with `UdpNetwork::new`.
        let addr = SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, port);
        let listener = futures::executor::block_on(TcpListener::bind(addr)).unwrap();
        // NOTE: a placeholder mpsc channel used to be created (and immediately
        // dropped) here; it was dead code and has been removed.
        Self {
            listener,
            readers: RwLock::new(Vec::new()),
            writers: RwLock::new(Vec::new()),
            _msg_types: PhantomData,
        }
    }

    /// Creates a new `TcpNetwork` instance bound to an arbitrary port.
    ///
    /// The port is arbitrarily assigned by the OS.
    #[must_use]
    pub fn new_with_any_port() -> Self {
        Self::new(0)
    }

    /// Returns the TCP port number the network is bound to.
    ///
    /// This port is used by all streams and to listen for incoming connections.
    pub fn port(&self) -> u16 {
        self.listener.local_addr().unwrap().port()
    }

    /// Sends already-serialized `bytes` over an open connection.
    ///
    /// # Errors
    ///
    /// Returns an error of kind [`std::io::ErrorKind::NotConnected`] if no
    /// connection has been accepted yet, or any error from the stream write.
    async fn send_serialized(&self, bytes: &[u8], _addr: SocketAddr) -> std::io::Result<()> {
        assert!(bytes.len() <= MTU_BYTES, "each message should fit in MTU");
        // TODO: pick the socket matching `_addr`; for now use the first one.
        let writers = self.writers.read().await;
        let Some(writer) = writers.first() else {
            // Previously this indexed `[0]` and panicked when no connection
            // was open; fail gracefully with an I/O error instead.
            return Err(std::io::Error::new(
                std::io::ErrorKind::NotConnected,
                "no open TCP connection",
            ));
        };
        writer.lock().await.send(bytes.to_vec().into()).await?;
        Ok(())
    }
}
#[async_trait]
impl<S, R> Network for TcpNetwork<S, R>
where
    S: SchemaWrite<Src = S> + Send + Sync,
    R: for<'de> SchemaRead<'de, Dst = R> + Send + Sync,
{
    type Recv = R;
    type Send = S;

    /// Serializes `msg` once and sends it to each address in `addrs`.
    async fn send_to_many(
        &self,
        msg: &S,
        addrs: impl Iterator<Item = SocketAddr> + Send,
    ) -> std::io::Result<()> {
        let bytes = &wincode::serialize(msg).unwrap();
        let tasks = addrs.map(async move |addr| self.send_serialized(bytes, addr).await);
        for res in join_all(tasks).await {
            let () = res?;
        }
        Ok(())
    }

    /// Serializes `msg` and sends it to `addr`.
    async fn send(&self, msg: &Self::Send, addr: SocketAddr) -> std::io::Result<()> {
        let bytes = wincode::serialize(msg).unwrap();
        self.send_serialized(&bytes, addr).await
    }

    /// Accepts incoming connections and stores their framed halves.
    ///
    /// NOTE(review): actual message reception is not implemented yet (see the
    /// TODO below), so this loop only registers new connections and never
    /// returns a message.
    async fn receive(&self) -> std::io::Result<R> {
        loop {
            tokio::select! {
                // accept new incoming connections
                Ok((stream, _)) = self.listener.accept() => {
                    let (read_half, write_half) = stream.into_split();
                    // both halves use a 2-byte big-endian length prefix framing
                    let read_framed = FramedRead::new(
                        read_half,
                        LengthDelimitedCodec::builder()
                            .length_field_length(2)
                            .big_endian()
                            .new_codec(),
                    );
                    let write_framed = FramedWrite::new(
                        write_half,
                        LengthDelimitedCodec::builder()
                            .length_field_length(2)
                            .big_endian()
                            .new_codec(),
                    );
                    self.readers.write().await.push(Mutex::new(read_framed));
                    self.writers.write().await.push(Mutex::new(write_framed));
                },
                // TODO: read from all readers
                // Some(Ok(msg)) = readers[0].lock().await.next() => {
                //     match NetworkMessage::from_bytes(&msg) {
                //         Ok(msg) => return Ok(msg),
                //         Err(NetworkReceiveError:Deserialization(_)) => warn!("failed deserializing message"),
                //         Err(err) => return Err(err),
                //     }
                // },
            }
        }
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/network/simulated/token_bucket.rs | src/network/simulated/token_bucket.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
use tokio::time::{Duration, Instant, sleep};
/// Token bucket for rate limiting.
///
/// Used by the simulated network to model limited bandwidth; callers spend
/// one token per byte sent (see `SimulatedNetwork::send_byte_vec`).
pub struct TokenBucket {
    /// Current number of tokens in the bucket.
    bucket: usize,
    /// Maximum number of tokens the bucket can hold.
    capacity: usize,
    /// Rate at which the bucket is refilled (tokens/second).
    refill_rate: usize,
    /// Last time the bucket was refilled.
    last_refill: Instant,
}
impl TokenBucket {
    /// Initial number of tokens in a fresh bucket (one MTU-sized packet).
    const INITIAL_TOKENS: usize = 1500;
    /// Maximum number of tokens a bucket can hold (1000 MTU-sized packets).
    const CAPACITY: usize = 1000 * 1500;

    /// Creates a new token bucket with the given refill rate (tokens/second).
    #[must_use]
    pub fn new(refill_rate: usize) -> Self {
        Self {
            bucket: Self::INITIAL_TOKENS,
            capacity: Self::CAPACITY,
            refill_rate,
            last_refill: Instant::now(),
        }
    }

    /// Refills the bucket with the correct number of tokens,
    /// based on the time since the last refill.
    pub fn refill(&mut self) {
        let now = Instant::now();
        let elapsed = now.duration_since(self.last_refill);
        let added = (self.refill_rate as f64 * elapsed.as_nanos() as f64 / 1e9) as usize;
        // Only advance `last_refill` when at least one whole token accrued.
        // Previously it advanced unconditionally, silently discarding the
        // sub-token fraction on every call; under frequent polling at low
        // rates this could under-fill the bucket indefinitely.
        if added > 0 {
            self.bucket = (self.bucket + added).min(self.capacity);
            self.last_refill = now;
        }
    }

    /// Waits until the bucket has at least `tokens` tokens, then consumes them.
    pub async fn wait_for(&mut self, tokens: usize) {
        loop {
            self.refill();
            if self.bucket >= tokens {
                self.bucket -= tokens;
                break;
            }
            // sleep for the estimated time needed to accumulate the deficit
            let wait_time =
                Duration::from_secs_f64((tokens - self.bucket) as f64 / self.refill_rate as f64);
            sleep(wait_time).await;
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // testing token bucket accuracy to within +/-3% margin
    const ACCURACY: f64 = 0.03;

    /// Pulls `elements` batches of `element_size` tokens from a bucket refilled
    /// at `bucket_rate` tokens/s and checks the elapsed time against theory.
    async fn token_bucket_experiment(bucket_rate: usize, elements: usize, element_size: usize) {
        let mut bucket = TokenBucket::new(bucket_rate);
        let now = Instant::now();
        for _ in 0..elements {
            bucket.wait_for(element_size).await;
        }
        let elapsed = now.elapsed().as_secs_f64();
        // expected time = total tokens consumed / refill rate
        let expected = elements as f64 * element_size as f64 / bucket_rate as f64;
        assert!(elapsed > expected * (1.0 - ACCURACY));
        assert!(elapsed < expected * (1.0 + ACCURACY));
    }

    #[tokio::test]
    async fn low_rate() {
        // 256 KiB/s : 1000 packets a 1500 bytes
        token_bucket_experiment(256 * 1024, 1000, 1500).await;
    }

    #[tokio::test]
    async fn high_rate() {
        // 100 MiB/s : 500k packets a 1500 bytes
        token_bucket_experiment(100 * 1024 * 1024, 500_000, 1500).await;
    }

    // When run concurrently with other tests on github, then the test fails.
    // Running sequentially seems to help.
    #[tokio::test]
    #[ignore]
    async fn extreme_rate() {
        // 1 GiB/s : 5M packets a 1500 bytes
        token_bucket_experiment(1024 * 1024 * 1024, 5_000_000, 1500).await;
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/network/simulated/stake_distribution.rs | src/network/simulated/stake_distribution.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Utilities for working with the stake distribution of Solana mainnet.
//!
//! Validator data is taken from [Validators.app](https://validators.app/).
//! The data is stored in the `data/mainnet_validators_epoch860.json` file.
//! It contains all validators (i.e. nodes with non-zero stake) on Solana mainnet.
//!
//! # Examples
//!
//! ```
//! use alpenglow::network::simulated::stake_distribution::{VALIDATOR_DATA, ValidatorData};
//!
//! let mut stakes = VALIDATOR_DATA
//! .iter()
//! .filter_map(ValidatorData::active_stake)
//! .collect::<Vec<_>>();
//! ```
use std::collections::HashSet;
use std::fs::File;
use std::sync::LazyLock;
use log::{info, warn};
use serde::Deserialize;
use super::ping_data::{PingServer, coordinates_for_city, find_closest_ping_server, get_ping};
use crate::crypto::aggsig;
use crate::crypto::signature::SecretKey;
use crate::network::dontcare_sockaddr;
use crate::{Stake, ValidatorId, ValidatorInfo};
/// Information about all validators on Solana mainnet.
///
/// Loaded lazily on first access; panics if the data file is missing or malformed.
pub static VALIDATOR_DATA: LazyLock<Vec<ValidatorData>> = LazyLock::new(|| {
    let file = File::open("data/mainnet_validators_epoch860.json").unwrap();
    let validators: Vec<ValidatorData> = serde_json::from_reader(file).unwrap();
    validators
});
/// Data for a single validator on Solana.
///
/// This matches the format of the data in `data/mainnet_validators_epoch860.json`
/// (Validators.app export); field names mirror the JSON keys one-to-one.
#[allow(dead_code)]
#[derive(Clone, Debug, Default, Deserialize)]
pub struct ValidatorData {
    // identity and metadata
    network: String,
    account: String,
    name: Option<String>,
    keybase_id: Option<String>,
    www_url: Option<String>,
    details: Option<String>,
    avatar_url: Option<String>,
    created_at: String,
    updated_at: String,
    admin_warning: Option<String>,
    // Jito-related fields
    jito: bool,
    jito_commission: Option<u64>,
    stake_pools_list: Vec<String>,
    is_active: bool,
    avatar_file_url: Option<String>,
    // stake and Validators.app scoring
    active_stake: Option<Stake>,
    authorized_withdrawer_score: i8,
    commission: Option<u8>,
    data_center_concentration_score: i8,
    delinquent: bool,
    published_information_score: i8,
    root_distance_score: i8,
    security_report_score: i8,
    skipped_slot_score: i8,
    skipped_after_score: i8,
    software_version: String,
    software_version_score: i8,
    stake_concentration_score: i8,
    consensus_mods_score: i8,
    total_score: i8,
    vote_distance_score: i8,
    // network location / data center info
    ip: String,
    data_center_key: Option<String>,
    autonomous_system_number: Option<u32>,
    // latitude/longitude are stored as strings in the JSON source
    latitude: Option<String>,
    longitude: Option<String>,
    data_center_host: Option<String>,
    // voting statistics
    vote_account: String,
    epoch_credits: Option<u64>,
    epoch: Option<u16>,
    skipped_slots: Option<u64>,
    skipped_slot_percent: Option<String>,
    ping_time: Option<f64>,
    url: String,
}
impl ValidatorData {
    /// Returns the active stake of a validator, if it has non-zero active stake.
    ///
    /// Inactive or delinquent validators yield `None`, as does a recorded
    /// stake of zero.
    pub fn active_stake(&self) -> Option<Stake> {
        if self.is_active && !self.delinquent {
            self.active_stake.filter(|&stake| stake > 0)
        } else {
            None
        }
    }
}
/// Same as [`VALIDATOR_DATA`], but for Sui mainnet.
pub static SUI_VALIDATOR_DATA: LazyLock<Vec<ValidatorData>> = LazyLock::new(|| {
    // read CSV
    let file = File::open("data/sui_validators.csv").unwrap();
    let reader = csv::Reader::from_reader(file);
    let sui_validators = reader
        .into_deserialize::<SuiValidatorData>()
        .map(Result::unwrap);
    // map from SuiValidatorData to ValidatorData
    let validators: Vec<ValidatorData> = sui_validators
        .into_iter()
        .map(|v| {
            // "lat,lon" string -> separate coordinate fields
            let (lat, lon) = v.coords.split_once(',').unwrap();
            ValidatorData {
                name: Some(v.name),
                is_active: true,
                // stake is rounded to a whole number, then scaled by 100
                active_stake: Some((v.stake.round() * 100.0) as Stake),
                delinquent: false,
                // fall back to the validator's address if no IP is recorded
                ip: v.ip.unwrap_or_else(|| v.address.clone()),
                data_center_key: Some(format!(
                    "{}-{}-{}",
                    v.country.unwrap_or_default(),
                    v.city.unwrap_or_default(),
                    v.cloud.clone().unwrap_or_default()
                )),
                latitude: Some(lat.to_owned()),
                longitude: Some(lon.to_owned()),
                data_center_host: v.cloud,
                ping_time: Some(v.ping),
                url: v.address,
                ..Default::default()
            }
        })
        .collect();
    validators
});
/// Data for a single validator on Sui.
///
/// This matches the format of the data in `data/sui_validators.csv`.
#[derive(Clone, Debug, Deserialize)]
pub struct SuiValidatorData {
    name: String,
    stake: f64,
    address: String,
    ip: Option<String>,
    cloud: Option<String>,
    city: Option<String>,
    country: Option<String>,
    /// Comma-separated `"latitude,longitude"` pair.
    coords: String,
    ping: f64,
}
/// Artificial stake distribution for 5 global hubs.
///
/// Uses the same data format as [`VALIDATOR_DATA`].
pub static FIVE_HUBS_VALIDATOR_DATA: LazyLock<Vec<ValidatorData>> = LazyLock::new(|| {
    // equal 20% stake weight per hub city (weights sum to 1.0)
    hub_validator_data(vec![
        ("San Francisco".to_string(), 0.2),
        ("Secaucus".to_string(), 0.2), // NYC/NJ
        ("London".to_string(), 0.2),
        ("Shanghai".to_string(), 0.2),
        ("Tokyo".to_string(), 0.2),
    ])
});
/// Artificial stake distribution for location of top 10 global stock exchanges.
///
/// Uses the same data format as [`VALIDATOR_DATA`].
pub static STOCK_EXCHANGES_VALIDATOR_DATA: LazyLock<Vec<ValidatorData>> = LazyLock::new(|| {
    // per-city stake weights (sum to 1.0)
    hub_validator_data(vec![
        ("Toronto".to_string(), 0.1),
        ("Secaucus".to_string(), 0.2), // NYC/NJ
        ("Westpoort".to_string(), 0.1), // Amsterdam
        ("Taipei".to_string(), 0.1), // should maybe be Shenzhen (but we don't have ping data)
        ("Pune".to_string(), 0.2), // should maybe be Mumbai (but we don't have ping data)
        ("Shanghai".to_string(), 0.1),
        ("Hong Kong".to_string(), 0.1),
        ("Tokyo".to_string(), 0.1),
    ])
});
/// Loads and converts a list of [`ValidatorData`], augmenting it with ping data.
///
/// Returns a tuple `(all_val, val_with_ping)`, where:
/// - `all_val` is a list [`ValidatorInfo`] for all validators
/// - `val_with_ping` is a list of tuples of [`ValidatorInfo`] and [`PingServer`]
///   for validators for which we find ping data in the dataset
///
/// Validators without active stake are skipped entirely. Validators without
/// coordinates, or whose closest ping server has missing/zero ping data to
/// any other selected server, are excluded from the second list only.
#[must_use]
pub fn validators_from_validator_data(
    validator_data: &[ValidatorData],
) -> (
    Vec<ValidatorInfo>,
    Vec<(ValidatorInfo, &'static PingServer)>,
) {
    // first pass: one ValidatorInfo per validator with active stake;
    // keys are freshly generated, addresses are placeholders
    let mut validators = Vec::new();
    for v in validator_data {
        if let Some(stake) = v.active_stake() {
            let id = validators.len() as ValidatorId;
            let sk = SecretKey::new(&mut rand::rng());
            let voting_sk = aggsig::SecretKey::new(&mut rand::rng());
            validators.push(ValidatorInfo {
                id,
                stake,
                pubkey: sk.to_pk(),
                voting_pubkey: voting_sk.to_pk(),
                all2all_address: dontcare_sockaddr(),
                disseminator_address: dontcare_sockaddr(),
                repair_request_address: dontcare_sockaddr(),
                repair_response_address: dontcare_sockaddr(),
            });
        }
    }
    // assign closest ping servers to validators
    let total_stake: Stake = validators.iter().map(|v| v.stake).sum();
    let mut validators_with_ping_data = Vec::new();
    let mut stake_with_ping_server = 0;
    for v in validator_data {
        let Some(stake) = v.active_stake() else {
            continue;
        };
        // skip validators without known coordinates
        let (Some(lat), Some(lon)) = (&v.latitude, &v.longitude) else {
            continue;
        };
        let ping_server = find_closest_ping_server(lat.parse().unwrap(), lon.parse().unwrap());
        stake_with_ping_server += stake;
        // NOTE(review): fresh keypairs are generated here, so entries in this
        // second list do NOT share keys with the corresponding entry in
        // `validators` above -- confirm this is intended.
        let sk = SecretKey::new(&mut rand::rng());
        let voting_sk = aggsig::SecretKey::new(&mut rand::rng());
        validators_with_ping_data.push((
            ValidatorInfo {
                id: validators_with_ping_data.len() as ValidatorId,
                stake,
                pubkey: sk.to_pk(),
                voting_pubkey: voting_sk.to_pk(),
                all2all_address: dontcare_sockaddr(),
                disseminator_address: dontcare_sockaddr(),
                repair_request_address: dontcare_sockaddr(),
                repair_response_address: dontcare_sockaddr(),
            },
            ping_server,
        ));
    }
    let frac_wo_ping_server = 100.0 - stake_with_ping_server as f64 * 100.0 / total_stake as f64;
    warn!("discarding {frac_wo_ping_server:.2}% of validators w/o ping server");
    // determine pings of validator pairs; a node is discarded if any pair it
    // participates in has no ping data, or a bogus 0.0 measurement between
    // two distinct servers (the dataset uses 0.0 as a "missing" sentinel)
    let mut nodes_without_ping = HashSet::new();
    for (v1, p1) in &validators_with_ping_data {
        for (v2, p2) in &validators_with_ping_data {
            if get_ping(p1.id, p2.id).is_none()
                || (get_ping(p2.id, p1.id) == Some(0.0) && p2.id != p1.id)
            {
                nodes_without_ping.insert(v1.id);
                nodes_without_ping.insert(v2.id);
            }
        }
    }
    let frac_wo_ping =
        nodes_without_ping.len() as f64 * 100.0 / validators_with_ping_data.len() as f64;
    warn!("discarding {frac_wo_ping:.2}% of nodes w/o ping");
    warn!("{} validators without ping data", nodes_without_ping.len());
    validators_with_ping_data.retain(|(v, _)| !nodes_without_ping.contains(&v.id));
    let vals_left = validators_with_ping_data.len();
    info!("{vals_left} validators with ping data",);
    // give validators with ping data consecutive IDs
    for (i, v) in validators_with_ping_data.iter_mut().enumerate() {
        v.0.id = i as ValidatorId;
    }
    (validators, validators_with_ping_data)
}
/// Generates an artificial stake distribution.
///
/// The input `hubs` contains a list of (city, fraction of total stake).
/// Each city has to be in the ping dataset, i.e. supported by [`coordinates_for_city`].
/// Outputs a stake distribution in the same data format as [`VALIDATOR_DATA`].
///
/// # Panics
///
/// Panics if any given city is not present in the ping dataset.
#[must_use]
pub fn hub_validator_data(hubs: Vec<(String, f64)>) -> Vec<ValidatorData> {
    // number of identical validators generated per hub
    const VALIDATORS_PER_HUB: usize = 30;
    let mut validators = Vec::with_capacity(hubs.len() * VALIDATORS_PER_HUB);
    for (city, frac_stake) in hubs {
        let (lat, lon) = coordinates_for_city(&city).unwrap();
        // per-validator stake is identical within a hub, so compute it once
        // (the hub's stake fraction split evenly across its validators)
        let stake = (frac_stake * 100.0 * 10_000.0 / VALIDATORS_PER_HUB as f64).round() as Stake;
        for _ in 0..VALIDATORS_PER_HUB {
            validators.push(ValidatorData {
                is_active: true,
                active_stake: Some(stake),
                delinquent: false,
                latitude: Some(lat.to_string()),
                longitude: Some(lon.to_string()),
                ..Default::default()
            });
        }
    }
    validators
}
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke test: dataset loading produces the expected validator counts and
    // the artificial hub distributions map to the expected number of cities.
    #[test]
    fn basic() {
        let validator_data = hub_validator_data(vec![("San Francisco".to_string(), 1.0)]);
        let (_, vals_with_ping) = validators_from_validator_data(&validator_data);
        assert_eq!(count_different_cities(&vals_with_ping), 1);
        let (validators, _) = validators_from_validator_data(&VALIDATOR_DATA);
        assert_eq!(validators.len(), 954);
        let (validators, _) = validators_from_validator_data(&SUI_VALIDATOR_DATA);
        assert_eq!(validators.len(), 106);
        let (_, vals_with_ping) = validators_from_validator_data(&FIVE_HUBS_VALIDATOR_DATA);
        assert_eq!(count_different_cities(&vals_with_ping), 5);
        let (_, vals_with_ping) = validators_from_validator_data(&STOCK_EXCHANGES_VALIDATOR_DATA);
        assert_eq!(count_different_cities(&vals_with_ping), 8);
    }

    // Counts the distinct ping-server cities among the given validators.
    fn count_different_cities(validators: &[(ValidatorInfo, &PingServer)]) -> usize {
        let mut cities = HashSet::new();
        for (_, p) in validators {
            cities.insert(&p.location);
        }
        cities.len()
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/network/simulated/core.rs | src/network/simulated/core.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::{BinaryHeap, HashMap};
use std::marker::PhantomData;
use std::sync::Arc;
use std::time::{Duration, Instant};
use log::warn;
use rand::Rng;
use tokio::sync::{Mutex, RwLock, mpsc};
use super::SimulatedNetwork;
use super::token_bucket::TokenBucket;
use crate::ValidatorId;
/// A single in-flight packet inside the simulated network.
struct SimulatedPacket {
    // sender (kept for debugging; not read by the delivery loop)
    _from: ValidatorId,
    // destination validator; used to pick the delivery channel
    to: ValidatorId,
    // raw serialized message bytes
    payload: Vec<u8>,
    // instant at which the packet becomes deliverable
    deliver_at: Instant,
}
// `BinaryHeap` is a max-heap; ordering packets by *reversed* delivery time
// turns it into a min-heap, so the earliest-due packet sits on top.
impl Ord for SimulatedPacket {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.deliver_at.cmp(&other.deliver_at).reverse()
    }
}

impl PartialOrd for SimulatedPacket {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(Ord::cmp(self, other))
    }
}

// Equality considers only the delivery time, consistent with `Ord` above.
impl PartialEq for SimulatedPacket {
    fn eq(&self, other: &Self) -> bool {
        self.deliver_at.eq(&other.deliver_at)
    }
}

impl Eq for SimulatedPacket {}
/// Simulated network core, where messages are routed between nodes.
///
/// It stores virtual latencies for all links between any pair of nodes.
/// Messages sent by nodes into the network core are then delayed accordingly.
/// A background task (spawned in [`Self::new`]) drains the pending queue and
/// delivers packets once their deadline has passed.
pub struct SimulatedNetworkCore {
    /// Map from node ID to channel for delivering packets.
    nodes: Arc<RwLock<HashMap<ValidatorId, mpsc::Sender<SimulatedPacket>>>>,
    /// Latency between each pair of nodes.
    latencies: RwLock<HashMap<(ValidatorId, ValidatorId), Duration>>,
    /// Fallback latency to use for any link that is not configured.
    default_latency: Duration,
    /// Maximum jitter to apply to each packet in milliseconds.
    per_packet_jitter_ms: f64,
    /// Any packet is lost with this probability.
    per_packet_loss_probability: f64,
    /// Priority queue of packets that are waiting to be delivered.
    pending: Arc<Mutex<BinaryHeap<SimulatedPacket>>>,
}
impl SimulatedNetworkCore {
/// Creates a new network core with the given latency and packet loss parameters.
pub fn new(latency_ms: u64, jitter_ms: f64, packet_loss: f64) -> Self {
let pending = Arc::new(Mutex::new(BinaryHeap::<SimulatedPacket>::new()));
let nodes = Arc::new(RwLock::new(HashMap::<
ValidatorId,
mpsc::Sender<SimulatedPacket>,
>::new()));
let p = pending.clone();
let n = nodes.clone();
tokio::spawn(async move {
loop {
let mut guard = p.lock().await;
if let Some(msg) = guard.peek()
&& msg.deliver_at <= Instant::now()
{
let msg = guard.pop().unwrap();
let n_guard = n.read().await;
let channel = n_guard.get(&msg.to).unwrap();
if let Err(_e) = channel.send(msg).await {
#[cfg(test)]
println!("sending failed. Ignoring");
warn!("sending failed. Ignoring");
}
}
}
});
Self {
nodes,
latencies: RwLock::new(HashMap::new()),
default_latency: Duration::from_millis(latency_ms),
per_packet_jitter_ms: jitter_ms,
per_packet_loss_probability: packet_loss,
pending,
}
}
    /// Turns this instance into a new instance with a different default latency.
    ///
    /// The default latency applies to any link without an explicit entry.
    #[must_use]
    pub const fn with_default_latency(mut self, latency: Duration) -> Self {
        self.default_latency = latency;
        self
    }

    /// Turns this instance into a new instance with a different latency jitter.
    ///
    /// `jitter` is the maximum per-packet jitter in milliseconds.
    #[must_use]
    pub const fn with_jitter(mut self, jitter: f64) -> Self {
        self.per_packet_jitter_ms = jitter;
        self
    }

    /// Turns this instance into a new instance with a different packet loss rate.
    ///
    /// `probability` is the chance, in `[0, 1]`, that any given packet is dropped.
    #[must_use]
    pub const fn with_packet_loss(mut self, probability: f64) -> Self {
        self.per_packet_loss_probability = probability;
        self
    }
    /// Adds a node *without* bandwidth limits to the simulated network.
    ///
    /// The node is registered in the network core with channels.
    /// Returns a [`SimulatedNetwork`] interface for the node.
    /// A new task is spawned that handles the delayed delivery of any messages
    /// targeting that node.
    ///
    /// For limited bandwidth, use [`Self::join`] instead.
    pub async fn join_unlimited<S, R>(self: &Arc<Self>, id: ValidatorId) -> SimulatedNetwork<S, R> {
        // pending -> background
        let (pb_tx, mut pb_rx) = mpsc::channel(65536);
        // background -> receiver
        let (br_tx, br_rx) = mpsc::channel(65536);
        self.nodes.write().await.insert(id, pb_tx);
        let receiver = Mutex::new(br_rx);
        let network_core = Arc::clone(self);
        // background task: forward payloads from the core straight to the
        // node's receive buffer (no rate limiting)
        tokio::spawn(async move {
            while let Some(msg) = pb_rx.recv().await {
                br_tx.send(msg.payload).await.unwrap();
            }
        });
        SimulatedNetwork {
            id,
            network_core,
            receiver,
            // no token bucket: uplink is unlimited
            limiter: None,
            _msg_types: PhantomData,
        }
    }
    /// Adds a node *with* bandwidth limits to the simulated network.
    ///
    /// The node is registered in the network core with channels.
    /// Returns a [`SimulatedNetwork`] interface for the node.
    /// A new task is spawned that handles the delayed delivery of any messages
    /// targeting that node.
    ///
    /// `up_bandwidth` / `down_bandwidth` are enforced via token buckets;
    /// a value of 0 is clamped to 1 to avoid division by zero.
    ///
    /// For unlimited bandwidth, use [`Self::join_unlimited`] instead.
    pub async fn join<S, R>(
        self: &Arc<Self>,
        id: ValidatorId,
        up_bandwidth: usize,
        down_bandwidth: usize,
    ) -> SimulatedNetwork<S, R> {
        // pending -> background
        // NOTE(review): capacity 1000 here vs. 65536 in `join_unlimited` --
        // confirm the smaller buffer is intentional for the limited case.
        let (pb_tx, mut pb_rx) = mpsc::channel(1000);
        // background -> receiver
        let (br_tx, br_rx) = mpsc::channel(1000);
        self.nodes.write().await.insert(id, pb_tx);
        let receiver = Mutex::new(br_rx);
        let network_core = Arc::clone(self);
        // background task: throttle incoming payloads to the download
        // bandwidth before pushing them into the node's receive buffer
        tokio::spawn(async move {
            let dl_bw = down_bandwidth.max(1); // prevent div by zero
            let mut limiter = TokenBucket::new(dl_bw);
            while let Some(msg) = pb_rx.recv().await {
                limiter.wait_for(msg.payload.len()).await;
                br_tx.send(msg.payload).await.unwrap();
            }
        });
        // uplink limiter, consulted by the node when sending
        let limiter = RwLock::new(TokenBucket::new(up_bandwidth.max(1)));
        SimulatedNetwork {
            id,
            network_core,
            receiver,
            limiter: Some(limiter),
            _msg_types: PhantomData,
        }
    }
/// Sets the latency between two nodes.
///
/// The latency is symmetric in both directions.
/// For asymmetric links, use [`Self::set_asymmetric_latency`] instead.
pub async fn set_latency(&self, node1: ValidatorId, node2: ValidatorId, latency: Duration) {
self.latencies.write().await.insert((node1, node2), latency);
self.latencies.write().await.insert((node2, node1), latency);
}
/// Sets the latency from one node to the other.
///
/// The latency is set only in one direction, `from` -> `to`.
/// For symmetric links, use [`Self::set_latency`] instead.
pub async fn set_asymmetric_latency(
&self,
from: ValidatorId,
to: ValidatorId,
latency: Duration,
) {
self.latencies.write().await.insert((from, to), latency);
}
/// Sends a simulated message from one node to another.
///
/// This schedules delivery for the message after the correct propagation delay.
pub async fn send(&self, payload: Vec<u8>, from: ValidatorId, to: ValidatorId) {
if rand::rng().random_range(0.0..1.0) < self.per_packet_loss_probability {
return;
}
let now = Instant::now();
let guard = self.latencies.read().await;
let mut latency = *guard.get(&(from, to)).unwrap_or(&self.default_latency);
if self.per_packet_jitter_ms > 0.0 {
let jitter = rand::rng().random_range(0.0..self.per_packet_jitter_ms);
latency += Duration::from_secs_f64(jitter / 1000.0);
}
if from == to {
latency = Duration::from_millis(0);
}
let packet = SimulatedPacket {
deliver_at: now + latency,
_from: from,
to,
payload,
};
let mut guard = self.pending.lock().await;
guard.push(packet);
}
}
impl Default for SimulatedNetworkCore {
fn default() -> Self {
Self::new(100, 5.0, 0.01)
}
}
#[cfg(test)]
mod tests {
    use tokio::time::timeout;

    use super::*;
    use crate::network::{Network, localhost_ip_sockaddr};
    use crate::test_utils::{Ping, PingOrPong};

    // test simulated latency accuracy to within +/-5%
    const ACCURACY: f64 = 0.05;

    // Checks that a symmetric link delays packets by the configured latency
    // (within ACCURACY) in both directions.
    // When run concurrently with other tests on github, then the test fails.
    // Running sequentially seems to help.
    #[tokio::test]
    #[ignore]
    async fn symmetric() {
        // set up network with two nodes
        let msg = Ping::default();
        let core = Arc::new(
            SimulatedNetworkCore::default()
                .with_jitter(0.0)
                .with_packet_loss(0.0),
        );
        let net1 = core.join_unlimited(0).await;
        let net2 = core.join_unlimited(1).await;
        core.set_latency(0, 1, Duration::from_millis(10)).await;
        // one direction
        net1.send(&msg, localhost_ip_sockaddr(1)).await.unwrap();
        let now = Instant::now();
        let _: Ping = net2.receive().await.unwrap();
        let latency = now.elapsed().as_micros();
        let min = (10_000.0 * (1.0 - ACCURACY)) as u128;
        let max = (10_000.0 * (1.0 + ACCURACY)) as u128;
        assert!(latency > min);
        assert!(latency < max);
        // other direction
        net2.send(&msg, localhost_ip_sockaddr(0)).await.unwrap();
        let now = Instant::now();
        let _: Ping = net1.receive().await.unwrap();
        let latency = now.elapsed().as_micros();
        let min = (10_000.0 * (1.0 - ACCURACY)) as u128;
        let max = (10_000.0 * (1.0 + ACCURACY)) as u128;
        assert!(latency > min);
        assert!(latency < max);
    }

    // Checks that per-direction latencies configured via
    // `set_asymmetric_latency` are honored independently.
    // When run concurrently with other tests on github, then the test fails.
    // Running sequentially seems to help.
    #[tokio::test]
    #[ignore]
    async fn asymmetric() {
        // set up network with two nodes
        let msg = Ping::default();
        let core = Arc::new(
            SimulatedNetworkCore::default()
                .with_jitter(0.0)
                .with_packet_loss(0.0),
        );
        let net1 = core.join_unlimited(0).await;
        let net2 = core.join_unlimited(1).await;
        core.set_asymmetric_latency(0, 1, Duration::from_millis(10))
            .await;
        core.set_asymmetric_latency(1, 0, Duration::from_millis(100))
            .await;
        // one direction
        net1.send(&msg, localhost_ip_sockaddr(1)).await.unwrap();
        let now = Instant::now();
        let _: Ping = net2.receive().await.unwrap();
        let latency = now.elapsed().as_micros();
        let min = (10_000.0 * (1.0 - ACCURACY)) as u128;
        let max = (10_000.0 * (1.0 + ACCURACY)) as u128;
        assert!(
            latency > min,
            "latency {latency} should be greater than {min}"
        );
        assert!(
            latency < max,
            "latency {latency} should be less than max {max}"
        );
        // other direction
        net2.send(&msg, localhost_ip_sockaddr(0)).await.unwrap();
        let now = Instant::now();
        let _: Ping = net1.receive().await.unwrap();
        let latency = now.elapsed().as_micros();
        let min = (100_000.0 * (1.0 - ACCURACY)) as u128;
        let max = (100_000.0 * (1.0 + ACCURACY)) as u128;
        assert!(latency > min);
        assert!(latency < max);
    }

    // Checks that packets are delivered in deadline order regardless of the
    // order in which they were sent.
    #[tokio::test]
    async fn latency_order() {
        // set up network with three nodes
        let core = Arc::new(SimulatedNetworkCore::default().with_packet_loss(0.0));
        let net1: SimulatedNetwork<PingOrPong, PingOrPong> = core.join_unlimited(0).await;
        let net2: SimulatedNetwork<PingOrPong, PingOrPong> = core.join_unlimited(1).await;
        let net3: SimulatedNetwork<PingOrPong, PingOrPong> = core.join_unlimited(2).await;
        let sock0 = localhost_ip_sockaddr(0);
        core.set_latency(0, 1, Duration::from_millis(10)).await;
        core.set_latency(0, 2, Duration::from_millis(20)).await;
        // send ping on faster link
        let msg = PingOrPong::Ping([0; 32]);
        net2.send(&msg, sock0).await.unwrap();
        // send pong on slower link
        let msg = PingOrPong::Pong([0; 32]);
        net3.send(&msg, sock0).await.unwrap();
        // ping should arrive before pong
        let received = net1.receive().await.unwrap();
        assert_eq!(received, PingOrPong::Ping([0; 32]));
        let received = net1.receive().await.unwrap();
        assert_eq!(received, PingOrPong::Pong([0; 32]));
        // queue messages in the other order
        let msg = PingOrPong::Pong([0; 32]);
        net3.send(&msg, sock0).await.unwrap();
        let msg = PingOrPong::Ping([0; 32]);
        net2.send(&msg, sock0).await.unwrap();
        // ping should still arrive before pong
        let received = net1.receive().await.unwrap();
        assert_eq!(received, PingOrPong::Ping([0; 32]));
        let received = net1.receive().await.unwrap();
        assert_eq!(received, PingOrPong::Pong([0; 32]));
    }

    // Statistical check that the configured packet loss rate is applied.
    #[tokio::test]
    async fn packet_loss() {
        // set up network with two nodes and 50% packet loss
        let core = Arc::new(SimulatedNetworkCore::default().with_packet_loss(0.5));
        let net1: SimulatedNetwork<Ping, Ping> = core.join_unlimited(0).await;
        let net2: SimulatedNetwork<Ping, Ping> = core.join_unlimited(1).await;
        // send 1000 pings
        let msg = Ping::default();
        for _ in 0..1000 {
            net1.send(&msg, localhost_ip_sockaddr(1)).await.unwrap();
        }
        let mut pings_received = 0;
        let max_time = Duration::from_millis(100);
        while let Ok(Ok(_)) = timeout(max_time, net2.receive()).await {
            pings_received += 1;
        }
        // should receive roughly 50% of pings
        assert!(pings_received > 400);
        assert!(pings_received < 600);
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/network/simulated/ping_data.rs | src/network/simulated/ping_data.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Utilities for working with a real-world ping dataset.
//!
//! The specific dataset is from [WonderProxy](https://wonderproxy.com/blog/a-day-in-the-life-of-the-internet/).
//! It contains ping measurements between 200+ servers all around the world.
//! These ping measurements were collected over the 24 hours of 2020-07-19.
//!
//! If not already done, the dataset can be downloaded with the following command:
//! ```bash
//! ./download_data.sh
//! ```
//!
//! # Examples
//!
//! ```
//! use alpenglow::network::simulated::ping_data::{find_closest_ping_server, get_ping};
//!
//! let berlin = find_closest_ping_server(52.516, 13.378);
//! let zurich = find_closest_ping_server(47.376, 8.547);
//! let ping_b2z = get_ping(berlin.id, zurich.id);
//! ```
use std::fs::File;
use std::sync::LazyLock;
use csv::ReaderBuilder;
use geo::{Distance, Haversine, Point};
use serde::Deserialize;
/// Upper bound on the number of ping servers supported by the flat
/// `PING_DATA` matrix; the dataset must not exceed this.
const MAX_PING_SERVERS: usize = 300;

/// All ping servers from the dataset, loaded lazily from
/// `data/servers-2020-07-19.csv` on first access.
///
/// Panics on first access if the CSV file is missing or malformed.
static PING_SERVERS: LazyLock<Vec<PingServer>> = LazyLock::new(|| {
    let mut output = Vec::with_capacity(MAX_PING_SERVERS);
    let mut rdr = ReaderBuilder::new()
        .trim(csv::Trim::All)
        .from_path("data/servers-2020-07-19.csv")
        .unwrap();
    for result in rdr.deserialize() {
        let record: PingServer = result.unwrap();
        output.push(record);
    }
    assert!(output.len() <= MAX_PING_SERVERS);
    output
});
/// Flat matrix of average pings between server pairs, loaded lazily from
/// `data/pings-2020-07-19-2020-07-20.csv` on first access.
///
/// Indexed via `get_index(source, destination)`. Pairs with no measurement
/// in the dataset keep their initial value of `0.0` (a sentinel that callers
/// must handle). Multiple measurements for the same pair are averaged.
///
/// Panics on first access if the CSV file is missing or malformed.
static PING_DATA: LazyLock<Vec<f64>> = LazyLock::new(|| {
    let mut output = vec![0.0; MAX_PING_SERVERS * MAX_PING_SERVERS];
    // number of measurements folded into each cell so far
    let mut counts = vec![0; MAX_PING_SERVERS * MAX_PING_SERVERS];
    let file = File::open("data/pings-2020-07-19-2020-07-20.csv").unwrap();
    let mut rdr = csv::Reader::from_reader(file);
    for result in rdr.deserialize() {
        let record: PingMeasurement = result.unwrap();
        assert!(record.source < MAX_PING_SERVERS);
        assert!(record.destination < MAX_PING_SERVERS);
        let index = get_index(record.source, record.destination);
        let count = counts.get_mut(index).unwrap();
        if *count == 0 {
            output[index] = record.avg;
        } else {
            // incremental mean: fold the new measurement into the running average
            let avg = output[index];
            let new_avg = (avg * *count as f64 + record.avg) / (*count + 1) as f64;
            output[index] = new_avg;
        }
        *count += 1;
    }
    output
});
/// A ping server from the dataset.
///
/// Unused CSV columns are kept (prefixed with `_`) so that `serde` can map
/// the full record; only `id`, `location` and the coordinates are read.
#[derive(Clone, Debug, Deserialize)]
pub struct PingServer {
    /// Server ID, to be used as `source` or `destination` in ping measurements.
    pub id: usize,
    #[serde(rename = "name")]
    _name: String,
    #[serde(rename = "title")]
    _title: String,
    /// City of the server.
    pub location: String,
    #[serde(rename = "state")]
    _state: String,
    #[serde(rename = "country")]
    _country: String,
    #[serde(rename = "state_abbv")]
    _state_abbv: String,
    #[serde(rename = "continent")]
    _contintent: Option<u8>,
    // geographic coordinates, exposed via `coordinates()`
    latitude: f64,
    longitude: f64,
}
/// A ping measurement from the dataset.
///
/// Only the server pair and the average ping are used; the remaining CSV
/// columns are kept (prefixed with `_`) for deserialization.
#[derive(Clone, Debug, Deserialize)]
struct PingMeasurement {
    // source / destination server IDs (match `PingServer::id`)
    source: usize,
    destination: usize,
    #[serde(rename = "timestamp")]
    _timestamp: String,
    #[serde(rename = "min")]
    _min: f64,
    // average ping in milliseconds for this measurement window
    avg: f64,
    #[serde(rename = "max")]
    _max: f64,
    #[serde(rename = "mdev")]
    _mdev: f64,
}
/// Gives the coordinates for a city from the ping dataset.
///
/// Returns `None` if no ping server with the given city is in the dataset.
/// If multiple servers share the city name, the first match wins.
pub fn coordinates_for_city(city: &str) -> Option<(f64, f64)> {
    PING_SERVERS
        .iter()
        .find(|server| server.location == city)
        .map(|server| server.coordinates())
}
/// Gives the ping server from the dataset that is closest to the given coordinates.
///
/// Distance is measured along the great circle (Haversine), truncated to
/// whole meters for the comparison.
pub fn find_closest_ping_server(lat: f64, lon: f64) -> &'static PingServer {
    let target = Point::new(lon, lat);
    let distance_to = |server: &PingServer| {
        let pos = Point::new(server.longitude, server.latitude);
        Haversine.distance(pos, target) as u64
    };
    PING_SERVERS
        .iter()
        .min_by_key(|server| distance_to(server))
        .unwrap()
}
/// Gives the average ping from one server to another from the dataset.
///
/// Returns `None` only if the computed index is out of range of the ping
/// matrix. For in-range pairs *without* any measurement in the dataset this
/// returns `Some(0.0)` (the matrix's initial value) -- callers must treat
/// `0.0` between distinct servers as "no data".
pub fn get_ping(source: usize, destination: usize) -> Option<f64> {
    let index = get_index(source, destination);
    PING_DATA.get(index).copied()
}

/// Maps a (source, destination) server pair to its flat index in `PING_DATA`.
//
// NOTE(review): the row stride is `PING_SERVERS.len()`, while `PING_DATA` is
// allocated with stride `MAX_PING_SERVERS`; this is consistent as long as all
// IDs in the measurements file are below `PING_SERVERS.len()`.
fn get_index(source: usize, destination: usize) -> usize {
    source * PING_SERVERS.len() + destination
}
impl PingServer {
    /// Returns this server's geographic position as `(latitude, longitude)`.
    pub fn coordinates(&self) -> (f64, f64) {
        (self.latitude, self.longitude)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Round-trips two known cities through lookup and nearest-server search,
    // then sanity-checks the ping between them against the speed of light.
    #[test]
    fn basic() {
        let frankfurt_coords = coordinates_for_city("Frankfurt").unwrap();
        let singapore_coords = coordinates_for_city("Singapore").unwrap();
        let frankfurt = find_closest_ping_server(frankfurt_coords.0, frankfurt_coords.1);
        let singapore = find_closest_ping_server(singapore_coords.0, singapore_coords.1);
        assert_eq!(frankfurt.location, "Frankfurt");
        assert_eq!(singapore.location, "Singapore");
        assert_eq!(frankfurt.coordinates(), frankfurt_coords);
        assert_eq!(singapore.coordinates(), singapore_coords);
        assert_ne!(frankfurt.coordinates(), singapore.coordinates());
        let ping = get_ping(frankfurt.id, singapore.id).unwrap();
        // ping is at least speed of light
        assert!(ping > 34.0);
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/crypto/aggsig.rs | src/crypto/aggsig.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Implementation of an aggregate signature scheme.
//!
//! This uses the [`blst`] implementation of BLS signatures.
//! Specifically, it uses the BLS12-381 G1 (min sig) signature scheme.
//!
//! # Examples
//!
//! ```
//! use alpenglow::crypto::aggsig::{AggregateSignature, SecretKey};
//!
//! let msg = b"hello world";
//!
//! let sk1 = SecretKey::new(&mut rand::rng());
//! let pk1 = sk1.to_pk();
//! let sig1 = sk1.sign(msg);
//!
//! let sk2 = SecretKey::new(&mut rand::rng());
//! let pk2 = sk2.to_pk();
//! let sig2 = sk2.sign(msg);
//!
//! let mut aggsig = AggregateSignature::new(&[sig1, sig2], [0, 1], 2);
//! assert!(aggsig.verify(msg, &[pk1, pk2]));
//! ```
use std::mem::MaybeUninit;
use bitvec::vec::BitVec;
use blst::BLST_ERROR;
use blst::min_sig::{
AggregateSignature as BlstAggSig, PublicKey as BlstPublicKey, SecretKey as BlstSecretKey,
Signature as BlstSignature,
};
use log::warn;
use rand::prelude::*;
use serde::{Deserialize, Deserializer, Serialize};
use static_assertions::const_assert_eq;
use wincode::{SchemaRead, SchemaWrite};
use crate::ValidatorId;
/// Domain separator corresponding to the G1 (min sig), RO (random oracle) variant.
const DST: &[u8] = b"BLS_SIG_BLS12381G1_XMD:SHA-256_SSWU_RO_NUL_";

/// Size of an uncompressed BLS signature (in the `min_sig` scheme).
///
/// We deal with uncompressed signatures everywhere.
/// This way signatures are twice as big as if we used compressed signatures.
/// However, we save the time of uncompressing the signature before verifying.
const UNCOMPRESSED_SIG_SIZE: usize = 96;
// sanity check: the wire size must match blst's affine point representation
const_assert_eq!(
    UNCOMPRESSED_SIG_SIZE,
    std::mem::size_of::<blst::blst_p1_affine>()
);

/// Maximum number of signers that can be aggregated into an aggregate signature.
///
/// Bounds the accepted bitmask length during deserialization.
const MAX_SIGNERS: usize = 2048;
/// A secret key for the aggregate signature scheme.
///
/// This is a wrapper around [`blst::min_sig::SecretKey`].
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SecretKey(BlstSecretKey);

/// A public key for the aggregate signature scheme.
///
/// This is a wrapper around [`blst::min_sig::PublicKey`].
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub struct PublicKey(BlstPublicKey);
impl PublicKey {
    /// Tries to convert a byte array into a public key.
    ///
    /// Performs the key-validity checks done by [`BlstPublicKey::from_bytes`].
    ///
    /// # Errors
    ///
    /// Returns a `BLST_ERROR` if the provided bytes are not a valid BLS public key.
    pub fn try_from_bytes(pk_in: &[u8]) -> Result<Self, BLST_ERROR> {
        Ok(Self(BlstPublicKey::from_bytes(pk_in)?))
    }

    /// Tries to deserialize a `Vec<u8>` into a public key.
    ///
    /// This is for use with `serde(deserialize_with)`.
    /// Invalid key bytes are reported as a custom serde error.
    pub fn from_array_of_bytes<'de, D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let buf: Vec<u8> = Deserialize::deserialize(deserializer)?;
        Self::try_from_bytes(&buf)
            .map_err(|e| serde::de::Error::custom(format!("BLST error {e:?}")))
    }
}
/// An individual signature as part of the aggregate signature scheme.
///
/// This is a wrapper around [`blst::min_sig::Signature`].
//
// NOTE: Deriving `PartialEq` and `Eq` to support testing.
// It only makes sense because the underlying signature scheme happens to be deterministic and unique.
// Reevaluate if we change the signature scheme.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct IndividualSignature(BlstSignature);
// Wire format: the signature's uncompressed 96-byte serialization, verbatim.
impl<'de> SchemaRead<'de> for IndividualSignature {
    type Dst = IndividualSignature;

    fn read(
        reader: &mut impl wincode::io::Reader<'de>,
        dst: &mut MaybeUninit<Self::Dst>,
    ) -> wincode::ReadResult<()> {
        let sig_bytes = reader.borrow_exact(UNCOMPRESSED_SIG_SIZE)?;
        // `deserialize` rejects byte strings that are not valid curve points
        let sig = BlstSignature::deserialize(sig_bytes).map_err(|e| {
            warn!("encountered invalid BLS sig: {e:?}");
            wincode::ReadError::Custom("invalid BLS encoding")
        })?;
        dst.write(IndividualSignature(sig));
        wincode::ReadResult::Ok(())
    }
}

impl SchemaWrite for IndividualSignature {
    type Src = IndividualSignature;

    // fixed size: always the uncompressed signature encoding
    fn size_of(_src: &Self::Src) -> wincode::WriteResult<usize> {
        Ok(UNCOMPRESSED_SIG_SIZE)
    }

    fn write(writer: &mut impl wincode::io::Writer, src: &Self::Src) -> wincode::WriteResult<()> {
        Ok(writer.write(&src.0.serialize())?)
    }
}
/// An aggregated signature that contains a bitmask of signers.
///
/// This is a wrapper around [`blst::min_sig::Signature`].
/// Bit `i` of `bitmask` is set iff validator `i` contributed a signature.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct AggregateSignature {
    // the aggregated BLS signature
    sig: BlstSignature,
    // one bit per validator; set bits mark the signers
    bitmask: BitVec,
}
// Wire format: 96-byte uncompressed signature, then the intended bit length
// (`usize`), then the bitmask's backing words as a `Vec<usize>`.
impl<'de> SchemaRead<'de> for AggregateSignature {
    type Dst = AggregateSignature;

    fn read(
        reader: &mut impl wincode::io::Reader<'de>,
        dst: &mut MaybeUninit<Self::Dst>,
    ) -> wincode::ReadResult<()> {
        // read raw data
        let sig_bytes = reader.borrow_exact(UNCOMPRESSED_SIG_SIZE)?;
        let num_bits = <usize>::get(reader)?;
        let bitmask_raw_vec = <Vec<usize>>::get(reader)?;
        // map BLS signature
        let sig = BlstSignature::from_bytes(sig_bytes).map_err(|e| {
            warn!("encountered invalid BLS sig: {e:?}");
            wincode::ReadError::Custom("invalid BLS encoding")
        })?;
        // map bitmask; reject oversized inputs before allocating the BitVec
        // (guards against DoS via huge attacker-controlled lengths)
        if bitmask_raw_vec.len() > MAX_SIGNERS.div_ceil(usize::BITS as usize) {
            warn!(
                "bitmask too long: {} bits > {} max signers",
                bitmask_raw_vec.len() * usize::BITS as usize,
                MAX_SIGNERS
            );
            return Err(wincode::ReadError::Custom("bitmask too long"));
        }
        // the claimed bit length must fit into the words actually provided
        if num_bits > usize::BITS as usize * bitmask_raw_vec.len() {
            warn!(
                "want to use too many bits: {} bits > {} bits allocated",
                num_bits,
                bitmask_raw_vec.len() * usize::BITS as usize,
            );
            return Err(wincode::ReadError::Custom("want to use too many bits"));
        }
        let mut bitmask =
            BitVec::try_from_vec(bitmask_raw_vec).expect("bitmask vector should never be too big");
        // the `BitVec` is now initialized with some `usize` elements
        // we only want to use the first `num_bits` bits, as this is the intended length
        // some last bits may be uninitialized and will be ignored by `BitVec`
        bitmask.truncate(num_bits);
        dst.write(AggregateSignature { sig, bitmask });
        wincode::ReadResult::Ok(())
    }
}
impl SchemaWrite for AggregateSignature {
    type Src = AggregateSignature;

    /// Exact serialized size of an [`AggregateSignature`].
    ///
    /// Layout written by [`Self::write`]:
    /// signature (96 bytes) + bit length (`usize`) + word count (`usize`)
    /// + one `usize` per backing word of the bitmask.
    ///
    /// The previous version multiplied by the number of *bits*
    /// (`as_bitslice().len()`) instead of the number of backing *words*
    /// (`domain().len()`), over-reporting the payload size by a factor of
    /// up to `usize::BITS`.
    fn size_of(src: &Self::Src) -> wincode::WriteResult<usize> {
        // number of `usize` words actually serialized by `write` below
        let num_words = src.bitmask.as_bitslice().domain().len();
        // sig + num_bits + num_usizes + usize_len * num_usizes
        Ok(UNCOMPRESSED_SIG_SIZE + 8 + 8 + 8 * num_words)
    }

    fn write(writer: &mut impl wincode::io::Writer, src: &Self::Src) -> wincode::WriteResult<()> {
        writer.write(&src.sig.serialize())?;
        // intended bit length of the bitmask
        <usize as SchemaWrite>::write(writer, &src.bitmask.as_bitslice().len())?;
        // backing words of the bitmask
        let data = src.bitmask.as_bitslice().domain();
        <usize as SchemaWrite>::write(writer, &data.len())?;
        for elem in data {
            <usize as SchemaWrite>::write(writer, &elem)?;
        }
        Ok(())
    }
}
impl SecretKey {
    /// Generates a new secret key.
    ///
    /// The required entropy is derived from the provided `rng`
    /// (32 bytes of input key material for blst's `key_gen`).
    pub fn new(rng: &mut impl CryptoRng) -> Self {
        let mut ikm = [0u8; 32];
        rng.fill_bytes(&mut ikm);
        let sk = blst::min_sig::SecretKey::key_gen(&ikm, &[])
            .expect("key_gen only fails for wrong sized ikm");
        Self(sk)
    }

    /// Tries to convert a byte string into a secret key.
    ///
    /// # Errors
    ///
    /// Returns a `BLST_ERROR` if the provided bytes are not a valid BLS secret key.
    pub fn try_from_bytes(sk_in: &[u8]) -> Result<Self, BLST_ERROR> {
        Ok(Self(blst::min_sig::SecretKey::from_bytes(sk_in)?))
    }

    /// Tries to deserialize a `Vec<u8>` into a secret key.
    ///
    /// This is for use with `serde(deserialize_with)`.
    /// Invalid key bytes are reported as a custom serde error.
    pub fn from_array_of_bytes<'de, D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let buf: Vec<u8> = Deserialize::deserialize(deserializer)?;
        Self::try_from_bytes(&buf)
            .map_err(|e| serde::de::Error::custom(format!("BLST error {e:?}")))
    }

    /// Converts this secret key into the corresponding public key.
    #[must_use]
    pub fn to_pk(&self) -> PublicKey {
        let pk = self.0.sk_to_pk();
        PublicKey(pk)
    }

    /// Signs the byte string `msg` using this secret key.
    ///
    /// Uses the module-level domain separator [`DST`].
    // TODO: use `Signable` here, and add new `sign_bytes` function?
    #[must_use]
    pub fn sign(&self, msg: &[u8]) -> IndividualSignature {
        let sig = self.0.sign(msg, DST, &[]);
        IndividualSignature(sig)
    }
}
impl IndividualSignature {
    /// Verifies that this is a valid signature of `msg` under `pk`.
    ///
    /// Both the signature group element and the public key are validated
    /// (the two `true` flags passed to blst).
    #[must_use]
    pub fn verify(&self, msg: &[u8], pk: &PublicKey) -> bool {
        self.0.verify(true, msg, DST, &[], &pk.0, true) == blst::BLST_ERROR::BLST_SUCCESS
    }
}
impl AggregateSignature {
    /// Aggregates the signatures of `sigs` into a single BLS signature.
    ///
    /// Augments the aggregate signature with a bitmask of length `num_bits`,
    /// where the bits in `indices` are set to 1.
    ///
    /// # Panics
    ///
    /// Panics if `sigs` is empty, if any signature fails blst's group check
    /// during aggregation, or if any index in `indices` is `>= num_bits`.
    #[must_use]
    pub fn new<'a>(
        sigs: impl IntoIterator<Item = &'a IndividualSignature>,
        indices: impl IntoIterator<Item = ValidatorId>,
        num_bits: usize,
    ) -> Self {
        // seed the aggregate with the first signature, then fold in the rest
        let mut sigs_iter = sigs.into_iter();
        let mut agg_sig = BlstAggSig::from_signature(&sigs_iter.next().unwrap().0);
        for sig in sigs_iter {
            agg_sig.add_signature(&sig.0, true).unwrap();
        }
        let mut bitmask = bitvec::bitvec![0; num_bits];
        for i in indices {
            bitmask.set(i as usize, true);
        }
        Self {
            sig: agg_sig.to_signature(),
            bitmask,
        }
    }

    /// Verifies the aggregate signature against `msg` and `pks`.
    ///
    /// `pks` must hold one public key per bitmask position; only the keys at
    /// set bit positions are included in the verification.
    #[must_use]
    pub fn verify(&self, msg: &[u8], pks: &[PublicKey]) -> bool {
        if self.bitmask.len() != pks.len() {
            return false;
        }
        let pks: Vec<_> = self.signers().map(|v| &pks[v as usize].0).collect();
        let err = self.sig.fast_aggregate_verify(true, msg, DST, &pks);
        err == blst::BLST_ERROR::BLST_SUCCESS
    }

    /// Verifies the aggregate signature against `msg` and `pks`.
    ///
    /// Unlike [`Self::verify`], `pks` here contains exactly the signers' keys
    /// (in any order); the bitmask is only used for a count sanity check.
    #[must_use]
    pub fn verify_without_bitmask(&self, msg: &[u8], pks: &[PublicKey]) -> bool {
        if self.bitmask.count_ones() != pks.len() {
            return false;
        }
        let pks: Vec<_> = pks.iter().map(|p| &p.0).collect();
        let err = self.sig.fast_aggregate_verify(true, msg, DST, &pks);
        err == blst::BLST_ERROR::BLST_SUCCESS
    }

    /// Returns `true` iff this validator's signature is part of the aggregate.
    ///
    /// IDs beyond the bitmask length are reported as non-signers.
    #[must_use]
    pub fn is_signer(&self, validator_id: ValidatorId) -> bool {
        *self
            .bitmask
            .get(validator_id as usize)
            .as_deref()
            .unwrap_or(&false)
    }

    /// Iterates over all signers of this aggregate signature.
    pub fn signers(&self) -> impl Iterator<Item = ValidatorId> {
        self.bitmask.iter_ones().map(|i| i as ValidatorId)
    }
}
// Unit tests for the BLS individual/aggregate signature wrappers.
#[cfg(test)]
mod tests {
    use super::*;
    // Sign and verify a single message with one key pair.
    #[test]
    fn basic() {
        let sk = SecretKey::new(&mut rand::rng());
        let pk = sk.to_pk();
        let msg = b"blst is such a blast";
        let sig = sk.sign(msg);
        assert!(sig.verify(msg, &pk));
    }
    // Aggregate two signatures and exercise both verification paths,
    // including mismatched pk sets and tampered bitmasks.
    #[test]
    fn aggregate() {
        let msg = b"blst is such a blast";
        let sk1 = SecretKey::new(&mut rand::rng());
        let pk1 = sk1.to_pk();
        let sig1 = sk1.sign(msg);
        let sk2 = SecretKey::new(&mut rand::rng());
        let pk2 = sk2.to_pk();
        let sig2 = sk2.sign(msg);
        // check individual signatures
        assert!(sig1.verify(msg, &pk1));
        assert!(sig2.verify(msg, &pk2));
        let mut aggsig = AggregateSignature::new(&[sig1, sig2], [0, 1], 2);
        // check aggregate signature
        assert!(aggsig.verify(msg, &[pk1, pk2]));
        assert!(aggsig.verify_without_bitmask(msg, &[pk1, pk2]));
        assert!(aggsig.verify_without_bitmask(msg, &[pk2, pk1]));
        // check failure cases
        assert!(!aggsig.verify(msg, &[pk1, pk2, pk1]));
        assert!(!aggsig.verify(msg, &[pk1, pk1]));
        assert!(!aggsig.verify(msg, &[pk1]));
        assert!(!aggsig.verify(msg, &[]));
        assert!(!aggsig.verify(b"not the original message", &[pk1]));
        assert!(!aggsig.verify_without_bitmask(msg, &[pk1, pk2, pk1]));
        assert!(!aggsig.verify_without_bitmask(msg, &[pk1, pk1]));
        assert!(!aggsig.verify_without_bitmask(msg, &[pk1]));
        assert!(!aggsig.verify_without_bitmask(msg, &[]));
        assert!(!aggsig.verify_without_bitmask(b"not the original message", &[pk1]));
        // modifying bitmask makes signature invalid
        aggsig.bitmask.set(0, false);
        assert!(!aggsig.verify(msg, &[pk1, pk2]));
        assert!(!aggsig.verify(msg, &[pk2]));
        assert!(!aggsig.verify_without_bitmask(msg, &[pk1, pk2]));
        assert!(!aggsig.verify_without_bitmask(msg, &[pk2]));
        aggsig.bitmask.set(1, false);
        assert!(!aggsig.verify(msg, &[pk1, pk2]));
        assert!(!aggsig.verify(msg, &[]));
        assert!(!aggsig.verify_without_bitmask(msg, &[pk1, pk2]));
        assert!(!aggsig.verify_without_bitmask(msg, &[]));
    }
    // Aggregate a strict subset (2 of 3) of signers and check that the
    // bitmask reports exactly those signers and that pk order matters
    // for the bitmask-based verification.
    #[test]
    fn signers() {
        let msg = b"blst is such a blast";
        let sk1 = SecretKey::new(&mut rand::rng());
        let pk1 = sk1.to_pk();
        let sig1 = sk1.sign(msg);
        let sk2 = SecretKey::new(&mut rand::rng());
        let pk2 = sk2.to_pk();
        let sig2 = sk2.sign(msg);
        let sk3 = SecretKey::new(&mut rand::rng());
        let pk3 = sk3.to_pk();
        let sig3 = sk3.sign(msg);
        assert!(sig1.verify(msg, &pk1));
        assert!(sig2.verify(msg, &pk2));
        assert!(sig3.verify(msg, &pk3));
        // only aggregate over 2/3 signatures
        let aggsig = AggregateSignature::new(&[sig1, sig3], [0, 2], 3);
        // check signers bitmask
        let signers: Vec<_> = aggsig.signers().collect();
        assert_eq!(signers.len(), 2);
        assert!(signers.contains(&0));
        assert!(!signers.contains(&1));
        assert!(signers.contains(&2));
        // check aggregate signature
        assert!(aggsig.verify_without_bitmask(msg, &[pk1, pk3]));
        assert!(aggsig.verify(msg, &[pk1, pk2, pk3]));
        // order for set of PKs matters
        assert!(!aggsig.verify(msg, &[pk2, pk1, pk3]));
        assert!(!aggsig.verify(msg, &[pk1, pk3, pk2]));
        assert!(!aggsig.verify(msg, &[pk2, pk3, pk1]));
        assert!(!aggsig.verify(msg, &[pk3, pk1, pk2]));
    }
    // Round-trip a key pair through TOML and reject malformed inputs.
    #[test]
    fn serialize_toml() {
        #[derive(Serialize, Deserialize)]
        struct KeyPair {
            #[serde(deserialize_with = "SecretKey::from_array_of_bytes")]
            sk: SecretKey,
            #[serde(deserialize_with = "PublicKey::from_array_of_bytes")]
            pk: PublicKey,
        }
        let sk = SecretKey::new(&mut rand::rng());
        let pk = sk.to_pk();
        // serialize and deserialize to/from TOML
        let kp = KeyPair { sk, pk };
        let serialized = toml::to_string(&kp).unwrap();
        let deserialized: KeyPair = toml::from_str(&serialized).unwrap();
        assert_eq!(kp.sk.0.to_bytes(), deserialized.sk.0.to_bytes());
        assert_eq!(kp.pk.0.to_bytes(), deserialized.pk.0.to_bytes());
        // wrong type for secret key
        let wrong_sk_str = format!("sk = \"hello\"\npk = {:?}", kp.pk.0.to_bytes());
        let deserialized: Result<KeyPair, toml::de::Error> = toml::from_str(&wrong_sk_str);
        assert!(deserialized.is_err());
        // invalid bytes for secret key
        let wrong_sk_str = format!("sk = [0, 0, 0, 0]\npk = {:?}", kp.pk.0.to_bytes());
        let deserialized: Result<KeyPair, toml::de::Error> = toml::from_str(&wrong_sk_str);
        assert!(deserialized.is_err());
        // wrong type for public key
        let wrong_pk_str = format!("sk = {:?}\npk = \"hello\"", kp.sk.0.to_bytes());
        let deserialized: Result<KeyPair, toml::de::Error> = toml::from_str(&wrong_pk_str);
        assert!(deserialized.is_err());
        // invalid bytes for public key
        let wrong_pk_str = format!("sk = {:?}\npk = [0, 0, 0, 0]", kp.sk.0.to_bytes());
        let deserialized: Result<KeyPair, toml::de::Error> = toml::from_str(&wrong_pk_str);
        assert!(deserialized.is_err());
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/crypto/signature.rs | src/crypto/signature.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Implementation of a digital signature scheme.
//!
//! This module abstracts the digital signatures used throughout the entire library.
//! Currently, it provides Ed25519 digital signature scheme, as specified in [RFC 8032].
//! Specifically, it is a wrapper around the [`ed25519_consensus`] crate.
//!
//! [RFC 8032]: https://tools.ietf.org/html/rfc8032
use ed25519_consensus::{SigningKey, VerificationKey};
use rand::CryptoRng;
use serde::{Deserialize, Serialize};
use wincode::containers::Pod;
use wincode::{SchemaRead, SchemaWrite};
/// Secret key for the digital signature scheme.
///
/// This is a wrapper around [`ed25519_consensus::SigningKey`].
// NOTE(review): the derived `Serialize` exposes the raw secret-key bytes when
// serialized — presumably intended for local keypair storage; confirm callers.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SecretKey(SigningKey);
/// Public key for the digital signature scheme.
///
/// This is a wrapper around [`ed25519_consensus::VerificationKey`].
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub struct PublicKey(VerificationKey);
/// Digital signature.
///
/// This is a wrapper around [`ed25519_consensus::Signature`].
// Serialized as plain bytes (`Pod`) for the wincode wire format.
#[derive(Clone, Copy, Debug, SchemaRead, SchemaWrite)]
pub struct Signature(#[wincode(with = "Pod<_>")] ed25519_consensus::Signature);
impl SecretKey {
/// Generates a new secret key.
///
/// The required entropy is derived from the provided `rng`.
pub fn new(rng: &mut impl CryptoRng) -> Self {
let mut bytes = [0u8; 32];
rng.fill_bytes(&mut bytes[..]);
let sk: SigningKey = bytes.into();
Self(sk)
}
/// Converts this secret key into the corresponding public key.
#[must_use]
pub fn to_pk(&self) -> PublicKey {
let pk = self.0.verification_key();
PublicKey(pk)
}
/// Signs the byte string `msg` using this secret key.
// TODO: use `Signable` here, and add new `sign_bytes` function?
#[must_use]
pub fn sign(&self, msg: &[u8]) -> Signature {
let sig = self.0.sign(msg);
Signature(sig)
}
/// Returns the bytes of this secret key.
#[must_use]
pub fn as_bytes(&self) -> &[u8; 32] {
self.0.as_bytes()
}
}
impl PublicKey {
/// Returns the bytes of this public key.
#[must_use]
pub fn as_bytes(&self) -> &[u8; 32] {
self.0.as_bytes()
}
}
impl Signature {
    /// Verifies that this is a valid signature of `msg` under `pk`.
    #[must_use]
    pub fn verify(&self, msg: &[u8], pk: &PublicKey) -> bool {
        matches!(pk.0.verify(&self.0, msg), Ok(()))
    }
}
// Unit tests for the Ed25519 wrapper types.
#[cfg(test)]
mod tests {
    use super::*;
    // Sign/verify round-trip; also checks sk and pk bytes differ.
    #[test]
    fn basic() {
        let sk = SecretKey::new(&mut rand::rng());
        let pk = sk.to_pk();
        assert_ne!(sk.as_bytes(), pk.as_bytes());
        let msg = b"ed25519 is pretty fine";
        let sig = sk.sign(msg);
        assert!(sig.verify(msg, &pk));
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/crypto/merkle.rs | src/crypto/merkle.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Implementation of a Merkle tree.
//!
//! It supports non-power-of-two leaf count by adding empty leaves.
//! That is, a tree with 3 leaves is equivalent to a tree with 4 leaves,
//! where the 4th leaf has the empty byte slice `&[]` as its data.
//!
//! The maximum height of trees supported is [`MAX_MERKLE_TREE_HEIGHT`].
//! Once constructed, the tree is immutable.
//!
//! Labels are used to reduce the impact of multiple attack vectors:
//! - multi-target attacks against this and other implementations
//! - rainbow tables / pre-calculation attacks
//! - ambiguity between leaf and inner nodes with unknown tree height
use std::marker::PhantomData;
use derive_more::{From, Into};
use hex_literal::hex;
use smallvec::SmallVec;
use static_assertions::const_assert;
use wincode::{SchemaRead, SchemaWrite};
use super::Hash;
use super::hash::hash_all;
use crate::shredder::TOTAL_SHREDS;
use crate::types::slice_index::MAX_SLICES_PER_BLOCK;
/// The hash of the genesis block.
pub const GENESIS_BLOCK_HASH: BlockHash = DoubleMerkleRoot(Hash([0; 32]));
/// Maximum height of Merkle trees currently supported.
pub const MAX_MERKLE_TREE_HEIGHT: usize = 32;
/// Maximum number of leaf nodes in the Merkle trees currently supported.
pub const MAX_MERKLE_TREE_LEAVES: usize = 1 << MAX_MERKLE_TREE_HEIGHT;
// need to be able to build Merkle tree for each slice
const_assert!(TOTAL_SHREDS <= MAX_MERKLE_TREE_LEAVES);
// need to be able to build double-Merkle tree for each block
const_assert!(MAX_SLICES_PER_BLOCK <= MAX_MERKLE_TREE_LEAVES);
// Domain-separation labels mixed into every leaf/inner-node hash.
// NOTE(review): "LEAF-NODE" and "LEFT-NODE" literals appear to be 31 bytes
// against a declared `[u8; 32]` — likely a trailing space was lost in text
// extraction; confirm against the original source.
const LEAF_LABEL: [u8; 32] = *b"ALPENGLOW-MERKLE-TREE LEAF-NODE";
const LEFT_LABEL: [u8; 32] = *b"ALPENGLOW-MERKLE-TREE LEFT-NODE";
const RIGHT_LABEL: [u8; 32] = *b"ALPENGLOW-MERKLE-TREE RIGHT-NODE";
/// Pre-calculated empty roots for up to `2 ^ MAX_MERKLE_TREE_HEIGHT` leaves.
///
/// These are calculated by running `cargo test -- empty_roots --no-capture`,
/// which is much faster than but equivalent to this straightforward snippet:
/// ```no_run
/// use alpenglow::crypto::hash::Hash;
/// use alpenglow::crypto::merkle::{MAX_MERKLE_TREE_HEIGHT, PlainMerkleTree};
///
/// for height in 0..MAX_MERKLE_TREE_HEIGHT {
/// let data = vec![vec![]; 1 << height];
/// let tree = PlainMerkleTree::new(&data);
/// println!("{}", hex::encode(tree.get_root()));
/// }
/// ```
///
/// Used for efficient check whether leaf is last in [`MerkleTree::check_proof_last`].
const EMPTY_ROOTS: [Hash; MAX_MERKLE_TREE_HEIGHT] = [
Hash(hex!(
"50c4672b7a309041b109458b1f3a11f82c225970975a95cd1025209301fcbcab"
)),
Hash(hex!(
"2aa0cc78c82100f0fa3a26606a9794a928d5ddd0f5d381f93d6b0d64d065aa6f"
)),
Hash(hex!(
"be22d038e2a34ff9386f4df7241c0a381ff433c98e36e2e0a59b3cfef7950c5b"
)),
Hash(hex!(
"afc22402352574edbbfa7e3fd5221c7f1ab256c70ed826c8e2b8adc5b0b56ae9"
)),
Hash(hex!(
"83831b4f0df87b978810491c31fd670c90e8e223e1a1f2876a96ab30c29161a4"
)),
Hash(hex!(
"800e9e154844513cdbf5e11bec487c8953bcf2951d98eb5ef39cae18520d5e1b"
)),
Hash(hex!(
"6c5328d45f1d420776a56188732b2ae0eba2956008148e4f0383eb8de65c65fe"
)),
Hash(hex!(
"c356e3d71c9aae22fb8e449ad2d37b795b9861bb8fbb028c2378c86eff2e0a26"
)),
Hash(hex!(
"89c79a35bdbb8aa17e08560fdbc0bbc3f2fc6ba882deedf81a3f37c4534e9e3d"
)),
Hash(hex!(
"e5fefbb76548773dd0ba0765286fe98d1b6560bafd1abc071120380f4fb0bb78"
)),
Hash(hex!(
"24a72454e312eddaee4f4504e04e309152f27dd9848bb5c648f5f6a0aa960cb1"
)),
Hash(hex!(
"1be1a1ccce5c9dee569c7560033f41414c10792939408d857f35cb2c4273877d"
)),
Hash(hex!(
"b43f0ce6212391b7c57b2452a6d989db222ae6f274d8cdfafc28afd98d267e23"
)),
Hash(hex!(
"3f346c4f375fe9a9a9d41ad270bd36d69409e2c7d8b4c38653b6d7d225a6add5"
)),
Hash(hex!(
"3c4120e345597ad037337dd72d2c536553e2b52242f4ef81ce8def38d17b38af"
)),
Hash(hex!(
"4396524427a665c659fea115ce68d3357ce1d6a04ca5b8eeb69d76e439ea2247"
)),
Hash(hex!(
"68eb4a1c412f67ef86dadaa6c5b6cf2ba5e7bd266645b69bb12a1d10704487f7"
)),
Hash(hex!(
"75271d1c9e8b79fdeaceb051cfd29ed03350e4808b262e3a750277c82a2a5206"
)),
Hash(hex!(
"b99c05ba6039ddcb754abcf9e66aeb529348e708220569fe20cc4d587ddbd267"
)),
Hash(hex!(
"a0f8ea088f640460978534c9821ae114231e0730bff2c361b64a5824a3237341"
)),
Hash(hex!(
"b50f9fa20d9df525a29ccfe4cbe9b014142275e7e5ac9ef81a6c20112774418c"
)),
Hash(hex!(
"b48a3973c4e0c5bad395ecd1c0cd430adc9680c7fd574460bf21c45f74840662"
)),
Hash(hex!(
"826bfb27619fbf799f6000e4a3acdec264a13661e9c730e265c1c74c63501cb3"
)),
Hash(hex!(
"720d56bf703cd7677805d33e73d2e7cb6104c932b62458ac3ae2f9d7469a1f38"
)),
Hash(hex!(
"ab2960c67fdd892b5eb1e63ac0c182c8d174671fd0f8065363895b5a3f7f0a88"
)),
Hash(hex!(
"ab7f4126b993ca5037a84190933bb8b97b9427a977d6872ca0b5d61470ea1834"
)),
Hash(hex!(
"dfaae89b54a08d497b2e8251de039fb38a7137b802d2d6c2554540c7b027721a"
)),
Hash(hex!(
"9451a516e349a64cd75fde5558bc92d3f359d3d49b560d2414b7eaa1e6889b1e"
)),
Hash(hex!(
"1c78453e429f9dbeaa70097b76abe42ad3da19a2ec5cda345aa77d713b930145"
)),
Hash(hex!(
"ee4878f4417f8dea52e54aaad49962a9fab5b746e54d6c49a5125e41e3ed6511"
)),
Hash(hex!(
"0cca41b299cd9be76a4b810fcbfff766e3f93ae2d8c7d761c0af2b524af97b56"
)),
Hash(hex!(
"33ebfb34d3aa119cb665d564acdd77b318dc86aa6a97744edb3bc03e97d776ca"
)),
];
/// Marker trait for the leaf nodes of a Merkle tree.
pub trait MerkleLeaf: AsRef<[u8]> {}
/// Trait for the root of a Merkle tree.
pub trait MerkleRoot: From<Hash> {
    /// Borrows the underlying [`Hash`] of this root.
    fn as_hash(&self) -> &Hash;
}
/// Marker trait for the proof of a Merkle tree.
pub trait MerkleProof: AsRef<[Hash]> + From<Vec<Hash>> {}
// Anything byte-slice-like can serve as a leaf.
impl<T> MerkleLeaf for T where T: AsRef<[u8]> {}
impl MerkleRoot for Hash {
    fn as_hash(&self) -> &Hash {
        self
    }
}
impl MerkleProof for Vec<Hash> {}
/// Root of a per-slice Merkle tree (see [`SliceMerkleTree`]).
#[repr(transparent)]
#[derive(
    Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, From, Into, SchemaRead, SchemaWrite,
)]
pub struct SliceRoot(Hash);
impl MerkleRoot for SliceRoot {
    fn as_hash(&self) -> &Hash {
        &self.0
    }
}
impl AsRef<[u8]> for SliceRoot {
    fn as_ref(&self) -> &[u8] {
        self.0.as_ref()
    }
}
/// Merkle path within a per-slice Merkle tree (see [`SliceMerkleTree`]).
#[repr(transparent)]
#[derive(Clone, Debug, PartialEq, Eq, From, Into, SchemaRead, SchemaWrite)]
pub struct SliceProof(Vec<Hash>);
impl MerkleProof for SliceProof {}
impl AsRef<[Hash]> for SliceProof {
    fn as_ref(&self) -> &[Hash] {
        self.0.as_ref()
    }
}
/// Root of the double-Merkle tree over slice roots (see [`DoubleMerkleTree`]).
#[repr(transparent)]
#[derive(
    Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, From, Into, SchemaRead, SchemaWrite,
)]
pub struct DoubleMerkleRoot(Hash);
impl MerkleRoot for DoubleMerkleRoot {
    fn as_hash(&self) -> &Hash {
        &self.0
    }
}
/// Merkle path within the double-Merkle tree (see [`DoubleMerkleTree`]).
#[repr(transparent)]
#[derive(Clone, Debug, PartialEq, Eq, From, Into, SchemaRead, SchemaWrite)]
pub struct DoubleMerkleProof(Vec<Hash>);
impl MerkleProof for DoubleMerkleProof {}
/// A block hash is the root of the block's double-Merkle tree.
pub type BlockHash = DoubleMerkleRoot;
impl AsRef<[Hash]> for DoubleMerkleProof {
    fn as_ref(&self) -> &[Hash] {
        self.0.as_ref()
    }
}
/// A plain Merkle tree over arbitrary bytes.
///
/// Usually, you want the additional type-safety of not using these basic types.
/// For this implement [`MerkleLeaf`], [`MerkleRoot`] and [`MerkleProof`] on your own types.
pub type PlainMerkleTree = MerkleTree<Vec<u8>, Hash, Vec<Hash>>;
/// Per-slice Merkle tree for use in Rotor.
///
/// The leaves of this tree are shreds within a single slice of a block.
/// The root of this tree is signed by the leader and included in each shred, together with a proof.
pub type SliceMerkleTree = MerkleTree<Vec<u8>, SliceRoot, SliceProof>;
/// Alpenglow's double-Merkle tree.
///
/// The leaves of this tree are roots of per-slice Merkle trees.
/// The root of this tree represents the block hash.
pub type DoubleMerkleTree = MerkleTree<SliceRoot, DoubleMerkleRoot, DoubleMerkleProof>;
/// Implementation of a Merkle tree.
pub struct MerkleTree<Leaf: MerkleLeaf, Root: MerkleRoot, Proof: MerkleProof> {
    /// All hashes in the tree, leaf hashes and inner nodes.
    // Stored level by level, bottom (leaves) to top (root is the last entry).
    nodes: Vec<Hash>,
    /// For each level, has the offset in `nodes` and the number of hashes on that level.
    levels: SmallVec<[(u32, u32); MAX_MERKLE_TREE_HEIGHT]>,
    /// Marker for the type of the tree.
    _type: PhantomData<(Leaf, Root, Proof)>,
}
impl<Leaf: MerkleLeaf, Root: MerkleRoot, Proof: MerkleProof> MerkleTree<Leaf, Root, Proof> {
    /// Creates a new Merkle tree from the given data for each leaf.
    ///
    /// This will always create a perfect binary tree (filling with empty leaves as necessary).
    /// If you want to create a tree with more than half of the leaves empty,
    /// you have to explicitly pass in empty leaves as part of `data`.
    ///
    /// # Panics
    ///
    /// Panics if `data` yields no leaves, if the number of leaves does not fit
    /// in a `u32`, or if the tree would exceed [`MAX_MERKLE_TREE_HEIGHT`].
    pub fn new<'a>(data: impl IntoIterator<Item = &'a Leaf>) -> Self
    where
        Leaf: 'a,
    {
        // calculate leaf hashes
        let mut nodes = data
            .into_iter()
            .map(|leaf| Self::hash_leaf(leaf))
            .collect::<Vec<Hash>>();
        assert!(!nodes.is_empty());
        // reserve enough space for inner nodes
        let mut num_inner_nodes = 1;
        for i in 1..=nodes.len().ilog2() {
            num_inner_nodes += nodes.len().div_ceil(1 << i);
        }
        nodes.reserve(num_inner_nodes);
        // prepare levels index with correct size
        let mut levels = SmallVec::new();
        levels.push((0, nodes.len().try_into().expect("too many leaves")));
        // calculate inner nodes, level by level, bottom to top
        let mut left = 0;
        let mut right = nodes.len();
        let mut len = right - left;
        let mut h = 0;
        while len > 1 {
            for i in (left..right).step_by(2) {
                // odd number of nodes on this level: pair the last node
                // with the pre-computed root of an all-empty subtree
                // (NOTE: `i == right` is impossible here, `i` stays < `right`)
                if i + 1 == right {
                    let inner_node = Self::hash_pair(&nodes[i], &EMPTY_ROOTS[h]);
                    nodes.push(inner_node);
                    break;
                }
                let inner_node = Self::hash_pair(&nodes[i], &nodes[i + 1]);
                nodes.push(inner_node);
            }
            len = len.div_ceil(2);
            left = right;
            right = left + len;
            h += 1;
            levels.push((left as u32, len as u32));
        }
        Self {
            nodes,
            levels,
            _type: PhantomData,
        }
    }
    /// Gives the root hash of the tree.
    #[must_use]
    pub fn get_root(&self) -> Root {
        // the last node in the level-by-level layout is the root
        let root_hash = self.nodes.last().expect("empty tree").clone();
        root_hash.into()
    }
    /// Gives the height of the tree.
    #[must_use]
    pub fn height(&self) -> usize {
        self.levels.len() - 1
    }
    /// Generates a proof of membership for the element at the given `index`.
    ///
    /// The proof is the Merkle path from the leaf to the root.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of range for the leaves of this tree.
    #[must_use]
    pub fn create_proof(&self, index: usize) -> Proof {
        assert!(index < 1 << self.height());
        assert!(index < self.levels[0].1 as usize);
        let mut proof = Vec::with_capacity(self.height());
        let mut i = index;
        for (h, (offset, len)) in self.levels.iter().enumerate().take(self.height()) {
            // sibling of `i` is `i ^ 1`; if it lies beyond the stored nodes
            // on this level, it is the root of an all-empty subtree
            if i ^ 1 >= *len as usize {
                proof.push(EMPTY_ROOTS[h].clone());
            } else {
                proof.push(self.nodes[*offset as usize + (i ^ 1)].clone());
            }
            i /= 2;
        }
        proof.into()
    }
    /// Checks a Merkle path against a leaf's data.
    ///
    /// Returns `true` iff `proof` is a valid Merkle path for a leaf containing
    /// `data` at the given `index` in the tree corresponding to the given `root`.
    #[must_use]
    pub fn check_proof(data: &Leaf, index: usize, root: &Root, proof: &Proof) -> bool {
        let hash = Self::hash_leaf(data);
        Self::check_hash_proof(hash, index, root, proof)
    }
    /// Checks a Merkle path against a leaf hash.
    ///
    /// Returns `true` iff `proof` is a valid Merkle path for a leaf that hashes
    /// to the given `hash` at the given `index` in the tree corresponding to the given `root`.
    #[must_use]
    fn check_hash_proof(hash: Hash, index: usize, root: &Root, proof: &Proof) -> bool {
        let mut i = index;
        let mut node = hash;
        for h in proof.as_ref() {
            // even index => current node is a left child, odd => right child
            node = match i % 2 {
                0 => Self::hash_pair(&node, h),
                _ => Self::hash_pair(h, &node),
            };
            i /= 2;
        }
        node == *root.as_hash()
    }
    /// Checks a Merkle path proves the given leaf's data is last in the tree.
    ///
    /// Returns `true` iff the Merkle proof is valid and `index` is the last leaf in the tree.
    #[must_use]
    pub fn check_proof_last(leaf: &Leaf, index: usize, root: &Root, proof: &Proof) -> bool {
        let hash = Self::hash_leaf(leaf);
        Self::check_hash_proof_last(hash, index, root, proof)
    }
    /// Checks a Merkle path proves the given leaf hash is last in the tree.
    ///
    /// Returns `true` iff the Merkle proof is valid and `index` is the last leaf in the tree.
    ///
    /// # Panics
    ///
    /// Panics if the proof is longer than [`MAX_MERKLE_TREE_HEIGHT`].
    #[must_use]
    fn check_hash_proof_last(hash: Hash, index: usize, root: &Root, proof: &Proof) -> bool {
        assert!(proof.as_ref().len() <= MAX_MERKLE_TREE_HEIGHT);
        let mut i = index;
        let mut node = hash;
        for (height, h) in proof.as_ref().iter().enumerate() {
            // a last leaf must only ever have empty subtrees to its right,
            // so whenever it is a left child its sibling must be an empty root
            node = match i % 2 {
                0 => Self::hash_pair(&node, &EMPTY_ROOTS[height]),
                _ => Self::hash_pair(h, &node),
            };
            i /= 2;
        }
        node == *root.as_hash()
    }
    /// Hashes some leaf data with a label into a leaf node.
    ///
    /// The label prevents the possibility to claim an intermediate node was a leaf.
    /// It also makes the Merkle tree more robust against pre-calculation attacks.
    fn hash_leaf(leaf: &Leaf) -> Hash {
        let data: &[u8] = leaf.as_ref();
        hash_all(&[&LEAF_LABEL, data])
    }
    /// Hashes a pair of child hashes with labels into a parent (non-leaf) node.
    ///
    /// The labels prevent the possibility to claim an intermediate node was a leaf.
    /// They also make the Merkle tree more robust against pre-calculation attacks.
    fn hash_pair(left: &Hash, right: &Hash) -> Hash {
        hash_all(&[&LEFT_LABEL, left.as_ref(), &RIGHT_LABEL, right.as_ref()])
    }
}
// Unit tests for the Merkle tree, including proof generation/verification,
// padding with empty leaves, and randomized fuzzing.
#[cfg(test)]
mod tests {
    use rand::prelude::*;
    use super::*;
    // Two leaves produce exactly 3 nodes (2 leaves + 1 root).
    #[test]
    fn basic() {
        let data = [b"hello".to_vec(), b"world".to_vec()];
        let tree = PlainMerkleTree::new(&data);
        assert_eq!(tree.nodes.len(), 3);
    }
    // Root of a two-leaf tree matches the manually computed pair hash.
    #[test]
    fn two_leaves() {
        let data = [b"hello".to_vec(), b"world".to_vec()];
        let tree = PlainMerkleTree::new(&data);
        // calculate expected root hash manually
        let leaf1 = PlainMerkleTree::hash_leaf(&data[0]);
        let leaf2 = PlainMerkleTree::hash_leaf(&data[1]);
        let expected_root = PlainMerkleTree::hash_pair(&leaf1, &leaf2);
        assert_eq!(tree.get_root(), expected_root);
    }
    // Trees of different leaf counts differ even if all leaves are empty.
    #[test]
    fn empty_trees() {
        // one empty leaf
        let data = [vec![]];
        let tree1 = PlainMerkleTree::new(&data);
        // two empty leaves
        let data = [vec![], vec![]];
        let tree2 = PlainMerkleTree::new(&data);
        // these should have different roots
        assert_ne!(tree1.get_root(), tree2.get_root());
    }
    // Every leaf of a 4-leaf tree verifies against its own proof.
    #[test]
    fn proofs() {
        let data = [
            b"hello".to_vec(),
            b"world".to_vec(),
            b"data".to_vec(),
            b"test".to_vec(),
        ];
        let tree = PlainMerkleTree::new(&data);
        let root = tree.get_root();
        // proof and verify all leaves
        let proof = tree.create_proof(0);
        assert!(PlainMerkleTree::check_proof(&data[0], 0, &root, &proof));
        let proof = tree.create_proof(1);
        assert!(PlainMerkleTree::check_proof(&data[1], 1, &root, &proof));
        let proof = tree.create_proof(2);
        assert!(PlainMerkleTree::check_proof(&data[2], 2, &root, &proof));
        let proof = tree.create_proof(3);
        assert!(PlainMerkleTree::check_proof(&data[3], 3, &root, &proof));
    }
    // Implicit padding equals explicit empty leaves (3 vs 3+1 leaves).
    #[test]
    fn three_leaves() {
        let data1 = [b"a".to_vec(), b"b".to_vec(), b"c".to_vec()];
        let tree1 = PlainMerkleTree::new(&data1);
        let data2 = [b"a".to_vec(), b"b".to_vec(), b"c".to_vec(), vec![]];
        let tree2 = PlainMerkleTree::new(&data2);
        // missing leaves should be equivalent to empty leaves
        assert_eq!(tree1.get_root(), tree2.get_root());
    }
    // Same as above, but for a larger non-power-of-two leaf count (33 vs 64).
    #[test]
    fn non_power_of_two() {
        let data1 = vec![b"hello".to_vec(); 33];
        let tree1 = PlainMerkleTree::new(&data1);
        let mut data2 = vec![b"hello".to_vec(); 33];
        let empty_slice = vec![];
        data2.extend_from_slice(vec![empty_slice; 31].as_slice());
        let tree2 = PlainMerkleTree::new(data2.as_slice());
        // missing leaves should be equivalent to empty leaves
        assert_eq!(tree1.get_root(), tree2.get_root());
    }
    // `check_proof_last` accepts only the actual last leaf (index 32 of 33).
    #[test]
    fn proof_last() {
        let data = vec![b"hello".to_vec(); 33];
        let tree = PlainMerkleTree::new(&data);
        let root = tree.get_root();
        let proof = tree.create_proof(31);
        assert!(!PlainMerkleTree::check_proof_last(
            &data[31], 31, &root, &proof
        ));
        let proof = tree.create_proof(32);
        assert!(PlainMerkleTree::check_proof_last(
            &data[32], 32, &root, &proof
        ));
    }
    // Randomized trees: every generated proof must verify, and last-leaf
    // proofs must verify as such.
    #[test]
    fn fuzzing() {
        const ITERATIONS: u64 = 10_000;
        const MAX_NUM_LEAVES: usize = 64;
        const MAX_LEAF_DATA_LEN: usize = 64;
        const QUERIES_PER_TREE: usize = 10;
        let mut rng = rand::rng();
        for _ in 0..ITERATIONS {
            let num_data = rng.random_range(1..=MAX_NUM_LEAVES);
            let mut data = Vec::with_capacity(num_data);
            for _ in 0..num_data {
                let leaf_data_len = rng.random_range(0..=MAX_LEAF_DATA_LEN);
                let mut leaf_data = vec![0; leaf_data_len];
                rng.fill_bytes(&mut leaf_data);
                data.push(leaf_data);
            }
            let tree = PlainMerkleTree::new(data.iter());
            let root = tree.get_root();
            for _ in 0..QUERIES_PER_TREE {
                let index = rng.random_range(0..num_data);
                let proof = tree.create_proof(index);
                let leaf = &data[index];
                assert!(PlainMerkleTree::check_proof(leaf, index, &root, &proof));
                if index == num_data - 1 {
                    assert!(PlainMerkleTree::check_proof_last(
                        leaf, index, &root, &proof
                    ));
                }
            }
        }
    }
    // NOTE: This is used for calculating `EMPTY_ROOTS`.
    #[test]
    fn empty_roots() {
        for (height, empty_root) in EMPTY_ROOTS.iter().enumerate() {
            let mut node = PlainMerkleTree::hash_leaf(&vec![]);
            for _ in 0..height {
                node = PlainMerkleTree::hash_pair(&node, &node);
            }
            assert_eq!(node, *empty_root);
            println!("{}", hex::encode(node));
        }
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/src/crypto/hash.rs | src/crypto/hash.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Cryptographic hash function.
//!
//! This module abstracts the hash function used throughout the entire library.
//! Currently, SHA-256, specifically the implementation from the [`sha2`] crate is used.
use sha2::{Digest, Sha256};
use wincode::{SchemaRead, SchemaWrite};
/// Regular hash that should be used in most cases.
///
/// This provides 256-bit resistance against (second) preimage attacks.
/// It also provides 128-bit resistance against collision attacks.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, SchemaRead, SchemaWrite)]
pub struct Hash(pub(super) [u8; 32]);
impl Hash {
    /// Creates a random hash for testing.
    #[cfg(test)]
    pub fn random_for_test() -> Self {
        let mut bytes = [0; 32];
        rand::RngCore::fill_bytes(&mut rand::rng(), &mut bytes);
        Hash(bytes)
    }
}
impl AsRef<[u8]> for Hash {
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}
// Manual `std::hash::Hash`: the bytes are already a uniform digest,
// so they are fed to the hasher directly.
impl std::hash::Hash for Hash {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        state.write(&self.0);
    }
}
/// Short hash that should be used carefully.
///
/// Usually a regular [`self::Hash`] is what you want.
///
/// This provides up to 128-bit resistance against (second) preimage attacks.
/// However, it provides at most 64-bit resistance against collision attacks.
/// Only use this if you are 100% certain that second preimage resistance is enough!
#[derive(Clone, Debug, PartialEq, Eq, SchemaRead, SchemaWrite)]
pub struct ShortHash([u8; 16]);
impl AsRef<[u8]> for ShortHash {
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}
// Manual `std::hash::Hash`: feed the already-uniform digest bytes directly.
impl std::hash::Hash for ShortHash {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        state.write(&self.0);
    }
}
/// Hashes the given data using SHA-256.
#[must_use]
pub fn hash(data: &[u8]) -> Hash {
    // a single chunk is just the concatenation of one item
    hash_all(&[data])
}
/// Hashes all the given data concatenated together.
#[must_use]
pub fn hash_all(data: &[&[u8]]) -> Hash {
    let digest = data
        .iter()
        .fold(Sha256::new(), |mut hasher, chunk| {
            hasher.update(chunk);
            hasher
        })
        .finalize();
    Hash(digest.into())
}
/// Truncates the given hash into a [`ShortHash`].
#[must_use]
pub fn truncate(hash: Hash) -> ShortHash {
    let mut bytes = [0u8; 16];
    bytes.copy_from_slice(&hash.0[..16]);
    ShortHash(bytes)
}
// Unit tests for the SHA-256 wrappers: determinism, uniqueness, concatenation.
#[cfg(test)]
mod tests {
    use std::collections::HashSet;
    use super::*;
    const HASH_ITERATIONS: u64 = 100_000;
    // Same input always yields the same digest.
    #[test]
    fn deterministic() {
        let hash1 = hash(&[0; 32]);
        let hash2 = hash(&[0; 32]);
        assert_eq!(hash1, hash2);
        for i in 0..HASH_ITERATIONS {
            let bytes = i.to_be_bytes();
            let hash1 = hash(&bytes);
            let hash2 = hash(&bytes);
            assert_eq!(hash1, hash2);
        }
    }
    // No collisions observed over many distinct inputs, full and truncated.
    #[test]
    fn unique() {
        let hash1 = hash(&[0; 16]);
        let hash2 = hash(&[0; 32]);
        assert_ne!(hash1, hash2);
        // should find no duplicate hashes
        let unique_hashes = (0..HASH_ITERATIONS)
            .map(|i| {
                let bytes = i.to_be_bytes();
                hash(&bytes)
            })
            .collect::<HashSet<_>>()
            .len();
        assert_eq!(unique_hashes as u64, HASH_ITERATIONS);
        // should find no duplicate truncated hashes
        let unique_hashes = (0..HASH_ITERATIONS)
            .map(|i| {
                let bytes = i.to_be_bytes();
                truncate(hash(&bytes))
            })
            .collect::<HashSet<_>>()
            .len();
        assert_eq!(unique_hashes as u64, HASH_ITERATIONS);
    }
    // `hash_all` over chunks equals `hash` over their concatenation,
    // regardless of how the input is split.
    #[test]
    fn concatenation() {
        let hash_direct = hash(&[[0; 32], [1; 32]].concat());
        let hash_all_1 = hash_all(&[&[0; 32], &[1; 32]]);
        let hash_all_2 = hash_all(&[&[0; 16], &[0; 16], &[1; 16], &[1; 16]]);
        assert_eq!(hash_all_1, hash_direct);
        assert_eq!(hash_all_2, hash_direct);
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/tests/liveness.rs | tests/liveness.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::time::Duration;
use alpenglow::create_test_nodes;
use alpenglow::types::Slot;
use log::debug;
use rand::prelude::*;
// Liveness test entry points: each spins up a cluster of (num_nodes)
// equally-weighted test nodes and crashes (num_crashes) of them.
// All are `#[ignore]`d — they run long and are executed on demand.
#[tokio::test]
#[ignore]
async fn only_correct_nodes() {
    liveness_test(6, 0).await;
}
#[tokio::test]
#[ignore]
async fn single_crash() {
    liveness_test(11, 1).await;
}
#[tokio::test]
#[ignore]
async fn max_fast_crashes() {
    liveness_test(11, 2).await;
}
#[tokio::test]
#[ignore]
async fn too_many_fast_crashes() {
    liveness_test(11, 3).await;
}
#[tokio::test]
#[ignore]
async fn max_crashes() {
    liveness_test(11, 4).await;
}
#[tokio::test]
#[ignore]
async fn three_nodes() {
    liveness_test(3, 0).await;
}
#[tokio::test]
#[ignore]
async fn three_nodes_crash() {
    liveness_test(3, 1).await;
}
// TODO: implement transient failure test
//
// #[tokio::test]
// async fn transient_failure() {
//     liveness_test(11, 1).await;
// }
// Convenience wrapper: all current tests expect liveness to be maintained.
async fn liveness_test(num_nodes: usize, num_crashes: usize) {
    liveness_test_internal(num_nodes, num_crashes, true).await
}
// Runs the actual liveness check.
//
// Starts `num_nodes` test nodes, crashes `num_crashes` of them at random
// times, and in parallel polls every surviving node's pool: each must
// advance its finalized slot within every 10 s window, or the tester
// task panics. After 60 s the tester is aborted.
//
// `should_succeed == true` expects the tester to end via abort
// (i.e. `is_cancelled()`, meaning progress never stalled); `false`
// expects it to end via panic (progress stalled on some node).
async fn liveness_test_internal(num_nodes: usize, num_crashes: usize, should_succeed: bool) {
    // start `num_nodes` nodes
    let nodes = create_test_nodes(num_nodes as u64);
    let mut node_cancel_tokens = Vec::new();
    let mut pools = Vec::new();
    for node in nodes {
        pools.push(node.get_pool());
        node_cancel_tokens.push(node.get_cancel_token());
        tokio::spawn(node.run());
    }
    // spawn a thread checking pool for progress
    let cancel_tokens = node_cancel_tokens.clone();
    let mut liveness_tester = tokio::spawn(async move {
        let mut finalized = vec![Slot::new(0); pools.len()];
        for t in 1.. {
            tokio::time::sleep(Duration::from_secs(10)).await;
            for (i, pool) in pools.iter().enumerate() {
                // crashed nodes are exempt from the progress requirement
                if cancel_tokens[i].is_cancelled() {
                    continue;
                }
                let new_finalized = pool.read().await.finalized_slot();
                if new_finalized <= finalized[i] {
                    panic!("no progress on node {} after {} s", i, 10 * t);
                }
                finalized[i] = new_finalized;
            }
        }
    });
    // let `num_crashes` nodes crash after random delays
    // NOTE(review): the random delays are awaited sequentially, so later
    // crashes happen after the sum of earlier delays — presumably intended.
    let mut rng = rand::rng();
    let to_kill = (0..num_nodes).choose_multiple(&mut rng, num_crashes);
    for id in to_kill {
        let millis = rng.random_range(0..10_000);
        let delay = tokio::time::Duration::from_millis(millis);
        tokio::time::sleep(delay).await;
        debug!("crashing node {}", id);
        node_cancel_tokens[id].cancel();
    }
    // let it run for a while
    let res = tokio::select! {
        () = tokio::time::sleep(Duration::from_secs(60)) => {
            liveness_tester.abort();
            liveness_tester.await
        }
        res = &mut liveness_tester => res,
    };
    // check result of liveness test
    // aborted => cancelled error => liveness held; panicked => non-cancelled error
    assert_eq!(res.unwrap_err().is_cancelled(), should_succeed);
    // kill other nodes
    for token in node_cancel_tokens {
        token.cancel();
    }
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/benches/disseminator.rs | benches/disseminator.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
use alpenglow::crypto::signature::SecretKey;
use alpenglow::disseminator::Turbine;
use alpenglow::network::UdpNetwork;
use alpenglow::shredder::{MAX_DATA_PER_SLICE, RegularShredder, Shredder};
use alpenglow::types::slice::create_slice_with_invalid_txs;
use divan::counter::ItemsCount;
// Intentionally a no-op entry point: the benchmarks below are registered
// but not executed until divan gains async/tokio support.
fn main() {
    // run registered benchmarks.
    // TODO: enable once divan supports tokio
    // divan::main();
}
// Benchmarks sending a single shred to the root of the Turbine tree.
// Currently dead code in practice — `main` above does not invoke divan.
#[divan::bench]
fn turbine_tree(bencher: divan::Bencher) {
    bencher
        .counter(ItemsCount::new(1_usize))
        .with_inputs(|| {
            // two Turbine instances on arbitrary local UDP ports
            let net1 = UdpNetwork::new_with_any_port();
            let net2 = UdpNetwork::new_with_any_port();
            let turbine1 = Turbine::new(0, Vec::new(), net1);
            let turbine2 = Turbine::new(1, Vec::new(), net2);
            // shred a maximum-size slice and benchmark with its last shred
            let slice = create_slice_with_invalid_txs(MAX_DATA_PER_SLICE);
            let mut rng = rand::rng();
            let sk = SecretKey::new(&mut rng);
            let shreds = RegularShredder::default().shred(slice, &sk).unwrap();
            let shred = shreds[shreds.len() - 1].clone();
            (shred, turbine1, turbine2)
        })
        .bench_values(|(shred, turbine1, _turbine2)| {
            futures::executor::block_on(turbine1.send_shred_to_root(&shred)).unwrap()
        });
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/benches/network.rs | benches/network.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
use alpenglow::consensus::{Cert, ConsensusMessage, NotarCert, Vote};
use alpenglow::crypto::aggsig::SecretKey;
use alpenglow::crypto::merkle::GENESIS_BLOCK_HASH;
use alpenglow::crypto::{aggsig, signature};
use alpenglow::network::localhost_ip_sockaddr;
use alpenglow::shredder::{MAX_DATA_PER_SLICE, RegularShredder, Shred, Shredder};
use alpenglow::types::Slot;
use alpenglow::types::slice::create_slice_with_invalid_txs;
use alpenglow::{ValidatorId, ValidatorInfo};
use divan::counter::{BytesCount, ItemsCount};
fn main() {
// run registered benchmarks.
divan::main();
}
fn generate_vote() -> Vote {
let mut rng = rand::rng();
let hash = GENESIS_BLOCK_HASH;
let sk = aggsig::SecretKey::new(&mut rng);
Vote::new_notar(Slot::new(0), hash, &sk, 0)
}
#[divan::bench]
fn serialize_vote(bencher: divan::Bencher) {
bencher
.counter(ItemsCount::new(1_usize))
.with_inputs(|| ConsensusMessage::Vote(generate_vote()))
.bench_values(|msg: ConsensusMessage| wincode::serialize(&msg).unwrap());
}
#[divan::bench]
fn deserialize_vote(bencher: divan::Bencher) {
bencher
.counter(ItemsCount::new(1_usize))
.with_inputs(|| {
let msg = ConsensusMessage::Vote(generate_vote());
wincode::serialize(&msg).unwrap()
})
.bench_values(|bytes: Vec<u8>| {
let _msg: ConsensusMessage = wincode::deserialize(&bytes).unwrap();
});
}
fn generate_cert() -> Cert {
let (sks, val_info) = generate_validators(100);
let hash = GENESIS_BLOCK_HASH;
let votes = sks
.iter()
.enumerate()
.map(|(v, sk)| Vote::new_notar(Slot::new(0), hash.clone(), sk, v as ValidatorId))
.collect::<Vec<_>>();
let notar_cert = NotarCert::try_new(&votes, &val_info).unwrap();
Cert::Notar(notar_cert)
}
#[divan::bench]
fn serialize_cert(bencher: divan::Bencher) {
bencher
.counter(ItemsCount::new(1_usize))
.with_inputs(|| ConsensusMessage::Cert(generate_cert()))
.bench_values(|msg: ConsensusMessage| wincode::serialize(&msg).unwrap());
}
#[divan::bench]
fn deserialize_cert(bencher: divan::Bencher) {
bencher
.counter(ItemsCount::new(1_usize))
.with_inputs(|| {
let msg = ConsensusMessage::Cert(generate_cert());
wincode::serialize(&msg).unwrap()
})
.bench_values(|bytes: Vec<u8>| {
let _msg: ConsensusMessage = wincode::deserialize(&bytes).unwrap();
});
}
#[divan::bench]
fn serialize_slice(bencher: divan::Bencher) {
bencher
.counter(ItemsCount::new(1_usize))
.counter(BytesCount::new(MAX_DATA_PER_SLICE))
.with_inputs(|| {
let slice = create_slice_with_invalid_txs(MAX_DATA_PER_SLICE);
let mut rng = rand::rng();
let sk = signature::SecretKey::new(&mut rng);
RegularShredder::default()
.shred(slice, &sk)
.unwrap()
.into_iter()
.map(|v| v.into_shred())
.collect::<Vec<_>>()
})
.bench_values(|shreds: Vec<Shred>| {
for shred in shreds {
let _bytes = wincode::serialize(&shred).unwrap();
}
});
}
#[divan::bench]
fn serialize_slice_into(bencher: divan::Bencher) {
bencher
.counter(ItemsCount::new(1_usize))
.counter(BytesCount::new(MAX_DATA_PER_SLICE))
.with_inputs(|| {
let mut rng = rand::rng();
let slice = create_slice_with_invalid_txs(MAX_DATA_PER_SLICE);
let sk = signature::SecretKey::new(&mut rng);
let shreds = RegularShredder::default()
.shred(slice, &sk)
.unwrap()
.into_iter()
.map(|v| v.into_shred())
.collect::<Vec<_>>();
let buf = vec![0; 1500];
(buf, shreds)
})
.bench_values(|(mut buf, shreds): (Vec<u8>, Vec<Shred>)| {
for shred in shreds {
wincode::serialize_into(&mut buf, &shred).expect("serialization should not panic");
}
});
}
#[divan::bench]
fn deserialize_slice(bencher: divan::Bencher) {
bencher
.counter(ItemsCount::new(1_usize))
.counter(BytesCount::new(MAX_DATA_PER_SLICE))
.with_inputs(|| {
let mut rng = rand::rng();
let slice = create_slice_with_invalid_txs(MAX_DATA_PER_SLICE);
let sk = signature::SecretKey::new(&mut rng);
let shreds = RegularShredder::default().shred(slice, &sk).unwrap();
let mut serialized = Vec::new();
for shred in shreds {
let bytes = wincode::serialize(&shred.into_shred()).unwrap();
serialized.push(bytes);
}
serialized
})
.bench_values(|serialized: Vec<Vec<u8>>| {
for bytes in serialized {
let _shred: Shred = wincode::deserialize(&bytes).unwrap();
}
});
}
pub fn generate_validators(num_validators: u64) -> (Vec<SecretKey>, Vec<ValidatorInfo>) {
let mut rng = rand::rng();
let mut sks = Vec::new();
let mut voting_sks = Vec::new();
let mut validators = Vec::new();
for i in 0..num_validators {
sks.push(signature::SecretKey::new(&mut rng));
voting_sks.push(SecretKey::new(&mut rng));
validators.push(ValidatorInfo {
id: i,
stake: 1,
pubkey: sks[i as usize].to_pk(),
voting_pubkey: voting_sks[i as usize].to_pk(),
all2all_address: localhost_ip_sockaddr(0),
disseminator_address: localhost_ip_sockaddr(0),
repair_request_address: localhost_ip_sockaddr(0),
repair_response_address: localhost_ip_sockaddr(0),
});
}
(voting_sks, validators)
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/benches/shredder.rs | benches/shredder.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
use alpenglow::crypto::signature::SecretKey;
use alpenglow::shredder::{
AontShredder, CodingOnlyShredder, DATA_SHREDS, PetsShredder, RegularShredder, Shredder,
TOTAL_SHREDS, ValidatedShred,
};
use alpenglow::types::Slice;
use alpenglow::types::slice::create_slice_with_invalid_txs;
use divan::counter::BytesCount;
fn main() {
divan::main();
}
#[divan::bench(types = [RegularShredder, CodingOnlyShredder, PetsShredder, AontShredder])]
fn shred<S: Shredder>(bencher: divan::Bencher) {
let size = S::MAX_DATA_SIZE;
bencher
.counter(BytesCount::new(size))
.with_inputs(|| {
let slice = create_slice_with_invalid_txs(size);
let mut rng = rand::rng();
let sk = SecretKey::new(&mut rng);
let shredder = S::default();
(shredder, slice, sk)
})
.bench_values(|(mut shredder, slice, sk): (S, Slice, SecretKey)| {
let _ = shredder.shred(slice, &sk).unwrap();
});
}
#[divan::bench(types = [RegularShredder, CodingOnlyShredder, PetsShredder, AontShredder])]
fn deshred<S: Shredder>(bencher: divan::Bencher) {
let size = S::MAX_DATA_SIZE;
bencher
.counter(BytesCount::new(size))
.with_inputs(|| {
let slice = create_slice_with_invalid_txs(size);
let mut rng = rand::rng();
let sk = SecretKey::new(&mut rng);
let mut shredder = S::default();
let mut shreds = shredder.shred(slice, &sk).unwrap().map(Some);
// need at least DATA_SHREDS to reconstruct and want to include as many coding shreds as possible which should be at the end of the array
// so mark the first TOTAL_SHREDS - DATA_SHREDS as None
for shred in shreds.iter_mut().take(TOTAL_SHREDS - DATA_SHREDS) {
*shred = None;
}
(shredder, shreds)
})
.bench_values(
|(mut shredder, shreds): (S, [Option<ValidatedShred>; TOTAL_SHREDS])| {
let _ = shredder.deshred(&shreds).unwrap();
},
);
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
qkniep/alpenglow | https://github.com/qkniep/alpenglow/blob/dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3/benches/crypto.rs | benches/crypto.rs | // Copyright (c) Anza Technology, Inc.
// SPDX-License-Identifier: Apache-2.0
use alpenglow::crypto::merkle::{SliceMerkleTree, SliceProof};
use alpenglow::crypto::{IndividualSignature, Signature, aggsig, hash, signature};
use alpenglow::shredder::{MAX_DATA_PER_SHRED, MAX_DATA_PER_SLICE};
use divan::counter::{BytesCount, ItemsCount};
use rand::RngCore;
fn main() {
// run registered benchmarks.
divan::main();
}
#[divan::bench(name = "hash", consts = [16, 32, MAX_DATA_PER_SHRED, MAX_DATA_PER_SLICE])]
fn hash_bytes<const N: usize>(bencher: divan::Bencher) {
bencher
.counter(BytesCount::new(N))
.with_inputs(|| (0..N).map(|_| rand::random::<u8>()).collect())
.bench_values(|s: Vec<u8>| hash(&s));
}
#[divan::bench(consts = [64, 512, 1024])]
fn merkle_tree<const N: usize>(bencher: divan::Bencher) {
bencher
.counter(ItemsCount::new(1_usize))
.with_inputs(|| {
let mut rng = rand::rng();
let mut leaves = vec![Vec::new(); N];
for leaf in &mut leaves {
rng.fill_bytes(leaf);
}
leaves
})
.bench_values(|leaves: Vec<Vec<u8>>| {
let _ = SliceMerkleTree::new(&leaves);
});
}
#[divan::bench(consts = [64, 512, 1024])]
fn merkle_proof<const N: usize>(bencher: divan::Bencher) {
bencher
.counter(ItemsCount::new(1_usize))
.with_inputs(|| {
let mut rng = rand::rng();
let mut leaves = vec![Vec::new(); N];
for leaf in &mut leaves {
rng.fill_bytes(leaf);
}
SliceMerkleTree::new(&leaves)
})
.bench_values(|tree: SliceMerkleTree| tree.create_proof(0));
}
#[divan::bench(consts = [64, 512, 1024])]
fn merkle_verify<const N: usize>(bencher: divan::Bencher) {
bencher
.counter(ItemsCount::new(1_usize))
.with_inputs(|| {
let mut rng = rand::rng();
let mut leaves = vec![Vec::new(); N];
for leaf in &mut leaves {
rng.fill_bytes(leaf);
}
let tree = SliceMerkleTree::new(&leaves);
let proof = tree.create_proof(0);
(tree, leaves[0].clone(), 0, proof)
})
.bench_values(
|(tree, data, index, proof): (SliceMerkleTree, Vec<u8>, usize, SliceProof)| {
SliceMerkleTree::check_proof(&data, index, &tree.get_root(), &proof)
},
);
}
#[divan::bench]
fn sign_ed25519(bencher: divan::Bencher) {
bencher
.counter(ItemsCount::new(1_usize))
.with_inputs(|| {
let mut rng = rand::rng();
let mut bytes = [0; 128];
rng.fill_bytes(&mut bytes);
let sk = signature::SecretKey::new(&mut rng);
(sk, bytes)
})
.bench_values(|(sk, bytes): (signature::SecretKey, [u8; 128])| {
let _ = sk.sign(&bytes);
});
}
#[divan::bench]
fn verify_ed25519(bencher: divan::Bencher) {
bencher
.counter(ItemsCount::new(1_usize))
.with_inputs(|| {
let mut rng = rand::rng();
let mut bytes = [0; 128];
rng.fill_bytes(&mut bytes);
let sk = signature::SecretKey::new(&mut rng);
(sk.sign(&bytes), bytes, sk.to_pk())
})
.bench_values(
|(sig, bytes, pk): (Signature, [u8; 128], signature::PublicKey)| {
sig.verify(&bytes, &pk)
},
);
}
#[divan::bench(consts = [100, 1000, 10_000])]
fn aggregate_bls<const N: usize>(bencher: divan::Bencher) {
bencher
.counter(ItemsCount::new(1_usize))
.with_inputs(|| {
let mut rng = rand::rng();
let mut bytes = [0; 128];
let mut pks = Vec::new();
for _ in 0..N {
rng.fill_bytes(&mut bytes);
let sk = blst::min_pk::SecretKey::key_gen(&bytes, &[]).unwrap();
pks.push(sk.sk_to_pk());
}
pks
})
.bench_values(|pks: Vec<blst::min_pk::PublicKey>| {
let pk_refs: Vec<_> = pks.iter().collect();
blst::min_pk::AggregatePublicKey::aggregate(&pk_refs, false)
});
}
#[divan::bench]
fn compress_bls(bencher: divan::Bencher) {
bencher
.counter(ItemsCount::new(1_usize))
.with_inputs(|| {
let mut rng = rand::rng();
let mut ikm = [0; 32];
let mut msg = [0; 32];
rng.fill_bytes(&mut ikm);
rng.fill_bytes(&mut msg);
let sk = blst::min_sig::SecretKey::key_gen(&ikm, &[]).unwrap();
sk.sign(&msg, &[], &[])
})
.bench_values(|sig: blst::min_sig::Signature| sig.compress());
}
#[divan::bench]
fn uncompress_bls(bencher: divan::Bencher) {
bencher
.counter(ItemsCount::new(1_usize))
.with_inputs(|| {
let mut rng = rand::rng();
let mut ikm = [0; 32];
let mut msg = [0; 32];
rng.fill_bytes(&mut ikm);
rng.fill_bytes(&mut msg);
let sk = blst::min_sig::SecretKey::key_gen(&ikm, &[]).unwrap();
let sig = sk.sign(&msg, &[], &[]);
sig.compress()
})
.bench_values(|comp: [u8; 48]| blst::min_sig::Signature::uncompress(&comp));
}
#[divan::bench]
fn sign_bls(bencher: divan::Bencher) {
bencher
.counter(ItemsCount::new(1_usize))
.with_inputs(|| {
let mut rng = rand::rng();
let mut bytes = [0; 128];
rng.fill_bytes(&mut bytes);
let sk = aggsig::SecretKey::new(&mut rng);
(sk, bytes)
})
.bench_values(|(sk, bytes): (aggsig::SecretKey, [u8; 128])| {
let _ = sk.sign(&bytes);
});
}
#[divan::bench]
fn verify_bls(bencher: divan::Bencher) {
bencher
.counter(ItemsCount::new(1_usize))
.with_inputs(|| {
let mut rng = rand::rng();
let mut bytes = [0; 128];
rng.fill_bytes(&mut bytes);
let sk = aggsig::SecretKey::new(&mut rng);
(sk.sign(&bytes), bytes, sk.to_pk())
})
.bench_values(
|(sig, bytes, pk): (IndividualSignature, [u8; 128], aggsig::PublicKey)| {
sig.verify(&bytes, &pk)
},
);
}
| rust | Apache-2.0 | dbf7f05ed359156910a5ffb6df5a0adbb2ce71d3 | 2026-01-04T20:22:21.174106Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/multi_map.rs | src/multi_map.rs | use std::{borrow::Borrow, collections::HashMap, hash::Hash};
#[derive(Clone, Debug)]
pub struct MultiMap<K: Eq + Hash, V> {
inner: HashMap<K, Vec<V>>,
}
impl<K: Eq + Hash, V> Default for MultiMap<K, V> {
fn default() -> Self {
Self::new()
}
}
impl<K: Eq + Hash, V> MultiMap<K, V> {
pub fn new() -> Self {
Self {
inner: HashMap::new(),
}
}
pub fn insert(&mut self, k: K, v: V) {
self.inner.entry(k).or_default().push(v);
}
pub fn get<'a, Q>(&'a self, k: &Q) -> Vec<&'a V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let v = self.inner.get(k);
match v {
Some(v) => unsafe {
let ptr = v.as_ptr();
let mut result = Vec::with_capacity(v.len());
for i in 0..v.len() {
result.push(ptr.add(i).as_ref().unwrap());
}
result
},
None => Vec::new(),
}
}
pub fn get_mut<'a, Q>(&'a mut self, k: &Q) -> Vec<&'a mut V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let v = self.inner.get_mut(k);
match v {
Some(v) => unsafe {
let ptr = v.as_mut_ptr();
let mut result = Vec::with_capacity(v.len());
for i in 0..v.len() {
result.push(ptr.add(i).as_mut().unwrap());
}
result
},
None => Vec::new(),
}
}
pub fn contains_key<Q>(&self, k: &Q) -> bool
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
self.inner.contains_key(k.borrow())
}
pub fn iter(&self) -> impl Iterator<Item = (&K, Vec<&V>)> {
MultiMapIterator {
multi_map: self,
key_idx: 0,
}
}
pub fn iter_flat(&self) -> impl Iterator<Item = (&K, &V)> {
self.inner
.iter()
.flat_map(|(k, v)| v.iter().map(move |v| (k, v)))
}
}
struct MultiMapIterator<'a, K: Eq + Hash, V> {
multi_map: &'a MultiMap<K, V>,
key_idx: usize,
}
impl<'a, K: Eq + Hash, V> Iterator for MultiMapIterator<'a, K, V> {
type Item = (&'a K, Vec<&'a V>);
fn next(&mut self) -> Option<Self::Item> {
let key = self.multi_map.inner.keys().nth(self.key_idx)?;
let values = self.multi_map.get(key);
self.key_idx += 1;
Some((key, values))
}
}
#[cfg(test)]
mod tests {
use crate::multi_map::MultiMap;
#[test]
fn test_multi_map() {
let mut m = MultiMap::new();
m.insert("A", "1");
m.insert("A", "2");
assert_eq!(
m.get(&"A")
.iter()
.map(ToString::to_string)
.collect::<Vec<_>>(),
vec!["1".to_string(), "2".to_string()]
);
let expected = vec![("A", "1"), ("A", "2")];
for pair in m.iter_flat().map(|(k, v)| (*k, *v)) {
assert!(expected.contains(&pair), "{pair:?}");
}
}
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/tags.rs | src/tags.rs | use std::fmt::Display;
use colored::{Color, Colorize};
#[derive(Debug, Clone)]
pub enum Tag {
Err,
Dargo,
Clean,
Note,
GitHub,
Twitter,
Check,
Lexer,
Parser,
Compiler,
Git,
Build,
Run,
Docs,
Dependency,
Setup,
IO,
Go,
TypeResolve,
}
impl Display for Tag {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
return match self {
Self::Err => write!(f, "{}", " error ".on_red().bright_white()),
Self::Dargo => write!(f, "{}", " dargo ".on_bright_yellow().black()),
Self::Note => write!(f, "{}", " NOTE ".on_bright_black().bright_white()),
Self::GitHub => write!(f, "{}", " GitHub ".on_black().bright_white()),
Self::Twitter => write!(f, "{}", " Twitter ".on_blue().bright_white()),
Self::Check => write!(f, "{}", " ✓ ".on_green().bright_white()),
Self::Lexer => write!(f, "{}", " lexer ".on_bright_white().black()),
Self::Parser => write!(f, "{}", " parser ".on_white().black()),
Self::Compiler => write!(f, "{}", " compiler ".on_black().bright_white()),
Self::Docs => write!(f, "{}", " docs ".on_green().bright_white()),
Self::Git => write!(
f,
"{}",
" git "
.on_color(Color::TrueColor {
r: 243,
g: 20,
b: 20
})
.bright_white()
),
Self::Build => write!(
f,
"{}",
" build "
.on_color(Color::TrueColor {
r: 43,
g: 20,
b: 20
})
.bright_white()
),
Self::Run => write!(
f,
"{}",
" run "
.on_color(Color::TrueColor {
r: 43,
g: 80,
b: 20
})
.bright_white()
),
Self::Dependency => write!(
f,
"{}",
" dependency "
.on_color(Color::TrueColor {
r: 23,
g: 10,
b: 120
})
.bright_white()
),
Self::Setup => write!(
f,
"{}",
" setup "
.on_color(Color::TrueColor {
r: 23,
g: 120,
b: 20
})
.bright_white()
),
Self::IO => write!(
f,
"{}",
" IO "
.on_color(Color::TrueColor {
r: 23,
g: 120,
b: 20
})
.bright_white()
),
Self::Clean => write!(f, "{}", " clean ".on_bright_magenta().bright_white()),
Self::Go => write!(f, "{}", " go ".on_bright_blue().bright_white()),
Self::TypeResolve => write!(f, "{}", " type resolve ".on_bright_blue().bright_white()),
};
}
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/main.rs | src/main.rs | #![feature(impl_trait_in_bindings)]
#![allow(
clippy::needless_return,
clippy::match_like_matches_macro,
clippy::only_used_in_recursion,
clippy::large_enum_variant
)]
use std::{
env,
error::Error,
fs::{self, File},
io::Write,
path::{Path, PathBuf},
process,
sync::mpsc::Sender,
};
use chumsky::{Parser, error::Rich};
use colored::Colorize;
use parse::{Spanned, lexer::Token, source_file_parser::SourceFile};
use tags::Tag;
use crate::{
parse::{
Context, SS,
function_parser::LambdaFunctionExpr,
lexer::lex_parser,
make_input, parse_failure,
source_file_parser::source_file_parser,
type_parser::{Duck, TypeExpr},
use_statement_parser::UseStatement,
value_parser::{
Assignment, Declaration, ValFmtStringContents, ValHtmlStringContents, ValueExpr,
},
},
semantics::type_resolve::{self, TypeEnv},
};
use lazy_static::lazy_static;
pub mod cli;
pub mod dargo;
pub mod emit;
pub mod go_fixup;
pub mod multi_map;
pub mod parse;
pub mod reports;
pub mod semantics;
pub mod tags;
lazy_static! {
static ref DUCK_STD_PATH: PathBuf = {
env::home_dir()
.map(|mut path| {
path.push(".duck");
path.push("std");
path.push("std.duck");
path
})
.expect("couldn't get pathbuf for std lib")
};
static ref DARGO_DOT_DIR: PathBuf = {
fn require_sub_dir(str: &str) {
let Ok(current_dir) = env::current_dir() else {
println!("{}{} coulnd't read current dir", Tag::Dargo, Tag::Err,);
panic!()
};
let required_dir = {
let mut current_dir_clone = current_dir.clone();
current_dir_clone.push(str);
current_dir_clone
};
if required_dir.exists() {
return;
}
if let Err(err) = fs::create_dir(required_dir.clone()) {
println!(
"{}{} Couldn't create {} dot dir in current directory. - {err}",
Tag::Dargo,
Tag::Err,
required_dir.to_string_lossy()
);
}
}
let duck_dir = Path::new(".dargo");
require_sub_dir(".dargo");
require_sub_dir(".dargo/git");
require_sub_dir(".dargo/project");
return duck_dir.to_path_buf();
};
}
// todo(@Mvmo): code doc generation, using doc comments
// we already have the function lex_with_comments, right below this issue.
// now the only thing we have to do is lex the code with comments and then generate some kind of report, maybe in json, which can be interpreted by some docs generator.
// or maybe just generate the html docs directly
#[allow(dead_code)]
fn lex_with_comments(file_name: &'static str, file_contents: &'static str) -> Vec<Spanned<Token>> {
let (lex, lex_errors) = lex_parser(file_name, file_contents)
.parse(file_contents)
.into_output_errors();
lex_errors.into_iter().for_each(|e| {
parse_failure(
file_name,
&Rich::<&str, SS>::custom(
SS {
start: e.span().start,
end: e.span().end,
context: crate::parse::Context {
file_name,
file_contents,
},
},
format!("{}{} {}", Tag::Lexer, Tag::Err, e.reason()),
),
file_contents,
);
});
lex.unwrap()
}
fn lex(file_name: &'static str, file_contents: &'static str) -> Vec<Spanned<Token>> {
let (lex, lex_errors) = lex_parser(file_name, file_contents)
.parse(file_contents)
.into_output_errors();
lex_errors.into_iter().for_each(|e| {
parse_failure(
file_name,
&Rich::<&str, SS>::custom(
SS {
start: e.span().start,
end: e.span().end,
context: crate::parse::Context {
file_name,
file_contents,
},
},
format!("{}{} {}", Tag::Lexer, Tag::Err, e.reason()),
),
file_contents,
);
});
lex.unwrap()
.iter()
// .filter(|(token, _)| !matches!(token, Token::Comment(..) | Token::DocComment(..)))
.filter(|(token, _)| !matches!(token, Token::Comment(..)))
.cloned()
.collect::<Vec<_>>()
}
fn parse_src_file(
src_file: &Path,
src_file_name: &'static str,
src_file_file_contents: &'static str,
tokens: Vec<Spanned<Token>>,
) -> SourceFile {
if !DUCK_STD_PATH.exists() {
println!(
"{}{}{} Standard library not found",
Tag::Dargo,
Tag::Err,
Tag::Build,
);
std::process::exit(0);
}
let file_text = std::fs::read_to_string(DUCK_STD_PATH.to_path_buf())
.unwrap()
.leak();
let lex = lex("std.duck", file_text);
let mut std_src_file = source_file_parser(
{
let mut buf = DUCK_STD_PATH.to_path_buf();
buf.pop();
buf
},
make_input,
)
.parse(make_input(
SS {
start: 0,
end: file_text.len(),
context: Context {
file_name: "std.duck",
file_contents: file_text,
},
},
lex.as_slice(),
))
.unwrap()
.flatten(&vec!["std".to_string()], false);
for func in std_src_file.function_definitions.iter_mut() {
for (_, p) in &mut func.params {
typename_reset_global(&mut p.0);
}
typename_reset_global(&mut func.return_type.0);
typename_reset_global_value_expr(&mut func.value_expr.0);
}
fn typename_reset_global(t: &mut TypeExpr) {
match t {
TypeExpr::TypeName(global, _, type_params) => {
type_params
.iter_mut()
.for_each(|(t, _)| typename_reset_global(t));
*global = false;
}
TypeExpr::Array(t) => typename_reset_global(&mut t.0),
TypeExpr::Duck(Duck { fields }) => {
for field in fields {
typename_reset_global(&mut field.type_expr.0);
}
}
TypeExpr::Tuple(fields) => {
for field in fields {
typename_reset_global(&mut field.0);
}
}
TypeExpr::Fun(params, ret, _) => {
for (_, p) in params {
typename_reset_global(&mut p.0);
}
typename_reset_global(&mut ret.0);
}
_ => {}
}
}
fn typename_reset_global_value_expr(type_expr: &mut ValueExpr) {
match type_expr {
ValueExpr::BitAnd { lhs, rhs }
| ValueExpr::BitOr { lhs, rhs }
| ValueExpr::BitXor { lhs, rhs }
| ValueExpr::ShiftLeft {
target: lhs,
amount: rhs,
}
| ValueExpr::ShiftRight {
target: lhs,
amount: rhs,
} => {
typename_reset_global_value_expr(&mut lhs.0);
typename_reset_global_value_expr(&mut rhs.0);
}
ValueExpr::Negate(d)
| ValueExpr::Async(d)
| ValueExpr::Defer(d)
| ValueExpr::BitNot(d) => typename_reset_global_value_expr(&mut d.0),
ValueExpr::As(v, t) => {
typename_reset_global(&mut t.0);
typename_reset_global_value_expr(&mut v.0);
}
ValueExpr::Deref(v) | ValueExpr::Ref(v) | ValueExpr::RefMut(v) => {
typename_reset_global_value_expr(&mut v.0)
}
ValueExpr::For {
ident: _,
target,
block,
} => {
typename_reset_global_value_expr(&mut target.0);
typename_reset_global_value_expr(&mut block.0);
}
ValueExpr::HtmlString(contents) => {
for c in contents {
if let ValHtmlStringContents::Expr(e) = c {
typename_reset_global_value_expr(&mut e.0);
}
}
}
ValueExpr::Match {
value_expr,
arms,
else_arm,
span: _,
} => {
typename_reset_global_value_expr(&mut value_expr.0);
for arm in arms {
typename_reset_global_value_expr(&mut arm.value_expr.0);
typename_reset_global(&mut arm.type_case.0);
}
if let Some(else_arm) = else_arm {
typename_reset_global_value_expr(&mut else_arm.value_expr.0);
typename_reset_global(&mut else_arm.type_case.0);
}
}
ValueExpr::Block(exprs) => {
for expr in exprs {
typename_reset_global_value_expr(&mut expr.0);
}
}
ValueExpr::Add(l, r)
| ValueExpr::Mul(l, r)
| ValueExpr::Sub(l, r)
| ValueExpr::Div(l, r)
| ValueExpr::Mod(l, r)
| ValueExpr::Equals(l, r)
| ValueExpr::NotEquals(l, r)
| ValueExpr::LessThan(l, r)
| ValueExpr::LessThanOrEquals(l, r)
| ValueExpr::GreaterThan(l, r)
| ValueExpr::GreaterThanOrEquals(l, r)
| ValueExpr::And(l, r)
| ValueExpr::Or(l, r) => {
typename_reset_global_value_expr(&mut l.0);
typename_reset_global_value_expr(&mut r.0);
}
ValueExpr::Lambda(l) => {
let LambdaFunctionExpr {
is_mut: _,
params,
return_type,
value_expr,
} = &mut **l;
for (_, p) in params {
if let Some(p) = p.as_mut() {
typename_reset_global(&mut p.0);
}
}
if let Some(return_type) = return_type {
typename_reset_global(&mut return_type.0);
}
typename_reset_global_value_expr(&mut value_expr.0);
}
ValueExpr::ArrayAccess(target, idx) => {
typename_reset_global_value_expr(&mut target.0);
typename_reset_global_value_expr(&mut idx.0);
}
ValueExpr::FunctionCall {
target,
params,
type_params: _,
..
} => {
// todo: type_params
for p in params {
typename_reset_global_value_expr(&mut p.0);
}
typename_reset_global_value_expr(&mut target.0);
}
ValueExpr::FieldAccess { target_obj, .. } => {
typename_reset_global_value_expr(&mut target_obj.0);
}
ValueExpr::Array(exprs, _ty) => {
for expr in exprs {
typename_reset_global_value_expr(&mut expr.0);
}
}
ValueExpr::BoolNegate(expr) | ValueExpr::Return(Some(expr)) => {
typename_reset_global_value_expr(&mut expr.0);
}
ValueExpr::FormattedString(content) => {
for c in content {
if let ValFmtStringContents::Expr(e) = c {
typename_reset_global_value_expr(&mut e.0);
}
}
}
ValueExpr::If {
condition,
then,
r#else,
} => {
typename_reset_global_value_expr(&mut condition.0);
typename_reset_global_value_expr(&mut then.0);
if let Some(r#else) = r#else {
typename_reset_global_value_expr(&mut r#else.0);
}
}
ValueExpr::While { condition, body } => {
typename_reset_global_value_expr(&mut condition.0);
typename_reset_global_value_expr(&mut body.0);
}
ValueExpr::VarDecl(b) => {
let Declaration {
name: _,
type_expr,
initializer,
is_const: _,
} = &mut b.0;
if let Some(type_expr) = type_expr.as_mut() {
typename_reset_global(&mut type_expr.0);
}
if let Some(initializer) = initializer.as_mut() {
typename_reset_global_value_expr(&mut initializer.0);
}
}
ValueExpr::VarAssign(b) => {
let Assignment { target, value_expr } = &mut b.0;
typename_reset_global_value_expr(&mut target.0);
typename_reset_global_value_expr(&mut value_expr.0);
}
ValueExpr::Tuple(fields) => {
for field in fields {
typename_reset_global_value_expr(&mut field.0);
}
}
ValueExpr::Duck(fields) => {
for field in fields {
typename_reset_global_value_expr(&mut field.1.0);
}
}
ValueExpr::Struct { fields, .. } => {
for field in fields {
typename_reset_global_value_expr(&mut field.1.0);
}
}
ValueExpr::RawStruct {
is_global,
name: _,
fields,
type_params,
} => {
*is_global = false;
for field in fields {
typename_reset_global_value_expr(&mut field.1.0);
}
for type_param in type_params {
typename_reset_global(&mut type_param.0);
}
}
ValueExpr::Break
| ValueExpr::Char(..)
| ValueExpr::Continue
| ValueExpr::Float(..)
| ValueExpr::String(..)
| ValueExpr::Int(..)
| ValueExpr::Bool(..)
| ValueExpr::Variable(..)
| ValueExpr::RawVariable(..)
| ValueExpr::Tag(..)
| ValueExpr::Return(..)
| ValueExpr::InlineGo(..) => {}
}
}
let (src_file, parse_errors) = source_file_parser(
{
let mut src_file_clone = src_file.to_path_buf();
src_file_clone.pop();
src_file_clone
},
make_input,
)
.parse(make_input(
SS {
start: 0,
end: src_file_file_contents.len(),
context: Context {
file_name: src_file_name,
file_contents: src_file_file_contents,
},
},
&tokens,
))
.into_output_errors();
parse_errors.into_iter().for_each(|e| {
parse_failure(src_file_name, &e, src_file_file_contents);
});
// TODO: do this for all dependencies
let mut result = src_file.unwrap().flatten(&vec![], true);
#[allow(clippy::nonminimal_bool)]
if true {
// <- use this if you want to test without std
for s in &std_src_file.function_definitions {
result.function_definitions.push(s.clone());
}
for s in &std_src_file.type_definitions {
result.type_definitions.push(s.clone());
}
for s in &std_src_file.struct_definitions {
result.struct_definitions.push(s.clone());
}
for s in &std_src_file.use_statements {
if let UseStatement::Go(..) = s {
result.push_use(s);
}
}
for s in &std_src_file.jsx_compontents {
result.jsx_compontents.push(s.clone());
}
for s in &std_src_file.duckx_components {
result.duckx_components.push(s.clone());
}
for test_case in &std_src_file.test_cases {
result.test_cases.push(test_case.clone());
}
for extension_def in std_src_file.extensions_defs {
result.extensions_defs.push(extension_def.clone());
}
for global_def in std_src_file.global_var_decls {
result.global_var_decls.push(global_def.clone());
}
}
result
}
fn typecheck<'a>(src_file_ast: &mut SourceFile, tailwind_tx: &'a Sender<String>) -> TypeEnv<'a> {
let mut type_env = TypeEnv {
tailwind_sender: Some(tailwind_tx),
..TypeEnv::default()
};
type_resolve::typeresolve_source_file(src_file_ast, &mut type_env);
type_env
}
fn write_in_duck_dotdir(file_name: &str, content: &str) -> PathBuf {
let target_file = {
let mut target_file_path = DARGO_DOT_DIR.clone();
target_file_path.push(file_name);
target_file_path
};
let mut file = File::create(target_file.clone()).expect("couldn't create file in duck dot dir"); // TODO error handling
file.write_all(content.as_bytes())
.expect("couldn't write file in duck dot dir"); // TODO error handling
target_file
}
pub fn duck_with_message(msg: &str) {
println!(
"{}\n{}{}{} {msg}\n{}",
" _,".bright_yellow().bold(),
"(".bright_yellow().bold(),
"~".blue().bold(),
"<".yellow().bold(),
"<_)".bright_yellow().bold(),
);
}
fn main() -> Result<(), Box<dyn Error>> {
let cli_result = dargo::cli::run_cli();
if let Err(err) = cli_result {
duck_with_message("Ooops... something went wrong!!");
println!("{}", err.0);
process::exit(1);
}
Ok(())
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/emit/value.rs | src/emit/value.rs | use std::{
collections::{HashMap, HashSet, VecDeque},
panic,
};
use crate::{
emit::{fix_ident_for_go, function::function_epilogue_2, types::escape_string_for_go},
parse::{
SS, Spanned,
duckx_component_parser::find_client_components,
failure, failure_with_occurence,
function_parser::LambdaFunctionExpr,
struct_parser::NamedDuckDefinition,
type_parser::{Duck, TypeExpr},
value_parser::{
Declaration, ValFmtStringContents, ValHtmlStringContents, ValueExpr, empty_range,
},
},
semantics::{
ident_mangler::{MANGLE_SEP, mangle},
type_resolve::TypeEnv,
},
};
fn emit_duck(
d: &Spanned<ValueExpr>,
t: Option<&Spanned<TypeExpr>>,
ir: &mut ToIr,
type_env: &mut TypeEnv,
) -> (Vec<IrInstruction>, Option<IrValue>) {
let t = if let Some(t) = t.cloned() {
t
} else {
TypeExpr::from_value_expr(d, type_env)
};
let mut d = d.clone();
let ValueExpr::Duck(fields) = &mut d.0 else {
panic!("Compiler Bug: Only call this method by with a duck type ");
};
let TypeExpr::Duck(Duck { fields: ty_fields }) = &t.0 else {
panic!("Compiler Bug: type is not a duck")
};
fields.retain(|f| ty_fields.iter().any(|f2| f2.name.as_str() == f.0.as_str()));
let mut res = Vec::new();
let mut res_vars = Vec::new();
for (field_name, (field_expr, _)) in fields {
let (field_instr, field_res) = field_expr.direct_or_with_instr(type_env, ir, d.1);
res.extend(field_instr);
if let Some(field_res) = field_res {
res_vars.push((field_name.clone(), field_res));
} else {
return (res, None);
}
}
let res_var = ir.new_var();
res.extend([
IrInstruction::VarDecl(res_var.clone(), t.0.as_go_type_annotation(type_env)),
IrInstruction::VarAssignment(
res_var.clone(),
IrValue::Duck(t.0.as_clean_go_type_name(type_env), res_vars),
),
]);
(res, as_rvar(res_var))
}
#[derive(Debug, Clone)]
pub struct ToIr {
pub var_counter: usize,
pub per_var_counter: Vec<HashMap<String, usize>>, // for shadowing
pub labels: Vec<String>,
}
impl Default for ToIr {
fn default() -> Self {
ToIr {
var_counter: 0,
labels: vec![],
per_var_counter: vec![HashMap::new()],
}
}
}
/// Name of the variable an expression's result was stored in.
/// Expressions further down should use this if they want the result.
type IrRes = String;
/// A Go-level identifier.
type Identifier = String;
/// A function parameter as `(name, go_type)`.
type Param = (String, String);
/// A Go return-type annotation; `None` means the function returns nothing.
type ReturnType = Option<String>;
/// A single statement of the Go-targeting IR.
///
/// Every binary-operation variant carries `(destination, lhs, rhs, operand_type)`;
/// the `TypeExpr` is retained so codegen can pick a type-specific
/// implementation for the operator.
#[derive(Debug, Clone, PartialEq)]
pub enum IrInstruction {
    /// Package-level variable; `init_code` runs to produce its initial value.
    GlobalVarDecl {
        name: String,
        go_type: String,
        init_code: Vec<IrInstruction>,
    },
    /// Go `defer` of the wrapped instruction.
    Defer(Box<IrInstruction>),
    /// Labeled `for <ident> := range <range_target> { body }`.
    ForRangeElem {
        ident: String,
        range_target: IrValue,
        body: Vec<IrInstruction>,
        label: String,
    },
    // Code Statements
    /// `var <name> <go_type>`.
    VarDecl(String, String),
    /// `<dst> = <value>`.
    VarAssignment(IrRes, IrValue),
    /// Function call; the optional `IrRes` receives the result.
    FunCall(Option<IrRes>, IrValue, Vec<IrValue>),
    /// Concatenation of the given values into the destination string.
    StringConcat(IrRes, Vec<IrValue>),
    Add(IrRes, IrValue, IrValue, TypeExpr),
    Mul(IrRes, IrValue, IrValue, TypeExpr),
    Sub(IrRes, IrValue, IrValue, TypeExpr),
    Mod(IrRes, IrValue, IrValue, TypeExpr),
    Div(IrRes, IrValue, IrValue, TypeExpr),
    Equals(IrRes, IrValue, IrValue, TypeExpr),
    NotEquals(IrRes, IrValue, IrValue, TypeExpr),
    LessThan(IrRes, IrValue, IrValue, TypeExpr),
    LessThanOrEquals(IrRes, IrValue, IrValue, TypeExpr),
    GreaterThan(IrRes, IrValue, IrValue, TypeExpr),
    GreaterThanOrEquals(IrRes, IrValue, IrValue, TypeExpr),
    And(IrRes, IrValue, IrValue, TypeExpr),
    Or(IrRes, IrValue, IrValue, TypeExpr),
    /// `break`, optionally targeting a label.
    Break(Option<String>),
    /// `continue`, optionally targeting a label.
    Continue(Option<String>),
    /// `return`, optionally with a value.
    Return(Option<IrValue>),
    /// Raw Go source spliced verbatim into the output.
    InlineGo(String),
    If(
        IrValue,                    // bool_value
        Vec<IrInstruction>,         // body
        Option<Vec<IrInstruction>>, // else
    ),
    /// Loop as `(body, label)` — labeled like `ForRangeElem`.
    Loop(Vec<IrInstruction>, String),
    /// Plain nested block of instructions.
    Block(Vec<IrInstruction>),
    // Top-Level Statements
    /// `package <name>` clause.
    GoPackage(String),
    /// Import block; each entry is `(optional alias, import path)`.
    GoImports(Vec<(Option<String>, String)>),
    GenericFun(
        String,                // Name
        Vec<(String, String)>, // Generics
        Vec<(String, String)>, // Params
        Option<String>,        // Return Type
        Vec<IrInstruction>,    // Body
    ),
    FunDef(
        String,                   // Name
        Option<(String, String)>, // Receiver
        Vec<(String, String)>,    // Params
        Option<String>,           // Return Type
        Vec<IrInstruction>,       // Body
    ),
    /// `type <name> struct { … }` with `(field, go_type)` pairs.
    StructDef(String, Vec<(String, String)>),
    InterfaceDef(
        Identifier,                                // Name
        Vec<(String, String)>,                     // Generics
        Vec<(Identifier, Vec<Param>, ReturnType)>, // Methods
    ),
    /// Go type switch over the value, one [`Case`] per arm.
    SwitchType(IrValue, Vec<Case>),
}
/// A guard's evaluation: the instructions computing it plus the value holding
/// the boolean result (`None` when no result value was produced).
type IrCondition = (Vec<IrInstruction>, Option<IrValue>);

/// One arm of an [`IrInstruction::SwitchType`].
#[derive(Debug, Clone, PartialEq)]
pub struct Case {
    /// Go type name this arm matches on.
    pub type_name: String,
    /// Instructions emitted for the arm body.
    pub instrs: Vec<IrInstruction>,
    /// Name the matched value is bound to inside the arm, if any.
    pub identifier_binding: Option<String>,
    /// Source-level guard expression attached to the arm, if any.
    pub condition: Option<Spanned<ValueExpr>>,
    // NOTE(review): presumably guard-qualified sub-arms evaluated in order —
    // confirm against the switch emission code.
    pub conditional_branches: Option<Vec<(IrCondition, Case)>>,
    /// Source span this case originates from.
    pub span: SS,
}
/// An expression-level value in the Go-targeting IR.
#[derive(Debug, Clone, PartialEq)]
pub enum IrValue {
    Int(u64),
    Float(f64),
    /// String literal. NOTE(review): the flag's meaning (raw vs. needs
    /// escaping?) is established at the emission site — confirm there.
    String(String, bool),
    Bool(bool),
    Char(char),
    /// Array literal as `(go_type, elements)`.
    Array(String, Vec<IrValue>),
    Lambda(
        Vec<(String, String)>, // params
        Option<String>,        // return type
        Vec<IrInstruction>,    // body
    ),
    /// Tuple literal as `(go_type, elements)`.
    Tuple(String, Vec<IrValue>),
    /// Duck literal as `(go_type, (field, value) pairs)`.
    Duck(String, Vec<(String, IrValue)>),
    /// Struct literal as `(go_type, (field, value) pairs)`.
    Struct(String, Vec<(String, IrValue)>),
    /// Tag value (rendered elsewhere in this module as a `Tag__<name>` unit struct).
    Tag(String),
    /// Reference to a named variable.
    Var(String),
    /// Logical negation (`!`).
    BoolNegate(Box<IrValue>),
    FieldAccess(Box<IrValue>, String),
    ArrayAccess(Box<IrValue>, Box<IrValue>),
    /// Verbatim Go expression text.
    Imm(String),
    // NOTE(review): presumably address-of (`&`) — confirm at emission.
    Pointer(Box<IrValue>),
    /// Arithmetic negation (`-`).
    Negate(Box<IrValue>),
    /// Pointer dereference (`*`).
    Deref(Box<IrValue>),
    Nil,
    ShiftLeft(Box<IrValue>, Box<IrValue>),
    ShiftRight(Box<IrValue>, Box<IrValue>),
    BitAnd(Box<IrValue>, Box<IrValue>),
    BitOr(Box<IrValue>, Box<IrValue>),
    BitXor(Box<IrValue>, Box<IrValue>),
    BitNot(Box<IrValue>),
}
impl IrValue {
    /// IR value for the unit/empty tuple `()`, typed with the Go annotation
    /// an empty tuple expression resolves to.
    ///
    /// NOTE(review): uses two independent `TypeEnv::default()`s; this assumes
    /// the empty-tuple annotation needs nothing from an accumulated
    /// environment — confirm if `TypeEnv` ever becomes stateful here.
    pub fn empty_tuple() -> Self {
        Self::Tuple(
            TypeExpr::from_value_expr(
                &ValueExpr::Tuple(vec![]).into_empty_span(),
                &mut TypeEnv::default(),
            )
            .0
            .as_go_type_annotation(&mut TypeEnv::default()),
            vec![],
        )
    }
}
impl ToIr {
    /// Opens a fresh shadowing scope for per-variable declaration counters.
    pub fn push_var_counters(&mut self) {
        self.per_var_counter.push(Default::default());
    }

    /// Drops all scopes and starts over with a single empty scope.
    pub fn reset_var_counters(&mut self) {
        self.per_var_counter.clear();
        self.push_var_counters();
    }

    /// Closes the innermost shadowing scope.
    pub fn pop_var_counters(&mut self) {
        self.per_var_counter.pop();
    }

    /// Records one more declaration of `s` in the innermost scope.
    /// Returns whether it had been declared there before this call.
    pub fn already_declared_and_inc(&mut self, s: &str) -> bool {
        let before = self.already_declared(s);
        // Increment unconditionally. The previous version used
        // `.and_modify(+1).or_default()`, which only worked because the query
        // above happened to pre-insert a zero entry; this form does not depend
        // on that hidden coupling.
        *self
            .per_var_counter
            .last_mut()
            .unwrap()
            .entry(s.to_string())
            .or_insert(0) += 1;
        before
    }

    /// Returns whether `s` has been declared in the innermost scope.
    ///
    /// Pure query: unlike the previous version it no longer inserts a
    /// zero-count entry into the map as a side effect.
    pub fn already_declared(&mut self, s: &str) -> bool {
        self.per_var_counter
            .last()
            .unwrap()
            .get(s)
            .is_some_and(|&count| count > 0)
    }

    /// Innermost open loop label, if any.
    pub fn top_label_cloned(&self) -> Option<String> {
        self.labels.last().cloned()
    }

    /// Generates a fresh loop label, pushes it, and returns a reference to it.
    pub fn new_label(&mut self) -> &String {
        let label = format!("label_{}", self.var_counter);
        self.var_counter += 1;
        self.labels.push(label);
        self.labels.last().expect("label was just pushed")
    }

    /// Generates a fresh temporary variable name.
    pub fn new_var(&mut self) -> String {
        let var_name = format!("var_{}", self.var_counter);
        self.var_counter += 1;
        var_name
    }
}
/// Wraps an identifier as an [`IrValue::Var`].
pub fn as_var(s: impl Into<String>) -> IrValue {
    IrValue::Var(s.into())
}

/// Like [`as_var`], but pre-wrapped in `Some` for APIs that return an
/// optional result value.
pub fn as_rvar(s: impl Into<String>) -> Option<IrValue> {
    Some(as_var(s))
}
/// Whether evaluating `v` requires mutable access: either it assigns to a
/// variable, or it calls a method registered as mutating (`mut_methods`) on
/// the struct type of the call target.
pub fn needs_mut(v: &ValueExpr, type_env: &mut TypeEnv) -> bool {
    match v {
        ValueExpr::VarAssign(_) => true,
        ValueExpr::FunctionCall { target, .. } => {
            // Only `obj.method(..)` style calls can be mutating.
            let ValueExpr::FieldAccess {
                target_obj,
                field_name,
            } = &target.0
            else {
                return false;
            };
            let ty = TypeExpr::from_value_expr_dereferenced(target_obj, type_env);
            match ty.0 {
                TypeExpr::TypeName(_, type_name, _) => type_env
                    .get_struct_def_opt(&type_name)
                    .is_some_and(|struct_def| {
                        struct_def.mut_methods.contains(&field_name.to_string())
                    }),
                _ => false,
            }
        }
        _ => false,
    }
}
/// Walks a chain of `RefMut` wrappers and returns `false` as soon as an
/// immutable `Ref` appears directly under one of them, `true` otherwise.
///
/// NOTE(review): the name suggests "contains an immutable ref", but the
/// return value is inverted relative to that reading (`true` when *no* `Ref`
/// was found in the `RefMut` chain). Currently dead code — confirm the
/// intended semantics before reviving it.
#[allow(dead_code)]
fn contains_imm_ref(mut t: &TypeExpr) -> bool {
    while let TypeExpr::RefMut(to) = t {
        if matches!(&to.0, TypeExpr::Ref(..)) {
            return false;
        }
        t = &to.0;
    }
    true
}
/// Recursive worker for [`can_do_mut_stuff_through`].
///
/// Walks an access chain (array index / field access) from the outside in and
/// decides whether mutating through the expression is allowed:
/// - an immutable `Ref` type anywhere in the chain rejects immediately;
/// - a `RefMut` clears `var_needs_const`, since mutation then goes through the
///   reference rather than the root binding;
/// - at the root, a const-bound variable (`Some(true)` in the 4th `Variable`
///   field) is only accepted if that requirement was cleared on the way down.
///
/// NOTE(review): despite the parameter name, `var_needs_const == true` means
/// a const root binding is *rejected*.
pub fn can_do_mut_stuff_through2(
    v: &Spanned<ValueExpr>,
    type_env: &mut TypeEnv,
    mut var_needs_const: bool,
) -> bool {
    let ty = TypeExpr::from_value_expr(v, type_env);
    if matches!(ty.0, TypeExpr::Ref(..)) {
        return false;
    }
    if matches!(ty.0, TypeExpr::RefMut(..)) {
        var_needs_const = false;
    }
    if let ValueExpr::ArrayAccess(target_obj, _) = &v.0 {
        can_do_mut_stuff_through2(target_obj, type_env, var_needs_const)
    } else if let ValueExpr::FieldAccess {
        target_obj,
        field_name: _,
    } = &v.0
    {
        can_do_mut_stuff_through2(target_obj, type_env, var_needs_const)
    } else {
        !var_needs_const || !matches!(&v.0, ValueExpr::Variable(_, _, _, Some(true), _))
    }
}

/// Whether `v` is a valid target for mutation, starting from the strictest
/// requirement (a const root binding rejects).
pub fn can_do_mut_stuff_through(v: &Spanned<ValueExpr>, type_env: &mut TypeEnv) -> bool {
    can_do_mut_stuff_through2(v, type_env, true)
}
/// Prefix pieces collected while walking an access chain back-to-front
/// (see `walk_access_raw`).
#[derive(Debug, Clone, PartialEq)]
enum FrontPart {
    /// `Deref(n)`: `n` levels of pointer dereference (`*`) opened before the chain.
    Deref(usize),
    // NOTE(review): both wrap the chain in a named call; the exact distinction
    // between the two `ExtCall` forms is established where they are rendered —
    // confirm there.
    ExtCall(String, usize),
    ExtCall2(String, usize),
}
fn walk_access_raw(
obj: &Spanned<ValueExpr>,
type_env: &mut TypeEnv,
env: &mut ToIr,
span: SS,
only_read: bool,
deref_needs_to_be_mut: bool,
last_needs_mut: bool,
) -> (Vec<IrInstruction>, Option<Vec<String>>) {
let imports = type_env.all_go_imports;
let mut res_instr = VecDeque::new();
let mut current_obj = obj.clone();
let mut s = VecDeque::new();
let mut derefs = Vec::new();
let mut stars = 0;
let mut is_calling_fun = false;
loop {
let cloned = is_calling_fun;
is_calling_fun = false;
match current_obj.0.clone() {
ValueExpr::Variable(_, name, _type_expr, is_const, needs_copy) => {
if needs_copy {
let (emit_instr, Some(IrValue::Var(var_name))) =
current_obj.0.emit(type_env, env, current_obj.1)
else {
panic!("this should be a var")
};
emit_instr
.into_iter()
.rev()
.for_each(|i| res_instr.push_front(i));
s.push_front(var_name);
} else {
s.push_front(name.clone());
}
s[0] = fix_ident_for_go(&s[0], imports);
if is_const.is_some_and(|v| v) && last_needs_mut && stars == 0 && !cloned {
failure(
current_obj.1.context.file_name,
format!("NEED LET VAR {stars} {name}"),
("need let var".to_string(), current_obj.1),
[],
current_obj.1.context.file_contents,
);
}
if stars > 0 {
derefs.push(FrontPart::Deref(stars));
s[0].push(')');
}
break;
}
ValueExpr::ArrayAccess(target_obj, index) => {
let (instr, res) = index.0.emit(type_env, env, span);
let mut type_expr = TypeExpr::from_value_expr(&target_obj, type_env);
let mut stars_to_set = 0;
if deref_needs_to_be_mut {
if !can_do_mut_stuff_through(&target_obj, type_env) {
failure_with_occurence(
"This needs to allow mutable access",
target_obj.1,
[(
"This needs to allow mutable access".to_string(),
target_obj.1,
)],
);
}
while let TypeExpr::RefMut(v) = type_expr.0 {
type_expr.0 = v.0;
stars_to_set += 1;
}
if let TypeExpr::Ref(_) = type_expr.0 {
panic!(
"need only mut refs for mut stuff {}..{}",
span.start, span.end
);
}
} else {
while let TypeExpr::Ref(v) | TypeExpr::RefMut(v) = type_expr.0 {
type_expr.0 = v.0;
stars_to_set += 1;
}
}
instr
.into_iter()
.rev()
.for_each(|x| res_instr.push_front(x));
if let Some(res) = res {
let IrValue::Var(res) = res else {
panic!("need var");
};
s.push_front(format!("[{res}]"));
} else {
return (res_instr.into(), None);
}
if stars > 0 {
derefs.push(FrontPart::Deref(stars));
s[0].push(')');
}
stars = stars_to_set;
current_obj = *target_obj;
}
ValueExpr::FunctionCall {
target,
params,
type_params,
..
} => {
is_calling_fun = true;
let mut param_res = Vec::new();
let mut flag = None;
if let ValueExpr::FieldAccess {
target_obj,
field_name,
} = &target.0
{
let (target_field_type, stars_count) =
TypeExpr::from_value_expr_dereferenced_with_count(target_obj, type_env);
let clean_go_type_name = target_field_type.0.as_clean_go_type_name(type_env);
let mut skip = false;
if field_name.as_str() == "iter_mut"
&& target_field_type.0.implements_into_iter_mut(type_env)
{
if let TypeExpr::Array(..) = target_field_type.clone().0 {
if !can_do_mut_stuff_through(target_obj, type_env) {
failure_with_occurence(
"This needs to allow mutable access",
target.1,
[(
"This needs to allow mutable access".to_string(),
target_obj.1,
)],
);
}
flag = Some((
format!("{clean_go_type_name}_IterMut("),
stars_count,
false,
));
skip = true;
}
} else if field_name.as_str() == "iter"
&& target_field_type.0.implements_into_iter(type_env)
{
if let TypeExpr::Array(..) = target_field_type.clone().0 {
flag =
Some((format!("{clean_go_type_name}_Iter("), stars_count, false));
skip = true;
}
} else if field_name.as_str() == "len" && target_field_type.0.is_array() {
if let TypeExpr::Array(..) = target_field_type.clone().0 {
flag = Some(("len(".to_string(), stars_count, false));
skip = true;
}
} else if field_name.as_str() == "to_json"
&& target_field_type.0.implements_to_json(type_env)
{
match target_field_type.clone().0 {
TypeExpr::Duck(..) => {
flag = Some((
format!("{clean_go_type_name}_ToJson("),
stars_count,
false,
));
skip = true;
}
TypeExpr::Array(..) => {
flag = Some((
format!("{clean_go_type_name}_ToJson("),
stars_count,
false,
));
skip = true;
}
TypeExpr::String(..) => {
flag = Some((
r#"fmt.Sprintf("\"%s\"", "#.to_string(),
stars_count,
false,
));
skip = true;
}
TypeExpr::Int => {
flag =
Some((r#"fmt.Sprintf("%d", "#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Byte => {
flag =
Some((r#"fmt.Sprintf("%d", "#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::UInt => {
flag =
Some((r#"fmt.Sprintf("%d", "#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Or(t) => {
let mut go_code = r#"
var p1 any = param1
"#
.to_string();
for t in t {
let conc_type = t.0.as_go_type_annotation(type_env);
go_code.push('\n');
go_code.push_str(&format!(
r#"
switch p1.(type) {{
case {conc_type}:
tmp := p1.({conc_type})
_ = tmp
return {}
}}
"#,
t.0.call_to_json("tmp", type_env)
));
}
go_code.push_str("\nreturn \"\"");
let c = format!("func(param1 any) string {{ {go_code} }}(");
flag = Some((c, stars_count, false));
skip = true;
}
TypeExpr::Bool(..) => {
flag =
Some((r#"fmt.Sprintf("%t", "#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Char => {
flag =
Some((r#"fmt.Sprintf("%c", "#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Tag(t) => {
flag = Some((
format!(r#"(func(_ any) string {{ return "\".{t}\"" }})("#),
stars_count,
false,
));
skip = true;
}
TypeExpr::Float => {
flag =
Some((r#"fmt.Sprintf("%f", "#.to_string(), stars_count, false));
skip = true;
}
_ => {}
}
} else if field_name.as_str() == "to_string"
&& target_field_type.0.implements_to_string(type_env)
{
match target_field_type.clone().0 {
TypeExpr::Array(..) | TypeExpr::Duck(..) => {
flag = Some((
format!("{clean_go_type_name}_ToString("),
stars_count,
false,
));
skip = true;
}
TypeExpr::String(..) => {
flag =
Some((r#"fmt.Sprintf("%s", "#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Int => {
flag =
Some((r#"fmt.Sprintf("%d", "#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Byte => {
flag =
Some((r#"fmt.Sprintf("%d", "#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::UInt => {
flag =
Some((r#"fmt.Sprintf("%d", "#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Or(t) => {
let mut go_code = r#"
var p1 any = param1
"#
.to_string();
for t in t {
let conc_type = t.0.as_go_type_annotation(type_env);
go_code.push('\n');
go_code.push_str(&format!(
r#"
switch p1.(type) {{
case {conc_type}:
tmp := p1.({conc_type})
_ = tmp
return {}
}}
"#,
t.0.call_to_string("tmp", type_env)
));
}
go_code.push_str("\nreturn \"\"");
let c = format!("func(param1 any) string {{ {go_code} }}(");
flag = Some((c, stars_count, false));
skip = true;
}
TypeExpr::Bool(..) => {
flag =
Some((r#"fmt.Sprintf("%t", "#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Char => {
flag =
Some((r#"fmt.Sprintf("%c", "#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Tag(t) => {
flag = Some((
format!(r#"(func(_ any) string {{ return ".{t}" }})("#),
stars_count,
false,
));
skip = true;
}
TypeExpr::Float => {
flag =
Some((r#"fmt.Sprintf("%f", "#.to_string(), stars_count, false));
skip = true;
}
_ => {}
}
} else if field_name.as_str() == "clone"
&& target_field_type.0.implements_clone(type_env)
{
match target_field_type.0.clone() {
TypeExpr::Array(..) => {
flag = Some((
format!("{clean_go_type_name}_Clone("),
stars_count,
false,
));
skip = true;
}
TypeExpr::String(..) => {
flag = Some((r#"IDENTITY("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Int => {
flag = Some((r#"IDENTITY("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Byte => {
flag = Some((r#"IDENTITY("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::UInt => {
flag = Some((r#"IDENTITY("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Bool(..) => {
flag = Some((r#"IDENTITY("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Char => {
flag = Some((r#"IDENTITY("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Tag(..) => {
flag = Some((r#"IDENTITY("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Float => {
flag = Some((r#"IDENTITY("%f", "#.to_string(), stars_count, false));
skip = true;
}
_ => {}
}
} else if field_name.as_str() == "hash"
&& target_field_type.0.implements_hash(type_env)
{
match target_field_type.clone().0 {
TypeExpr::Array(..) => {
flag = Some((
format!("{clean_go_type_name}_Hash("),
stars_count,
false,
));
skip = true;
}
TypeExpr::String(..) => {
flag = Some((r#"String_Hash("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Int => {
flag = Some((r#"Int_Hash("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Byte => {
flag = Some((r#"Int_Hash("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::UInt => {
flag = Some((r#"UInt_Hash("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Bool(..) => {
flag = Some((r#"Bool_Hash("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Char => {
flag = Some((r#"Char_Hash("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Tag(t) => {
flag = Some((format!(r#"String_Hash("{t}""#), stars_count, false));
skip = true;
}
TypeExpr::Float => {
flag = Some((r#"Float_Hash("#.to_string(), stars_count, false));
skip = true;
}
_ => {}
}
} else if field_name.as_str() == "ord"
&& target_field_type.0.implements_ord(type_env)
{
match target_field_type.clone().0 {
TypeExpr::Array(..) => {
flag = Some((
format!("{clean_go_type_name}_Ord("),
stars_count,
false,
));
skip = true;
}
TypeExpr::String(..) => {
flag = Some((r#"String_Ord("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Int => {
flag = Some((r#"Int_Ord("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Byte => {
flag = Some((r#"Byte_Ord("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::UInt => {
flag = Some((r#"UInt_Ord("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Bool(..) => {
flag = Some((r#"Bool_Ord("#.to_string(), stars_count, false));
skip = true;
}
TypeExpr::Char => {
flag = Some((r#"Char_Ord("#.to_string(), stars_count, false));
skip = true;
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | true |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/emit/source_file.rs | src/emit/source_file.rs | use std::collections::HashSet;
use crate::{
emit::{
ir::fix_idents_in_ir,
types::emit_type_definitions,
value::{IrInstruction, IrValue, ToIr},
},
parse::{SS, source_file_parser::SourceFile, use_statement_parser::UseStatement},
semantics::type_resolve::TypeEnv,
};
/// Hand-written Go helper code appended verbatim to every emitted file
/// (see the end of `SourceFile::emit`).
const JSON_UTILITIES: &str = include_str!("json_util.go");
impl SourceFile {
    /// Lowers the whole source file into a flat list of Go-level IR
    /// instructions: package clause, imports, globals, functions, schemas,
    /// templating runtime support, components, extension functions, generated
    /// type definitions, and finally the bundled JSON utility code.
    pub fn emit(
        mut self,
        pkg_name: String,
        type_env: &mut TypeEnv,
        span: SS,
    ) -> Vec<IrInstruction> {
        let mut to_ir = ToIr::default();
        // Imports the generated runtime support code always needs.
        let mut go_imports = vec![];
        go_imports.push((None, "hash/maphash".to_string()));
        go_imports.push((None, "errors".to_string()));
        // Collect user `use go …` imports; record the visible import names so
        // identifier fixing can avoid colliding with them.
        let mut imports = HashSet::new();
        for use_statement in &self.use_statements {
            if let UseStatement::Go(name, alias) = use_statement {
                // Go's default import name is the last path segment unless aliased.
                let import_name = alias
                    .as_ref()
                    .cloned()
                    .unwrap_or_else(|| name.split("/").last().unwrap().to_string());
                // `.` and `_` imports introduce no usable package name.
                if !import_name.is_empty() && import_name != "." && import_name != "_" {
                    imports.insert(import_name.clone());
                }
                go_imports.push((alias.to_owned(), name.to_owned()));
            }
        }
        // Deliberately leaked: `TypeEnv::all_go_imports` is a `&'static`
        // reference, so the set must live for the rest of the process.
        let imports = Box::leak(Box::new(imports)) as &'static HashSet<String>;
        type_env.all_go_imports = imports;
        let type_definitions = emit_type_definitions(type_env, &mut to_ir, &self);
        let mut instructions = Vec::new();
        instructions.push(IrInstruction::GoPackage(pkg_name));
        instructions.push(IrInstruction::GoImports(go_imports));
        // Globals: the initializer's instructions become `init_code`, ending
        // with a `return` of the produced value.
        for global_var in &self.global_var_decls {
            let (mut init_code, Some(IrValue::Var(res_name) | IrValue::Imm(res_name))) =
                global_var.initializer.0.emit(type_env, &mut to_ir, span)
            else {
                panic!(
                    "Compiler Bug: need a var (global declaration {} {:?})",
                    global_var.name, global_var.initializer.0
                )
            };
            let go_type = global_var.type_expr.0.as_go_type_annotation(type_env);
            init_code.push(IrInstruction::Return(Some(IrValue::Var(res_name))));
            instructions.push(IrInstruction::GlobalVarDecl {
                name: global_var.name.clone(),
                go_type,
                init_code,
            });
        }
        // Functions from the source plus generated generic instantiations,
        // deduplicated by name.
        let mut emitted = HashSet::new();
        for function_definition in self
            .function_definitions
            .iter_mut()
            .chain(type_env.generic_fns_generated.clone().iter_mut())
        {
            // generic functions shouldn't be emitted, as they have incomplete type information
            if !function_definition.generics.is_empty() {
                continue;
            }
            if emitted.insert(function_definition.name.clone()) {
                let mut fn_instr = function_definition.emit(None, type_env, &mut to_ir);
                if function_definition.name.as_str() == "main" {
                    // NOTE(review): this binds main's body but never modifies
                    // it — looks like leftover scaffolding; confirm whether
                    // the whole special case can be removed.
                    let IrInstruction::FunDef(_, _, _, _, _body) = &mut fn_instr else {
                        panic!("how")
                    };
                }
                instructions.push(fn_instr);
            }
        }
        // Schemas, also deduplicated by name (fresh `emitted` set).
        let mut emitted = HashSet::new();
        for schema_def in self.schema_defs {
            if emitted.insert(schema_def.name.clone()) {
                let fn_instr = schema_def.emit(type_env, &mut to_ir);
                instructions.push(fn_instr);
            }
        }
        // Runtime support types for the templating system.
        instructions.push(IrInstruction::StructDef(
            "RenderCall".to_string(),
            vec![
                ("Jsx".to_string(), "string".to_string()),
                ("Id".to_string(), "string".to_string()),
            ],
        ));
        instructions.push(IrInstruction::StructDef(
            "TemplEnv".to_string(),
            vec![
                ("ClientComponents".to_string(), "[]string".to_string()),
                ("RenderCalls".to_string(), "[]RenderCall".to_string()),
            ],
        ));
        // TemplEnv.push_client_component: append-if-absent.
        instructions.push(IrInstruction::FunDef(
            "push_client_component".to_string(),
            Some(("self".to_string(), ("*TemplEnv".to_string()))),
            vec![("comp".to_string(), "string".to_string())],
            None,
            vec![IrInstruction::InlineGo(
                r#"
    if !slices.Contains(self.ClientComponents, comp) {
        self.ClientComponents = append(self.ClientComponents, comp)
    }
    "#
                .to_string(),
            )],
        ));
        // TemplEnv.push_render: append-if-no-render-call-with-this-id.
        instructions.push(IrInstruction::FunDef(
            "push_render".to_string(),
            Some(("self".to_string(), ("*TemplEnv".to_string()))),
            vec![
                ("js".to_string(), "string".to_string()),
                ("id".to_string(), "string".to_string()),
            ],
            None,
            vec![IrInstruction::InlineGo(
                r#"
    for _, e := range self.RenderCalls {
        if e.Id == id {
            return
        }
    }
    self.RenderCalls = append(self.RenderCalls, RenderCall{js, id})
    "#
                .to_string(),
            )],
        ));
        for c in self.jsx_compontents {
            instructions.push(c.emit(type_env));
        }
        for c in self.duckx_components {
            instructions.push(c.emit(type_env, &mut to_ir, span));
        }
        for extensions_def in self.extensions_defs {
            for fn_def in extensions_def.function_definitions {
                instructions.push(fn_def.0.emit_as_extension_fun(
                    type_env,
                    &mut to_ir,
                    &extensions_def.target_type_expr.0,
                ))
            }
        }
        instructions.extend(type_definitions);
        // Rewrite identifiers that would collide with Go keywords/imports.
        for i in &mut instructions {
            fix_idents_in_ir(i, imports);
        }
        instructions.push(IrInstruction::InlineGo(JSON_UTILITIES.to_string()));
        instructions
    }
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/emit/function.rs | src/emit/function.rs | use crate::{
emit::value::{IrInstruction, IrValue, ToIr},
parse::{
failure_with_occurence,
function_parser::FunctionDefintion,
type_parser::TypeExpr,
value_parser::{ValueExpr, empty_range},
},
semantics::type_resolve::TypeEnv,
};
/// Inline-Go epilogue for a function returning Go type `t`: for the empty
/// tuple it returns the literal, otherwise it returns through a nil typed
/// pointer (an unreachable fallback return).
pub fn function_epilogue_2(t: &str) -> IrInstruction {
    let go_code = if t == "Tup_" {
        "return Tup_{}".to_string()
    } else {
        format!("var ΔΔΔretΔΔΔ *{t}\nreturn *ΔΔΔretΔΔΔ")
    };
    IrInstruction::InlineGo(go_code)
}
/// Typed wrapper over [`function_epilogue_2`]: resolves the Go annotation for
/// `t` first.
pub fn function_epilogue(t: &TypeExpr, type_env: &mut TypeEnv) -> IrInstruction {
    function_epilogue_2(&t.as_go_type_annotation(type_env))
}
impl FunctionDefintion {
    /// Emits this function as a Go `FunDef`.
    ///
    /// The checked AST is expected to have normalized the body into
    /// `Return(Some(expr))`; anything else panics as a compiler bug.
    /// Non-`main` functions get a trailing epilogue (see `function_epilogue`)
    /// — NOTE(review): presumably to satisfy Go's missing-return analysis.
    /// `main` must have a unit return type and has its body wrapped in an
    /// immediately-invoked lambda.
    pub fn emit(
        &self,
        receiver: Option<(String, String)>,
        type_env: &mut TypeEnv,
        to_ir: &mut ToIr,
    ) -> IrInstruction {
        // Each function body starts with fresh shadowing counters.
        to_ir.reset_var_counters();
        let ValueExpr::Return(Some(what)) = &self.value_expr.0 else {
            panic!(
                "Compiler Bug: every function needs to return something {} {:?}",
                self.name, self.value_expr.0
            )
        };
        let (mut emitted_body, result_ir_value) = what.0.emit(type_env, to_ir, self.span);
        if let Some(result) = result_ir_value {
            emitted_body.push(IrInstruction::Return(Some(result)));
        }
        if self.name != "main" {
            emitted_body.push(function_epilogue(&self.return_type.0, type_env));
        } else {
            if !self.return_type.0.is_unit() {
                let msg = "Main must not have a return type";
                failure_with_occurence(msg, self.span, [(msg, self.span)]);
            }
            // The lambda keeps a return type so `return`s inside the body stay
            // valid, while Go's `func main()` itself returns nothing.
            let wrapped_in_lambda = IrValue::Lambda(
                vec![],
                Some(self.return_type.0.as_go_return_type(type_env)),
                emitted_body,
            );
            emitted_body = vec![IrInstruction::FunCall(None, wrapped_in_lambda, vec![])];
        }
        IrInstruction::FunDef(
            self.name.clone(),
            receiver,
            self.params
                .iter()
                .map(|(name, (ty, _))| (name.clone(), ty.as_go_type_annotation(type_env)))
                .collect::<Vec<_>>(),
            // `main` gets no return type; everything else does.
            if self.name == "main" {
                None
            } else {
                Some(self.return_type.0.as_go_return_type(type_env))
            },
            emitted_body,
        )
    }

    /// Emits this function as a *curried* extension function on `target_type`.
    ///
    /// The generated top-level Go function takes only the prepended `self`
    /// parameter (note the `.first()` below) and returns a closure over the
    /// user-declared parameters, so a call site reads `Name(self)(args…)`.
    pub fn emit_as_extension_fun(
        &self,
        type_env: &mut TypeEnv,
        to_ir: &mut ToIr,
        target_type: &TypeExpr,
    ) -> IrInstruction {
        to_ir.reset_var_counters();
        let (emitted_body, _result_var) = self.value_expr.0.emit(type_env, to_ir, self.span);
        // `self` goes first; the user-declared parameters follow.
        let mut final_params = vec![("self".to_string(), (target_type.clone(), empty_range()))];
        final_params.extend_from_slice(&self.params);
        IrInstruction::FunDef(
            target_type.build_extension_access_function_name(&self.name, type_env),
            None,
            // Only `self`: `.first()` intentionally drops the rest — they are
            // consumed by the returned closure below.
            final_params
                .first()
                .iter()
                .map(|(name, (ty, _))| (name.clone(), ty.as_go_type_annotation(type_env)))
                .collect::<Vec<_>>(),
            // Return type: a Go func type over the non-`self` parameters.
            Some(format!(
                "func ({}) {}",
                final_params
                    .iter()
                    .filter(|(name, ..)| name != "self")
                    .map(|(_name, (ty, _))| ty.as_go_type_annotation(type_env))
                    .collect::<Vec<_>>()
                    .join(","),
                self.return_type.0.as_go_return_type(type_env),
            )),
            vec![IrInstruction::Return(Some(IrValue::Lambda(
                self.params
                    .iter()
                    .map(|(name, (ty, _))| (name.clone(), ty.as_go_type_annotation(type_env)))
                    .collect::<Vec<_>>(),
                Some(self.return_type.0.as_go_return_type(type_env)),
                emitted_body,
            )))],
        )
    }
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/emit/types.rs | src/emit/types.rs | use std::collections::HashSet;
use crate::{
emit::{
fix_ident_for_go,
value::{IrInstruction, IrValue, ToIr},
},
parse::{
Field,
source_file_parser::SourceFile,
struct_parser::StructDefinition,
type_parser::{Duck, TypeExpr},
},
semantics::{
ident_mangler::{MANGLE_SEP, mangle},
type_resolve::{NeedsSearchResult, TypeEnv},
},
};
/// Native Go type name for a primitive `TypeExpr`.
///
/// # Panics
/// Panics on any non-primitive type expression.
///
/// NOTE(review): `primitive_conc_type_name` and `primitive_type_name` below
/// encode the identical mapping — candidates for consolidation.
pub fn primitive_native_type_name<'a>(primitive_type_expr: &TypeExpr) -> &'a str {
    match primitive_type_expr {
        TypeExpr::Float => "float64",
        TypeExpr::Char => "rune",
        TypeExpr::Int => "int",
        TypeExpr::Bool(..) => "bool",
        TypeExpr::String(..) => "string",
        _ => panic!("That's not a primitive"),
    }
}
/// Escapes a string for use inside a Go interpreted string literal:
/// backslashes and double quotes are backslash-escaped and newlines become
/// `\n`; every other character passes through unchanged.
pub fn escape_string_for_go(input_str: &str) -> String {
    let mut escaped = String::with_capacity(input_str.len());
    for ch in input_str.chars() {
        match ch {
            '\\' => escaped.push_str("\\\\"),
            '"' => escaped.push_str("\\\""),
            '\n' => escaped.push_str("\\n"),
            other => escaped.push(other),
        }
    }
    escaped
}
/// Encodes a string as the decimal Unicode scalar value of each character,
/// each followed by an underscore (e.g. `"ab"` → `"97_98_"`).
pub fn string_to_byte_string(input_str: &str) -> String {
    use std::fmt::Write;
    let mut encoded = String::new();
    for ch in input_str.chars() {
        write!(encoded, "{}_", ch as u32).expect("writing to a String cannot fail");
    }
    encoded
}
/// Concrete Go type name for a primitive `TypeExpr`.
///
/// # Panics
/// Panics on any non-primitive type expression.
pub fn primitive_conc_type_name<'a>(primitive_type_expr: &TypeExpr) -> &'a str {
    match primitive_type_expr {
        TypeExpr::Bool(..) => "bool",
        TypeExpr::Char => "rune",
        TypeExpr::Float => "float64",
        TypeExpr::Int => "int",
        TypeExpr::String(..) => "string",
        _ => panic!("That's not a primitive"),
    }
}
/// Go type name for a primitive `TypeExpr`, as a `'static` string.
///
/// # Panics
/// Panics on any non-primitive type expression.
pub fn primitive_type_name(primitive_type_expr: &TypeExpr) -> &'static str {
    match primitive_type_expr {
        TypeExpr::Char => "rune",
        TypeExpr::Bool(..) => "bool",
        TypeExpr::Int => "int",
        TypeExpr::Float => "float64",
        TypeExpr::String(..) => "string",
        _ => panic!("That's not a primitive"),
    }
}
/// Prepends the method prologue that rebinds Go's value receiver
/// (`duck_internal_self`) as a double pointer named `self`, so the method
/// body can mutate through it.
///
/// When `insert_org_addr` is set, the original receiver address is first
/// captured in `Δorg_addr`, and a deferred write-back copies the (possibly
/// reassigned) receiver into it when the method returns.
///
/// NOTE(review): `fixed_struct_method` is used as the receiver *type* name in
/// the emitted annotations; `_struct_name` is unused.
pub fn fixup_method_body(
    _struct_name: &str,
    fixed_struct_method: &str,
    body: &mut Vec<IrInstruction>,
    insert_org_addr: bool,
) {
    let mut to_insert = Vec::new();
    if insert_org_addr {
        // Δorg_addr := duck_internal_self (a *T pointing at the caller's value).
        to_insert.push(IrInstruction::VarDecl(
            "Δorg_addr".to_string(),
            format!("*{fixed_struct_method}"),
        ));
        to_insert.push(IrInstruction::VarAssignment(
            "Δorg_addr".to_string(),
            IrValue::Imm("duck_internal_self".to_string()),
        ));
    }
    // self := &duck_internal_self (a **T the body mutates through).
    to_insert.push(IrInstruction::VarDecl(
        "self".to_string(),
        format!("**{fixed_struct_method}"),
    ));
    to_insert.push(IrInstruction::VarAssignment(
        "self".to_string(),
        IrValue::Imm("&duck_internal_self".to_string()),
    ));
    if insert_org_addr {
        // Write the final receiver state back to the original address on return.
        to_insert.push(IrInstruction::InlineGo(
            "defer func() { *Δorg_addr = **self }()".to_string(),
        ));
    }
    // Insert the prologue in order at the front in a single move, instead of
    // the previous per-instruction `insert(0, …)` (O(len(body)) each).
    body.splice(0..0, to_insert);
}
/// Sanitizes a type name for Go output; currently just delegates to
/// `fix_ident_for_go` and exists as a separate, intention-revealing entry point.
pub fn fix_type_name(s: &str, imports: &HashSet<String>) -> String {
    fix_ident_for_go(s, imports)
}
pub fn emit_type_definitions(
type_env: &mut TypeEnv,
to_ir: &mut ToIr,
src_file: &SourceFile,
) -> Vec<IrInstruction> {
let imports = type_env.all_go_imports;
let mut result = Vec::new();
let mut emitted_types = HashSet::new();
let mut all_tuples_and_ducks = type_env.find_ducks_and_tuples(src_file);
let tags_to_push = ["greater", "smaller", "equal"];
for tag in tags_to_push {
if !all_tuples_and_ducks.iter().any(|t| match t {
NeedsSearchResult::Tag { name } => name.as_str() == tag,
_ => false,
}) {
all_tuples_and_ducks.push(NeedsSearchResult::Tag {
name: tag.to_string(),
});
}
}
for tuple_or_duck in &all_tuples_and_ducks {
match tuple_or_duck {
NeedsSearchResult::Array { type_expr } => {
let array_type = TypeExpr::Array(type_expr.clone().into());
let array_type_name = array_type.as_clean_go_type_name(type_env);
let array_type_ano = array_type.as_go_type_annotation(type_env);
if array_type.implements_into_iter(type_env) {
let fun_name = format!("{array_type_name}_Iter");
let iter_struct_name = &format!(
"Struct_{}",
mangle(&[
"std",
"col",
"Iter",
&TypeExpr::Ref(type_expr.clone().into())
.as_clean_go_type_name(type_env),
])
);
let iter_from_fn_name = mangle(&[
"std",
"col",
"Iter",
"from",
&TypeExpr::Ref(type_expr.clone().into()).as_clean_go_type_name(type_env),
]);
let go_code = format!(
r#"
idx := 0
f := func() any {{
if idx >= len(self) {{
return Tag__no_next_elem{{}}
}}
elem_to_ret := &self[idx]
idx = idx + 1
return elem_to_ret
}}
return {iter_from_fn_name}(f)
"#
);
result.push(IrInstruction::FunDef(
fun_name,
None,
vec![("self".to_string(), array_type_ano.clone())],
Some(format!("*{iter_struct_name}")),
vec![IrInstruction::InlineGo(go_code)],
))
}
if array_type.implements_into_iter_mut(type_env) {
let fun_name = format!("{array_type_name}_IterMut");
let iter_struct_name = &format!(
"Struct_{}",
mangle(&[
"std",
"col",
"Iter",
&TypeExpr::RefMut(type_expr.clone().into())
.as_clean_go_type_name(type_env),
])
);
let iter_from_fn_name = mangle(&[
"std",
"col",
"Iter",
"from",
&TypeExpr::RefMut(type_expr.clone().into()).as_clean_go_type_name(type_env),
]);
let go_code = format!(
r#"
idx := 0
f := func() any {{
if idx >= len(self) {{
return Tag__no_next_elem{{}}
}}
elem_to_ret := &self[idx]
idx = idx + 1
return elem_to_ret
}}
return {iter_from_fn_name}(f)
"#
);
result.push(IrInstruction::FunDef(
fun_name,
None,
vec![("self".to_string(), array_type_ano.clone())],
Some(format!("*{iter_struct_name}")),
vec![IrInstruction::InlineGo(go_code)],
))
}
if array_type.implements_eq(type_env) {
let fun_name = format!("{array_type_name}_Eq");
let go_code = r#"
if len(self) != len(other) {
return false
}
for i := range self {
a := self[i]
_ = a
b := other[i]
_ = b
if !($%$%$%) {
return false
}
}
return true
"#
.replace("$%$%$%", &type_expr.0.call_eq("a", "b", type_env));
result.push(IrInstruction::FunDef(
fun_name,
None,
vec![
("self".to_string(), array_type_ano.clone()),
("other".to_string(), array_type_ano.clone()),
],
Some("bool".to_string()),
vec![IrInstruction::InlineGo(go_code)],
));
}
if array_type.implements_from_json(type_env) {
let fun_name = format!("{array_type_name}_FromJson");
let go_code = format!(
r#"
res := make({}, 1)
parts, _, err := scan_json_array_parts(json_str)
if err != nil {{
return res, err
}}
res = make({}, len(parts))
for i := range res {{
tmp, err := {}
if err != nil {{
return res, err
}}
_ = tmp
res[i] = tmp
}}
return res, nil
"#,
array_type_ano,
array_type_ano,
type_expr.0.call_from_json("parts[i]", type_env)
);
result.push(IrInstruction::FunDef(
fun_name,
None,
vec![("json_str".to_string(), "string".to_string())],
Some(format!("({}, error)", array_type_ano.clone())),
vec![IrInstruction::InlineGo(go_code)],
));
}
if array_type.implements_to_json(type_env) {
let fun_name = format!("{array_type_name}_ToJson");
let go_code = r#"
res := ""
for i := range self {
a := self[i]
_ = a
if i != 0 {
res = res + ", "
}
a_x := ($%$%$%)
res = res + a_x
}
return fmt.Sprintf("[%s]", res)
"#
.replace("$%$%$%", &type_expr.0.call_to_json("a", type_env));
result.push(IrInstruction::FunDef(
fun_name,
None,
vec![("self".to_string(), array_type_ano.clone())],
Some("string".to_string()),
vec![IrInstruction::InlineGo(go_code)],
));
}
if array_type.implements_to_string(type_env) {
let fun_name = format!("{array_type_name}_ToString");
let go_code = r#"
res := ""
for i := range self {
a := self[i]
_ = a
if i != 0 {
res = res + ", "
}
a_x := ($%$%$%)
res = res + a_x
}
return fmt.Sprintf("[%s]", res)
"#
.replace("$%$%$%", &type_expr.0.call_to_string("a", type_env));
result.push(IrInstruction::FunDef(
fun_name,
None,
vec![("self".to_string(), array_type_ano.clone())],
Some("string".to_string()),
vec![IrInstruction::InlineGo(go_code)],
));
}
if array_type.implements_clone(type_env) {
let fun_name = format!("{array_type_name}_Clone");
let go_code = r#"
res := make($$$ARRAY_TYPE, len(self))
for i := range self {
a := self[i]
_ = a
a_x := ($%$%$%)
res[i] = a_x
}
return res
"#
.replace("$$$ARRAY_TYPE", &array_type_ano)
.replace("$%$%$%", &type_expr.0.call_clone("a", type_env));
result.push(IrInstruction::FunDef(
fun_name,
None,
vec![("self".to_string(), array_type_ano.clone())],
Some(array_type_ano.clone()),
vec![IrInstruction::InlineGo(go_code)],
));
}
{
let fun_name = format!("{array_type_name}_Copy");
let go_code = format!(
r#"
res := make({array_type_ano}, len(self))
for i := range self {{
a := self[i]
_ = a
a_x := {}
res[i] = a_x
}}
return res
"#,
&type_expr.0.call_copy("a", type_env)
);
result.push(IrInstruction::FunDef(
fun_name,
None,
vec![("self".to_string(), array_type_ano.clone())],
Some(array_type_ano.clone()),
vec![IrInstruction::InlineGo(go_code)],
));
}
if array_type.implements_hash(type_env) {
let fun_name = format!("{array_type_name}_Hash");
let go_code = r#"
var res int
res = 1
for i := range self {
a := self[i]
_ = a
a_x := ($%$%$%)
res = (31 * res) + a_x
}
return res
"#
.replace("$%$%$%", &type_expr.0.call_hash("a", type_env));
result.push(IrInstruction::FunDef(
fun_name,
None,
vec![("self".to_string(), array_type_ano.clone())],
Some("int".to_string()),
vec![IrInstruction::InlineGo(go_code)],
));
}
if array_type.implements_ord(type_env) {
let fun_name = format!("{array_type_name}_Ord");
let go_code = r#"
other := *other_param
if len(self) < len(other) {
return Tag__smaller{}
} else if len(self) > len(other) {
return Tag__greater{}
}
for i := range self {
a := self[i]
_ = a
b := other[i]
_ = b
inter_res := ($%$%$%)
var mm any
mm = inter_res
switch mm.(type) {
case Tag__greater:
return Tag__greater{}
case Tag__smaller:
return Tag__smaller{}
}
}
return Tag__equal{}
"#
.replace("$%$%$%", &type_expr.0.call_ord("a", "b", type_env));
result.push(IrInstruction::FunDef(
fun_name,
None,
vec![
("self".to_string(), array_type_ano.clone()),
("other_param".to_string(), format!("*{array_type_ano}")),
],
Some("any".to_string()),
vec![IrInstruction::InlineGo(go_code)],
));
}
}
NeedsSearchResult::Duck { fields } => {
let duck_type_expr = TypeExpr::Duck(Duck {
fields: fields.clone(),
});
let type_name = duck_type_expr.as_clean_go_type_name(type_env);
if duck_type_expr.implements_to_string(type_env) {
let receiver = duck_type_expr.as_go_type_annotation(type_env);
let mut string_parts = Vec::new();
for f in fields.iter() {
string_parts.push(format!(
r#" "{}: " + ({})"#,
f.name,
f.type_expr
.0
.call_to_string(&format!("self.Get{}()", f.name), type_env),
));
}
if string_parts.is_empty() {
string_parts.push("\"\"".to_string());
}
result.push(IrInstruction::FunDef(
format!("{type_name}_ToString"),
None,
vec![("self".to_string(), receiver.clone())],
Some("string".to_string()),
vec![IrInstruction::Return(Some(IrValue::Imm(format!(
r#"fmt.Sprintf("{{%s}}", {})"#,
string_parts.join(" + \",\" + ")
))))],
));
}
if duck_type_expr.implements_to_json(type_env) {
let receiver = duck_type_expr.as_go_type_annotation(type_env);
let mut string_parts = Vec::new();
for f in fields.iter() {
string_parts.push(format!(
r#" "\"{}\": " + ({})"#,
f.name,
f.type_expr
.0
.call_to_json(&format!("self.Get{}()", f.name), type_env),
));
}
if string_parts.is_empty() {
string_parts.push("\"\"".to_string());
}
result.push(IrInstruction::FunDef(
format!("{type_name}_ToJson"),
None,
vec![("self".to_string(), receiver.clone())],
Some("string".to_string()),
vec![IrInstruction::Return(Some(IrValue::Imm(format!(
r#"fmt.Sprintf("{{%s}}", {})"#,
string_parts.join(" + \",\" + ")
))))],
));
}
if duck_type_expr.implements_from_json(type_env) {
let mut go_code = format!(
r#"
var res {}
obj, _, err := scan_json_struct_parts(json_str)
if err != nil {{
return res, err
}}
"#,
duck_type_expr.as_go_type_annotation(type_env)
);
let mut all = Vec::new();
for field in fields {
let call = field
.type_expr
.0
.call_from_json(&format!("obj[\"{}\"]", field.name), type_env);
let var_name = format!("field_expr_{}", field.name);
go_code.push_str(&format!(
r#"
{var_name}, err := {call}
_ = {var_name}
if err != nil {{
return res, err
}}
"#
));
all.push((field.name.clone(), var_name));
}
go_code.push_str(&format!(
"return &{type_name}{{\n{}\n}}, nil",
all.iter()
.map(|(field_name, expr_var_name)| format!(
"{field_name}: {expr_var_name},\n"
))
.collect::<Vec<_>>()
.join("")
));
result.push(IrInstruction::FunDef(
format!("{type_name}_FromJson"),
None,
vec![("json_str".to_string(), "string".to_string())],
Some(format!(
"({}, error)",
duck_type_expr.as_go_type_annotation(type_env)
)),
vec![IrInstruction::InlineGo(go_code)],
));
}
result.push(IrInstruction::StructDef(
type_name.clone(),
fields
.iter()
.map(
|Field {
name,
type_expr: (type_expr, _),
}| {
(name.clone(), type_expr.as_go_type_annotation(type_env))
},
)
.collect::<Vec<_>>(),
));
for field in fields.iter() {
let param_name = &field.name;
let interface_name = format!("Has{param_name}");
if emitted_types.insert(interface_name.clone()) {
result.push(IrInstruction::InterfaceDef(
interface_name,
vec![("T".into(), "any".into())],
vec![
(format!("Get{param_name}"), vec![], Some("T".into())),
(format!("GetPtr{param_name}"), vec![], Some("*T".into())),
(
format!("Set{param_name}"),
vec![("param".into(), "T".into())],
None,
),
],
));
}
result.extend([
IrInstruction::FunDef(
format!("Get{}", field.name),
Some(("self".into(), format!("*{}", type_name.clone()))),
vec![],
Some(field.type_expr.0.as_go_type_annotation(type_env)),
vec![IrInstruction::Return(Some(IrValue::FieldAccess(
IrValue::Var("self".into()).into(),
field.name.clone(),
)))],
),
IrInstruction::FunDef(
format!("GetPtr{}", field.name),
Some(("self".into(), format!("*{}", type_name.clone()))),
vec![],
Some(format!(
"*{}",
field.type_expr.0.as_go_type_annotation(type_env)
)),
vec![IrInstruction::Return(Some(IrValue::Pointer(
IrValue::FieldAccess(
IrValue::Var("self".into()).into(),
field.name.clone(),
)
.into(),
)))],
),
IrInstruction::FunDef(
format!("Set{}", field.name),
Some(("self".into(), format!("*{}", type_name.clone()))),
vec![(
"param".into(),
field.type_expr.0.as_go_type_annotation(type_env),
)],
None,
vec![IrInstruction::VarAssignment(
format!("self.{}", fix_ident_for_go(&field.name, imports)),
IrValue::Var("param".into()),
)],
),
]);
}
}
NeedsSearchResult::Tag { name } => {
let self_type = TypeExpr::Tag(name.clone());
let type_name = self_type.as_clean_go_type_name(type_env);
result.push(IrInstruction::StructDef(type_name.clone(), vec![]));
{
let go_code = format!(
r#"
r := {type_name}{{}}
var s string
e := errors.New("from json {name} failed")
json_err := json.Unmarshal([]byte(json_str), &s)
if json_err != nil {{
return r, json_err
}}
if s != "{name}" {{
return r, e
}}
return r, nil
"#
);
result.push(IrInstruction::FunDef(
format!("{type_name}_FromJson"),
None,
vec![("json_str".to_string(), "string".to_string())],
Some(format!("({type_name}, error)")),
vec![IrInstruction::InlineGo(go_code)],
));
}
}
NeedsSearchResult::Tuple { fields } => {
let tuple_type = TypeExpr::Tuple(fields.clone());
let type_name = tuple_type.as_clean_go_type_name(type_env);
let type_anno = tuple_type.as_go_type_annotation(type_env);
result.push(IrInstruction::StructDef(
type_name.clone(),
fields
.iter()
.enumerate()
.map(|(i, x)| (format!("field_{i}"), x.0.as_go_type_annotation(type_env)))
.collect::<Vec<_>>(),
));
if tuple_type.implements_eq(type_env) {
let mut comparisons = Vec::new();
for (i, f) in fields.iter().enumerate() {
let field_name = format!("field_{i}");
comparisons.push(f.0.call_eq(
&format!("self.{field_name}"),
&format!("other.{field_name}"),
type_env,
));
}
if comparisons.is_empty() {
comparisons.push(String::from("true"));
}
result.push(IrInstruction::FunDef(
"eq".to_string(),
Some(("self".to_string(), format!("*{type_name}"))),
vec![("other".to_string(), format!("*{type_name}"))],
Some("bool".to_string()),
vec![IrInstruction::Return(Some(IrValue::Imm(
comparisons.join(" && "),
)))],
));
}
if tuple_type.implements_from_json(type_env) {
let fun_name = format!("{type_name}_FromJson");
let needed_len = fields.len();
let mut go_code = format!(
r#"
var res {type_name}
parts, _, err := scan_json_array_parts(json_str)
if err != nil {{
return res, err
}}
if len(parts) < {needed_len} {{
return res, errors.New("Not long enough")
}}
"#,
);
for (i, f) in fields.iter().enumerate() {
go_code.push_str(&format!(
r#"
value_{i}, err := {}
if err != nil {{
return res, err
}}
res.field_{i} = value_{i}
"#,
f.0.call_from_json(&format!("parts[{i}]"), type_env)
));
}
go_code.push_str("\nreturn res, nil");
result.push(IrInstruction::FunDef(
fun_name,
None,
vec![("json_str".to_string(), "string".to_string())],
Some(format!("({type_name}, error)")),
vec![IrInstruction::InlineGo(go_code)],
));
}
if tuple_type.implements_to_json(type_env) {
let mut go_code = String::from(
r#"
res := ""
"#,
);
for (i, field) in fields.iter().enumerate() {
if i != 0 {
go_code.push_str("\nres = res + \", \"");
}
let to_json_call =
field.0.call_to_json(&format!("self.field_{i}"), type_env);
go_code.push_str(&format!("\nres = res + ({to_json_call})"))
}
go_code.push_str("\nreturn fmt.Sprintf(\"[%s]\", res)");
result.push(IrInstruction::FunDef(
"to_json".to_string(),
Some(("self".to_string(), type_name.clone())),
vec![],
Some("string".to_string()),
vec![IrInstruction::InlineGo(go_code)],
));
}
if tuple_type.implements_to_string(type_env) {
let mut go_code = String::from(
r#"
res := ""
"#,
);
for (i, field) in fields.iter().enumerate() {
if i != 0 {
go_code.push_str("\nres = res + \", \"");
}
let to_string_call =
field.0.call_to_string(&format!("self.field_{i}"), type_env);
go_code.push_str(&format!("\nres = res + ({to_string_call})"))
}
go_code.push_str("\nreturn fmt.Sprintf(\"(%s)\", res)");
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | true |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/emit/duckx_component.rs | src/emit/duckx_component.rs | use crate::{
emit::value::{IrInstruction, ToIr},
parse::{SS, duckx_component_parser::DuckxComponent},
semantics::type_resolve::TypeEnv,
};
impl DuckxComponent {
    /// Lowers this duckx component into a single Go function definition.
    ///
    /// The component's value expression is emitted first; the resulting
    /// instructions become the body of a `FunDef` that takes one `props`
    /// parameter (typed from `self.props_type`) and returns a templ render
    /// closure (`func (env *TemplEnv) string`).
    pub fn emit(&self, type_env: &mut TypeEnv, to_ir: &mut ToIr, span: SS) -> IrInstruction {
        // Emit the body; the accompanying result value is intentionally
        // discarded here — only the instruction list is wrapped.
        let (body, _result) = self.value_expr.0.emit(type_env, to_ir, span);
        let props_param = (
            "props".to_string(),
            self.props_type.0.as_go_type_annotation(type_env),
        );
        let return_type = Some("func (env *TemplEnv) string".to_string());
        IrInstruction::FunDef(self.name.clone(), None, vec![props_param], return_type, body)
    }
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/emit/schema_def.rs | src/emit/schema_def.rs | use crate::{
emit::{
ir::join_ir,
value::{IrInstruction, ToIr},
},
parse::{
schema_def_parser::{SchemaDefinition, SchemaField},
type_parser::TypeExpr,
value_parser::{Assignment, ValueExpr},
},
semantics::type_resolve::TypeEnv,
};
impl SchemaDefinition {
    /// Renders one schema field as a Go struct-field declaration for the
    /// intermediate unmarshal struct.
    ///
    /// Duck-typed fields (and arrays of ducks) are captured as
    /// `json.RawMessage` so they can be re-parsed with the generated
    /// `*_FromJson` helpers; all other fields become a pointer to their Go
    /// type so that `nil` distinguishes "absent" from "zero value". Fields
    /// with an `else` branch get the `,omitempty` JSON tag.
    fn emit_field(&self, field: &SchemaField, type_env: &mut TypeEnv) -> String {
        let omit_empty = if field.else_branch_value_expr.is_some() {
            ",omitempty"
        } else {
            ""
        };
        let is_duck = matches!(field.type_expr.0, TypeExpr::Duck(_));
        let is_array_of_duck = matches!(&field.type_expr.0, TypeExpr::Array(inner) if matches!(inner.as_ref(), (TypeExpr::Duck(_), ..)));
        if is_duck || is_array_of_duck {
            // Raw message: decoded later via the duck type's FromJson helper.
            return format!(
                "F_{0} json.RawMessage `json:\"{0}{1}\"`",
                field.name, omit_empty
            );
        }
        return format!(
            "F_{0} *{1} `json:\"{0}{2}\"`",
            field.name,
            field.type_expr.0.as_go_type_annotation(type_env),
            omit_empty
        );
    }
    /// Emits the schema as a zero-argument Go function (named after the
    /// schema) that returns an object carrying a `from_json` closure.
    ///
    /// The closure unmarshals into an intermediate pointer/raw-message
    /// struct, validates/decodes each field (running `else` branches or
    /// returning `Tag__err{}` on missing data, and `if` branch guards where
    /// present), and finally returns a duck value built from the decoded
    /// `field_*` locals.
    pub fn emit(&self, type_env: &mut TypeEnv, to_ir: &mut ToIr) -> IrInstruction {
        let schema_fn_return_type = self.out_type.as_ref().unwrap().0.clone();
        let mut body_instructions = vec![];
        // Build the intermediate struct used as the json.Unmarshal target.
        let mut struct_construction_fields = vec![];
        for schema_field in &self.fields {
            struct_construction_fields.push(self.emit_field(schema_field, type_env));
        }
        let struct_construction_src = format!(
            "var ref_struct struct {{ {0} }} = struct {{ {0} }}{{}};",
            struct_construction_fields
                .iter()
                .map(|field_src| field_src.to_string())
                .collect::<Vec<_>>()
                .join("\n"),
        );
        // Per-field decode/validation snippets, emitted in declaration order.
        let mut schema_struct_access_srcs = vec![];
        for schema_field in &self.fields {
            let field_name = &schema_field.name;
            let field_type_annotation = if schema_field.else_branch_value_expr.is_some() {
                "any".to_string()
            } else {
                schema_field.type_expr.0.as_go_type_annotation(type_env)
            };
            // What to run when the field is missing: either the schema's
            // `else` expression (assigned back into ref_struct) or a hard
            // failure returning Tag__err.
            let null_action_emitted = if let Some(value_expr) = &schema_field.else_branch_value_expr
            {
                ValueExpr::VarAssign(Box::new((
                    Assignment {
                        target: (
                            ValueExpr::Variable(
                                false,
                                format!("ref_struct.F_{field_name}"),
                                Some(schema_field.type_expr.0.clone()),
                                None,
                                false,
                            ),
                            schema_field.span,
                        ),
                        value_expr: value_expr.clone(),
                    },
                    schema_field.span,
                )))
                .emit(type_env, to_ir, schema_field.span)
            } else {
                ValueExpr::InlineGo(
                    "return Tag__err {}".to_string(),
                    Some((TypeExpr::Never, schema_field.span)),
                )
                .emit(type_env, to_ir, schema_field.span)
            };
            let null_action_src = join_ir(&null_action_emitted.0);
            let src = match &schema_field.type_expr.0 {
                // Duck field: re-parse the captured raw JSON through the
                // generated FromJson helper and type-switch on the result.
                TypeExpr::Duck(_) => {
                    let duck_type_name = schema_field.type_expr.0.as_clean_go_type_name(type_env);
                    format!("
                    var field_{field_name} {field_type_annotation}
                    if len(ref_struct.F_{field_name}) == 0 {{
                        {null_action_src}
                    }}
                    if len(ref_struct.F_{field_name}) > 0 {{
                        var parsed_any any = {duck_type_name}_FromJson(string(ref_struct.F_{field_name}))
                        switch v := parsed_any.(type) {{
                        case Tag__err:
                            return Tag__err{{}}
                        case {field_type_annotation}:
                            field_{field_name} = v
                        default:
                            return Tag__err{{}}
                        }}
                    }}
                    ")
                }
                // Array of ducks: split into raw elements first, then decode
                // each element like the single-duck case above.
                TypeExpr::Array(inner) if matches!(inner.as_ref(), (TypeExpr::Duck(_), ..)) => {
                    let duck_type_expr = inner.as_ref();
                    let duck_type_name = duck_type_expr.0.as_clean_go_type_name(type_env);
                    let element_type_annotation = duck_type_expr.0.as_go_type_annotation(type_env);
                    format!("
                    var field_{field_name} {field_type_annotation}
                    if len(ref_struct.F_{field_name}) == 0 {{
                        {null_action_src}
                    }}
                    if len(ref_struct.F_{field_name}) > 0 {{
                        var raw_slice []json.RawMessage
                        if err := json.Unmarshal(ref_struct.F_{field_name}, &raw_slice); err != nil {{
                            return Tag__err{{}}
                        }}
                        field_{field_name} = make([]{element_type_annotation}, len(raw_slice))
                        for i, raw_elem := range raw_slice {{
                            var parsed_any any = {duck_type_name}_FromJson(string(raw_elem))
                            switch v := parsed_any.(type) {{
                            case Tag__err:
                                return Tag__err{{}}
                            case {element_type_annotation}:
                                field_{field_name}[i] = v
                            default:
                                return Tag__err{{}}
                            }}
                        }}
                    }}
                    ")
                }
                // Plain field: nil pointer means "missing"; otherwise deref.
                _ => {
                    format!(
                        "
                    if ref_struct.F_{field_name} == nil {{
                        {null_action_src}
                    }}
                    var field_{field_name} {field_type_annotation} = *ref_struct.F_{field_name}
                    "
                    )
                }
            };
            schema_struct_access_srcs.push(src);
            // Optional `if` guard: when the condition holds, either return
            // the branch's value or fall through (empty inline Go).
            if let Some((branch, span)) = &schema_field.if_branch {
                let emitted_condition = branch.condition.0.emit(type_env, to_ir, *span);
                let condition_src = join_ir(&emitted_condition.0);
                let condition_var_src =
                    emitted_condition.1.expect("expect result var").emit_as_go();
                let condition_based_value_emitted = if let Some(value_expr) = &branch.value_expr {
                    ValueExpr::Return(Some(Box::new(value_expr.clone())))
                        .emit(type_env, to_ir, *span)
                } else {
                    ValueExpr::InlineGo("".to_string(), None).emit(type_env, to_ir, *span)
                };
                let condition_based_src = join_ir(&condition_based_value_emitted.0);
                let src = format!(
                    "
                {condition_src}
                if {condition_var_src} {{
                    {condition_based_src}
                }}
                "
                );
                schema_struct_access_srcs.push(src);
            }
        }
        // Success path: return a duck built from the decoded field_* locals.
        let return_duck = ValueExpr::Return(Some(Box::new((
            ValueExpr::Duck(
                self.fields
                    .iter()
                    .map(|schema_field| {
                        (
                            schema_field.name.clone(),
                            (
                                ValueExpr::Variable(
                                    false,
                                    format!("field_{}", schema_field.name),
                                    Some(schema_field.type_expr.0.clone()),
                                    None,
                                    false,
                                ),
                                schema_field.span,
                            ),
                        )
                    })
                    .collect::<Vec<_>>(),
            ),
            self.span,
        ))));
        let emitted_duck = return_duck.emit(type_env, to_ir, self.span);
        let return_duck_src = join_ir(&emitted_duck.0);
        let schema_struct_access_src = schema_struct_access_srcs.join("\n");
        let from_json_body = format!(
            "
            {struct_construction_src}
            err := json.Unmarshal([]byte(str), &ref_struct)
            if err != nil {{
                fmt.Println(err)
                return Tag__err{{}}
            }}
            {schema_struct_access_src}
            {return_duck_src}
            return \"\"
            "
        );
        // NOTE(review): stray empty block below is a no-op — left untouched.
        {};
        let instr = IrInstruction::InlineGo(format!(
            "return &{}{{
                from_json: func(str string) any {{
                    {from_json_body}
                }},
            }}",
            schema_fn_return_type.as_clean_go_type_name(type_env)
        ));
        body_instructions.push(instr);
        IrInstruction::FunDef(
            self.name.clone(),
            None,
            vec![],
            Some(schema_fn_return_type.as_go_return_type(type_env)),
            body_instructions,
        )
    }
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/emit/mod.rs | src/emit/mod.rs | use std::collections::HashSet;
use crate::{
parse::{
Spanned,
function_parser::LambdaFunctionExpr,
type_parser::{Duck, TypeExpr},
value_parser::ValueExpr,
},
semantics::type_resolve::{TypeEnv, trav_type_expr, trav_value_expr},
};
pub mod duckx_component;
pub mod function;
pub mod ir;
pub mod jsx_component;
pub mod schema_def;
pub mod source_file;
pub mod types;
pub mod value;
/// Returns `true` if `ident` collides with a Go keyword or predeclared
/// identifier and therefore cannot be emitted verbatim in generated Go code.
///
/// Identifiers flagged here are renamed by `fix_ident_for_go` (prefixed with
/// `Δ`). The set is kept exactly as before; note it is *not* the complete Go
/// keyword list — e.g. `func`, `type`, `return`, `const`, `if`, `for`,
/// `switch` are absent. NOTE(review): confirm those omissions are intentional
/// (they may be unreachable as Duck identifiers) before extending the set.
pub fn is_identifier_blocked_by_go(ident: &str) -> bool {
    // `matches!` replaces the manual `match ... => true, _ => false` form;
    // same set, same result, and consistent with `matches!` usage elsewhere
    // in this crate (e.g. schema_def.rs).
    matches!(
        ident,
        "go" | "map"
            | "chan"
            | "int"
            | "int8"
            | "int16"
            | "int32"
            | "int64"
            | "uint"
            | "uint8"
            | "uint16"
            | "uint32"
            | "uint64"
            | "string"
            | "float"
            | "float32"
            | "float64"
            | "select"
            | "package"
            | "import"
            | "interface"
            | "var"
            | "rune"
            | "bool"
            | "complex64"
            | "range"
            | "goto"
            | "break"
            | "fallthrough"
            | "default"
            | "complex128"
    )
}
/// Makes `ident` safe to use as an identifier in emitted Go source.
///
/// Names that clash with Go keywords / predeclared identifiers (as decided by
/// [`is_identifier_blocked_by_go`]) are prefixed with `Δ`; anything else is
/// returned unchanged. The `_imports` set is currently unused — an earlier
/// variant also renamed identifiers shadowing Go import names:
/// `|| imports.contains(ident)`.
pub fn fix_ident_for_go(ident: &str, _imports: &HashSet<String>) -> String {
    if !is_identifier_blocked_by_go(ident) {
        return ident.to_string();
    }
    format!("Δ{ident}")
}
/// Rewrites every identifier inside a type expression so it is legal in the
/// generated Go output (see [`fix_ident_for_go`]).
///
/// The traversal itself is delegated to `trav_type_expr`; the callback only
/// patches the name-bearing `TypeExpr` variants in place.
pub fn fix_all_idents_type_expr(
    t: &mut Spanned<TypeExpr>,
    type_env: &mut TypeEnv,
    imports: &'static HashSet<String>,
) {
    trav_type_expr(
        |spanned, _| {
            let rename = |ident: &str| fix_ident_for_go(ident, imports);
            match &mut spanned.0 {
                // Variants carrying a single type name.
                TypeExpr::Struct { name, .. }
                | TypeExpr::NamedDuck { name, .. }
                | TypeExpr::TypeName(_, name, _) => *name = rename(name),
                // Duck types: each field name may clash with a Go keyword.
                TypeExpr::Duck(Duck { fields }) => {
                    for field in fields.iter_mut() {
                        field.name = rename(&field.name);
                    }
                }
                TypeExpr::TypeOf(referenced) => *referenced = rename(referenced),
                // Function types: only named parameters need fixing.
                TypeExpr::Fun(params, _, _) => {
                    for param in params.iter_mut() {
                        if let Some(param_name) = param.0.as_mut() {
                            *param_name = rename(param_name);
                        }
                    }
                }
                // Remaining variants carry no identifiers.
                _ => {}
            }
        },
        t,
        type_env,
    );
}
/// Rewrites every identifier inside a value expression (and the type
/// expressions embedded in it) so it is legal in the generated Go output.
///
/// `trav_value_expr` drives the traversal with two callbacks: the first
/// patches name-bearing `TypeExpr` nodes (mirroring
/// `fix_all_idents_type_expr`), the second patches the name-bearing
/// `ValueExpr` variants in place.
pub fn fix_all_idents_value_expr(
    v: &mut Spanned<ValueExpr>,
    type_env: &mut TypeEnv,
    imports: &'static HashSet<String>,
) {
    trav_value_expr(
        // Type-expression callback: same variant coverage as
        // fix_all_idents_type_expr.
        |t, _| match &mut t.0 {
            TypeExpr::Struct {
                name,
                type_params: _,
            } => *name = fix_ident_for_go(name, imports),
            TypeExpr::Duck(Duck { fields }) => {
                for field in fields.iter_mut() {
                    field.name = fix_ident_for_go(&field.name, imports);
                }
            }
            TypeExpr::NamedDuck {
                name,
                type_params: _,
            } => *name = fix_ident_for_go(name, imports),
            TypeExpr::TypeOf(z) => *z = fix_ident_for_go(z, imports),
            TypeExpr::Fun(params, _, _) => {
                // Only named function-type parameters carry identifiers.
                for p in params {
                    if let Some(p) = p.0.as_mut() {
                        *p = fix_ident_for_go(p, imports);
                    }
                }
            }
            TypeExpr::TypeName(_, name, _) => {
                *name = fix_ident_for_go(name, imports);
            }
            _ => {}
        },
        // Value-expression callback: rename struct/duck field keys, variable
        // names, lambda/for/match bindings; all other variants are left as-is.
        |v, _| match &mut v.0 {
            ValueExpr::Struct {
                name,
                fields,
                type_params: _,
            } => {
                *name = fix_ident_for_go(name, imports);
                for f in fields {
                    f.0 = fix_ident_for_go(&f.0, imports);
                }
            }
            ValueExpr::Lambda(expr) => {
                let LambdaFunctionExpr {
                    is_mut: _,
                    params,
                    return_type: _,
                    value_expr: _,
                } = expr.as_mut();
                for p in params {
                    p.0 = fix_ident_for_go(&p.0, imports);
                }
            }
            ValueExpr::Duck(fields) => {
                for f in fields {
                    f.0 = fix_ident_for_go(&f.0, imports);
                }
            }
            ValueExpr::Variable(_, name, _, _, _) => {
                *name = fix_ident_for_go(name, imports);
            }
            ValueExpr::VarDecl(decl) => decl.0.name = fix_ident_for_go(&decl.0.name, imports),
            ValueExpr::For {
                ident,
                target: _,
                block: _,
            } => ident.0 = fix_ident_for_go(&ident.0, imports),
            ValueExpr::Match {
                value_expr: _,
                arms,
                else_arm,
                span: _,
            } => {
                // Rename the optional identifier binding of every arm,
                // including the else arm when present.
                for arm in arms {
                    if let Some(ident) = arm.identifier_binding.as_mut() {
                        *ident = fix_ident_for_go(ident, imports);
                    }
                }
                if let Some(arm) = else_arm
                    && let Some(ident) = arm.identifier_binding.as_mut()
                {
                    *ident = fix_ident_for_go(ident, imports);
                }
            }
            _ => {}
        },
        v,
        type_env,
    );
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/emit/ir.rs | src/emit/ir.rs | use std::collections::HashSet;
use crate::emit::{
fix_ident_for_go,
value::{Case, IrInstruction, IrValue},
};
impl IrInstruction {
    /// Renders this IR instruction as a snippet of Go source text.
    ///
    /// The emitter is purely textual: each arm produces a `String`, nested
    /// instructions/values are rendered by recursive `emit_as_go` calls, and
    /// sibling instructions are joined elsewhere via `join_ir`.
    fn emit_as_go(&self) -> String {
        #![allow(clippy::format_in_format_args)]
        match self {
            // Globals are initialised through an immediately-invoked closure
            // so the init code may contain arbitrary statements.
            IrInstruction::GlobalVarDecl {
                name,
                go_type,
                init_code,
            } => {
                format!(
                    "var {name} {go_type} = func() {go_type} {{ {} }}()",
                    join_ir(init_code)
                )
            }
            // Go's `defer` only takes a call expression, so anything other
            // than a result-less FunCall or raw inline Go is a compiler bug.
            IrInstruction::Defer(d) => {
                let fun_call @ (IrInstruction::FunCall(None, ..) | IrInstruction::InlineGo(..)) =
                    d.as_ref()
                else {
                    panic!("Compiler Bug: can only defer a func call without result {d:?}")
                };
                format!("defer {}", fun_call.emit_as_go())
            }
            // Labelled range loop. The `if false {goto L}` presumably keeps
            // Go from rejecting the label as defined-but-unused (Go errors on
            // unused labels) — TODO confirm.
            IrInstruction::ForRangeElem {
                ident: _,
                range_target,
                body,
                label,
            } => {
                format!(
                    "{{\nif false {{goto {label}}}\n{label}:\nfor DUCK_FOR_IDX := range {} {{\n_ = DUCK_FOR_IDX\n{}\n}}\n}}",
                    range_target.emit_as_go(),
                    body.iter()
                        .map(|i| i.emit_as_go())
                        .fold(String::new(), |mut acc, x| {
                            acc.push_str(&x);
                            acc.push('\n');
                            acc
                        })
                )
            }
            // Concatenation of zero or more parts; an empty list becomes the
            // empty string literal.
            IrInstruction::StringConcat(target, v) => {
                format!(
                    "{target} = {}",
                    if v.is_empty() {
                        String::from("\"\"")
                    } else {
                        v.iter()
                            .map(|x| x.emit_as_go().to_string())
                            .collect::<Vec<_>>()
                            .join(" + ")
                    }
                )
            }
            // Type switch. Each case is lowered by `emit_case_go`; a case
            // named "__else" acts as the fallback whose instructions are
            // spliced into the else-branch of the last conditional branch.
            IrInstruction::SwitchType(against, type_cases) => {
                fn emit_case_go(case: &Case, actual: &str, else_case: Option<&Case>) -> String {
                    // Wraps the accumulated instructions into `case T: {...}`
                    // with a trailing `break`.
                    fn format_case_output(case: &Case, instructions: &[IrInstruction]) -> String {
                        format!(
                            "case {}: {{\n{}\nbreak\n}}",
                            case.type_name,
                            instructions
                                .iter()
                                .map(|instr| instr.emit_as_go())
                                .collect::<Vec<_>>()
                                .join("\n")
                        )
                    }
                    let mut instructions = vec![];
                    let type_name = case.type_name.clone();
                    // No conditional branches: optionally bind the downcast
                    // value, then emit the case body directly.
                    let Some(branches) = &case.conditional_branches else {
                        if let Some(identifier) = &case.identifier_binding {
                            instructions.push(IrInstruction::VarDecl(
                                identifier.clone(),
                                type_name.clone(),
                            ));
                            instructions.push(IrInstruction::VarAssignment(
                                identifier.clone(),
                                IrValue::Imm(format!("{actual}.({type_name})")),
                            ));
                        }
                        instructions.extend(case.instrs.clone());
                        return format_case_output(case, &instructions);
                    };
                    // Same treatment when the branch list exists but is empty.
                    if branches.is_empty() {
                        if let Some(identifier) = &case.identifier_binding {
                            instructions.push(IrInstruction::VarDecl(
                                identifier.clone(),
                                type_name.clone(),
                            ));
                            instructions.push(IrInstruction::VarAssignment(
                                identifier.clone(),
                                IrValue::Imm(format!("{actual}.({type_name})")),
                            ));
                        }
                        instructions.extend(case.instrs.clone());
                        return format_case_output(case, &instructions);
                    }
                    // Guarded branches: each becomes a block that binds the
                    // downcast value, evaluates its condition, and runs the
                    // branch body behind an `if` (ending in `break`).
                    for branch in branches {
                        let mut block_instructions = vec![];
                        if let Some(identifier) = &branch.1.identifier_binding {
                            block_instructions.push(IrInstruction::VarDecl(
                                identifier.clone(),
                                type_name.clone(),
                            ));
                            block_instructions.push(IrInstruction::VarAssignment(
                                identifier.clone(),
                                IrValue::Imm(format!("{actual}.({type_name})")),
                            ));
                        }
                        block_instructions.extend(branch.0.0.clone());
                        let mut if_body = branch.1.instrs.clone();
                        if_body.push(IrInstruction::Break(None));
                        block_instructions.push(IrInstruction::If(
                            branch
                                .0
                                .1
                                .clone()
                                .expect("compiler error: we expect that there's an value"),
                            if_body,
                            None,
                        ));
                        instructions.push(IrInstruction::Block(block_instructions));
                    }
                    // Splice the "__else" case's instructions into the else
                    // branch of the last guarded `if`, when the shape allows.
                    if let Some(else_case) = else_case {
                        let Some(last_instruction) = instructions.last_mut() else {
                            return format_case_output(case, &instructions);
                        };
                        let IrInstruction::Block(block_instrs) = last_instruction else {
                            return format_case_output(case, &instructions);
                        };
                        let Some(last_block_instr) = block_instrs.last_mut() else {
                            return format_case_output(case, &instructions);
                        };
                        let IrInstruction::If(_, _, else_branch) = last_block_instr else {
                            return format_case_output(case, &instructions);
                        };
                        *else_branch = Some(else_case.instrs.clone());
                    }
                    format_case_output(case, &instructions)
                }
                let else_case = type_cases.iter().find(|case| case.type_name == "__else");
                let processed_cases: Vec<String> = type_cases
                    .iter()
                    .map(|case| emit_case_go(case, &against.emit_as_go(), else_case))
                    .collect();
                format!(
                    "switch ({}).(type) {{\n{}\n}}",
                    against.emit_as_go(),
                    processed_cases.join("\n"),
                )
            }
            IrInstruction::GoPackage(s) => format!("package {s}"),
            // Binary arithmetic/logic: each stores `lhs OP rhs` into the
            // named result variable; the type parameter is currently unused.
            IrInstruction::Add(r, left, right, _type_expr) => {
                // TODO: check if this is correct
                format!("{r} = {} + {}", left.emit_as_go(), right.emit_as_go(),)
            }
            IrInstruction::Mul(r, v1, v2, _type_expr) => {
                // TODO: check if this is correct
                format!("{r} = {} * {}", v1.emit_as_go(), v2.emit_as_go(),)
            }
            IrInstruction::Sub(r, v1, v2, _type_expr) => {
                format!("{r} = {} - {}", v1.emit_as_go(), v2.emit_as_go(),)
            }
            IrInstruction::Div(r, v1, v2, _type_expr) => {
                format!("{r} = {} / {}", v1.emit_as_go(), v2.emit_as_go(),)
            }
            IrInstruction::Mod(r, v1, v2, _type_expr) => {
                format!("{r} = {} % {}", v1.emit_as_go(), v2.emit_as_go(),)
            }
            IrInstruction::Continue(label) => format!(
                "continue{}",
                label.as_ref().map(|l| format!(" {l}")).unwrap_or_default()
            ),
            IrInstruction::Break(label) => format!(
                "break{}",
                label.as_ref().map(|l| format!(" {l}")).unwrap_or_default()
            ),
            IrInstruction::Return(o) => format!(
                "return {}",
                o.as_ref()
                    .map(IrValue::emit_as_go)
                    .unwrap_or("".to_string())
            ),
            IrInstruction::Equals(r, v1, v2, _type_expr) => {
                format!("{r} = {} == {}", v1.emit_as_go(), v2.emit_as_go(),)
            }
            IrInstruction::NotEquals(r, v1, v2, _type_expr) => {
                format!("{r} = {} != {}", v1.emit_as_go(), v2.emit_as_go(),)
            }
            IrInstruction::LessThan(r, v1, v2, _type_expr) => {
                format!("{r} = {} < {}", v1.emit_as_go(), v2.emit_as_go(),)
            }
            IrInstruction::LessThanOrEquals(r, v1, v2, _type_expr) => {
                format!("{r} = {} <= {}", v1.emit_as_go(), v2.emit_as_go(),)
            }
            IrInstruction::GreaterThan(r, v1, v2, _type_expr) => {
                format!("{r} = {} > {}", v1.emit_as_go(), v2.emit_as_go(),)
            }
            IrInstruction::GreaterThanOrEquals(r, v1, v2, _type_expr) => {
                format!("{r} = {} >= {}", v1.emit_as_go(), v2.emit_as_go(),)
            }
            IrInstruction::And(r, v1, v2, _type_expr) => {
                format!("{r} = {} && {}", v1.emit_as_go(), v2.emit_as_go(),)
            }
            IrInstruction::Or(r, v1, v2, _type_expr) => {
                format!("{r} = {} || {}", v1.emit_as_go(), v2.emit_as_go(),)
            }
            IrInstruction::Block(block_instr) => {
                format!("{{\n{}\n}}", join_ir(block_instr))
            }
            // `r = target(args...)`; the assignment prefix is dropped when
            // there is no result variable.
            IrInstruction::FunCall(r, t, p) => {
                format!(
                    "{}{}({})",
                    r.as_ref().map(|x| format!("{x} = ")).unwrap_or_default(),
                    t.emit_as_go(),
                    p.iter()
                        .map(IrValue::emit_as_go)
                        .collect::<Vec<_>>()
                        .join(", ")
                )
            }
            // Go rejects declared-but-unused locals, hence the `_ = name`
            // blank assignment. The empty tuple type gets an immediate
            // zero-value initialisation.
            IrInstruction::VarDecl(name, ty) => {
                if ty == "Tup_" {
                    format!("var {name} {ty}\n{name} = Tup_{{}}\n_ = {name}")
                } else {
                    format!("var {name} {ty}\n_ = {name}")
                }
            }
            IrInstruction::VarAssignment(name, v) => format!("{name} = {}", v.emit_as_go()),
            IrInstruction::If(cond, then, els) => {
                format!(
                    "if {} {{\n{}\n}} {}",
                    cond.emit_as_go(),
                    then.iter()
                        .map(IrInstruction::emit_as_go)
                        .collect::<Vec<_>>()
                        .join("\n"),
                    els.as_ref()
                        .map(|x| {
                            format!(
                                "else {{\n{}\n}} ",
                                x.iter()
                                    .map(IrInstruction::emit_as_go)
                                    .collect::<Vec<_>>()
                                    .join("\n")
                            )
                        })
                        .unwrap_or("".to_string())
                )
            }
            // Infinite labelled loop; same unused-label workaround as
            // ForRangeElem above.
            IrInstruction::Loop(v, label) => {
                format!(
                    "if false {{goto {label}}}\n{label}:\nfor {{\n{}\n}}",
                    join_ir(v)
                )
            }
            IrInstruction::InlineGo(t) => t.to_string(),
            IrInstruction::GoImports(imports) => {
                format!(
                    "import (\n{}\n)",
                    imports
                        .iter()
                        .map(|(n, m)| format!("{} \"{m}\"", n.clone().unwrap_or_default()))
                        .collect::<Vec<_>>()
                        .join("\n")
                )
            }
            // Generic function definition. Go's `main` must not declare a
            // return type, so the annotation is dropped for it. Each
            // parameter is blank-assigned first — presumably to avoid unused
            // diagnostics; TODO confirm (unused params are legal in Go).
            IrInstruction::GenericFun(name, generics, params, return_type, body) => {
                format!(
                    "func {name}[{}]({}) {} {{\n{}\n}}",
                    generics
                        .iter()
                        .map(|(n, ty)| format!("{n} {ty}"))
                        .collect::<Vec<_>>()
                        .join(", "),
                    params
                        .iter()
                        .map(|(n, ty)| format!("{n} {ty}"))
                        .collect::<Vec<_>>()
                        .join(", "),
                    return_type
                        .as_ref()
                        .map(|return_type| if name == "main" {
                            String::new()
                        } else {
                            return_type.clone()
                        })
                        .unwrap_or(String::new()),
                    format!(
                        "{}\n{}",
                        params
                            .iter()
                            .map(|(name, _)| format!("_ = {name}"))
                            .collect::<Vec<_>>()
                            .join("\n"),
                        join_ir(body)
                    ),
                )
            }
            // Plain (optionally method) function definition; same `main` and
            // blank-assignment handling as GenericFun.
            IrInstruction::FunDef(name, receiver, params, return_type, body) => {
                format!(
                    "func {} {name}({}) {} {{\n{}\n}}",
                    receiver
                        .as_ref()
                        .map(|(self_name, recv_type)| format!("({self_name} {recv_type})"))
                        .unwrap_or_default(),
                    params
                        .iter()
                        .map(|(n, ty)| format!("{n} {ty}"))
                        .collect::<Vec<_>>()
                        .join(", "),
                    return_type
                        .as_ref()
                        .map(|return_type| if name == "main" {
                            String::new()
                        } else {
                            return_type.clone()
                        })
                        .unwrap_or(String::new()),
                    format!(
                        "{}\n{}",
                        params
                            .iter()
                            .map(|(name, _)| format!("_ = {name}"))
                            .collect::<Vec<_>>()
                            .join("\n"),
                        join_ir(body)
                    ),
                )
            }
            IrInstruction::StructDef(name, fields) => {
                format!(
                    "type {name} struct {{\n{}\n}}",
                    fields
                        .iter()
                        .map(|(n, ty)| format!("{n} {ty}"))
                        .collect::<Vec<_>>()
                        .join("\n"),
                )
            }
            // Interface definition with optional type parameters; the
            // bracketed generics list is omitted entirely when empty.
            IrInstruction::InterfaceDef(name, generics, fields) => {
                format!(
                    "type {name}{} interface {{\n{}\n}}",
                    {
                        let generics = generics
                            .iter()
                            .map(|(type_param_name, type_name)| {
                                format!("{type_param_name} {type_name}")
                            })
                            .collect::<Vec<String>>()
                            .join(", ");
                        if !generics.is_empty() {
                            format!("[{generics}]")
                        } else {
                            "".to_string()
                        }
                    },
                    fields
                        .iter()
                        .map(|(n, params, ty)| format!(
                            "{n}({}) {}",
                            params
                                .iter()
                                .map(|(param_name, param_type)| format!(
                                    "{param_name} {param_type}"
                                ))
                                .collect::<Vec<_>>()
                                .join(", "),
                            ty.as_ref().unwrap_or(&String::new())
                        ))
                        .collect::<Vec<_>>()
                        .join("\n"),
                )
            }
        }
    }
}
/// Recursively renames Go-reserved identifiers inside an [`IrValue`].
///
/// Every name that ends up as a Go identifier in the emitted output —
/// variables, struct names, struct/duck field keys, lambda parameters,
/// field-access names — is passed through [`fix_ident_for_go`]. Nested
/// values are visited recursively; instructions inside lambda bodies are
/// handled by [`fix_idents_in_ir`]. Immediates and literals are left alone.
pub fn fix_idents_in_ir_value(v: &mut IrValue, imports: &HashSet<String>) {
    match v {
        // Unary wrappers: only the inner value needs fixing.
        IrValue::BitNot(inner)
        | IrValue::Negate(inner)
        | IrValue::BoolNegate(inner)
        | IrValue::Deref(inner)
        | IrValue::Pointer(inner) => fix_idents_in_ir_value(inner, imports),
        // Binary forms (including indexing): recurse into both operands.
        IrValue::BitAnd(lhs, rhs)
        | IrValue::BitXor(lhs, rhs)
        | IrValue::BitOr(lhs, rhs)
        | IrValue::ShiftLeft(lhs, rhs)
        | IrValue::ShiftRight(lhs, rhs)
        | IrValue::ArrayAccess(lhs, rhs) => {
            fix_idents_in_ir_value(lhs, imports);
            fix_idents_in_ir_value(rhs, imports);
        }
        // Element sequences: fix every element in order.
        IrValue::Array(_, elems) | IrValue::Tuple(_, elems) => {
            for elem in elems.iter_mut() {
                fix_idents_in_ir_value(elem, imports);
            }
        }
        IrValue::FieldAccess(target, field) => {
            *field = fix_ident_for_go(field, imports);
            fix_idents_in_ir_value(target, imports);
        }
        IrValue::Imm(_) => {
            // Immediates are emitted verbatim; renaming is deliberately
            // skipped here (previously: fix_ident_for_go on the text).
        }
        IrValue::Lambda(params, _ret, body) => {
            for param in params.iter_mut() {
                param.0 = fix_ident_for_go(&param.0, imports);
            }
            for instr in body.iter_mut() {
                fix_idents_in_ir(instr, imports);
            }
        }
        IrValue::Struct(struct_name, fields) => {
            *struct_name = fix_ident_for_go(struct_name, imports);
            for field in fields.iter_mut() {
                field.0 = fix_ident_for_go(&field.0, imports);
                fix_idents_in_ir_value(&mut field.1, imports);
            }
        }
        IrValue::Duck(_, fields) => {
            for field in fields.iter_mut() {
                field.0 = fix_ident_for_go(&field.0, imports);
                fix_idents_in_ir_value(&mut field.1, imports);
            }
        }
        IrValue::Var(var_name) => *var_name = fix_ident_for_go(var_name, imports),
        // Leaf literals carry no identifiers.
        IrValue::Bool(..)
        | IrValue::Char(..)
        | IrValue::Nil
        | IrValue::String(..)
        | IrValue::Float(..)
        | IrValue::Int(..)
        | IrValue::Tag(..) => {}
    }
}
/// Rewrites every identifier in the instruction tree via `fix_ident_for_go`
/// so the emitted Go source avoids clashes with Go keywords and with the
/// package names in `imports`.
///
/// The traversal is purely mechanical: each variant fixes the identifier
/// strings it owns and recurses into nested instructions; embedded values are
/// delegated to `fix_idents_in_ir_value`. The instruction structure itself is
/// never changed, only identifier strings are mutated in place.
pub fn fix_idents_in_ir(v: &mut IrInstruction, imports: &HashSet<String>) {
    match v {
        IrInstruction::GlobalVarDecl {
            name,
            go_type: _,
            init_code,
        } => {
            *name = fix_ident_for_go(name, imports);
            for instr in init_code {
                fix_idents_in_ir(instr, imports);
            }
        }
        // Every binary operation has the same shape: fix the result binding,
        // then recurse into both operand values.
        IrInstruction::Add(res, lhs, rhs, res_ty)
        | IrInstruction::Sub(res, lhs, rhs, res_ty)
        | IrInstruction::Mul(res, lhs, rhs, res_ty)
        | IrInstruction::Div(res, lhs, rhs, res_ty)
        | IrInstruction::Mod(res, lhs, rhs, res_ty)
        | IrInstruction::Equals(res, lhs, rhs, res_ty)
        | IrInstruction::NotEquals(res, lhs, rhs, res_ty)
        | IrInstruction::LessThan(res, lhs, rhs, res_ty)
        | IrInstruction::LessThanOrEquals(res, lhs, rhs, res_ty)
        | IrInstruction::GreaterThan(res, lhs, rhs, res_ty)
        | IrInstruction::GreaterThanOrEquals(res, lhs, rhs, res_ty)
        | IrInstruction::And(res, lhs, rhs, res_ty)
        | IrInstruction::Or(res, lhs, rhs, res_ty) => {
            // The result type carries no identifiers to rewrite.
            let _res_type = res_ty;
            *res = fix_ident_for_go(res, imports);
            fix_idents_in_ir_value(lhs, imports);
            fix_idents_in_ir_value(rhs, imports);
        }
        IrInstruction::Block(sub) => {
            for sub in sub {
                fix_idents_in_ir(sub, imports);
            }
        }
        // NOTE(review): break/continue labels are left untouched here (and the
        // `ForRangeElem` label below is ignored) while `Loop` labels are
        // rewritten — confirm labels are always generator-produced so they can
        // never collide with Go keywords/imports.
        IrInstruction::Break(..) | IrInstruction::Continue(..) => {}
        IrInstruction::Defer(sub) => fix_idents_in_ir(sub, imports),
        IrInstruction::ForRangeElem {
            ident,
            range_target,
            body,
            label: _,
        } => {
            *ident = fix_ident_for_go(ident, imports);
            fix_idents_in_ir_value(range_target, imports);
            for body in body {
                fix_idents_in_ir(body, imports);
            }
        }
        IrInstruction::VarDecl(name, _ty) => {
            *name = fix_ident_for_go(name, imports);
        }
        IrInstruction::VarAssignment(name, v) => {
            *name = fix_ident_for_go(name, imports);
            fix_idents_in_ir_value(v, imports);
        }
        IrInstruction::FunCall(_result, target, params) => {
            fix_idents_in_ir_value(target, imports);
            for param in params {
                fix_idents_in_ir_value(param, imports);
            }
        }
        IrInstruction::StringConcat(_res, values) => {
            for v in values {
                fix_idents_in_ir_value(v, imports);
            }
        }
        IrInstruction::Return(v) => {
            if let Some(v) = v.as_mut() {
                fix_idents_in_ir_value(v, imports)
            }
        }
        // Inline Go is emitted verbatim, so there is nothing to rewrite.
        IrInstruction::InlineGo(..) => {}
        IrInstruction::If(cond, then, el) => {
            fix_idents_in_ir_value(cond, imports);
            // Walk the then-branch and (if present) the else-branch in one go.
            for instr in then
                .iter_mut()
                .chain(el.iter_mut().flat_map(|el| el.iter_mut()))
            {
                fix_idents_in_ir(instr, imports);
            }
        }
        IrInstruction::Loop(body, label) => {
            *label = fix_ident_for_go(label, imports);
            for body in body {
                fix_idents_in_ir(body, imports);
            }
        }
        IrInstruction::GoPackage(package_name) => {
            *package_name = fix_ident_for_go(package_name, imports);
        }
        IrInstruction::GoImports(..) => {}
        IrInstruction::GenericFun(name, generics, params, _ret, body) => {
            *name = fix_ident_for_go(name, imports);
            for param in generics {
                param.0 = fix_ident_for_go(&param.0, imports);
            }
            for param in params {
                param.0 = fix_ident_for_go(&param.0, imports);
            }
            for body in body {
                fix_idents_in_ir(body, imports);
            }
        }
        IrInstruction::FunDef(name, receiver, params, _ret, body) => {
            *name = fix_ident_for_go(name, imports);
            if let Some(receiver) = receiver.as_mut() {
                receiver.0 = fix_ident_for_go(&receiver.0, imports);
            }
            for param in params {
                param.0 = fix_ident_for_go(&param.0, imports);
            }
            for body in body {
                fix_idents_in_ir(body, imports);
            }
        }
        IrInstruction::StructDef(_name, fields) => {
            for field in fields {
                field.0 = fix_ident_for_go(&field.0, imports);
            }
        }
        IrInstruction::InterfaceDef(_name, type_params, methods) => {
            for type_param in type_params {
                type_param.0 = fix_ident_for_go(&type_param.0, imports);
            }
            for method in methods {
                method.0 = fix_ident_for_go(&method.0, imports);
                for param in &mut method.1 {
                    param.0 = fix_ident_for_go(&param.0, imports);
                }
            }
        }
        IrInstruction::SwitchType(v, cases) => {
            fix_idents_in_ir_value(v, imports);
            for case in cases {
                // Each conditional branch is ((guard instrs, guard value),
                // branch-body); fix guards first, then the branch binding/body.
                for branch in case
                    .conditional_branches
                    .iter_mut()
                    .flat_map(|v| v.iter_mut())
                {
                    for i in &mut branch.0.0 {
                        fix_idents_in_ir(i, imports);
                    }
                    if let Some(cond) = branch.0.1.as_mut() {
                        fix_idents_in_ir_value(cond, imports);
                    }
                    if let Some(ident) = branch.1.identifier_binding.as_mut() {
                        *ident = fix_ident_for_go(ident, imports);
                    }
                    for instr in &mut branch.1.instrs {
                        fix_idents_in_ir(instr, imports);
                    }
                }
                if let Some(ident) = case.identifier_binding.as_mut() {
                    *ident = fix_ident_for_go(ident, imports);
                }
                for instr in &mut case.instrs {
                    fix_idents_in_ir(instr, imports);
                }
            }
        }
    }
}
pub fn join_ir(v: &[IrInstruction]) -> String {
v.iter()
.map(IrInstruction::emit_as_go)
.collect::<Vec<_>>()
.join("\n")
}
impl IrValue {
    /// Renders this IR value as a single Go expression string.
    ///
    /// Emission is purely syntactic: children are rendered recursively and
    /// wrapped in the matching Go operator, literal, or composite-literal
    /// syntax. No type checking happens here.
    pub fn emit_as_go(&self) -> String {
        match self {
            IrValue::ShiftLeft(lhs, rhs) => {
                format!("({} << {})", lhs.emit_as_go(), rhs.emit_as_go())
            }
            IrValue::ShiftRight(lhs, rhs) => {
                format!("({} >> {})", lhs.emit_as_go(), rhs.emit_as_go())
            }
            IrValue::BitAnd(lhs, rhs) => format!("({} & {})", lhs.emit_as_go(), rhs.emit_as_go()),
            IrValue::BitOr(lhs, rhs) => format!("({} | {})", lhs.emit_as_go(), rhs.emit_as_go()),
            IrValue::BitXor(lhs, rhs) => format!("({} ^ {})", lhs.emit_as_go(), rhs.emit_as_go()),
            IrValue::BitNot(target) => format!("(^{})", target.emit_as_go()),
            // NOTE(review): unary minus/deref/address-of are emitted without
            // parentheses; a directly nested negation would produce e.g. `--x`
            // in Go — confirm the IR never nests these.
            IrValue::Negate(target) => format!("-{}", target.emit_as_go()),
            IrValue::Deref(target) => format!("*{}", target.emit_as_go()),
            IrValue::Pointer(target) => format!("&{}", target.emit_as_go()),
            IrValue::Imm(str) => str.to_string(),
            IrValue::ArrayAccess(target, idx) => {
                format!("{}[{}]", target.emit_as_go(), idx.emit_as_go())
            }
            IrValue::Array(arr_type, contents) => format!(
                "{arr_type}{{{}}}",
                contents
                    .iter()
                    .map(|x| x.emit_as_go())
                    .collect::<Vec<_>>()
                    .join(", ")
            ),
            IrValue::Bool(b) => format!("{b}"),
            IrValue::Int(i) => format!("{i}"),
            IrValue::Float(f) => format!("{f}"),
            // Char/String literals are escaped for Go. The `\o` sequence
            // (turned into `\\o` by the leading backslash escape) is mapped
            // back to a single backslash, acting as a raw pass-through.
            IrValue::Char(c) => format!(
                "'{}'",
                c.to_string()
                    .replace("\\", "\\\\")
                    .replace("\\\\o", "\\")
                    .replace("\0", "\\x00")
                    .replace("\t", "\\t")
                    .replace("\n", "\\n")
                    .replace("\'", "\\'")
            ),
            IrValue::String(s, _is_const) => format!(
                "\"{}\"",
                s.replace("\\", "\\\\")
                    .replace("\0", "\\x00")
                    .replace("\\\\o", "\\")
                    .replace("\n", "\\n")
                    .replace("\"", "\\\"")
                    // Fix: was `"\\\t"` (a backslash followed by a literal TAB
                    // character), which is an invalid escape sequence inside a
                    // Go string literal; a tab must be emitted as the two
                    // characters backslash + 't'.
                    .replace("\t", "\\t")
            ),
            IrValue::Var(v) => v.to_string(),
            IrValue::Struct(s, fields) => {
                format!(
                    // TODO: check if this should be a reference
                    "&{s}{{{}}}",
                    fields
                        .iter()
                        .map(|x| format!("{}: {}", x.0, x.1.emit_as_go()))
                        .collect::<Vec<_>>()
                        .join(", ")
                )
            }
            IrValue::Tag(identifier) => {
                format!(
                    // TODO: check if this should be a reference
                    "{identifier}{{}}",
                )
            }
            IrValue::Duck(s, fields) => {
                format!(
                    // TODO: check if this should be a reference
                    "&{s}{{{}}}",
                    fields
                        .iter()
                        .map(|x| format!("{}: {}", x.0, x.1.emit_as_go()))
                        .collect::<Vec<_>>()
                        .join(", ")
                )
            }
            IrValue::FieldAccess(o, field_name) => {
                format!("{}.{field_name}", o.emit_as_go())
            }
            IrValue::Tuple(go_struct, fields) => {
                format!(
                    "{go_struct}{{{}}}",
                    fields
                        .iter()
                        .map(IrValue::emit_as_go)
                        .collect::<Vec<_>>()
                        .join(", ")
                )
            }
            IrValue::Nil => "nil".to_string(),
            IrValue::BoolNegate(o) => format!("!{}", o.emit_as_go()),
            IrValue::Lambda(params, return_type, body) => format!(
                "func({}) {} {{\n{}\n}} ",
                params
                    .iter()
                    .map(|(name, ty)| format!("{name} {ty}"))
                    .collect::<Vec<_>>()
                    .join(", "),
                return_type.as_ref().cloned().unwrap_or_default(),
                body.iter()
                    .map(IrInstruction::emit_as_go)
                    .collect::<Vec<_>>()
                    .join("\n")
            ),
        }
    }
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/emit/jsx_component.rs | src/emit/jsx_component.rs | use crate::{
emit::{types::escape_string_for_go, value::IrInstruction},
parse::jsx_component_parser::JsxComponent,
semantics::type_resolve::TypeEnv,
};
impl JsxComponent {
    /// Renders this component as plain JavaScript:
    /// `function <Name>(props){<body>}`.
    fn emit_js(&self) -> String {
        format!(
            "function {}(props){{{}}}",
            self.name, self.javascript_source.0
        )
    }
    /// Emits the Go function for this JSX component.
    ///
    /// The generated Go function takes the component's props and returns a
    /// `Tup_string_string` of (component name, JS source), where the JS
    /// source is built at runtime by a `fmt.Sprintf` that splices in the
    /// JSON-serialized props and the JS of all transitive client-component
    /// dependencies ahead of this component's own body.
    pub fn emit(&self, type_env: &mut TypeEnv) -> IrInstruction {
        // Go expression (escaped for embedding) that serializes `props` to JSON.
        let props = escape_string_for_go(&self.props_type.0.call_to_json("props", type_env));
        // NOTE(review): the `.clone()`s below detach the dependency list from
        // `type_env` so `get_component` can re-borrow it inside the map —
        // verify they cannot be removed without a borrow conflict.
        let all = format!(
            "fmt.Sprintf(\"{}\\nfunction {}(props){{\\nprops = (%s)\\n%s}}\", {props}, \"{}\")",
            escape_string_for_go(
                &type_env
                    .get_component_dependencies(self.name.clone())
                    .client_components
                    .clone()
                    .into_iter()
                    .map(|x| type_env.get_component(x.as_str()).unwrap().emit_js())
                    .collect::<Vec<_>>()
                    .join("\n")
            ),
            self.name,
            escape_string_for_go(&self.javascript_source.0)
        );
        // The emitted Go function body is inline Go returning the pair.
        IrInstruction::FunDef(
            self.name.clone(),
            None,
            vec![(
                "props".to_string(),
                self.props_type.0.as_go_type_annotation(type_env),
            )],
            Some("Tup_string_string".to_string()),
            vec![IrInstruction::InlineGo(format!(
                "return Tup_string_string {{ field_0: \"{}\", field_1: {all} }}",
                self.name
            ))],
        )
    }
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/dargo/test.rs | src/dargo/test.rs | use colored::Colorize;
use lazy_static::lazy_static;
use std::io::ErrorKind as IOErrKind;
use std::process::Command;
use crate::dargo::cli::{CompileArgs, TestArgs};
use crate::dargo::compile_test::{self, CompileTestErrKind};
use crate::{
dargo::build::{BuildErrKind, build},
tags::Tag,
};
/// Error cases surfaced by `dargo test`.
#[derive(Debug)]
pub enum TestErrKind {
    /// The workspace build failed before any test binary could be produced.
    BuildErr(BuildErrKind),
    /// Several binaries exist but no `--bin` name was supplied.
    MissingTargetBinary,
    /// The requested (or any) built binary could not be found.
    NoBinaryFound,
    /// Compiling the single-file test harness failed.
    CompileTestErrKind(CompileTestErrKind),
    /// Underlying I/O failure (canonicalize/spawn/wait).
    IOErr(IOErrKind),
    /// NOTE(review): never constructed in this file — confirm it is needed.
    Unknown(),
}
lazy_static! {
    /// Pre-rendered " compile test " tag for log/error message prefixes.
    // NOTE(review): appears unused inside this module — confirm before
    // removing; `std::sync::LazyLock` could replace `lazy_static` if the
    // crate's MSRV allows it.
    static ref COMPILE_TEST_TAG: String = " compile test "
        .on_bright_black()
        .bright_white()
        .to_string();
}
/// Canonicalizes `binary_path`, spawns it as a child process and blocks until
/// it exits. Returns the canonical path so callers can report where it lives.
fn spawn_and_wait(
    binary_path: std::path::PathBuf,
) -> Result<std::path::PathBuf, (String, TestErrKind)> {
    let full_path_name = binary_path.canonicalize().map_err(|err| {
        (
            format!(
                "{}{} couldn't canonicalize path name of just compiled duck binary",
                Tag::IO,
                Tag::Err
            ),
            TestErrKind::IOErr(err.kind()),
        )
    })?;
    Command::new(full_path_name.clone())
        .spawn()
        .map_err(|err| {
            (
                format!(
                    "{}{}{} couldn't spawn duck process",
                    Tag::Run,
                    Tag::IO,
                    Tag::Err
                ),
                TestErrKind::IOErr(err.kind()),
            )
        })?
        .wait()
        .map_err(|err| {
            (
                format!(
                    "{}{}{} couldn't wait for process",
                    Tag::Run,
                    Tag::IO,
                    Tag::Err
                ),
                TestErrKind::IOErr(err.kind()),
            )
        })?;
    Ok(full_path_name)
}

/// Picks the built binary to execute: with more than one binary a `--bin`
/// name is mandatory and must match; otherwise the first (if any) is used.
fn select_test_binary(
    binaries: &[(String, std::path::PathBuf)],
    requested: Option<&String>,
) -> Result<std::path::PathBuf, (String, TestErrKind)> {
    if binaries.len() > 1 {
        let Some(binary_name) = requested else {
            return Err((
                // Fix: typo "mutliple" -> "multiple" in the user-facing message.
                format!(
                    "{}{} missing target binary to run. multiple binaries are available, specify using --bin <binary_name>\n",
                    Tag::Build,
                    Tag::Err,
                ),
                TestErrKind::MissingTargetBinary,
            ));
        };
        binaries
            .iter()
            .find(|binary| binary.0 == *binary_name)
            .map(|binary| binary.1.clone())
            .ok_or_else(|| {
                (
                    format!(
                        "{}{} specified binary {} not found",
                        Tag::Build,
                        Tag::Err,
                        binary_name
                    ),
                    TestErrKind::NoBinaryFound,
                )
            })
    } else {
        binaries
            .first()
            .map(|binary| binary.1.clone())
            .ok_or_else(|| {
                (
                    format!("{}{} missing target binary to run.", Tag::Build, Tag::Err,),
                    TestErrKind::NoBinaryFound,
                )
            })
    }
}

/// Entry point of `dargo test`.
///
/// With an explicit file argument the file is compiled in test mode and the
/// resulting binary executed; otherwise the whole workspace is built and the
/// selected (or only) binary is run.
pub fn test(test_args: &TestArgs) -> Result<(), (String, TestErrKind)> {
    if let Some(run_args_file) = test_args.file.clone() {
        let compile_result = compile_test::compile(CompileArgs {
            file: run_args_file.clone(),
            output_name: None,
            optimize_go: test_args.optimize_go,
        })
        .map_err(|err| {
            (
                format!(
                    "{}{} couldn't compile the code\n{}",
                    Tag::Build,
                    Tag::Err,
                    err.0
                ),
                TestErrKind::CompileTestErrKind(err.1),
            )
        })?;
        let full_path_name = spawn_and_wait(compile_result.binary_path)?;
        println!(
            "{}{}{} Successfully run executable output of {} which is located at {}",
            Tag::Dargo,
            Tag::Run,
            Tag::Check,
            run_args_file.to_string_lossy(),
            full_path_name.to_string_lossy()
        );
        return Ok(());
    }
    let build_result = build(&crate::dargo::cli::BuildArgs {
        bin: test_args.bin.clone(),
        output_name: None,
        optimize_go: test_args.optimize_go,
    })
    .map_err(|err| {
        (
            format!(
                "{}{} couldn't build the code\n{}",
                Tag::Build,
                Tag::Err,
                err.0
            ),
            TestErrKind::BuildErr(err.1),
        )
    })?;
    let binary_path = select_test_binary(&build_result.binaries, test_args.bin.as_ref())?;
    let full_path_name = spawn_and_wait(binary_path)?;
    println!(
        "{}{}{} Successfully run executable output of current workspace which is located at {}",
        Tag::Dargo,
        Tag::Run,
        Tag::Check,
        full_path_name.to_string_lossy()
    );
    Ok(())
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/dargo/compile.rs | src/dargo/compile.rs | use colored::Colorize;
use duckwind::EmitEnv;
use lazy_static::lazy_static;
use std::{
ffi::OsString,
fs,
path::{Path, PathBuf},
sync::mpsc,
time::Duration,
};
use crate::{
DARGO_DOT_DIR,
cli::go_cli::{self, GoCliErrKind},
dargo::cli::CompileArgs,
emit::{ir::join_ir, types::escape_string_for_go},
go_fixup::remove_unused_imports::cleanup_go_source,
lex,
parse::value_parser::empty_range,
parse_src_file,
tags::Tag,
typecheck, write_in_duck_dotdir,
};
/// Error cases surfaced by `dargo compile`.
#[derive(Debug)]
pub enum CompileErrKind {
    /// The file name could not be extracted or was not valid UTF-8.
    CorruptedFileName,
    /// The given path is a directory (also reused for non-`.duck` paths).
    TargetPathIsDirectory,
    /// NOTE(review): declared but never constructed in this file.
    FileNotFound,
    /// Reading the source file from disk failed.
    CannotReadFile,
    /// Invoking the Go toolchain failed.
    GoCli(GoCliErrKind),
}
lazy_static! {
    /// Pre-rendered " compile " tag used as a prefix in log/error messages.
    // NOTE(review): `std::sync::LazyLock` could replace `lazy_static` if the
    // crate's MSRV allows it.
    static ref COMPILE_TAG: String = " compile ".on_bright_black().bright_white().to_string();
}
/// Result of a successful `compile` run.
pub struct CompileOutput {
    /// Path to the produced executable.
    pub binary_path: PathBuf,
}
/// Compiles a single `.duck` source file down to a native executable.
///
/// Pipeline: validate the path → lex/parse → typecheck (streaming class
/// strings to a tailwind worker thread) → emit Go → append the collected
/// tailwind stylesheet as `TAILWIND_STR` → invoke the Go toolchain.
///
/// Returns the path of the produced binary, or a tagged message plus a
/// [`CompileErrKind`] describing which stage failed.
pub fn compile(compile_args: CompileArgs) -> Result<CompileOutput, (String, CompileErrKind)> {
    let src_file: PathBuf = compile_args.file;
    let binary_output_name: Option<String> = compile_args.output_name;
    if src_file.is_dir() {
        let message = format!(
            "{}{} the path you provided is a directory. You need to provide a .duck file",
            *COMPILE_TAG,
            Tag::Err,
        );
        return Err((message, CompileErrKind::TargetPathIsDirectory));
    }
    // Fix: a path without any extension used to panic here via
    // `.ok_or_else(..).unwrap()`; report it as a regular error instead.
    let Some(extension) = src_file.extension() else {
        return Err((
            format!(
                "{}{} couldn't extract file extension from provided source file",
                *COMPILE_TAG,
                Tag::Err,
            ),
            CompileErrKind::CorruptedFileName,
        ));
    };
    if extension != "duck" {
        // NOTE(review): reuses `TargetPathIsDirectory` since the enum has no
        // dedicated "wrong extension" variant — confirm before adding one.
        let message = format!(
            "{}{} the path you provided is not a valid duck source file. You need to provide a .duck file",
            *COMPILE_TAG,
            Tag::Err,
        );
        return Err((message, CompileErrKind::TargetPathIsDirectory));
    }
    // Leaked on purpose: the lexer/parser hold `&'static str` references.
    let src_file_name: &'static str = src_file
        .file_name()
        .ok_or_else(|| {
            (
                format!(
                    "{}{} couldn't get the filename from given ",
                    *COMPILE_TAG,
                    Tag::Err
                ),
                CompileErrKind::CorruptedFileName,
            )
        })?
        .to_str()
        .ok_or_else(|| {
            (
                format!(
                    "{}{} the filename is an invalid utf-8 string",
                    *COMPILE_TAG,
                    Tag::Err
                ),
                CompileErrKind::CorruptedFileName,
            )
        })?
        .to_string()
        .leak();
    let src_file_file_contents: &'static str = fs::read_to_string(&src_file)
        .map_err(|err| {
            (
                format!(
                    "{}{} couldn't read file '{}'. msg='{}'",
                    *COMPILE_TAG,
                    Tag::Err,
                    src_file.to_string_lossy().bright_blue(),
                    err.to_string().bright_red()
                ),
                CompileErrKind::CannotReadFile,
            )
        })?
        .to_string()
        .leak();
    let tokens = lex(src_file_name, src_file_file_contents);
    let mut src_file_ast = parse_src_file(&src_file, src_file_name, src_file_file_contents, tokens);
    // Tailwind class strings are streamed to a worker thread during
    // typechecking; the finished stylesheet is collected once the sender
    // side is dropped below.
    let (tailwind_worker_send, tailwind_worker_receive) = mpsc::channel::<String>();
    let (tailwind_result_send, tailwind_result_receive) = mpsc::channel::<String>();
    let tailwind_prefix = None::<String>;
    std::thread::spawn(move || {
        let mut emit_env = EmitEnv::new_with_default_config();
        // emit_env.parse_full_string(src_file_file_contents);
        loop {
            let s = tailwind_worker_receive.recv();
            match s {
                Ok(s) => emit_env.parse_full_string(tailwind_prefix.as_deref(), s.as_str()),
                Err(_) => break,
            }
        }
        let _ = tailwind_result_send.send(emit_env.to_css_stylesheet(true));
    });
    let mut type_env = typecheck(&mut src_file_ast, &tailwind_worker_send);
    let mut go_code = join_ir(&src_file_ast.emit("main".into(), &mut type_env, empty_range()));
    // drop the sender here so that the thread knows it should emit the final tailwind
    drop(tailwind_worker_send);
    // NOTE(review): a stuck tailwind worker panics the compiler here —
    // consider surfacing this as a CompileErrKind instead.
    let css = tailwind_result_receive
        .recv_timeout(Duration::from_secs(30))
        .expect("tailwind timed out");
    go_code = cleanup_go_source(&go_code, true);
    go_code = format!(
        "{go_code}\nconst TAILWIND_STR = \"{}\"",
        escape_string_for_go(css.as_str())
    );
    let go_file_name = format!("{src_file_name}.gen.go");
    let go_output_file = write_in_duck_dotdir(&go_file_name, &go_code);
    if compile_args.optimize_go {
        let _ = go_cli::format(go_output_file.as_path());
    }
    let executable_path = go_cli::build(
        &DARGO_DOT_DIR,
        binary_output_name
            .map(OsString::from)
            .unwrap_or_else(|| OsString::from("duck_out"))
            .as_os_str(),
        Path::new(&go_file_name),
    )
    .map_err(|err| {
        (
            format!("{}{}", *COMPILE_TAG, err.0),
            CompileErrKind::GoCli(err.1),
        )
    })?;
    println!(
        "{}{}{} Successfully compiled binary",
        Tag::Dargo,
        *COMPILE_TAG,
        Tag::Check,
    );
    Ok(CompileOutput {
        binary_path: executable_path,
    })
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/dargo/cli.rs | src/dargo/cli.rs | use clap::{Parser as CliParser, Subcommand};
use std::path::PathBuf;
use crate::{
dargo::{
self, compile::CompileErrKind, docs::DocsErrKind, init::InitErrKind, new::NewErrKind,
run::RunErrKind, test::TestErrKind,
},
tags::Tag,
};
use super::{
build::{self, BuildErrKind},
clean::CleanErrKind,
};
// Top-level CLI definition for the `dargo` binary.
// (Plain `//` comments are used on clap items on purpose: doc comments would
// become user-visible help text and change CLI output.)
#[derive(CliParser, Debug)]
pub struct DargoCliParser {
    // Global verbosity flag.
    // NOTE(review): parsed but apparently never read by `run_cli` — confirm
    // whether verbosity is meant to be wired through.
    #[arg(long, short = 'v', global = true)]
    verbose: bool,
    // Subcommand to dispatch on.
    #[command(subcommand)]
    pub command: Commands,
}
// All `dargo` subcommands; each variant carries its own argument struct.
// (Plain `//` comments on purpose — doc comments would alter clap help text.)
#[derive(Subcommand, Debug)]
pub enum Commands {
    Build(BuildArgs),
    Compile(CompileArgs),
    Init(InitArgs),
    New(NewArgs),
    Clean,
    Run(RunArgs),
    Test(TestArgs),
    Docs(DocsGenerateArgs),
}
// Arguments for `dargo build`.
#[derive(clap::Args, Debug)]
pub struct BuildArgs {
    // #[arg(long, value_parser = ["x86", "arm"])]
    // arch: Option<String>
    // Name of the binary target to build (required when several exist).
    #[arg(long, short = 'b')]
    pub bin: Option<String>,
    // Override for the output executable's file name.
    #[arg(long, short = 'o')]
    pub output_name: Option<String>,
    // Run `go fmt` on the generated Go source before building.
    #[arg(long, short = 'G')]
    pub optimize_go: bool,
}
// Arguments for `dargo compile` (single-file compilation).
#[derive(clap::Args, Debug)]
pub struct CompileArgs {
    // Path to the `.duck` source file.
    pub file: PathBuf,
    // Override for the output executable's file name.
    #[arg(long, short = 'o')]
    pub output_name: Option<String>,
    // Run `go fmt` on the generated Go source before building.
    #[arg(long, short = 'G')]
    pub optimize_go: bool,
}
// Arguments for `dargo docs`.
#[derive(clap::Args, Debug)]
pub struct DocsGenerateArgs {
    // Source file to generate documentation for.
    pub file: PathBuf,
}
// Arguments for `dargo run`.
#[derive(clap::Args, Debug)]
pub struct RunArgs {
    // Optional single file to compile and run; workspace build when absent.
    pub file: Option<PathBuf>,
    // Run `go fmt` on the generated Go source before building.
    #[arg(long, short = 'G')]
    pub optimize_go: bool,
    // Name of the binary target to run (required when several exist).
    #[arg(long, short = 'b')]
    pub bin: Option<String>,
}
// Arguments for `dargo test`; mirrors `RunArgs`.
#[derive(clap::Args, Debug)]
pub struct TestArgs {
    // Optional single file to compile in test mode; workspace build when absent.
    pub file: Option<PathBuf>,
    // Run `go fmt` on the generated Go source before building.
    #[arg(long, short = 'G')]
    pub optimize_go: bool,
    // Name of the binary target to run (required when several exist).
    #[arg(long, short = 'b')]
    pub bin: Option<String>,
}
// Arguments for `dargo init` (initialize a project in the current directory).
#[derive(clap::Args, Debug)]
pub struct InitArgs {
    // Optional project name; defaults are handled by the init routine.
    pub project_name: Option<String>,
    // Examples:
    // #[arg(long, short = 'o')]
    // optimize: bool,
    // #[arg(long, value_parser = ["x86", "arm"])]
    // arch: Option<String>
}
// Arguments for `dargo new` (create a project in a new directory).
#[derive(clap::Args, Debug)]
pub struct NewArgs {
    // Optional project name; defaults are handled by the new-project routine.
    pub project_name: Option<String>,
    // Examples:
    // #[arg(long, short = 'o')]
    // optimize: bool,
    // #[arg(long, value_parser = ["x86", "arm"])]
    // arch: Option<String>
}
/// Aggregate error kind for the CLI: one variant per subcommand's error type.
#[derive(Debug)]
pub enum CliErrKind {
    Init(InitErrKind),
    New(NewErrKind),
    Compile(CompileErrKind),
    Build(BuildErrKind),
    Clean(CleanErrKind),
    Run(RunErrKind),
    Test(TestErrKind),
    Docs(DocsErrKind),
}
/// Parses the command line and dispatches to the matching `dargo` subcommand.
///
/// Each branch prefixes the subcommand's error message with the `dargo` tag
/// and lifts its error kind into [`CliErrKind`].
pub fn run_cli() -> Result<(), (String, CliErrKind)> {
    let args = DargoCliParser::parse();
    match args.command {
        Commands::Build(build_args) => {
            build::build(&build_args).map_err(|err| {
                (
                    format!("{}{}{}", Tag::Dargo, Tag::Build, err.0,),
                    CliErrKind::Build(err.1),
                )
            })?;
        }
        Commands::Compile(compile_args) => {
            dargo::compile::compile(compile_args).map_err(|err| {
                (
                    format!("{}{}", Tag::Dargo, err.0),
                    CliErrKind::Compile(err.1),
                )
            })?;
        }
        Commands::Init(init_args) => {
            dargo::init::init_project(None, init_args)
                .map_err(|err| (format!("{}{}", Tag::Dargo, err.0), CliErrKind::Init(err.1)))?;
        }
        Commands::Clean => {
            // NOTE(review): this format string has an extra space ("{}{} {}")
            // compared to the other branches — confirm it is intentional.
            dargo::clean::clean().map_err(|err| {
                (
                    format!("{}{} {}", Tag::Dargo, Tag::Clean, err.0,),
                    CliErrKind::Clean(err.1),
                )
            })?;
        }
        Commands::Run(run_args) => {
            dargo::run::run(&run_args).map_err(|err| {
                (
                    format!("{}{}{}", Tag::Dargo, Tag::Run, err.0,),
                    CliErrKind::Run(err.1),
                )
            })?;
        }
        Commands::Test(test_args) => {
            // NOTE(review): tagged with `Tag::Run` rather than a dedicated
            // test tag — confirm this is intentional.
            dargo::test::test(&test_args).map_err(|err| {
                (
                    format!("{}{}{}", Tag::Dargo, Tag::Run, err.0,),
                    CliErrKind::Test(err.1),
                )
            })?;
        }
        Commands::Docs(docs_generate_args) => {
            dargo::docs::generate(docs_generate_args).map_err(|err| {
                (
                    format!("{}{}{}", Tag::Dargo, Tag::Docs, err.0,),
                    CliErrKind::Docs(err.1),
                )
            })?;
        }
        Commands::New(new_args) => {
            dargo::new::new_project(None, new_args)
                .map_err(|err| (format!("{}{}", Tag::Dargo, err.0,), CliErrKind::New(err.1)))?;
        }
    }
    Ok(())
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/dargo/compile_test.rs | src/dargo/compile_test.rs | use colored::Colorize;
use duckwind::EmitEnv;
use lazy_static::lazy_static;
use std::{ffi::OsString, fs, path::PathBuf, sync::mpsc, time::Duration};
use crate::{
DARGO_DOT_DIR,
cli::go_cli::{self, GoCliErrKind},
dargo::cli::CompileArgs,
emit::{ir::join_ir, types::escape_string_for_go},
lex,
parse::value_parser::empty_range,
parse_src_file,
tags::Tag,
typecheck, write_in_duck_dotdir,
};
/// Error cases surfaced by the test-harness compiler (`dargo test <file>`).
#[derive(Debug)]
pub enum CompileTestErrKind {
    /// The file name could not be extracted or was not valid UTF-8.
    CorruptedFileName,
    /// The given path is a directory (also reused for non-`.duck` paths).
    TargetPathIsDirectory,
    /// NOTE(review): declared but never constructed in this file.
    FileNotFound,
    /// Reading the source file from disk failed.
    CannotReadFile,
    /// Invoking the Go toolchain failed.
    GoCli(GoCliErrKind),
}
lazy_static! {
    /// Pre-rendered " compile test " tag for log/error message prefixes.
    // NOTE(review): `std::sync::LazyLock` could replace `lazy_static` if the
    // crate's MSRV allows it.
    static ref COMPILE_TEST_TAG: String = " compile test "
        .on_bright_black()
        .bright_white()
        .to_string();
}
/// Result of a successful test-harness compile run.
pub struct CompileOutput {
    /// Path to the produced test executable.
    pub binary_path: PathBuf,
}
pub fn compile(compile_args: CompileArgs) -> Result<CompileOutput, (String, CompileTestErrKind)> {
let src_file: PathBuf = compile_args.file;
let binary_output_name: Option<String> = compile_args.output_name;
if src_file.is_dir() {
let message = format!(
"{}{} the path you provided is a directory. You need to provide a .duck file",
*COMPILE_TEST_TAG,
Tag::Err,
);
return Err((message, CompileTestErrKind::TargetPathIsDirectory));
}
if src_file
.extension()
.ok_or_else(|| {
format!(
"{}{} couldn't extract file extension from provided source file",
*COMPILE_TEST_TAG,
Tag::Err,
)
})
.unwrap()
!= "duck"
{
let message = format!(
"{}{} the path you provided is not a valid duck source file. You need to provide a .duck file",
*COMPILE_TEST_TAG,
Tag::Err,
);
return Err((message, CompileTestErrKind::TargetPathIsDirectory));
}
let src_file_name: &'static str = src_file
.file_name()
.ok_or_else(|| {
(
format!(
"{}{} couldn't get the filename from given ",
*COMPILE_TEST_TAG,
Tag::Err
),
CompileTestErrKind::CorruptedFileName,
)
})?
.to_str()
.ok_or_else(|| {
(
format!(
"{}{} the filename is an invalid utf-8 string",
*COMPILE_TEST_TAG,
Tag::Err
),
CompileTestErrKind::CorruptedFileName,
)
})?
.to_string()
.leak();
let src_file_file_contents: &'static str = fs::read_to_string(&src_file)
.map_err(|err| {
(
format!(
"{}{} couldn't read file '{}'. msg='{}'",
*COMPILE_TEST_TAG,
Tag::Err,
src_file.to_string_lossy().bright_blue(),
err.to_string().bright_red()
),
CompileTestErrKind::CannotReadFile,
)
})?
.to_string()
.leak();
let tokens = lex(src_file_name, src_file_file_contents);
let mut src_file_ast = parse_src_file(&src_file, src_file_name, src_file_file_contents, tokens);
src_file_ast
.use_statements
.push(crate::parse::use_statement_parser::UseStatement::Go(
"io".to_string(),
None,
));
src_file_ast
.use_statements
.push(crate::parse::use_statement_parser::UseStatement::Go(
"bufio".to_string(),
None,
));
src_file_ast
.use_statements
.push(crate::parse::use_statement_parser::UseStatement::Go(
"bytes".to_string(),
None,
));
src_file_ast
.use_statements
.push(crate::parse::use_statement_parser::UseStatement::Go(
"sync".to_string(),
None,
));
let (tailwind_worker_send, tailwind_worker_receive) = mpsc::channel::<String>();
let (tailwind_result_send, tailwind_result_receive) = mpsc::channel::<String>();
let tailwind_prefix = None::<String>;
std::thread::spawn(move || {
let mut emit_env = EmitEnv::new_with_default_config();
// emit_env.parse_full_string(src_file_file_contents);
loop {
let s = tailwind_worker_receive.recv();
match s {
Ok(s) => emit_env.parse_full_string(tailwind_prefix.as_deref(), s.as_str()),
Err(_) => break,
}
}
let _ = tailwind_result_send.send(emit_env.to_css_stylesheet(true));
});
let mut type_env = typecheck(&mut src_file_ast, &tailwind_worker_send);
let maybe_main_fn = src_file_ast
.function_definitions
.iter_mut()
.find(|fun_def| fun_def.name == "main");
if let Some(main_fn) = maybe_main_fn {
main_fn.name = "____thrown_away_main_LOL".to_string();
}
let test_source = src_file_ast
.test_cases
.iter()
.map(|test_case| {
format!(
r#"
DuckTestCase {{
name: "{}",
test_case_fn: func() {{
fmt.Println("Running Test \"{}\"")
time.Sleep(1000 * time.Millisecond)
{}
}},
}},
"#,
test_case.name,
test_case.name,
join_ir(
&test_case
.body
.0
.emit(
&mut type_env,
&mut crate::emit::value::ToIr {
var_counter: 0,
per_var_counter: vec![Default::default()],
labels: Vec::new(),
},
test_case.body.1
)
.0
)
)
})
.collect::<Vec<_>>()
.join("\n");
let main_fn = format!(
r#"
type test_case_fn func()
type DuckTestCase struct {{
name string
test_case_fn test_case_fn
}}
func spinner(wg *sync.WaitGroup, writer *bufio.Writer, stop chan bool, testName string) {{
defer wg.Done()
animation := `|/-\`
i := 0
for {{
select {{
case <-stop:
fmt.Fprintf(writer, "\r")
writer.Flush()
return
default:
fmt.Fprintf(writer, "\r[%c] Running test: %s", animation[i], testName)
writer.Flush()
i = (i + 1) % len(animation)
time.Sleep(100 * time.Millisecond)
}}
}}
}}
func main() {{
fmt.Println("tests")
tests := []DuckTestCase {{
{test_source}
}}
writer := bufio.NewWriter(os.Stdout)
for _, test := range tests {{
func (currentTest DuckTestCase) {{
var wg sync.WaitGroup
stop := make(chan bool)
wg.Add(1)
go spinner(&wg, writer, stop, currentTest.name)
originalStdout := os.Stdout
r, w, _ := os.Pipe()
os.Stdout = w
var testErr interface{{}}
func() {{
defer func() {{
testErr = recover()
}}()
currentTest.test_case_fn()
}}()
var capturedOutput bytes.Buffer
w.Close()
io.Copy(&capturedOutput, r)
os.Stdout = originalStdout
stop <- true
wg.Wait()
if testErr != nil {{
fmt.Fprintf(writer, "[\x1b[31m✘\x1b[0m] Test failed: %s\n", currentTest.name)
}} else {{
fmt.Fprintf(writer, "[\x1b[32m✔\x1b[0m] Test successful: %s\n", currentTest.name)
}}
fmt.Fprint(writer, capturedOutput.String())
writer.Flush()
}}(test)
}}
}}"#,
);
let mut go_code = format!(
"{}\n\n{main_fn}",
join_ir(&src_file_ast.emit("main".into(), &mut type_env, empty_range()))
);
// drop the sender here so that the thread knows it should emit the final tailwind
drop(tailwind_worker_send);
let css = tailwind_result_receive
.recv_timeout(Duration::from_secs(30))
.expect("tailwind timed out");
go_code = format!(
"{go_code}\nconst TAILWIND_STR = \"{}\"",
escape_string_for_go(css.as_str())
);
let go_output_file =
write_in_duck_dotdir(format!("{src_file_name}.gen.test.go").as_str(), &go_code);
if compile_args.optimize_go {
let _ = go_cli::format(go_output_file.as_path());
}
let executable_path = go_cli::build(
&DARGO_DOT_DIR,
binary_output_name
.map(OsString::from)
.unwrap_or(OsString::from("duck_out"))
.as_os_str(),
&go_output_file,
)
.map_err(|err| {
(
format!("{}{}", *COMPILE_TEST_TAG, err.0),
CompileTestErrKind::GoCli(err.1),
)
})?;
println!(
"{}{}{} Successfully compiled binary",
Tag::Dargo,
*COMPILE_TEST_TAG,
Tag::Check,
);
return Ok(CompileOutput {
binary_path: executable_path,
});
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/dargo/build.rs | src/dargo/build.rs | use std::io::{self, ErrorKind as IOErrKind};
use std::path::{Path, PathBuf};
use std::{env, fs};
use crate::DARGO_DOT_DIR;
use crate::cli::git_cli::{self, GitCliErrKind};
use crate::dargo::cli::CompileArgs;
use crate::tags::Tag;
use super::cli::BuildArgs;
use super::compile::{self, CompileErrKind};
use super::loader::{ProjectLoadErrKind, load_dargo_config};
/// Error cases surfaced by `dargo build`.
#[derive(Debug)]
pub enum BuildErrKind {
    /// `dargo.toml` could not be loaded/parsed.
    CargoConfigLoad(ProjectLoadErrKind),
    /// Cloning/pulling a git dependency failed.
    DependencyPull(GitCliErrKind),
    /// A pulled dependency is missing `dargo.toml` or `src/`.
    DependencySetup,
    /// Underlying filesystem failure.
    IOErr(IOErrKind),
    /// Compiling a binary target failed.
    Compile(CompileErrKind),
}
/// Result of a successful build: one `(target name, executable path)` pair
/// per compiled binary.
pub struct BuildOutput {
    pub binaries: Vec<(String, PathBuf)>,
}
/// Entry point of `dargo build`.
///
/// Pulls the git dependencies declared in `dargo.toml`, symlinks each
/// dependency's `src` directory into `.dargo/project/`, copies the workspace
/// sources over, then compiles either every declared binary target or the
/// default `main.duck`.
pub fn build(build_args: &BuildArgs) -> Result<BuildOutput, (String, BuildErrKind)> {
    // this is to ensure that the dargo dot dir exists
    _ = DARGO_DOT_DIR.clone();
    let dargo_config =
        load_dargo_config(None).map_err(|err| (err.0, BuildErrKind::CargoConfigLoad(err.1)))?;
    if let Some(dependencies) = dargo_config.dependencies {
        for (git_uri, _) in dependencies.iter() {
            // Fix: dependency keys are "<owner>/<repository>"; a key without a
            // slash used to panic via `split("/").collect::<Vec<_>>()[1]`.
            let Some(module_name) = git_uri.split('/').nth(1) else {
                return Err((
                    format!(
                        "{}{} invalid dependency '{git_uri}', expected the form <owner>/<repository>",
                        Tag::Dependency,
                        Tag::Setup,
                    ),
                    BuildErrKind::DependencySetup,
                ));
            };
            let git_dir_path = Path::new(&format!("./.dargo/git/{module_name}")).to_path_buf();
            git_cli::pull_repository(&format!("https://github.com/{git_uri}"), &git_dir_path)
                .map_err(|err| (err.0, BuildErrKind::DependencyPull(err.1)))?;
            let dargo_toml_path = {
                let mut git_dir_path_clone = git_dir_path.clone();
                git_dir_path_clone.push("dargo.toml");
                git_dir_path_clone
            };
            if !dargo_toml_path.exists() {
                return Err((
                    format!(
                        "{}{} the remote dependency {module_name} doesn't contain a dargo.toml",
                        Tag::Dependency,
                        Tag::Setup,
                    ),
                    BuildErrKind::DependencySetup,
                ));
            }
            let src_dir_path = {
                let mut git_dir_path_clone = git_dir_path.clone();
                git_dir_path_clone.push("src");
                git_dir_path_clone
            };
            if !src_dir_path.exists() {
                return Err((
                    format!(
                        "{}{} the remote dependency {module_name} doesn't contain a src directory",
                        Tag::Dependency,
                        Tag::Setup,
                    ),
                    BuildErrKind::DependencySetup,
                ));
            }
            let mut current_dir = env::current_dir().map_err(|err| {
                (
                    // Fix: typo "coulnd't" -> "couldn't".
                    format!("{}{} couldn't read current dir", Tag::IO, Tag::Err,),
                    BuildErrKind::IOErr(err.kind()),
                )
            })?;
            current_dir.push(format!(".dargo/project/{module_name}"));
            let target_dir = current_dir;
            // NOTE(review): removing a symlink via `remove_dir_all` — confirm
            // this behaves as intended on all supported platforms.
            if target_dir.is_symlink() {
                fs::remove_dir_all(target_dir.clone()).map_err(|err| {
                    (
                        format!(
                            "{}{} couldn't remove existing symlink to {module_name} library.",
                            Tag::IO,
                            Tag::Err,
                        ),
                        BuildErrKind::IOErr(err.kind()),
                    )
                })?;
            }
            // `env::current_dir()` already returns an owned PathBuf; the
            // previous trailing `.clone()` was redundant and has been dropped.
            let mut current_dir = env::current_dir().map_err(|err| {
                (
                    // Fix: typo "coulnd't" -> "couldn't".
                    format!("{}{} couldn't get current dir! - {err}", Tag::IO, Tag::Err,),
                    BuildErrKind::IOErr(err.kind()),
                )
            })?;
            current_dir.push(format!(".dargo/git/{module_name}/src"));
            let absolute_src_dir: PathBuf = current_dir;
            create_symlink(absolute_src_dir, target_dir).map_err(|err| {
                (
                    format!("{}{} error creating symlink - {err}", Tag::IO, Tag::Err,),
                    BuildErrKind::IOErr(err.kind()),
                )
            })?;
        }
    }
    let copy_target = Path::new(".dargo/project/");
    copy_dir_all(Path::new("./src"), copy_target)?;
    // Explicit `[[bin]]` targets take precedence over the default main.duck.
    if !dargo_config.binaries.is_empty() {
        let mut binaries = vec![];
        for target_binary in dargo_config.binaries {
            let mut copy_target_clone = copy_target.to_path_buf();
            copy_target_clone.push(target_binary.file);
            let compile_output = compile::compile(CompileArgs {
                file: copy_target_clone,
                output_name: Some(target_binary.name.clone()),
                optimize_go: build_args.optimize_go,
            })
            .map_err(|err| {
                (
                    format!(
                        "{}{} couldn't compile the code\n{}",
                        Tag::Build,
                        Tag::Err,
                        err.0,
                    ),
                    BuildErrKind::Compile(err.1),
                )
            })?;
            binaries.push((target_binary.name, compile_output.binary_path))
        }
        return Ok(BuildOutput { binaries });
    }
    let mut copy_target_clone = copy_target.to_path_buf();
    copy_target_clone.push("main.duck");
    let compile_output = compile::compile(CompileArgs {
        file: copy_target_clone,
        output_name: build_args.output_name.clone(),
        optimize_go: build_args.optimize_go,
    })
    .map_err(|err| {
        (
            format!(
                "{}{} couldn't compile the code\n{}",
                Tag::Build,
                Tag::Err,
                err.0,
            ),
            BuildErrKind::Compile(err.1),
        )
    })?;
    Ok(BuildOutput {
        binaries: vec![("default_target".to_string(), compile_output.binary_path)],
    })
}
fn copy_dir_all(
src: impl AsRef<Path>,
dst: impl AsRef<Path>,
) -> Result<(), (String, BuildErrKind)> {
fs::create_dir_all(&dst).map_err(|err| {
(
format!("{}{} couldn't copy files - {err}", Tag::IO, Tag::Err,),
BuildErrKind::IOErr(err.kind()),
)
})?;
let dirs = fs::read_dir(src).map_err(|err| {
(
format!(
"{}{} couldn't read directory files - {err}",
Tag::IO,
Tag::Err,
),
BuildErrKind::IOErr(err.kind()),
)
})?;
for entry in dirs {
let entry = entry.map_err(|err| {
(
format!("{}{} couldn't read file - {err}", Tag::IO, Tag::Err,),
BuildErrKind::IOErr(err.kind()),
)
})?;
let file_type = entry.file_type().map_err(|err| {
(
format!("{}{} couldn't read file type - {err}", Tag::IO, Tag::Err,),
BuildErrKind::IOErr(err.kind()),
)
})?;
if file_type.is_dir() {
copy_dir_all(entry.path(), dst.as_ref().join(entry.file_name()))?;
} else {
fs::copy(entry.path(), dst.as_ref().join(entry.file_name())).map_err(|err| {
(
format!("{}{} couldn't copy file - {err}", Tag::IO, Tag::Err,),
BuildErrKind::IOErr(err.kind()),
)
})?;
}
}
Ok(())
}
/// Creates a symbolic link at `link` pointing to `original`, papering over
/// the per-platform std APIs.
///
/// NOTE(review): on Windows this uses `symlink_file`; `build` links *source
/// directories* through this function, and `symlink_file` may not be
/// appropriate for directory targets there — confirm whether `symlink_dir`
/// is needed.
pub fn create_symlink<OriginalG: AsRef<Path>, LinkG: AsRef<Path>>(
    original: OriginalG,
    link: LinkG,
) -> io::Result<()> {
    #[cfg(unix)]
    {
        std::os::unix::fs::symlink(original, link)
    }
    #[cfg(windows)]
    {
        std::os::windows::fs::symlink_file(original, link)
    }
    // Any other platform gets an explicit "unsupported" error instead of a
    // compile failure.
    #[cfg(not(any(unix, windows)))]
    {
        Err(io::Error::new(
            io::ErrorKind::Unsupported,
            "platform not supported",
        ))
    }
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/dargo/mod.rs | src/dargo/mod.rs | pub mod build;
pub mod clean;
pub mod cli;
pub mod compile;
pub mod compile_test;
pub mod docs;
pub mod init;
pub mod loader;
pub mod new;
pub mod run;
pub mod test;
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/dargo/run.rs | src/dargo/run.rs | use colored::Colorize;
use lazy_static::lazy_static;
use std::io::ErrorKind as IOErrKind;
use std::process::Command;
use crate::dargo::cli::{CompileArgs, RunArgs};
use crate::dargo::compile::{CompileErrKind, compile};
use crate::{
dargo::build::{BuildErrKind, build},
tags::Tag,
};
/// Failure categories for `dargo run`; returned alongside a pre-rendered
/// message string.
#[derive(Debug)]
pub enum RunErrKind {
    /// Building the workspace failed; wraps the underlying build error kind.
    BuildErr(BuildErrKind),
    /// Multiple binaries were built but no `--bin` target was specified.
    MissingTargetBinary,
    /// The requested binary (or any binary at all) was not produced by the build.
    NoBinaryFound,
    /// Compiling a single `--file` failed; wraps the compiler error kind.
    CompileErr(CompileErrKind),
    /// OS-level failure (canonicalize, spawn, or wait on the child process).
    IOErr(IOErrKind),
    /// Catch-all variant; not constructed anywhere in this file.
    Unknown(),
}
lazy_static! {
    // Pre-rendered " compile " badge for log output.
    // NOTE(review): not referenced anywhere in this file — confirm before removing.
    static ref COMPILE_TAG: String = " compile ".on_bright_black().bright_white().to_string();
}
/// Runs `dargo run`: builds (or compiles) the target and executes it.
///
/// Two modes:
/// * `--file <path>`: compiles just that file via [`compile`] and runs the
///   produced binary.
/// * workspace mode (default): builds the workspace via [`build`], resolves
///   which binary to run (`--bin` is required when more than one exists),
///   and executes it.
///
/// # Errors
///
/// Returns a rendered message plus a [`RunErrKind`] describing whether the
/// build/compile step, binary resolution, or process management failed.
pub fn run(run_args: &RunArgs) -> Result<(), (String, RunErrKind)> {
    // Single-file mode: compile just this file, then execute the result.
    if let Some(run_args_file) = run_args.file.clone() {
        let compile_result = compile(CompileArgs {
            file: run_args_file.clone(),
            output_name: None,
            optimize_go: run_args.optimize_go,
        })
        .map_err(|err| {
            (
                format!(
                    "{}{} couldn't compile the code\n{}",
                    Tag::Build,
                    Tag::Err,
                    err.0
                ),
                RunErrKind::CompileErr(err.1),
            )
        })?;
        let full_path_name = spawn_and_wait(&compile_result.binary_path)?;
        println!(
            "{}{}{} Successfully run executable output of {} which is located at {}",
            Tag::Dargo,
            Tag::Run,
            Tag::Check,
            run_args_file.to_string_lossy(),
            full_path_name.to_string_lossy()
        );
        return Ok(());
    }
    // Workspace mode: build everything, then pick the binary to execute.
    let build_result = build(&crate::dargo::cli::BuildArgs {
        bin: run_args.bin.clone(),
        output_name: None,
        optimize_go: run_args.optimize_go,
    })
    .map_err(|err| {
        (
            format!(
                "{}{} couldn't build the code\n{}",
                Tag::Build,
                Tag::Err,
                err.0
            ),
            RunErrKind::BuildErr(err.1),
        )
    })?;
    let binary_path = if build_result.binaries.len() > 1 {
        // Several binaries exist, so the caller must disambiguate via --bin.
        let Some(binary_name) = &run_args.bin else {
            return Err((
                format!(
                    "{}{} missing target binary to run. multiple binaries are available, specify using --bin <binary_name>\n",
                    Tag::Build,
                    Tag::Err,
                ),
                RunErrKind::MissingTargetBinary,
            ));
        };
        let Some(binary) = build_result
            .binaries
            .iter()
            .find(|binary| *binary.0 == *binary_name)
        else {
            return Err((
                format!(
                    "{}{} specified binary {} not found",
                    Tag::Build,
                    Tag::Err,
                    binary_name
                ),
                RunErrKind::NoBinaryFound,
            ));
        };
        binary.1.clone()
    } else {
        // Zero or one binary: take the first, or fail when none was built.
        let Some(first_binary) = build_result.binaries.first() else {
            return Err((
                format!("{}{} missing target binary to run.", Tag::Build, Tag::Err,),
                RunErrKind::NoBinaryFound,
            ));
        };
        first_binary.1.clone()
    };
    let full_path_name = spawn_and_wait(&binary_path)?;
    println!(
        "{}{}{} Successfully run executable output of current workspace which is located at {}",
        Tag::Dargo,
        Tag::Run,
        Tag::Check,
        full_path_name.to_string_lossy()
    );
    Ok(())
}

/// Canonicalizes `binary_path`, spawns it as a child process, and waits for
/// it to exit. Returns the canonical path so callers can report it.
/// Shared by both the single-file and workspace branches of [`run`].
fn spawn_and_wait(
    binary_path: &std::path::Path,
) -> Result<std::path::PathBuf, (String, RunErrKind)> {
    let full_path_name = binary_path.canonicalize().map_err(|err| {
        (
            format!(
                "{}{} couldn't canonicalize path name of just compiled duck binary",
                Tag::IO,
                Tag::Err
            ),
            RunErrKind::IOErr(err.kind()),
        )
    })?;
    Command::new(&full_path_name)
        .spawn()
        .map_err(|err| {
            (
                format!(
                    "{}{}{} couldn't spawn duck process",
                    Tag::Run,
                    Tag::IO,
                    Tag::Err
                ),
                RunErrKind::IOErr(err.kind()),
            )
        })?
        .wait()
        .map_err(|err| {
            (
                format!(
                    "{}{}{} couldn't wait for process",
                    Tag::Run,
                    Tag::IO,
                    Tag::Err
                ),
                RunErrKind::IOErr(err.kind()),
            )
        })?;
    Ok(full_path_name)
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/dargo/init.rs | src/dargo/init.rs | use colored::Colorize;
use lazy_static::lazy_static;
use std::{
fs::{self, create_dir},
path::{Path, PathBuf},
};
use crate::{dargo::cli::InitArgs, tags::Tag};
/// Failure categories for `dargo init`.
#[derive(Debug)]
pub enum InitErrKind {
    /// A file could not be written to disk.
    CannotWriteFile,
    /// A dargo.toml already exists at the target location.
    DargoTomlAlreadyExists,
}
lazy_static! {
    // Pre-rendered " init " badge used as a log prefix by this module.
    static ref INIT_TAG: String = " init ".on_purple().bright_white().to_string();
}
/// Renders the default `dargo.toml` contents for a freshly initialized
/// project named `project_name`.
///
/// The result has exactly four lines — project name, starting version
/// `0.0.1`, a blank separator, and an empty `[dependencies]` section —
/// with no leading or trailing whitespace.
pub fn generate_default_dargo_toml(project_name: impl Into<String>) -> String {
    let lines = [
        format!("name = \"{}\"", project_name.into()),
        "version = \"0.0.1\"".to_string(),
        String::new(),
        "[dependencies]".to_string(),
    ];
    lines.join("\n")
}
/// Returns the source of the default `src/main.duck` stub: a minimal
/// "Hello, World!" program.
pub fn generate_default_main_duck() -> String {
    [
        "use std::io::{println};",
        "",
        "fn main() {",
        "    println(\"Hello, World!\");",
        "}",
    ]
    .join("\n")
}
/// Initializes a duck project in the current working directory.
///
/// Writes a default `dargo.toml` (refusing to overwrite an existing one)
/// and, if `./src` does not exist yet, creates it together with a stub
/// `src/main.duck`.
///
/// `custom_dargo_toml_path` overrides the manifest location and exists for
/// testing purposes; production callers pass `None`.
///
/// # Errors
///
/// * [`InitErrKind::DargoTomlAlreadyExists`] when a manifest is already present.
/// * [`InitErrKind::CannotWriteFile`] when the manifest cannot be written.
pub fn init_project(
    custom_dargo_toml_path: Option<PathBuf>,
    init_args: InitArgs,
) -> Result<(), (String, InitErrKind)> {
    let dargo_toml_path =
        custom_dargo_toml_path.unwrap_or_else(|| Path::new("./dargo.toml").to_path_buf());
    if dargo_toml_path.exists() {
        let message = format!(
            "{}{} dargo.toml already exists in working directory.",
            *INIT_TAG,
            Tag::Err
        );
        return Err((message, InitErrKind::DargoTomlAlreadyExists));
    }
    let dargo_toml_content = generate_default_dargo_toml(
        init_args
            .project_name
            .unwrap_or_else(|| "my project".to_string()),
    );
    fs::write(&dargo_toml_path, dargo_toml_content).map_err(|write_error| {
        let message = format!(
            "{}{} Failed to create default dargo.toml file '{}': {}",
            Tag::Err,
            *INIT_TAG,
            dargo_toml_path.display(),
            write_error
        );
        // Bug fix: a failed write is a write failure, not "already exists".
        (message, InitErrKind::CannotWriteFile)
    })?;
    let src_dir = Path::new("./src").to_path_buf();
    if !src_dir.exists() {
        // Creating the source stub is best-effort: a failure here still
        // leaves a valid manifest behind, so it is deliberately not fatal.
        if create_dir(&src_dir).is_ok() {
            let main_src_file = src_dir.join("main.duck");
            if !main_src_file.exists() {
                // Best-effort as well; write errors are intentionally ignored.
                let _ = fs::write(&main_src_file, generate_default_main_duck());
            }
        }
    }
    Ok(())
}
#[cfg(test)]
pub mod test {
    use crate::dargo::init::generate_default_dargo_toml;
    // Sanity-checks the generated default manifest: it must mention the
    // project name, starting version, and dependencies section, and span
    // exactly four lines (name, version, blank, `[dependencies]`).
    #[test]
    pub fn test_dargo_toml_generation() {
        let output = generate_default_dargo_toml("test");
        assert!(output.contains("test"));
        assert!(output.contains("0.0.1"));
        assert!(output.contains("dependencies"));
        assert_eq!(output.lines().count(), 4);
    }
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
duck-compiler/duckc | https://github.com/duck-compiler/duckc/blob/03febfa849f54a237380dfed2d91a52a2df0313b/src/dargo/loader.rs | src/dargo/loader.rs | use colored::Colorize;
use serde::Deserialize;
use std::collections::HashMap;
use std::fs;
use std::path::{Path, PathBuf};
use toml;
use crate::tags::Tag;
/// A dependency entry in `dargo.toml`, deserialized untagged.
#[derive(Debug, Clone, PartialEq, Deserialize)]
#[serde(untagged)]
pub enum Dependency {
    /// Bare version-string form: `"pkg" = "1.0.0"`.
    WithVersion(String),
    // WithConfig(DependencyConfig)
}
// Manifest shape:
// [[bin]]
// name = "binary-name"
// file = "src/main.duck"
/// One `[[bin]]` table from `dargo.toml`: a named binary target and the
/// source file it is built from.
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct BinaryConfig {
    pub name: String,
    pub file: PathBuf,
}
// this is placed at the top-level
// name = "project-name"
// [[bin]]
// [dependencies]
/// The top-level `dargo.toml` document: project name and version plus
/// optional `[[bin]]` targets and a `[dependencies]` table.
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct ProjectConfig {
    pub name: String,
    pub version: String,
    /// `[[bin]]` entries; defaults to empty when the manifest has none.
    #[serde(rename = "bin", default)]
    pub binaries: Vec<BinaryConfig>,
    /// `[dependencies]` table; `None` when the section is absent.
    pub dependencies: Option<HashMap<String, Dependency>>,
}
/// Failure categories for loading `dargo.toml`.
// NOTE(review): deriving `Deserialize` on an error enum looks unnecessary —
// confirm it is actually deserialized anywhere before relying on it.
#[derive(Debug, Clone, Deserialize)]
pub enum ProjectLoadErrKind {
    /// The manifest exists but could not be read.
    FileRead,
    /// The manifest is not valid TOML / does not match the expected schema.
    TomlParse,
    /// No dargo.toml was found at the expected path.
    MissingDuckToml,
}
/// Loads and parses `dargo.toml` from the current directory.
///
/// `custom_toml_path` overrides the manifest location and exists only for
/// testing purposes at the moment; production callers pass `None`.
///
/// # Errors
///
/// * [`ProjectLoadErrKind::MissingDuckToml`] when the file does not exist.
/// * [`ProjectLoadErrKind::FileRead`] when it cannot be read.
/// * [`ProjectLoadErrKind::TomlParse`] when it is not valid TOML or does
///   not match [`ProjectConfig`].
pub fn load_dargo_config(
    custom_toml_path: Option<PathBuf>,
) -> Result<ProjectConfig, (String, ProjectLoadErrKind)> {
    // `unwrap_or_else` keeps the default-path allocation lazy.
    let path = custom_toml_path.unwrap_or_else(|| Path::new("dargo.toml").to_path_buf());
    if !path.exists() {
        let message = [
            format!("{} {}", Tag::Err, "Couldn't locate dargo.toml in current directory."),
            // typo fix in user-facing text: "an bug" -> "a bug"
            format!("\n{} If you feel like this is a bug, please reach out to us on one of our official channels or create an issue on our github page.", Tag::Note),
            format!(" {} https://x.com/ducklang", Tag::Twitter),
            format!(" {} https://github.com/duck-compiler/duckc", Tag::GitHub),
        ].join("\n");
        return Err((message, ProjectLoadErrKind::MissingDuckToml));
    }
    let file_content = fs::read_to_string(path).map_err(|read_error| {
        let message = format!("{} Couldn't read dargo.toml.\n -> {read_error}", Tag::Err,);
        (message, ProjectLoadErrKind::FileRead)
    })?;
    let project_config = toml::from_str(&file_content).map_err(|parse_error| {
        let message = format!(
            "{} {} Couldn't parse dargo.toml file.\n -> {parse_error}",
            Tag::Err,
            " TOML ".on_yellow().bright_white(),
        );
        (message, ProjectLoadErrKind::TomlParse)
    })?;
    Ok(project_config)
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;
    // Helper: asserts `dependencies[name]` exists and is the bare
    // version-string form carrying exactly `version`.
    fn assert_dependency_with_version(
        dependencies: &HashMap<String, Dependency>,
        name: &str,
        version: &str,
    ) {
        assert!(dependencies.contains_key(name));
        let dep = dependencies.get(name).unwrap();
        assert!(matches!(dep, Dependency::WithVersion(..)));
        // Irrefutable pattern while `WithVersion` is the only variant.
        let Dependency::WithVersion(actual_version) = dep;
        assert_eq!(*actual_version, version);
    }
    // Helper: writes `content` into the OS temp dir under `file_name` and
    // returns the path; callers remove the file themselves.
    fn create_temp_file(file_name: &str, content: &str) -> PathBuf {
        let mut path = std::env::temp_dir();
        path.push(file_name);
        let mut file = fs::File::create(&path).unwrap();
        file.write_all(content.as_bytes()).unwrap();
        path
    }
    // Happy path: name/version, two [[bin]] targets, one dependency.
    #[test]
    fn test_load_project_env_valid_config() {
        let toml_content = r#"
            name="My Project"
            version="1.0.0"
            [[bin]]
            name = "example_binary"
            file = "./src/main.duck"
            [[bin]]
            name = "another_binary"
            file = "./src/another.duck"
            [dependencies]
            "some/fetch" = "1.0.0"
        "#;
        let file_path = create_temp_file("valid_dargo.toml", toml_content);
        let result = load_dargo_config(Some(file_path.clone()));
        assert!(result.is_ok());
        let config = result.unwrap();
        assert_eq!(config.name, "My Project");
        assert_eq!(config.binaries.len(), 2);
        assert_eq!(config.binaries[0].name, "example_binary");
        assert_eq!(config.binaries[0].file, PathBuf::from("./src/main.duck"));
        assert_eq!(config.binaries[1].name, "another_binary");
        assert_eq!(config.binaries[1].file, PathBuf::from("./src/another.duck"));
        assert!(matches!(config.dependencies, Some(..)));
        let Some(dependencies) = config.dependencies else {
            unreachable!()
        };
        assert_eq!(dependencies.len(), 1);
        assert_dependency_with_version(&dependencies, "some/fetch", "1.0.0");
        let _ = fs::remove_file(file_path);
    }
    // Same fixture as above, but additionally checks the parsed version field.
    #[test]
    fn test_load_project_env_valid_config_with_versions() {
        let toml_content = r#"
            name="My Project"
            version="1.0.0"
            [[bin]]
            name = "example_binary"
            file = "./src/main.duck"
            [[bin]]
            name = "another_binary"
            file = "./src/another.duck"
            [dependencies]
            "some/fetch" = "1.0.0"
        "#;
        let file_path = create_temp_file("valid_dargo_versions.toml", toml_content);
        let result = load_dargo_config(Some(file_path.clone()));
        assert!(result.is_ok());
        let config = result.unwrap();
        assert_eq!(config.name, "My Project");
        assert_eq!(config.version, "1.0.0");
        assert_eq!(config.binaries.len(), 2);
        assert_eq!(config.binaries[0].name, "example_binary");
        assert_eq!(config.binaries[0].file, PathBuf::from("./src/main.duck"));
        assert_eq!(config.binaries[1].name, "another_binary");
        assert_eq!(config.binaries[1].file, PathBuf::from("./src/another.duck"));
        assert!(matches!(config.dependencies, Some(..)));
        let Some(dependencies) = config.dependencies else {
            unreachable!()
        };
        assert_eq!(dependencies.len(), 1);
        assert_dependency_with_version(&dependencies, "some/fetch", "1.0.0");
        let _ = fs::remove_file(file_path);
    }
    // A path that does not exist must yield MissingDuckToml.
    #[test]
    fn test_load_project_env_missing_file() {
        let file_path = PathBuf::from("non_existent_dargo.toml");
        let result = load_dargo_config(Some(file_path.clone()));
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(matches!(err, (.., ProjectLoadErrKind::MissingDuckToml)));
    }
    // Syntactically invalid TOML must yield TomlParse.
    #[test]
    fn test_load_project_env_malformed_toml() {
        let malformed_content = r#"
            name="My Project"
            [[bin]]
            name = "example_binary"
            file = "./src/main.duck"
            [dependencies]
            test
        "#
        .trim();
        let file_path = create_temp_file("malformed_dargo.toml", malformed_content);
        let result = load_dargo_config(Some(file_path.clone()));
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(matches!(err, (.., ProjectLoadErrKind::TomlParse)));
        fs::remove_file(file_path).unwrap();
    }
    // [[bin]] and [dependencies] are optional; their absence parses fine.
    #[test]
    fn test_load_project_env_missing_optional_sections() {
        let toml_content = "name=\"My Project Only\"\nversion=\"1.0.0\"";
        let file_path = create_temp_file("optional_dargo.toml", toml_content);
        let result = load_dargo_config(Some(file_path.clone()));
        // (redundant rebinding kept as-is)
        let result = result;
        assert!(result.is_ok());
        let config = result.unwrap();
        assert_eq!(config.name, "My Project Only");
        assert!(config.binaries.is_empty());
        assert!(config.dependencies.is_none());
        fs::remove_file(file_path).unwrap();
    }
    // Empty input lacks the required fields, so parsing must fail.
    #[test]
    fn test_load_project_env_empty_file() {
        let toml_content = r#""#;
        let file_path = create_temp_file("empty_dargo.toml", toml_content);
        let result = load_dargo_config(Some(file_path.clone()));
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(matches!(err, (.., ProjectLoadErrKind::TomlParse)));
        fs::remove_file(file_path).unwrap();
    }
    // Quoted, slash-containing dependency keys parse into distinct entries.
    #[test]
    fn test_load_project_env_complex_dependency_urls() {
        let toml_content = r#"
            name="Project With Complex Dependencies"
            version="1.0.0"
            [dependencies]
            "first/fetch" = "3.0.0"
            "second/fetch" = "2.0.0"
            "third/fetch" = "1.0.0"
        "#;
        let file_path = create_temp_file("complex_deps_dargo.toml", toml_content);
        let result = load_dargo_config(Some(file_path.clone()));
        assert!(result.is_ok());
        let config = result.unwrap();
        assert_eq!(config.name, "Project With Complex Dependencies");
        let Some(dependencies) = config.dependencies else {
            unreachable!()
        };
        assert_eq!(dependencies.len(), 3);
        assert_dependency_with_version(&dependencies, "first/fetch", "3.0.0");
        assert_dependency_with_version(&dependencies, "second/fetch", "2.0.0");
        assert_dependency_with_version(&dependencies, "third/fetch", "1.0.0");
        fs::remove_file(file_path).unwrap();
    }
}
| rust | MIT | 03febfa849f54a237380dfed2d91a52a2df0313b | 2026-01-04T20:22:21.418266Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.