repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/proof/prover.rs | verkle-trie/src/proof/prover.rs | use super::{VerificationHint, VerkleProof};
use crate::{
constants::{CRS, PRECOMPUTED_WEIGHTS},
database::ReadOnlyHigherDb,
errors::ProofCreationError,
proof::opening_data::{OpeningData, Openings},
};
use ipa_multipoint::{
multiproof::{MultiPoint, ProverQuery},
transcript::Transcript,
};
use itertools::Itertools;
use std::collections::BTreeSet;
/// Creates a [`VerkleProof`] showing that each of `keys` is either present or
/// absent in the trie backed by `storage`.
///
/// # Errors
/// - [`ProofCreationError::EmptyKeySet`] if `keys` is empty.
/// - [`ProofCreationError::ExpectedOneQueryAgainstRoot`] if no query against
///   the root node was produced.
pub fn create_verkle_proof<Storage: ReadOnlyHigherDb>(
    storage: &Storage,
    keys: Vec<[u8; 32]>,
) -> Result<VerkleProof, ProofCreationError> {
    if keys.is_empty() {
        return Err(ProofCreationError::EmptyKeySet);
    }
    let (queries, verification_hint) = create_prover_queries(storage, keys);

    // Commitments without duplicates and without the root, (implicitly) sorted by path, since the queries were
    // processed by path order
    let root_query = match queries.first() {
        Some(query) => query,
        None => return Err(ProofCreationError::ExpectedOneQueryAgainstRoot),
    };
    let root_comm = root_query.commitment;

    let comms_sorted: Vec<_> = queries
        .iter()
        // Filter out the root commitment
        .filter(|query| query.commitment != root_comm)
        // Pull out the commitments from each query
        .map(|query| query.commitment)
        // De-duplicate: queries are path-ordered, so equal commitments are
        // adjacent and `dedup` (consecutive-duplicate removal) suffices
        .dedup()
        .collect();

    let mut transcript = Transcript::new(b"vt");
    let proof = MultiPoint::open(CRS.clone(), &PRECOMPUTED_WEIGHTS, &mut transcript, queries);

    Ok(VerkleProof {
        comms_sorted,
        verification_hint,
        proof,
    })
}
// First we need to produce all of the key paths for a key
// We can do some caching here to save memory, in particular if we fetch the same node more than once
// we just need to save it once.
//
// Notes on this abstraction, since a stem always comes with an extension, we can abstract this away
// An extension always has two openings, so we can also abstract this away (1, stem)
//
/// Collects the opening data for `keys` and turns it into polynomial prover
/// queries plus the [`VerificationHint`] the verifier needs to reconstruct the
/// trie layout (per-stem depths, extension statuses, and stems that are in the
/// trie but carry no proof of their own).
///
/// # Panics
/// Panics if `keys` is empty.
pub(super) fn create_prover_queries<Storage: ReadOnlyHigherDb>(
    storage: &Storage,
    keys: Vec<[u8; 32]>,
) -> (Vec<ProverQuery>, VerificationHint) {
    assert!(!keys.is_empty(), "cannot create a proof without keys");

    let opening_data = OpeningData::collect_opening_data(keys, storage);
    let openings = opening_data.openings;
    let extension_present_by_stem = opening_data.extension_present_by_stem;
    let depths_by_stem = opening_data.depths_by_stem;

    // Process all of the node openings data and create polynomial queries from them
    // We also collect all of the stems which are in the trie, however they do not have their own proofs
    // These are the Openings which are just extensions
    let mut queries = Vec::new();
    // Stems that are in the trie, but don't have their own extension proofs
    let mut diff_stem_no_proof = BTreeSet::new();

    for (path, openings) in &openings {
        match openings {
            Openings::Suffix(so) => queries.extend(so.open_query(storage)),
            Openings::Branch(bo) => queries.extend(bo.open_query(path, storage)),
            Openings::Extension(eo) => {
                diff_stem_no_proof.insert(eo.stem);
                queries.extend(eo.open_query(false, false));
            }
        }
    }

    // Values to help the verifier reconstruct the trie and verify the proof
    let depths: Vec<_> = depths_by_stem.into_values().collect();
    let extension_present: Vec<_> = extension_present_by_stem.into_values().collect();

    (
        queries,
        VerificationHint {
            depths,
            extension_present,
            diff_stem_no_proof,
        },
    )
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/proof/stateless_updater.rs | verkle-trie/src/proof/stateless_updater.rs | use crate::constants::TWO_POW_128;
use crate::{errors::VerificationError, group_to_field, proof::ExtPresent};
use banderwagon::{trait_defs::*, Element, Fr};
use ipa_multipoint::committer::Committer;
use std::collections::{BTreeMap, HashSet};
use super::{UpdateHint, VerkleProof};
// TODO fix all panics and return Results instead
//
// TODO: This needs to be modified more, ie to return a correct error variant
// and to refactor panics into error variants
/// Verifies `proof` for `(keys, values)` against `root` and, if it verifies,
/// returns the new root commitment after applying `updated_values`.
///
/// `values[i]` is the claimed pre-state value of `keys[i]`; `updated_values[i]`
/// is the post-state value (`None` meaning "unchanged").
///
/// # Errors
/// Returns [`VerificationError::InvalidProof`] when the proof fails to verify,
/// or any error produced while computing the updated root.
pub fn verify_and_update<C: Committer>(
    proof: VerkleProof,
    root: Element,
    keys: Vec<[u8; 32]>,
    values: Vec<Option<[u8; 32]>>,
    updated_values: Vec<Option<[u8; 32]>>,
    commiter: C,
) -> Result<Element, VerificationError> {
    // TODO: replace Clone with references if possible
    let (ok, update_hint) = proof.check(keys.clone(), values.clone(), root);
    if !ok {
        return Err(VerificationError::InvalidProof);
    }
    let update_hint =
        update_hint.expect("update hint should be `Some` if the proof passes verification");

    // Return the new root
    update_root(update_hint, keys, values, updated_values, root, commiter)
}
/// Statelessly computes the new verkle root commitment after applying
/// `updated_values` to `keys`, using only the data carried in `hint`
/// (no access to trie storage).
///
/// `values[i]` is the pre-state value of `keys[i]`; `updated_values[i]` is the
/// post-state value (`None` means "no change" — that key is skipped).
///
/// # Errors
/// - [`VerificationError::UnexpectedUpdatedLength`] if `values` and
///   `updated_values` have different lengths.
/// - [`VerificationError::MismatchedKeyLength`] if `keys` and
///   `updated_values` have different lengths.
/// - [`VerificationError::DuplicateKeys`] if `keys` contains duplicates.
/// - [`VerificationError::OldValueIsPopulated`] if a stem marked as absent
///   (`ExtPresent::DifferentStem`) claims a pre-state value.
pub(crate) fn update_root<C: Committer>(
    hint: UpdateHint,
    keys: Vec<[u8; 32]>,
    values: Vec<Option<[u8; 32]>>,
    updated_values: Vec<Option<[u8; 32]>>,
    root: Element,
    committer: C,
) -> Result<Element, VerificationError> {
    // BUGFIX: the original guards read `if !values.len() == updated_values.len()`,
    // which bitwise-negates the usize length instead of negating the comparison,
    // so the checks (almost) never fired. `!=` is what was intended.
    if values.len() != updated_values.len() {
        return Err(VerificationError::UnexpectedUpdatedLength(
            values.len(),
            updated_values.len(),
        ));
    }
    if keys.len() != updated_values.len() {
        return Err(VerificationError::MismatchedKeyLength);
    }

    // check that keys are unique
    // Since this is the main place this is used, make sure to exit early as soon as 2 keys are the same
    let keys_unique = has_unique_elements(keys.iter());
    if !keys_unique {
        return Err(VerificationError::DuplicateKeys);
    }

    // TODO Check root against the root in commitments by path

    // Maps suffix -> (old_value, new_value)
    type SuffixUpdate = BTreeMap<u8, (Option<[u8; 32]>, [u8; 32])>;
    // Maps stem -> SuffixUpdate
    let mut updated_stems: BTreeMap<[u8; 31], SuffixUpdate> = BTreeMap::new();

    // First, not all of the keys will need to be updated, so we filter
    // for all of the keys which will need to be updated
    for ((key, old_value), updated_value) in keys.into_iter().zip(values).zip(updated_values) {
        let stem: [u8; 31] = key[0..31].try_into().unwrap();
        let suffix = key[31];

        let updated_value = match updated_value {
            Some(x) => x,
            None => continue, // `None` means "unchanged" — nothing to do for this key
        };

        updated_stems
            .entry(stem)
            .or_default()
            .insert(suffix, (old_value, updated_value));
    }

    // TODO: Prefix can be &'a [u8] instead of Vec<u8> which avoids unnecessary allocations... This may be unneeded when we switch to SmallVec32
    let mut updated_stems_by_prefix: BTreeMap<Vec<u8>, HashSet<[u8; 31]>> = BTreeMap::new();
    let mut updated_commitments_by_stem: BTreeMap<[u8; 31], (Element, Fr)> = BTreeMap::new();

    for (stem, suffix_update) in updated_stems {
        let (ext_pres, depth) = hint.depths_and_ext_by_stem[&stem];
        let prefix = stem[0..depth as usize].to_vec();

        updated_stems_by_prefix
            .entry(prefix.clone())
            .or_default()
            .insert(stem);

        if ext_pres == ExtPresent::Present {
            let ext_path = stem[0..depth as usize].to_vec(); // It is the prefix

            let mut c_1_delta_update = Element::zero();
            let mut c_2_delta_update = Element::zero();

            // TODO abstract this into a function, since it's duplicated
            for (suffix, (old_value, new_value)) in suffix_update {
                // Split values into low_16 and high_16
                let new_value_low_16 = new_value[0..16].to_vec();
                let new_value_high_16 = new_value[16..32].to_vec();

                let (old_value_low_16, old_value_high_16) = match old_value {
                    Some(val) => (
                        Fr::from_le_bytes_mod_order(&val[0..16]) + TWO_POW_128,
                        Fr::from_le_bytes_mod_order(&val[16..32]),
                    ),
                    None => (Fr::zero(), Fr::zero()), // The extension can be present, but its suffix can be missing
                };

                // We need to compute two deltas
                let delta_low =
                    Fr::from_le_bytes_mod_order(&new_value_low_16) + TWO_POW_128 - old_value_low_16;
                let delta_high =
                    Fr::from_le_bytes_mod_order(&new_value_high_16) - old_value_high_16;

                let position = suffix;
                // Suffixes 0..=127 live under C1, 128..=255 under C2
                let is_c1_comm_update = position < 128;
                let pos_mod_128 = position % 128;

                let low_index = 2 * pos_mod_128 as usize;
                let high_index = low_index + 1;

                let generator_low = committer.scalar_mul(delta_low, low_index);
                let generator_high = committer.scalar_mul(delta_high, high_index);

                if is_c1_comm_update {
                    c_1_delta_update += generator_low + generator_high;
                } else {
                    c_2_delta_update += generator_low + generator_high;
                }
            }

            // Compute the delta for C1 and C2, so that we can update the extension commitment
            let mut hash_c1_delta = Fr::zero();
            let mut hash_c2_delta = Fr::zero();

            if !c_1_delta_update.is_zero() {
                let mut c1_path = ext_path.clone();
                c1_path.push(2);
                let old_c1_comm = hint.commitments_by_path[&c1_path];
                let new_c1_commitment = old_c1_comm + c_1_delta_update;

                let hash_c1_new = group_to_field(&new_c1_commitment);
                let hash_c1_old = group_to_field(&old_c1_comm);

                hash_c1_delta = hash_c1_new - hash_c1_old;
            }
            if !c_2_delta_update.is_zero() {
                let mut c2_path = ext_path.clone();
                c2_path.push(3);
                let old_c2_comm = hint.commitments_by_path[&c2_path];
                let new_c2_commitment = old_c2_comm + c_2_delta_update;

                let hash_c2_new = group_to_field(&new_c2_commitment);
                let hash_c2_old = group_to_field(&old_c2_comm);

                hash_c2_delta = hash_c2_new - hash_c2_old;
            }

            // C1 sits at index 2 and C2 at index 3 of the extension commitment
            let mut stem_comm_update = Element::zero();
            stem_comm_update += committer.scalar_mul(hash_c1_delta, 2);
            stem_comm_update += committer.scalar_mul(hash_c2_delta, 3);

            let stem_comm_old = hint.commitments_by_path[&ext_path];
            let stem_comm_new = stem_comm_old + stem_comm_update;
            let hash_stem_comm_new = group_to_field(&stem_comm_new);

            // Note that we have been given a stem to which we know is in the trie (ext_pres) and
            // we have computed all of the updates for that particular stem
            updated_commitments_by_stem.insert(stem, (stem_comm_new, hash_stem_comm_new));
        } else if ext_pres == ExtPresent::DifferentStem {
            let other_stem = hint.other_stems_by_prefix[&prefix];
            updated_stems_by_prefix
                .entry(prefix)
                .or_default()
                .insert(other_stem);

            // Since this stem was not present in the trie, we need to make its initial stem commitment
            //
            // This is similar to the case of ExtPres::Present, except that the old_value is zero, so we can ignore it
            // TODO we could take this for loop out of the if statement and then use the if statement for the rest
            let mut c_1 = Element::zero();
            let mut c_2 = Element::zero();
            for (suffix, (old_value, new_value)) in suffix_update {
                if old_value.is_some() {
                    return Err(VerificationError::OldValueIsPopulated);
                }
                // Split values into low_16 and high_16
                let new_value_low_16 = new_value[0..16].to_vec();
                let new_value_high_16 = new_value[16..32].to_vec();

                // We need to compute two deltas
                let value_low = Fr::from_le_bytes_mod_order(&new_value_low_16) + TWO_POW_128;
                let value_high = Fr::from_le_bytes_mod_order(&new_value_high_16);

                let position = suffix;
                let is_c1_comm_update = position < 128;
                let pos_mod_128 = position % 128;

                let low_index = 2 * pos_mod_128 as usize;
                let high_index = low_index + 1;

                let generator_low = committer.scalar_mul(value_low, low_index);
                let generator_high = committer.scalar_mul(value_high, high_index);

                if is_c1_comm_update {
                    c_1 += generator_low + generator_high;
                } else {
                    c_2 += generator_low + generator_high;
                }
            }

            // Fresh extension commitment: [1, stem, H(C1), H(C2)] at indices 0..=3
            let stem_comm_0 = Fr::one(); // TODO: We can get rid of this and just add SRS[0]
            let stem_comm_1 = Fr::from_le_bytes_mod_order(&stem);
            let stem_comm_2 = group_to_field(&c_1);
            let stem_comm_3 = group_to_field(&c_2);
            let stem_comm = committer.commit_sparse(vec![
                (stem_comm_0, 0),
                (stem_comm_1, 1),
                (stem_comm_2, 2),
                (stem_comm_3, 3),
            ]);
            let hash_stem_comm = group_to_field(&stem_comm);
            updated_commitments_by_stem.insert(stem, (stem_comm, hash_stem_comm));
        }
        // NOTE(review): `ExtPresent::None` is not handled here; a stem with that
        // status would panic below when its commitment is looked up. TODO:
        // handle or explicitly reject that case.

        // We have now processed all of the necessary extension proof edits that need to be completed.
        // let's recompute the root
    }

    let mut tree = SparseVerkleTree::new(root);

    for (prefix, stems) in updated_stems_by_prefix {
        // First fetch the old commitment for this prefix
        // If the prefix is for a stem that was not in the trie, then it will be 0
        let old_hash_value = match hint.commitments_by_path.get(&prefix) {
            Some(comm) => group_to_field(comm),
            None => Fr::zero(),
        };

        if stems.len() == 1 {
            let stem = stems.iter().next().unwrap();
            let (_, new_hash_value) = updated_commitments_by_stem[stem];
            tree.update_prefix(
                &hint.commitments_by_path,
                &committer,
                prefix.clone(),
                old_hash_value,
                new_hash_value,
            )
            .unwrap();
        } else {
            // If we have more than one stem to be processed for a prefix, we need to build a subtree and
            // then update the prefix with the root of the subtree
            //
            // Note, once we take the root from the subtree, we can discard the tree
            // We know that all updates to this prefix will happen here.
            //
            // Get all of the stems and their commitments
            let mut elements = Vec::new();
            for stem in stems {
                let updated_comm = updated_commitments_by_stem.get(&stem);
                let stem_comm = match updated_comm {
                    Some((comm, _)) => *comm,
                    None => hint.commitments_by_path[&prefix],
                };
                elements.push((stem, stem_comm))
            }
            let subtree_root_comm = build_subtree(prefix.clone(), elements, &committer);
            let new_hash_value = group_to_field(&subtree_root_comm);
            tree.update_prefix(
                &hint.commitments_by_path,
                &committer,
                prefix.clone(),
                old_hash_value,
                new_hash_value,
            )
            .unwrap();
        }
    }

    Ok(tree.root)
}
// Build a subtree from a set of stems and their commitments
// We will start from an empty tree and iterate each stem
// modifying the inner node commitments along the way
//
// This algorithm should match the stateful trie insert, however
// it has been rewritten here so that that section of the code does not increase in complexity
//
// TODO we can rewrite this to place the node commitments in the tree and
// TODO then recursively sweep up the tree, updating each node using commit_sparse
//
// We could _not_ pass in the prefix and slice off stem[prefix.len()..], then compute the root
// as if we were starting from a tree of depth=0
//
// Returns the commitment of the subtree's root node once every `(stem,
// stem-commitment)` pair in `elements` has been inserted below `prefix`.
fn build_subtree<C: Committer>(
    prefix: Vec<u8>,
    elements: Vec<([u8; 31], Element)>,
    committer: &C,
) -> Element {
    // Node paths are relative to the subtree root (the empty path).
    let mut tree: BTreeMap<Vec<u8>, Node> = BTreeMap::new();
    // Insert the root
    tree.insert(
        vec![],
        Node::Inner(InnerNode {
            commitment: Element::zero(),
        }),
    );

    #[derive(Debug, Clone)]
    struct InnerNode {
        commitment: Element,
    }
    #[derive(Debug, Clone, Copy)]
    struct Stem {
        id: [u8; 31],
        commitment: Element,
    }
    #[derive(Debug, Clone)]
    enum Node {
        Inner(InnerNode),
        Stem(Stem),
    }
    impl Node {
        fn is_inner(&self) -> bool {
            match self {
                Node::Inner(_) => true,
                Node::Stem(_) => false,
            }
        }
        // Panics if the node is a stem.
        fn inner(&self) -> &InnerNode {
            match self {
                Node::Inner(inner) => inner,
                Node::Stem(_) => panic!("found stem"),
            }
        }
        // Panics if the node is a stem.
        fn inner_mut(&mut self) -> &mut InnerNode {
            match self {
                Node::Inner(inner) => inner,
                Node::Stem(_) => panic!("found stem"),
            }
        }
    }

    for (stem, commitment) in elements {
        let mut depth = prefix.len();
        // Everything before the prefix is irrelevant to the subtree
        let _relative_stem = &stem[depth..];

        // Walk down existing inner nodes following the stem's bytes until we
        // either fall off the tree (no child yet) or land on a stem node.
        let mut path = vec![];
        let mut current_node = tree[&path].clone();
        while current_node.is_inner() {
            let index = stem[depth];
            path.push(index);
            depth += 1;
            match tree.get(&path) {
                Some(node) => {
                    current_node = node.clone();
                }
                None => {
                    break;
                }
            };
        }
        let (mut child_old_value, mut child_new_value) = match current_node {
            Node::Inner(_) => {
                // Add a stem at the path which points to the child of the previous node
                tree.insert(
                    path.clone(),
                    Node::Stem(Stem {
                        id: stem,
                        commitment,
                    }),
                );
                let child_new_value = group_to_field(&commitment);
                let child_old_value = Fr::zero();
                (child_old_value, child_new_value)
            }
            Node::Stem(old_stem) => {
                // assert_ne!(old_stem.id, stem);
                // We now need to add a bunch of new inner nodes for each index that these two stems share
                // The current node which is a stem will be shifted down the tree, the node that was pointing to this stem will now point to an inner node
                let mut new_inner_node = InnerNode {
                    commitment: Element::zero(),
                };
                let stem_to_innernode_path = path.clone(); // Save the path of the node which was a stem and is now a inner node (currently an edge case)
                tree.insert(path.clone(), Node::Inner(new_inner_node)); // This inner node now replaces the old_stem
                // One fresh inner node per byte the two stems still share.
                while old_stem.id[depth] == stem[depth] {
                    let index = stem[depth];
                    depth += 1;
                    path.push(index);
                    new_inner_node = InnerNode {
                        commitment: Element::zero(),
                    };
                    tree.insert(path.clone(), Node::Inner(new_inner_node));
                }
                // First differing byte: hang each stem off the bottom inner
                // node at its own child index.
                let mut old_stem_path = path.clone();
                let old_stem_index = old_stem.id[depth];
                old_stem_path.push(old_stem_index);
                tree.insert(old_stem_path, Node::Stem(old_stem));

                let mut stem_path = path.clone();
                let stem_index = stem[depth];
                stem_path.push(stem_index);
                tree.insert(
                    stem_path,
                    Node::Stem(Stem {
                        id: stem,
                        commitment,
                    }),
                );
                // Now lets modify the bottom inner node's commitment with respects to the two new stems
                let old_stem_child_comm = committer.scalar_mul(
                    group_to_field(&old_stem.commitment),
                    old_stem_index as usize,
                );
                let stem_child_comm =
                    committer.scalar_mul(group_to_field(&commitment), stem_index as usize);
                let delta_comm = old_stem_child_comm + stem_child_comm;
                tree.get_mut(&path).unwrap().inner_mut().commitment += delta_comm;
                let comm = tree.get(&path).unwrap().inner().commitment;

                let mut child_new_value = group_to_field(&comm);
                let mut child_old_value = Fr::zero(); // previous value of this bottom inner node was zero

                // Process the chain of inner nodes who only have one child which is an inner node
                while path != stem_to_innernode_path {
                    let child_index = path.pop().unwrap();
                    let parent_old_comm = tree.get(&path).unwrap().inner().commitment;
                    let delta = child_new_value - child_old_value;
                    tree.get_mut(&path).unwrap().inner_mut().commitment +=
                        committer.scalar_mul(delta, child_index as usize);
                    child_old_value = group_to_field(&parent_old_comm);
                    child_new_value =
                        group_to_field(&tree.get(&path).unwrap().inner().commitment);
                }
                // Now process the node which was previously a stem and is now an inner node
                child_old_value = group_to_field(&old_stem.commitment);
                let child_index = path.pop().unwrap();
                let delta = child_new_value - child_old_value;
                let parent_old_comm = tree[&path].inner().commitment;
                tree.get_mut(&path).unwrap().inner_mut().commitment +=
                    committer.scalar_mul(delta, child_index as usize);
                child_old_value = group_to_field(&parent_old_comm);
                child_new_value = group_to_field(&tree.get(&path).unwrap().inner().commitment);
                (child_old_value, child_new_value)
            }
        };
        // Propagate the delta from the touched node up to the subtree root.
        while let Some(child_index) = path.pop() {
            let parent_old_comm = tree.get(&path).unwrap().inner().commitment;
            let delta = child_new_value - child_old_value;
            tree.get_mut(&path).unwrap().inner_mut().commitment +=
                committer.scalar_mul(delta, child_index as usize);
            child_old_value = group_to_field(&parent_old_comm);
            child_new_value = group_to_field(&tree.get(&path).unwrap().inner().commitment);
        }
    }

    tree.get(&vec![]).unwrap().inner().commitment
}
/// A partial view of the verkle trie used during stateless root updates:
/// only the commitments along the paths being updated are tracked.
struct SparseVerkleTree {
    /// Current root commitment (updated by `update_prefix`).
    root: Element,
    /// Commitments recomputed so far, keyed by node path.
    updated_commitments_by_path: BTreeMap<Vec<u8>, Element>,
}
impl SparseVerkleTree {
    /// Creates a sparse tree that initially only knows the root commitment.
    fn new(root: Element) -> SparseVerkleTree {
        SparseVerkleTree {
            root,
            updated_commitments_by_path: BTreeMap::default(),
        }
    }

    /// Propagates a value change of the node at `prefix` up to the root.
    ///
    /// `old_value`/`new_value` are the node's scalar values before and after
    /// the change. For each ancestor, the commitment delta is
    /// `delta * G[child_index]`; a commitment already updated by a previous
    /// call takes precedence over the original one in `commitments_by_path`.
    /// On success, `self.root` holds the new root commitment.
    ///
    /// # Errors
    /// Returns [`VerificationError::EmptyPrefix`] when `prefix` is empty.
    fn update_prefix<C: Committer>(
        &mut self,
        commitments_by_path: &BTreeMap<Vec<u8>, Element>,
        committer: &C,
        mut prefix: Vec<u8>,
        old_value: Fr,
        new_value: Fr,
    ) -> Result<(), VerificationError> {
        if prefix.is_empty() {
            return Err(VerificationError::EmptyPrefix);
        }
        // First lets compute the delta between the old_value and the new value
        let mut delta = new_value - old_value;

        let mut current_parent_comm = None;
        // Now lets fetch the parent node's commitment and recursively update each parent
        while let Some(child_index) = prefix.pop() {
            // Safety: Fine unwrap because we've checked prefix isn't empty
            // If we have never updated the parent node before,
            // then it will be the old commitment
            // If we have then it will be in updated commitments
            let parent_comm = self.updated_commitments_by_path.get(&prefix);
            let old_parent_comm = match parent_comm {
                Some(comm) => *comm,
                None => commitments_by_path[&prefix],
            };
            // Update the parent_comm at the child index
            let comm_update = committer.scalar_mul(delta, child_index as usize);
            let new_parent_comm = old_parent_comm + comm_update;
            current_parent_comm = Some(new_parent_comm);
            self.updated_commitments_by_path
                .insert(prefix.clone(), new_parent_comm);
            delta = group_to_field(&new_parent_comm) - group_to_field(&old_parent_comm)
        }
        // `prefix` was non-empty, so the loop ran at least once: this is `Some`.
        self.root = current_parent_comm.unwrap();
        Ok(())
    }
}
// https://stackoverflow.com/a/46767732
// TODO Check if there is a similar method in itertools
/// Returns `true` when the iterator yields no duplicate items.
/// Short-circuits on the first repeated item.
fn has_unique_elements<T>(iter: T) -> bool
where
    T: IntoIterator,
    T::Item: Eq + std::hash::Hash,
{
    let mut seen = std::collections::HashSet::new();
    for item in iter {
        // `insert` returns false when the item was already present.
        if !seen.insert(item) {
            return false;
        }
    }
    true
}
#[cfg(test)]
mod test {
    use banderwagon::trait_defs::*;

    use crate::constants::new_crs;
    use crate::database::memory_db::MemoryDb;
    use crate::database::ReadOnlyHigherDb;
    use crate::proof::prover;
    use crate::proof::stateless_updater::update_root;
    use crate::{group_to_field, DefaultConfig};
    use crate::{trie::Trie, TrieTrait};
    use ipa_multipoint::committer::DefaultCommitter;

    // Updates one key under an existing stem statelessly and checks the
    // resulting root hash against a stateful re-insertion.
    #[test]
    fn basic_update() {
        let db = MemoryDb::new();
        let mut trie = Trie::new(DefaultConfig::new(db));

        let mut keys = Vec::new();
        for i in 0..2 {
            let mut key_0 = [0u8; 32];
            key_0[0] = i;
            keys.push(key_0);
            trie.insert_single(key_0, key_0);
        }
        let root = vec![];
        let meta = trie.storage.get_branch_meta(&root).unwrap();

        let proof = prover::create_verkle_proof(&trie.storage, keys.clone()).unwrap();

        let values: Vec<_> = keys.iter().map(|val| Some(*val)).collect();
        let (ok, updated_hint) = proof.check(keys.clone(), values.clone(), meta.commitment);
        assert!(ok);

        // Overwrite the first key with zeroes; leave the second untouched (None).
        let new_root_comm = update_root(
            updated_hint.unwrap(),
            keys.clone(),
            values,
            vec![Some([0u8; 32]), None],
            meta.commitment,
            DefaultCommitter::new(&new_crs().G),
        );

        let mut got_bytes = [0u8; 32];
        group_to_field(&new_root_comm.unwrap())
            .serialize_compressed(&mut got_bytes[..])
            .unwrap();

        // Apply the same update statefully and compare root hashes.
        trie.insert_single(keys[0], [0u8; 32]);
        let expected_root = trie.root_hash();
        let mut expected_bytes = [0u8; 32];
        expected_root
            .serialize_compressed(&mut expected_bytes[..])
            .unwrap();

        assert_eq!(got_bytes, expected_bytes)
    }

    // Inserts a key whose stem shares a prefix with an existing stem, which
    // exercises the build_subtree code path.
    #[test]
    fn basic_update_using_subtree() {
        let db = MemoryDb::new();
        let mut trie = Trie::new(DefaultConfig::new(db));

        let key_a = [0u8; 32];
        trie.insert_single(key_a, key_a);

        let key_b = [1u8; 32];
        trie.insert_single(key_b, key_b);

        // key_c shares the first three bytes with key_a and is absent pre-state.
        let mut key_c = [0u8; 32];
        key_c[3] = 1;

        let keys = vec![key_a, key_b, key_c];
        let values = vec![Some(key_a), Some(key_b), None];
        let root = vec![];
        let meta = trie.storage.get_branch_meta(&root).unwrap();

        let proof = prover::create_verkle_proof(&trie.storage, keys.clone()).unwrap();

        let (ok, updated_hint) = proof.check(keys.clone(), values.clone(), meta.commitment);
        assert!(ok);

        let updated_values = vec![None, None, Some(key_c)];
        let new_root_comm = update_root(
            updated_hint.unwrap(),
            keys,
            values,
            updated_values,
            meta.commitment,
            DefaultCommitter::new(&new_crs().G),
        );

        let mut got_bytes = [0u8; 32];
        group_to_field(&new_root_comm.unwrap())
            .serialize_compressed(&mut got_bytes[..])
            .unwrap();

        trie.insert_single(key_c, key_c);
        let expected_root = trie.root_hash();
        let mut expected_bytes = [0u8; 32];
        expected_root
            .serialize_compressed(&mut expected_bytes[..])
            .unwrap();

        assert_eq!(got_bytes, expected_bytes)
    }

    #[test]
    fn basic_update3() {
        // traverse the subtree twice
        let db = MemoryDb::new();
        let mut trie = Trie::new(DefaultConfig::new(db));

        let key_a = [0u8; 32];
        trie.insert_single(key_a, key_a);

        let key_b = [1u8; 32];
        trie.insert_single(key_b, key_b);

        let mut keys = vec![key_a, key_b];
        let mut values = vec![Some(key_a), Some(key_b)];
        let mut updated_values = vec![None, None];
        // Thirty new keys, each diverging from key_a's stem one byte deeper.
        for i in 1..=30 {
            let mut key = [0u8; 32];
            key[i] = 1;
            keys.push(key);
            values.push(None);
            updated_values.push(Some(key))
        }

        let root = vec![];
        let meta = trie.storage.get_branch_meta(&root).unwrap();

        let proof = prover::create_verkle_proof(&trie.storage, keys.clone()).unwrap();

        let (ok, updated_hint) = proof.check(keys.clone(), values.clone(), meta.commitment);
        assert!(ok, "proof failed to verify");

        let new_root_comm = update_root(
            updated_hint.unwrap(),
            keys.clone(),
            values,
            updated_values,
            meta.commitment,
            DefaultCommitter::new(&new_crs().G),
        );

        let mut got_bytes = [0u8; 32];
        group_to_field(&new_root_comm.unwrap())
            .serialize_uncompressed(&mut got_bytes[..])
            .unwrap();
        dbg!(&got_bytes);

        for key in keys.into_iter().skip(2) {
            // skip two keys that are already in the trie
            trie.insert_single(key, key);
        }
        let expected_root = trie.root_hash();
        let mut expected_bytes = [0u8; 32];
        expected_root
            .serialize_uncompressed(&mut expected_bytes[..])
            .unwrap();
        dbg!(&expected_bytes);
        assert_eq!(got_bytes, expected_bytes)
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/proof/golang_proof_format.rs | verkle-trie/src/proof/golang_proof_format.rs | // This is just tech debt. The golang codebase should be reverted to make proofs opaque again
// and the rest of the code should be handled by clients.
use banderwagon::{CanonicalDeserialize, Element, Fr};
use ipa_multipoint::{ipa::IPAProof, multiproof::MultiPointProof};
use super::{ExtPresent, VerificationHint, VerkleProof};
/// One key-suffix change within a stem: the value before and after the block.
struct SuffixDiff {
    suffix: u8,
    current_value: Option<[u8; 32]>,
    new_value: Option<[u8; 32]>,
}
/// All suffix changes grouped under a single 31-byte stem.
struct StateDiff {
    stem: [u8; 31],
    suffix_diffs: Vec<SuffixDiff>,
}
impl StateDiff {
    #[allow(clippy::type_complexity)]
    /// Returns the keys, their old values and their new values
    ///
    /// Each key is the 31-byte stem with the suffix appended as the 32nd byte;
    /// the three returned vectors are index-aligned.
    pub fn keys_with_current_values(
        &self,
    ) -> (Vec<[u8; 32]>, Vec<Option<[u8; 32]>>, Vec<Option<[u8; 32]>>) {
        let mut keys = Vec::new();
        let mut current_values = Vec::new();
        let mut new_values = Vec::new();

        let stem = self.stem;
        for suffix_diff in &self.suffix_diffs {
            let suffix = suffix_diff.suffix;
            let current_value = suffix_diff.current_value;
            let new_value = suffix_diff.new_value;

            // key = stem (31 bytes) || suffix (1 byte)
            let mut key = stem.to_vec();
            key.push(suffix);
            keys.push(key.try_into().unwrap());

            current_values.push(current_value);
            new_values.push(new_value);
        }

        (keys, current_values, new_values)
    }
}
/// Byte-level mirror of the proof format emitted by go-verkle/geth, before
/// the raw bytes are deserialized into curve points and field elements.
pub struct VerkleProofGo {
    state_diffs: Vec<StateDiff>,
    commitments_by_path: Vec<[u8; 32]>,
    other_stems: Vec<[u8; 31]>,
    proof: MultiPointProofGo,
    // Packed bytes: low two bits = extension status, `>> 3` = depth
    // (see `byte_to_depth_extension_present`).
    depths_extension_present: Vec<u8>,
}
/// Flattened, index-aligned keys with their pre-state (`current_values`) and
/// post-state (`new_values`) values.
pub struct KeysValues {
    pub keys: Vec<[u8; 32]>,
    pub current_values: Vec<Option<[u8; 32]>>,
    pub new_values: Vec<Option<[u8; 32]>>,
}
impl VerkleProofGo {
    /// Converts the go-verkle wire representation into this crate's
    /// [`VerkleProof`] plus the flat key/value lists.
    ///
    /// Returns `None` when any serialized group element fails to deserialize.
    pub fn from_verkle_proof_go_to_verkle_proof(&self) -> Option<(VerkleProof, KeysValues)> {
        // Unpack the combined depth/extension-status bytes.
        let mut depths = Vec::new();
        let mut extension_present = Vec::new();
        for byte in &self.depths_extension_present {
            let (ext_status, depth) = byte_to_depth_extension_present(*byte);
            extension_present.push(ext_status);
            depths.push(depth);
        }

        // Flatten the per-stem diffs into aligned key/value vectors.
        let mut keys: Vec<[u8; 32]> = Vec::new();
        let mut current_values = Vec::new();
        let mut new_values = Vec::new();
        for state_diff in &self.state_diffs {
            let (state_keys, state_current_values, state_new_values) =
                state_diff.keys_with_current_values();
            keys.extend(state_keys);
            current_values.extend(state_current_values);
            new_values.extend(state_new_values);
        }

        let mut comms_sorted = Vec::with_capacity(self.commitments_by_path.len());
        for comm_sorted in &self.commitments_by_path {
            comms_sorted.push(bytes32_to_element(*comm_sorted)?)
        }

        // Rebuild the IPA proof from its left/right commitment vectors.
        let mut l_vec = Vec::with_capacity(self.proof.cl.len());
        for cl in &self.proof.cl {
            l_vec.push(bytes32_to_element(*cl)?)
        }
        let mut r_vec = Vec::with_capacity(self.proof.cr.len());
        for cr in &self.proof.cr {
            r_vec.push(bytes32_to_element(*cr)?)
        }

        let proof = MultiPointProof {
            open_proof: IPAProof {
                L_vec: l_vec,
                R_vec: r_vec,
                a: bytes32_to_scalar(self.proof.final_evaluation),
            },
            g_x_comm: bytes32_to_element(self.proof.d)?,
        };

        Some((
            VerkleProof {
                verification_hint: VerificationHint {
                    depths,
                    extension_present,
                    diff_stem_no_proof: self.other_stems.iter().copied().collect(),
                },
                comms_sorted,
                proof,
            },
            KeysValues {
                keys,
                current_values,
                new_values,
            },
        ))
    }

    /// Parses an `executionWitness` JSON string (go/geth format) into this
    /// intermediate byte-level representation.
    ///
    /// Panics on malformed JSON or invalid hex fields.
    pub fn from_json_str(execution_witness: &str) -> Self {
        let execution_witness: serde_conversions::ExecutionWitness =
            serde_json::from_str(execution_witness).unwrap();

        let state_diffs = execution_witness
            .state_diffs
            .into_iter()
            .map(|state_diff| StateDiff {
                stem: hex_to_bytes31(&state_diff.stem),
                suffix_diffs: state_diff
                    .suffix_diffs
                    .into_iter()
                    .map(|suffix_diff| SuffixDiff {
                        suffix: suffix_diff.suffix,
                        current_value: suffix_diff.current_value.map(|cv| hex_to_bytes32(&cv)),
                        new_value: suffix_diff.new_value.map(|nv| hex_to_bytes32(&nv)),
                    })
                    .collect(),
            })
            .collect();

        let other_stems = execution_witness
            .verkle_proof
            .other_stems
            .into_iter()
            .map(|os| hex_to_bytes31(&os))
            .collect();
        let commitments_by_path = execution_witness
            .verkle_proof
            .commitments_by_path
            .into_iter()
            .map(|cbp| hex_to_bytes32(&cbp))
            .collect();

        let proof = MultiPointProofGo {
            d: hex_to_bytes32(&execution_witness.verkle_proof.d),
            cl: execution_witness
                .verkle_proof
                .ipa_proof
                .cl
                .into_iter()
                .map(|cl| hex_to_bytes32(&cl))
                .collect(),
            cr: execution_witness
                .verkle_proof
                .ipa_proof
                .cr
                .into_iter()
                .map(|cr| hex_to_bytes32(&cr))
                .collect(),
            final_evaluation: hex_to_bytes32(
                &execution_witness.verkle_proof.ipa_proof.final_evaluation,
            ),
        };

        Self {
            state_diffs,
            commitments_by_path,
            other_stems,
            proof,
            depths_extension_present: hex_to_bytes(
                &execution_witness.verkle_proof.depth_extension_present,
            ),
        }
    }
}
/// Serialized multipoint/IPA proof bytes as produced by go-verkle:
/// `d` is the aggregated commitment, `cl`/`cr` the IPA round commitments,
/// `final_evaluation` the final scalar.
struct MultiPointProofGo {
    d: [u8; 32],
    cl: Vec<[u8; 32]>,
    cr: Vec<[u8; 32]>,
    final_evaluation: [u8; 32],
}
/// Decodes a hex string (with or without a `0x` prefix) into exactly 32 bytes.
/// Panics on invalid hex or a length other than 32 bytes.
pub fn hex_to_bytes32(hex: &str) -> [u8; 32] {
    hex_to_fixed_size_array(hex)
}
/// Decodes a hex string (with or without a `0x` prefix) into exactly 31 bytes
/// (a stem). Panics on invalid hex or a length other than 31 bytes.
fn hex_to_bytes31(hex: &str) -> [u8; 31] {
    hex_to_fixed_size_array(hex)
}
/// Decodes a hex string (with or without a `0x` prefix) into exactly `N` bytes.
///
/// # Panics
/// Panics on invalid hex, or — with an explicit message — when the decoded
/// length is not `N` (previously `copy_from_slice` panicked with an opaque
/// slice-length message).
fn hex_to_fixed_size_array<const N: usize>(hex: &str) -> [u8; N] {
    let hex = hex.trim_start_matches("0x");
    let bytes = hex::decode(hex).unwrap();
    // Fail with a clear message instead of copy_from_slice's generic panic.
    assert_eq!(
        bytes.len(),
        N,
        "expected {} bytes of hex, got {}",
        N,
        bytes.len()
    );
    let mut bytes_n = [0u8; N];
    bytes_n.copy_from_slice(&bytes);
    bytes_n
}
/// Decodes a hex string (with or without a `0x` prefix) into a byte vector.
/// Panics on invalid hex.
fn hex_to_bytes(hex: &str) -> Vec<u8> {
    let hex = hex.trim_start_matches("0x");
    // `hex::decode` already returns an owned Vec; the previous version
    // allocated a second zeroed Vec and copied into it for no benefit.
    hex::decode(hex).unwrap()
}
/// Deserializes 32 bytes into a group [`Element`], returning `None` when the
/// bytes are not a valid encoding.
pub fn bytes32_to_element(bytes: [u8; 32]) -> Option<Element> {
    Element::from_bytes(&bytes)
}
/// Deserializes a scalar field element from 32 bytes.
///
/// The bytes are reversed before deserializing — presumably the go side
/// serializes scalars big-endian while arkworks expects little-endian;
/// TODO confirm against the go-verkle serializer.
/// Panics if the bytes do not deserialize to a valid scalar.
fn bytes32_to_scalar(mut bytes: [u8; 32]) -> Fr {
    bytes.reverse();
    CanonicalDeserialize::deserialize_compressed(&bytes[..]).unwrap()
}
/// Splits go-verkle's packed depth/extension byte: bits 0-1 hold the
/// extension status, `value >> 3` holds the depth.
///
/// Panics on an unknown extension-status value (i.e. 3).
fn byte_to_depth_extension_present(value: u8) -> (ExtPresent, u8) {
    let ext_status = value & 3;
    let ext_status = match ext_status {
        0 => ExtPresent::None,
        1 => ExtPresent::DifferentStem,
        2 => ExtPresent::Present,
        x => panic!("unexpected ext status number {} ", x),
    };
    let depth = value >> 3;

    (ext_status, depth)
}
// Taken from https://github.com/ethereumjs/ethereumjs-monorepo/blob/master/packages/statemanager/test/testdata/verkleKaustinenBlock.json#L1-L2626
// Block number 0x62
/// State root of the block preceding the test execution witness below.
pub const PREVIOUS_STATE_ROOT: &str =
    "0x2cf2ab8fed2dcfe2fa77da044ab16393dbdabbc65deea5fdf272107a039f2c60";
pub const EXECUTION_WITNESS_JSON: &str = r#"
{
"stateDiff": [
{
"stem": "0xab8fbede899caa6a95ece66789421c7777983761db3cfb33b5e47ba10f413b",
"suffixDiffs": [
{
"suffix": 97,
"currentValue": null,
"newValue": "0x2f08a1461ab75873a0f2d23170f46d3be2ade2a7f4ebf607fc53fb361cf85865"
}
]
}
],
"verkleProof": {
"otherStems": [],
"depthExtensionPresent": "0x12",
"commitmentsByPath": [
"0x4900c9eda0b8f9a4ef9a2181ced149c9431b627797ab747ee9747b229579b583",
"0x491dff71f13c89dac9aea22355478f5cfcf0af841b68e379a90aa77b8894c00e",
"0x525d67511657d9220031586db9d41663ad592bbafc89bc763273a3c2eb0b19dc"
],
"d": "0x5c6e856174962f2786f0711288c8ddd90b0c317db7769ab3485818460421f08c",
"ipaProof": {
"cl": [
"0x4ff3c1e2a97b6bd0861a2866acecd2fd6d2e5949196429e409bfd4851339832e",
"0x588cfd2b401c8afd04220310e10f7ccdf1144d2ef9191ee9f72d7d44ad1cf9d0",
"0x0bb16d917ecdec316d38b92558d46450b21553673f38a824037716bfee067220",
"0x2bdb51e80b9e43cc5011f4b51877f4d56232ce13035671f191bd4047baa11f3d",
"0x130f6822a47533ed201f5f15b144648a727217980ca9e86237977b7f0fe8f41e",
"0x2c4b83ccd0bb8ad8d370ab8308e11c95fb2020a6a62e71c9a1c08de2d32fc9f1",
"0x4424bec140960c09fc97ee29dad2c3ff467b7e01a19ada43979c55c697b4f583",
"0x5c8f76533d04c7b868e9d7fcaa901897c5f35b27552c3bf94f01951fae6fcd2a"
],
"cr": [
"0x31cb234eeff147546cabd033235c8f446812c7f44b597d9580a10bbecac9dd82",
"0x6945048c033a452d346977ab306df4df653b6e7f3e0b75a705a650427ee30e88",
"0x38ca3c4ebbee982301b6bafd55bc9e016a7c59af95e9666b56a0680ed1cd0673",
"0x16160e96b0fb20d0c9c7d9ae76ca9c74300d34e05d3688315c0062204ab0d07b",
"0x2bc96deadab15bc74546f8882d8b88c54ea0b62b04cb597bf5076fe25c53e43c",
"0x301e407f62f0d1f6bf56f2e252ca89dd9f3bf09acbb0cca9230ecda24ac783b5",
"0x3ce1800a2e3f10e641f3ef8a8aaacf6573e9e33f4cb5b429850271528ed3cd31",
"0x471b1578afbd3f2762654d04db73c6a84e9770f3d6b8a189596fbad38fffa263"
],
"finalEvaluation": "0x07ca48ff9f0fb458967f070c18e5cdf180e93212bf3efba6378384c5703a61fe"
}
}
}
"#;
// Serde conversion so we can convert from the json execution witness string into the golang proof format
pub mod serde_conversions {
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug)]
pub struct ExecutionWitness {
#[serde(rename = "stateDiff")]
pub(crate) state_diffs: Vec<StateDiff>,
#[serde(rename = "verkleProof")]
pub(crate) verkle_proof: VerkleProof,
}
#[derive(Serialize, Deserialize, Debug)]
pub(crate) struct StateDiff {
pub(crate) stem: String,
#[serde(rename = "suffixDiffs")]
pub(crate) suffix_diffs: Vec<SuffixDiff>,
}
#[derive(Serialize, Deserialize, Debug)]
pub(crate) struct SuffixDiff {
pub(crate) suffix: u8,
#[serde(rename = "currentValue")]
pub(crate) current_value: Option<String>,
#[serde(rename = "newValue")]
pub(crate) new_value: Option<String>,
}
#[derive(Serialize, Deserialize, Debug)]
pub(crate) struct VerkleProof {
#[serde(rename = "otherStems")]
pub(crate) other_stems: Vec<String>,
#[serde(rename = "depthExtensionPresent")]
pub(crate) depth_extension_present: String,
#[serde(rename = "commitmentsByPath")]
pub(crate) commitments_by_path: Vec<String>,
pub(crate) d: String,
#[serde(rename = "ipaProof")]
pub(crate) ipa_proof: IpaProof,
}
#[derive(Serialize, Deserialize, Debug)]
pub(crate) struct IpaProof {
pub(crate) cl: Vec<String>,
pub(crate) cr: Vec<String>,
#[serde(rename = "finalEvaluation")]
pub(crate) final_evaluation: String,
}
#[test]
fn test_serde_works() {
use super::EXECUTION_WITNESS_JSON;
let _: ExecutionWitness = serde_json::from_str(EXECUTION_WITNESS_JSON).unwrap();
}
}
#[cfg(test)]
mod tests {
use crate::proof::golang_proof_format::{
bytes32_to_element, hex_to_bytes32, VerkleProofGo, EXECUTION_WITNESS_JSON,
PREVIOUS_STATE_ROOT,
};
#[test]
fn test_proof_from_json_golang_serde() {
let verkle_proof_go = VerkleProofGo::from_json_str(EXECUTION_WITNESS_JSON);
let (got_verkle_proof, keys_values) = verkle_proof_go
.from_verkle_proof_go_to_verkle_proof()
.unwrap();
let prestate_root = bytes32_to_element(hex_to_bytes32(PREVIOUS_STATE_ROOT)).unwrap();
let (ok, _) =
got_verkle_proof.check(keys_values.keys, keys_values.current_values, prestate_root);
assert!(ok);
}
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/proof/key_path_finder.rs | verkle-trie/src/proof/key_path_finder.rs | use crate::database::{Meta, ReadOnlyHigherDb};
use crate::trie::BranchId;
// PathFinder is an algorithm to find the path to a given key
// If the key is not found, this algorithm returns the path to the node where
// the key would have been inserted. This gives the verifier enough information
// to update the key.
pub(crate) struct KeyPathFinder;
#[derive(Debug, Copy, Clone)]
pub(crate) enum KeyState {
// The key was found, we return its value
Found([u8; 32]),
NotFound(KeyNotFound),
}
impl KeyState {
pub(crate) fn different_stem(&self) -> Option<[u8; 31]> {
match self {
KeyState::NotFound(KeyNotFound::DifferentStem(stem)) => Some(*stem),
_ => None,
}
}
pub(crate) fn value(&self) -> Option<[u8; 32]> {
match self {
KeyState::Found(value) => Some(*value),
_ => None,
}
}
}
#[derive(Debug, Copy, Clone)]
pub(crate) enum KeyNotFound {
// The key was not found, however the slot where we would
// have inserted it, there is a different stem
// An example of this happening is:
// We insert the key [0,0,0,0], then we try to find [0,1,0,0]
DifferentStem([u8; 31]),
// The key was not found, however its stem is present
// in the trie.
StemFound,
// The key was found, however the slot that we would have inserted it at
// is empty.
Empty,
}
// The path to the key, including all of the nodes along the path
pub(crate) struct KeyPath {
pub nodes: Vec<(BranchId, u8, Meta)>,
// depth refers to the depth that the key_path terminated at
// This can also be computed by taking the length of the `nodes`
pub depth: u8,
pub key_state: KeyState,
}
impl KeyPath {
// We only require an opening for an extension,
// if the last node was a stem.
// This occurs in all cases, except for `Empty`
pub(crate) fn requires_extension_proof(&self) -> bool {
!matches!(self.key_state, KeyState::NotFound(KeyNotFound::Empty))
}
}
impl KeyPathFinder {
pub(crate) fn find_key_path<Storage: ReadOnlyHigherDb>(
storage: &Storage,
key: [u8; 32],
) -> KeyPath {
let mut nodes_by_path = Vec::new();
let mut current_node = vec![];
let mut current_node_meta = storage.get_branch_meta(¤t_node).unwrap();
for index in key.iter() {
nodes_by_path.push((
current_node.clone(),
*index,
Meta::Branch(current_node_meta),
));
let depth = nodes_by_path.len() as u8;
let child = storage.get_branch_child(¤t_node, *index);
// If the child is empty, we just return nodes_by_path
// which will have it's last element as a path to a branch node
let child = match child {
Some(child) => child,
None => {
return KeyPath {
nodes: nodes_by_path,
key_state: KeyState::NotFound(KeyNotFound::Empty),
depth,
};
}
};
match child {
crate::database::BranchChild::Stem(stem_id) => {
let stem_meta = storage.get_stem_meta(stem_id).unwrap();
current_node.push(*index);
nodes_by_path.push((current_node, key[31], Meta::Stem(stem_meta)));
let depth = nodes_by_path.len() as u8;
// We have stopped at a stem, however it is not clear whether
// this stem belongs to the key.
// This happens when the key is not in the tree, but a key with a similar path is
//
// However, we note that iff the key is in the trie
// then this must be the stem for the key. Otherwise, it is a bug.
// We assert the invariant below.
if let Some(value) = storage.get_leaf(key) {
assert_eq!(&stem_id, &key[0..31]);
return KeyPath {
nodes: nodes_by_path,
key_state: KeyState::Found(value),
depth,
};
};
// Arriving here means that we have encountered a stem
// however, the key is not present.
// There are two possible cases here, either the stem corresponding to the key is present or it's a completely different stem
//
// We will need this path for update proofs
if stem_id == key[0..31] {
return KeyPath {
nodes: nodes_by_path,
key_state: KeyState::NotFound(KeyNotFound::StemFound),
depth,
};
}
return KeyPath {
nodes: nodes_by_path,
key_state: KeyState::NotFound(KeyNotFound::DifferentStem(stem_id)),
depth,
};
}
crate::database::BranchChild::Branch(branch_meta) => {
current_node_meta = branch_meta;
current_node.push(*index);
continue;
}
}
}
// It should be impossible to arrive here, because we cannot have 32 inner nodes,
// which is the only way for the for loop to iterate until the end
// If this is the case, we have a bug
unreachable!()
}
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/proof/verifier.rs | verkle-trie/src/proof/verifier.rs | use super::VerkleProof;
use crate::{
constants::TWO_POW_128,
group_to_field,
proof::{ExtPresent, UpdateHint},
};
use banderwagon::{trait_defs::*, Element, Fr};
use ipa_multipoint::multiproof::VerifierQuery;
use std::collections::{BTreeMap, BTreeSet};
// TODO Document this better and refactor
pub fn create_verifier_queries(
proof: VerkleProof,
keys: Vec<[u8; 32]>,
values: Vec<Option<[u8; 32]>>,
root: Element,
) -> Option<(Vec<VerifierQuery>, UpdateHint)> {
let commitments_sorted_by_path: Vec<_> =
std::iter::once(root).chain(proof.comms_sorted).collect();
// Get all of the stems
let stems: BTreeSet<[u8; 31]> = keys
.iter()
.map(|key| key[0..31].try_into().unwrap())
.collect();
let mut depths_and_ext_by_stem: BTreeMap<[u8; 31], (ExtPresent, u8)> = BTreeMap::new();
let mut stems_with_extension: BTreeSet<[u8; 31]> = BTreeSet::new();
let mut other_stems_used: BTreeSet<[u8; 31]> = BTreeSet::new();
let mut all_paths: BTreeSet<Vec<u8>> = BTreeSet::new();
let mut all_paths_and_zs: BTreeSet<(Vec<u8>, u8)> = BTreeSet::new();
let mut leaf_values_by_path_and_z: BTreeMap<(Vec<u8>, u8), Fr> = BTreeMap::new();
let mut other_stems_by_prefix: BTreeMap<Vec<u8>, [u8; 31]> = BTreeMap::new();
// Associate stems with their depths and extension status
// depths and extension status were sorted by stem order
// when the prover made the proof
for ((stem, depth), ext_pres) in stems
.into_iter()
.zip(proof.verification_hint.depths)
.zip(proof.verification_hint.extension_present)
{
depths_and_ext_by_stem.insert(stem, (ext_pres, depth));
if ext_pres == ExtPresent::Present {
stems_with_extension.insert(stem);
}
}
for (key, value) in keys.into_iter().zip(values) {
let stem: [u8; 31] = key[0..31].try_into().unwrap();
let (extpres, depth) = depths_and_ext_by_stem[&stem];
// Add branch node information, we know that if the stem has depth `d`
// then there are d -1 inner nodes from the root to the stem
for i in 0..depth {
all_paths.insert(stem[0..i as usize].to_vec());
all_paths_and_zs.insert((stem[0..i as usize].to_vec(), stem[i as usize]));
}
// We can use the extension present
if extpres == ExtPresent::DifferentStem || extpres == ExtPresent::Present {
all_paths.insert(stem[0..depth as usize].to_vec());
all_paths_and_zs.insert((stem[0..depth as usize].to_vec(), 0));
all_paths_and_zs.insert((stem[0..depth as usize].to_vec(), 1));
leaf_values_by_path_and_z.insert((stem[0..depth as usize].to_vec(), 0), Fr::one());
if extpres == ExtPresent::Present {
let suffix = key[31];
let opening_index = if suffix < 128 { 2 } else { 3 };
all_paths_and_zs.insert((stem[0..depth as usize].to_vec(), opening_index));
leaf_values_by_path_and_z.insert(
(stem[0..depth as usize].to_vec(), 1),
Fr::from_le_bytes_mod_order(&stem),
);
let mut suffix_tree_path = stem[0..depth as usize].to_vec();
suffix_tree_path.push(opening_index);
all_paths.insert(suffix_tree_path.clone());
let val_lower_index = 2 * (suffix % 128);
let val_upper_index = val_lower_index + 1;
all_paths_and_zs.insert((suffix_tree_path.clone(), val_lower_index));
all_paths_and_zs.insert((suffix_tree_path.clone(), val_upper_index));
let (value_low, value_high) = match value {
Some(val) => {
let value_low = Fr::from_le_bytes_mod_order(&val[0..16]) + TWO_POW_128;
let value_high = Fr::from_le_bytes_mod_order(&val[16..32]);
(value_low, value_high)
}
None => (Fr::zero(), Fr::zero()),
};
leaf_values_by_path_and_z
.insert((suffix_tree_path.clone(), val_lower_index), value_low);
leaf_values_by_path_and_z
.insert((suffix_tree_path.clone(), val_upper_index), value_high);
} else if extpres == ExtPresent::DifferentStem {
// Since this stem points to a different stem,
// the value was never set
if value.is_some() {
return None;
}
// Check if this stem already has an extension proof
// TODO this was taken from python, redo
// TODO It's left here for compatibility, but it might not be needed
// Logic: stem[...depth] points to a stem in the trie
// it cannot point to two different stems however
// if two stems do share the prefix stem[..depth], then there will be
// an inner node present
// depth cannot be 31 because then that would mean that stem[...depth]
// is looking for it's tem. This is not possible, because we have already
// noted that ExtPresent is DifferentStem
assert!(depth != stem.len() as u8);
let mut other_stem = None;
let mut found: Vec<_> = stems_with_extension
.iter()
.filter(|x| x[0..depth as usize] == stem[0..depth as usize])
.collect();
if found.len() > 1 {
// TODO return error instead when we change the func signature to return Result instead of bool
panic!("found more than one instance of stem_with_extension at depth {}, see: {:?}", depth, found)
} else if found.len() == 1 {
other_stem = found.pop();
}
// None means that we need to create the extension proof
else if found.is_empty() {
let mut found: Vec<_> = proof
.verification_hint
.diff_stem_no_proof
.iter()
.filter(|x| x[0..depth as usize] == stem[0..depth as usize])
.collect();
let encountered_stem = found.pop().expect(
"ExtPresent::DifferentStem flag but we cannot find the encountered stem",
);
other_stem = Some(encountered_stem);
other_stems_used.insert(*encountered_stem);
// Add extension node to proof in particular, we only want to open at (1, stem)
leaf_values_by_path_and_z.insert(
(stem[0..depth as usize].to_vec(), 1),
Fr::from_le_bytes_mod_order(&encountered_stem[..]),
);
}
other_stems_by_prefix
.insert(stem[0..depth as usize].to_vec(), *other_stem.unwrap());
}
} else if extpres == ExtPresent::None {
// If the extension was not present, then the value should be None
if value.is_some() {
return None;
}
//TODO: we may need to rewrite the prover/verifier algorithm to fix this if statement properly.
// This is a special case. If the depth == 1 and the there is no stem to prove the proof of absence
// then this means that the path should point to the root node.
//
// TODO: Fix in python codebase and check for this in go code
if depth == 1 {
let root_path = vec![];
leaf_values_by_path_and_z.insert((root_path, stem[depth as usize - 1]), Fr::zero());
} else {
leaf_values_by_path_and_z.insert(
(stem[0..depth as usize].to_vec(), stem[depth as usize - 1]),
Fr::zero(),
);
}
}
}
assert!(proof.verification_hint.diff_stem_no_proof == other_stems_used);
assert!(commitments_sorted_by_path.len() == all_paths.len());
let commitments_by_path: BTreeMap<Vec<_>, Element> = all_paths
.into_iter()
.zip(commitments_sorted_by_path)
.collect();
let commitment_by_path_and_z: BTreeMap<_, _> = all_paths_and_zs
.iter()
.cloned()
.map(|(path, z)| {
let comm = commitments_by_path[&path];
((path, z), comm)
})
.collect();
let mut ys_by_path_and_z: BTreeMap<(Vec<u8>, u8), Fr> = BTreeMap::new();
for (path, z) in &all_paths_and_zs {
let mut child_path = path.clone();
child_path.push(*z);
let y = match leaf_values_by_path_and_z.get(&(path.clone(), *z)) {
Some(val) => *val,
None => match commitments_by_path.get(&child_path) {
Some(commitment_by_path) => group_to_field(commitment_by_path),
None => Fr::zero(),
},
};
ys_by_path_and_z.insert((path.clone(), *z), y);
}
let cs = commitment_by_path_and_z.values();
let zs = all_paths_and_zs
.into_iter()
.map(|(_, z)| Fr::from(z as u128));
let ys = ys_by_path_and_z.into_values();
let mut queries = Vec::with_capacity(cs.len());
for ((y, z), comm) in ys.into_iter().zip(zs).zip(cs) {
let query = VerifierQuery {
commitment: *comm,
point: z,
result: y,
};
queries.push(query);
}
let update_hint = UpdateHint {
depths_and_ext_by_stem,
commitments_by_path,
other_stems_by_prefix,
};
Some((queries, update_hint))
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/database/default.rs | verkle-trie/src/database/default.rs | use super::{
generic::GenericBatchDB, memory_db::MemoryDb, BranchChild, BranchMeta, Flush, ReadOnlyHigherDb,
StemMeta, WriteOnlyHigherDb,
};
use crate::database::generic::GenericBatchWriter;
use std::collections::HashMap;
use verkle_db::{BareMetalDiskDb, BareMetalKVDb, BatchDB, BatchWriter};
// A convenient structure that allows the end user to just implement BatchDb and BareMetalDiskDb
// Then the methods needed for the Trie are auto implemented. In particular, ReadOnlyHigherDb and WriteOnlyHigherDb
// are implemented
// All nodes at this level or above will be cached in memory
const CACHE_DEPTH: u8 = 4;
// A wrapper database for those that just want to implement the permanent storage
pub struct VerkleDb<Storage> {
// The underlying key value database
// We try to avoid fetching from this, and we only store at the end of a batch insert
pub storage: GenericBatchDB<Storage>,
// This stores the key-value pairs that we need to insert into the storage
// This is flushed after every batch insert
pub batch: MemoryDb,
// This stores the top 3 layers of the trie, since these are the most accessed
// in the trie on average
pub cache: MemoryDb,
}
impl<S: BareMetalDiskDb> BareMetalDiskDb for VerkleDb<S> {
fn from_path<P: AsRef<std::path::Path>>(path: P) -> Self {
VerkleDb {
storage: GenericBatchDB::from_path(path),
batch: MemoryDb::new(),
cache: MemoryDb::new(),
}
}
const DEFAULT_PATH: &'static str = S::DEFAULT_PATH;
}
impl<S: BatchDB> Flush for VerkleDb<S> {
// flush the batch to the storage
fn flush(&mut self) {
let writer = S::BatchWrite::new();
let mut w = GenericBatchWriter { inner: writer };
let now = std::time::Instant::now();
for (key, value) in self.batch.leaf_table.iter() {
w.insert_leaf(*key, *value, 0);
}
for (key, meta) in self.batch.stem_table.iter() {
w.insert_stem(*key, *meta, 0);
}
for (branch_id, b_child) in self.batch.branch_table.iter() {
let branch_id = branch_id.clone();
match b_child {
BranchChild::Stem(stem_id) => {
w.add_stem_as_branch_child(branch_id, *stem_id, 0);
}
BranchChild::Branch(b_meta) => {
w.insert_branch(branch_id, *b_meta, 0);
}
};
}
let num_items = self.batch.num_items();
println!(
"write to batch time: {}, item count : {}",
now.elapsed().as_millis(),
num_items
);
self.storage.flush(w.inner);
self.batch.clear();
}
}
impl<S: BareMetalKVDb> ReadOnlyHigherDb for VerkleDb<S> {
fn get_leaf(&self, key: [u8; 32]) -> Option<[u8; 32]> {
// First try to get it from cache
if let Some(val) = self.cache.get_leaf(key) {
return Some(val);
}
// Now try to get it from batch
if let Some(val) = self.batch.get_leaf(key) {
return Some(val);
}
// Now try the disk
self.storage.get_leaf(key)
}
fn get_stem_meta(&self, stem_key: [u8; 31]) -> Option<StemMeta> {
// First try to get it from cache
if let Some(val) = self.cache.get_stem_meta(stem_key) {
return Some(val);
}
// Now try to get it from batch
if let Some(val) = self.batch.get_stem_meta(stem_key) {
return Some(val);
}
// Now try the disk
self.storage.get_stem_meta(stem_key)
}
fn get_branch_meta(&self, key: &[u8]) -> Option<BranchMeta> {
// First try to get it from cache
if let Some(val) = self.cache.get_branch_meta(key) {
return Some(val);
}
// Now try to get it from batch
if let Some(val) = self.batch.get_branch_meta(key) {
return Some(val);
}
// Now try the disk
self.storage.get_branch_meta(key)
}
fn get_branch_child(&self, branch_id: &[u8], index: u8) -> Option<BranchChild> {
// First try to get it from cache
if let Some(val) = self.cache.get_branch_child(branch_id, index) {
return Some(val);
}
// Now try to get it from batch
if let Some(val) = self.batch.get_branch_child(branch_id, index) {
return Some(val);
}
// Now try the disk
self.storage.get_branch_child(branch_id, index)
}
fn get_branch_children(&self, branch_id: &[u8]) -> Vec<(u8, BranchChild)> {
// Check the depth. If the branch is at CACHE_DEPTH or lower, then it will be in the cache
// TODO this assumes that the cache is populated on startup from disk
if branch_id.len() as u8 <= CACHE_DEPTH {
return self.cache.get_branch_children(branch_id);
}
// First get the children from storage
let mut children: HashMap<_, _> = self
.storage
.get_branch_children(branch_id)
.into_iter()
.collect();
//
// Then get the children from the batch
let children_from_batch = self.batch.get_branch_children(branch_id);
//
// Now insert the children from batch into the storage children as they will be fresher
// overwriting if they have the same indices
for (index, val) in children_from_batch {
children.insert(index, val);
}
children.into_iter().collect()
}
fn get_stem_children(&self, stem_key: [u8; 31]) -> Vec<(u8, [u8; 32])> {
// Stems don't have a depth, however the children for all stem will always be on the same depth
// If we get any children for the stem in the cache storage, then this means we have collected all of them
// TODO this assumes that the cache is populated on startup from disk
let children = self.cache.get_stem_children(stem_key);
if !children.is_empty() {
return children;
}
// It's possible that they are in disk storage and that batch storage has some recent updates
// First get the children from storage
let mut children: HashMap<_, _> = self
.storage
.get_stem_children(stem_key)
.into_iter()
.collect();
//
// Then get the children from the batch
let children_from_batch = self.batch.get_stem_children(stem_key);
//
// Now insert the children from batch into the storage children as they will be fresher
// overwriting if they have the same indices
for (index, val) in children_from_batch {
children.insert(index, val);
}
children.into_iter().collect()
}
}
// Always save in the permanent storage and only save in the memorydb if the depth is <= cache depth
impl<S> WriteOnlyHigherDb for VerkleDb<S> {
fn insert_leaf(&mut self, key: [u8; 32], value: [u8; 32], depth: u8) -> Option<Vec<u8>> {
if depth <= CACHE_DEPTH {
self.cache.insert_leaf(key, value, depth);
}
self.batch.insert_leaf(key, value, depth)
}
fn insert_stem(&mut self, key: [u8; 31], meta: StemMeta, depth: u8) -> Option<StemMeta> {
if depth <= CACHE_DEPTH {
self.cache.insert_stem(key, meta, depth);
}
self.batch.insert_stem(key, meta, depth)
}
fn add_stem_as_branch_child(
&mut self,
branch_child_id: Vec<u8>,
stem_id: [u8; 31],
depth: u8,
) -> Option<BranchChild> {
if depth <= CACHE_DEPTH {
self.cache
.add_stem_as_branch_child(branch_child_id.clone(), stem_id, depth);
}
self.batch
.add_stem_as_branch_child(branch_child_id, stem_id, depth)
}
fn insert_branch(&mut self, key: Vec<u8>, meta: BranchMeta, depth: u8) -> Option<BranchMeta> {
if depth <= CACHE_DEPTH {
self.cache.insert_branch(key.clone(), meta, depth);
}
self.batch.insert_branch(key, meta, depth)
}
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/database/memory_db.rs | verkle-trie/src/database/memory_db.rs | use super::{BranchChild, Flush, ReadOnlyHigherDb, WriteOnlyHigherDb};
use crate::database::{BranchMeta, StemMeta};
use std::collections::HashMap;
#[derive(Debug, Clone)]
pub struct MemoryDb {
pub leaf_table: HashMap<[u8; 32], [u8; 32]>,
pub stem_table: HashMap<[u8; 31], StemMeta>,
// TODO maybe change to use BChild and also include the index in the key (Vec<u8>, u8)
pub branch_table: HashMap<Vec<u8>, BranchChild>,
}
impl MemoryDb {
pub fn new() -> Self {
MemoryDb {
leaf_table: HashMap::new(),
stem_table: HashMap::new(),
branch_table: HashMap::new(),
}
}
pub fn num_items(&self) -> usize {
self.leaf_table.len() + self.stem_table.len() + self.branch_table.len()
}
pub fn clear(&mut self) {
self.leaf_table.clear();
self.stem_table.clear();
self.branch_table.clear();
}
}
impl Default for MemoryDb {
fn default() -> Self {
Self::new()
}
}
impl ReadOnlyHigherDb for MemoryDb {
fn get_stem_meta(&self, stem_key: [u8; 31]) -> Option<StemMeta> {
self.stem_table.get(&stem_key).copied()
}
fn get_branch_meta(&self, key: &[u8]) -> Option<BranchMeta> {
let branch_child = match self.branch_table.get(key) {
Some(b_child) => b_child,
None => return None,
};
match branch_child {
BranchChild::Stem(stem_id) => panic!(
"expected branch meta data, however under this path there is a stem: {}",
hex::encode(stem_id)
),
BranchChild::Branch(b_meta) => Some(*b_meta),
}
}
fn get_leaf(&self, key: [u8; 32]) -> Option<[u8; 32]> {
self.leaf_table.get(&key).copied()
}
fn get_branch_children(&self, branch_id: &[u8]) -> Vec<(u8, BranchChild)> {
let mut children = Vec::with_capacity(256);
for i in 0u8..=255 {
let mut child = branch_id.to_vec();
child.push(i);
let value = self.branch_table.get(&child);
// If its a stem, we return the stem_id
// If it's a branch, we return the branch_id
// TODO: we could return the BranchChild instead and leave the caller to do what they want with it
if let Some(b_child) = value {
children.push((i, *b_child))
}
}
children
}
fn get_stem_children(&self, stem_key: [u8; 31]) -> Vec<(u8, [u8; 32])> {
let mut children = Vec::with_capacity(256);
for i in 0u8..=255 {
let mut child = stem_key.to_vec();
child.push(i);
let child: [u8; 32] = child.try_into().unwrap();
let value = self.leaf_table.get(&child);
if let Some(i_vec) = value {
children.push((i, i_vec.to_vec().try_into().unwrap()))
}
}
children
}
fn get_branch_child(&self, branch_id: &[u8], index: u8) -> Option<BranchChild> {
let mut child_index = Vec::with_capacity(branch_id.len());
child_index.extend_from_slice(branch_id);
child_index.push(index);
self.branch_table.get(&child_index).copied()
}
}
impl WriteOnlyHigherDb for MemoryDb {
fn insert_stem(&mut self, key: [u8; 31], meta: StemMeta, _depth: u8) -> Option<StemMeta> {
self.stem_table.insert(key, meta)
}
fn insert_branch(&mut self, key: Vec<u8>, meta: BranchMeta, _depth: u8) -> Option<BranchMeta> {
let b_child = match self.branch_table.insert(key, BranchChild::Branch(meta)) {
Some(b_child) => b_child,
None => return None,
};
match b_child {
BranchChild::Stem(_) => None, // If its a stem, we return None, this only happens in ChainInsert
BranchChild::Branch(b_meta) => Some(b_meta),
}
}
fn insert_leaf(&mut self, key: [u8; 32], value: [u8; 32], _depth: u8) -> Option<Vec<u8>> {
self.leaf_table
.insert(key, value)
.map(|old_val| old_val.to_vec())
}
fn add_stem_as_branch_child(
&mut self,
branch_child_id: Vec<u8>,
stem_id: [u8; 31],
_depth: u8,
) -> Option<BranchChild> {
self.branch_table
.insert(branch_child_id, BranchChild::Stem(stem_id))
}
}
impl Flush for MemoryDb {
fn flush(&mut self) {
// No-op since this database is in memory
// The flush trait is for databases which have a
// memory database and a disk storage, flush signals them to flush the
// memory to database to disk
//
// This is implemented for the MemoryDb so that we can use it for
// tests in the Trie
}
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/database/meta.rs | verkle-trie/src/database/meta.rs | #![allow(clippy::identity_op)]
#![allow(clippy::large_enum_variant)]
use banderwagon::trait_defs::*;
use banderwagon::{Element, Fr};
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct StemMeta {
pub c_1: Element,
pub hash_c1: Fr,
pub c_2: Element,
pub hash_c2: Fr,
pub stem_commitment: Element,
pub hash_stem_commitment: Fr,
}
impl std::fmt::Debug for StemMeta {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("StemMeta")
.field(
"c_1",
&hex::encode(compress_point_to_array(&self.c_1).unwrap()),
)
.field(
"c_2",
&hex::encode(compress_point_to_array(&self.c_2).unwrap()),
)
.field(
"hash_c1",
&hex::encode(scalar_to_array(&self.hash_c1).unwrap()),
)
.field(
"hash_c2",
&hex::encode(scalar_to_array(&self.hash_c2).unwrap()),
)
.field(
"stem commitment",
&hex::encode(compress_point_to_array(&self.stem_commitment).unwrap()),
)
.field(
"hash_stem_commitment",
&hex::encode(scalar_to_array(&self.hash_stem_commitment).unwrap()),
)
.finish()
}
}
fn point_to_array(p: &Element) -> Result<[u8; 64], SerializationError> {
let mut bytes = [0u8; 64];
p.serialize_uncompressed(&mut bytes[..])?;
Ok(bytes)
}
fn compress_point_to_array(p: &Element) -> Result<[u8; 32], SerializationError> {
let mut bytes = [0u8; 32];
p.serialize_compressed(&mut bytes[..])?;
Ok(bytes)
}
fn scalar_to_array(scalar: &Fr) -> Result<[u8; 32], SerializationError> {
let mut bytes = [0u8; 32];
scalar.serialize_uncompressed(&mut bytes[..])?;
Ok(bytes)
}
impl FromBytes<Vec<u8>> for StemMeta {
// panic if we cannot deserialize, do not call this method if you are unsure if the data is
// not structured properly. We can guarantee this in verkle trie.
fn from_bytes(bytes: Vec<u8>) -> Result<StemMeta, SerializationError> {
let len = bytes.len();
// TODO: Explain where this number comes from
if len != 64 * 3 + 32 * 3 {
return Err(SerializationError::InvalidData); // TODO not the most accurate error msg for now
}
let point_bytes = &bytes[0..64 * 3];
#[allow(clippy::erasing_op)]
let c_1 = Element::deserialize_uncompressed(&point_bytes[0 * 64..1 * 64])?;
let c_2 = Element::deserialize_uncompressed(&point_bytes[1 * 64..2 * 64])?;
let stem_commitment = Element::deserialize_uncompressed(&point_bytes[2 * 64..3 * 64])?;
let scalar_bytes = &bytes[64 * 3..];
#[allow(clippy::erasing_op)]
let hash_c1 = Fr::deserialize_uncompressed(&scalar_bytes[0 * 32..1 * 32])?;
let hash_c2 = Fr::deserialize_uncompressed(&scalar_bytes[1 * 32..2 * 32])?;
let hash_stem_commitment = Fr::deserialize_uncompressed(&scalar_bytes[2 * 32..3 * 32])?;
Ok(StemMeta {
c_1,
hash_c1,
c_2,
hash_c2,
stem_commitment,
hash_stem_commitment,
})
}
}
impl ToBytes<Vec<u8>> for StemMeta {
fn to_bytes(&self) -> Result<Vec<u8>, SerializationError> {
let mut bytes = Vec::with_capacity(3 * (64 + 32));
bytes.extend(point_to_array(&self.c_1)?);
bytes.extend(point_to_array(&self.c_2)?);
bytes.extend(point_to_array(&self.stem_commitment)?);
bytes.extend(scalar_to_array(&self.hash_c1)?);
bytes.extend(scalar_to_array(&self.hash_c2)?);
bytes.extend(scalar_to_array(&self.hash_stem_commitment)?);
Ok(bytes)
}
}
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct BranchMeta {
pub commitment: Element,
pub hash_commitment: Fr,
}
impl std::fmt::Debug for BranchMeta {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("BranchMeta")
.field(
"commitment",
&hex::encode(compress_point_to_array(&self.commitment).unwrap()),
)
.field(
"hash_commitment",
&hex::encode(scalar_to_array(&self.hash_commitment).unwrap()),
)
.finish()
}
}
impl BranchMeta {
pub fn zero() -> BranchMeta {
use banderwagon::trait_defs::*;
BranchMeta {
commitment: Element::zero(),
hash_commitment: Fr::zero(),
}
}
}
use crate::from_to_bytes::{FromBytes, ToBytes};
impl FromBytes<Vec<u8>> for BranchMeta {
fn from_bytes(bytes: Vec<u8>) -> Result<BranchMeta, SerializationError> {
let len = bytes.len();
if !len == 32 + 64 {
return Err(SerializationError::InvalidData);
}
let point_bytes = &bytes[0..64];
let scalar_bytes = &bytes[64..64 + 32];
let commitment = Element::deserialize_uncompressed(point_bytes)?;
let hash_commitment = Fr::deserialize_uncompressed(scalar_bytes)?;
Ok(BranchMeta {
commitment,
hash_commitment,
})
}
}
impl ToBytes<Vec<u8>> for BranchMeta {
fn to_bytes(&self) -> Result<Vec<u8>, SerializationError> {
let mut bytes = Vec::with_capacity(64 + 32);
bytes.extend(point_to_array(&self.commitment)?);
bytes.extend(scalar_to_array(&self.hash_commitment)?);
Ok(bytes)
}
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum Meta {
Stem(StemMeta),
Branch(BranchMeta),
}
impl Meta {
pub fn into_stem(self) -> StemMeta {
match self {
Meta::Stem(sm) => sm,
Meta::Branch(_) => panic!("item is a branch and not a stem"),
}
}
pub fn is_stem_meta(&self) -> bool {
match self {
Meta::Stem(_) => true,
Meta::Branch(_) => false,
}
}
pub fn is_branch_meta(&self) -> bool {
match self {
Meta::Stem(_) => false,
Meta::Branch(_) => true,
}
}
pub fn into_branch(self) -> BranchMeta {
match self {
Meta::Stem(_) => panic!("item is a stem and not a branch"),
Meta::Branch(bm) => bm,
}
}
}
impl From<StemMeta> for Meta {
fn from(sm: StemMeta) -> Self {
Meta::Stem(sm)
}
}
impl From<BranchMeta> for Meta {
fn from(bm: BranchMeta) -> Self {
Meta::Branch(bm)
}
}
#[derive(Debug, Clone, Copy)]
pub enum BranchChild {
Stem([u8; 31]),
Branch(BranchMeta),
}
impl ToBytes<Vec<u8>> for BranchChild {
fn to_bytes(&self) -> Result<Vec<u8>, SerializationError> {
match self {
BranchChild::Stem(stem_id) => Ok(stem_id.to_vec()),
BranchChild::Branch(bm) => Ok(bm.to_bytes().unwrap().to_vec()),
}
}
}
impl FromBytes<Vec<u8>> for BranchChild {
fn from_bytes(bytes: Vec<u8>) -> Result<BranchChild, SerializationError> {
if bytes.len() == 31 {
return Ok(BranchChild::Stem(bytes.try_into().unwrap()));
}
let branch_as_bytes = BranchMeta::from_bytes(bytes)?;
Ok(BranchChild::Branch(branch_as_bytes))
}
}
impl BranchChild {
pub fn is_branch(&self) -> bool {
match self {
BranchChild::Stem(_) => false,
BranchChild::Branch(_) => true,
}
}
pub fn branch(&self) -> Option<BranchMeta> {
match self {
BranchChild::Stem(_) => None,
BranchChild::Branch(bm) => Some(*bm),
}
}
pub fn stem(&self) -> Option<[u8; 31]> {
match self {
BranchChild::Stem(stem_id) => Some(*stem_id),
BranchChild::Branch(_) => None,
}
}
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/database/generic.rs | verkle-trie/src/database/generic.rs | use super::{BranchChild, BranchMeta, ReadOnlyHigherDb, StemMeta, WriteOnlyHigherDb};
use crate::from_to_bytes::{FromBytes, ToBytes};
use verkle_db::{BareMetalDiskDb, BareMetalKVDb, BatchDB, BatchWriter};
// The purpose of this file is to allows us to implement generic implementation for BatchWriter and BareMetalKVDb
// Anything that implements BatchWriter can be used as a WriteOnlyHigherDb
// Anything that implements BareMetalKVDb can be used as a ReadOnlyHigherDb
pub(crate) const LEAF_TABLE_MARKER: u8 = 0;
pub(crate) const STEM_TABLE_MARKER: u8 = 1;
pub(crate) const BRANCH_TABLE_MARKER: u8 = 2;
// GenericBatchWriter does not write the values to disk
// We need to flush them later on
// This struct allows us to provide default implementations to everything that is a
// BatchWriter
pub struct GenericBatchWriter<T: BatchWriter> {
pub inner: T,
}
impl<T: BatchWriter> WriteOnlyHigherDb for GenericBatchWriter<T> {
fn insert_leaf(&mut self, key: [u8; 32], value: [u8; 32], _depth: u8) -> Option<Vec<u8>> {
let mut labelled_key = Vec::with_capacity(key.len() + 1);
labelled_key.push(LEAF_TABLE_MARKER);
labelled_key.extend_from_slice(&key);
self.inner.batch_put(&labelled_key, &value);
None
}
fn insert_stem(&mut self, key: [u8; 31], meta: StemMeta, _depth: u8) -> Option<StemMeta> {
let mut labelled_key = Vec::with_capacity(key.len() + 1);
labelled_key.push(STEM_TABLE_MARKER);
labelled_key.extend_from_slice(&key);
self.inner
.batch_put(&labelled_key, &meta.to_bytes().unwrap());
None
}
fn add_stem_as_branch_child(
&mut self,
branch_child_id: Vec<u8>,
stem_id: [u8; 31],
_depth: u8,
) -> Option<BranchChild> {
let mut labelled_key = Vec::with_capacity(branch_child_id.len() + 1);
labelled_key.push(BRANCH_TABLE_MARKER);
labelled_key.extend(branch_child_id);
self.inner.batch_put(&labelled_key, &stem_id);
None
}
fn insert_branch(&mut self, key: Vec<u8>, meta: BranchMeta, _depth: u8) -> Option<BranchMeta> {
let mut labelled_key = Vec::with_capacity(key.len() + 1);
labelled_key.push(BRANCH_TABLE_MARKER);
labelled_key.extend_from_slice(&key);
self.inner
.batch_put(&labelled_key, &meta.to_bytes().unwrap());
None
}
}
// This struct allows us to provide a default implementation of ReadOnlyHigherDB to
// all structs that implement BatchDB
pub struct GenericBatchDB<T> {
inner: T,
}
impl<T> std::ops::Deref for GenericBatchDB<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<T: BatchDB> BatchDB for GenericBatchDB<T> {
type BatchWrite = T::BatchWrite;
fn flush(&mut self, batch: Self::BatchWrite) {
self.inner.flush(batch)
}
}
impl<T: BareMetalDiskDb> BareMetalDiskDb for GenericBatchDB<T> {
fn from_path<P: AsRef<std::path::Path>>(path: P) -> Self {
Self {
inner: T::from_path(path),
}
}
const DEFAULT_PATH: &'static str = T::DEFAULT_PATH;
}
impl<T: BareMetalKVDb> ReadOnlyHigherDb for GenericBatchDB<T> {
fn get_leaf(&self, key: [u8; 32]) -> Option<[u8; 32]> {
let mut labelled_key = Vec::with_capacity(key.len() + 1);
labelled_key.push(LEAF_TABLE_MARKER);
labelled_key.extend_from_slice(&key);
self.inner
.fetch(&labelled_key)
.map(|bytes| bytes.try_into().unwrap())
}
fn get_stem_meta(&self, stem_key: [u8; 31]) -> Option<StemMeta> {
let mut labelled_key = Vec::with_capacity(stem_key.len() + 1);
labelled_key.push(STEM_TABLE_MARKER);
labelled_key.extend_from_slice(&stem_key);
self.inner
.fetch(&labelled_key)
.map(|old_val_bytes| StemMeta::from_bytes(old_val_bytes).unwrap())
}
fn get_branch_children(&self, branch_id: &[u8]) -> Vec<(u8, BranchChild)> {
let mut children = Vec::with_capacity(256);
let mut labelled_key = Vec::with_capacity(branch_id.len() + 1);
labelled_key.push(BRANCH_TABLE_MARKER);
labelled_key.extend_from_slice(branch_id);
for i in 0u8..=255 {
let mut child = labelled_key.clone();
child.push(i);
let child_value = self.inner.fetch(&child);
if let Some(x) = child_value {
children.push((i, BranchChild::from_bytes(x).unwrap()))
}
}
children
}
fn get_branch_meta(&self, key: &[u8]) -> Option<BranchMeta> {
let mut labelled_key = Vec::with_capacity(key.len() + 1);
labelled_key.push(BRANCH_TABLE_MARKER);
labelled_key.extend_from_slice(key);
self.inner
.fetch(&labelled_key)
.map(|old_val_bytes| BranchMeta::from_bytes(old_val_bytes).unwrap())
}
fn get_branch_child(&self, branch_id: &[u8], index: u8) -> Option<BranchChild> {
let mut labelled_key = Vec::with_capacity(branch_id.len() + 2);
labelled_key.push(BRANCH_TABLE_MARKER);
labelled_key.extend_from_slice(branch_id);
labelled_key.push(index);
self.inner
.fetch(&labelled_key)
.map(|old_val_bytes| BranchChild::from_bytes(old_val_bytes).unwrap())
}
fn get_stem_children(&self, stem_key: [u8; 31]) -> Vec<(u8, [u8; 32])> {
let mut children = Vec::with_capacity(256);
let mut labelled_key = Vec::with_capacity(stem_key.len() + 1);
labelled_key.push(LEAF_TABLE_MARKER);
labelled_key.extend_from_slice(&stem_key);
for i in 0u8..=255 {
let mut child = labelled_key.clone();
child.push(i);
let child_value = self.inner.fetch(&child);
if let Some(x) = child_value {
children.push((i, x.try_into().unwrap()))
}
}
children
}
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/tests/golang_interop.rs | verkle-trie/tests/golang_interop.rs | use std::sync::Mutex;
use once_cell::sync::Lazy;
use verkle_trie::{database::memory_db::MemoryDb, Trie, TrieTrait, VerkleConfig};
pub static CONFIG: Lazy<Mutex<VerkleConfig<MemoryDb>>> =
Lazy::new(|| Mutex::new(VerkleConfig::new(MemoryDb::new())));
// This is a fixed test, that checks whether the verkle trie logic has changed
// This test is also in the golang code, see: https://github.com/ethereum/go-verkle/blob/f8289fc59149a40673e56f790f6edaec64992294/tree_test.go#L1081
#[test]
fn golang_rust_interop() {
let mut trie = Trie::new(CONFIG.lock().unwrap().clone());
let keys = vec![
[
245, 110, 100, 66, 36, 244, 87, 100, 144, 207, 224, 222, 20, 36, 164, 83, 34, 18, 82,
155, 254, 55, 71, 19, 216, 78, 125, 126, 142, 146, 114, 0,
],
[
245, 110, 100, 66, 36, 244, 87, 100, 144, 207, 224, 222, 20, 36, 164, 83, 34, 18, 82,
155, 254, 55, 71, 19, 216, 78, 125, 126, 142, 146, 114, 1,
],
[
245, 110, 100, 66, 36, 244, 87, 100, 144, 207, 224, 222, 20, 36, 164, 83, 34, 18, 82,
155, 254, 55, 71, 19, 216, 78, 125, 126, 142, 146, 114, 2,
],
[
245, 110, 100, 66, 36, 244, 87, 100, 144, 207, 224, 222, 20, 36, 164, 83, 34, 18, 82,
155, 254, 55, 71, 19, 216, 78, 125, 126, 142, 146, 114, 3,
],
[
245, 110, 100, 66, 36, 244, 87, 100, 144, 207, 224, 222, 20, 36, 164, 83, 34, 18, 82,
155, 254, 55, 71, 19, 216, 78, 125, 126, 142, 146, 114, 4,
],
];
let values = vec![
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0,
],
[
0, 0, 100, 167, 179, 182, 224, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
],
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0,
],
[
197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182,
83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112,
],
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0,
],
];
let key_vals = keys.into_iter().zip(values);
trie.insert(key_vals);
let root = trie.root_commitment();
let expected = "10ed89d89047bb168baa4e69b8607e260049e928ddbcb2fdd23ea0f4182b1f8a";
use banderwagon::trait_defs::*;
let mut root_bytes = [0u8; 32];
root.serialize_compressed(&mut root_bytes[..]).unwrap();
assert_eq!(hex::encode(root_bytes), expected);
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/tests/trie_fuzzer.rs | verkle-trie/tests/trie_fuzzer.rs | use std::sync::Mutex;
use ipa_multipoint::committer::Committer;
use once_cell::sync::Lazy;
use verkle_trie::{database::memory_db::MemoryDb, Trie, TrieTrait, VerkleConfig};
pub static CONFIG: Lazy<Mutex<VerkleConfig<MemoryDb>>> =
Lazy::new(|| Mutex::new(VerkleConfig::new(MemoryDb::new())));
#[test]
fn test_vector_insert_100_step() {
let mut prng = BasicPRNG::default();
let mut trie = Trie::new(CONFIG.lock().unwrap().clone());
let batch_size = 100;
// N = 100
step_test_helper(
&mut trie,
&mut prng,
batch_size,
"afb01df826bd42ddea9001551980f7cfa74f0ca7e0ba36a9079dea4062848600",
);
// N = 200
step_test_helper(
&mut trie,
&mut prng,
batch_size,
"4cd6573f3602df0a1438c894e2f0f465e16537c4474e3ab35ee74d5b3afe180f",
);
// N = 300
step_test_helper(
&mut trie,
&mut prng,
batch_size,
"1da1675938ba4ad2545fd163dc2053212cd75b54fc44e70f11fd20b05363650b",
);
// N = 400
step_test_helper(
&mut trie,
&mut prng,
batch_size,
"bdad99347763dc06765e329da53ae85333a9d89fa9e06ef3fccf30c8c89cb804",
);
// N = 500
step_test_helper(
&mut trie,
&mut prng,
batch_size,
"cf0b7ea967a755f6c09762aa4a650899bb79d21ef56f1fe6672621149e639905",
);
}
#[test]
fn test_vector_insert_1000_step() {
let mut prng = BasicPRNG::default();
let mut trie = Trie::new(CONFIG.lock().unwrap().clone());
let batch_size = 1_000;
// N = 1_000
step_test_helper(
&mut trie,
&mut prng,
batch_size,
"c94ef4103861b4788602e503f70ad1f47779d6b8b367532d7b4748c401f7391c",
);
// N = 2_000
step_test_helper(
&mut trie,
&mut prng,
batch_size,
"4284fb75185662925ae4b45143184147db4fd297db1912a6ca17ee3040d21104",
);
// N = 3_000
step_test_helper(
&mut trie,
&mut prng,
batch_size,
"447fa30818141f6034b99a2ece305de601e4af3b635ad216e2a0248a7039240c",
);
// N = 4_000
step_test_helper(
&mut trie,
&mut prng,
batch_size,
"9647ad8f43a64a08fd1d56af5765d9d9e265eb9be703eeb80c2117242c358305",
);
// N = 5_000
step_test_helper(
&mut trie,
&mut prng,
batch_size,
"26ed4b641a6a974f09b1b012784580d96cfbbd99f0eed9db541a89f9f2883201",
);
}
fn step_test_helper<C: Committer>(
trie: &mut Trie<MemoryDb, C>,
prng: &mut BasicPRNG,
num_keys: usize,
expected: &str,
) {
let keys = prng.rand_vec_bytes(num_keys);
let key_vals = keys.into_iter().map(|key_bytes| (key_bytes, key_bytes));
trie.insert(key_vals);
let root = trie.root_hash();
use banderwagon::trait_defs::*;
let mut root_bytes = [0u8; 32];
root.serialize_compressed(&mut root_bytes[..]).unwrap();
assert_eq!(hex::encode(root_bytes), expected);
}
// A test structure that allows us to have a seedable prng
// that is easy to implement in both python, go and Rust
// This is only used for tests
struct BasicPRNG {
seed: [u8; 32],
counter: u64,
}
impl Default for BasicPRNG {
fn default() -> Self {
BasicPRNG::new([
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0,
])
}
}
impl BasicPRNG {
pub fn new(seed: [u8; 32]) -> BasicPRNG {
let counter = 0u64;
BasicPRNG { counter, seed }
}
pub fn rand_bytes(&mut self) -> [u8; 32] {
use sha2::Digest;
let mut hasher = sha2::Sha256::new();
hasher.update(&self.counter.to_le_bytes()[..]);
hasher.update(&self.seed[..]);
let res: [u8; 32] = hasher.finalize().into();
self.counter += 1;
res
}
pub fn rand_vec_bytes(&mut self, num_keys: usize) -> Vec<[u8; 32]> {
(0..num_keys).map(|_| self.rand_bytes()).collect()
}
}
#[test]
fn prng_consistent() {
let mut prng = BasicPRNG::default();
let expected = [
"2c34ce1df23b838c5abf2a7f6437cca3d3067ed509ff25f11df6b11b582b51eb",
"b68f593141969cfeddf2011667ccdca92d2d22b414194bdf4ccbaa2833c85be2",
"74d8b89f49a16dd0a338f1dc90fe470f3137d7df12cf0b76c82b0b5f2fa9028b",
];
for expected_output in expected {
assert_eq!(hex::encode(prng.rand_bytes()), expected_output)
}
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/benches/benchmark_main.rs | verkle-trie/benches/benchmark_main.rs | use criterion::criterion_main;
mod benchmarks;
criterion_main! {
benchmarks::insert_10k::benches,
// benchmarks::edit_10k::benches,
benchmarks::proof_10k::benches,
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/benches/benchmarks/insert_10k.rs | verkle-trie/benches/benchmarks/insert_10k.rs | use crate::benchmarks::util::{generate_set_of_keys, KEYS_10K};
use criterion::BenchmarkId;
use criterion::{black_box, criterion_group, BatchSize, Criterion};
use verkle_trie::database::memory_db::MemoryDb;
use verkle_trie::trie::Trie;
use verkle_trie::DefaultConfig;
use verkle_trie::TrieTrait;
fn insert_10k_from_10mil_step(c: &mut Criterion) {
let mut group = c.benchmark_group("insert 10k");
for initial_keys in (0..=100_000).step_by(100_000) {
// let db = verkle_db::DefaultSledDb::from_path(&temp_dir);
let db = MemoryDb::new();
let config = DefaultConfig::new(db);
let mut trie = Trie::new(config);
// Initial set of keys
let keys = generate_set_of_keys(initial_keys);
let key_vals = keys.into_iter().map(|key_bytes| (key_bytes, key_bytes));
trie.insert(key_vals);
group.bench_with_input(
BenchmarkId::from_parameter(initial_keys),
&initial_keys,
|b, _| {
b.iter_batched(
|| trie.clone(),
|mut trie| {
// Insert different keys
let key_vals = KEYS_10K.iter().map(|key_bytes| (*key_bytes, *key_bytes));
#[allow(clippy::unit_arg)]
black_box(trie.insert(key_vals))
},
BatchSize::SmallInput,
)
},
);
}
group.finish();
}
criterion_group!(
name = benches;
config = Criterion::default().significance_level(0.1).sample_size(10);
targets = insert_10k_from_10mil_step);
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/benches/benchmarks/util.rs | verkle-trie/benches/benchmarks/util.rs | use once_cell::sync::Lazy;
use sha2::{Digest, Sha256};
// pub static PRECOMPUTED_TABLE: Lazy<PrecomputeLagrange> =
// Lazy::new(|| PrecomputeLagrange::precompute(&SRS.map(|point| point.into_affine())));
pub static KEYS_10K: Lazy<Vec<[u8; 32]>> =
Lazy::new(|| generate_diff_set_of_keys(10_000).collect());
#[allow(dead_code)]
pub static SAME_KEYS_10K: Lazy<Vec<[u8; 32]>> =
Lazy::new(|| generate_set_of_keys(10_000).collect());
pub fn generate_set_of_keys(n: u32) -> impl Iterator<Item = [u8; 32]> {
(0u32..n).map(|i| {
let mut arr = [0u8; 32];
let i_bytes = i.to_be_bytes();
arr[0] = i_bytes[0];
arr[1] = i_bytes[1];
arr[2] = i_bytes[2];
arr[3] = i_bytes[3];
let mut hasher = Sha256::new();
hasher.update(&arr[..]);
hasher.update(b"seed");
let res: [u8; 32] = hasher.finalize().into();
res
})
}
pub fn generate_diff_set_of_keys(n: u32) -> impl Iterator<Item = [u8; 32]> {
(0u32..n).map(|i| {
let mut hasher = Sha256::new();
hasher.update(i.to_be_bytes());
let res: [u8; 32] = hasher.finalize().into();
res
})
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/benches/benchmarks/mod.rs | verkle-trie/benches/benchmarks/mod.rs | pub mod edit_10k;
pub mod insert_10k;
pub mod proof_10k;
pub mod util;
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/benches/benchmarks/edit_10k.rs | verkle-trie/benches/benchmarks/edit_10k.rs | rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false | |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/benches/benchmarks/proof_10k.rs | verkle-trie/benches/benchmarks/proof_10k.rs | use crate::benchmarks::util::{generate_set_of_keys, KEYS_10K};
use criterion::BenchmarkId;
use criterion::{black_box, criterion_group, BatchSize, Criterion};
use verkle_trie::database::memory_db::MemoryDb;
use verkle_trie::trie::Trie;
use verkle_trie::DefaultConfig;
use verkle_trie::TrieTrait;
fn proof_10k_from_10mil_step(c: &mut Criterion) {
let mut group = c.benchmark_group("proof 10k");
let db = MemoryDb::new();
let config = DefaultConfig::new(db);
let mut trie = Trie::new(config);
// Initial set of keys
let _keys = generate_set_of_keys(1_000_000);
let key_vals = KEYS_10K.iter().map(|key_bytes| (*key_bytes, *key_bytes));
trie.insert(key_vals);
for initial_keys in (0..=100_000).step_by(100_000) {
group.bench_with_input(
BenchmarkId::from_parameter(initial_keys),
&initial_keys,
|b, _| {
b.iter_batched(
|| trie.clone(),
|trie| {
// Insert different keys
let key_vals = KEYS_10K.iter().copied();
black_box(trie.create_verkle_proof(key_vals))
},
BatchSize::SmallInput,
)
},
);
}
group.finish();
}
criterion_group!(
name = benches;
config = Criterion::default().significance_level(0.1).sample_size(10);
targets = proof_10k_from_10mil_step
);
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-spec/src/parameters.rs | verkle-spec/src/parameters.rs | use ethereum_types::U256;
// Parameters
pub(crate) const VERSION_LEAF_KEY: U256 = U256::zero();
pub(crate) const BALANCE_LEAF_KEY: U256 = U256([1, 0, 0, 0]);
pub(crate) const NONCE_LEAF_KEY: U256 = U256([2, 0, 0, 0]);
pub(crate) const CODE_KECCAK_LEAF_KEY: U256 = U256([3, 0, 0, 0]);
pub(crate) const CODE_SIZE_LEAF_KEY: U256 = U256([4, 0, 0, 0]);
pub(crate) const HEADER_STORAGE_OFFSET: U256 = U256([64, 0, 0, 0]);
pub(crate) const CODE_OFFSET: U256 = U256([128, 0, 0, 0]);
pub(crate) const VERKLE_NODE_WIDTH: U256 = U256([256, 0, 0, 0]);
pub(crate) const MAIN_STORAGE_OFFSET: U256 = U256([0, 0, 0, 2u64.pow(56)]);
#[test]
fn check_hardcoded_values() {
// Check that the constants were hardcoded correctly by
// checking against the `From` trait implementation
let version_leaf_key = U256::from(0u8);
assert_eq!(version_leaf_key, VERSION_LEAF_KEY);
let balance_leaf_key = U256::from(1u8);
assert_eq!(balance_leaf_key, BALANCE_LEAF_KEY);
let nonce_leaf_key = U256::from(2u8);
assert_eq!(nonce_leaf_key, NONCE_LEAF_KEY);
let code_keccak_leaf_key = U256::from(3u8);
assert_eq!(code_keccak_leaf_key, CODE_KECCAK_LEAF_KEY);
let code_size_leaf_key = U256::from(4u8);
assert_eq!(code_size_leaf_key, CODE_SIZE_LEAF_KEY);
let header_storage_offset = U256::from(64u8);
assert_eq!(header_storage_offset, HEADER_STORAGE_OFFSET);
let code_offset = U256::from(128u8);
assert_eq!(code_offset, CODE_OFFSET);
let verkle_node_width = U256::from(256u16);
assert_eq!(verkle_node_width, VERKLE_NODE_WIDTH);
let main_storage_offset = U256::from(256u16).pow(U256::from(31u8));
assert_eq!(main_storage_offset, MAIN_STORAGE_OFFSET);
}
#[test]
fn check_invariants() {
//It’s a required invariant that VERKLE_NODE_WIDTH > CODE_OFFSET > HEADER_STORAGE_OFFSET
// and that HEADER_STORAGE_OFFSET is greater than the leaf keys.
assert!(VERKLE_NODE_WIDTH > CODE_OFFSET);
assert!(CODE_OFFSET > HEADER_STORAGE_OFFSET);
assert!(HEADER_STORAGE_OFFSET > VERSION_LEAF_KEY);
assert!(HEADER_STORAGE_OFFSET > BALANCE_LEAF_KEY);
assert!(HEADER_STORAGE_OFFSET > NONCE_LEAF_KEY);
assert!(HEADER_STORAGE_OFFSET > CODE_KECCAK_LEAF_KEY);
assert!(HEADER_STORAGE_OFFSET > CODE_SIZE_LEAF_KEY);
// MAIN_STORAGE_OFFSET must be a power of VERKLE_NODE_WIDTH
//
assert!(MAIN_STORAGE_OFFSET == VERKLE_NODE_WIDTH.pow(U256::from(31)))
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-spec/src/lib.rs | verkle-spec/src/lib.rs | pub mod code;
pub mod header;
pub mod storage;
pub(crate) mod parameters;
mod util;
// TODO: specify the parts in the code where we assume VERKLE_WIDTH = 256;
// TODO: expose type markers in verkle-trie, so we can ensure that none of them overlap
// TODO: with the type markers in this crate. In particular, this crate uses 2, while verkle-trie uses 1 and 0
pub use ethereum_types::{H160, H256, U256};
pub use code::Code;
pub use header::Header;
use ipa_multipoint::committer::{Committer, DefaultCommitter};
pub use storage::Storage;
use verkle_trie::constants::new_crs;
// Used to hash the input in get_tree_key
pub trait Hasher {
fn hash64(bytes64: [u8; 64]) -> H256 {
// TODO: We should make this a part of the Hasher signature instead of
// TODO being inefficient here
let committer = DefaultCommitter::new(&new_crs().G);
hash64(&committer, bytes64)
}
fn chunk64(bytes64: [u8; 64]) -> [u128; 5] {
crate::util::chunk64(bytes64)
}
fn chunk_bytes(bytes: &[u8]) -> Vec<u128> {
crate::util::chunk_bytes(bytes)
}
}
pub fn chunk64(bytes64: [u8; 64]) -> [u128; 5] {
crate::util::chunk64(bytes64)
}
// This is the default implementation for `pedersen_hash`
// in the EIP. Since the EIP hashes 64 bytes (address32 + tree_index),
// we just special case the method here to hash 64 bytes.
pub fn hash64(committer: &DefaultCommitter, bytes64: [u8; 64]) -> H256 {
let inputs = crate::util::chunk64(bytes64).map(verkle_trie::Fr::from);
let result = committer.commit_lagrange(&inputs);
let hashed_point = result.map_to_scalar_field();
use banderwagon::trait_defs::*;
let mut output = [0u8; 32];
hashed_point
.serialize_compressed(&mut output[..])
.expect("Failed to serialize scalar to bytes");
H256::from(output)
}
// Old address styles
pub type Address20 = H160;
// New address styles
pub type Address32 = H256;
pub fn addr20_to_addr32(addr20: Address20) -> Address32 {
let bytes20: [u8; 20] = addr20.to_fixed_bytes();
let mut bytes32: [u8; 32] = [0u8; 32];
bytes32[12..].copy_from_slice(&bytes20);
Address32::from(bytes32)
}
#[test]
fn smoke_test_hash64() {
let committer = DefaultCommitter::new(&new_crs().G);
// Hash of all zeroes
let all_zeroes = [0u8; 64];
let hash = hash64(&committer, all_zeroes);
let expected =
hex::decode("1a100684fd68185060405f3f160e4bb6e034194336b547bdae323f888d533207").unwrap();
assert_eq!(hash, H256::from_slice(&expected));
// Hash of all ones
let all_ones = [1u8; 64];
let hash = hash64(&committer, all_ones);
let expected =
hex::decode("3afb8486ed3053ac55f62864da803c074844509d253260d870337c20fd73eb11").unwrap();
assert_eq!(hash, H256::from_slice(&expected));
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-spec/src/code.rs | verkle-spec/src/code.rs | use ethereum_types::{H256, U256};
use crate::{
parameters::{CODE_OFFSET, VERKLE_NODE_WIDTH},
util::{hash_addr_int, swap_last_byte, zero_align_bytes},
Address32, Hasher,
};
pub struct Code {
code_chunk_tree_key: H256,
}
impl Code {
pub fn new<H: Hasher>(address: Address32, chunk_id: U256) -> Code {
let index = (CODE_OFFSET + chunk_id) / VERKLE_NODE_WIDTH;
let sub_index = (CODE_OFFSET + chunk_id) % VERKLE_NODE_WIDTH;
let base_hash = hash_addr_int::<H>(address, index);
let code_chunk_tree_key = swap_last_byte(base_hash, sub_index);
Code {
code_chunk_tree_key,
}
}
pub fn code_chunk(&self) -> H256 {
self.code_chunk_tree_key
}
}
const PUSH_OFFSET: u8 = 95;
const PUSH1: u8 = PUSH_OFFSET + 1;
const PUSH32: u8 = PUSH_OFFSET + 32;
// Note: If the largest ethereum contract is ~24Kb , chunking the code
// which produces arrays, which are then stored in the stack should not give a stack
// overflow. That being said, its possible for us to store everthing as one vector and
// then call .chunks(32).try_into() when we need a [u8;32]
pub type Bytes32 = [u8; 32];
// Breaks up the code into 32 byte chunks
// The code is stored in 31 bytes and the leading byte is reserved as an indicator
// For whether the previous chunk has push data available
pub fn chunkify_code(code: Vec<u8>) -> Vec<Bytes32> {
// First pad the input, so it is 31 byte aligned
let aligned_code = zero_align_bytes(code, 31);
// First we chunk the aligned code into 31 bytes
let chunked_code31 = aligned_code.chunks_exact(31);
let mut remaining_pushdata_bytes = Vec::new();
// The first byte will not have any remaing push data bytes
// Since there was no chunk that came before it
let mut leftover_push_data = 0usize;
remaining_pushdata_bytes.push(leftover_push_data);
let last_chunk_index = chunked_code31.len() - 1;
// set this to true, if the last chunk had a push data instruction that
// needed another chunk
let mut last_chunk_push_data = false;
for (chunk_i, chunk) in chunked_code31.clone().enumerate() {
// Case1: the left over push data is larger than the chunk size
//
// The left over push data can be larger than the chunk size
// For example, if the last instruction was a PUSH32 and chunk size is 31
// We can compute the left over push data for this chunk as 31, the chunk size
// and then the left over push data for the next chunk as 32-31=1
if leftover_push_data > chunk.len() {
if chunk_i == last_chunk_index {
last_chunk_push_data = true;
break;
}
leftover_push_data -= chunk.len();
remaining_pushdata_bytes.push(leftover_push_data);
continue;
}
// Case2: the left over push data is smaller than the chunk size
//
// Increment the counter by how many bytes we need to push from the
// previous chunk. For example, if the previous chunk ended with a PUSH4
// we need to skip the first four bytes
let pc = leftover_push_data;
let offsetted_chunk = &chunk[pc..];
leftover_push_data = compute_leftover_push_data(offsetted_chunk) as usize;
remaining_pushdata_bytes.push(leftover_push_data.min(chunk.len()));
}
// Merge the remaining push data byte markers with the 31 byte chunks.
// Note: This can be done in one for loop, for now this is easier to read.
let mut chunked_code32: Vec<[u8; 32]> = Vec::with_capacity(chunked_code31.len());
for (prefix_byte, chunk31) in remaining_pushdata_bytes.into_iter().zip(chunked_code31) {
let prefix_byte: u8 = prefix_byte
.try_into()
.expect("prefix canot be stored in a u8. This should be infallible.");
let mut chunk32 = [0u8; 32];
chunk32[0] = prefix_byte;
chunk32[1..].copy_from_slice(chunk31);
chunked_code32.push(chunk32)
}
if last_chunk_push_data {
// If the last chunk had remaining push data to be added
// we add a new chunk with 32 zeroes. This is fine
chunked_code32.push([0u8; 32])
}
chunked_code32
}
// This functions returns a number which indicates how much PUSHDATA
// we still need to process. For example, if the last byte in the slice
// contained the instruction PUSH32. This function would return 32
// because we need to process 32 bytes of data.
//
// Another example, if the code_chunk has a length of `10`
// and the first byte contains the instruction PUSH24.Then this
// method will return the number 15 because we used one byte to represent
// PUSH24 leaving 9 bytes left to store some of the 24 bytes that we need.
//
// Note this implicitly assumes that we cannot push more than 254 bytes of
// PUSHDATA
fn compute_leftover_push_data(code_chunk: &[u8]) -> u8 {
// Start the program counter off at zero
let mut pos = 0usize;
while pos < code_chunk.len() {
let curr_instruction = code_chunk[pos];
let is_push_instruction = (PUSH1..=PUSH32).contains(&curr_instruction);
// Increment the counter by 1 to move past the current instruction
pos += 1;
if is_push_instruction {
// Figure out how many bytes we need to increment the position counter
let amount_bytes_to_push = curr_instruction - PUSH_OFFSET;
pos += amount_bytes_to_push as usize;
}
}
// Arriving here means that the position counter went over the length of the code chunk
// We can calculate the leftover push data by taking the offset of the counter and the length of
// the code chunk
let leftover: u8 = (pos - code_chunk.len())
.try_into()
.expect("left over cannot fit into a u8");
leftover
}
#[test]
fn check_against_eip() {
    // This was taken directly from the EIP as a sniff test.
    // PUSH4 expects 4 data bytes but only two (99, 98) remain in the chunk,
    // so 2 bytes of push data spill into the next chunk.
    let push4 = PUSH_OFFSET + 4;
    let remaining = compute_leftover_push_data(&[push4, 99, 98]);
    assert_eq!(remaining, 2)
}
#[test]
fn leftover_fuzz() {
    // The push32 instruction should give us a leftover of 2, because we can only store 30 elements
    // in the remaining chunk
    let chunk: [u8; 32] = [
        3, PUSH32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
        23, 24, 25, 26, 27, 28, 29, 30,
    ];
    let remaining = compute_leftover_push_data(&chunk);
    assert_eq!(remaining, 2);
    // Push32 at the end of the chunk should give us a leftover of 32
    let chunk: [u8; 32] = [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, PUSH32,
    ];
    let remaining = compute_leftover_push_data(&chunk);
    assert_eq!(remaining, 32);
    // This should return 0, since push4 treats the PUSH32 as PUSHDATA
    // (the 4 data bytes consumed by PUSH4 include the PUSH32 opcode byte).
    let push4 = PUSH_OFFSET + 4;
    let chunk: [u8; 32] = [
        push4, PUSH32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0,
    ];
    let remaining = compute_leftover_push_data(&chunk);
    assert_eq!(remaining, 0);
}
#[test]
fn chunkify_simple_test() {
    // Three 31-byte code chunks whose push instructions either complete within
    // their own chunk or spill a known number of data bytes into the next one.
    let push4 = PUSH_OFFSET + 4;
    let push3 = PUSH_OFFSET + 3;
    let push21 = PUSH_OFFSET + 21;
    let push7 = PUSH_OFFSET + 7;
    let push30 = PUSH_OFFSET + 30;
    let code: Vec<[u8; 31]> = vec![
        // First 31 bytes
        [
            0, push4, 1, 2, 3, 4, push3, 58, 68, 12, push21, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
            13, 14, 15, 16, 17, 18, 19, 20,
        ],
        // Second 31 bytes
        [
            0, push21, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
            push7, 1, 2, 3, 4, 5, 6, 7,
        ],
        // Third 31 bytes
        [
            push30, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
            23, 24, 25, 26, 27, 28, 29, 30,
        ],
    ];
    let code = code.into_iter().flatten().collect();
    let chunked_code = chunkify_code(code);
    let num_chunks = chunked_code.len();
    assert_eq!(num_chunks, 3);
    let chunk1 = chunked_code[0];
    let chunk2 = chunked_code[1];
    let chunk3 = chunked_code[2];
    // The first chunk should have a leading byte of 0;
    assert_eq!(chunk1[0], 0);
    // The second chunk should have a leading byte of 1, because the last push instruction from chunk1 was PUSH21
    // and we could only store 20 bytes in that chunk
    assert_eq!(chunk2[0], 1);
    // The third chunk should have a leading by of 0, since the last push instruction was PUSH7 and we stored all 7 bytes
    // in the second chunk
    assert_eq!(chunk3[0], 0);
}
#[test]
fn chunkify_with_push32_at_end_test() {
    // A PUSH32 whose data spans more than one following chunk: the leading
    // byte of each chunk records how many of its bytes are still push data.
    let push21 = PUSH_OFFSET + 21;
    let code: Vec<[u8; 31]> = vec![
        // First 31 bytes
        [
            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
            25, 26, 27, 28, 29, 30, PUSH32,
        ],
        // Second 31 bytes
        [
            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
            25, 26, 27, 28, 29, 30, 31,
        ],
        // Third 31 bytes
        [
            32, push21, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
            0, 0, 0, 0, 0, 0, 0, 0,
        ],
    ];
    let code = code.into_iter().flatten().collect();
    let chunked_code = chunkify_code(code);
    let num_chunks = chunked_code.len();
    assert_eq!(num_chunks, 3);
    let chunk1 = chunked_code[0];
    let chunk2 = chunked_code[1];
    let chunk3 = chunked_code[2];
    // The first chunk should have a leading byte of 0;
    assert_eq!(chunk1[0], 0);
    // The second chunk should have a leading byte of 31, because the last push instruction from chunk1 was PUSH32
    // and we couldn't store any of 32 bytes in that chunk, but chunk can store only 31 non-leading bytes.
    assert_eq!(chunk2[0], 31);
    // The third chunk should have a leading byte of 1, because the last push instruction was PUSH32 in chunk1 that
    // we didn't finish in chunk2.
    assert_eq!(chunk3[0], 1);
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-spec/src/header.rs | verkle-spec/src/header.rs | use ethereum_types::H256;
use ethereum_types::U256;
use crate::parameters::{
BALANCE_LEAF_KEY, CODE_KECCAK_LEAF_KEY, CODE_SIZE_LEAF_KEY, NONCE_LEAF_KEY, VERSION_LEAF_KEY,
};
use crate::util::hash_addr_int;
use crate::Hasher;
use crate::{util::swap_last_byte, Address32};
/// The five fixed tree keys that make up an account's header in the
/// verkle tree. All keys share the same stem (derived from the address)
/// and differ only in the final "leaf key" byte.
pub struct Header {
    // Tree key of the account balance leaf.
    balance_tree_key: H256,
    // Tree key of the account version leaf.
    version_tree_key: H256,
    // Tree key of the code-size leaf (backs EXTCODESIZE).
    code_size_tree_key: H256,
    // Tree key of the nonce leaf.
    nonce_tree_key: H256,
    // Tree key of the code-hash leaf (backs EXTCODEHASH).
    code_keccak_tree_key: H256,
}
impl Header {
    /// Derives the header tree keys for `address` at tree index zero.
    pub fn new<H: Hasher>(address: Address32) -> Header {
        Header::with_tree_index::<H>(address, U256::zero())
    }

    /// Derives the header tree keys for `addr` at the given `tree_index`.
    ///
    /// All five keys share the stem `hash(addr, tree_index)`; only the last
    /// byte distinguishes the individual leaves.
    pub fn with_tree_index<H: Hasher>(addr: Address32, tree_index: U256) -> Header {
        let base_hash = hash_addr_int::<H>(addr, tree_index);

        Header {
            balance_tree_key: swap_last_byte(base_hash, BALANCE_LEAF_KEY),
            version_tree_key: swap_last_byte(base_hash, VERSION_LEAF_KEY),
            code_size_tree_key: swap_last_byte(base_hash, CODE_SIZE_LEAF_KEY),
            nonce_tree_key: swap_last_byte(base_hash, NONCE_LEAF_KEY),
            code_keccak_tree_key: swap_last_byte(base_hash, CODE_KECCAK_LEAF_KEY),
        }
    }

    /// Tree key of the balance leaf.
    pub fn balance(&self) -> H256 {
        self.balance_tree_key
    }

    /// Tree key of the nonce leaf.
    pub fn nonce(&self) -> H256 {
        self.nonce_tree_key
    }

    // Backwards compatibility for EXTCODEHASH
    pub fn code_keccak(&self) -> H256 {
        self.code_keccak_tree_key
    }

    // Backwards compatibility for EXTCODESIZE
    pub fn code_size(&self) -> H256 {
        self.code_size_tree_key
    }

    /// Tree key of the version leaf.
    pub fn version(&self) -> H256 {
        self.version_tree_key
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-spec/src/storage.rs | verkle-spec/src/storage.rs | use ethereum_types::{H256, U256};
use crate::{
parameters::{CODE_OFFSET, HEADER_STORAGE_OFFSET, MAIN_STORAGE_OFFSET, VERKLE_NODE_WIDTH},
util::{hash_addr_int, swap_last_byte},
Address32, Hasher,
};
/// The verkle-tree key for a single account storage slot.
pub struct Storage {
    // Fully derived tree key: stem from (address, slot group) plus the
    // sub-index as the final byte.
    storage_slot_tree_key: H256,
}
impl Storage {
    /// Derives the tree key for `storage_key` of the account at `address`.
    pub fn new<H: Hasher>(address: Address32, storage_key: U256) -> Storage {
        // Low slots are packed next to the account header; everything else
        // goes to the main storage area.
        let pos = if storage_key < (CODE_OFFSET - HEADER_STORAGE_OFFSET) {
            storage_key + HEADER_STORAGE_OFFSET
        } else {
            storage_key + MAIN_STORAGE_OFFSET
        };

        // Split the position into a node index (which stem) and a sub-index
        // (which leaf inside that stem).
        let node_index = pos / VERKLE_NODE_WIDTH;
        let sub_index = pos % VERKLE_NODE_WIDTH;

        let base_hash = hash_addr_int::<H>(address, node_index);
        Storage {
            storage_slot_tree_key: swap_last_byte(base_hash, sub_index),
        }
    }

    /// Tree key of this storage slot.
    pub fn storage_slot(&self) -> H256 {
        self.storage_slot_tree_key
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-spec/src/util.rs | verkle-spec/src/util.rs | use ethereum_types::{H256, U256};
use crate::{parameters::VERKLE_NODE_WIDTH, Address32, Hasher};
#[must_use]
pub(crate) fn swap_last_byte(mut hash: H256, byte: U256) -> H256 {
    // The leaf key must fit in a single byte; panic loudly if it doesn't.
    let replacement: u8 = byte
        .as_u32()
        .try_into()
        .expect("number cannot be represented as a byte");

    let bytes = hash.as_bytes_mut();
    let last = bytes.len() - 1;
    bytes[last] = replacement;
    hash
}
/// Hashes the 64-byte concatenation of a 32-byte address and a 32-byte
/// little-endian integer, producing the stem for a group of tree keys.
pub(crate) fn hash_addr_int<H: Hasher>(addr: Address32, integer: U256) -> H256 {
    let mut hash_input = [0u8; 64];

    // First half: the address bytes; second half: the index, little-endian.
    hash_input[..32].copy_from_slice(addr.as_fixed_bytes());
    integer.to_little_endian(&mut hash_input[32..]);

    H::hash64(hash_input)
}
// Chunk the input into 16 byte integers. This is because the scalar field
// we are using can not hold full 256 bit integers.
pub(crate) fn chunk_bytes(input: &[u8]) -> Vec<u128> {
    // We can only commit to VERKLE_NODE_WIDTH elements at once
    // since we need 1 element to represent the encoding flag
    // the commit capacity is therefore the WIDTH-1
    let commit_capacity = (VERKLE_NODE_WIDTH - 1u8).as_u32() as usize;
    // We will chop our byte slice into 16 byte integers
    // Therefore we need to ensure that the input is not too large
    assert!(input.len() <= (commit_capacity * 16));
    // This is so that we can separate the extension marker and suffix commitments
    // from this. ExtMarker = 1; SuffixMarker = 0;
    let type_encoding: u128 = 2;
    // 256 * input.len() is bounded by 256 * (capacity * 16), i.e. well under
    // 21 bits, so u128 comfortably holds the flag
    let encoding_flag: u128 = type_encoding + 256 * input.len() as u128;
    // We will pad the input with zeroes. This is to ensure
    // that when we chunk the slice into 16 byte integers, everything is aligned
    // Also note that in our case, H(x, 0) = H(x) , so the zeroes
    // can be ignored, but this is not the case with functions like sha256
    // Hence this way is future proof. One could also argue that since the
    // length is encoded, then we can skip the zeroes
    //
    // TODO: this pads to 255*16, but we really only need it to be aligned
    let pad_by = commit_capacity * 16 - input.len();
    // Make the input 16 byte aligned: flag first, then the payload, then padding
    let mut aligned_input = encoding_flag.to_le_bytes().to_vec();
    aligned_input.extend_from_slice(input);
    aligned_input.extend(vec![0u8; pad_by]);
    let mut input_as_u128 = Vec::new();
    // Now chunk the input into 16 byte chunks, each read as a little-endian u128
    for chunk in aligned_input.chunks(16) {
        let chunk: [u8; 16] = chunk
            .try_into()
            .expect("input is not 16 byte aligned. This should not happen after padding is added.");
        input_as_u128.push(u128::from_le_bytes(chunk))
    }
    input_as_u128
}
/// Specialised version of `chunk_bytes` for exactly 64 bytes of input,
/// skipping the padding logic: one flag element plus four 16-byte limbs.
pub(crate) fn chunk64(bytes64: [u8; 64]) -> [u128; 5] {
    const INPUT_LEN: u128 = 64;
    // Same flag layout as `chunk_bytes`: type marker 2 plus the input length
    // shifted into the second byte.
    let type_encoding: u128 = 2;

    let mut result = [0u128; 5];
    result[0] = type_encoding + 256 * INPUT_LEN;

    // Read each 16-byte window of the input as a little-endian u128.
    for (slot, chunk) in result[1..].iter_mut().zip(bytes64.chunks(16)) {
        let limb: [u8; 16] = chunk.try_into().expect("64 bytes split evenly into 16-byte limbs");
        *slot = u128::from_le_bytes(limb);
    }
    result
}
/// Pads `bytes` with zeroes until its length is a multiple of `alignment`.
/// Already-aligned input (including empty input) is returned unchanged.
pub(crate) fn zero_align_bytes(mut bytes: Vec<u8>, alignment: usize) -> Vec<u8> {
    assert!(alignment > 0);
    let remainder = bytes.len() % alignment;
    if remainder != 0 {
        // Grow with zeroes up to the next multiple of `alignment`.
        bytes.resize(bytes.len() + (alignment - remainder), 0);
    }
    bytes
}
#[test]
fn swap_byte() {
    // Only the final byte of the hash should change; the rest stays intact.
    let replacement_byte = 123u8;
    let hash = H256::repeat_byte(2);
    let got = swap_last_byte(hash, U256::from(replacement_byte));
    let mut expected = *hash.as_fixed_bytes();
    *expected.last_mut().unwrap() = replacement_byte;
    assert_eq!(*got.as_fixed_bytes(), expected)
}
#[test]
fn chunk_bytes_consistency() {
    // `chunk64` is a specialisation of `chunk_bytes` for 64-byte input; the
    // two must agree once chunk64's output is zero-extended to the same length.
    let bytes = [1u8; 64];
    let res_cbytes = chunk_bytes(&bytes);
    let mut res_c64 = chunk64(bytes).to_vec();
    let len_diff = res_cbytes.len() - res_c64.len();
    let pad = vec![0u128; len_diff];
    res_c64.extend(pad);
    assert_eq!(res_c64, res_cbytes);
}
#[test]
fn check_padding() {
    // Exhaustively check every (alignment, initial length) pair below `end`:
    // the padded length must always be a multiple of the alignment.
    // We check alignment upto x
    let end = 150;
    for alignment in 1..end {
        for initial_num_elements in 0..alignment {
            let bytes = vec![0; initial_num_elements];
            let result = zero_align_bytes(bytes, alignment);
            // The result should be aligned
            assert_eq!(result.len() % alignment, 0)
        }
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-spec/tests/devnet6.rs | verkle-spec/tests/devnet6.rs | use anyhow::Result;
use ark_serialize::CanonicalSerialize;
use hex::FromHex;
use keccak_hash::{keccak, KECCAK_EMPTY};
use once_cell::sync::Lazy;
use serde::Deserialize;
use std::{collections::HashMap, fs::File, io::BufReader, str::FromStr, sync::Mutex};
use verkle_spec::{
addr20_to_addr32, code::chunkify_code, Address20, Code, Hasher, Header, Storage, H256, U256,
};
use verkle_trie::{database::memory_db::MemoryDb, Trie, TrieTrait, Value, VerkleConfig};
const GENESIS_FILEPATH: &str = "assets/devnet6_genesis.json";
const STATE_ROOT: &str = "0x1fbf85345a3cbba9a6d44f991b721e55620a22397c2a93ee8d5011136ac300ee";
// Uses the trait's default hashing implementation; no overrides needed here.
pub struct DefaultHasher;
impl Hasher for DefaultHasher {}
// Per-account state as it appears under `alloc` in the genesis JSON.
#[derive(Deserialize)]
pub struct GenesisAccountState {
    // Decimal string in the genesis file; parsed with `from_dec_str` below.
    balance: String,
    nonce: Option<String>,
    // Hex-encoded contract bytecode, optionally "0x"-prefixed.
    code: Option<String>,
    storage: Option<HashMap<U256, H256>>,
}
// Top-level genesis layout; only the allocation table is needed for this test.
#[derive(Deserialize)]
pub struct GenesisConfig {
    alloc: HashMap<Address20, GenesisAccountState>,
}
// Shared trie configuration, built lazily once and reused across tests.
pub static CONFIG: Lazy<Mutex<VerkleConfig<MemoryDb>>> =
    Lazy::new(|| Mutex::new(VerkleConfig::new(MemoryDb::new())));
// Encodes a U256 as a little-endian trie leaf value.
fn to_trie_value(u256: U256) -> Value {
    let mut value = Value::default();
    u256.to_little_endian(value.as_mut_slice());
    value
}
// End-to-end check: inserting every account, code chunk and storage slot from
// the devnet6 genesis file must reproduce the known state root.
#[test]
fn genesis_state_root() -> Result<()> {
    let file = File::open(GENESIS_FILEPATH)?;
    let genesis_config: GenesisConfig = serde_json::from_reader(BufReader::new(file))?;
    let mut trie = Trie::new(CONFIG.lock().unwrap().clone());
    for (address, account_state) in genesis_config.alloc {
        let address = addr20_to_addr32(address);
        let header = Header::new::<DefaultHasher>(address);
        let balance = U256::from_dec_str(&account_state.balance)?;
        let nonce = U256::from_dec_str(&account_state.nonce.unwrap_or("0".to_string()))?;
        // Header leaves common to every account: version, balance, nonce.
        trie.insert(
            [
                (header.version().0, to_trie_value(U256::zero())),
                (header.balance().0, to_trie_value(balance)),
                (header.nonce().0, to_trie_value(nonce)),
            ]
            .into_iter(),
        );
        match account_state.code {
            None => {
                // EOA: store the keccak of the empty string as the code hash.
                trie.insert_single(header.code_keccak().0, KECCAK_EMPTY.0);
            }
            Some(code) => {
                let code = code.strip_prefix("0x").unwrap_or(&code);
                let code = <Vec<u8>>::from_hex(code)?;
                trie.insert(
                    [
                        (header.code_keccak().0, keccak(&code).0),
                        (header.code_size().0, to_trie_value(U256::from(code.len()))),
                    ]
                    .into_iter(),
                );
                // Bytecode is split into 31-byte chunks, each keyed by its
                // chunk index under the account's code stem.
                let code_kv = chunkify_code(code)
                    .into_iter()
                    .enumerate()
                    .map(|(chunk_id, code_chunk)| {
                        let tree_key =
                            Code::new::<DefaultHasher>(address, U256::from(chunk_id)).code_chunk();
                        (tree_key.0, code_chunk)
                    })
                    .collect::<Vec<_>>();
                trie.insert(code_kv.into_iter());
            }
        }
        if let Some(storage) = account_state.storage {
            let storage_kv = storage
                .into_iter()
                .map(|(storage_slot, storage_value)| {
                    let storage_slot_tree_key =
                        Storage::new::<DefaultHasher>(address, storage_slot).storage_slot();
                    (storage_slot_tree_key.0, storage_value.0)
                })
                .collect::<Vec<_>>();
            trie.insert(storage_kv.into_iter());
        }
    }
    // Serialize the root commitment and compare against the EIP reference root.
    let mut root_hash = H256::zero();
    trie.root_commitment()
        .serialize_compressed(root_hash.as_bytes_mut())?;
    assert_eq!(root_hash, H256::from_str(STATE_ROOT)?);
    Ok(())
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
freethinkel/tauri-nspopover-plugin | https://github.com/freethinkel/tauri-nspopover-plugin/blob/efdd92922d337d350d59d8cf20c821e24ca16b48/build.rs | build.rs | const COMMANDS: &[&str] = &["show_popover", "hide_popover", "is_popover_shown"];
fn main() {
    // Generates the tauri plugin glue (permissions/command bindings) for the
    // commands declared in COMMANDS above.
    tauri_plugin::Builder::new(COMMANDS).build();
}
| rust | MIT | efdd92922d337d350d59d8cf20c821e24ca16b48 | 2026-01-04T20:20:39.361853Z | false |
freethinkel/tauri-nspopover-plugin | https://github.com/freethinkel/tauri-nspopover-plugin/blob/efdd92922d337d350d59d8cf20c821e24ca16b48/src/lib.rs | src/lib.rs | use objc2::rc::Retained;
use objc2_app_kit::{NSPopover, NSStatusBarButton, NSWindow};
use objc2_foundation::NSRectEdge;
use tauri::{
plugin::{Builder, TauriPlugin},
tray::TrayIcon,
AppHandle, Manager, Runtime, State, WebviewWindow,
};
use std::sync::Mutex;
mod popover;
use popover::PopoverController;
/// Options controlling how a window is converted into a popover.
pub struct ToPopoverOptions {
    // When true, the popover content extends under the popover chrome.
    pub is_fullsize_content: bool,
}
/// Extension trait converting a tauri window into an NSPopover.
pub trait WindowExt<R: Runtime> {
    fn to_popover(&self, options: ToPopoverOptions);
}
/// Popover control surface exposed on the tauri `AppHandle`.
pub trait AppExt<R: Runtime> {
    fn is_popover_shown(&self) -> bool;
    fn show_popover(&self);
    fn hide_popover(&self);
    fn ns_popover(&self) -> Retained<NSPopover>;
    fn ns_statusbar_button(&self) -> Retained<NSStatusBarButton>;
}
pub use tauri::tray::TrayIconId;
// Mirror of tauri's private `TrayIcon` layout, used below to reach the
// underlying `tray_icon::TrayIcon` via transmute.
// NOTE(review): this must stay field-for-field identical to tauri's internal
// struct; any upstream layout change makes the transmute undefined behavior.
#[allow(dead_code)]
pub struct StatusItem<R: Runtime> {
    id: TrayIconId,
    pub(crate) inner: tray_icon::TrayIcon,
    app_handle: AppHandle<R>,
}
/// Accessor for the AppKit status-bar button behind a tray icon.
pub trait StatusItemGetter {
    fn get_status_bar_button(&self) -> Retained<NSStatusBarButton>;
}
impl<R: Runtime> StatusItemGetter for TrayIcon<R> {
    fn get_status_bar_button(&self) -> Retained<NSStatusBarButton> {
        // SAFETY(review): assumes TrayIcon<R> and StatusItem<R> have identical
        // layout (see note on StatusItem above) — confirm against the pinned
        // tauri version.
        let status_item: &StatusItem<R> =
            unsafe { std::mem::transmute::<&TrayIcon<R>, &StatusItem<R>>(self) };
        let mtm = status_item.inner.tray.as_ref().borrow().mtm;
        // SAFETY(review): unguarded borrow alongside the guarded one above;
        // presumably sound because both are reads on the main thread — verify.
        let tray = unsafe { status_item.inner.tray.try_borrow_unguarded().unwrap() };
        let status = tray.ns_status_item.as_ref().unwrap();
        let btn = unsafe { status.button(mtm).unwrap() };
        return unsafe { std::mem::transmute(btn) };
    }
}
impl<R: Runtime> WindowExt<R> for WebviewWindow<R> {
    /// Converts this window into an NSPopover anchored to the "main" tray
    /// icon, hides the original window, and stores the popover in app state.
    ///
    /// NOTE(review): panics if no tray icon with id "main" exists — confirm
    /// callers always register one before invoking this.
    fn to_popover(&self, options: ToPopoverOptions) {
        let tray = self.app_handle().tray_by_id("main").unwrap();
        let button = tray.get_status_bar_button();
        let window = self;
        let window = window.ns_window().unwrap();
        // SAFETY(review): `ns_window()` returns a raw pointer that is assumed
        // to be a valid NSWindow for the lifetime of this call (main thread).
        let ns_window = unsafe { (window.cast() as *mut NSWindow).as_ref().unwrap() };
        let _scale = self.scale_factor().unwrap();
        let popover_controller = PopoverController::new(ns_window);
        let _ = self.hide();
        let popover = SafeNSPopover(popover_controller.popover());
        if options.is_fullsize_content {
            unsafe { popover.0.setHasFullSizeContent(true) };
        }
        let button = SafeNSStatusBarButton(button);
        // Stash the popover and anchor button so AppExt methods can use them.
        let state = self.app_handle().state() as State<'_, AppState>;
        *state.0.lock().unwrap() = Some(AppStateInner { popover, button });
    }
}
impl<R: Runtime> AppExt<R> for AppHandle<R> {
    /// Returns true when the popover exists and AppKit reports it visible.
    fn is_popover_shown(&self) -> bool {
        let state: State<AppState> = self.state();
        // NOTE(review): the state is locked twice (emptiness check, then
        // read); fine while the state is only written once in to_popover,
        // but a single guard would be more robust.
        if state.0.lock().unwrap().as_ref().is_none() {
            return false;
        }
        let state_guard = state.0.lock().unwrap();
        let inner = state_guard.as_ref().unwrap();
        let popover = &inner.popover.0;
        unsafe { popover.isShown() }
    }
    /// Returns a retained handle to the stored popover.
    /// Panics if `to_popover` has not been called yet.
    fn ns_popover(&self) -> Retained<NSPopover> {
        let state: State<AppState> = self.state();
        let guard = state.0.lock().unwrap();
        let inner = guard.as_ref().unwrap();
        let popover = &inner.popover.0;
        // Create a new reference to the same popover
        popover.clone()
    }
    /// Returns a retained handle to the tray button the popover anchors to.
    /// Panics if `to_popover` has not been called yet.
    fn ns_statusbar_button(&self) -> Retained<NSStatusBarButton> {
        let state: State<AppState> = self.state();
        let button = state.0.lock().unwrap().as_ref().unwrap().button.0.clone();
        button
    }
    /// Shows the popover below the status-bar button, if it is not already
    /// visible. No-op when `to_popover` has not been called.
    fn show_popover(&self) {
        let state: State<AppState> = self.state();
        if state.0.lock().unwrap().as_ref().is_none() {
            return;
        }
        let popover = self.ns_popover();
        let button = self.ns_statusbar_button();
        let rect = button.bounds();
        if unsafe { !popover.isShown() } {
            unsafe {
                popover.showRelativeToRect_ofView_preferredEdge(
                    rect,
                    button.as_ref(),
                    NSRectEdge::MaxY,
                );
            }
        }
    }
    /// Closes the popover if it is currently visible. No-op when
    /// `to_popover` has not been called.
    fn hide_popover(&self) {
        let state: State<AppState> = self.state();
        if state.0.lock().unwrap().as_ref().is_none() {
            return;
        }
        let popover = self.ns_popover();
        if unsafe { popover.isShown() } {
            unsafe { popover.performClose(None) };
        }
    }
}
// Newtype wrappers so the AppKit handles can be stored in tauri's managed
// (Send-required) state.
struct SafeNSPopover(Retained<NSPopover>);
struct SafeNSStatusBarButton(Retained<NSStatusBarButton>);
// SAFETY(review): NSPopover and NSStatusBarButton are main-thread AppKit
// objects. These impls assume the wrapped handles are only ever used from the
// main thread even though the state itself may move — confirm all call sites.
unsafe impl Send for SafeNSPopover {}
unsafe impl Send for SafeNSStatusBarButton {}
/// Tauri command: show the popover anchored to the tray button.
#[tauri::command]
fn show_popover<R: Runtime>(app: AppHandle<R>) -> Result<(), String> {
    app.show_popover();
    Ok(())
}

/// Tauri command: close the popover if it is currently visible.
#[tauri::command]
fn hide_popover<R: Runtime>(app: AppHandle<R>) -> Result<(), String> {
    app.hide_popover();
    Ok(())
}

/// Tauri command: report whether the popover is currently visible.
#[tauri::command]
fn is_popover_shown<R: Runtime>(app: AppHandle<R>) -> Result<bool, String> {
    Ok(app.is_popover_shown())
}
// Popover handle plus the status-bar button it anchors to; populated by
// `WindowExt::to_popover`.
struct AppStateInner {
    popover: SafeNSPopover,
    button: SafeNSStatusBarButton,
}
// `None` until a window has been converted into a popover.
struct AppState(Mutex<Option<AppStateInner>>);
/// Builds the "nspopover" tauri plugin: registers the three popover commands
/// and installs the (initially empty) managed state.
pub fn init<R: Runtime>() -> TauriPlugin<R> {
    Builder::new("nspopover")
        .invoke_handler(tauri::generate_handler![
            show_popover,
            hide_popover,
            is_popover_shown
        ])
        .setup(|app, _| {
            app.manage(AppState(Mutex::new(None)));
            Ok(())
        })
        .build()
}
| rust | MIT | efdd92922d337d350d59d8cf20c821e24ca16b48 | 2026-01-04T20:20:39.361853Z | false |
freethinkel/tauri-nspopover-plugin | https://github.com/freethinkel/tauri-nspopover-plugin/blob/efdd92922d337d350d59d8cf20c821e24ca16b48/src/popover.rs | src/popover.rs | use objc2::{msg_send, rc::Retained, runtime::Bool};
use objc2_app_kit::{NSColor, NSPopover, NSPopoverBehavior, NSView, NSViewController, NSWindow};
use objc2_foundation::MainThreadMarker;
/// Owns an NSPopover built from an existing NSWindow's content view.
pub struct PopoverController {
    popover: Retained<NSPopover>,
}
impl PopoverController {
    /// Builds a popover whose content is `window`'s content view, sized to
    /// the window's current frame.
    pub fn new(window: &NSWindow) -> Self {
        let popover = Self::create_popover(window);
        return PopoverController { popover };
    }
    /// Returns a retained handle to the underlying popover.
    pub fn popover(&self) -> Retained<NSPopover> {
        self.popover.clone()
    }
    // Extracts the window's content view and makes it layer-backed and
    // transparent so it renders cleanly inside the popover.
    fn get_target_view(ns_window: &NSWindow) -> Retained<NSView> {
        let view = ns_window.contentView().unwrap();
        view.setWantsLayer(true);
        unsafe {
            let color = NSColor::clearColor();
            // NOTE(review): plain NSView has no setBackgroundColor/setOpaque;
            // presumably the actual view here is a WKWebView/subclass that
            // responds to these selectors — confirm, otherwise this throws.
            let _: () = msg_send![&*view, setBackgroundColor: &*color];
            let _: () = msg_send![&*view, setOpaque: Bool::YES];
        }
        return view;
    }
    // Wraps the target view in a view controller and attaches it to a new
    // transient popover sized to the source window.
    fn create_popover(window: &NSWindow) -> Retained<NSPopover> {
        let view = Self::get_target_view(window);
        unsafe {
            let mtm = MainThreadMarker::new().unwrap();
            let ctrl = NSViewController::new(mtm);
            ctrl.setView(view.as_ref());
            let popover = NSPopover::new(mtm);
            // Transient: clicking outside the popover closes it.
            popover.setBehavior(NSPopoverBehavior::Transient);
            popover.setContentViewController(Some(ctrl.as_ref()));
            let content_size = window.frame().size;
            popover.setContentSize(content_size);
            popover
        }
    }
}
| rust | MIT | efdd92922d337d350d59d8cf20c821e24ca16b48 | 2026-01-04T20:20:39.361853Z | false |
freethinkel/tauri-nspopover-plugin | https://github.com/freethinkel/tauri-nspopover-plugin/blob/efdd92922d337d350d59d8cf20c821e24ca16b48/src/action.rs | src/action.rs | use icrate::objc2::{
class,
declare::ClassDecl,
msg_send, msg_send_id,
rc::{Allocated, Id},
runtime::{AnyClass, AnyObject, Sel},
sel, Encode, RefEncode,
};
use std::fmt;
use std::sync::Once;
// Opaque stand-in for an Objective-C object pointer.
#[repr(C)]
pub struct Object {
    _priv: PrivateMarker,
}
unsafe impl RefEncode for Object {
    // NOTE(review): encodes as a block, not an object — confirm this matches
    // how `Idd` is actually passed by AppKit to the action selector.
    const ENCODING_REF: icrate::objc2::Encoding = icrate::objc2::Encoding::Block;
    // Implement the required methods...
}
pub type Idd = *mut Object;
type PrivateMarker = [u8; 0];
/// Handle returned after wiring a Rust closure as a control's target/action.
#[derive(Debug)]
pub struct TargetActionHandler {}
impl TargetActionHandler {
    /// Returns a new TargetEventHandler.
    ///
    /// Boxes `action`, leaks it into an ivar on a freshly allocated
    /// Objective-C target object, and points `control`'s target/action at it.
    /// NOTE(review): the boxed closure and the target object are never
    /// released — this leaks once per call; confirm that is acceptable.
    pub fn new<F: Fn() + Send + Sync + 'static>(control: &AnyObject, action: F) -> Self {
        let block = Box::new(Action(Box::new(action)));
        let ptr = Box::into_raw(block);
        unsafe {
            let class = register_invoker_class::<F>().as_ref().unwrap();
            let obj: Allocated<AnyObject> = msg_send_id![class, alloc];
            let obj: Id<AnyObject> = msg_send_id![obj, init];
            // Write a raw value into a named ivar on the object.
            unsafe fn set_ivar<T: Encode>(obj: &mut AnyObject, name: &str, value: T) {
                *obj.get_mut_ivar::<T>(name) = value;
            }
            // Deliberately forget the Id so the target outlives this scope.
            let obj_ptr: *mut AnyObject = std::mem::transmute(obj);
            set_ivar(&mut *obj_ptr, ACTION_CALLBACK_PTR, ptr);
            let _: () = msg_send![control, setAction: sel!(perform:)];
            let _: () = msg_send![control, setTarget: obj_ptr];
        }
        TargetActionHandler {}
    }
}
// Type-erased Rust callback stored behind the ivar pointer.
pub struct Action(Box<dyn Fn() + Send + Sync + 'static>);
unsafe impl RefEncode for Action {
    const ENCODING_REF: icrate::objc2::Encoding = icrate::objc2::Encoding::Object;
}
impl fmt::Debug for Action {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Action").finish()
    }
}
// Name of the ivar holding the raw `*mut Action` on the target object.
pub static ACTION_CALLBACK_PTR: &str = "rstTargetActionPtr";
// Reads the ivar named `ptr_name` back out as a `&T`.
// SAFETY(review): assumes the ivar was previously set to a live `*mut Action`
// by `TargetActionHandler::new` and never freed.
pub fn load<'a, T>(this: &'a AnyObject, ptr_name: &str) -> &'a T {
    unsafe {
        let ptr: *mut Action = *this.get_ivar(ptr_name);
        let obj = ptr as *const T;
        &*obj
    }
}
// The Objective-C `perform:` method body: recover the stored closure and call it.
extern "C" fn perform<'a, F: Fn() + 'static>(this: &'a mut AnyObject, _: Sel, _sender: Idd) {
    let action = load::<Action>(this, ACTION_CALLBACK_PTR);
    (action.0)();
}
// Registers (once) the Objective-C class that forwards `perform:` to the
// stored Rust closure.
// NOTE(review): the class is registered with the first `F` this is called
// with; harmless today because `perform` only uses the type-erased `Action`,
// but the generic parameter is misleading. `static mut` here also relies on
// `Once` for synchronization — a `OnceLock` would make that explicit.
pub(crate) fn register_invoker_class<F: Fn() + 'static>() -> *const AnyClass {
    static mut VIEW_CLASS: *const AnyClass = 0 as *const AnyClass;
    static INIT: Once = Once::new();
    INIT.call_once(|| unsafe {
        let superclass = class!(NSObject);
        let mut decl = ClassDecl::new("RSTTargetActionHandler", superclass).unwrap();
        decl.add_ivar::<*mut Action>(ACTION_CALLBACK_PTR);
        decl.add_method(sel!(perform:), perform::<F> as extern "C" fn(_, _, _));
        VIEW_CLASS = decl.register();
    });
    unsafe { VIEW_CLASS }
}
| rust | MIT | efdd92922d337d350d59d8cf20c821e24ca16b48 | 2026-01-04T20:20:39.361853Z | false |
freethinkel/tauri-nspopover-plugin | https://github.com/freethinkel/tauri-nspopover-plugin/blob/efdd92922d337d350d59d8cf20c821e24ca16b48/example/src-tauri/build.rs | example/src-tauri/build.rs | fn main() {
tauri_build::build()
}
| rust | MIT | efdd92922d337d350d59d8cf20c821e24ca16b48 | 2026-01-04T20:20:39.361853Z | false |
freethinkel/tauri-nspopover-plugin | https://github.com/freethinkel/tauri-nspopover-plugin/blob/efdd92922d337d350d59d8cf20c821e24ca16b48/example/src-tauri/src/main.rs | example/src-tauri/src/main.rs | // Prevents additional console window on Windows in release, DO NOT REMOVE!!
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
use tauri::{
tray::{MouseButton, MouseButtonState, TrayIcon, TrayIconEvent},
ActivationPolicy, Manager,
};
use tauri_plugin_nspopover::{AppExt, ToPopoverOptions, WindowExt};
fn main() {
    tauri::Builder::default()
        .plugin(tauri_plugin_shell::init())
        .plugin(tauri_plugin_nspopover::init())
        .setup(|app| {
            // Accessory apps have no Dock icon — appropriate for a tray popover.
            app.set_activation_policy(ActivationPolicy::Accessory);
            let window = app.handle().get_webview_window("main").unwrap();
            window.to_popover(ToPopoverOptions {
                is_fullsize_content: true,
            });
            let tray = app.tray_by_id("main").unwrap();
            let handle = app.handle().clone();
            // Left-click release on the tray icon toggles the popover.
            tray.on_tray_icon_event(move |_, event| match event {
                TrayIconEvent::Click {
                    button,
                    button_state,
                    ..
                } => {
                    if button == MouseButton::Left && button_state == MouseButtonState::Up {
                        if !handle.is_popover_shown() {
                            handle.show_popover();
                        } else {
                            handle.hide_popover();
                        }
                    }
                }
                _ => {}
            });
            Ok(())
        })
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}
| rust | MIT | efdd92922d337d350d59d8cf20c821e24ca16b48 | 2026-01-04T20:20:39.361853Z | false |
otavio/rsget | https://github.com/otavio/rsget/blob/a3cf767cff45087b40f2e75e7ca7701ef773524b/src/main.rs | src/main.rs | // Copyright (C) 2018, 2019, 2020 O.S. Systems Sofware LTDA
//
// SPDX-License-Identifier: Apache-2.0
use anyhow::anyhow;
use clap::Parser;
use indicatif::{ProgressBar, ProgressStyle};
use reqwest::{header, Client};
use std::path::Path;
use tokio::{fs, io::AsyncWriteExt};
// Command-line arguments, parsed by clap's derive API.
#[derive(Parser, Debug)]
#[clap(author, version)]
struct Cmdline {
    /// URL to download
    #[clap(value_parser, short, long)]
    url: reqwest::Url,
}
#[tokio::main]
async fn main() -> Result<(), anyhow::Error> {
    let cmdline = Cmdline::parse();
    let client = Client::new();

    // Probe the server with a HEAD request so the progress bar has a total
    // length; fall back to 0 when Content-Length is absent or unparsable.
    let total_size = {
        let resp = client.head(cmdline.url.as_str()).send().await?;
        if resp.status().is_success() {
            resp.headers()
                .get(header::CONTENT_LENGTH)
                .and_then(|ct_len| ct_len.to_str().ok())
                .and_then(|ct_len| ct_len.parse().ok())
                .unwrap_or(0)
        } else {
            return Err(anyhow!(
                "Couldn't download URL: {}. Error: {:?}",
                cmdline.url,
                resp.status(),
            ));
        }
    };

    let mut request = client.get(cmdline.url.as_str());
    let pb = ProgressBar::new(total_size);
    pb.set_style(ProgressStyle::default_bar()
        .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {bytes}/{total_bytes} ({eta})")
        .unwrap()
        .progress_chars("#>-"));

    // Target file name: last path segment of the URL, or a generic fallback.
    let file = Path::new(
        cmdline.url.path_segments().and_then(std::iter::Iterator::last).unwrap_or("tmp.bin"),
    );

    if file.exists() {
        // Resume support: the file already holds bytes at offsets 0..size, so
        // request everything starting at offset `size` (byte ranges are
        // 0-indexed). The previous code asked for `size - 1`, which
        // re-downloaded the last byte already on disk and, because the file is
        // opened in append mode, duplicated it in the output.
        let size = file.metadata()?.len();
        request = request.header(header::RANGE, format!("bytes={}-", size));
        pb.inc(size);
    }

    let mut source = request.send().await?;
    let mut dest = fs::OpenOptions::new().create(true).append(true).open(&file).await?;
    while let Some(chunk) = source.chunk().await? {
        dest.write_all(&chunk).await?;
        pb.inc(chunk.len() as u64);
    }

    println!("Download of '{}' has been completed.", file.to_str().unwrap());
    Ok(())
}
| rust | Apache-2.0 | a3cf767cff45087b40f2e75e7ca7701ef773524b | 2026-01-04T20:20:41.702478Z | false |
jtomschroeder/cedar | https://github.com/jtomschroeder/cedar/blob/1674097ba752cb3c3e86db119f9faab85c963ecb/src/shadow.rs | src/shadow.rs | use std::collections::HashMap;
use std::str;
use boo::Boo;
use dom;
use application::View;
use renderer::{Command, Event, Update};
use tree::{self, Vertex};
/// Convert 'changeset' to list of commands to send to UI 'rendering' process.
///
/// `old` is the previous DOM root (used to detect attributes that must be
/// cleared), `dom` is the new root, and `set` is the diff between them.
fn commands<T>(
    old: Option<&dom::Object<T>>,
    dom: &dom::Object<T>,
    set: dom::Changeset,
) -> Vec<Command> {
    // Emit a Create command for `node` and every descendant under `root`.
    fn expand<S>(root: &tree::Path, node: &dom::Object<S>, commands: &mut Vec<Command>) {
        // TODO: handle create path issue (vertex traversal assumes from root)
        node.traverse(root, |path, node| {
            let id = path.to_string();
            let kind = node.widget.element();
            let value = node.widget.value.clone();
            let attributes = node.attributes.iter().map(|attr| attr.raw()).collect();
            let parent = path.parent().to_string();
            commands.push(Command::Create {
                id,
                parent,
                kind,
                value,
                attributes,
            })
        });
    }
    let mut commands = vec![];
    for (path, op) in set.into_iter() {
        // Lazily resolved: Delete/Replace never need the new node.
        let node = || dom.find(&path).expect("path in nodes");
        let id = || path.to_string();
        match op {
            tree::Operation::Create => expand(&path, node(), &mut commands),
            tree::Operation::Update => {
                // TODO: are we missing an update to 'Text' attributes?
                let node = node();
                if node.widget.is_text() {
                    let value = node.widget.value.clone().unwrap();
                    commands.push(Command::Update {
                        id: id(),
                        value: Update::Text(value),
                    })
                } else {
                    let mut attrs: HashMap<_, _> =
                        node.attributes.iter().map(|attr| attr.raw()).collect();
                    // Clear out any attributes that are no longer used.
                    // NOTE(review): `old` is the previous ROOT object, not the
                    // old node at `path` — stale-attribute detection compares
                    // against the root's attribute set; verify this is intended.
                    if let Some(old) = old {
                        for (key, _) in old.attributes.iter().map(|attr| attr.raw()) {
                            if !attrs.contains_key(&key) {
                                attrs.insert(key, "".into());
                            }
                        }
                    }
                    commands.push(Command::Update {
                        id: id(),
                        value: Update::Attributes(attrs),
                    })
                }
            }
            tree::Operation::Delete => commands.push(Command::Remove { id: id() }),
            tree::Operation::Replace => panic!("`Replace` not yet implemented!"),
        }
    }
    commands
}
/// The retained ("shadow") DOM that diffs against each freshly rendered view.
pub struct Shadow<S> {
    dom: dom::Object<S>,
}
impl<S> Shadow<S>
where
    S: Send + PartialEq + 'static,
{
    /// Renders the initial DOM and returns it with the Create commands needed
    /// to materialize it in the UI process.
    pub fn initialize<M>(model: &M, view: View<M, S>) -> (Self, Vec<Command>) {
        let dom = view(&model);
        // Create changeset: Create @ 'root'
        let patch = vec![(tree::Path::new(), tree::Operation::Create)];
        let commands = commands(None, &dom, patch);
        (Shadow { dom }, commands)
    }
    // Resolves a dot-separated id string (e.g. "0.1.2") to a DOM node.
    fn find(&self, id: &str) -> Option<&dom::Object<S>> {
        let path = id.split(".").filter_map(|s| s.parse().ok()).collect();
        let path = tree::Path::from_vec(path);
        self.dom.find(&path)
    }
    /// Find the message associated with an event (by looking up node in DOM)
    pub fn translate(&self, event: Event) -> Option<Boo<S>> {
        // TODO: serialize ID as Path object to avoid parsing!
        // - in both Command and Event
        match event {
            Event::Click { id } => self.find(&id)
                .and_then(|node| node.widget.click.as_ref().map(Boo::Borrowed)),
            Event::Input { id, value } => self.find(&id)
                .and_then(|node| node.widget.input.as_ref().map(|i| i(value)).map(Boo::Owned)),
            Event::Keydown { id, code } => self.find(&id).and_then(|node| {
                node.widget
                    .keydown
                    .as_ref()
                    .and_then(|k| k(code))
                    .map(Boo::Owned)
            }),
        }
    }
    /// Renders the view for `model`, diffs it against the retained DOM, and
    /// returns the commands that bring the UI up to date.
    pub fn update<M>(&mut self, model: &M, view: View<M, S>) -> Vec<Command> {
        let dom = view(&model);
        let changeset = dom::diff(&self.dom, &dom);
        let cmds = commands(Some(&self.dom), &dom, changeset);
        // Replace 'old' DOM with 'new' DOM
        self.dom = dom;
        cmds
    }
}
| rust | MIT | 1674097ba752cb3c3e86db119f9faab85c963ecb | 2026-01-04T20:20:46.962183Z | false |
jtomschroeder/cedar | https://github.com/jtomschroeder/cedar/blob/1674097ba752cb3c3e86db119f9faab85c963ecb/src/lib.rs | src/lib.rs |
#![deny(trivial_casts, trivial_numeric_casts)]
#![deny(unused_import_braces, unused_qualifications)]
// #![deny(missing_docs)]
// #![deny(unsafe_code, unstable_features)]
// #![deny(missing_debug_implementations, missing_copy_implementations)]
extern crate serde;
#[macro_use]
extern crate serde_derive;
pub extern crate serde_json as json;
extern crate sass_rs as sass;
extern crate web_view;
extern crate cedar_hypertext as hypertext;
mod boo;
#[macro_use]
mod tree;
mod application;
mod renderer;
mod shadow;
pub mod dom;
pub use application::{app, Application};
pub use hypertext::hypertext;
// TODO: move into own module or crate
/// build.rs helper
pub fn custom_style(path: &str) {
    // Compile the SCSS file at `path` and write the resulting CSS to
    // $OUT_DIR/style.css, for inclusion at build time.
    // NOTE: panics (unwrap) on compile or I/O failure — acceptable in build.rs.
    let css = sass::compile_file(path, sass::Options::default()).unwrap();
    use std::fs::File;
    use std::io::prelude::*;
    let out_file = format!("{}/style.css", std::env::var("OUT_DIR").unwrap());
    let mut file = File::create(&out_file).unwrap();
    file.write_all(css.as_bytes()).unwrap();
}
| rust | MIT | 1674097ba752cb3c3e86db119f9faab85c963ecb | 2026-01-04T20:20:46.962183Z | false |
jtomschroeder/cedar | https://github.com/jtomschroeder/cedar/blob/1674097ba752cb3c3e86db119f9faab85c963ecb/src/renderer.rs | src/renderer.rs | use std::collections::HashMap;
// Stringified tree::Path (e.g. "0.1.2") identifying a node across processes.
type Identifier = String;
// TODO: `enum` for kind?
// Payload of an Update command: either new text content or a full
// attribute map (empty-string values clear an attribute).
#[derive(Serialize, Deserialize, Debug)]
pub enum Update {
    Text(String),
    Attributes(HashMap<String, String>),
}
// Commands sent from the shadow DOM to the UI 'rendering' process.
#[derive(Serialize, Deserialize, Debug)]
pub enum Command {
    Create {
        id: Identifier,
        parent: Identifier,
        kind: String,
        value: Option<String>,
        attributes: HashMap<String, String>,
    },
    Update {
        id: Identifier,
        value: Update,
    },
    Remove {
        id: Identifier,
    },
}
// Events sent back from the UI process, keyed by the originating node's id.
#[derive(Serialize, Deserialize, Debug)]
pub enum Event {
    Click { id: Identifier },
    Input { id: Identifier, value: String },
    Keydown { id: Identifier, code: u32 },
}
| rust | MIT | 1674097ba752cb3c3e86db119f9faab85c963ecb | 2026-01-04T20:20:46.962183Z | false |
jtomschroeder/cedar | https://github.com/jtomschroeder/cedar/blob/1674097ba752cb3c3e86db119f9faab85c963ecb/src/application.rs | src/application.rs | use json;
use sass;
use web_view;
use dom;
use renderer;
use shadow::Shadow;
/// Pure state transition: consumes the old model and a message, returns the
/// new model.
pub type Update<M, S> = fn(M, &S) -> M;
/// Pure render function: maps the model to a virtual DOM tree.
pub type View<M, S> = fn(&M) -> dom::Object<S>;

/// An Elm-style application: initial model, update function, view function,
/// and an optional custom stylesheet.
pub struct Application<M, S> {
    model: M,
    update: Update<M, S>,
    view: View<M, S>,
    // Raw CSS injected into the host page; falls back to the bundled
    // default stylesheet when `None` (see `Program::run`).
    style: Option<String>,
}
impl<M, S> Application<M, S>
where
    S: Send + PartialEq + 'static,
    M: Send + 'static,
{
    /// Bundle the initial model with its `update`/`view` functions.
    pub fn new(model: M, update: Update<M, S>, view: View<M, S>) -> Self {
        Application {
            style: None,
            model,
            update,
            view,
        }
    }

    /// Attach a custom stylesheet (raw CSS) to the application.
    pub fn style(mut self, style: &str) -> Self {
        self.style = Some(String::from(style));
        self
    }

    /// Hand everything over to the runtime; blocks until the app exits.
    pub fn run(self) {
        let Application {
            model,
            update,
            view,
            style,
        } = self;
        Program::run(model, update, view, style)
    }
}
/// Convenience entry point: build an [`Application`] with no custom style
/// and run it immediately.
pub fn app<S, M>(model: M, update: Update<M, S>, view: View<M, S>)
where
    S: Send + PartialEq + 'static,
    M: Send + 'static,
{
    let application = Application::new(model, update, view);
    application.run()
}
/// The running program: owns the model, the pure functions, and the shadow
/// tree used for diffing successive views.
struct Program<M, S> {
    // `Option` so `process` can move the model out by value for the update
    // function and put the result back afterwards.
    model: Option<M>,
    update: Update<M, S>,
    view: View<M, S>,
    // Retained virtual-DOM state — presumably the last rendered tree plus
    // event translation; confirm against `shadow.rs`.
    shadow: Shadow<S>,
}

// Host page and default stylesheet, bundled into the binary at compile time.
const HTML: &str = include_str!("../lib/web-view/index.html");
const CSS: &str = include_str!("../lib/web-view/style.scss");
impl<M, S> Program<M, S>
where
    S: Send + PartialEq + 'static,
    M: Send + 'static,
{
    /// Build the program and render the initial view, returning the
    /// commands that create the initial DOM.
    fn new(model: M, update: Update<M, S>, view: View<M, S>) -> (Self, Vec<renderer::Command>) {
        let (shadow, commands) = Shadow::initialize(&model, view);
        (
            Program {
                model: Some(model),
                update,
                view,
                shadow,
            },
            commands,
        )
    }

    /// Handle one JSON-encoded renderer event: translate it into a message,
    /// run `update`, re-render, and return the resulting DOM commands.
    fn process(&mut self, event: &str) -> Vec<renderer::Command> {
        let event: renderer::Event = json::from_str(event).unwrap();

        // TODO: get new subscriptions
        // - Do a 'difference' on the old and new
        // - Enable new ones and disable old ones

        let model = {
            // translate events from backend renderer to actions
            let message = match self.shadow.translate(event) {
                Some(m) => m,
                // Event has no handler attached — nothing to do.
                _ => return vec![], // TODO: Option<>?
            };

            // Move the model out (see the `Option` on the field), feed it
            // through the pure update function.
            let model = self.model.take().unwrap();
            (self.update)(model, &message)
        };

        let commands = {
            // Diff the new view against the shadow tree, then restore the
            // model for the next event.
            let commands = self.shadow.update(&model, self.view);
            self.model = Some(model);
            commands
        };

        commands
    }

    /// Start the web-view event loop. Blocks until the window closes.
    fn run(model: M, update: Update<M, S>, view: View<M, S>, style: Option<String>) {
        let (mut program, mut commands) = Program::new(model, update, view);

        // Inject either the user-supplied CSS or the compiled default
        // stylesheet into the bundled host page.
        let html = match style {
            Some(style) => HTML.replace("/* styles */", &style),
            _ => {
                let css = sass::compile_string(CSS, sass::Options::default()).unwrap();
                HTML.replace("/* styles */", &css)
            }
        };

        let title = "cedar app";
        let size = (800, 600);
        let resizable = true;
        let debug = true;

        web_view::run(
            title,
            web_view::Content::Html(html),
            Some(size),
            resizable,
            debug,
            // Init callback: set up the JS side, then replay the initial
            // render commands.
            move |webview| {
                webview.dispatch(move |webview, _| {
                    webview.eval("setup()");
                    for cmd in commands.drain(..) {
                        let cmd = json::to_string(&cmd).unwrap();
                        webview.eval(&format!("window.cedar.command('{}')", cmd));
                    }
                });
            },
            // Message callback: each JS-side event is processed and the
            // resulting patch commands are sent back.
            move |webview, message, _| {
                let mut commands = program.process(message);
                for cmd in commands.drain(..) {
                    let cmd = json::to_string(&cmd).unwrap();
                    webview.eval(&format!("window.cedar.command('{}')", cmd));
                }
            },
            (),
        );
    }
}
| rust | MIT | 1674097ba752cb3c3e86db119f9faab85c963ecb | 2026-01-04T20:20:46.962183Z | false |
jtomschroeder/cedar | https://github.com/jtomschroeder/cedar/blob/1674097ba752cb3c3e86db119f9faab85c963ecb/src/dom.rs | src/dom.rs | use std::fmt;
use tree;
/// The element kind as understood by the renderer (e.g. `"div"`, `"button"`).
pub type Element = String;

/// A single `name="value"` attribute pair.
#[derive(PartialEq, Debug)]
pub struct Attribute(String, String);

impl Attribute {
    /// Clone out the `(name, value)` pair.
    pub fn raw(&self) -> (String, String) {
        (self.0.clone(), self.1.clone())
    }
}
/// A single UI node: its element kind, an optional value (text content or
/// input value), and optional event handlers producing messages of type `S`.
pub struct Widget<S> {
    element: Element,
    pub value: Option<String>,

    // Events
    /// Message emitted when the widget is clicked.
    pub click: Option<S>,
    /// Maps the input's new string value to a message.
    pub input: Option<Box<Fn(String) -> S>>,
    /// Maps a key code to an optional message (`None` ignores the key).
    pub keydown: Option<Box<Fn(u32) -> Option<S>>>,
}
impl<S> PartialEq for Widget<S> {
    // Equality deliberately ignores the event handlers: boxed closures
    // cannot be compared, so two widgets are "equal" when their element
    // kind and value match. This is what the tree diff relies on.
    fn eq(&self, other: &Self) -> bool {
        self.element == other.element && self.value == other.value
    }
}
impl<S> Widget<S> {
pub fn new(element: Element) -> Self {
Widget {
element,
value: None,
click: None,
input: None,
keydown: None,
}
}
pub fn new_with_value(element: Element, value: String) -> Self {
Widget {
element,
value: Some(value),
click: None,
input: None,
keydown: None,
}
}
pub fn is_text(&self) -> bool {
self.element == "text"
}
pub fn element(&self) -> String {
self.element.clone()
}
}
impl<S> fmt::Debug for Widget<S> {
    // Debug output shows only the element kind; value and handlers are
    // intentionally omitted.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self.element)
    }
}
/// A node in the virtual DOM tree: a widget plus its attributes and its
/// ordered child nodes.
pub struct Object<S> {
    pub widget: Widget<S>,
    pub attributes: Vec<Attribute>,
    pub children: Vec<Object<S>>,
}
/// Object: Actions
impl<S> Object<S> {
pub fn new(widget: &str) -> Self {
Object {
widget: Widget::new(widget.into()),
attributes: vec![],
children: vec![],
}
}
pub fn from_widget(widget: Widget<S>) -> Self {
Object {
widget,
attributes: vec![],
children: vec![],
}
}
}
impl<S> fmt::Debug for Object<S> {
    // Attributes are intentionally omitted from the debug output.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Object")
            .field("widget", &self.widget)
            .field("children", &self.children)
            .finish()
    }
}
/// Expose the child list so the generic diffing in `tree` can walk
/// `Object` trees.
impl<T> tree::Vertex for Object<T> {
    fn children(&self) -> &[Self] {
        &self.children
    }
}
impl<T: PartialEq> tree::Comparable for Object<T> {
    /// Classify how two virtual-DOM nodes differ.
    fn compare(&self, other: &Self) -> Option<tree::Difference> {
        // A different element kind means the node must be replaced outright.
        if self.widget.element != other.widget.element {
            return Some(tree::Difference::Kind);
        }
        // Same kind: an in-place update suffices when only the value or
        // the attributes changed.
        if self.widget != other.widget || self.attributes != other.attributes {
            Some(tree::Difference::Value)
        } else {
            None
        }
    }
}
/// Re-exports so callers can use the diff result types without importing
/// the `tree` module directly.
pub type Change = tree::Change;
pub type Changeset = tree::Changeset;

/// Diff two virtual DOM trees into an ordered list of patch operations.
pub fn diff<S: PartialEq>(old: &Object<S>, new: &Object<S>) -> Changeset {
    tree::diff(old, new)
}
/// Object: Actions
impl<S> Object<S> {
    /// Emit `action` when this node is clicked.
    pub fn click(mut self, action: S) -> Self {
        self.widget.click = Some(action);
        self
    }

    /// Map input-value changes on this node to messages.
    pub fn input<F>(mut self, input: F) -> Self
    where
        F: Fn(String) -> S + 'static,
    {
        self.widget.input = Some(Box::new(input));
        self
    }

    /// Map key codes to messages; the closure returns `None` to ignore a key.
    pub fn keydown<F>(mut self, keydown: F) -> Self
    where
        F: Fn(u32) -> Option<S> + 'static,
    {
        self.widget.keydown = Some(Box::new(keydown));
        self
    }
}
/// Object: Attributes
impl<S> Object<S> {
    /// Set attribute `name` to `value` (builder style).
    ///
    /// `"class"` is rewritten to `"className"` so the attribute can be
    /// applied as a DOM property on the JS side.
    pub fn attr<V: ToString>(self, name: &str, value: V) -> Self {
        let name = if name == "class" { "className" } else { name };
        self.attribute(Attribute(name.into(), value.to_string()))
    }

    /// Append a pre-built [`Attribute`].
    fn attribute(mut self, attr: Attribute) -> Self {
        self.attributes.push(attr);
        self
    }
}
/// Anything that can be appended as a child of an [`Object`] via
/// [`Object::push`].
pub trait Pushable<S> {
    fn pushed(self, object: &mut Object<S>);
}

/// A node is appended as-is.
impl<S> Pushable<S> for Object<S> {
    fn pushed(self, object: &mut Object<S>) {
        object.children.push(self);
    }
}

/// A vector of nodes is appended in order.
impl<'s, S> Pushable<S> for Vec<Object<S>> {
    fn pushed(self, object: &mut Object<S>) {
        object.children.extend(self);
    }
}

/// String slices become text nodes.
impl<'s, S> Pushable<S> for &'s str {
    fn pushed(self, object: &mut Object<S>) {
        object.children.push(text(self));
    }
}

/// Owned strings become text nodes.
impl<S> Pushable<S> for String {
    fn pushed(self, object: &mut Object<S>) {
        object.children.push(text(self));
    }
}
/// Object: Children
impl<S> Object<S> {
    /// Append a single child node (builder style).
    pub fn add(mut self, child: Object<S>) -> Self {
        self.children.push(child);
        self
    }

    /// Append a batch of child nodes.
    pub fn children(mut self, children: Vec<Object<S>>) -> Self {
        // `extend` takes any `IntoIterator`; the explicit `.into_iter()`
        // previously called here was redundant.
        self.children.extend(children);
        self
    }

    /// Append anything [`Pushable`]: a node, a `Vec` of nodes, or a string
    /// (which becomes a text node).
    pub fn push<P: Pushable<S>>(mut self, pushed: P) -> Self {
        pushed.pushed(&mut self);
        self
    }
}
/// Build a text node: a `"text"` pseudo-widget whose value is the rendered
/// string.
pub fn text<S, T: ToString>(text: T) -> Object<S> {
    Object::from_widget(Widget::new_with_value(String::from("text"), text.to_string()))
}
| rust | MIT | 1674097ba752cb3c3e86db119f9faab85c963ecb | 2026-01-04T20:20:46.962183Z | false |
jtomschroeder/cedar | https://github.com/jtomschroeder/cedar/blob/1674097ba752cb3c3e86db119f9faab85c963ecb/src/boo.rs | src/boo.rs | use std::convert::AsRef;
use std::ops::Deref;
///
/// Borrowed or Owned
/// Very similar to `std::borrow::Cow` but without requiring `ToOwned` (and therefore, `Clone`)
///
pub enum Boo<'a, B: 'a> {
    Borrowed(&'a B),
    Owned(B),
}

impl<'a, B: 'a> AsRef<B> for Boo<'a, B> {
    /// Borrow the wrapped value, regardless of which variant holds it.
    fn as_ref(&self) -> &B {
        match *self {
            Boo::Borrowed(borrowed) => borrowed,
            Boo::Owned(ref owned) => owned,
        }
    }
}

impl<'a, B: 'a> Deref for Boo<'a, B> {
    type Target = B;

    /// Deref straight through to the wrapped value, so a `Boo<B>` can be
    /// used wherever `&B` is expected.
    fn deref(&self) -> &B {
        match *self {
            Boo::Borrowed(borrowed) => borrowed,
            Boo::Owned(ref owned) => owned,
        }
    }
}
| rust | MIT | 1674097ba752cb3c3e86db119f9faab85c963ecb | 2026-01-04T20:20:46.962183Z | false |
jtomschroeder/cedar | https://github.com/jtomschroeder/cedar/blob/1674097ba752cb3c3e86db119f9faab85c963ecb/src/tree/path.rs | src/tree/path.rs | use std::fmt;
/// A borrowed view of a [`Path`] (a slice of child indices).
#[derive(Clone, Debug)]
pub struct PathRef<'p> {
    path: &'p [usize],
}

/// An owned location in a tree, expressed as the sequence of child indices
/// taken from the root.
#[derive(Clone, Debug)]
pub struct Path {
    path: Vec<usize>,
}

impl Path {
    /// Path pointing at the root node (index `0`).
    pub fn new() -> Self {
        Path { path: vec![0] }
    }

    /// Wrap an explicit index sequence.
    pub fn from_vec(path: Vec<usize>) -> Self {
        Path { path }
    }

    /// Descend one level to child `element`.
    pub fn push(&mut self, element: usize) {
        self.path.push(element)
    }

    /// Number of levels, root included.
    pub fn len(&self) -> usize {
        self.path.len()
    }

    /// The raw index sequence.
    pub fn raw(&self) -> &[usize] {
        &self.path
    }

    /// Borrowed path of this node's parent (everything but the last index).
    ///
    /// NOTE(review): panics via index underflow on an empty path — callers
    /// appear to only use this on non-empty paths; confirm.
    pub fn parent(&self) -> PathRef {
        PathRef {
            path: &self.path[..self.len() - 1],
        }
    }

    /// Borrow the whole path.
    pub fn reference(&self) -> PathRef {
        PathRef { path: &self.path }
    }
}

impl fmt::Display for Path {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.reference())
    }
}

impl<'p> fmt::Display for PathRef<'p> {
    /// Render as dot-separated indices, e.g. `[1, 2, 3]` -> `"1.2.3"`;
    /// the empty path renders as `""`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Stream each segment straight to the formatter. The previous
        // implementation folded into an intermediate `String`, allocating a
        // fresh `format!` result per segment.
        let mut parts = self.path.iter();
        if let Some(first) = parts.next() {
            write!(f, "{}", first)?;
            for part in parts {
                write!(f, ".{}", part)?;
            }
        }
        Ok(())
    }
}
| rust | MIT | 1674097ba752cb3c3e86db119f9faab85c963ecb | 2026-01-04T20:20:46.962183Z | false |
/// The result of one zip step: an element from the left side, the right
/// side, or both.
pub enum Pair<T, U> {
    Left(T),
    Both(T, U),
    Right(U),
}

/// Like `Iterator::zip`, but runs to the end of the *longer* iterator,
/// tagging each step with which side(s) still produced an element.
pub struct Zip<I, J> {
    left: I,
    right: J,
}

impl<I, J> Iterator for Zip<I, J>
where
    I: Iterator,
    J: Iterator,
{
    type Item = Pair<I::Item, J::Item>;

    fn next(&mut self) -> Option<Self::Item> {
        // Advance both sides unconditionally; the pairing records which of
        // the two (if any) is already exhausted.
        match (self.left.next(), self.right.next()) {
            (None, None) => None,
            (Some(l), None) => Some(Pair::Left(l)),
            (None, Some(r)) => Some(Pair::Right(r)),
            (Some(l), Some(r)) => Some(Pair::Both(l, r)),
        }
    }
}

/// Construct a [`Zip`] over two iterables.
pub fn zip<I, J>(i: I, j: J) -> Zip<I::IntoIter, J::IntoIter>
where
    I: IntoIterator,
    J: IntoIterator,
{
    Zip {
        left: i.into_iter(),
        right: j.into_iter(),
    }
}
| rust | MIT | 1674097ba752cb3c3e86db119f9faab85c963ecb | 2026-01-04T20:20:46.962183Z | false |
jtomschroeder/cedar | https://github.com/jtomschroeder/cedar/blob/1674097ba752cb3c3e86db119f9faab85c963ecb/src/tree/mod.rs | src/tree/mod.rs | mod path;
mod zipper;
use std::collections::VecDeque;
use self::zipper::{zip, Pair};
pub use self::path::Path;
pub trait Vertex {
    /// The node's ordered children.
    fn children(&self) -> &[Self]
    where
        Self: Sized;

    /// Breadth-first search for the node addressed by `path`.
    ///
    /// The first path element addresses this root (index `0`); each further
    /// element selects a child by position. An empty path, or a path whose
    /// leading index is not `0`, returns `None`.
    fn find(&self, path: &Path) -> Option<&Self>
    where
        Self: Sized,
    {
        let path = path.raw();

        let mut queue = VecDeque::new();
        queue.push_back((path, 0, self));

        while let Some((path, i, node)) = queue.pop_front() {
            match path.len() {
                // Exhausted path: nothing left to match.
                0 => {}
                // Last segment names this node: found it.
                1 if i == path[0] => return Some(node),
                // Segment matches this node: enqueue children with the
                // remainder of the path.
                _ if i == path[0] => for (n, child) in node.children().iter().enumerate() {
                    queue.push_back((&path[1..], n, child));
                },
                // Segment does not match this node's index: dead end.
                _ => {}
            }
        }

        None
    }

    /// Breadth-first traversal, invoking `delegate` with every node and its
    /// full path (rooted at `root`).
    fn traverse<D>(&self, root: &Path, mut delegate: D)
    where
        Self: Sized,
        D: FnMut(&Path, &Self),
    {
        let path = root.clone();

        let mut queue = VecDeque::new();
        queue.push_back((path, self));

        while let Some((path, node)) = queue.pop_front() {
            delegate(&path, node);

            for (n, child) in node.children().iter().enumerate() {
                // Each child extends the parent's path by its own index.
                let mut path = path.clone();
                path.push(n);
                queue.push_back((path, child));
            }
        }
    }
}
/// Node-level comparison used by [`diff`].
pub trait Comparable {
    /// `None` when the nodes are equal; otherwise how they differ.
    fn compare(&self, other: &Self) -> Option<Difference>;
}

/// The patch operations a diff can produce.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Operation {
    Create,
    Delete,
    Update,
    Replace,
}

/// A single operation at a tree location.
pub type Change = (Path, Operation);
/// An ordered list of changes (breadth-first order — see [`diff`]).
pub type Changeset = Vec<Change>;

/// How two nodes differ: incompatible kinds (must replace the subtree) or
/// only the value (can update in place and keep diffing children).
pub enum Difference {
    Kind,
    Value,
}
/// Compute the changeset that transforms the `old` tree into the `new` tree,
/// in breadth-first order.
pub fn diff<V>(old: &V, new: &V) -> Changeset
where
    V: Vertex + Comparable,
{
    use self::Operation::*;

    // - if `old` doesn't exist: CREATE new
    // - else if `new` doesn't exist: DELETE old
    // - else if old.type != new.type: REPLACE old with new
    // - else UPDATE properties and check children

    // Breadth-First Traversal!

    let mut changeset = vec![];

    let path = Path::new();
    let mut queue = VecDeque::new();

    // Seed the queue with the roots' children (unless the roots themselves
    // must be replaced).
    // TODO: this code is same as below... (DRY)
    match old.compare(&new) {
        Some(Difference::Kind) => changeset.push((path, Replace)),
        cmp => {
            if let Some(Difference::Value) = cmp {
                changeset.push((path.clone(), Update));
            }
            queue.push_back((old.children(), new.children(), path));
        }
    }

    while let Some((old, new, path)) = queue.pop_front() {
        // Zip the child lists; `Left`/`Right` mark children that exist on
        // only one side.
        for (n, pair) in zip(old, new).enumerate() {
            // Add current location to path
            let mut path = path.clone();
            path.push(n);

            match pair {
                // Only in the old tree: deleted.
                Pair::Left(_) => changeset.push((path, Delete)),
                // Only in the new tree: created.
                Pair::Right(_) => changeset.push((path, Create)),
                Pair::Both(t, u) => {
                    // if t.type != u.type => replace u with t
                    // else if t != u (properties changes) => update and diff children
                    // else (if t == u) => diff children
                    match t.compare(&u) {
                        Some(Difference::Kind) => changeset.push((path, Replace)),
                        cmp => {
                            if let Some(Difference::Value) = cmp {
                                changeset.push((path.clone(), Update));
                            }
                            queue.push_back((t.children(), u.children(), path));
                        }
                    }
                }
            }
        }
    }

    changeset
}
// TODO: REALLY need to build out these tests!
#[cfg(test)]
mod tests {
    use tree;

    /// Two node kinds, so the tests can trigger `Difference::Kind`.
    #[derive(PartialEq, Debug)]
    enum Kind {
        This,
        That,
    }

    /// Minimal tree node for exercising the diff.
    #[derive(Debug)]
    struct Object {
        kind: Kind,
        value: u32,
        children: Vec<Object>,
    }

    /// Shorthand constructor for a `Kind::This` node.
    fn this(value: u32, children: Vec<Object>) -> Object {
        Object {
            kind: Kind::This,
            value,
            children,
        }
    }

    /// Shorthand constructor for a `Kind::That` node.
    fn that(value: u32, children: Vec<Object>) -> Object {
        Object {
            kind: Kind::That,
            value,
            children,
        }
    }

    impl tree::Vertex for Object {
        fn children(&self) -> &[Self] {
            &self.children
        }
    }

    impl tree::Comparable for Object {
        fn compare(&self, other: &Self) -> Option<tree::Difference> {
            if self.kind != other.kind {
                Some(tree::Difference::Kind)
            } else if self.value != other.value {
                Some(tree::Difference::Value)
            } else {
                None
            }
        }
    }

    /// Diffing a tree against itself must produce no changes.
    #[test]
    fn same_tree() {
        let tree = that(0, vec![this(1, vec![]), this(2, vec![])]);
        let changeset = tree::diff(&tree, &tree);
        assert!(changeset.is_empty());
    }

    // NOTE(review): this test only prints the changeset — it asserts
    // nothing and passes regardless of what the diff produces.
    #[test]
    fn tree() {
        let left = that(0, vec![this(1, vec![]), this(2, vec![])]);
        let right = that(0, vec![this(2, vec![])]);
        let changeset = tree::diff(&left, &right);
        println!("changeset: {:?}", changeset);
    }
}
| rust | MIT | 1674097ba752cb3c3e86db119f9faab85c963ecb | 2026-01-04T20:20:46.962183Z | false |
jtomschroeder/cedar | https://github.com/jtomschroeder/cedar/blob/1674097ba752cb3c3e86db119f9faab85c963ecb/hypertext/src/lib.rs | hypertext/src/lib.rs | extern crate proc_macro as pm;
extern crate proc_macro2 as pm2;
#[macro_use]
extern crate quote;
mod parser;
/// The `hypertext!` proc-macro: parses an HTML-like token stream and expands
/// it into `::cedar::dom` builder calls.
#[proc_macro]
pub fn hypertext(tokens: pm::TokenStream) -> pm::TokenStream {
    // The parser works on the stringified token stream; the grammar is
    // whitespace-insensitive, so stringification is harmless.
    let tokens = tokens.to_string();
    let dom = parser::parse(&tokens).unwrap();
    dom.render().into()
}
impl parser::Element {
    /// Turn a parsed element into the token stream that builds the
    /// corresponding `::cedar::dom::Object` at runtime.
    fn render(self) -> quote::Tokens {
        match self {
            parser::Element::Element {
                name,
                mut attributes,
                mut children,
            } => {
                let attributes: Vec<_> = attributes.drain(..).map(|a| a.render()).collect();
                let children: Vec<_> = children.drain(..).map(Self::render).collect();
                // `Object::new(name)` followed by one builder call per
                // attribute, then one `.push(..)` per child.
                quote! {
                    ::cedar::dom::Object::new(#name)
                        #( #attributes )*
                        #( .push( #children ) )*
                }
            }

            parser::Element::Text(text) => {
                quote! { ::cedar::dom::text(#text) }
            }

            parser::Element::Block(block) => {
                // Embedded `{ .. }` Rust code is re-parsed into tokens and
                // spliced in verbatim.
                let block: pm2::TokenStream = block.parse().unwrap();
                quote! { #block }
            }
        }
    }
}
impl parser::Attribute {
    /// Expand one `name={block}` attribute.
    ///
    /// The event attributes (`click`/`input`/`keydown`) become handler
    /// method calls on the object; every other name becomes a generic
    /// `.attr(..)` call.
    fn render(self) -> quote::Tokens {
        let name: pm2::TokenStream = self.name.parse().unwrap();
        let block: pm2::TokenStream = self.block.parse().unwrap();

        // TODO?: if value is 'true' or 'false' -> add or remove element without ="..."

        match self.name.as_str() {
            "click" | "input" | "keydown" => quote! { .#name(#block) },
            _ => quote! { .attr(stringify!(#name), #block) },
        }
    }
}
| rust | MIT | 1674097ba752cb3c3e86db119f9faab85c963ecb | 2026-01-04T20:20:46.962183Z | false |
jtomschroeder/cedar | https://github.com/jtomschroeder/cedar/blob/1674097ba752cb3c3e86db119f9faab85c963ecb/hypertext/src/parser.rs | hypertext/src/parser.rs | use std;
/// A parsed `name={block}` attribute; `block` holds the raw Rust code that
/// appeared between the braces.
#[derive(Debug)]
pub struct Attribute {
    pub name: String,
    pub block: String,
}

/// One node of the parsed template.
#[derive(Debug)]
pub enum Element {
    /// `<name attr={..}> children </name>`
    Element {
        name: String,
        attributes: Vec<Attribute>,
        children: Vec<Element>,
    },
    /// Bare text between tags.
    Text(String),
    /// An embedded `{ .. }` Rust expression.
    Block(String),
}
/// Parse results carry a plain-string error message.
type Result<T> = std::result::Result<T, String>;

/// A cheap cursor over the remaining input. Methods consume `self` and
/// return a new `Parsee` pointing past whatever they matched; `Clone` makes
/// backtracking trivial (clone, try, discard on failure).
#[derive(Clone, Debug)]
struct Parsee<'s>(&'s str);

impl<'s> Parsee<'s> {
    /// Look at the next character without consuming it.
    fn peek(&self) -> Option<char> {
        self.0.chars().next()
    }

    /// Skip leading whitespace.
    fn spaces(self) -> Self {
        Parsee(self.0.trim_left())
    }

    /// Consume the literal `text`, or fail without consuming anything.
    fn tag(self, text: &str) -> Result<Self> {
        if self.0.starts_with(text) {
            Ok(Parsee(&self.0[text.len()..]))
        } else {
            Err(format!("Text did not match tag '{}'", text))
        }
    }

    /// Consume a non-empty run of alphanumeric characters.
    fn identifier(self) -> Result<(Self, &'s str)> {
        match self.0.chars().take_while(|c| c.is_alphanumeric()).count() {
            0 => Err(format!("Failed to find identifier @ {}", self.0)),
            count => Ok((Parsee(&self.0[count..]), &self.0[..count])),
        }
    }

    /// Consume raw text up to the next `<` or `{`; returns `None` when the
    /// run is empty after trimming.
    fn text(self) -> (Self, Option<&'s str>) {
        let count = self.0.chars().take_while(|&c| c != '<' && c != '{').count();

        let text = self.0[..count].trim();
        let text = if text.is_empty() { None } else { Some(text) };

        (Parsee(&self.0[count..]), text)
    }

    /// Consume a brace-delimited `{ .. }` block, honoring nested braces,
    /// and return its trimmed contents.
    fn block(self) -> Result<(Self, &'s str)> {
        let parsee = self.spaces().tag("{")?;

        // Track nesting depth; stop just before the brace that closes the
        // outermost block.
        let mut stack = 1;
        let count = parsee
            .0
            .chars()
            .take_while(|&c| {
                match c {
                    '{' => stack += 1,
                    '}' => stack -= 1,
                    _ => {}
                }
                stack > 0
            })
            .count();

        let block = parsee.0[..count].trim();

        let count = count + 1; // count trailing '}'
        let parsee = Parsee(&parsee.0[count..]);

        let parsee = parsee.spaces();
        Ok((parsee, block))
    }

    /// Consume one `name={block}` attribute.
    fn attribute(self) -> Result<(Self, Attribute)> {
        let (parsee, attr) = self.spaces().identifier()?;
        let (parsee, block) = parsee.spaces().tag("=")?.block()?;

        Ok((
            parsee,
            Attribute {
                name: attr.into(),
                block: block.into(),
            },
        ))
    }

    /// Consume zero or more attributes; stops (without error) at the first
    /// thing that is not an attribute.
    fn attributes(self) -> (Self, Vec<Attribute>) {
        let mut attrs = vec![];

        let mut parsee = self;
        loop {
            // Clone so a failed attempt leaves the cursor untouched.
            let p = parsee.clone();
            match p.attribute() {
                Ok((p, attr)) => {
                    attrs.push(attr);
                    parsee = p
                }
                Err(_) => break,
            }
        }

        (parsee, attrs)
    }

    /// Consume `<name attr={..} ..>` and return the tag name + attributes.
    fn open_tag(self) -> Result<(Self, &'s str, Vec<Attribute>)> {
        let (parsee, name) = self.spaces().tag("<")?.spaces().identifier()?;
        let (parsee, attrs) = parsee.attributes();
        let parsee = parsee.spaces().tag(">")?;

        Ok((parsee, name, attrs))
    }

    /// Consume `</name>` and return the tag name.
    fn close_tag(self) -> Result<(Self, &'s str)> {
        let (parsee, name) = self
            .spaces()
            .tag("<")?
            .spaces()
            .tag("/")?
            .spaces()
            .identifier()?;
        let parsee = parsee.spaces().tag(">")?.spaces();

        Ok((parsee, name))
    }

    /// Consume a sequence of child nodes: embedded blocks, nested elements,
    /// and text runs, in any order, until a close tag or end of input.
    fn elements(self) -> Result<(Self, Vec<Element>)> {
        let mut elements = vec![];

        let mut parsee = self;
        loop {
            let p = parsee.clone();

            // Parse a block, element, or text node
            let (p, element) = match p.peek() {
                Some('{') => {
                    let (p, block) = p.block()?;
                    (p, Element::Block(block.into()))
                }

                // `<` may open a child *or* be the parent's close tag; a
                // failed element parse ends the child list.
                Some('<') => match p.element() {
                    Ok((p, element)) => (p, element),
                    Err(_) => break,
                },

                Some(_) => {
                    let (p, text) = p.text();
                    match text {
                        Some(text) => (p, Element::Text(text.into())),
                        None => return Err("Failed to find text element!".into()),
                    }
                }

                None => break,
            };

            elements.push(element);
            parsee = p
        }

        Ok((parsee, elements))
    }

    /// Consume a full element: open tag, children, matching close tag.
    fn element(self) -> Result<(Self, Element)> {
        let parsee = self;

        let (parsee, name, attrs) = parsee.open_tag()?;
        let (parsee, children) = parsee.spaces().elements()?;
        let (parsee, close) = parsee.close_tag()?;

        // NOTE(review): a mismatched close tag panics rather than erroring.
        assert_eq!(name, close); // TODO: return Err()

        Ok((
            parsee,
            Element::Element {
                name: name.into(),
                attributes: attrs,
                children,
            },
        ))
    }

    /// Entry point: parse exactly one element starting at the cursor.
    fn parse(self) -> Result<(Self, Element)> {
        self.element()
    }
}
/// Parse a complete template: exactly one root element consuming the whole
/// input.
pub fn parse(tokens: &str) -> Result<Element> {
    let (parsee, element) = Parsee(tokens).parse()?;

    // Only one root element is allowed, so the parse must consume all input.
    if parsee.0.is_empty() {
        Ok(element)
    } else {
        Err("Found more than a single element".into())
    }
}
#[cfg(test)]
mod tests {
    use parser::parse;

    /// Single-root parsing, plus the one-root-only rule.
    #[test]
    fn basic_parse() {
        assert!(parse("--").is_err());
        assert!(parse("<div></div>").is_ok());
        assert!(parse("<div>Hello, world!</div>").is_ok());
        assert!(parse("<div>Hello, world! <div></div> </div>").is_ok());
        assert!(
            parse(
                "<div></div>
                 <div></div>"
            ).is_err()
        );
    }

    /// Nested children with interleaved text.
    #[test]
    fn nested() {
        assert!(
            parse(
                "<div> text
                     <div>Hello!</div>
                     <div>Test</div>
                 </div>"
            ).is_ok()
        );
    }

    /// Text runs before and after child elements.
    #[test]
    fn text_around_child() {
        assert!(parse("<div> text <div>Hello!</div> more text </div>").is_ok());
        assert!(
            parse("<div> text <div>Hello!</div> more text <div>Test!</div> more </div>").is_ok()
        );
    }

    /// `name={block}` attributes, including nested braces in the block.
    #[test]
    fn attributes() {
        assert!(parse(r#"<div attr1={"test"} attr2={|| 42}></div>"#).is_ok());
        assert!(parse(r#"<div attr1={{ 42 }}></div>"#).is_ok());
    }

    // #[test]
    // fn self_closing_tag() {
    //     assert!(parse("<div />").is_ok());
    // }

    /// Realistic markup, including the fully-space-separated form produced
    /// by stringifying a proc-macro token stream.
    #[test]
    fn buttons() {
        assert!(
            parse(
                "<div>
                     <button click={Message::Increment}>+</button>
                     <div>{model}</div>
                     <button click={Message::Decrement}>-</button>
                 </div>"
            ).is_ok()
        );

        assert!(parse("< div > < / div >").is_ok());
        assert!(parse("< div > < button click = { Message :: Increment } > + < / button > < div > { model } < / div > < button click = { Message :: Decrement } > - < / button > < / div >").is_ok());
    }

    /// `{ .. }` blocks embedded between text runs.
    #[test]
    fn embedded_block() {
        assert!(parse("<div>{model}</div>").is_ok());
        assert!(parse("<div>{model} HEY {test}</div>").is_ok());
    }
}
| rust | MIT | 1674097ba752cb3c3e86db119f9faab85c963ecb | 2026-01-04T20:20:46.962183Z | false |
jtomschroeder/cedar | https://github.com/jtomschroeder/cedar/blob/1674097ba752cb3c3e86db119f9faab85c963ecb/examples/words.rs | examples/words.rs | #![feature(proc_macro_non_items)]
extern crate cedar;
use cedar::hypertext;
/// The model is simply the current line of text.
type Model = String;

/// Messages emitted by the view.
#[derive(PartialEq)]
enum Message {
    NewContent(String),
}

/// The old model is discarded wholesale: the new content replaces it.
fn update(_: Model, message: &Message) -> Model {
    match *message {
        Message::NewContent(ref content) => content.clone(),
    }
}
type Object = cedar::dom::Object<Message>;

/// Split the line on spaces and wrap each non-empty word in its own `<div>`.
fn words(line: &str) -> Vec<Object> {
    line.split(' ')
        .filter(|s| !s.is_empty())
        .map(|w| hypertext! { <div>{w}</div> })
        .collect()
}

/// An input box feeding `Message::NewContent`, with the split words below.
fn view(model: &Model) -> Object {
    hypertext! {
        <div>
            <input placeholder={"Words!"} input={Message::NewContent}></input>
            <div>{words(model)}</div>
        </div>
    }
}

fn main() {
    // Start with an empty line.
    cedar::app("".into(), update, view)
}
| rust | MIT | 1674097ba752cb3c3e86db119f9faab85c963ecb | 2026-01-04T20:20:46.962183Z | false |
jtomschroeder/cedar | https://github.com/jtomschroeder/cedar/blob/1674097ba752cb3c3e86db119f9faab85c963ecb/examples/field.rs | examples/field.rs | #![feature(proc_macro_non_items)]
extern crate cedar;
use cedar::hypertext;
/// The model is the raw text the user has typed so far.
type Model = String;

/// Messages emitted by the view.
#[derive(PartialEq)]
enum Message {
    NewContent(String),
}

/// The old model is discarded wholesale: the new content replaces it.
fn update(_: Model, message: &Message) -> Model {
    match *message {
        Message::NewContent(ref content) => content.clone(),
    }
}
// Shared inline style for both the input and the mirrored output.
const STYLE: &'static str =
    "width: 100%; height: 40px; padding: 10px 0; font-size: 2em; text-align: center;";

/// An input box whose contents are echoed below, reversed.
fn view(model: &Model) -> cedar::dom::Object<Message> {
    let field: String = model.chars().rev().collect();
    hypertext! {
        <div>
            <input style={STYLE} input={Message::NewContent}></input>
            <div style={STYLE}>{field}</div>
        </div>
    }
}

fn main() {
    // Start with an empty field.
    cedar::app("".into(), update, view)
}
| rust | MIT | 1674097ba752cb3c3e86db119f9faab85c963ecb | 2026-01-04T20:20:46.962183Z | false |
jtomschroeder/cedar | https://github.com/jtomschroeder/cedar/blob/1674097ba752cb3c3e86db119f9faab85c963ecb/examples/buttons.rs | examples/buttons.rs | #![feature(proc_macro_non_items)]
extern crate cedar;
use cedar::hypertext;
/// The counter value shown on screen.
type Model = i32;

/// User intents emitted by the two buttons.
#[derive(PartialEq)]
enum Message {
    Increment,
    Decrement,
}

/// Step the counter up or down by one.
fn update(model: Model, message: &Message) -> Model {
    let delta = match *message {
        Message::Increment => 1,
        Message::Decrement => -1,
    };
    model + delta
}
/// A `+` button, the current count, and a `-` button.
fn view(model: &Model) -> cedar::dom::Object<Message> {
    hypertext! {
        <div>
            <button click={Message::Increment}> + </button>
            <div>{model.to_string()}</div>
            <button click={Message::Decrement}> - </button>
        </div>
    }
}

fn main() {
    // Counter starts at zero.
    cedar::app(0, update, view)
}
| rust | MIT | 1674097ba752cb3c3e86db119f9faab85c963ecb | 2026-01-04T20:20:46.962183Z | false |
jtomschroeder/cedar | https://github.com/jtomschroeder/cedar/blob/1674097ba752cb3c3e86db119f9faab85c963ecb/examples/todo/build.rs | examples/todo/build.rs |
extern crate cedar;
/// Build script: compile the TodoMVC stylesheet; the resulting CSS lands in
/// `$OUT_DIR/style.css` and is `include_str!`-ed by `main.rs`.
fn main() {
    cedar::custom_style("src/style/style-todo.scss");
}
| rust | MIT | 1674097ba752cb3c3e86db119f9faab85c963ecb | 2026-01-04T20:20:46.962183Z | false |
jtomschroeder/cedar | https://github.com/jtomschroeder/cedar/blob/1674097ba752cb3c3e86db119f9faab85c963ecb/examples/todo/src/main.rs | examples/todo/src/main.rs | #![feature(proc_macro)]
#![feature(proc_macro_non_items)]
extern crate cedar;
use cedar::hypertext;
/// Convenience alias for the list of todo entries.
type Entries = Vec<Entry>;

/// Application state for the TodoMVC example.
struct Model {
    entries: Entries,
    visibility: String,
    field: String,
    uid: u32,
}

impl Model {
    /// A fresh model: no entries, "All" filter, empty input, ids from 0.
    fn empty() -> Self {
        Model {
            uid: 0,
            field: String::new(),
            visibility: String::from("All"),
            entries: Vec::new(),
        }
    }
}

/// A single todo item.
struct Entry {
    description: String,
    completed: bool,
    id: u32,
}

impl Entry {
    /// A new, not-yet-completed entry.
    fn new(description: String, id: u32) -> Self {
        Entry {
            id,
            description,
            completed: false,
        }
    }
}
/// Every user intent the TodoMVC UI can produce.
#[derive(PartialEq)]
enum Message {
    /// The new-todo input field changed.
    UpdateField(String),
    /// Edit the description of the entry with the given id.
    UpdateEntry(u32, String),
    /// Commit the current input field as a new entry.
    Add,
    /// Delete the entry with the given id.
    Delete(u32),
    /// Delete all completed entries.
    DeleteComplete,
    /// Set the completed flag of the entry with the given id.
    Check(u32, bool),
    /// Set the completed flag of every entry.
    CheckAll(bool),
    /// Switch the filter ("All" / "Active" / "Completed").
    ChangeVisibility(String),
}
/// Fold one [`Message`] into the model and return the updated model.
fn update(mut model: Model, message: &Message) -> Model {
    match *message {
        Message::Add => {
            let uid = model.uid;
            // Take the pending input text, leaving the field empty.
            let field = model.field.split_off(0);
            if !field.is_empty() {
                model.entries.push(Entry::new(field, uid));
            }
            // The id is consumed even when the field was empty; this keeps
            // the original strictly-increasing id behavior.
            model.uid += 1;
        }
        Message::UpdateField(ref s) => model.field = s.clone(),
        Message::UpdateEntry(id, ref task) => {
            // `find` yields at most one entry; `if let` over the Option is
            // clearer than the previous `Option::iter_mut().for_each(..)`.
            if let Some(entry) = model.entries.iter_mut().find(|e| e.id == id) {
                entry.description = task.clone();
            }
        }
        Message::Delete(id) => model.entries.retain(|e| e.id != id),
        Message::DeleteComplete => model.entries.retain(|e| !e.completed),
        Message::Check(id, completed) => {
            if let Some(entry) = model.entries.iter_mut().find(|e| e.id == id) {
                entry.completed = completed;
            }
        }
        Message::CheckAll(completed) => model
            .entries
            .iter_mut()
            .for_each(|e| e.completed = completed),
        Message::ChangeVisibility(ref visibility) => model.visibility = visibility.clone(),
    }
    model
}
type Widget = cedar::dom::Object<Message>;

/// Top-level view: input header, entry list, controls footer, info footer.
fn view(model: &Model) -> Widget {
    (hypertext! { |model: &Model|
        <div class={"todomvc-wrapper"} style={"visibility: hidden"}>
            <section class={"todoapp"}>
                {view_input(&model.field)}
                {view_entries(&model.visibility, &model.entries)}
                {view_controls(&model.visibility, &model.entries)}
            </section>
            {info_footer()}
        </div>
    })(model)
}

/// The "What needs to be done?" input; Enter (key code 13) commits the
/// current text as a new entry.
fn view_input(task: &str) -> Widget {
    (hypertext! { |task: &str|
        <header class={"header"}>
            <h1>todos</h1>
            <input class={"new-todo"}
                   placeholder={"What needs to be done?"}
                   autofocus={"true"}
                   value={task}
                   name={"newTodo"}
                   input={Message::UpdateField}
                   keydown={|code| { if code == 13 /* ENTER */ { Some(Message::Add) } else { None }}}>
            </input>
        </header>
    })(task)
}
/// The main entry list, filtered by the current visibility, with the
/// toggle-all checkbox. Hidden entirely when there are no entries.
fn view_entries(visibility: &str, entries: &[Entry]) -> Widget {
    let all_completed = entries.iter().all(|e| e.completed);
    let vis = if entries.is_empty() {
        "hidden"
    } else {
        "visible"
    };

    // Only the entries matching the active filter are rendered.
    let todos: cedar::dom::List<_> = entries
        .iter()
        .filter(|e| -> bool {
            match visibility {
                "Completed" => e.completed,
                "Active" => !e.completed,
                _ => true,
            }
        })
        .map(view_entry)
        .collect();

    (hypertext! { |vis, all_completed, todos|
        <section class={"main"} style={format!("visibility: {}", vis)}>
            <input class={"toggle-all"}
                   type={"checkbox"}
                   name={"toggle"}
                   checked={if all_completed { "true" } else { "false" }}
                   click={Message::CheckAll(!all_completed)}>
            </input>
            <ul class={"todo-list"}>{todos}</ul>
        </section>
    })(vis, all_completed, todos)
}
/// One list item: completion toggle, label, delete button, and the hidden
/// edit field (shown by the TodoMVC stylesheet when editing).
fn view_entry(entry: &Entry) -> Widget {
    let &Entry {
        ref description,
        completed,
        id,
    } = entry;

    (hypertext! { |id, completed, description|
        <li>
            <div class={"view"}>
                <input class={"toggle"}
                       type={"checkbox"}
                       checked={if completed { "true" } else { "false" }}
                       click={Message::Check(id, !completed)}>
                </input>
                <label>{description}</label>
            </div>
            <button class={"destroy"} click={Message::Delete(id)}></button>
            <input class={"edit"}
                   value={description}
                   name={"title"}
                   id={format!("todo-{}", id)}
                   input={move |s| Message::UpdateEntry(id.clone(), s)}>
            </input>
        </li>
    })(id, completed, description)
}
/// The footer controls: remaining count, filter links, clear-completed
/// button. Hidden when there are no entries at all.
fn view_controls(visibility: &str, entries: &[Entry]) -> Widget {
    let num_completed = entries.iter().filter(|e| e.completed).count();
    let num_left = entries.len() - num_completed;

    (hypertext! { |entries: &[Entry], num_left, visibility, num_completed|
        <footer class={"footer"}
                hidden={entries.is_empty()}>
            {view_controls_count(num_left)}
            {view_controls_filters(visibility)}
            {view_controls_clear(num_completed)}
        </footer>
    })(entries, num_left, visibility, num_completed)
}

/// "N item(s) left" counter, with singular/plural handled explicitly.
fn view_controls_count(num_left: usize) -> Widget {
    let item = match num_left {
        1 => "item",
        _ => "items",
    };
    let s = format!("{} {} left", num_left, item);

    (hypertext! { |s|
        <span class={"todo-count"}>{s}</span>
    })(s)
}
/// The three filter links: All / Active / Completed.
fn view_controls_filters(visibility: &str) -> Widget {
    (hypertext! { |visibility|
        <ul class={"filters"}>
            {visibility_swap("All", visibility)}
            {" "}
            {visibility_swap("Active", visibility)}
            {" "}
            {visibility_swap("Completed", visibility)}
        </ul>
    })(visibility)
}

/// One filter link; highlighted with the `"selected"` class when it names
/// the currently-active filter.
fn visibility_swap(visibility: &str, actual_visibility: &str) -> Widget {
    (hypertext! { |visibility: &str, actual_visibility: &str|
        <li click={Message::ChangeVisibility(visibility.into())}>
            <a class={if visibility == actual_visibility { "selected" } else { "" }}>
                {visibility}
            </a>
        </li>
    })(visibility, actual_visibility)
}

/// The "Clear completed (N)" button; hidden when nothing is completed.
fn view_controls_clear(num_completed: usize) -> Widget {
    (hypertext! { |num_completed|
        <button class={"clear-completed"}
                hidden={num_completed == 0}
                click={Message::DeleteComplete}>
            {format!("Clear completed ({})", num_completed)}
        </button>
    })(num_completed)
}
/// The static credits footer below the app.
fn info_footer() -> Widget {
    (hypertext! { ||
        <footer class={"info"}>
            <p>{"Written by Tom Schroeder using cedar!"}</p>
        </footer>
    })()
}

fn main() {
    // The stylesheet is generated by build.rs (see `custom_style`) and
    // baked into the binary here.
    cedar::Application::new(Model::empty(), update, view)
        .style(include_str!(concat!(env!("OUT_DIR"), "/style.css")))
        .run()
}
| rust | MIT | 1674097ba752cb3c3e86db119f9faab85c963ecb | 2026-01-04T20:20:46.962183Z | false |
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/config_man.rs | src/config_man.rs | use anyhow::Result;
use directories::ProjectDirs;
use rand::RngCore;
use reqwest::Client;
use serde_json::json;
use std::fs;
use std::path::PathBuf;
use toml_edit::{DocumentMut, Item, Value, Table};
use uuid::Uuid;
use ring::aead::{self, Aad, LessSafeKey, Nonce};
use base64::{Engine as _, engine::general_purpose};
use ring::aead::UnboundKey;
use sha2::{Digest, Sha256};
use sys_info;
// Build-time secrets; `option_env!` leaves them `None` when the environment
// variable was absent at compile time.
const POSTHOG_API_KEY: Option<&str> = option_env!("POSTHOG_API_KEY");
// NOTE(review): DOCS_KEY is unused in this visible section — presumably
// consumed elsewhere in the file.
const DOCS_KEY: Option<&str> = option_env!("DOCS_KEY");

/// Report an anonymized usage event (the invoked CLI `command` plus the tool
/// version) to PostHog, keyed by the hashed machine id.
///
/// Does nothing when the user has opted out of analytics. Panics if the
/// binary was built without `POSTHOG_API_KEY`.
pub async fn send_event(command: String) -> Result<()> {
    if get_analytics()? {
        let uuid = get_uuid()?;
        let version = env!("CARGO_PKG_VERSION").to_string();
        let api_key = POSTHOG_API_KEY.expect("POSTHOG_API_KEY environment variable not set").to_string();
        let client = Client::new();
        let payload = json!({
            "api_key": api_key,
            "event": "user_action",
            "distinct_id": uuid,
            "properties": {
                "command": command,
                "version": version
            }
        });
        // The response status is intentionally ignored: analytics failures
        // must never break the CLI.
        let _response = client.post("https://us.i.posthog.com/capture/")
            .json(&payload)
            .send()
            .await?;
        // if !response.status().is_success() {
        //     eprintln!("Failed to send event to PostHog: {}", response.status());
        // }
    }
    Ok(())
}
/// Platform-appropriate path of `config.toml` (e.g. under `~/.config` on
/// Linux); `None` when no home/config directory can be determined.
pub fn get_config_path() -> Option<PathBuf> {
    let project_dirs = ProjectDirs::from("com", "Instachip", "vpm")?;
    let mut path = project_dirs.config_dir().to_path_buf();
    path.push("config.toml");
    Some(path)
}
pub fn create_config() -> Result<()> {
let config_path = get_config_path().unwrap();
if !config_path.exists() {
if let Some(parent) = config_path.parent() {
fs::create_dir_all(parent)?;
}
fs::File::create(&config_path)?;
}
fs::write(config_path.clone(), "").expect("Failed to create config.toml");
let contents = fs::read_to_string(config_path.clone())?;
let mut config_doc = contents.parse::<DocumentMut>().expect("Failed to parse config.toml");
config_doc.insert("user", Item::Table(Table::new()));
let user_table = config_doc["user"].as_table_mut().unwrap();
user_table.insert("uuid", Item::Value(Value::from(create_uuid()?)));
user_table.insert("os", Item::Value(Value::from(std::env::consts::OS)));
user_table.insert("arch", Item::Value(Value::from(std::env::consts::ARCH)));
config_doc.insert("tool", Item::Table(Table::new()));
let tool_table = config_doc["tool"].as_table_mut().unwrap();
tool_table.insert("version", Item::Value(Value::from(env!("CARGO_PKG_VERSION"))));
config_doc.insert("options", Item::Table(Table::new()));
let options_table = config_doc["options"].as_table_mut().unwrap();
options_table.insert("analytics", Item::Value(Value::from(true)));
config_doc.insert("metrics", Item::Table(Table::new()));
let metrics_table = config_doc["metrics"].as_table_mut().unwrap();
metrics_table.insert("docs_count", Item::Value(Value::from(0)));
encrypt_docs_count(0)?;
fs::write(config_path, config_doc.to_string()).expect("Failed to write config.toml");
Ok(())
}
/// Derives a stable, anonymous user id: a SHA-256 hex digest over a fresh
/// UUIDv7 plus a machine fingerprint (OS, release, arch, CPU, RAM, hostname,
/// TZ). Missing system facts degrade to empty/zero values instead of failing.
fn create_uuid() -> Result<String> {
    let mem_total = sys_info::mem_info()
        .map(|info| info.total)
        .unwrap_or(0)
        .to_string();
    // Order matters: these are folded into the hash sequentially.
    let components = [
        Uuid::now_v7().to_string(),
        sys_info::os_type().unwrap_or_default(),
        sys_info::os_release().unwrap_or_default(),
        std::env::consts::ARCH.to_string(),
        sys_info::cpu_num().unwrap_or_default().to_string(),
        sys_info::cpu_speed().unwrap_or_default().to_string(),
        mem_total,
        sys_info::hostname().unwrap_or_default(),
        std::env::var("TZ").unwrap_or_else(|_| "Unknown".to_string()),
    ];
    let mut hasher = Sha256::new();
    for part in &components {
        hasher.update(part);
    }
    Ok(format!("{:x}", hasher.finalize()))
}
fn get_uuid() -> Result<String> {
let config_path = get_config_path().unwrap();
if !config_path.exists() {
create_config()?;
}
let contents = fs::read_to_string(config_path)?;
let config = contents.parse::<DocumentMut>().expect("Failed to parse config.toml");
Ok(config["user"]["uuid"].as_str().unwrap().to_string())
}
pub fn set_analytics(value: bool) -> Result<()> {
let config_path = get_config_path().unwrap();
if !config_path.exists() {
create_config()?;
}
let config = fs::read_to_string(config_path.clone())?;
let mut config_doc = config.parse::<DocumentMut>().expect("Failed to parse config.toml");
config_doc["options"]["analytics"] = Item::Value(Value::from(value));
fs::write(config_path, config_doc.to_string()).expect("Failed to write config.toml");
Ok(())
}
fn get_analytics() -> Result<bool> {
let config_path = get_config_path().unwrap();
if !config_path.exists() {
create_config()?;
}
let config = fs::read_to_string(config_path.clone())?;
let config_doc = config.parse::<DocumentMut>().expect("Failed to parse config.toml");
Ok(config_doc["options"]["analytics"].as_bool().unwrap())
}
/// Records the given tool version under `tool.version` in `config.toml`,
/// creating the config first when absent.
pub fn set_version(version: &str) -> Result<()> {
    let config_path = get_config_path().unwrap();
    if !config_path.exists() {
        create_config()?;
    }
    let contents = fs::read_to_string(&config_path)?;
    let mut doc = contents.parse::<DocumentMut>().expect("Failed to parse config.toml");
    doc["tool"]["version"] = Item::Value(Value::from(version));
    fs::write(&config_path, doc.to_string()).expect("Failed to write config.toml");
    Ok(())
}
/// Decrypts the `metrics.docs_count` value from `config.toml`.
///
/// Storage layout: base64( 12-byte nonce || AES-256-GCM ciphertext || tag ),
/// keyed by the compile-time `DOCS_KEY` (hex).
///
/// BUG FIX: `LessSafeKey::open_in_place` returns the plaintext slice; the
/// previous version ignored it and ran UTF-8/parse over the whole buffer,
/// which still contains the 16-byte auth tag at the end — so decryption
/// effectively always failed. Trailing NULs are also stripped because the
/// old encryptor padded the plaintext with 16 zero bytes.
pub fn decrypt_docs_count() -> Result<u8> {
    let config_path = get_config_path().ok_or(anyhow::anyhow!("Failed to get config path"))?;
    if !config_path.exists() {
        create_config()?;
    }
    let config = fs::read_to_string(config_path)?;
    let config_doc = config
        .parse::<DocumentMut>()
        .map_err(|e| anyhow::anyhow!("Failed to parse config.toml: {}", e))?;
    let encrypted_docs_base64 = config_doc["metrics"]["docs_count"]
        .as_str()
        .ok_or_else(|| anyhow::anyhow!("docs_count not found in config"))?;
    let encrypted_docs = general_purpose::STANDARD
        .decode(encrypted_docs_base64)
        .map_err(|e| anyhow::anyhow!("Failed to decode docs count: {}", e))?;
    // Key comes from the build environment, hex-encoded (32 bytes for AES-256).
    let docs_key_str = DOCS_KEY.ok_or_else(|| anyhow::anyhow!("DOCS_KEY is not set"))?;
    let key_bytes = hex::decode(docs_key_str).map_err(|e| anyhow::anyhow!("Invalid DOCS_KEY format: {}", e))?;
    let unbound_key =
        UnboundKey::new(&aead::AES_256_GCM, &key_bytes).map_err(|_| anyhow::anyhow!("Invalid key"))?;
    let key = LessSafeKey::new(unbound_key);
    // The first 12 bytes are the nonce; the rest is ciphertext || tag.
    if encrypted_docs.len() < 12 {
        return Err(anyhow::anyhow!("Ciphertext too short"));
    }
    let (nonce_bytes, ciphertext_and_tag) = encrypted_docs.split_at(12);
    let nonce = Nonce::try_assume_unique_for_key(nonce_bytes).map_err(|_| anyhow::anyhow!("Invalid nonce"))?;
    let mut in_out = ciphertext_and_tag.to_vec();
    // Use the returned plaintext slice — `in_out` itself still ends with the tag.
    let plaintext = key
        .open_in_place(nonce, Aad::empty(), &mut in_out)
        .map_err(|_| anyhow::anyhow!("Decryption failed"))?;
    let decrypted_str =
        std::str::from_utf8(plaintext).map_err(|_| anyhow::anyhow!("Invalid UTF-8 in decrypted data"))?;
    // Tolerate NUL padding written by the previous (buggy) encryptor.
    let docs_count: u8 = decrypted_str
        .trim_end_matches('\0')
        .parse()
        .map_err(|_| anyhow::anyhow!("Failed to parse decrypted data"))?;
    Ok(docs_count)
}
pub fn encrypt_docs_count(docs_count: u8) -> Result<()> {
// Convert the docs_count to a string and then to bytes
let docs_count_bytes = docs_count.to_string().into_bytes();
// Get the key from the environment variable
let docs_key_str = DOCS_KEY.ok_or_else(|| anyhow::anyhow!("DOCS_KEY is not set"))?;
let key_bytes = hex::decode(docs_key_str).map_err(|e| anyhow::anyhow!("Invalid DOCS_KEY format: {}", e))?;
// Create an AEAD key
let unbound_key =
UnboundKey::new(&aead::AES_256_GCM, &key_bytes).map_err(|_| anyhow::anyhow!("Invalid key"))?;
let key = LessSafeKey::new(unbound_key);
// Generate a random nonce
let mut nonce_bytes = [0u8; 12];
rand::rngs::OsRng.fill_bytes(&mut nonce_bytes);
let nonce = Nonce::assume_unique_for_key(nonce_bytes);
// Prepare buffer for encryption (data + space for the tag)
let mut in_out = docs_count_bytes;
in_out.extend_from_slice(&[0u8; 16]);
// Encrypt the data
key.seal_in_place_append_tag(nonce, Aad::empty(), &mut in_out)
.map_err(|_| anyhow::anyhow!("Encryption failed"))?;
// Prepend nonce to the ciphertext
let mut encrypted_data = nonce_bytes.to_vec();
encrypted_data.extend_from_slice(&in_out);
// Encode the encrypted data to base64
let encrypted_base64 = general_purpose::STANDARD.encode(encrypted_data);
let config_path = get_config_path().unwrap();
let config = fs::read_to_string(config_path.clone())?;
let mut config_doc = config.parse::<DocumentMut>().expect("Failed to parse config.toml");
config_doc["metrics"]["docs_count"] = Item::Value(Value::from(encrypted_base64));
fs::write(config_path, config_doc.to_string()).expect("Failed to write config.toml");
Ok(())
} | rust | MIT | 03763cc40a2354d7a979a378e6830c1b4ceee44a | 2026-01-04T20:11:49.815734Z | false |
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/error.rs | src/error.rs | use core::fmt::Display;
use std::fmt::{self, Formatter};
/// Error type used to terminate the process with a specific exit code while
/// printing nothing: its `Display` output is intentionally empty.
#[derive(Debug)]
pub struct SilentExit {
    /// Exit code handed back to the shell.
    pub code: u8,
}

impl Display for SilentExit {
    // Render nothing — the whole point is a message-free exit.
    fn fmt(&self, _formatter: &mut Formatter<'_>) -> fmt::Result {
        Ok(())
    }
}
| rust | MIT | 03763cc40a2354d7a979a378e6830c1b4ceee44a | 2026-01-04T20:11:49.815734Z | false |
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/main.rs | src/main.rs | mod cmd;
mod error;
mod toml;
mod config_man;
use std::env;
use std::io::{self, Write};
use std::process::ExitCode;
use std::fs;
use clap::Parser;
use crate::cmd::{Cmd, Execute};
use crate::error::SilentExit;
use crate::config_man::{get_config_path, create_config};
#[tokio::main]
/// CLI entry point.
///
/// First run: shows the analytics notice once (tracked by a marker file next
/// to the config) and exits WITHOUT running the requested command — the user
/// must re-run to accept. Subsequent runs parse and execute the subcommand.
pub async fn main() -> ExitCode {
    // Forcibly disable backtraces.
    env::remove_var("RUST_LIB_BACKTRACE");
    env::remove_var("RUST_BACKTRACE");
    // Marker file lives alongside config.toml; its presence means the
    // welcome/consent notice has already been shown.
    let flag_file = get_config_path().unwrap().with_file_name(".vpm_welcome_shown");
    if !flag_file.exists() {
        create_config().unwrap();
        println!("Welcome to vpm!");
        println!("We collect anonymous usage data to improve the tool.");
        println!("The following information will be collected:");
        println!("  - The version of vpm you are using");
        println!("  - Which commands you run and when (not including arguments, input, or output)");
        println!("No personal information will be collected.");
        println!("To opt-out, run `vpm config --analytics false`. You may change this at any time.\n");
        println!("Rerun your command to accept and continue.");
        // Record that the notice was shown, then exit early on purpose.
        fs::write(flag_file, "").unwrap();
        return ExitCode::SUCCESS;
    }
    // SilentExit carries a bare exit code (no message); any other error is
    // reported as `vpm: <debug>` on stderr.
    match Cmd::parse().execute().await {
        Ok(()) => ExitCode::SUCCESS,
        Err(e) => match e.downcast::<SilentExit>() {
            Ok(SilentExit { code }) => code.into(),
            Err(e) => {
                _ = writeln!(io::stderr(), "vpm: {e:?}");
                ExitCode::FAILURE
            }
        },
    }
}
| rust | MIT | 03763cc40a2354d7a979a378e6830c1b4ceee44a | 2026-01-04T20:11:49.815734Z | false |
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/toml.rs | src/toml.rs | use serde::{Deserialize, Serialize};
use std::fs::{OpenOptions, read_to_string};
use std::io::Write;
use std::path::Path;
use std::collections::HashSet;
use anyhow::Result;
use toml_edit::{Array, DocumentMut, InlineTable, Item, Table, Value};
/// `[package]` metadata section of a vpm.toml manifest.
#[derive(Serialize, Deserialize, Debug)]
struct Package {
    // Package name as written to vpm.toml.
    name: String,
    // Semantic version string.
    version: String,
    // "<name> <email>" entries.
    authors: Vec<String>,
    // Free-form one-line description.
    description: String,
    // SPDX-style license identifier.
    license: String,
}
/// In-memory handle on a parsed vpm.toml manifest.
#[derive(Debug)]
struct VpmToml {
    // Format-preserving TOML document (toml_edit keeps comments/ordering).
    toml_doc: DocumentMut,
}
impl Default for Package {
fn default() -> Self {
Package {
name: "my-vpm-package".to_string(),
version: "0.1.0".to_string(),
authors: vec!["<author-name> <author-email>".to_string()],
description: "A vpm package".to_string(),
license: "LicenseRef-LICENSE".to_string(),
}
}
}
impl VpmToml {
    /// Loads the manifest at `filepath`, first writing a default skeleton
    /// ([package] from `Package::default()` plus an empty [dependencies])
    /// when the file does not exist yet, so the parse below always sees a
    /// well-formed document.
    pub fn from(filepath: &str) -> Self {
        if !Path::new(filepath).exists() {
            let mut initial_doc = DocumentMut::new();
            initial_doc["package"] = Item::Table(Table::new());
            initial_doc["package"]["name"] = Item::Value(Value::from(Package::default().name));
            initial_doc["package"]["version"] = Item::Value(Value::from(Package::default().version));
            initial_doc["package"]["authors"] = Item::Value(Value::from(Array::from(Package::default().authors.iter().map(|s| Value::from(s.to_string())).collect())));
            initial_doc["package"]["description"] = Item::Value(Value::from(Package::default().description));
            initial_doc["package"]["license"] = Item::Value(Value::from(Package::default().license));
            initial_doc["dependencies"] = Item::Table(Table::new());
            let mut file = OpenOptions::new()
                .write(true)
                .create(true)
                .truncate(true)
                .open(filepath)
                .expect("Failed to create vpm.toml");
            file.write_all(initial_doc.to_string().as_bytes()).expect("Failed to write to vpm.toml");
        }
        let toml_content = read_to_string(filepath).expect("Failed to read vpm.toml");
        Self {
            toml_doc: toml_content.parse::<DocumentMut>().expect("Failed to parse vpm.toml")
        }
    }
    /// Returns the [dependencies] table, or None if the item is not a table.
    pub fn get_dependencies(&self) -> Option<&Table> {
        self.toml_doc["dependencies"].as_table()
    }
    /// Registers a repository URL with an empty module array.
    // NOTE(review): overwrites any existing entry for `git` — callers guard
    // with contains_key first.
    pub fn add_dependency(&mut self, git: &str) {
        self.toml_doc["dependencies"][git] = Item::Value(Value::Array(Array::new()))
;
    }
    /// Appends a { top_module, commit_hash } inline table under `repo_link`,
    /// skipping the insert when that module is already recorded.
    // NOTE(review): panics (unwrap) if `repo_link` is missing or not an
    // array — callers are expected to call add_dependency first.
    pub fn add_top_module(&mut self, repo_link: &str, module_name: &str, commit: &str) {
        let array = self.toml_doc["dependencies"][repo_link].as_array_mut().unwrap();
        if !array.iter().any(|m| m.as_inline_table().unwrap().get("top_module").unwrap().as_str().unwrap() == module_name) {
            let new_entry = Value::InlineTable({
                let mut table = InlineTable::new();
                table.insert("top_module".to_string(), Value::from(module_name));
                table.insert("commit_hash".to_string(), Value::from(commit.to_string()));
                table
            });
            array.push(new_entry);
        }
    }
    /// Removes a repository entry (and all of its modules).
    pub fn remove_dependency(&mut self, git: &str) {
        if let Some(dependencies) = self.toml_doc["dependencies"].as_table_mut() {
            dependencies.remove(git);
        }
    }
    /// Removes one module entry from a repository; when the module array
    /// becomes empty the repository entry itself is dropped too.
    pub fn remove_top_module(&mut self, repo_link: &str, module_name: &str) {
        if let Some(dependencies) = self.toml_doc["dependencies"].as_table_mut() {
            if let Some(modules) = dependencies.get_mut(repo_link).and_then(|v| v.as_array_mut()) {
                modules.retain(|m| {
                    if let Some(table) = m.as_inline_table() {
                        if let Some(top_module) = table.get("top_module").and_then(|v| v.as_str()) {
                            return top_module != module_name;
                        }
                    }
                    // Keep malformed entries untouched.
                    true
                });
                // If the array is empty after removal, remove the entire dependency
                if modules.is_empty() {
                    dependencies.remove(repo_link);
                }
            }
        }
    }
    /// Serializes the document to `filepath`, re-wrapping lines after each
    /// "}, " so every inline module table sits on its own (indented) line.
    pub fn write_to_file(&self, filepath: &str) -> Result<()> {
        let toml_content = self.toml_doc.to_string();
        let mut formatted_content = String::new();
        for line in toml_content.lines() {
            if !line.trim().contains("}, ") {
                formatted_content.push_str(line);
            } else {
                // Indent continuation lines to the column of the first '{'.
                let indent_level = line.chars().take_while(|&c| c != '{').count();
                formatted_content.push_str(&line.replace("}, ", &format!("}},\n{}", " ".repeat(indent_level))));
            }
            formatted_content.push('\n');
        }
        let mut file = OpenOptions::new()
            .write(true)
            .create(true)
            .truncate(true)
            .open(filepath)
            .expect("Failed to open vpm.toml");
        file.write_all(formatted_content.as_bytes()).expect("Failed to write to vpm.toml");
        Ok(())
    }
    /// Collects every repository URL whose module array contains an entry
    /// with top_module == `module_name`.
    // NOTE(review): unwraps assume every array element is an inline table
    // with a "top_module" key; a hand-edited manifest could panic here.
    pub fn get_repo_links(&self, module_name: &str) -> HashSet<String> {
        let mut repo_links = HashSet::new();
        if let Some(dependencies) = self.toml_doc["dependencies"].as_table() {
            for (repo_link, dependency) in dependencies.iter() {
                if let Some(top_modules) = dependency.as_array() {
                    if top_modules.iter().any(|m| m.as_inline_table().unwrap().get("top_module").unwrap().as_str().unwrap() == module_name) {
                        repo_links.insert(repo_link.to_string());
                    }
                }
            }
        }
        repo_links
    }
}
/// Registers `git` in vpm.toml's [dependencies], unless it is already listed.
pub fn add_dependency(git: &str) -> Result<()> {
    let mut manifest = VpmToml::from("vpm.toml");
    if manifest.get_dependencies().unwrap().contains_key(git) {
        // Already present — nothing to write.
        return Ok(());
    }
    manifest.add_dependency(git);
    manifest.write_to_file("vpm.toml")
}
/// Records a (top_module, commit) pair under `repo_link` in vpm.toml.
pub fn add_top_module(repo_link: &str, module_path: &str, commit: &str) -> Result<()> {
    let mut manifest = VpmToml::from("vpm.toml");
    manifest.add_top_module(repo_link, module_path, commit);
    manifest.write_to_file("vpm.toml")
}
/// Drops the repository entry `git` (and all of its modules) from vpm.toml.
fn remove_dependency(git: &str) -> Result<()> {
    let mut manifest = VpmToml::from("vpm.toml");
    manifest.remove_dependency(git);
    manifest.write_to_file("vpm.toml")
}
/// Removes `module_name` from `repo_link`'s entry in vpm.toml; the repository
/// entry itself is dropped when its module list becomes empty.
///
/// BUG FIX: the previous version re-checked for an empty module array after
/// `VpmToml::remove_top_module` and called `remove_dependency` — but the
/// method already drops the repository entry when the array empties, so the
/// branch was dead; and had it ever fired, `remove_dependency` would have
/// re-read the *on-disk* (stale) manifest and clobbered the in-memory edit.
pub fn remove_top_module(repo_link: &str, module_name: &str) -> Result<()> {
    let mut vpm_toml = VpmToml::from("vpm.toml");
    // Handles the empty-array cleanup internally.
    vpm_toml.remove_top_module(repo_link, module_name);
    vpm_toml.write_to_file("vpm.toml")?;
    Ok(())
}
pub fn get_repo_links(module_name: &str) -> HashSet<String> {
let vpm_toml = VpmToml::from("vpm.toml");
vpm_toml.get_repo_links(module_name)
} | rust | MIT | 03763cc40a2354d7a979a378e6830c1b4ceee44a | 2026-01-04T20:11:49.815734Z | false |
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/cmd/config.rs | src/cmd/config.rs | use crate::cmd::{Execute, Config};
use crate::config_man::set_analytics;
use anyhow::Result;
impl Execute for Config {
async fn execute(&self) -> Result<()> {
if self.analytics.is_some() {
set_analytics(self.analytics.unwrap())?;
println!("Analytics set to: {}", self.analytics.unwrap());
}
Ok(())
}
} | rust | MIT | 03763cc40a2354d7a979a378e6830c1b4ceee44a | 2026-01-04T20:11:49.815734Z | false |
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/cmd/dotf.rs | src/cmd/dotf.rs | use anyhow::Result;
use std::fs;
use std::path::{Path, PathBuf};
use std::io::Write;
use crate::cmd::{Execute, Dotf};
impl Execute for Dotf {
    /// `vpm dotf <path>`: (re)generates `<top>.f` — a filelist of the top
    /// module, its transitive submodules, +define+ and -f statements —
    /// under vpm_modules/<top>/.
    async fn execute(&self) -> Result<()> {
        // Clear the .f file if it already exists
        let top_module_file = Path::new(&self.path_to_top_module).file_name().and_then(|f| f.to_str()).unwrap_or("");
        let top_module_dir = Path::new(&self.path_to_top_module).with_extension("").to_str().unwrap_or("").to_string();
        // "<top>.f", with a .sv or .v suffix stripped from the stem.
        let filelist_name = format!("{}.f", top_module_file.trim_end_matches(".sv").trim_end_matches(".v"));
        let filelist_path = PathBuf::from("vpm_modules").join(&top_module_dir).join(&filelist_name);
        if filelist_path.exists() {
            fs::write(&filelist_path, "")?;
        }
        // Errors from filelist generation are deliberately swallowed
        // (best-effort); the command itself still reports success.
        let _ = append_modules_to_filelist(&self.path_to_top_module, true);
        Ok(())
    }
}
/// Builds the `.f` filelist for `top_module_path` under ./vpm_modules:
/// a +incdir+ line, all discovered source file paths, the de-duplicated
/// +define+ statements, then any -f references to per-submodule filelists.
/// `sub` controls whether submodule instantiations are followed recursively.
pub fn append_modules_to_filelist(top_module_path: &str, sub: bool) -> Result<()> {
    let vpm_modules_dir = PathBuf::from("./vpm_modules");
    let mut visited_modules: Vec<String> = Vec::new();
    let top_module_file = Path::new(top_module_path).file_name().and_then(|f| f.to_str()).unwrap_or("");
    let top_module_dir = Path::new(top_module_path).with_extension("").to_str().unwrap_or("").to_string();
    let filelist_name = format!("{}.f", top_module_file.trim_end_matches(".sv").trim_end_matches(".v"));
    let filelist_path = PathBuf::from("vpm_modules").join(&top_module_dir).join(&filelist_name);
    // Accumulators filled by the recursive walk below.
    let mut filepaths = Vec::new();
    let mut f_statements = Vec::new();
    let mut define_statements = Vec::new();
    append_module(&vpm_modules_dir, top_module_file, top_module_file, &mut visited_modules, sub, &filelist_path, &mut filepaths, &mut f_statements, &mut define_statements)?;
    // Write all filepaths together
    let mut file = fs::OpenOptions::new()
        .append(true)
        .create(true)
        .open(&filelist_path)?;
    // Add +incdir+ statement
    file.write_all(format!("+incdir+{}\n\n", vpm_modules_dir.join(&top_module_dir).display()).as_bytes())?;
    for filepath in filepaths {
        file.write_all(format!("{}\n", filepath).as_bytes())?;
    }
    file.write_all(b"\n")?;
    // Write all unique define statements together
    // NOTE(review): HashSet iteration order is nondeterministic, so the
    // +define+ block's line order can differ between runs — confirm that is
    // acceptable for downstream tools.
    let mut unique_defines: std::collections::HashSet<String> = std::collections::HashSet::new();
    for define in define_statements {
        unique_defines.insert(define);
    }
    for define in unique_defines {
        file.write_all(format!("{}\n", define).as_bytes())?;
    }
    file.write_all(b"\n")?;
    // Write all -f statements together
    for f_statement in f_statements {
        file.write_all(format!("{}\n", f_statement).as_bytes())?;
    }
    Ok(())
}
/// Recursively searches `dir` for a file named `module`; on a hit, records
/// its path, scans it line-by-line for `define`/`ifdef` macros (attributing
/// them either to the top filelist or to a per-submodule `.f` referenced via
/// a -f statement), and — when `sub` is true — parses it with tree-sitter to
/// chase instantiated submodules.
///
/// Accumulators (`filepaths`, `f_statements`, `define_statements`) are
/// appended in discovery order and written out by the caller.
fn append_module(
    dir: &Path,
    module: &str,
    top_module: &str,
    visited_modules: &mut Vec<String>,
    sub: bool,
    filelist_path: &Path,
    filepaths: &mut Vec<String>,
    f_statements: &mut Vec<String>,
    define_statements: &mut Vec<String>,
) -> Result<()> {
    for entry in fs::read_dir(dir)? {
        let entry = entry?;
        let path = entry.path();
        if path.is_file() && path.file_name().map_or(false, |name| name == module) {
            let module_path = path.to_str().unwrap_or_default();
            let contents = fs::read_to_string(&path)?;
            filepaths.push(module_path.to_string());
            // Check for `define macros and module boundaries
            // Line-scanner state: whether we are inside an `ifdef` region,
            // inside a module body, and which module we are in.
            // NOTE(review): `in_ifdef_block` is a plain flag, so nested
            // `ifdef`s are not tracked correctly — confirm inputs never nest.
            let mut in_ifdef_block = false;
            let mut in_module = false;
            let mut module_defines = Vec::new();
            let mut current_module_name = String::new();
            for line in contents.lines() {
                let trimmed_line = line.trim();
                if trimmed_line.starts_with("module") {
                    in_module = true;
                    current_module_name = trimmed_line.split_whitespace().nth(1).unwrap_or("").to_string();
                } else if trimmed_line.starts_with("endmodule") {
                    in_module = false;
                    if !module_defines.is_empty() {
                        // NOTE(review): only ".v" is trimmed here, not ".sv" —
                        // a SystemVerilog top module would take the else branch.
                        if current_module_name == top_module.trim_end_matches(".v") {
                            // Add defines to the top module's .f file
                            define_statements.extend(module_defines.clone());
                        } else {
                            // Submodule defines go to their own <module>.f,
                            // referenced from the top filelist via -f.
                            let submodule_filelist_name = format!("{}.f", current_module_name);
                            let submodule_filelist_path = dir.join(&submodule_filelist_name);
                            let mut submodule_file = fs::OpenOptions::new()
                                .append(true)
                                .create(true)
                                .open(&submodule_filelist_path)?;
                            for define in &module_defines {
                                submodule_file.write_all(format!("{}\n", define).as_bytes())?;
                            }
                            f_statements.push(format!("-f {}", submodule_filelist_path.to_str().unwrap_or_default()));
                        }
                    }
                    module_defines.clear();
                } else if trimmed_line.starts_with("`ifdef") {
                    in_ifdef_block = true;
                    let parts: Vec<&str> = trimmed_line.split_whitespace().collect();
                    if parts.len() >= 2 {
                        // The guarded macro becomes a +define+ switch.
                        let macro_name = parts[1];
                        let define = format!("+define+{}", macro_name);
                        if !in_module {
                            if !define_statements.contains(&define) {
                                define_statements.push(define.clone());
                            }
                        } else {
                            module_defines.push(define);
                        }
                    }
                } else if trimmed_line.starts_with("`endif") {
                    in_ifdef_block = false;
                } else if !in_ifdef_block && trimmed_line.starts_with("`define") {
                    // Unconditional `define: +define+NAME or +define+NAME=VALUE.
                    let parts: Vec<&str> = trimmed_line.split_whitespace().collect();
                    let define = if parts.len() >= 3 {
                        let macro_name = parts[1];
                        let macro_value = parts[2..].join(" ");
                        format!("+define+{}={}", macro_name, macro_value)
                    } else if parts.len() == 2 {
                        let macro_name = parts[1];
                        format!("+define+{}", macro_name)
                    } else {
                        continue;
                    };
                    if !in_module {
                        if !define_statements.contains(&define) {
                            define_statements.push(define.clone());
                        }
                    } else {
                        module_defines.push(define);
                    }
                }
            }
            if sub {
                // Parse the file and recurse into instantiated submodules.
                let mut parser = tree_sitter::Parser::new();
                parser
                    .set_language(tree_sitter_verilog::language())
                    .expect("Error loading Verilog grammar");
                if let Some(tree) = parser.parse(&contents, None) {
                    let root_node = tree.root_node();
                    find_module_instantiations(
                        root_node,
                        top_module,
                        &contents,
                        visited_modules,
                        sub,
                        filelist_path,
                        filepaths,
                        f_statements,
                        define_statements)?;
                }
            }
            // First match wins; stop searching this directory tree.
            return Ok(());
        } else if path.is_dir() {
            append_module(
                &path,
                module,
                top_module,
                visited_modules,
                sub,
                filelist_path,
                filepaths,
                f_statements,
                define_statements)?;
        }
    }
    Ok(())
}
/// Walks a tree-sitter parse tree looking for instantiation nodes; for each
/// newly seen instantiated module it tries both `<name>.v` and `<name>.sv`
/// under ./vpm_modules via `append_module`, then recurses into child nodes.
/// `visited_modules` prevents re-processing (and infinite cycles).
fn find_module_instantiations(
    root_node: tree_sitter::Node,
    top_module: &str,
    contents: &str,
    visited_modules: &mut Vec<String>,
    sub: bool,
    filelist_path: &Path,
    filepaths: &mut Vec<String>,
    f_statements: &mut Vec<String>,
    define_statements: &mut Vec<String>,
) -> Result<()> {
    let mut cursor = root_node.walk();
    for child in root_node.children(&mut cursor) {
        // Any node kind containing "instantiation" is treated as a module
        // instantiation; its first child is the instantiated type name.
        if child.kind().contains("instantiation") {
            if let Some(first_child) = child.child(0) {
                if let Ok(module) = first_child.utf8_text(contents.as_bytes()) {
                    let module_name_v = format!("{}.v", module);
                    let module_name_sv = format!("{}.sv", module);
                    if !visited_modules.contains(&module_name_v) && !visited_modules.contains(&module_name_sv) {
                        // Mark both spellings visited up front so neither
                        // lookup re-enters this module.
                        visited_modules.push(module_name_v.clone());
                        visited_modules.push(module_name_sv.clone());
                        // Try the .v spelling, then the .sv spelling; each
                        // call is a no-op if no matching file exists.
                        append_module(
                            &PathBuf::from("./vpm_modules"),
                            &module_name_v,
                            top_module,
                            visited_modules,
                            sub,
                            filelist_path,
                            filepaths,
                            f_statements,
                            define_statements)?;
                        append_module(
                            &PathBuf::from("./vpm_modules"),
                            &module_name_sv,
                            top_module,
                            visited_modules,
                            sub,
                            filelist_path,
                            filepaths,
                            f_statements,
                            define_statements)?;
                    }
                }
            }
        }
        // Depth-first descent into the rest of the tree.
        find_module_instantiations(
            child,
            top_module,
            contents,
            visited_modules,
            sub,
            filelist_path,
            filepaths,
            f_statements,
            define_statements)?;
    }
    Ok(())
}
| rust | MIT | 03763cc40a2354d7a979a378e6830c1b4ceee44a | 2026-01-04T20:11:49.815734Z | false |
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/cmd/update.rs | src/cmd/update.rs | use anyhow::Result;
use crate::cmd::{Execute, Update};
use crate::cmd::include::get_head_commit_hash;
use crate::toml::{get_repo_links, add_top_module, remove_top_module};
use imara_diff::intern::InternedInput;
use imara_diff::{diff, Algorithm, UnifiedDiffBuilder};
impl Execute for Update {
    /// `vpm update <module_path> [--commit <hash>]`: re-pins the module to
    /// the given commit, defaulting to the remote HEAD.
    async fn execute(&self) -> Result<()> {
        let module_path = &self.module_path;
        println!("Updating module '{}'", module_path);
        update_module(module_path, self.commit.as_deref())
    }
}
/// Re-pins `module_path` to `commit` (or the remote HEAD) in vpm.toml,
/// letting the user pick a repository when several provide the module,
/// then prints a diff of the module file before/after.
fn update_module(module_path: &str, commit: Option<&str>) -> Result<()> {
    let repo_links = get_repo_links(module_path);
    if repo_links.is_empty() {
        return Err(anyhow::anyhow!("No repositories found for module '{}'", module_path));
    }
    // Single provider: take it. Multiple: prompt with a 1-based menu.
    let chosen_repo = if repo_links.len() == 1 {
        repo_links.into_iter().next().unwrap()
    } else {
        println!("Multiple repositories found for module '{}'. Please choose one:", module_path);
        for (index, link) in repo_links.iter().enumerate() {
            println!("{}. {}", index + 1, link);
        }
        let mut choice = String::new();
        std::io::stdin().read_line(&mut choice)?;
        let index: usize = choice.trim().parse()?;
        // NOTE(review): HashSet iteration order is nondeterministic, so the
        // nth() pick may not match the menu printed above — verify.
        repo_links.into_iter().nth(index - 1)
            .ok_or_else(|| anyhow::anyhow!("Invalid choice"))?
    };
    // NOTE(review): panics if ls-remote fails (unwrap on Result).
    let head_commit_hash = get_head_commit_hash(&chosen_repo).unwrap();
    let commit_hash = commit.unwrap_or(&head_commit_hash);
    println!("Updating module '{}' to commit '{}'", module_path, commit_hash);
    let old_contents = std::fs::read_to_string(module_path)?;
    // Re-pin the manifest entry: drop then re-add with the new commit.
    remove_top_module(&chosen_repo, module_path)?;
    add_top_module(&chosen_repo, module_path, commit_hash)?;
    // NOTE(review): only vpm.toml is edited above — nothing re-downloads the
    // module file, so old/new contents look identical and this diff is
    // likely always empty. Confirm against the intended workflow.
    let new_contents = std::fs::read_to_string(module_path)?;
    println!("Module '{}' updated to commit '{}'", module_path, commit_hash);
    display_diff(&old_contents, &new_contents);
    Ok(())
}
fn display_diff(old_contents: &str, new_contents: &str) {
let input = InternedInput::new(old_contents, new_contents);
let diff_output = diff(
Algorithm::Histogram,
&input,
UnifiedDiffBuilder::new(&input)
);
println!("Diff:\n{}", diff_output);
} | rust | MIT | 03763cc40a2354d7a979a378e6830c1b4ceee44a | 2026-01-04T20:11:49.815734Z | false |
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/cmd/list.rs | src/cmd/list.rs | use anyhow::{Result, Context, anyhow};
use std::collections::HashSet;
use std::process::Command;
use crate::cmd::{Execute, List};
use tempfile::tempdir;
const STD_LIB_URL: &str = "https://github.com/getinstachip/openchips";
impl Execute for List {
    /// `vpm list`: prints every Verilog module available in the standard
    /// library repository, with troubleshooting hints on failure.
    async fn execute(&self) -> Result<()> {
        match list_verilog_files() {
            Ok(verilog_files) => {
                println!("Available Verilog modules:");
                for file in verilog_files {
                    println!("  {}", file);
                }
                Ok(())
            }
            Err(e) => {
                // Print actionable debug steps, then propagate the error.
                eprintln!("Error: Failed to list Verilog files. {}", e);
                eprintln!("Debug steps:");
                eprintln!("1. Check your internet connection");
                eprintln!("2. Ensure git is installed and accessible from the command line");
                eprintln!("3. Verify you have read permissions for the temporary directory");
                Err(e)
            }
        }
    }
}
/// Shallow-clones the standard-library repo into a temp directory and
/// returns the unique file stems of every `.v` / `.sv` file found in it.
fn list_verilog_files() -> Result<Vec<String>> {
    let temp_dir = tempdir().context("Failed to create temporary directory. Ensure you have write permissions in the system temp directory.")?;
    let repo_path = temp_dir.path();
    // Shallow, single-branch clone to keep the download small.
    let output = Command::new("git")
        .args([
            "clone", "--depth", "1", "--single-branch", "--jobs", "4",
            STD_LIB_URL,
            repo_path.to_str().unwrap_or_default(),
        ])
        .output()
        .context("Failed to execute git command. Ensure git is installed and accessible from the command line.")?;
    if !output.status.success() {
        return Err(anyhow!(
            "Git clone failed. Error: {}. This could be due to network issues, incorrect repository URL, or git configuration problems.",
            String::from_utf8_lossy(&output.stderr)
        ));
    }
    // Collect unique module names (file stems) across the whole tree.
    let mut verilog_files = HashSet::new();
    for entry in walkdir::WalkDir::new(repo_path).into_iter().filter_map(Result::ok) {
        let path = entry.path();
        let is_verilog = matches!(
            path.extension().and_then(|ext| ext.to_str()),
            Some("v") | Some("sv")
        );
        if is_verilog {
            if let Some(stem) = path.file_stem() {
                verilog_files.insert(stem.to_string_lossy().into_owned());
            }
        }
    }
    if verilog_files.is_empty() {
        return Err(anyhow!("No Verilog files found in the repository. This could indicate an issue with the repository structure or content."));
    }
    Ok(verilog_files.into_iter().collect())
}
| rust | MIT | 03763cc40a2354d7a979a378e6830c1b4ceee44a | 2026-01-04T20:11:49.815734Z | false |
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/cmd/include.rs | src/cmd/include.rs | use std::collections::HashSet;
use std::env::current_dir;
use std::path::{Path, PathBuf};
use std::{fs, process::Command};
use anyhow::{Context, Result};
use once_cell::sync::Lazy;
use tree_sitter::{Node, Parser, Query, QueryCursor};
use crate::cmd::{Execute, Include};
use crate::toml::{add_dependency, add_top_module};
use walkdir::{DirEntry, WalkDir};
use fancy_regex::Regex;
use dialoguer::{theme::ColorfulTheme, MultiSelect};
use fuzzy_matcher::FuzzyMatcher;
use fuzzy_matcher::skim::SkimMatcherV2;
use std::io::{self, Write};
use indicatif::{ProgressBar, ProgressStyle};
impl Execute for Include {
    /// `vpm include`: installs either a whole repository (`--repo`) or a
    /// single module, pinned to an explicit commit or the remote HEAD.
    async fn execute(&self) -> Result<()> {
        println!("Including from: '{}'", self.url);
        let repo_name = name_from_url(&self.url);
        let tmp_path = PathBuf::from("/tmp").join(repo_name);
        // Resolve the commit to pin: explicit flag wins, otherwise ask the
        // remote for HEAD. (Replaces the is_none()/clone() branch with a
        // single idiomatic match.)
        let commit = match self.commit.clone() {
            Some(hash) => Some(hash),
            None => Some(get_head_commit_hash(&self.url)?),
        };
        if self.repo {
            include_entire_repo(&self.url, &tmp_path, self.riscv, commit.as_deref())?
        } else {
            include_single_module(&self.url, self.riscv, commit.as_deref())?
        }
        Ok(())
    }
}
/// Resolves the short (7-char) HEAD commit hash of a GitHub repository via
/// `git ls-remote`. Accepts a full `https://github.com/...` URL or an
/// `owner/repo` shorthand; any `/blob/...` suffix is stripped first.
///
/// BUG FIX: the previous `hash[..7]` slice would panic if git ever returned
/// fewer than 7 characters; `get(..7)` now falls back to the full string.
pub fn get_head_commit_hash(url: &str) -> Result<String> {
    let github_url = if url.starts_with("https://github.com/") {
        url.to_string()
    } else {
        format!("https://github.com/{}", url)
    };
    // ls-remote needs the bare repo URL, not a file ("blob") URL.
    let (repo_url, _) = github_url.rsplit_once("/blob/").unwrap_or((&github_url, ""));
    let output = Command::new("git")
        .args(["ls-remote", repo_url, "HEAD"])
        .output()?;
    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        return Err(anyhow::anyhow!("Failed to get HEAD commit hash: {}", stderr));
    }
    // Output format: "<hash>\tHEAD"; the first whitespace-delimited token is
    // the hash.
    let stdout = String::from_utf8(output.stdout)?;
    let hash = stdout.split_whitespace().next().unwrap_or("");
    if hash.is_empty() {
        return Err(anyhow::anyhow!("Failed to get HEAD commit hash: Empty hash returned"));
    }
    // Short hash; panic-free even on unexpectedly short output.
    Ok(hash.get(..7).unwrap_or(hash).to_string())
}
/// Installs modules from a whole repository: clones it into /tmp, lets the
/// user fuzzy-select modules interactively, installs the selection (or the
/// entire repo when nothing was selected), then cleans up the clone.
/// `url` here is the "owner/repo" shorthand; the full URL is built below.
fn include_entire_repo(url: &str, tmp_path: &PathBuf, riscv: bool, commit_hash: Option<&str>) -> Result<()> {
    let url = format!("https://github.com/{}", url);
    println!("Full GitHub URL: {}@{}", url, commit_hash.unwrap_or("HEAD"));
    include_repo_from_url(&url, "/tmp/", commit_hash)?;
    add_dependency(&url)?;
    // Enumerate the clone's files as repo-relative paths for the picker.
    let files = get_files(&tmp_path.to_str().unwrap_or_default());
    let items = get_relative_paths(&files, tmp_path);
    let selected_items = select_modules(&items)?;
    process_selected_modules(&url, tmp_path, &selected_items, riscv, commit_hash)?;
    // Remove the temporary clone regardless of what was selected.
    fs::remove_dir_all(tmp_path)?;
    print_success_message(&url, &selected_items);
    Ok(())
}
fn include_single_module(url: &str, riscv: bool, commit_hash: Option<&str>) -> Result<()> {
let repo_url = get_github_repo_url(url).unwrap();
include_repo_from_url(&repo_url, "/tmp/", commit_hash)?;
add_dependency(&repo_url)?;
println!("Repo URL: {}@{}", repo_url, commit_hash.unwrap_or("HEAD"));
let module_path = get_component_path_from_github_url(url).unwrap_or_default();
println!("Including module: {}", module_path);
include_module_from_url(&module_path, &repo_url, riscv, commit_hash)?;
println!("Successfully installed module: {}", module_path);
Ok(())
}
/// Recursively lists every regular file under `directory` as absolute path
/// strings; unreadable entries are silently skipped.
fn get_files(directory: &str) -> Vec<String> {
    WalkDir::new(directory)
        .into_iter()
        .filter_map(|entry| {
            entry.ok().and_then(|e| {
                if e.file_type().is_file() {
                    Some(e.path().to_string_lossy().into_owned())
                } else {
                    None
                }
            })
        })
        .collect()
}
/// Rewrites each file path as relative to `tmp_path` (prefix and leading
/// '/' stripped); paths outside `tmp_path` are returned unchanged.
fn get_relative_paths(files: &[String], tmp_path: &PathBuf) -> Vec<String> {
    let prefix = tmp_path.to_string_lossy();
    let mut relative = Vec::with_capacity(files.len());
    for file in files {
        let stripped = file.strip_prefix(prefix.as_ref()).unwrap_or(file);
        relative.push(stripped.trim_start_matches('/').to_string());
    }
    relative
}
/// Interactive module picker: repeatedly prompts for a fuzzy search query,
/// shows matching items in a multi-select menu, and accumulates everything
/// toggled across rounds. An empty query ends the loop.
fn select_modules(items: &[String]) -> Result<HashSet<String>> {
    let matcher = SkimMatcherV2::default();
    let mut selected_items: HashSet<String> = HashSet::new();
    loop {
        print!("Enter module name (or press Enter to finish): ");
        // Flush so the prompt appears before blocking on stdin.
        io::stdout().flush()?;
        let mut query = String::new();
        io::stdin().read_line(&mut query)?;
        query = query.trim().to_string();
        if query.is_empty() {
            break;
        }
        // Narrow the full item list to fuzzy matches for this query.
        let filtered_items: Vec<String> = items
            .iter()
            .filter(|&item| matcher.fuzzy_match(item, &query).is_some())
            .cloned()
            .collect();
        let selection = MultiSelect::with_theme(&ColorfulTheme::default())
            .with_prompt("Toggle items to include with the space bar. Hit enter to start a new search")
            .items(&filtered_items)
            .interact()?;
        // Echo what has been selected in previous rounds.
        for i in &selected_items {
            println!("- {}", i);
        }
        selected_items.extend(selection.iter().map(|&i| filtered_items[i].clone()));
    }
    // ANSI clear-screen + cursor home before returning to the caller.
    print!("\x1B[2J\x1B[1;1H");
    Ok(selected_items)
}
/// Installs each user-selected module from the cloned repo; when the
/// selection is empty, falls back to including the entire repository into
/// the current directory.
fn process_selected_modules(url: &str, tmp_path: &PathBuf, selected_items: &HashSet<String>, riscv: bool, commit_hash: Option<&str>) -> Result<()> {
    for item in selected_items {
        // Normalize to a clone-relative path for display and install.
        let displayed_path = item.strip_prefix(tmp_path.to_string_lossy().as_ref()).unwrap_or(item).trim_start_matches('/');
        println!("Including module: {}", displayed_path);
        let full_path = tmp_path.join(displayed_path);
        let module_path = full_path.strip_prefix(tmp_path).unwrap_or(&full_path).to_str().unwrap().trim_start_matches('/');
        println!("Module path: {}", module_path);
        include_module_from_url(module_path, url, riscv, commit_hash)?;
    }
    if selected_items.is_empty() {
        println!("No modules selected. Including entire repository.");
        include_repo_from_url(url, "./", commit_hash)?;
    }
    Ok(())
}
/// Report what was installed: the chosen module names when any were selected,
/// otherwise the repository name derived from `url`.
fn print_success_message(url: &str, selected_items: &HashSet<String>) {
    if selected_items.is_empty() {
        println!("Successfully installed repository '{}'.", name_from_url(url));
    } else {
        let installed_modules: Vec<String> = selected_items.iter().cloned().collect();
        println!("Successfully installed module(s): {}", installed_modules.join(", "));
    }
}
/// Return the last non-empty path segment of `url` (typically the repository
/// name); "" when every segment is empty.
fn name_from_url(url: &str) -> &str {
    for segment in url.rsplit('/') {
        if !segment.is_empty() {
            return segment;
        }
    }
    ""
}
/// For a GitHub blob/tree URL, return the in-repo file path (everything after
/// the branch segment). `None` for non-GitHub URLs or URLs with no path part.
fn get_component_path_from_github_url(url: &str) -> Option<String> {
    if !url.starts_with("https://github.com/") {
        return None;
    }
    // ["https:", "", "github.com", owner, repo, "blob", branch, path...]
    let parts: Vec<&str> = url.split('/').collect();
    if parts.len() >= 8 {
        Some(parts[7..].join("/"))
    } else {
        None
    }
}
/// Reduce a GitHub URL to its `https://github.com/<owner>/<repo>` root.
/// `None` when the URL is not a GitHub URL carrying both segments.
fn get_github_repo_url(url: &str) -> Option<String> {
    if !url.starts_with("https://github.com/") {
        return None;
    }
    let parts: Vec<&str> = url.split('/').collect();
    (parts.len() >= 5).then(|| format!("https://github.com/{}/{}", parts[3], parts[4]))
}
/// True when `path` contains a directory separator (Unix or Windows style),
/// i.e. it looks like a filesystem path rather than a bare module name.
fn is_full_filepath(path: &str) -> bool {
    ['/', '\\'].iter().any(|&sep| path.contains(sep))
}
/// Wrap a concrete file path in a walkdir `DirEntry` by walking it with
/// depth 0..0, which yields exactly the root entry itself.
fn filepath_to_dir_entry(filepath: PathBuf) -> Result<DirEntry> {
    let mut walker = WalkDir::new(filepath)
        .min_depth(0)
        .max_depth(0)
        .into_iter();
    match walker.next() {
        Some(entry) => entry.context("Failed to create DirEntry"),
        None => Err(anyhow::anyhow!("Failed to create DirEntry")),
    }
}
// Generate a `top.v` wrapper for the RISC-V module found in `module_path`:
// a `top` module exposing the same ports, instantiating the parsed module as
// `cpu` with its parameters forwarded. Errors when no module declaration is
// found in the file.
fn generate_top_v_content(module_path: &str) -> Result<String> {
println!("Generating top.v file for RISC-V in {}", module_path);
let module_content = fs::read_to_string(module_path)?;
let mut top_content = String::new();
top_content.push_str("// Auto-generated top.v file for RISC-V\n\n");
// Use regex to find module declaration: name, optional #(params), then ports.
let module_re = regex::Regex::new(r"module\s+(\w+)\s*(?:#\s*\(([\s\S]*?)\))?\s*\(([\s\S]*?)\);").unwrap();
if let Some(captures) = module_re.captures(&module_content) {
let module_name = captures.get(1).unwrap().as_str();
println!("Module name: {}", module_name);
// Extract parameters (one per non-empty line of the #(...) group).
let params = captures.get(2).map_or(Vec::new(), |m| {
m.as_str().lines()
.map(|line| line.trim())
.filter(|line| !line.is_empty())
.collect()
});
// Extract ports, one per non-empty line of the port list.
let ports: Vec<&str> = captures.get(3).unwrap().as_str()
.lines()
.map(|line| line.trim())
.filter(|line| !line.is_empty())
.collect();
// Generate top module ports — the port lines are copied verbatim.
top_content.push_str("module top (\n");
for port in &ports {
top_content.push_str(&format!(" {}\n", port));
}
top_content.push_str(");\n\n");
// Instantiate the module with its parameters forwarded by name.
top_content.push_str(&format!("{} #(\n", module_name));
for param in params.iter() {
if let Some((name, value)) = param.split_once('=') {
// Drop the "parameter" keyword and any type tokens; keep the last word.
let name = name.trim().trim_start_matches("parameter").trim();
let name = name.split_whitespace().last().unwrap_or(name);
let value = value.trim().trim_end_matches(',');
// NOTE(review): every parameter line ends with "," — the last one too —
// so the emitted instantiation carries a trailing comma before ")".
top_content.push_str(&format!(" .{}({}),\n", name, value));
}
}
top_content.push_str(") cpu (\n");
// Connect ports by name (.<port>(<port>)).
let port_re = regex::Regex::new(r"(input|output|inout)\s+(?:wire|reg)?\s*(?:\[.*?\])?\s*(\w+)").unwrap();
for (i, port) in ports.iter().enumerate() {
if let Some(port_captures) = port_re.captures(port) {
let port_name = port_captures.get(2).unwrap().as_str();
top_content.push_str(&format!(" .{}({}){}\n", port_name, port_name, if i < ports.len() - 1 { "," } else { "" }));
}
}
top_content.push_str(");\n\n");
top_content.push_str("endmodule\n");
return Ok(top_content);
}
Err(anyhow::anyhow!("No module declaration found in the file"))
}
/// Generate a `constraints.xdc` for a Xilinx Artix-7 board from the ports of
/// the Verilog module at `module_path`.
///
/// Ports are discovered with a line-anchored regex; any port without an entry
/// in the hard-coded pin map is reported on stdout but left unconstrained.
/// A 100 MHz clock constraint is added for the `clk` pin.
fn generate_xdc_content(module_path: &str) -> Result<String> {
    println!("Generating constraints.xdc file for Xilinx Artix-7 board in {}", module_path);
    let module_content = fs::read_to_string(module_path)?;
    let mut xdc_content = String::new();
    xdc_content.push_str("## Auto-generated constraints.xdc file for Xilinx Artix-7 board\n\n");
    // (input|output|inout), optional net type, optional bus range, then name.
    let port_re = regex::Regex::new(r"(?m)^\s*(input|output|inout)\s+(?:wire|reg)?\s*(?:\[.*?\])?\s*(\w+)").unwrap();
    let mut ports = Vec::new();
    for captures in port_re.captures_iter(&module_content) {
        let port_type = captures.get(1).unwrap().as_str();
        let port_name = captures.get(2).unwrap().as_str();
        ports.push((port_type, port_name));
    }
    // Pin mappings for an Artix-7 dev board (adjust per board revision).
    let pin_mappings = [
        ("clk", "E3"),
        ("resetn", "C12"),
        ("trap", "D10"),
        ("mem_valid", "C11"),
        ("mem_instr", "C10"),
        ("mem_ready", "A10"),
        ("mem_addr[0]", "A8"),
        ("mem_wdata[0]", "C5"),
        ("mem_wstrb[0]", "C6"),
        ("mem_rdata[0]", "D5"),
    ];
    // Generate constraints for each port.
    for (_port_type, port_name) in ports {
        if let Some((_, pin)) = pin_mappings.iter().find(|&&(p, _)| p == port_name) {
            // The original picked "LVCMOS33" on both branches of a clk check;
            // every mapped pin uses the same I/O standard, so it is a constant.
            let iostandard = "LVCMOS33";
            xdc_content.push_str(&format!("set_property -dict {{ PACKAGE_PIN {} IOSTANDARD {} }} [get_ports {{ {} }}]\n", pin, iostandard, port_name));
        } else {
            println!("Warning: No pin mapping found for port: {}", port_name);
        }
    }
    // Clock constraint: 100 MHz (10 ns period) on `clk` when present.
    if let Some((_, _clk_pin)) = pin_mappings.iter().find(|&&(p, _)| p == "clk") {
        xdc_content.push_str("\n## Clock signal\n");
        xdc_content.push_str("create_clock -period 10.000 -name sys_clk_pin -waveform {0.000 5.000} -add [get_ports { clk }]\n");
    } else {
        println!("Warning: No clock signal found. XDC file may be incomplete.");
        xdc_content.push_str("\n## Warning: No clock signal found. Please add clock constraints manually.\n");
    }
    Ok(xdc_content)
}
// Install a single module from a remote repository: clone the repo into /tmp,
// copy the module (and its submodules, recursively) into the current
// directory, optionally generate RISC-V top.v/constraints.xdc wrappers, and
// register the module in vpm.toml/vpm.lock via `add_top_module`.
pub fn include_module_from_url(module_path: &str, url: &str, riscv: bool, commit_hash: Option<&str>) -> Result<()> {
let package_name = name_from_url(url);
// Clone into /tmp/<repo-name> so process_module can find the sources.
include_repo_from_url(url, "/tmp/", commit_hash)?;
let destination = "./";
// `true` marks this as the top module for lockfile bookkeeping.
process_module(package_name, module_path, destination.to_owned(), &mut HashSet::new(), url, true, commit_hash)?;
// Shadow `module_path` with the installed file's location in `destination`.
let module_path = Path::new(&destination).join(Path::new(module_path).file_name().unwrap());
anyhow::ensure!(module_path.exists(), "Module file not found in the destination folder");
if riscv {
let top_v_content = generate_top_v_content(&module_path.to_str().unwrap())?;
fs::write(format!("{}/top.v", destination), top_v_content)?;
println!("Created top.v file for RISC-V in {}", destination);
// Generate .xdc file for Xilinx Artix-7 board from the fresh top.v.
let xdc_content = generate_xdc_content(&format!("{}/top.v", destination))?;
fs::write(format!("{}/constraints.xdc", destination), xdc_content)?;
println!("Created constraints.xdc file for Xilinx Artix-7 board in {}", destination);
}
// Record the absolute path of the installed module; empty commit hash when none.
add_top_module(url, current_dir()?.join(module_path.file_name().unwrap()).to_str().unwrap(), commit_hash.unwrap_or(""))?;
Ok(())
}
// Copy one module out of the /tmp/<package_name> checkout into `destination`,
// then recurse into its submodules. `visited` deduplicates work across the
// recursion; the returned set lists every module file processed on behalf of
// this call (including transitively processed submodules).
pub fn process_module(package_name: &str, module: &str, destination: String, visited: &mut HashSet<String>, url: &str, is_top_module: bool, commit_hash: Option<&str>) -> Result<HashSet<String>> {
// println!("Processing module: {}", module);
// Normalise to a name and a name-with-extension; bare names default to .v.
let module_name = module.strip_suffix(".v").or_else(|| module.strip_suffix(".sv")).unwrap_or(module);
let module_with_ext = if module.ends_with(".v") || module.ends_with(".sv") {
module.to_string()
} else {
format!("{}.v", module_name)
};
// `insert` returning false means we already handled this module.
if !visited.insert(module_with_ext.clone()) {
return Ok(HashSet::new());
}
let tmp_path = PathBuf::from("/tmp").join(package_name);
let file_path = tmp_path.join(&module_with_ext);
let target_path = PathBuf::from(&destination);
println!("Including submodule '{}'", module_with_ext);
let mut processed_modules = HashSet::new();
if is_full_filepath(&module_with_ext) {
// println!("Full filepath detected for module '{}'", module_with_ext);
// An explicit path: copy that exact file.
let dir_entry = filepath_to_dir_entry(file_path)?;
process_file(&dir_entry, &target_path.to_str().unwrap(), module, url, visited, is_top_module)?;
processed_modules.insert(module_with_ext.clone());
} else {
// println!("Full filepath not detected for module '{}'", module_with_ext);
// A bare name: search the checkout for matching .v/.sv files.
process_non_full_filepath(module_name, &tmp_path, &target_path, url, visited, is_top_module, &mut processed_modules)?;
}
// Recurse into instantiated submodules found in the copied file.
let submodules = download_and_process_submodules(package_name, module, &destination, url, visited, is_top_module, commit_hash)?;
processed_modules.extend(submodules);
Ok(processed_modules)
}
// Resolve a bare module name against the temp checkout: zero matches are
// skipped with a warning, a single match is processed directly, and multiple
// matches defer to an interactive chooser.
fn process_non_full_filepath(module_name: &str, tmp_path: &PathBuf, target_path: &PathBuf, url: &str, visited: &mut HashSet<String>, is_top_module: bool, processed_modules: &mut HashSet<String>) -> Result<()> {
let matching_entries = find_matching_entries(module_name, tmp_path);
println!("Found {} matching entries for module '{}'", matching_entries.len(), module_name);
if matching_entries.is_empty() {
println!("No matching files found for module '{}'. Skipping...", module_name);
} else if matching_entries.len() == 1 {
let dir_entry = filepath_to_dir_entry(matching_entries[0].clone())?;
process_file(&dir_entry, target_path.to_str().unwrap(), module_name, url, visited, is_top_module)?;
// NOTE(review): the record always uses ".v" even when the match was a .sv file.
processed_modules.insert(format!("{}.v", module_name));
} else {
process_multiple_matches(matching_entries, target_path, module_name, url, visited, is_top_module, processed_modules)?;
}
Ok(())
}
/// Find every `<module_name>.v` / `<module_name>.sv` file anywhere under
/// `tmp_path`.
///
/// The two candidate file names are built once up front; the original
/// allocated two fresh `String`s per directory entry inside the filter.
fn find_matching_entries(module_name: &str, tmp_path: &PathBuf) -> Vec<PathBuf> {
    let sv_name = format!("{}.sv", module_name);
    let v_name = format!("{}.v", module_name);
    WalkDir::new(tmp_path)
        .into_iter()
        .filter_map(Result::ok)
        .filter(|entry| {
            matches!(entry.file_name().to_str(), Some(name) if name == sv_name || name == v_name)
        })
        .map(|entry| entry.path().to_path_buf())
        .collect()
}
// Ask the user (via stdin) which of several matching files to install.
// Choices are presented 1-based; a non-numeric or out-of-range answer fails
// the whole operation.
fn process_multiple_matches(matching_entries: Vec<PathBuf>, target_path: &PathBuf, module_name: &str, url: &str, visited: &mut HashSet<String>, is_top_module: bool, processed_modules: &mut HashSet<String>) -> Result<()> {
println!("Multiple modules found for '{}'. Please choose:", module_name);
for (i, entry) in matching_entries.iter().enumerate() {
println!("{}: {}", i + 1, entry.display());
}
let mut choice = String::new();
std::io::stdin().read_line(&mut choice)?;
// Propagates a parse error for non-numeric input.
let index: usize = choice.trim().parse()?;
if index > 0 && index <= matching_entries.len() {
let dir_entry = filepath_to_dir_entry(matching_entries[index - 1].clone())?;
process_file(&dir_entry, target_path.to_str().unwrap(), module_name, url, visited, is_top_module)?;
processed_modules.insert(format!("{}.v", module_name));
} else {
anyhow::bail!("Invalid choice");
}
Ok(())
}
/// Copy a discovered Verilog/SystemVerilog source into `destination`, emit a
/// companion header (`.vh`/`.svh`) under `destination/headers/`, and record
/// the module in the lockfile.
fn process_file(entry: &DirEntry, destination: &str, module_path: &str, url: &str, visited: &mut HashSet<String>, is_top_module: bool) -> Result<()> {
    let target_path = PathBuf::from(destination);
    let extension = entry.path().extension().and_then(|s| s.to_str()).unwrap_or("v");
    // Copy the source file itself into the destination folder.
    fs::copy(entry.path(), &target_path.join(entry.file_name()))?;
    let contents = fs::read_to_string(entry.path())?;
    // Parse the file so module/parameter/port declarations can be extracted
    // into the generated header.
    let mut parser = Parser::new();
    parser.set_language(tree_sitter_verilog::language())?;
    let tree = parser.parse(&contents, None).context("Failed to parse file")?;
    let header_content = generate_headers(tree.root_node(), &contents)?;
    let module_name = Path::new(module_path)
        .file_stem()
        .and_then(|s| s.to_str())
        .unwrap_or(module_path);
    let module_name_with_ext = if !module_name.ends_with(".v") && !module_name.ends_with(".sv") {
        format!("{}.{}", module_name, extension)
    } else {
        module_name.to_string()
    };
    // SystemVerilog headers get .svh, plain Verilog headers get .vh.
    let header_filename = format!("{}.{}", module_name.strip_suffix(".v").unwrap_or(module_name), if extension == "sv" { "svh" } else { "vh" });
    let headers_dir = target_path.join("headers");
    fs::create_dir_all(&headers_dir)?;
    fs::write(headers_dir.join(&header_filename), header_content)?;
    // Bug fix: the header is written under `headers/`, so log that path —
    // the original printed `<destination>/<header>` which is never created.
    println!("Generating header file: {}", headers_dir.join(&header_filename).to_str().unwrap());
    let full_module_path = target_path.join(&module_name_with_ext);
    update_lockfile(&full_module_path, url, &contents, visited, is_top_module)?;
    Ok(())
}
// Scan the just-installed module file for instantiated submodules and process
// each one recursively. All failures here are soft: the offending submodule
// is skipped with a warning so one bad file cannot abort the whole install.
// Returns the set of submodule file names processed (transitively).
fn download_and_process_submodules(package_name: &str, module_path: &str, destination: &str, url: &str, visited: &mut HashSet<String>, _is_top_module: bool, commit_hash: Option<&str>) -> Result<HashSet<String>> {
let module_name = Path::new(module_path)
.file_stem()
.and_then(|s| s.to_str())
.unwrap_or(module_path);
// println!("Processing submodule: {}", module_path);
// Keep the parent's extension when the caller passed one, else pass through.
let module_name_with_ext = if module_path.ends_with(".sv") {
format!("{}.sv", module_name)
} else if module_path.ends_with(".v") {
format!("{}.v", module_name)
} else {
module_path.to_string()
};
let full_module_path = PathBuf::from(destination).join(&module_name_with_ext);
// println!("Full module path: {}", full_module_path.display());
let contents = match fs::read_to_string(&full_module_path) {
Ok(c) => c,
Err(e) => {
println!("Warning: Failed to read file {}: {}. Skipping this module.", full_module_path.display(), e);
return Ok(HashSet::new());
}
};
// NOTE(review): this parser is configured but never used below —
// get_submodules works on the raw text.
let mut parser = Parser::new();
if let Err(e) = parser.set_language(tree_sitter_verilog::language()) {
eprintln!("Warning: Failed to set parser language: {}. Skipping submodule processing.", e);
return Ok(HashSet::new());
}
let submodules = match get_submodules(&contents) {
Ok(s) => s,
Err(e) => {
eprintln!("Warning: Failed to get submodules from {}: {}. Continuing without submodules.", full_module_path.display(), e);
HashSet::new()
}
};
let mut all_submodules = HashSet::new();
for submodule in submodules {
// Submodules inherit the parent file's extension unless they carry one.
let submodule_with_ext = if submodule.ends_with(".v") || submodule.ends_with(".sv") {
submodule.to_string()
} else {
let parent_extension = Path::new(&module_name_with_ext)
.extension()
.and_then(|ext| ext.to_str())
.unwrap_or("v");
format!("{}.{}", &submodule, parent_extension)
};
if !visited.contains(&submodule_with_ext) {
let submodule_destination = PathBuf::from(destination);
if let Err(e) = fs::create_dir_all(&submodule_destination) {
eprintln!("Warning: Failed to create directory {}: {}. Skipping this submodule.", submodule_destination.display(), e);
continue;
}
// Recurse; `false` because submodules are never the top module.
match process_module(
package_name,
&submodule_with_ext,
submodule_destination.to_str().unwrap().to_string(),
visited,
&url,
false,
commit_hash.clone()
) {
Ok(processed_submodules) => {
all_submodules.insert(submodule_with_ext.clone());
all_submodules.extend(processed_submodules);
},
Err(e) => {
eprintln!("Warning: Failed to process submodule {}: {}. Skipping this submodule.", submodule_with_ext, e);
continue;
}
}
// NOTE(review): passes the *parent's* contents when updating the
// submodule's lockfile entry — confirm this is intentional.
let full_submodule_path = submodule_destination.join(&submodule_with_ext);
if let Err(e) = update_lockfile(&full_submodule_path, &url, &contents, visited, false) {
eprintln!("Warning: Failed to update lockfile for {}: {}. Continuing without updating lockfile.", full_submodule_path.display(), e);
}
}
}
Ok(all_submodules)
}
/// Record `full_path` (and its submodules) in `vpm.lock`, creating or
/// updating `[[package]]` entries as needed.
///
/// Top-level modules get an explicit empty `parents = []`; submodules first
/// seen here get a stub entry pointing back at this module, and already-known
/// submodules have this module appended to their `parents` list.
fn update_lockfile(full_path: &PathBuf, url: &str, contents: &str, visited: &HashSet<String>, is_top_module: bool) -> Result<()> {
    // Missing lockfile is treated as empty and created on write below.
    let mut lockfile = fs::read_to_string("vpm.lock").unwrap_or_default();
    let module_entry = if is_top_module {
        format!("[[package]]\nfull_path = \"{}\"\nsource = \"{}\"\nparents = []\n", full_path.display(), url)
    } else {
        format!("[[package]]\nfull_path = \"{}\"\nsource = \"{}\"\n", full_path.display(), url)
    };
    // NOTE(review): this parser is configured but never used — get_submodules
    // works on the raw text.
    let mut parser = Parser::new();
    parser.set_language(tree_sitter_verilog::language())?;
    let submodules = get_submodules(contents)?;
    let submodules_vec: Vec<String> = submodules.into_iter().collect();
    if !lockfile.contains(&format!("full_path = \"{}\"", full_path.display())) {
        // First time this module is seen: append a fresh entry.
        let formatted_submodules = submodules_vec.iter()
            .map(|s| format!(" \"{}\",", s))
            .collect::<Vec<_>>()
            .join("\n");
        lockfile.push_str(&format!("\n{}\nsubmodules = [\n{}\n]\n", module_entry, formatted_submodules));
    } else {
        // Entry exists: refresh its submodule list in place.
        update_submodules(&mut lockfile, &module_entry, &submodules_vec);
    }
    for submodule in &submodules_vec {
        if !visited.contains(submodule) {
            let submodule_path = full_path.parent().unwrap().join(submodule);
            if let Some(existing_entry) = lockfile.find(&format!("\n[[package]]\nfull_path = \"{}\"", submodule_path.display())) {
                // Submodule already recorded: append this module to its
                // `parents` list if it is not already there.
                let parent_start = lockfile[existing_entry..].find("parents = [").map(|i| existing_entry + i);
                if let Some(start) = parent_start {
                    let end = lockfile[start..].find(']').map(|i| start + i + 1).unwrap_or(lockfile.len());
                    let current_parents = lockfile[start..end].to_string();
                    let new_parents = if current_parents.contains(&full_path.display().to_string()) {
                        current_parents
                    } else {
                        // Bug fix: the source contained the mojibake
                        // `¤t_parents` (a corrupted `&current_parents`),
                        // which does not compile. Drop the closing ']' and
                        // re-append it after the new parent line.
                        format!("{} \"{}\",\n]", &current_parents[..current_parents.len() - 1], full_path.display())
                    };
                    lockfile.replace_range(start..end, &new_parents);
                }
            } else {
                // Unknown submodule: create a stub entry with this module as parent.
                let submodule_entry = format!("\n[[package]]\nfull_path = \"{}\"\nsource = \"{}\"\nparents = [\n \"{}\",\n]\nsubmodules = []\n", submodule_path.display(), url, full_path.display());
                lockfile.push_str(&submodule_entry);
            }
        }
    }
    fs::write("vpm.lock", lockfile)?;
    Ok(())
}
/// Replace the `submodules = [...]` list that follows `module_entry` inside
/// `lockfile` with a freshly formatted list. No-op when the entry (or its
/// list) cannot be located.
fn update_submodules(lockfile: &mut String, module_entry: &str, submodules: &[String]) {
    let entry_pos = match lockfile.find(module_entry) {
        Some(pos) => pos,
        None => return,
    };
    let start = match lockfile[entry_pos..].find("submodules = [") {
        Some(offset) => entry_pos + offset,
        None => return,
    };
    // Replace through the first ']' after the list opener (or to EOF).
    let end = match lockfile[start..].find(']') {
        Some(pos) => start + pos + 1,
        None => lockfile.len(),
    };
    let formatted: Vec<String> = submodules.iter().map(|m| format!(" \"{}\",", m)).collect();
    let new_modules = format!("submodules = [\n{}\n]", formatted.join("\n"));
    lockfile.replace_range(start..end, &new_modules);
}
// Build the contents of a `.vh`/`.svh` header for every module declared in
// `contents`, using a tree-sitter query over the already-parsed `root_node`.
// Each module yields an include-guarded section echoing its parameter list
// and port list verbatim.
pub fn generate_headers(root_node: Node, contents: &str) -> Result<String> {
// Compile the query once; matches both non-ANSI and ANSI module headers.
static QUERY: Lazy<Query> = Lazy::new(|| {
Query::new(
tree_sitter_verilog::language(),
"(module_declaration
(module_header
(module_keyword)
(simple_identifier) @module_name)
(module_nonansi_header
(parameter_port_list)? @params
(list_of_ports) @ports)
)
(module_declaration
(module_header
(module_keyword)
(simple_identifier) @module_name)
(module_ansi_header
(parameter_port_list)? @params
(list_of_port_declarations)? @ports)
)",
)
.expect("Failed to create query")
});
let mut query_cursor = QueryCursor::new();
let matches = query_cursor.matches(&QUERY, root_node, contents.as_bytes());
let mut header_content = String::new();
for match_ in matches {
let mut module_name = "";
let mut params = "";
let mut ports = "";
// Capture indices follow declaration order in the query:
// 0 = @module_name, 1 = @params, 2 = @ports.
for capture in match_.captures {
let capture_text = &contents[capture.node.byte_range()];
match capture.index {
0 => module_name = capture_text,
1 => params = capture_text,
2 => ports = capture_text,
_ => {}
}
}
// Include guard derived from the upper-cased module name.
header_content.push_str(&format!(
"`ifndef {}_H\n`define {}_H\n\n",
module_name.to_uppercase(),
module_name.to_uppercase()
));
if !params.is_empty() {
header_content.push_str(&format!(
"// Parameters\n{}\n\n",
params.trim()
));
}
if !ports.is_empty() {
header_content.push_str(&format!(
"// Ports\n{}\n\n",
ports.trim()
));
}
header_content.push_str(&format!(
"// Module: {}\n// TODO: Add module description\n\n`endif // {}_H\n\n",
module_name,
module_name.to_uppercase()
));
}
Ok(header_content)
}
// Extract the names of modules instantiated in `contents` with a single
// regex: a line-leading identifier that is not a Verilog keyword (negative
// lookahead), optionally followed by a #(...) parameter override, an
// instance name, and a (...) port list ending in ';'.
pub fn get_submodules(contents: &str) -> Result<HashSet<String>> {
// Compiled once; the (?!...) lookahead requires a lookaround-capable
// regex engine (presumably fancy-regex, given the fallible captures below).
static REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(
r"(?mi)^\s*(?!(always(_comb|_ff|_latch)?|assert|assign|assume|begin|case|cover|else|end(case|function|generate|module|primitive|table|task)?|enum|for|forever|function|generate|if|initial|input|int|localparam|logic|module|negedge|output|param(eter)?|posedge|primitive|real|reg|repeat|table|task|time|timescale|typedef|while|wire))(\w+)\s*(?:#\([\s.\w(\[\-:\]\),{'}`/+!~@#$%^&*=<>?]+\))?\s*[\w\[:\]]+\s*(?:\([\s.\w(\[\-:\]\),{'}`/+!~@#$%^&*=<>?|]+\));"
).unwrap());
let submodules: HashSet<String> = REGEX
.captures_iter(contents) // Iterate over captures
// NOTE(review): `caps.unwrap()` panics if the engine reports a match
// error mid-iteration — consider propagating instead.
.map(|caps| caps.unwrap().get(0).unwrap().as_str()) // Extract the matched string
.map(|s| s.split_whitespace().next().unwrap().to_string()) // Split and get submodule name
.collect(); // Collect into a HashSet
// for submodule in &submodules {
// println!("Found submodule: {}", submodule);
// }
Ok(submodules)
}
// Clone the repository at `url` into `<location>/<repo-name>`, showing a
// spinner while git runs. An existing checkout at that path is replaced.
pub fn include_repo_from_url(url: &str, location: &str, commit_hash: Option<&str>) -> Result<()> {
let repo_path = Path::new(location).join(name_from_url(url));
let pb = ProgressBar::new_spinner();
pb.set_style(ProgressStyle::default_spinner().template("{spinner} {msg}").unwrap());
pb.set_message("Reading repository...");
// Tick the spinner on a timer since clone_repo blocks this thread.
pb.enable_steady_tick(std::time::Duration::from_millis(100));
clone_repo(url, &repo_path, commit_hash)?;
pb.finish_with_message("Reading repository complete");
Ok(())
}
// Shallow-clone `url` into `repo_path` (removing any existing checkout
// first) and optionally check out a specific commit. Git's stdout/stderr are
// discarded.
pub fn clone_repo(url: &str, repo_path: &Path, commit_hash: Option<&str>) -> Result<()> {
if repo_path.exists() {
fs::remove_dir_all(repo_path)?;
}
// NOTE(review): `.status()` only errors when git cannot be spawned; a
// nonzero git exit (e.g. bad URL) is silently ignored here — consider
// checking `status.success()`.
Command::new("git")
.args([ "clone", "--depth", "1", "--single-branch", "--jobs", "4",
url, repo_path.to_str().unwrap_or_default(),
])
.stdout(std::process::Stdio::null())
.stderr(std::process::Stdio::null())
.status()
.with_context(|| format!("Failed to clone repository from URL: '{}'", url))?;
if let Some(hash) = commit_hash {
// NOTE(review): with --depth 1 the requested commit may not exist in the
// shallow history; the checkout's exit status is likewise unchecked.
Command::new("git")
.args([ "-C", repo_path.to_str().unwrap_or_default(), "checkout", hash ])
.stdout(std::process::Stdio::null())
.stderr(std::process::Stdio::null())
.status()
.with_context(|| format!("Failed to checkout commit hash: '{}'", hash))?;
}
Ok(())
} | rust | MIT | 03763cc40a2354d7a979a378e6830c1b4ceee44a | 2026-01-04T20:11:49.815734Z | false |
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/cmd/sim.rs | src/cmd/sim.rs | use anyhow::{Context, Result};
use std::process::Command;
use std::env;
use std::path::{Path, PathBuf};
use std::fs;
use fastrand;
use crate::cmd::{Execute, Sim};
use std::fs::File;
use std::io::{BufRead, BufReader};
use fancy_regex::Regex;
use walkdir::WalkDir;
impl Execute for Sim {
    /// Compile the requested Verilog sources — either a whole folder or the
    /// explicit file list (generating a testbench first when none is
    /// present) — then run the simulation, optionally with a waveform.
    async fn execute(&self) -> Result<()> {
        let output_path = match &self.folder {
            Some(folder) => compile_verilog_from_folder(folder)?,
            None => {
                let mut verilog_files = self.verilog_files.clone();
                if !testbench_exists(&verilog_files) {
                    generate_and_add_testbench(&mut verilog_files)?;
                }
                compile_verilog(&verilog_files)?
            }
        };
        if self.waveform {
            run_simulation_with_waveform(&output_path)?;
        } else {
            run_simulation(&output_path)?;
        }
        Ok(())
    }
}
/// True when the file list already contains a testbench — a file whose name,
/// compared case-insensitively, carries a `_tb.v` or `_tb.sv` marker.
///
/// Bug fix: the original only looked for "_tb.v", so SystemVerilog
/// testbenches (`*_tb.sv`) were never detected and got regenerated.
fn testbench_exists(verilog_files: &[String]) -> bool {
    verilog_files.iter().any(|file| {
        let lower = file.to_lowercase();
        lower.contains("_tb.v") || lower.contains("_tb.sv")
    })
}
// Generate a testbench for the first file in `verilog_files`, write it next
// to the source as `<name>_tb.v`/`<name>_tb.sv`, strip comments from the
// original file, and append the new testbench to the list.
fn generate_and_add_testbench(verilog_files: &mut Vec<String>) -> Result<()> {
if let Some(first_file) = verilog_files.first() {
let is_systemverilog = first_file.ends_with(".sv");
let testbench_content = generate_testbench(first_file, is_systemverilog)
.context("Failed to generate testbench. Please check if the Verilog file is valid.")?;
// e.g. "cpu.v" -> "cpu_tb.v", "cpu.sv" -> "cpu_tb.sv".
let testbench_path = format!("{}_tb.{}", first_file.trim_end_matches(if is_systemverilog { ".sv" } else { ".v" }), if is_systemverilog { "sv" } else { "v" });
fs::write(&testbench_path, testbench_content)
.context("Failed to write testbench file. Please check if you have write permissions.")?;
// Remove comments from the original Verilog file (in place).
remove_comments_from_file(first_file)?;
verilog_files.push(testbench_path.clone());
println!("Generated testbench: {}", testbench_path);
Ok(())
} else {
Err(anyhow::anyhow!("No Verilog files provided. Please specify at least one Verilog file."))
}
}
// Compile every .v/.sv file found under `folder` with iverilog into a
// randomly named output inside the folder; returns the output path on
// success.
pub fn compile_verilog_from_folder(folder: &str) -> Result<PathBuf> {
println!("Compiling Verilog files from folder: {}", folder);
let verilog_files = collect_verilog_files(folder)?;
if verilog_files.is_empty() {
return Err(anyhow::anyhow!("No Verilog files found in the specified folder."));
}
let output_dir = Path::new(folder);
// Random name avoids clobbering any source or previous build artifact.
let random_output_name = generate_random_output_name();
let output_path = output_dir.join(&random_output_name);
let command_status = run_iverilog_command(output_path.to_str().unwrap(), &verilog_files)?;
if !command_status.success() {
return Err(anyhow::anyhow!("Compilation failed. Please check the error messages above."));
}
println!("Compilation successful. Output file: {:?}", output_path);
Ok(output_path)
}
/// Walk `folder` recursively and gather the paths of every `.v`/`.sv` file.
fn collect_verilog_files(folder: &str) -> Result<Vec<String>> {
    let mut verilog_files = Vec::new();
    for entry in WalkDir::new(folder).into_iter().filter_map(Result::ok) {
        let path = entry.path();
        if !path.is_file() {
            continue;
        }
        let is_verilog = matches!(
            path.extension().and_then(|e| e.to_str()),
            Some("v") | Some("sv")
        );
        if is_verilog {
            verilog_files.push(path.to_string_lossy().into_owned());
        }
    }
    Ok(verilog_files)
}
// Run the compiled simulation under `vvp`, asking for a VCD waveform, then
// open the result in GTKWave. Runs from the testbench's directory so the
// $dumpfile path resolves relative to it.
fn run_simulation_with_waveform(output_path: &Path) -> Result<()> {
println!("Running simulation with waveform...");
println!("Output path: {:?}", output_path);
let testbench_dir = output_path.parent().unwrap();
let vcd_path = testbench_dir.join("waveform.vcd");
let current_dir = std::env::current_dir()
.context("Failed to get current directory. Please check your file system permissions.")?;
// NOTE(review): the process-wide cwd is changed here and only restored
// further down — an early `?` return in between leaves it changed.
std::env::set_current_dir(testbench_dir)
.context("Failed to change directory to testbench location. Please ensure the directory exists and you have necessary permissions.")?;
let mut cmd = Command::new("vvp");
cmd.arg(output_path.file_name().unwrap());
// NOTE(review): confirm `-vcd=<file>` is a flag vvp accepts; the VCD name
// normally comes from $dumpfile in the testbench.
cmd.arg(format!("-vcd={}", vcd_path.file_name().unwrap().to_str().unwrap()));
let output = cmd.output()
.context("Failed to run simulation with VCD output. Debug steps:\n1. Ensure 'vvp' is installed: Run 'vvp --version' in terminal.\n2. Check if 'vvp' is in your PATH: Run 'which vvp' (Unix) or 'where vvp' (Windows).\n3. If not found, install Icarus Verilog or add its bin directory to your PATH.")?;
// Call gtkwave on the generated waveform file
println!("Opening waveform in GTKWave...");
let _gtkwave_status = Command::new("gtkwave")
.arg("waveform.vcd")
.spawn()
.context("Failed to open GTKWave. Debug steps:\n1. Ensure GTKWave is installed: Run 'gtkwave --version' in terminal.\n2. Check if 'gtkwave' is in your PATH: Run 'which gtkwave' (Unix) or 'where gtkwave' (Windows).\n3. If not found, install GTKWave or add its installation directory to your PATH.")?;
// We don't wait for GTKWave to exit, as it's a GUI application
println!("GTKWave opened successfully. You can now view the waveform.");
std::env::set_current_dir(current_dir)
.context("Failed to change back to the original directory. This is unexpected, please check your file system.")?;
// Report the simulator's failure only after restoring the cwd.
if !output.status.success() {
let error_message = String::from_utf8_lossy(&output.stderr);
return Err(anyhow::anyhow!("Simulation failed. Error details:\n{}\n\nDebugging steps:\n1. Check your Verilog code for syntax errors.\n2. Ensure all module dependencies are correctly included.\n3. Verify testbench inputs and timing.\n4. Run the simulation without waveform generation to isolate the issue.", error_message));
}
println!("Generated waveform file: {}", vcd_path.display());
println!("If GTKWave didn't open automatically, you can manually open the waveform file using GTKWave.");
Ok(())
}
/// Build a complete testbench for the module at `module_path` by assembling
/// independent sections (header, parameters, nets, DUT instantiation, clock,
/// stimulus, output-check task). `is_systemverilog` selects SV constructs
/// (`logic`, `string`) over plain-Verilog equivalents.
pub fn generate_testbench(module_path: &str, is_systemverilog: bool) -> Result<String> {
    println!("Generating testbench for module: {}", module_path);
    let (module_name, ports, parameters) = extract_module_info(module_path)?;
    println!("Module name: {}", module_name);
    println!("Ports: {:?}", ports);
    println!("Parameters: {:?}", parameters);
    // Sections are concatenated top to bottom, then the module is closed.
    let sections = [
        generate_testbench_header(&module_name),
        declare_parameters(&parameters),
        declare_wires_for_ports(&ports, is_systemverilog),
        instantiate_module(&module_name, &ports, &parameters),
        generate_clock(&ports),
        generate_initial_block(&ports, is_systemverilog),
        generate_check_outputs_task(&ports, is_systemverilog),
    ];
    let mut testbench = sections.concat();
    testbench.push_str("endmodule\n");
    Ok(testbench)
}
// Parse the module file line by line with regexes and return
// (module_name, ports, parameters) where each port is
// (direction, optional bus width like "[7:0]", name) and each parameter is
// (name, default value). Scanning starts at the `module` keyword and stops
// at the first line containing ");".
fn extract_module_info(module_path: &str) -> Result<(String, Vec<(String, Option<String>, String)>, Vec<(String, String)>)> {
let file = File::open(module_path)
.context(format!("Failed to open module file: {}. Please check if the file exists and you have read permissions.", module_path))?;
let reader = BufReader::new(file);
let mut module_name = String::new();
let mut ports = Vec::new();
let mut parameters = Vec::new();
let mut in_module = false;
// Module header: name followed by '(' or '#'.
let module_regex = Regex::new(r"module\s+(\w+)\s*(?:\(|#)").unwrap();
// Port: direction, optional net type, optional bus range, name.
let port_regex = Regex::new(r"(input|output|inout)\s+(?:reg|wire|logic)?\s*(\[.*?\])?\s*(\w+)").unwrap();
let parameter_regex = Regex::new(r"parameter\s+(\w+)\s*=\s*([^,\)]+)").unwrap();
// Fallback: bare `NAME = VALUE` pairs without the `parameter` keyword.
let inline_parameter_regex = Regex::new(r"(\w+)\s*=\s*([^,\)]+)").unwrap();
for line in reader.lines() {
let line = line?;
if !in_module {
// fancy-regex: captures() is fallible, hence Ok(Some(..)).
if let Ok(Some(captures)) = module_regex.captures(&line) {
module_name = captures[1].to_string();
in_module = true;
}
} else {
for capture_result in port_regex.captures_iter(&line) {
if let Ok(capture) = capture_result {
let direction = capture[1].to_string();
let bus_width = capture.get(2).map(|m| m.as_str().to_string());
let name = capture[3].to_string();
ports.push((direction, bus_width, name));
}
}
for capture_result in parameter_regex.captures_iter(&line) {
if let Ok(capture) = capture_result {
let name = capture[1].to_string();
let value = capture[2].to_string();
// Keyword form wins: overwrite any earlier value for this name.
if let Some(existing) = parameters.iter_mut().find(|(n, _)| n == &name) {
existing.1 = value;
} else {
parameters.push((name, value));
}
}
}
for capture_result in inline_parameter_regex.captures_iter(&line) {
if let Ok(capture) = capture_result {
let name = capture[1].to_string();
let value = capture[2].to_string();
// Inline form never overrides an already-recorded parameter.
if !parameters.iter().any(|(n, _)| n == &name) {
parameters.push((name, value));
}
}
}
// End of the port list terminates the scan.
if line.contains(");") {
break;
}
}
}
if module_name.is_empty() {
return Err(anyhow::anyhow!("Could not find module declaration in {}. Please ensure the file contains a valid Verilog or SystemVerilog module.", module_path));
}
Ok((module_name, ports, parameters))
}
/// Timescale directive plus the `<name>_tb` module opening line.
fn generate_testbench_header(module_name: &str) -> String {
    let mut header = String::from("`timescale 1ns / 1ps\n\n");
    header.push_str(&format!("module {}_tb;\n\n", module_name));
    header
}
/// Emit one `parameter NAME = VALUE;` line per extracted parameter. A value
/// whose '(' was captured without its closing ')' gets one appended so the
/// declaration stays balanced. Always ends with a blank line.
fn declare_parameters(parameters: &[(String, String)]) -> String {
    let mut declarations = String::new();
    for (name, value) in parameters {
        let mut line = format!(" parameter {} = {}", name, value);
        let unbalanced = line.contains('(') && !line.ends_with(')');
        if unbalanced {
            line.push(')');
        }
        declarations.push_str(&line);
        declarations.push_str(";\n");
    }
    declarations.push('\n');
    declarations
}
/// Declare one testbench net per DUT port: driven inputs become `reg`
/// (`logic` in SystemVerilog), all other directions are observed via `wire`.
fn declare_wires_for_ports(ports: &[(String, Option<String>, String)], is_systemverilog: bool) -> String {
    let input_type = if is_systemverilog { "logic" } else { "reg" };
    let mut declarations = String::new();
    for (direction, bus_width, name) in ports {
        let net = if direction == "input" { input_type } else { "wire" };
        match bus_width {
            Some(width) => declarations.push_str(&format!(" {} {} {};\n", net, width, name)),
            None => declarations.push_str(&format!(" {} {};\n", net, name)),
        }
    }
    declarations.push('\n');
    declarations
}
/// Instantiate the DUT as `uut`, forwarding parameters (when any exist) and
/// wiring every port to the identically-named testbench net.
fn instantiate_module(module_name: &str, ports: &[(String, Option<String>, String)], parameters: &[(String, String)]) -> String {
    let mut instantiation = String::new();
    if parameters.is_empty() {
        instantiation.push_str(&format!(" {} uut (\n", module_name));
    } else {
        instantiation.push_str(&format!(" {} #(\n", module_name));
        let last = parameters.len() - 1;
        for (i, (name, _)) in parameters.iter().enumerate() {
            let sep = if i < last { "," } else { "" };
            instantiation.push_str(&format!(" .{}({}){}\n", name, name, sep));
        }
        instantiation.push_str(" ) uut (\n");
    }
    // All ports but the last carry a trailing comma.
    let last_port = ports.len().saturating_sub(1);
    for (i, (_, _, name)) in ports.iter().enumerate() {
        let sep = if i < last_port { "," } else { "" };
        instantiation.push_str(&format!(" .{}({}){}\n", name, name, sep));
    }
    instantiation.push_str(" );\n\n");
    instantiation
}
/// If any port name contains "clk" (case-insensitively), emit a free-running
/// 10-time-unit clock generator for it; otherwise contribute nothing.
fn generate_clock(ports: &[(String, Option<String>, String)]) -> String {
    let clock = ports.iter().find(|(_, _, name)| name.to_lowercase().contains("clk"));
    match clock {
        Some((_, _, clock_name)) => format!(
            " localparam CLOCK_PERIOD = 10;\n initial begin\n {} = 0;\n forever #(CLOCK_PERIOD/2) {} = ~{};\n end\n\n",
            clock_name, clock_name, clock_name
        ),
        None => String::new(),
    }
}
/// Emit the main `initial` block: VCD dumping, zero-initialisation of every
/// non-clock input, settle delays, and `$finish`.
fn generate_initial_block(ports: &[(String, Option<String>, String)], is_systemverilog: bool) -> String {
    let mut initial_block = String::from(" initial begin\n $dumpfile(\"waveform.vcd\");\n $dumpvars(0, uut);\n\n");
    for (direction, bus_width, name) in ports {
        // The clock is driven by its own generator; only plain inputs are set.
        if direction != "input" || name.to_lowercase().contains("clk") {
            continue;
        }
        let line = match bus_width {
            None => format!(" {} = 1'b0;\n", name),
            Some(width) => {
                // "[7:0]" -> "7": the MSB index is used as a replication count.
                let width_value = width.trim_matches(|c| c == '[' || c == ']').split(':').next().unwrap_or("0");
                if is_systemverilog {
                    format!(" {} = '{{{} {{1'b0}}}};\n", name, width_value)
                } else {
                    format!(" {} = {{'b0}};\n", name)
                }
            }
        };
        initial_block.push_str(&line);
    }
    initial_block.push_str("\n #100;\n\n #1000;\n $finish;\n end\n\n");
    initial_block
}
/// Emit a `check_outputs` task that `$display`s every output port.
///
/// Bus outputs print in hex (`%h`), scalars in binary (`%b`). SystemVerilog
/// gets a real `string` argument; plain Verilog falls back to a packed
/// 32-character vector for the test-case label.
fn generate_check_outputs_task(ports: &[(String, Option<String>, String)], is_systemverilog: bool) -> String {
    let arg_decl = if is_systemverilog {
        " input string test_case;\n"
    } else {
        " input [8*32-1:0] test_case;\n"
    };
    let mut task = String::from(" task check_outputs;\n");
    task.push_str(arg_decl);
    task.push_str(" begin\n $display(\"Checking outputs for %s\", test_case);\n");
    for (direction, bus_width, name) in ports {
        if direction != "output" {
            continue;
        }
        let spec = if bus_width.is_some() { "%h" } else { "%b" };
        task.push_str(&format!(" $display(\" {} = {}\", {});\n", name, spec, name));
    }
    task.push_str(" end\n endtask\n\n");
    task
}
fn remove_comments_from_file(file_path: &str) -> Result<()> {
let file = File::open(file_path)
.context(format!("Failed to open file: {}", file_path))?;
let reader = BufReader::new(file);
let mut content = String::new();
for line in reader.lines() {
let line = line?;
let parts: Vec<&str> = line.split("//").collect();
if !parts.is_empty() {
let code_part = parts[0].trim_end();
if !code_part.is_empty() {
content.push_str(code_part);
content.push('\n');
}
}
}
fs::write(file_path, content)
.context(format!("Failed to write updated content to file: {}", file_path))?;
Ok(())
}
/// Compile the given Verilog sources with Icarus Verilog (`iverilog`).
///
/// The output binary gets a random 10-character name and is placed next to
/// the first source file; its path is returned on success.
///
/// # Errors
/// Fails when no input files are given, when `iverilog` cannot be run or
/// exits non-zero, or when the expected output binary is missing afterwards.
pub fn compile_verilog(verilog_files: &Vec<String>) -> Result<PathBuf> {
    println!("Compiling Verilog files...");
    // Bug fix: indexing [0] panicked when the caller passed an empty list.
    let first_file = verilog_files.first()
        .ok_or_else(|| anyhow::anyhow!("No Verilog files were provided for compilation."))?;
    // A bare file name has an empty parent; fall back to the current
    // directory instead of panicking on pathological inputs like "/".
    let output_dir = Path::new(first_file).parent().unwrap_or_else(|| Path::new("."));
    let random_output_name = generate_random_output_name();
    let output_path = output_dir.join(&random_output_name);
    let command_status = run_iverilog_command(output_path.to_str().unwrap(), verilog_files)?;
    if !command_status.success() {
        return Err(anyhow::anyhow!("Failed to compile Verilog files. Please check your Verilog code for syntax errors."));
    }
    if !output_path.exists() {
        return Err(anyhow::anyhow!("Output binary not found: {:?}. Compilation may have failed silently.", output_path));
    }
    println!("Compiled output: {:?}", output_path);
    Ok(output_path)
}
/// Build a random 10-character alphanumeric name for the compiled
/// simulation binary, so parallel runs do not clobber each other.
fn generate_random_output_name() -> String {
    (0..10).map(|_| fastrand::alphanumeric()).collect()
}
/// Spawn `iverilog -o <output_name> <files...>` and wait for it to finish,
/// returning the child's exit status.
fn run_iverilog_command(output_name: &str, verilog_files: &[String]) -> Result<std::process::ExitStatus> {
    Command::new("iverilog")
        .arg("-o")
        .arg(output_name)
        .args(verilog_files)
        .status()
        .context("Failed to execute Icarus Verilog compilation. Please ensure Icarus Verilog is installed and accessible.")
}
/// Run a previously compiled simulation binary and delete it afterwards.
///
/// `output_path` is joined onto the current working directory before
/// execution (Path::join keeps an already-absolute path as-is, so both
/// relative and absolute paths work). A failing simulation is reported as
/// a warning rather than an error; failure to delete the binary is
/// likewise non-fatal.
pub fn run_simulation(output_path: &PathBuf) -> Result<()> {
    println!("Running simulation...");
    let current_dir = env::current_dir()
        .context("Failed to get current directory. Please check your file system permissions.")?;
    let binary_path: PathBuf = current_dir.join(output_path);
    let status = Command::new(&binary_path)
        .status()
        .context(format!("Failed to execute simulation. Please ensure the binary at {:?} is executable.", binary_path))?;
    if !status.success() {
        eprintln!("Warning: Simulation completed with non-zero exit status. This may indicate errors in your Verilog code.");
    } else {
        println!("Simulation completed successfully.");
    }
    // Best-effort cleanup of the temporary simulation binary.
    if let Err(e) = fs::remove_file(&binary_path) {
        eprintln!("Warning: Failed to remove temporary binary file: {}. You may want to delete it manually.", e);
    }
    Ok(())
}
| rust | MIT | 03763cc40a2354d7a979a378e6830c1b4ceee44a | 2026-01-04T20:11:49.815734Z | false |
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/cmd/install.rs | src/cmd/install.rs | use anyhow::{Context, Result};
use std::process::Command;
use std::path::Path;
use std::env;
use std::fs::OpenOptions;
use std::io::Write;
use crate::cmd::{Execute, Install};
impl Execute for Install {
    /// Dispatch `vpm install <tool>` to the matching installer routine.
    /// An unrecognized tool name is reported to the user but is not an
    /// error (the command still exits successfully).
    async fn execute(&self) -> Result<()> {
        match self.tool_name.as_str() {
            "verilator" => {
                println!("Installing Verilator...");
                install_verilator()?;
            },
            "icarus-verilog" => {
                println!("Installing Icarus Verilog...");
                install_icarus_verilog()?;
            },
            "chipyard" => {
                println!("Installing Chipyard...");
                install_chipyard()?;
            },
            "openroad" => {
                println!("Installing OpenROAD...");
                install_openroad()?;
            },
            "edalize" => {
                println!("Installing Edalize...");
                install_edalize()?;
            },
            "yosys" => {
                println!("Installing Yosys...");
                install_yosys()?;
            },
            "riscv" => {
                println!("Installing RISC-V toolchain...");
                install_riscv()?;
            },
            "nextpnr" => {
                println!("Installing NextPNR...");
                install_nextpnr()?;
            },
            "project-xray" => {
                println!("Installing Project XRay...");
                install_xray()?;
            },
            _ => {
                println!("Tool '{}' is not recognized for installation.", self.tool_name);
            }
        }
        Ok(())
    }
}
/// Return whether passwordless `sudo` is currently available.
///
/// Runs `sudo -n true`, which fails immediately instead of prompting for a
/// password. Bug fix: the old `.expect(..)` panicked when `sudo` itself was
/// not installed; treat a failed spawn as "no sudo access" instead.
fn has_sudo_access() -> bool {
    Command::new("sudo")
        .arg("-n")
        .arg("true")
        .output()
        .map(|output| output.status.success())
        .unwrap_or(false)
}
/// Install Verilator via the platform package manager (Homebrew on macOS,
/// apt-get or pacman on Linux), falling back to a from-source build on
/// Linux when passwordless sudo is unavailable.
fn install_verilator() -> Result<()> {
    println!("Installing Verilator...");
    #[cfg(target_os = "macos")]
    {
        println!("Running on macOS...");
        // Install Verilator using Homebrew on macOS
        let status = Command::new("brew")
            .arg("install")
            .arg("verilator")
            .status()
            .context("Failed to install Verilator using Homebrew")?;
        if !status.success() {
            println!("Failed to install Verilator on macOS.");
            return Ok(());
        }
    }
    #[cfg(target_os = "linux")]
    {
        println!("Running on Linux...");
        if has_sudo_access() {
            // Install Verilator using package manager
            if !is_arch_distro() {
                // Install Verilator using apt-get on non-Arch Linux
                let status = Command::new("sudo")
                    .args(&["apt-get", "update"])
                    .status()
                    .context("Failed to update package lists")?;
                if !status.success() {
                    println!("Failed to update package lists on Linux.");
                    return Ok(());
                }
                let status = Command::new("sudo")
                    .args(&["apt-get", "install", "-y", "verilator"])
                    .status()
                    .context("Failed to install Verilator using apt-get")?;
                if !status.success() {
                    println!("Failed to install Verilator on Linux.");
                    return Ok(());
                }
            } else {
                // Install Verilator using pacman on Arch Linux
                let status = Command::new("sudo")
                    .args(&["pacman", "-Syu", "--noconfirm", "verilator"])
                    .status()
                    .context("Failed to install Verilator using pacman")?;
                if !status.success() {
                    println!("Failed to install Verilator on Arch Linux.");
                    return Ok(());
                }
            }
        } else {
            println!("No sudo access. Installing Verilator from source...");
            install_verilator_from_source()?;
        }
    }
    #[cfg(not(any(target_os = "macos", target_os = "linux")))]
    {
        println!("Unsupported operating system. Please install Verilator manually.");
        return Ok(());
    }
    println!("Verilator installed successfully.");
    Ok(())
}
/// Build Verilator from source into `$HOME/verilator` (clone, autoconf,
/// configure with a user-local prefix, make, make install) and tell the
/// user how to add it to PATH.
///
/// NOTE(review): the exit statuses of the build steps are given error
/// context for spawn failures but are never checked for success, so a
/// failed `configure`/`make` only surfaces when a later step fails.
fn install_verilator_from_source() -> Result<()> {
    // Create a directory for the installation
    let install_dir = Path::new(&std::env::var("HOME")?).join("verilator");
    std::fs::create_dir_all(&install_dir)?;
    // Clone the repository
    Command::new("git")
        .args(&["clone", "https://github.com/verilator/verilator"])
        .current_dir(&install_dir)
        .status()
        .context("Failed to clone Verilator repository")?;
    let source_dir = install_dir.join("verilator");
    // Configure with custom prefix
    Command::new("autoconf")
        .current_dir(&source_dir)
        .status()
        .context("Failed to run autoconf for Verilator")?;
    Command::new("./configure")
        .arg(format!("--prefix={}", install_dir.display()))
        .current_dir(&source_dir)
        .status()
        .context("Failed to configure Verilator")?;
    // Build
    Command::new("make")
        .current_dir(&source_dir)
        .status()
        .context("Failed to build Verilator")?;
    // Install
    Command::new("make")
        .arg("install")
        .current_dir(&source_dir)
        .status()
        .context("Failed to install Verilator")?;
    // Add installation directory to PATH
    println!("Verilator installed successfully in {}.", install_dir.display());
    println!("Please add the following line to your shell configuration file (e.g., .bashrc or .zshrc):");
    println!("export PATH=$PATH:{}/bin", install_dir.display());
    Ok(())
}
fn install_icarus_verilog() -> Result<()> {
println!("Installing Icarus Verilog...");
#[cfg(target_os = "macos")]
{
println!("Running on macOS...");
// Install Icarus Verilog using Homebrew on macOS
let status = Command::new("brew")
.arg("install")
.arg("icarus-verilog")
.status()
.context("Failed to install Icarus Verilog using Homebrew")?;
if !status.success() {
println!("Failed to install Icarus Verilog on macOS.");
return Ok(());
}
}
#[cfg(target_os = "linux")]
{
if !is_arch_distro() {
println!("Running on Linux...");
// Install Icarus Verilog using apt-get on Linux
let status = Command::new("sudo")
.arg("apt-get")
.arg("update")
.status()
.context("Failed to update package lists")?;
if !status.success() {
println!("Failed to update package lists on Linux.");
return Ok(());
}
let status = Command::new("sudo")
.arg("apt-get")
.arg("install")
.arg("-y")
.arg("iverilog")
.status()
.context("Failed to install Icarus Verilog using apt-get")?;
if !status.success() {
println!("Failed to install Icarus Verilog on Linux using Apt-Get.");
let install_dir = Path::new(&std::env::var("HOME")?).join("icarus_verilog");
std::fs::create_dir_all(&install_dir)?;
Command::new("git")
.args(&["clone", "https://github.com/steveicarus/iverilog.git"])
.current_dir(&install_dir)
.status()
.context("Failed to clone Icarus Verilog repository")?;
let source_dir = install_dir.join("iverilog");
Command::new("sh")
.arg("autoconf.sh")
.current_dir(&source_dir)
.status()
.context("Failed to generate configure script")?;
Command::new("./configure")
.arg(format!("--prefix={}", install_dir.display()))
.current_dir(&source_dir)
.status()
.context("Failed to configure Icarus Verilog")?;
Command::new("make")
.current_dir(&source_dir)
.status()
.context("Failed to build Icarus Verilog")?;
Command::new("make")
.arg("install")
.current_dir(&source_dir)
.status()
.context("Failed to install Icarus Verilog")?;
println!("Icarus Verilog installed successfully.");
println!("Please add the following line to your shell configuration file (e.g., .bashrc or .zshrc):");
println!("export PATH=$PATH:{}/bin", install_dir.display());
return Ok(());
} else {
} else {
println!("Running on Arch Linux...");
// Install Icarus Verilog using pacman on Arch Linux
let status = Command::new("sudo")
.arg("pacman")
.arg("-Syu")
.arg("--noconfirm")
.arg("iverilog")
.status()
.context("Failed to install Icarus Verilog using pacman")?;
if !status.success() {
println!("Failed to install Icarus Verilog on Arch Linux.");
return Ok(());
}
}
}
#[cfg(not(any(target_os = "macos", target_os = "linux")))]
{
println!("Unsupported operating system. Please install Icarus Verilog manually.");
return Ok(());
}
println!("Icarus Verilog installed successfully.");
Ok(())
}
/// Download a Chipyard binary into /usr/local/bin and mark it executable.
///
/// NOTE(review): writing to /usr/local/bin without sudo will fail for
/// non-root users, and it is unverified that the release URL actually
/// serves a standalone binary named `chipyard` — confirm both.
fn install_chipyard() -> Result<()> {
    println!("Installing Chipyard...");
    // Define the installation directory
    let install_dir = Path::new("/usr/local/bin");
    // Download Chipyard binary
    let status = Command::new("curl")
        .args(&["-L", "https://github.com/ucb-bar/chipyard/releases/latest/download/chipyard", "-o", install_dir.join("chipyard").to_str().unwrap()])
        .status()
        .context("Failed to download Chipyard binary")?;
    if !status.success() {
        println!("Failed to download Chipyard binary.");
        return Ok(());
    }
    // Make the binary executable
    let status = Command::new("chmod")
        .args(&["+x", install_dir.join("chipyard").to_str().unwrap()])
        .status()
        .context("Failed to make Chipyard binary executable")?;
    if !status.success() {
        println!("Failed to make Chipyard binary executable.");
        return Ok(());
    }
    println!("Chipyard installed successfully.");
    Ok(())
}
/// Install Edalize and FuseSoC into the user's Python environment via
/// `pip install --user`, preferring python3/pip3 when available.
fn install_edalize() -> Result<()> {
    println!("Installing Edalize...");
    let (_python_cmd, pip_cmd) = if check_command("python3") {
        ("python3", "pip3")
    } else if check_command("python") {
        ("python", "pip")
    } else {
        println!("Neither Python 3 nor Python 2 is installed. Please install Python before proceeding.");
        return Ok(());
    };
    if !check_command(pip_cmd) {
        println!("{} is not installed. Please install pip before proceeding.", pip_cmd);
        return Ok(());
    }
    // Install Edalize
    let status = Command::new(pip_cmd)
        .arg("install")
        .arg("--user")
        .arg("edalize")
        .status()
        .context("Failed to install Edalize using pip")?;
    if !status.success() {
        println!("Failed to install Edalize.");
        return Ok(());
    }
    // Install FuseSoC
    let status = Command::new(pip_cmd)
        .arg("install")
        .arg("--user")
        .arg("fusesoc")
        .status()
        .context("Failed to install FuseSoC using pip")?;
    if !status.success() {
        println!("Failed to install FuseSoC.");
        return Ok(());
    }
    println!("Edalize installed successfully.");
    Ok(())
}
/// Return whether `cmd` exists and responds successfully to `--version`.
///
/// Bug fix: `.output().is_ok()` only reports that the process could be
/// spawned; also require a zero exit status so a command that exists but
/// fails `--version` is not reported as usable.
fn check_command(cmd: &str) -> bool {
    Command::new(cmd)
        .arg("--version")
        .output()
        .map(|output| output.status.success())
        .unwrap_or(false)
}
/// Install OpenROAD via apt (Linux) or Homebrew (macOS).
///
/// NOTE(review): stock Ubuntu repositories do not ship an "openroad"
/// package; this presumably relies on a user-configured repo — confirm.
fn install_openroad() -> Result<()> {
    println!("Installing OpenROAD...");
    #[cfg(target_os = "linux")]
    {
        println!("Running on Linux...");
        // Install OpenROAD using apt on Linux
        let status = Command::new("sudo")
            .arg("apt")
            .arg("update")
            .status()
            .context("Failed to update package lists")?;
        if !status.success() {
            println!("Failed to update package lists on Linux.");
            return Ok(());
        }
        let status = Command::new("sudo")
            .arg("apt")
            .arg("install")
            .arg("-y")
            .arg("openroad")
            .status()
            .context("Failed to install OpenROAD using apt")?;
        if !status.success() {
            println!("Failed to install OpenROAD on Linux.");
            return Ok(());
        }
    }
    #[cfg(target_os = "macos")]
    {
        println!("Running on macOS...");
        // Install OpenROAD using Homebrew on macOS
        let status = Command::new("brew")
            .arg("install")
            .arg("openroad/openroad/openroad")
            .status()
            .context("Failed to install OpenROAD using Homebrew")?;
        if !status.success() {
            println!("Failed to install OpenROAD on macOS.");
            return Ok(());
        }
    }
    #[cfg(not(any(target_os = "macos", target_os = "linux")))]
    {
        println!("Unsupported operating system. Please install OpenROAD manually.");
        return Ok(());
    }
    println!("OpenROAD installed successfully.");
    Ok(())
}
/// Install Yosys (via Homebrew) and, if missing, build ABC from source and
/// move it to /usr/local/bin.
///
/// NOTE(review): only macOS is handled; on any other OS the cfg block is
/// compiled out and the final success message prints without installing
/// anything — confirm whether a Linux path was intended.
fn install_yosys() -> Result<()> {
    println!("Installing Yosys and ABC...");
    #[cfg(target_os = "macos")]
    {
        println!("Running on macOS...");
        // Install Yosys using Homebrew on macOS
        let status = Command::new("brew")
            .arg("install")
            .arg("yosys")
            .status()
            .context("Failed to install Yosys using Homebrew")?;
        if !status.success() {
            println!("Failed to install Yosys on macOS.");
            return Ok(());
        }
        // Install ABC by git cloning and making
        if !Path::new("/usr/local/bin/abc").exists() {
            println!("Installing ABC...");
            let status = Command::new("git")
                .args(&["clone", "https://github.com/berkeley-abc/abc.git"])
                .status()
                .context("Failed to clone ABC repository")?;
            if !status.success() {
                println!("Failed to clone ABC repository.");
                return Ok(());
            }
            let status = Command::new("make")
                .current_dir("abc")
                .status()
                .context("Failed to make ABC")?;
            if !status.success() {
                println!("Failed to make ABC.");
                return Ok(());
            }
            let status = Command::new("sudo")
                .args(&["mv", "abc/abc", "/usr/local/bin/"])
                .status()
                .context("Failed to move ABC to /usr/local/bin/")?;
            if !status.success() {
                println!("Failed to move ABC to /usr/local/bin/.");
                return Ok(());
            }
            println!("ABC installed successfully.");
        } else {
            println!("ABC is already installed.");
        }
    }
    println!("Yosys and ABC installed successfully.");
    Ok(())
}
/// Clone, build, and install the RISC-V GNU toolchain into /opt/riscv,
/// append it to the user's PATH via ~/.bashrc, and verify the compiler runs.
///
/// NOTE(review): intermediate command exit statuses are not checked, so a
/// failed clone/configure/make only surfaces when a later step fails.
fn install_riscv() -> Result<()> {
    println!("Installing RISC-V toolchain...");
    Command::new("git")
        .args(&["clone", "--recursive", "https://github.com/riscv/riscv-gnu-toolchain.git"])
        .status()?;
    // Change to the cloned directory
    env::set_current_dir("riscv-gnu-toolchain")?;
    // Step 2: Install prerequisites (for Ubuntu/Debian)
    // Bug fix: without "-y", apt-get stops at an interactive confirmation
    // prompt, hanging the installer in non-interactive shells.
    Command::new("sudo")
        .args(&["apt-get", "install", "-y", "autoconf", "automake", "autotools-dev", "curl", "python3", "libmpc-dev", "libmpfr-dev", "libgmp-dev", "gawk", "build-essential", "bison", "flex", "texinfo", "gperf", "libtool", "patchutils", "bc", "zlib1g-dev", "libexpat-dev"])
        .status()?;
    // Step 3: Create install directory
    Command::new("sudo")
        .args(&["mkdir", "-p", "/opt/riscv"])
        .status()?;
    // Step 4: Configure and build the toolchain
    Command::new("./configure")
        .arg("--prefix=/opt/riscv")
        .status()?;
    Command::new("sudo")
        .arg("make")
        .status()?;
    // Step 5: Add the toolchain to PATH
    let home = env::var("HOME")?;
    let bashrc_path = Path::new(&home).join(".bashrc");
    let mut bashrc = OpenOptions::new()
        .append(true)
        .open(bashrc_path)?;
    writeln!(bashrc, "\nexport PATH=$PATH:/opt/riscv/bin")?;
    // Step 6: Verify installation
    Command::new("/opt/riscv/bin/riscv64-unknown-elf-gcc")
        .arg("--version")
        .status()?;
    println!("RISC-V GNU toolchain installed successfully!");
    println!("Please restart your terminal or run 'source ~/.bashrc' to update your PATH.");
    Ok(())
}
/// Install NextPNR via Homebrew.
///
/// NOTE(review): this unconditionally shells out to `brew`, so it only
/// works where Homebrew is available — confirm the macOS-only intent.
fn install_nextpnr() -> Result<()> {
    println!("Installing NextPNR...");
    let status = Command::new("brew")
        .args(&["install", "nextpnr"])
        .status()
        .context("Failed to install NextPNR using Homebrew")?;
    if status.success() {
        println!("NextPNR installed successfully.");
    } else {
        println!("Failed to install NextPNR on macOS.");
    }
    Ok(())
}
/// Install Project XRay via Homebrew.
///
/// NOTE(review): this unconditionally shells out to `brew`, so it only
/// works where Homebrew is available — confirm the macOS-only intent.
fn install_xray() -> Result<()> {
    println!("Installing Project XRay...");
    let status = Command::new("brew")
        .args(&["install", "xray"])
        .status()
        .context("Failed to install Project XRay using Homebrew")?;
    if status.success() {
        println!("Project XRay installed successfully.");
    } else {
        println!("Failed to install Project XRay on macOS.");
    }
    Ok(())
}
/// Heuristic Arch Linux detection: does `pacman --version` run successfully?
///
/// Bug fix: `.output().is_ok()` only checked that pacman could be spawned;
/// also require a successful exit, mirroring `check_command`.
fn is_arch_distro() -> bool {
    Command::new("pacman")
        .arg("--version")
        .output()
        .map(|output| output.status.success())
        .unwrap_or(false)
}
| rust | MIT | 03763cc40a2354d7a979a378e6830c1b4ceee44a | 2026-01-04T20:11:49.815734Z | false |
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/cmd/upgrade.rs | src/cmd/upgrade.rs | use anyhow::Result;
use std::process::Command;
use crate::cmd::{Execute, Upgrade};
use crate::config_man::set_version;
impl Execute for Upgrade {
    /// Re-install the latest VPM build, then persist the newly detected
    /// git-tag version (when one can be determined) into the user config.
    async fn execute(&self) -> Result<()> {
        println!("Upgrading VPM...");
        upgrade_vpm()?;
        let version = get_latest_version()?;
        if !version.is_empty() {
            set_version(&version)?;
        }
        println!("VPM upgrade completed successfully.");
        Ok(())
    }
}
/// Perform the actual upgrade: pipe the hosted install script into `sh`
/// on Unix, or print manual download instructions on Windows.
///
/// NOTE(review): `curl ... | sh` executes unreviewed remote code; consider
/// downloading and checksumming the installer before running it.
fn upgrade_vpm() -> Result<()> {
    if cfg!(unix) {
        let output = Command::new("sh")
            .arg("-c")
            .arg("curl -sSfL https://raw.githubusercontent.com/getinstachip/vpm-internal/main/install.sh | sh")
            .output()?;
        if !output.status.success() {
            return Err(anyhow::anyhow!("Upgrade command failed"));
        }
    } else if cfg!(windows) {
        println!("To upgrade VPM on Windows, please follow these steps:");
        println!("1. Visit https://github.com/getinstachip/vpm/releases/latest");
        println!("2. Download the appropriate .exe file for your system");
        println!("3. Run the downloaded .exe file to complete the upgrade");
        return Ok(());
    } else {
        return Err(anyhow::anyhow!("Unsupported operating system"));
    }
    Ok(())
}
fn get_latest_version() -> Result<String> {
let output = Command::new("git")
.arg("describe ")
.arg("--tags")
.arg("--abbrev=0")
.output()?;
Ok(String::from_utf8(output.stdout)?)
} | rust | MIT | 03763cc40a2354d7a979a378e6830c1b4ceee44a | 2026-01-04T20:11:49.815734Z | false |
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/cmd/mod.rs | src/cmd/mod.rs | mod cmd;
mod upgrade;
mod include;
mod update;
mod remove;
mod dotf;
mod list;
mod install;
mod sim;
mod docs;
mod synth;
mod run;
mod load;
mod config;
use anyhow::Result;
pub use crate::cmd::cmd::*;
use crate::config_man::send_event;
/// Common interface implemented by the top-level `Cmd` enum and by every
/// subcommand: asynchronously perform the command's effect.
pub trait Execute {
    async fn execute(&self) -> Result<()>;
}
impl Execute for Cmd {
    /// Run the selected subcommand, then record a telemetry event named
    /// after it. As before, the event is sent only when the subcommand
    /// succeeded (`?` returns early on failure).
    async fn execute(&self) -> Result<()> {
        // Dispatch once and remember the event name instead of repeating
        // the execute/send_event/Ok boilerplate across thirteen arms.
        let event = match self {
            Cmd::Upgrade(cmd) => { cmd.execute().await?; "upgrade" },
            Cmd::Include(cmd) => { cmd.execute().await?; "include" },
            Cmd::Update(cmd) => { cmd.execute().await?; "update" },
            Cmd::Remove(cmd) => { cmd.execute().await?; "remove" },
            Cmd::Dotf(cmd) => { cmd.execute().await?; "dotf" },
            Cmd::Install(cmd) => { cmd.execute().await?; "install" },
            Cmd::List(cmd) => { cmd.execute().await?; "list" },
            Cmd::Sim(cmd) => { cmd.execute().await?; "sim" },
            Cmd::Docs(cmd) => { cmd.execute().await?; "docs" },
            Cmd::Synth(cmd) => { cmd.execute().await?; "synth" },
            Cmd::Load(cmd) => { cmd.execute().await?; "load" },
            Cmd::Run(cmd) => { cmd.execute().await?; "run" },
            Cmd::Config(cmd) => { cmd.execute().await?; "config" },
        };
        send_event(event.to_string()).await?;
        Ok(())
    }
}
| rust | MIT | 03763cc40a2354d7a979a378e6830c1b4ceee44a | 2026-01-04T20:11:49.815734Z | false |
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/cmd/run.rs | src/cmd/run.rs | use anyhow::Result;
use crate::cmd::{Execute, Run};
impl Execute for Run {
    /// `vpm run` is currently a no-op placeholder.
    async fn execute(&self) -> Result<()> {
        Ok(())
    }
}
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/cmd/synth.rs | src/cmd/synth.rs | use anyhow::{Result, Context};
use std::path::PathBuf;
use std::process::Command;
use std::fs::File;
use std::io::Write;
use crate::cmd::{Execute, Synth};
impl Execute for Synth {
    /// Forward the CLI flags to the synthesis driver.
    async fn execute(&self) -> Result<()> {
        synthesize_design(
            &self.top_module_path,
            self.riscv,
            self.core_path.as_ref(),
            &self.board,
            self.gen_yosys_script
        )
    }
}
/// Drive a Yosys synthesis run for `top_module_path`.
///
/// `board == Some("xilinx")` (case-insensitive) selects the Xilinx
/// 7-series (Artix-7) flow; `None` selects the generic flow; any other
/// board is rejected. When `gen_yosys_script` is set, the generated script
/// is also saved next to the top module for inspection.
fn synthesize_design(
    top_module_path: &str,
    riscv: bool,
    core_path: Option<&String>,
    board: &Option<String>,
    gen_yosys_script: bool
) -> Result<()> {
    let top_module_path = PathBuf::from(top_module_path);
    // The fourth tuple element (default output path) is discarded here and
    // recomputed per-board below.
    let (input_file, module_name, parent_dir, _) = extract_path_info(&top_module_path);
    let script_content = match board {
        Some(board) if board.to_lowercase() == "xilinx" => {
            let board_name = "artix7";
            let output_file = format!("{}/{}_{}_{}_synth.v", parent_dir, module_name, board_name, "xilinx");
            generate_xilinx_script_content(&input_file, riscv, core_path.cloned(), &module_name, &output_file)?
        },
        None => {
            let output_file = format!("{}/{}_synth.v", parent_dir, module_name);
            generate_yosys_script_content(&input_file, &module_name, &output_file)
        },
        Some(other) => {
            return Err(anyhow::anyhow!("Unsupported board: {}", other));
        }
    };
    if gen_yosys_script {
        let script_file = PathBuf::from(&parent_dir).join(format!("{}_synth_script.ys", module_name));
        write_script_to_file(&script_file, &script_content)?;
        println!("Yosys script generated at: {:?}", script_file);
    }
    run_yosys_with_script_content(&script_content)?;
    println!("Synthesis completed successfully.");
    Ok(())
}
/// Derive (input file, module name, parent directory, default synthesized
/// output path) from the path of the top module.
///
/// Panics on paths without a valid UTF-8 name or without a parent
/// component (e.g. "/"), matching the original unwrap behavior.
fn extract_path_info(top_module_path: &PathBuf) -> (String, String, String, String) {
    let input_file = top_module_path.to_str().unwrap().to_string();
    let top_module = top_module_path
        .file_stem()
        .unwrap()
        .to_str()
        .unwrap()
        .to_string();
    let parent_dir = top_module_path.parent().unwrap().to_string_lossy().into_owned();
    let output_file = format!("{}/{}_synth.v", parent_dir, top_module);
    (input_file, top_module, parent_dir, output_file)
}
/// Build the generic Yosys script: read, synth against the top module,
/// optimize, and write the synthesized netlist.
fn generate_yosys_script_content(input_file: &str, top_module: &str, output_file: &str) -> String {
    format!(
        r#"
    # Read the Verilog file
    read_verilog {input_file}
    # Synthesize the design
    synth -top {top_module}
    # Optimize the design
    opt
    # Write the synthesized design
    write_verilog {output_file}
    "#
    )
}
/// Build the Xilinx 7-series Yosys script: read the top module (and,
/// for RISC-V builds, the core), run `synth_xilinx`, map with ABC, and
/// write both a Verilog netlist and an EDIF file.
///
/// # Errors
/// Fails when `riscv` is set but no `core_path` was supplied.
///
/// NOTE(review): both `write_verilog` and `write_edif` receive
/// `output_file` (the EDIF just appends ".edif") — confirm the EDIF name
/// ending in ".v.edif" is intended.
fn generate_xilinx_script_content(top_module_path_str: &str, riscv: bool, core_path: Option<String>, module_name: &str, output_file: &str) -> Result<String> {
    let mut script_content = format!(
        r#"
    # Read the SystemVerilog file
    read_verilog -sv {top_module_path_str}
    "#
    );
    if riscv {
        if let Some(core_path) = core_path {
            script_content.push_str(&format!(
                r#"
    # Read the RISC-V core
    read_verilog -sv {core_path}
    "#
            ));
        } else {
            return Err(anyhow::anyhow!("RISC-V core path is required when riscv flag is set"));
        }
    }
    script_content.push_str(&format!(
        r#"
    # Synthesize for Xilinx 7 series (Artix-7)
    synth_xilinx -top {module_name} -family xc7
    # Optimize the design
    opt
    # Map to Xilinx 7 series cells
    abc -lut 6
    # Clean up
    clean
    # Write the synthesized design to a Verilog file
    write_verilog {output_file}
    write_edif {output_file}.edif
    # Print statistics
    stat
    "#
    ));
    Ok(script_content)
}
/// Write the Yosys script to disk, creating or truncating the file.
fn write_script_to_file(script_file: &PathBuf, script_content: &str) -> Result<()> {
    File::create(script_file)?.write_all(script_content.as_bytes())?;
    Ok(())
}
/// Execute Yosys with the script passed inline via `-p`, echoing its
/// stdout and turning a non-zero exit into an error carrying stderr.
fn run_yosys_with_script_content(script_content: &str) -> Result<()> {
    let output = Command::new("yosys")
        .arg("-p")
        .arg(script_content)
        .output()
        .context("Failed to execute Yosys")?;
    println!("Yosys output:");
    println!("{}", String::from_utf8_lossy(&output.stdout));
    if !output.status.success() {
        let error_message = String::from_utf8_lossy(&output.stderr);
        return Err(anyhow::anyhow!("Yosys synthesis failed: {}", error_message));
    }
    Ok(())
}
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/cmd/load.rs | src/cmd/load.rs | use anyhow::Result;
use std::path::Path;
use std::process::Command;
use crate::cmd::{Execute, Load};
impl Execute for Load {
    /// Bitstream-load entry point. Only the RISC-V/Xilinx path is
    /// implemented; anything else panics via `unimplemented!`.
    async fn execute(&self) -> Result<()> {
        let top_module_path = Path::new(&self.top_module_path);
        let constraints_path = Path::new(&self.constraints_path);
        if self.riscv {
            load_xilinx(top_module_path, constraints_path)?;
        } else {
            unimplemented!("Non RISC-V loading not yet implemented");
        }
        Ok(())
    }
}
/// Xilinx xc7a35t bitstream pipeline: EDIF -> JSON (yosys) ->
/// FASM (nextpnr-xilinx) -> frames (fasm2frames) -> output.bit
/// (xc7frames2bit).
///
/// NOTE(review): the device (xc7a35tcsg324-1) and the chipdb/part files
/// under vpm_modules/ are hard-coded, intermediate artifacts are written
/// into the current directory, and the external tools' exit statuses are
/// not checked for success — confirm these are acceptable.
fn load_xilinx(edif_path: &Path, constraints_path: &Path) -> Result<()> {
    let edif_path_str = edif_path.to_str().unwrap();
    let constraints_path_str = constraints_path.to_str().unwrap();
    Command::new("yosys")
        .args(&["-p", &format!("read_edif {}; write_json design.json", edif_path_str)])
        .status()?;
    Command::new("nextpnr-xilinx")
        .args(&[
            "--chipdb", "vpm_modules/chipdb-xc7a35t.bin",
            "--xdc", constraints_path_str,
            "--json", "design.json",
            "--write", "output.fasm",
            "--device", "xc7a35tcsg324-1"
        ])
        .status()?;
    let fasm_output = Command::new("fasm2frames")
        .args(&["--part", "xc7a35tcsg324-1", "output.fasm"])
        .output()?;
    std::fs::write("output.frames", fasm_output.stdout)?;
    Command::new("xc7frames2bit")
        .args(&[
            "--part_file", "vpm_modules/xc7a35tcsg324-1.yaml",
            "--part_name", "xc7a35tcsg324-1",
            "--frm_file", "output.frames",
            "--output_file", "output.bit"
        ])
        .status()?;
    println!("Bitstream generated successfully: output.bit");
    Ok(())
}
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/cmd/docs.rs | src/cmd/docs.rs | use anyhow::{Result, Context, anyhow};
use reqwest::Client;
use std::path::PathBuf;
use serde_json::json;
use std::fs;
use indicatif::{ProgressBar, ProgressStyle};
use std::process::{Command, Stdio};
use crate::cmd::{Execute, Docs};
use crate::config_man::{decrypt_docs_count, encrypt_docs_count};
impl Execute for Docs {
    /// Generate a README for a Verilog module, either fetched from a repo
    /// URL (`from_repo`) or read from a local path, subject to a
    /// 10-generation credit limit stored encrypted in the user's config.
    async fn execute(&self) -> Result<()> {
        let docs_count = decrypt_docs_count()?;
        if docs_count >= 10 {
            // Typo fix in the user-facing message:
            // "betterdocumentation" -> "better documentation".
            println!("You have used all your documentation generation credits. Consider upgrading to VPM Pro for unlimited and better documentation generation.");
            return Ok(());
        }
        if self.from_repo {
            let content = fetch_module_content(&self.module_path).await
                .context("Failed to fetch module content. Please check your internet connection and ensure the provided URL is correct.")?;
            // Derive "<stem>" from the URL's last path segment to name the
            // destination folder and README.
            let file_name = self.module_path.split('/').last().unwrap_or(&self.module_path);
            let folder_name = file_name.split('.').next().unwrap_or(file_name);
            let destination = PathBuf::from("./vpm_modules").join(folder_name);
            fs::create_dir_all(&destination)
                .context("Failed to create destination directory. Please check if you have write permissions in the current directory.")?;
            if self.offline {
                generate_docs_offline(&self.module_path, &content, Some(destination.join(format!("{}_README.md", folder_name)))).await
                    .context("Failed to generate documentation offline. Please check the module content and try again.")?;
            } else {
                generate_docs(&self.module_path, &content, Some(destination.join(format!("{}_README.md", folder_name)))).await
                    .context("Failed to generate documentation. Please check the module content and try again.")?;
            }
        } else {
            let full_module_path = PathBuf::from(&self.module_path);
            if full_module_path.exists() {
                let content = fs::read_to_string(&full_module_path)
                    .with_context(|| format!("Failed to read module file: {}. Please ensure you have read permissions for this file.", full_module_path.display()))?;
                println!("Generating documentation for local module '{}'", self.module_path);
                let readme_path = full_module_path.with_file_name(format!("{}_README.md", full_module_path.file_stem().unwrap().to_str().unwrap()));
                if self.offline {
                    generate_docs_offline(&self.module_path, &content, Some(readme_path)).await
                        .context("Failed to generate documentation offline for the local module. Please check the module content and try again.")?;
                } else {
                    generate_docs(&self.module_path, &content, Some(readme_path)).await
                        .context("Failed to generate documentation for the local module. Please check the module content and try again.")?;
                }
            } else {
                return Err(anyhow!("Module '{}' not found in vpm_modules. Please provide a URL to a repository containing the module, or ensure the module exists in the correct location.", self.module_path));
            }
        }
        // Charge one credit only after a successful generation.
        encrypt_docs_count(docs_count + 1)?;
        println!("Documentation generated successfully. You have used {} of your 10 credits.", docs_count + 1);
        Ok(())
    }
}
/// Download a file's raw text from a GitHub blob URL by rewriting it to
/// the raw.githubusercontent.com equivalent
/// (github.com/u/r/blob/main/f.v -> raw.githubusercontent.com/u/r/main/f.v).
async fn fetch_module_content(url: &str) -> Result<String> {
    let client = reqwest::Client::new();
    // Extract the raw content URL
    let raw_url = url.replace("github.com", "raw.githubusercontent.com")
        .replace("/blob/", "/");
    println!("Fetching content from URL: {}", raw_url);
    // Fetch the content
    let response = client.get(&raw_url).send().await?;
    if !response.status().is_success() {
        return Err(anyhow::anyhow!("Failed to fetch module content: HTTP {}", response.status()));
    }
    let content = response.text().await?;
    Ok(content)
}
/// Decode the backslash escape sequences (`\n`, `\'`, `\"`, `\\`) that the
/// documentation API returns inside its response body.
///
/// Bug fix: the previous chain of `str::replace` calls processed `\n`
/// before `\\`, so an escaped backslash followed by `n` (the three source
/// characters `\`, `\`, `n`) was wrongly decoded into a real newline. A
/// single left-to-right pass handles each escape exactly once; unknown
/// escapes are passed through unchanged.
fn format_text(text: &str) -> String {
    let mut out = String::with_capacity(text.len());
    let mut chars = text.chars();
    while let Some(c) = chars.next() {
        if c != '\\' {
            out.push(c);
            continue;
        }
        match chars.next() {
            Some('n') => out.push('\n'),
            Some('\'') => out.push('\''),
            Some('"') => out.push('"'),
            Some('\\') => out.push('\\'),
            // Unknown escape: keep both characters verbatim.
            Some(other) => {
                out.push('\\');
                out.push(other);
            }
            // Trailing lone backslash: keep it.
            None => out.push('\\'),
        }
    }
    out
}
/// Send the module source to the hosted documentation API and write the
/// returned README either to `full_module_path` or, when absent, under
/// ./vpm_modules as "<module>_README.md".
async fn generate_docs(module_path: &str, content: &str, full_module_path: Option<PathBuf>) -> Result<()> {
    let pb = ProgressBar::new(100);
    pb.set_style(ProgressStyle::default_bar()
        .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len} ({eta}) {msg}")
        .unwrap_or_else(|_| ProgressStyle::default_bar())
        .progress_chars("#>-"));
    pb.set_position(33);
    pb.set_message("Generating documentation...");
    let client = Client::new();
    let api_url = "https://bmniatl2bh.execute-api.us-east-1.amazonaws.com/dev/getApiKey";
    let response = client.post(api_url)
        .header("Content-Type", "application/json")
        .json(&json!({ "code": content }))
        .send().await
        .context("Failed to send request to documentation generation API. Please check your internet connection and try again.")?;
    let documentation = format_text(&response.text().await
        .context("Failed to read response from documentation generation API. The API might be experiencing issues. Please try again later.")?);
    pb.set_position(66);
    pb.set_message("Writing documentation to file...");
    let readme_path = if let Some(path) = full_module_path {
        path
    } else {
        // module_name contains no '/' (it is the last path segment), so
        // join(..).parent() strips it straight back off: dir is
        // "./vpm_modules" and the README lands directly inside it.
        let module_name = module_path.rsplit('/').next().unwrap_or(module_path);
        let dir = PathBuf::from("./vpm_modules").join(module_name).parent().unwrap().to_path_buf();
        fs::create_dir_all(&dir)
            .with_context(|| format!("Failed to create directory: {}. Please ensure you have write permissions in this location.", dir.display()))?;
        dir.join(format!("{}_README.md", module_name))
    };
    tokio::fs::write(&readme_path, documentation).await
        .with_context(|| format!("Failed to write documentation to file: {}. Please ensure you have write permissions in this location.", readme_path.display()))?;
    pb.set_position(100);
    pb.finish_with_message(format!("Documentation for {} written to {}", module_path, readme_path.display()));
    Ok(())
}
/// Generate documentation for `module_path` locally via Ollama (codellama),
/// without sending the module source to any remote service.
///
/// Installs Ollama if it is not on PATH (macOS via Homebrew, Linux via the
/// official install script), runs a temporary `ollama serve` in the
/// background, queries the model, and writes the output README.
async fn generate_docs_offline(module_path: &str, content: &str, full_module_path: Option<PathBuf>) -> Result<()> {
    let pb = ProgressBar::new(100);
    pb.set_style(ProgressStyle::default_bar()
        .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len} ({eta}) {msg}")
        .unwrap_or_else(|_| ProgressStyle::default_bar())
        .progress_chars("#>-"));
    pb.set_position(33);
    pb.set_message("Generating documentation offline...");
    // `--version` failing to spawn means the binary is not on PATH.
    if Command::new("ollama").arg("--version").output().is_err() {
        pb.set_message("Ollama not found. Installing...");
        let install_status = if cfg!(target_os = "macos") {
            Command::new("brew").args(&["install", "ollama"]).status()
        } else if cfg!(target_os = "linux") {
            // The pipeline must run inside a shell: passing "|" directly as an
            // argument to curl would be treated as a literal argument, so the
            // install script would never reach `sh`.
            Command::new("sh")
                .args(&["-c", "curl -fsSL https://ollama.ai/install.sh | sh"])
                .status()
        } else {
            return Err(anyhow::anyhow!("Unsupported operating system for Ollama installation"));
        };
        // Check both spawn failure AND a non-zero installer exit code; the
        // original only caught the former and treated a failed install as
        // success.
        match install_status {
            Ok(status) if status.success() => pb.set_message("Ollama installed successfully"),
            Ok(status) => return Err(anyhow::anyhow!("Failed to install Ollama: installer exited with {}", status)),
            Err(e) => return Err(anyhow::anyhow!("Failed to install Ollama: {}", e)),
        }
    }
    // Start the Ollama server in the background.
    let mut ollama_serve = Command::new("ollama")
        .arg("serve")
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .spawn()
        .context("Failed to start Ollama server. Make sure it's installed and in your PATH.")?;
    // Give the server a moment to start up.
    tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
    // Run the model. Capture the result first so the background server is
    // always stopped, even when this command fails (the original leaked the
    // server process on the error path).
    let ollama_result = Command::new("ollama")
        .arg("run")
        .arg("codellama")
        .arg("Generate documentation for the following Verilog module:")
        .arg(content)
        .output();
    ollama_serve.kill().context("Failed to stop Ollama server")?;
    let ollama_output = ollama_result
        .context("Failed to execute Ollama. Make sure it's installed and in your PATH.")?;
    if !ollama_output.status.success() {
        return Err(anyhow::anyhow!("Ollama command failed: {}", String::from_utf8_lossy(&ollama_output.stderr)));
    }
    let documentation = String::from_utf8(ollama_output.stdout)
        .context("Failed to parse Ollama output as UTF-8")?;
    pb.set_position(66);
    pb.set_message("Writing documentation to file...");
    let readme_path = match full_module_path {
        Some(path) => path,
        None => {
            let module_name = module_path.rsplit('/').next().unwrap_or(module_path);
            // See generate_docs: `join(module_name).parent()` was a no-op, the
            // target directory is ./vpm_modules itself.
            let dir = PathBuf::from("./vpm_modules");
            fs::create_dir_all(&dir)
                .with_context(|| format!("Failed to create directory: {}. Please ensure you have write permissions in this location.", dir.display()))?;
            dir.join(format!("{}_README.md", module_name))
        }
    };
    tokio::fs::write(&readme_path, documentation).await
        .with_context(|| format!("Failed to write documentation to file: {}. Please ensure you have write permissions in this location.", readme_path.display()))?;
    pb.set_position(100);
    pb.finish_with_message(format!("Documentation for {} written to {}", module_path, readme_path.display()));
    Ok(())
}
| rust | MIT | 03763cc40a2354d7a979a378e6830c1b4ceee44a | 2026-01-04T20:11:49.815734Z | false |
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/cmd/remove.rs | src/cmd/remove.rs | use std::fs;
use std::path::PathBuf;
use std::io::{self, Write};
use anyhow::{Result, anyhow};
use crate::cmd::{Execute, Remove};
use crate::toml::{remove_top_module, get_repo_links};
impl Execute for Remove {
    /// CLI entry point for `vpm remove`: forwards the package path to
    /// [`remove_module`] and propagates its result unchanged.
    async fn execute(&self) -> Result<()> {
        remove_module(&self.package_path)
    }
}
/// Interactively remove a module file (and its `.vh` header, if present)
/// and drop its entry from the project TOML.
///
/// The user must confirm twice: a quick y/n prompt, then re-typing the
/// module name. When the module is tracked under several repositories, the
/// user picks which repository link the TOML entry belongs to.
fn remove_module(module_path: &str) -> Result<()> {
    let module_path = PathBuf::from(module_path);
    if !module_path.exists() {
        return Err(anyhow!("Module not found: {}", module_path.display()));
    }
    // NOTE(review): assumes the path has a UTF-8 file name; a path ending in
    // `..` would panic here, exactly as the original did.
    let module_name = module_path.file_name().unwrap().to_str().unwrap();
    // First confirmation: quick y/n.
    print!("Are you sure you want to remove the module {}? (y/n): ", module_name);
    io::stdout().flush()?;
    let mut confirmation = String::new();
    io::stdin().read_line(&mut confirmation)?;
    if confirmation.trim().to_lowercase() != "y" {
        return Ok(());
    }
    let repo_links = get_repo_links(module_name);
    let repo_link = match repo_links.len() {
        0 => return Err(anyhow!("No repository links found for module: {}", module_name)),
        1 => repo_links.into_iter().next().unwrap(),
        n => {
            println!("Multiple repository links found for module: {}. Please choose the correct repository link.", module_name);
            for (i, link) in repo_links.iter().enumerate() {
                println!("{}. {}", i + 1, link);
            }
            let mut choice = String::new();
            print!("Enter your choice (1-{}): ", n);
            io::stdout().flush()?;
            io::stdin().read_line(&mut choice)?;
            let index: usize = choice.trim().parse().map_err(|_| anyhow!("Invalid choice"))?;
            // Single bounds check; the original validated the range and then
            // redundantly error-handled `nth` a second time.
            if !(1..=n).contains(&index) {
                return Err(anyhow!("Invalid choice"));
            }
            repo_links
                .iter()
                .nth(index - 1)
                .expect("index validated against repo_links.len() above")
                .to_string()
        }
    };
    // Second confirmation: the name must be re-typed exactly.
    print!("To confirm removal, please re-type \"{}\" (without the quotes): ", module_name);
    io::stdout().flush()?;
    let mut confirmation_name = String::new();
    io::stdin().read_line(&mut confirmation_name)?;
    if confirmation_name.trim() != module_name {
        return Err(anyhow!("Module name does not match. Removal cancelled."));
    }
    fs::remove_file(&module_path)?;
    // Remove the corresponding header file if it exists.
    let header_path = module_path.with_extension("vh");
    if header_path.exists() {
        fs::remove_file(&header_path)?;
        println!("Removed header file: {}", header_path.display());
    }
    remove_top_module(&repo_link, module_name)?;
    println!("Removed module: {}", module_path.display());
    Ok(())
}
| rust | MIT | 03763cc40a2354d7a979a378e6830c1b4ceee44a | 2026-01-04T20:11:49.815734Z | false |
getinstachip/vpm | https://github.com/getinstachip/vpm/blob/03763cc40a2354d7a979a378e6830c1b4ceee44a/src/cmd/cmd.rs | src/cmd/cmd.rs | use clap::Parser;
// Top-level CLI definition for VPM, built with clap's derive API. Each
// variant is one subcommand wrapping the argument struct declared below.
// The `about`/`long_about` attribute strings ARE the user-visible help text,
// so they must not be edited casually. Plain `//` comments are used here
// deliberately: `///` doc comments would be picked up by clap as help text.
#[derive(Debug, Parser)]
#[clap(
    about = "VPM - Verilog Package Manager",
    author,
    version,
    propagate_version = true,
    disable_help_subcommand = true,
    after_help = "Run 'vpm <COMMAND> --help' for more information on a specific command."
)]
pub enum Cmd {
    // Dependency management: include / update / remove.
    #[command(
        about = "vpm include <MODULE_URL> [--repo] [--riscv] [--commit <HASH>] // Add a module or repository to your project",
        long_about = "Include a module with one command. VPM's internal parser will identify and configure any subdependencies."
    )]
    Include(Include),
    #[command(
        about = "vpm update <MODULE_PATH> // Update a module to its latest version",
        long_about = "Update a specific module to its latest version. This command checks for updates to the specified module and applies them if available."
    )]
    Update(Update),
    #[command(
        about = "vpm remove <PACKAGE_PATH> // Remove a package from your project",
        long_about = "Remove a package from your project. This command uninstalls the specified package and removes it from your project's dependencies, helping you maintain a clean and efficient project structure."
    )]
    Remove(Remove),
    // Project tooling: filelists, documentation, tool installation, listing.
    #[command(
        about = "vpm dotf <TOP_MODULE_PATH> // Generate a .f filelist for a module",
        long_about = "Generate a filelist (.f file) for a top module and all its submodules."
    )]
    Dotf(Dotf),
    #[command(
        about = "vpm docs <MODULE_PATH> [--url <URL>] // Generate documentation for a module",
        long_about = "Generate documentation for a module. This command creates comprehensive documentation for the specified module, including descriptions of inputs, outputs, and functionality. It supports both local modules and those hosted on remote repositories."
    )]
    Docs(Docs),
    #[command(
        about = "vpm install <TOOL_NAME> // Install a specified tool",
        long_about = "Install a specified tool. VPM automates the build process and installs missing subdependencies. Support for powerful linters, Icarus Verilog, Verilator, Yosys, GTKWave, the RISC-V toolchain, and more."
    )]
    Install(Install),
    #[command(
        about = "vpm list // List all available modules in the project",
        long_about = "List all available modules in the current project. This command provides an overview of all modules currently included in your project, helping you keep track of your dependencies and project structure."
    )]
    List(List),
    // Flow commands: simulate, synthesize, load onto hardware, run.
    #[command(
        about = "vpm sim <FILE_PATHS>... // Simulate Verilog files",
        long_about = "Simulate one or more Verilog files. This command runs simulations on the specified Verilog files, allowing you to test and verify the behavior of your designs before synthesis or implementation."
    )]
    Sim(Sim),
    #[command(
        about = "vpm synth <TOP_MODULE_PATH> // Synthesize a top module",
        long_about = "Synthesize a top module. This command performs synthesis on the specified top module, converting your RTL design into a gate-level netlist. Supports synthesis for:
• Board-agnostic (default)
• Xilinx FPGAs
• Altera FPGAs (coming soon)
• Custom board files (coming soon)
"
    )]
    Synth(Synth),
    #[command(
        about = "vpm load <TOP_MODULE_PATH> // Load a top module onto a target device",
        long_about = "Load a top module onto a target device. This command programs the synthesized design onto the specified hardware, allowing you to test your design on actual FPGA or ASIC hardware."
    )]
    Load(Load),
    #[command(
        about = "vpm run <PROGRAM> // Execute a specified program",
        long_about = "Run a specified program. This command executes the given program, which can be useful for running custom scripts, tools, or compiled designs as part of your Verilog development workflow."
    )]
    Run(Run),
    // Self-management: upgrade the tool, tweak its configuration.
    #[command(
        about = "vpm upgrade // Upgrade VPM to the latest version",
        long_about = "Upgrade VPM to the latest version available."
    )]
    Upgrade(Upgrade),
    #[command(
        about = "vpm config <KEY> <VALUE> // Configure VPM settings",
        long_about = "Configure VPM settings. This command allows you to set various options and preferences for VPM, such as enabling or disabling analytics."
    )]
    Config(Config),
}
// Arguments for `vpm upgrade` — takes no options.
#[derive(Debug, Parser)]
pub struct Upgrade {}
// Arguments for `vpm include`.
#[derive(Debug, Parser)]
pub struct Include {
    #[arg(long, short, help = "If this flag is set, the URL will be treated as a full repository. If not set, the URL will be treated as a single module.")]
    pub repo: bool,
    #[arg(help = "GitHub URL of the module to include. This should point to a single .v or .sv file in GitHub. If --repo is set, <URL> should not be a full repository URL, but rather 'AUTHOR_NAME/REPO_NAME'")]
    pub url: String,
    #[arg(long, help = "Include RISC-V specific modules. Use this flag when including modules designed specifically for RISC-V architectures.")]
    pub riscv: bool,
    #[arg(long, help = "Commit hash of the module to include. This should be a valid commit hash from the module's repository.")]
    pub commit: Option<String>,
}
// Arguments for `vpm update`.
#[derive(Debug, Parser)]
pub struct Update {
    #[arg(help = "Full module path of the module to update. This should be the complete path to the module file within your project structure.")]
    pub module_path: String,
    #[arg(long, help = "Update to the given commit hash. If not set, the latest commit hash will be used.")]
    pub commit: Option<String>,
}
// Arguments for `vpm remove`.
#[derive(Debug, Parser)]
pub struct Remove {
    #[arg(help = "Full module path of the package to remove. This should be the complete path to the package directory within your project structure.")]
    pub package_path: String,
}
// Arguments for `vpm dotf`.
#[derive(Debug, Parser)]
pub struct Dotf {
    #[arg(help = "Path to the top module to generate a filelist for. This should be the complete path to the top module file within your project structure.")]
    pub path_to_top_module: String,
}
// Arguments for `vpm docs`.
#[derive(Debug, Parser)]
pub struct Docs {
    #[arg(help = "Path of the module to generate documentation for. This should be the path to the module file within your project structure, starting with 'vpm_modules/'.")]
    pub module_path: String,
    #[arg(long, help = "If this flag is set, the module path will be treated as a link to a .v or .sv file in a GitHub repository. If not set, the path will be treated as a local file path.")]
    pub from_repo: bool,
    #[arg(long, help = "Generate documentation in offline mode for code security.")]
    pub offline: bool,
}
// Arguments for `vpm install`. The multi-line help string below is rendered
// verbatim by clap — keep its line breaks intact.
#[derive(Debug, Parser)]
pub struct Install {
    #[arg(help = "Name of the tool to install. This should be a valid tool name recognized by VPM. Available options:
• verilator: A fast Verilog/SystemVerilog simulator
• iverilog: Icarus Verilog, a Verilog simulation and synthesis tool
• yosys: Open-source Verilog synthesis suite
• gtkwave: Waveform viewer for simulation results
• verible: SystemVerilog parser, style linter, and formatter
• edalize: One-stop library for interfacing EDA tools
• riscv-gnu-toolchain: GNU toolchain for RISC-V, including GCC compiler and associated tools")]
    pub tool_name: String,
}
// Arguments for `vpm sim`.
#[derive(Debug, Parser)]
pub struct Sim {
    #[arg(help = "List of paths to .v or .sv files you want to simulate. Include '_tb' in the testbench file name; otherwise, a base testbench and waveform will be generated.")]
    pub verilog_files: Vec<String>,
    #[arg(long, help = "Generate waveform output. If set, the simulation will produce waveform data and open it in GTKWave.")]
    pub waveform: bool,
    // NOTE(review): no #[arg] help text, unlike every other field in this
    // file — presumably an output folder; confirm intent and document it.
    pub folder: Option<String>,
}
// Arguments for `vpm list` — takes no options.
#[derive(Debug, Parser)]
pub struct List {}
// Arguments for `vpm synth`. The multi-line help string below is rendered
// verbatim by clap — keep its line breaks intact.
#[derive(Debug, Parser)]
pub struct Synth {
    #[arg(help = "Top module path to synthesize. This should be the path to the main module of your design that you want to synthesize.")]
    pub top_module_path: String,
    #[arg(long, help = "Set this flag if you're working with a RISC-V based design.")]
    pub riscv: bool,
    #[arg(long, help = "Path to RISC-V core. Required if --riscv is set. This should be the path to your RISC-V core implementation.")]
    pub core_path: Option<String>,
    #[arg(long, help = "Specify target board. Use this to optimize the synthesis for a specific FPGA board. Current options:
• xilinx: Optimize for Xilinx FPGA boards
• altera: Optimize for Altera FPGA boards (coming soon)
• custom: Use a custom board file (coming soon)")]
    pub board: Option<String>,
    #[arg(long, help = "Generate synthesis script. If set, the command will produce a Yosys synthesis script instead of running the synthesis directly.")]
    pub gen_yosys_script: bool,
}
// Arguments for `vpm load`.
#[derive(Debug, Parser)]
pub struct Load {
    #[arg(help = "Path to the top module to load. This should be the path to the synthesized netlist or bitstream file.")]
    pub top_module_path: String,
    #[arg(help = "Path to the .xcd constraint file. This file should contain timing and placement constraints for your design.")]
    pub constraints_path: String,
    #[arg(long, help = "Use RISC-V toolchain. Set this flag if you're working with a RISC-V based design and need to use RISC-V specific tools for loading.")]
    pub riscv: bool,
}
// Arguments for `vpm run`.
#[derive(Debug, Parser)]
pub struct Run {
    #[arg(help = "Path to the program to run. This can be a compiled binary, a script, or any executable file.")]
    pub program_path: String,
    #[arg(long, help = "Use RISC-V toolchain. Set this flag if you're running a program compiled for RISC-V architecture and need to use RISC-V specific tools or emulators.")]
    pub riscv: bool,
}
// Arguments for `vpm config`.
#[derive(Debug, Parser)]
pub struct Config {
    #[arg(long, help = "Enable or disable anonymous usage data collection. Set to false to opt-out of data collection.")]
    pub analytics: Option<bool>,
}
| rust | MIT | 03763cc40a2354d7a979a378e6830c1b4ceee44a | 2026-01-04T20:11:49.815734Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/build.rs | build.rs | fn main() {
let mut config = prost_build::Config::new();
// Configure Prost to add #[derive(Serialize, Deserialize)] to all generated structs
config.type_attribute(
".",
"#[cfg_attr(feature = \"fuzz\", derive(serde::Serialize, serde::Deserialize))]",
);
config
.compile_protos(
&[
"src/schema/keys.proto",
"src/schema/noise.proto",
"src/schema/webrtc.proto",
"src/protocol/libp2p/schema/identify.proto",
"src/protocol/libp2p/schema/kademlia.proto",
"src/protocol/libp2p/schema/bitswap.proto",
],
&["src"],
)
.unwrap();
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/config.rs | src/config.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! [`Litep2p`](`crate::Litep2p`) configuration.
use crate::{
crypto::ed25519::Keypair,
executor::{DefaultExecutor, Executor},
protocol::{
libp2p::{bitswap, identify, kademlia, ping},
mdns::Config as MdnsConfig,
notification, request_response, UserProtocol,
},
transport::{
manager::limits::ConnectionLimitsConfig, tcp::config::Config as TcpConfig,
KEEP_ALIVE_TIMEOUT, MAX_PARALLEL_DIALS,
},
types::protocol::ProtocolName,
PeerId,
};
#[cfg(feature = "quic")]
use crate::transport::quic::config::Config as QuicConfig;
#[cfg(feature = "webrtc")]
use crate::transport::webrtc::config::Config as WebRtcConfig;
#[cfg(feature = "websocket")]
use crate::transport::websocket::config::Config as WebSocketConfig;
use multiaddr::Multiaddr;
use std::{collections::HashMap, sync::Arc, time::Duration};
/// Connection role.
///
/// Identifies which side of a connection this node is: the one initiating
/// it (dialer) or the one accepting it (listener). The `From<Role>` impl
/// below maps the role onto the corresponding yamux mode.
#[derive(Debug, Copy, Clone)]
pub enum Role {
    /// Dialer.
    Dialer,
    /// Listener.
    Listener,
}
impl From<Role> for crate::yamux::Mode {
fn from(value: Role) -> Self {
match value {
Role::Dialer => crate::yamux::Mode::Client,
Role::Listener => crate::yamux::Mode::Server,
}
}
}
/// Configuration builder for [`Litep2p`](`crate::Litep2p`).
///
/// All fields start unset/empty; each `with_*` method on the impl below
/// fills exactly one of them, and [`ConfigBuilder::build`] converts the
/// accumulated state into a [`Litep2pConfig`].
pub struct ConfigBuilder {
    /// TCP transport configuration.
    tcp: Option<TcpConfig>,
    /// QUIC transport config.
    #[cfg(feature = "quic")]
    quic: Option<QuicConfig>,
    /// WebRTC transport config.
    #[cfg(feature = "webrtc")]
    webrtc: Option<WebRtcConfig>,
    /// WebSocket transport config.
    #[cfg(feature = "websocket")]
    websocket: Option<WebSocketConfig>,
    /// Keypair.
    keypair: Option<Keypair>,
    /// Ping protocol config.
    ping: Option<ping::Config>,
    /// Identify protocol config.
    identify: Option<identify::Config>,
    /// Kademlia protocol config.
    ///
    /// A `Vec` because several Kademlia instances may be configured; each
    /// call to `with_libp2p_kademlia` appends one.
    kademlia: Vec<kademlia::Config>,
    /// Bitswap protocol config.
    bitswap: Option<bitswap::Config>,
    /// Notification protocols, keyed by protocol name.
    notification_protocols: HashMap<ProtocolName, notification::Config>,
    /// Request-response protocols, keyed by protocol name.
    request_response_protocols: HashMap<ProtocolName, request_response::Config>,
    /// User protocols.
    user_protocols: HashMap<ProtocolName, Box<dyn UserProtocol>>,
    /// mDNS configuration.
    mdns: Option<MdnsConfig>,
    /// Known addresses.
    known_addresses: Vec<(PeerId, Vec<Multiaddr>)>,
    /// Executor for running futures.
    executor: Option<Arc<dyn Executor>>,
    /// Maximum number of parallel dial attempts.
    max_parallel_dials: usize,
    /// Connection limits config.
    connection_limits: ConnectionLimitsConfig,
    /// Close the connection if no substreams are open within this time frame.
    keep_alive_timeout: Duration,
    /// Use system's DNS config.
    use_system_dns_config: bool,
}
impl Default for ConfigBuilder {
fn default() -> Self {
Self::new()
}
}
impl ConfigBuilder {
    /// Create empty [`ConfigBuilder`].
    pub fn new() -> Self {
        Self {
            tcp: None,
            #[cfg(feature = "quic")]
            quic: None,
            #[cfg(feature = "webrtc")]
            webrtc: None,
            #[cfg(feature = "websocket")]
            websocket: None,
            keypair: None,
            ping: None,
            identify: None,
            kademlia: Vec::new(),
            bitswap: None,
            mdns: None,
            executor: None,
            max_parallel_dials: MAX_PARALLEL_DIALS,
            user_protocols: HashMap::new(),
            notification_protocols: HashMap::new(),
            request_response_protocols: HashMap::new(),
            known_addresses: Vec::new(),
            connection_limits: ConnectionLimitsConfig::default(),
            keep_alive_timeout: KEEP_ALIVE_TIMEOUT,
            use_system_dns_config: false,
        }
    }

    /// Add TCP transport configuration, enabling the transport.
    pub fn with_tcp(mut self, config: TcpConfig) -> Self {
        self.tcp = Some(config);
        self
    }

    /// Add QUIC transport configuration, enabling the transport.
    #[cfg(feature = "quic")]
    pub fn with_quic(mut self, config: QuicConfig) -> Self {
        self.quic = Some(config);
        self
    }

    /// Add WebRTC transport configuration, enabling the transport.
    #[cfg(feature = "webrtc")]
    pub fn with_webrtc(mut self, config: WebRtcConfig) -> Self {
        self.webrtc = Some(config);
        self
    }

    /// Add WebSocket transport configuration, enabling the transport.
    #[cfg(feature = "websocket")]
    pub fn with_websocket(mut self, config: WebSocketConfig) -> Self {
        self.websocket = Some(config);
        self
    }

    /// Add keypair.
    ///
    /// If no keypair is specified, litep2p creates a new keypair.
    pub fn with_keypair(mut self, keypair: Keypair) -> Self {
        self.keypair = Some(keypair);
        self
    }

    /// Enable notification protocol.
    pub fn with_notification_protocol(mut self, config: notification::Config) -> Self {
        self.notification_protocols.insert(config.protocol_name().clone(), config);
        self
    }

    /// Enable IPFS Ping protocol.
    pub fn with_libp2p_ping(mut self, config: ping::Config) -> Self {
        self.ping = Some(config);
        self
    }

    /// Enable IPFS Identify protocol.
    pub fn with_libp2p_identify(mut self, config: identify::Config) -> Self {
        self.identify = Some(config);
        self
    }

    /// Enable IPFS Kademlia protocol.
    ///
    /// May be called multiple times; each call adds another Kademlia
    /// instance.
    pub fn with_libp2p_kademlia(mut self, config: kademlia::Config) -> Self {
        self.kademlia.push(config);
        self
    }

    /// Enable IPFS Bitswap protocol.
    pub fn with_libp2p_bitswap(mut self, config: bitswap::Config) -> Self {
        self.bitswap = Some(config);
        self
    }

    /// Enable request-response protocol.
    pub fn with_request_response_protocol(mut self, config: request_response::Config) -> Self {
        self.request_response_protocols.insert(config.protocol_name().clone(), config);
        self
    }

    /// Enable user protocol.
    pub fn with_user_protocol(mut self, protocol: Box<dyn UserProtocol>) -> Self {
        self.user_protocols.insert(protocol.protocol(), protocol);
        self
    }

    /// Enable mDNS for peer discoveries in the local network.
    pub fn with_mdns(mut self, config: MdnsConfig) -> Self {
        self.mdns = Some(config);
        self
    }

    /// Add known address(es) for one or more peers.
    ///
    /// Note: replaces any previously supplied list.
    pub fn with_known_addresses(
        mut self,
        addresses: impl Iterator<Item = (PeerId, Vec<Multiaddr>)>,
    ) -> Self {
        self.known_addresses = addresses.collect();
        self
    }

    /// Add executor for running futures spawned by `litep2p`.
    ///
    /// If no executor is specified, `litep2p` defaults to calling `tokio::spawn()`.
    pub fn with_executor(mut self, executor: Arc<dyn Executor>) -> Self {
        self.executor = Some(executor);
        self
    }

    /// How many addresses should litep2p attempt to dial in parallel.
    pub fn with_max_parallel_dials(mut self, max_parallel_dials: usize) -> Self {
        self.max_parallel_dials = max_parallel_dials;
        self
    }

    /// Set connection limits configuration.
    pub fn with_connection_limits(mut self, config: ConnectionLimitsConfig) -> Self {
        self.connection_limits = config;
        self
    }

    /// Set keep alive timeout for connections.
    pub fn with_keep_alive_timeout(mut self, timeout: Duration) -> Self {
        self.keep_alive_timeout = timeout;
        self
    }

    /// Set DNS resolver according to system configuration instead of default (Google).
    pub fn with_system_resolver(mut self) -> Self {
        self.use_system_dns_config = true;
        self
    }

    /// Build [`Litep2pConfig`], generating a fresh keypair if none was set.
    pub fn build(self) -> Litep2pConfig {
        // `self` is consumed here, so every field can simply be moved out;
        // the previous `mut self` + `Option::take()` dance was redundant.
        Litep2pConfig {
            keypair: self.keypair.unwrap_or_else(Keypair::generate),
            tcp: self.tcp,
            mdns: self.mdns,
            #[cfg(feature = "quic")]
            quic: self.quic,
            #[cfg(feature = "webrtc")]
            webrtc: self.webrtc,
            #[cfg(feature = "websocket")]
            websocket: self.websocket,
            ping: self.ping,
            identify: self.identify,
            kademlia: self.kademlia,
            bitswap: self.bitswap,
            max_parallel_dials: self.max_parallel_dials,
            // `unwrap_or_else` keeps the fallback lazy; the previous
            // `map_or(Arc::new(DefaultExecutor {}), ...)` allocated the
            // default executor even when a custom one was supplied.
            executor: self.executor.unwrap_or_else(|| Arc::new(DefaultExecutor {})),
            user_protocols: self.user_protocols,
            notification_protocols: self.notification_protocols,
            request_response_protocols: self.request_response_protocols,
            known_addresses: self.known_addresses,
            connection_limits: self.connection_limits,
            keep_alive_timeout: self.keep_alive_timeout,
            use_system_dns_config: self.use_system_dns_config,
        }
    }
}
/// Configuration for [`Litep2p`](`crate::Litep2p`).
///
/// Produced by [`ConfigBuilder::build`]; fields are `pub(crate)` because
/// they are consumed directly by the crate internals, not by users.
pub struct Litep2pConfig {
    /// TCP transport configuration.
    pub(crate) tcp: Option<TcpConfig>,
    /// QUIC transport config.
    #[cfg(feature = "quic")]
    pub(crate) quic: Option<QuicConfig>,
    /// WebRTC transport config.
    #[cfg(feature = "webrtc")]
    pub(crate) webrtc: Option<WebRtcConfig>,
    /// WebSocket transport config.
    #[cfg(feature = "websocket")]
    pub(crate) websocket: Option<WebSocketConfig>,
    /// Keypair.
    pub(crate) keypair: Keypair,
    /// Ping protocol configuration, if enabled.
    pub(crate) ping: Option<ping::Config>,
    /// Identify protocol configuration, if enabled.
    pub(crate) identify: Option<identify::Config>,
    /// Kademlia protocol configuration, if enabled.
    pub(crate) kademlia: Vec<kademlia::Config>,
    /// Bitswap protocol configuration, if enabled.
    pub(crate) bitswap: Option<bitswap::Config>,
    /// Notification protocols.
    pub(crate) notification_protocols: HashMap<ProtocolName, notification::Config>,
    /// Request-response protocols.
    pub(crate) request_response_protocols: HashMap<ProtocolName, request_response::Config>,
    /// User protocols.
    pub(crate) user_protocols: HashMap<ProtocolName, Box<dyn UserProtocol>>,
    /// mDNS configuration.
    pub(crate) mdns: Option<MdnsConfig>,
    /// Executor.
    pub(crate) executor: Arc<dyn Executor>,
    /// Maximum number of parallel dial attempts.
    pub(crate) max_parallel_dials: usize,
    /// Known addresses.
    pub(crate) known_addresses: Vec<(PeerId, Vec<Multiaddr>)>,
    /// Connection limits config.
    pub(crate) connection_limits: ConnectionLimitsConfig,
    /// Close the connection if no substreams are open within this time frame.
    pub(crate) keep_alive_timeout: Duration,
    /// Use system's DNS config.
    pub(crate) use_system_dns_config: bool,
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/lib.rs | src/lib.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
#![allow(clippy::single_match)]
#![allow(clippy::result_large_err)]
#![allow(clippy::large_enum_variant)]
#![allow(clippy::redundant_pattern_matching)]
#![allow(clippy::type_complexity)]
#![allow(clippy::result_unit_err)]
#![allow(clippy::should_implement_trait)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::assign_op_pattern)]
#![allow(clippy::match_like_matches_macro)]
use crate::{
addresses::PublicAddresses,
config::Litep2pConfig,
error::DialError,
protocol::{
libp2p::{bitswap::Bitswap, identify::Identify, kademlia::Kademlia, ping::Ping},
mdns::Mdns,
notification::NotificationProtocol,
request_response::RequestResponseProtocol,
},
transport::{
manager::{SupportedTransport, TransportManager, TransportManagerBuilder},
tcp::TcpTransport,
TransportBuilder, TransportEvent,
},
};
#[cfg(feature = "quic")]
use crate::transport::quic::QuicTransport;
#[cfg(feature = "webrtc")]
use crate::transport::webrtc::WebRtcTransport;
#[cfg(feature = "websocket")]
use crate::transport::websocket::WebSocketTransport;
use hickory_resolver::{name_server::TokioConnectionProvider, TokioResolver};
use multiaddr::{Multiaddr, Protocol};
use transport::Endpoint;
use types::ConnectionId;
pub use bandwidth::BandwidthSink;
pub use error::Error;
pub use peer_id::PeerId;
use std::{collections::HashSet, sync::Arc};
pub use types::protocol::ProtocolName;
pub(crate) mod peer_id;
pub mod addresses;
pub mod codec;
pub mod config;
pub mod crypto;
pub mod error;
pub mod executor;
pub mod protocol;
pub mod substream;
pub mod transport;
pub mod types;
pub mod yamux;
mod bandwidth;
mod multistream_select;
pub mod utils;
#[cfg(test)]
mod mock;
/// Public result type used by the crate.
pub type Result<T> = std::result::Result<T, error::Error>;

/// Logging target for the file.
const LOG_TARGET: &str = "litep2p";

/// Default channel size.
// NOTE(review): the consumers of this constant are outside this chunk —
// presumably the bound for internal message channels; confirm before reuse.
const DEFAULT_CHANNEL_SIZE: usize = 4096usize;
/// Litep2p events.
///
/// High-level events surfaced to the crate user about connection lifecycle
/// and dial outcomes.
#[derive(Debug)]
pub enum Litep2pEvent {
    /// Connection established to peer.
    ConnectionEstablished {
        /// Remote peer ID.
        peer: PeerId,
        /// Endpoint.
        endpoint: Endpoint,
    },
    /// Connection closed to remote peer.
    ConnectionClosed {
        /// Peer ID.
        peer: PeerId,
        /// Connection ID.
        connection_id: ConnectionId,
    },
    /// Failed to dial peer.
    ///
    /// This error can originate from dialing a single peer address.
    DialFailure {
        /// Address of the peer.
        address: Multiaddr,
        /// Dial error.
        error: DialError,
    },
    /// A list of multiple dial failures.
    ///
    /// Emitted instead of [`Litep2pEvent::DialFailure`] when several
    /// addresses were attempted.
    ListDialFailures {
        /// List of errors.
        ///
        /// Depending on the transport, the address might be different for each error.
        errors: Vec<(Multiaddr, DialError)>,
    },
}
/// [`Litep2p`] object.
///
/// The main handle to a running litep2p instance; constructed from a
/// [`Litep2pConfig`] via `Litep2p::new`.
pub struct Litep2p {
    /// Local peer ID.
    local_peer_id: PeerId,
    /// Listen addresses.
    listen_addresses: Vec<Multiaddr>,
    /// Transport manager.
    transport_manager: TransportManager,
    /// Bandwidth sink.
    bandwidth_sink: BandwidthSink,
}
impl Litep2p {
    /// Create new [`Litep2p`].
    ///
    /// Builds the DNS resolver, registers every configured protocol with the transport
    /// manager, spawns the protocol event loops on the configured executor and finally
    /// enables the configured transports.
    ///
    /// Returns an error if the system DNS configuration cannot be read (when enabled),
    /// if a transport fails to initialize, or if no transport was specified.
    pub fn new(mut litep2p_config: Litep2pConfig) -> crate::Result<Litep2p> {
        let local_peer_id = PeerId::from_public_key(&litep2p_config.keypair.public().into());
        let bandwidth_sink = BandwidthSink::new();
        let mut listen_addresses = vec![];
        // resolver settings either mirror the host's system DNS configuration or
        // fall back to the resolver crate's defaults
        let (resolver_config, resolver_opts) = if litep2p_config.use_system_dns_config {
            hickory_resolver::system_conf::read_system_conf()
                .map_err(Error::CannotReadSystemDnsConfig)?
        } else {
            (Default::default(), Default::default())
        };
        // DNS resolver handed to each enabled transport below
        let resolver = Arc::new(
            TokioResolver::builder_with_config(resolver_config, TokioConnectionProvider::default())
                .with_options(resolver_opts)
                .build(),
        );
        // protocols must know the supported transports before the transports themselves
        // are initialized, see `supported_transports()`
        let supported_transports = Self::supported_transports(&litep2p_config);
        let mut transport_manager = TransportManagerBuilder::new()
            .with_keypair(litep2p_config.keypair.clone())
            .with_supported_transports(supported_transports)
            .with_bandwidth_sink(bandwidth_sink.clone())
            .with_max_parallel_dials(litep2p_config.max_parallel_dials)
            .with_connection_limits_config(litep2p_config.connection_limits)
            .build();
        let transport_handle = transport_manager.transport_manager_handle();
        // add known addresses to `TransportManager`, if any exist
        if !litep2p_config.known_addresses.is_empty() {
            for (peer, addresses) in litep2p_config.known_addresses {
                transport_manager.add_known_address(peer, addresses.iter().cloned());
            }
        }
        // start notification protocol event loops
        for (protocol, config) in litep2p_config.notification_protocols.into_iter() {
            tracing::debug!(
                target: LOG_TARGET,
                ?protocol,
                "enable notification protocol",
            );
            let service = transport_manager.register_protocol(
                protocol,
                config.fallback_names.clone(),
                config.codec,
                litep2p_config.keep_alive_timeout,
            );
            // the notification protocol needs its own executor handle for spawning tasks
            let executor = Arc::clone(&litep2p_config.executor);
            litep2p_config.executor.run(Box::pin(async move {
                NotificationProtocol::new(service, config, executor).run().await
            }));
        }
        // start request-response protocol event loops
        for (protocol, config) in litep2p_config.request_response_protocols.into_iter() {
            tracing::debug!(
                target: LOG_TARGET,
                ?protocol,
                "enable request-response protocol",
            );
            let service = transport_manager.register_protocol(
                protocol,
                config.fallback_names.clone(),
                config.codec,
                litep2p_config.keep_alive_timeout,
            );
            litep2p_config.executor.run(Box::pin(async move {
                RequestResponseProtocol::new(service, config).run().await
            }));
        }
        // start user protocol event loops
        for (protocol_name, protocol) in litep2p_config.user_protocols.into_iter() {
            tracing::debug!(target: LOG_TARGET, protocol = ?protocol_name, "enable user protocol");
            let service = transport_manager.register_protocol(
                protocol_name,
                Vec::new(),
                protocol.codec(),
                litep2p_config.keep_alive_timeout,
            );
            litep2p_config.executor.run(Box::pin(async move {
                let _ = protocol.run(service).await;
            }));
        }
        // start ping protocol event loop if enabled
        if let Some(ping_config) = litep2p_config.ping.take() {
            tracing::debug!(
                target: LOG_TARGET,
                protocol = ?ping_config.protocol,
                "enable ipfs ping protocol",
            );
            let service = transport_manager.register_protocol(
                ping_config.protocol.clone(),
                Vec::new(),
                ping_config.codec,
                litep2p_config.keep_alive_timeout,
            );
            litep2p_config.executor.run(Box::pin(async move {
                Ping::new(service, ping_config).run().await
            }));
        }
        // start kademlia protocol event loops
        for kademlia_config in litep2p_config.kademlia.into_iter() {
            tracing::debug!(
                target: LOG_TARGET,
                protocol_names = ?kademlia_config.protocol_names,
                "enable ipfs kademlia protocol",
            );
            // the first protocol name is registered as the main protocol,
            // the rest act as fallbacks
            let main_protocol =
                kademlia_config.protocol_names.first().expect("protocol name to exist");
            let fallback_names = kademlia_config.protocol_names.iter().skip(1).cloned().collect();
            let service = transport_manager.register_protocol(
                main_protocol.clone(),
                fallback_names,
                kademlia_config.codec,
                litep2p_config.keep_alive_timeout,
            );
            litep2p_config.executor.run(Box::pin(async move {
                let _ = Kademlia::new(service, kademlia_config).run().await;
            }));
        }
        // start identify protocol event loop if enabled
        //
        // identify is only registered here; it is started further below, after the
        // transports are enabled, so it knows the final set of protocols and addresses
        let mut identify_info = match litep2p_config.identify.take() {
            None => None,
            Some(mut identify_config) => {
                tracing::debug!(
                    target: LOG_TARGET,
                    protocol = ?identify_config.protocol,
                    "enable ipfs identify protocol",
                );
                let service = transport_manager.register_protocol(
                    identify_config.protocol.clone(),
                    Vec::new(),
                    identify_config.codec,
                    litep2p_config.keep_alive_timeout,
                );
                identify_config.public = Some(litep2p_config.keypair.public().into());
                Some((service, identify_config))
            }
        };
        // start bitswap protocol event loop if enabled
        if let Some(bitswap_config) = litep2p_config.bitswap.take() {
            tracing::debug!(
                target: LOG_TARGET,
                protocol = ?bitswap_config.protocol,
                "enable ipfs bitswap protocol",
            );
            let service = transport_manager.register_protocol(
                bitswap_config.protocol.clone(),
                Vec::new(),
                bitswap_config.codec,
                litep2p_config.keep_alive_timeout,
            );
            litep2p_config.executor.run(Box::pin(async move {
                Bitswap::new(service, bitswap_config).run().await
            }));
        }
        // enable tcp transport if the config exists
        if let Some(config) = litep2p_config.tcp.take() {
            let handle = transport_manager.transport_handle(Arc::clone(&litep2p_config.executor));
            let (transport, transport_listen_addresses) =
                <TcpTransport as TransportBuilder>::new(handle, config, resolver.clone())?;
            for address in transport_listen_addresses {
                transport_manager.register_listen_address(address.clone());
                listen_addresses.push(address.with(Protocol::P2p(*local_peer_id.as_ref())));
            }
            transport_manager.register_transport(SupportedTransport::Tcp, Box::new(transport));
        }
        // enable quic transport if the config exists
        #[cfg(feature = "quic")]
        if let Some(config) = litep2p_config.quic.take() {
            let handle = transport_manager.transport_handle(Arc::clone(&litep2p_config.executor));
            let (transport, transport_listen_addresses) =
                <QuicTransport as TransportBuilder>::new(handle, config, resolver.clone())?;
            for address in transport_listen_addresses {
                transport_manager.register_listen_address(address.clone());
                listen_addresses.push(address.with(Protocol::P2p(*local_peer_id.as_ref())));
            }
            transport_manager.register_transport(SupportedTransport::Quic, Box::new(transport));
        }
        // enable webrtc transport if the config exists
        #[cfg(feature = "webrtc")]
        if let Some(config) = litep2p_config.webrtc.take() {
            let handle = transport_manager.transport_handle(Arc::clone(&litep2p_config.executor));
            let (transport, transport_listen_addresses) =
                <WebRtcTransport as TransportBuilder>::new(handle, config, resolver.clone())?;
            for address in transport_listen_addresses {
                transport_manager.register_listen_address(address.clone());
                listen_addresses.push(address.with(Protocol::P2p(*local_peer_id.as_ref())));
            }
            transport_manager.register_transport(SupportedTransport::WebRtc, Box::new(transport));
        }
        // enable websocket transport if the config exists
        #[cfg(feature = "websocket")]
        if let Some(config) = litep2p_config.websocket.take() {
            let handle = transport_manager.transport_handle(Arc::clone(&litep2p_config.executor));
            let (transport, transport_listen_addresses) =
                <WebSocketTransport as TransportBuilder>::new(handle, config, resolver)?;
            for address in transport_listen_addresses {
                transport_manager.register_listen_address(address.clone());
                listen_addresses.push(address.with(Protocol::P2p(*local_peer_id.as_ref())));
            }
            transport_manager
                .register_transport(SupportedTransport::WebSocket, Box::new(transport));
        }
        // enable mdns if the config exists
        if let Some(config) = litep2p_config.mdns.take() {
            let mdns = Mdns::new(transport_handle, config, listen_addresses.clone());
            litep2p_config.executor.run(Box::pin(async move {
                let _ = mdns.start().await;
            }));
        }
        // if identify was enabled, give it the enabled protocols and listen addresses and start it
        if let Some((service, mut identify_config)) = identify_info.take() {
            identify_config.protocols = transport_manager.protocols().cloned().collect();
            let identify = Identify::new(service, identify_config);
            litep2p_config.executor.run(Box::pin(async move {
                let _ = identify.run().await;
            }));
        }
        // verify that at least one transport is specified
        if transport_manager.installed_transports().count() == 0 {
            return Err(Error::Other("No transport specified".to_string()));
        }
        // not having a listen address is allowed (dialing still works) but the node
        // cannot accept inbound connections, so emit a warning
        if listen_addresses.is_empty() {
            tracing::warn!(
                target: LOG_TARGET,
                "litep2p started with no listen addresses, cannot accept inbound connections",
            );
        }
        Ok(Self {
            local_peer_id,
            bandwidth_sink,
            listen_addresses,
            transport_manager,
        })
    }
    /// Collect supported transports before initializing the transports themselves.
    ///
    /// Information of the supported transports is needed to initialize protocols but
    /// information about protocols must be known to initialize transports so the initialization
    /// has to be split.
    fn supported_transports(config: &Litep2pConfig) -> HashSet<SupportedTransport> {
        let mut supported_transports = HashSet::new();
        // a transport is supported iff its configuration section is present
        config
            .tcp
            .is_some()
            .then(|| supported_transports.insert(SupportedTransport::Tcp));
        #[cfg(feature = "quic")]
        config
            .quic
            .is_some()
            .then(|| supported_transports.insert(SupportedTransport::Quic));
        #[cfg(feature = "websocket")]
        config
            .websocket
            .is_some()
            .then(|| supported_transports.insert(SupportedTransport::WebSocket));
        #[cfg(feature = "webrtc")]
        config
            .webrtc
            .is_some()
            .then(|| supported_transports.insert(SupportedTransport::WebRtc));
        supported_transports
    }
    /// Get local peer ID.
    pub fn local_peer_id(&self) -> &PeerId {
        &self.local_peer_id
    }
    /// Get the list of public addresses of the node.
    pub fn public_addresses(&self) -> PublicAddresses {
        self.transport_manager.public_addresses()
    }
    /// Get the list of listen addresses of the node.
    pub fn listen_addresses(&self) -> impl Iterator<Item = &Multiaddr> {
        self.listen_addresses.iter()
    }
    /// Get handle to bandwidth sink.
    pub fn bandwidth_sink(&self) -> BandwidthSink {
        self.bandwidth_sink.clone()
    }
    /// Dial peer.
    pub async fn dial(&mut self, peer: &PeerId) -> crate::Result<()> {
        self.transport_manager.dial(*peer).await
    }
    /// Dial address.
    pub async fn dial_address(&mut self, address: Multiaddr) -> crate::Result<()> {
        self.transport_manager.dial_address(address).await
    }
    /// Add one or more known addresses for peer.
    ///
    /// Return value denotes how many addresses were added for the peer.
    /// Addresses belonging to disabled/unsupported transports will be ignored.
    pub fn add_known_address(
        &mut self,
        peer: PeerId,
        address: impl Iterator<Item = Multiaddr>,
    ) -> usize {
        self.transport_manager.add_known_address(peer, address)
    }
    /// Poll next event.
    ///
    /// This function must be called in order for litep2p to make progress.
    pub async fn next_event(&mut self) -> Option<Litep2pEvent> {
        loop {
            match self.transport_manager.next().await? {
                TransportEvent::ConnectionEstablished { peer, endpoint, .. } =>
                    return Some(Litep2pEvent::ConnectionEstablished { peer, endpoint }),
                TransportEvent::ConnectionClosed {
                    peer,
                    connection_id,
                } =>
                    return Some(Litep2pEvent::ConnectionClosed {
                        peer,
                        connection_id,
                    }),
                TransportEvent::DialFailure { address, error, .. } =>
                    return Some(Litep2pEvent::DialFailure { address, error }),
                TransportEvent::OpenFailure { errors, .. } => {
                    return Some(Litep2pEvent::ListDialFailures { errors });
                }
                // remaining transport events are internal and not surfaced to the user
                _ => {}
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::{
        config::ConfigBuilder,
        protocol::{libp2p::ping, notification::Config as NotificationConfig},
        types::protocol::ProtocolName,
        Litep2p, Litep2pEvent, PeerId,
    };
    use multiaddr::{Multiaddr, Protocol};
    use multihash::Multihash;
    use std::net::Ipv4Addr;
    #[tokio::test]
    async fn initialize_litep2p() {
        let _ = tracing_subscriber::fmt()
            .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
            .try_init();
        // two notification protocols plus the ipfs ping protocol
        let (notif_config1, _notif_handle1) = NotificationConfig::new(
            ProtocolName::from("/notificaton/1"),
            1337usize,
            vec![1, 2, 3, 4],
            Vec::new(),
            false,
            64,
            64,
            true,
        );
        let (notif_config2, _notif_handle2) = NotificationConfig::new(
            ProtocolName::from("/notificaton/2"),
            1337usize,
            vec![1, 2, 3, 4],
            Vec::new(),
            false,
            64,
            64,
            true,
        );
        let (ping_config, _ping_event_stream) = ping::Config::default();
        // tcp transport is enabled so construction must succeed
        let config = ConfigBuilder::new()
            .with_tcp(Default::default())
            .with_notification_protocol(notif_config1)
            .with_notification_protocol(notif_config2)
            .with_libp2p_ping(ping_config)
            .build();
        let _litep2p = Litep2p::new(config).unwrap();
    }
    #[tokio::test]
    async fn no_transport_given() {
        let _ = tracing_subscriber::fmt()
            .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
            .try_init();
        let (notif_config1, _notif_handle1) = NotificationConfig::new(
            ProtocolName::from("/notificaton/1"),
            1337usize,
            vec![1, 2, 3, 4],
            Vec::new(),
            false,
            64,
            64,
            true,
        );
        let (notif_config2, _notif_handle2) = NotificationConfig::new(
            ProtocolName::from("/notificaton/2"),
            1337usize,
            vec![1, 2, 3, 4],
            Vec::new(),
            false,
            64,
            64,
            true,
        );
        let (ping_config, _ping_event_stream) = ping::Config::default();
        // no transport is enabled so construction must fail
        let config = ConfigBuilder::new()
            .with_notification_protocol(notif_config1)
            .with_notification_protocol(notif_config2)
            .with_libp2p_ping(ping_config)
            .build();
        assert!(Litep2p::new(config).is_err());
    }
    #[tokio::test]
    async fn dial_same_address_twice() {
        let _ = tracing_subscriber::fmt()
            .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
            .try_init();
        let (notif_config1, _notif_handle1) = NotificationConfig::new(
            ProtocolName::from("/notificaton/1"),
            1337usize,
            vec![1, 2, 3, 4],
            Vec::new(),
            false,
            64,
            64,
            true,
        );
        let (notif_config2, _notif_handle2) = NotificationConfig::new(
            ProtocolName::from("/notificaton/2"),
            1337usize,
            vec![1, 2, 3, 4],
            Vec::new(),
            false,
            64,
            64,
            true,
        );
        let (ping_config, _ping_event_stream) = ping::Config::default();
        let config = ConfigBuilder::new()
            .with_tcp(Default::default())
            .with_notification_protocol(notif_config1)
            .with_notification_protocol(notif_config2)
            .with_libp2p_ping(ping_config)
            .build();
        // unreachable address for a random peer
        let target = PeerId::random();
        let address = Multiaddr::empty()
            .with(Protocol::Ip4(Ipv4Addr::new(255, 254, 253, 252)))
            .with(Protocol::Tcp(8888))
            .with(Protocol::P2p(
                Multihash::from_bytes(&target.to_bytes()).unwrap(),
            ));
        let mut litep2p = Litep2p::new(config).unwrap();
        litep2p.dial_address(address.clone()).await.unwrap();
        litep2p.dial_address(address).await.unwrap();
        // the dial must fail since the address is not reachable
        assert!(
            matches!(
                litep2p.next_event().await,
                Some(Litep2pEvent::DialFailure { .. })
            ),
            "invalid event received",
        );
        // verify that the second same dial was ignored and the dial failure is reported only once
        assert!(
            tokio::time::timeout(std::time::Duration::from_secs(20), litep2p.next_event())
                .await
                .is_err(),
            "invalid event received",
        );
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/bandwidth.rs | src/bandwidth.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Bandwidth sinks for metering inbound/outbound bytes.
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
/// Shared counters backing every clone of a [`BandwidthSink`].
#[derive(Debug)]
struct InnerBandwidthSink {
    /// Number of inbound bytes.
    inbound: AtomicUsize,
    /// Number of outbound bytes.
    outbound: AtomicUsize,
}
/// Bandwidth sink which provides metering for inbound/outbound byte usage.
///
/// Counter updates and reads use relaxed atomic ordering, so the reported
/// values are not necessarily up to date with the latest information and
/// should not be used for metrics that require high precision. They do,
/// however, provide an overall view of the data usage of `litep2p`.
#[derive(Debug, Clone)]
pub struct BandwidthSink(Arc<InnerBandwidthSink>);
impl BandwidthSink {
    /// Create new [`BandwidthSink`] with both counters starting at zero.
    pub(crate) fn new() -> Self {
        let counters = InnerBandwidthSink {
            inbound: AtomicUsize::new(0),
            outbound: AtomicUsize::new(0),
        };
        Self(Arc::new(counters))
    }
    /// Increase the amount of inbound bytes by `bytes`.
    pub(crate) fn increase_inbound(&self, bytes: usize) {
        self.0.inbound.fetch_add(bytes, Ordering::Relaxed);
    }
    /// Increase the amount of outbound bytes by `bytes`.
    pub(crate) fn increase_outbound(&self, bytes: usize) {
        self.0.outbound.fetch_add(bytes, Ordering::Relaxed);
    }
    /// Get the total number of bytes received.
    pub fn inbound(&self) -> usize {
        self.0.inbound.load(Ordering::Relaxed)
    }
    /// Get the total number of bytes sent.
    pub fn outbound(&self) -> usize {
        self.0.outbound.load(Ordering::Relaxed)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn verify_bandwidth() {
        // bump both counters once and verify each reports its own total
        let bandwidth = BandwidthSink::new();
        bandwidth.increase_inbound(1337);
        bandwidth.increase_outbound(1338);
        assert_eq!(
            (bandwidth.inbound(), bandwidth.outbound()),
            (1337usize, 1338usize)
        );
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/error.rs | src/error.rs | // Copyright 2019 Parity Technologies (UK) Ltd.
// Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
#![allow(clippy::enum_variant_names)]
//! [`Litep2p`](`crate::Litep2p`) error types.
use crate::{
protocol::Direction,
transport::manager::limits::ConnectionLimitsError,
types::{protocol::ProtocolName, ConnectionId, SubstreamId},
PeerId,
};
use multiaddr::Multiaddr;
use multihash::{Multihash, MultihashGeneric};
use std::io::{self, ErrorKind};
// TODO: https://github.com/paritytech/litep2p/issues/204 clean up the overarching error.
// Please note that this error is not propagated directly to the user.
#[allow(clippy::large_enum_variant)]
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Peer `{0}` does not exist")]
PeerDoesntExist(PeerId),
#[error("Peer `{0}` already exists")]
PeerAlreadyExists(PeerId),
#[error("Protocol `{0}` not supported")]
ProtocolNotSupported(String),
#[error("Address error: `{0}`")]
AddressError(#[from] AddressError),
#[error("Parse error: `{0}`")]
ParseError(ParseError),
#[error("I/O error: `{0}`")]
IoError(ErrorKind),
#[error("Negotiation error: `{0}`")]
NegotiationError(#[from] NegotiationError),
#[error("Substream error: `{0}`")]
SubstreamError(#[from] SubstreamError),
#[error("Substream error: `{0}`")]
NotificationError(NotificationError),
#[error("Essential task closed")]
EssentialTaskClosed,
#[error("Unknown error occurred")]
Unknown,
#[error("Cannot dial self: `{0}`")]
CannotDialSelf(Multiaddr),
#[error("Transport not supported")]
TransportNotSupported(Multiaddr),
#[error("Yamux error for substream `{0:?}`: `{1}`")]
YamuxError(Direction, crate::yamux::ConnectionError),
#[error("Operation not supported: `{0}`")]
NotSupported(String),
#[error("Other error occurred: `{0}`")]
Other(String),
#[error("Protocol already exists: `{0:?}`")]
ProtocolAlreadyExists(ProtocolName),
#[error("Operation timed out")]
Timeout,
#[error("Invalid state transition")]
InvalidState,
#[error("DNS address resolution failed")]
DnsAddressResolutionFailed,
#[error("Transport error: `{0}`")]
TransportError(String),
#[cfg(feature = "quic")]
#[error("Failed to generate certificate: `{0}`")]
CertificateGeneration(#[from] crate::crypto::tls::certificate::GenError),
#[error("Invalid data")]
InvalidData,
#[error("Input rejected")]
InputRejected,
#[cfg(feature = "websocket")]
#[error("WebSocket error: `{0}`")]
WebSocket(#[from] tokio_tungstenite::tungstenite::error::Error),
#[error("Insufficient peers")]
InsufficientPeers,
#[error("Substream doens't exist")]
SubstreamDoesntExist,
#[cfg(feature = "webrtc")]
#[error("`str0m` error: `{0}`")]
WebRtc(#[from] str0m::RtcError),
#[error("Remote peer disconnected")]
Disconnected,
#[error("Channel does not exist")]
ChannelDoesntExist,
#[error("Tried to dial self")]
TriedToDialSelf,
#[error("Litep2p is already connected to the peer")]
AlreadyConnected,
#[error("No addres available for `{0}`")]
NoAddressAvailable(PeerId),
#[error("Connection closed")]
ConnectionClosed,
#[cfg(feature = "quic")]
#[error("Quinn error: `{0}`")]
Quinn(quinn::ConnectionError),
#[error("Invalid certificate")]
InvalidCertificate,
#[error("Peer ID mismatch: expected `{0}`, got `{1}`")]
PeerIdMismatch(PeerId, PeerId),
#[error("Channel is clogged")]
ChannelClogged,
#[error("Connection doesn't exist: `{0:?}`")]
ConnectionDoesntExist(ConnectionId),
#[error("Exceeded connection limits `{0:?}`")]
ConnectionLimit(ConnectionLimitsError),
#[error("Failed to dial peer immediately")]
ImmediateDialError(#[from] ImmediateDialError),
#[error("Cannot read system DNS config: `{0}`")]
CannotReadSystemDnsConfig(hickory_resolver::ResolveError),
}
/// Error type for address parsing.
#[derive(Debug, thiserror::Error)]
pub enum AddressError {
    /// The provided address does not correspond to the transport protocol.
    ///
    /// For example, this can happen when the address used the UDP protocol but
    /// the handling transport only allows TCP connections.
    #[error("Invalid address for protocol")]
    InvalidProtocol,
    /// The provided address is not a valid URL.
    #[error("Invalid URL")]
    InvalidUrl,
    /// The provided address does not include a peer ID.
    #[error("`PeerId` missing from the address")]
    PeerIdMissing,
    /// No address is available for the provided peer ID.
    #[error("Address not available")]
    AddressNotAvailable,
    /// The provided address contains a multihash that is not a valid peer ID.
    #[error("Multihash does not contain a valid peer ID : `{0:?}`")]
    InvalidPeerId(Multihash),
}
/// Error type for parsing wire-format messages and keys.
#[derive(Debug, thiserror::Error, PartialEq)]
pub enum ParseError {
    /// The provided protobuf message cannot be decoded.
    #[error("Failed to decode protobuf message: `{0:?}`")]
    ProstDecodeError(#[from] prost::DecodeError),
    /// The provided protobuf message cannot be encoded.
    #[error("Failed to encode protobuf message: `{0:?}`")]
    ProstEncodeError(#[from] prost::EncodeError),
    /// The protobuf message contains an unexpected key type.
    ///
    /// This error can happen when:
    /// - The provided key type is not recognized.
    /// - The provided key type is recognized but not supported.
    #[error("Unknown key type from protobuf message: `{0}`")]
    UnknownKeyType(i32),
    /// The public key bytes are invalid and cannot be parsed.
    ///
    /// This error can happen when:
    /// - The received number of bytes is not equal to the expected number of bytes (32 bytes).
    /// - The bytes are not a valid Ed25519 public key.
    /// - Length of the public key is not represented by 2 bytes (WebRTC specific).
    #[error("Invalid public key")]
    InvalidPublicKey,
    /// The provided data has an invalid format.
    ///
    /// This error is protocol specific.
    #[error("Invalid data")]
    InvalidData,
    /// The provided reply length is not valid
    #[error("Invalid reply length")]
    InvalidReplyLength,
}
/// Error type for substream operations.
#[derive(Debug, thiserror::Error)]
pub enum SubstreamError {
    /// The underlying connection was closed.
    #[error("Connection closed")]
    ConnectionClosed,
    /// The channel towards the connection is full.
    #[error("Connection channel clogged")]
    ChannelClogged,
    /// No connection is open to the given peer.
    #[error("Connection to peer does not exist: `{0}`")]
    PeerDoesNotExist(PeerId),
    /// I/O error on the substream.
    #[error("I/O error: `{0}`")]
    IoError(ErrorKind),
    /// Error reported by the yamux multiplexer for the given substream direction.
    #[error("yamux error: `{0}`")]
    YamuxError(crate::yamux::ConnectionError, Direction),
    /// Reading from the substream failed.
    #[error("Failed to read from substream, substream id `{0:?}`")]
    ReadFailure(Option<SubstreamId>),
    /// Writing to the substream failed.
    #[error("Failed to write to substream, substream id `{0:?}`")]
    WriteFailure(Option<SubstreamId>),
    /// Error during protocol or handshake negotiation.
    #[error("Negotiation error: `{0:?}`")]
    NegotiationError(#[from] NegotiationError),
}
// Libp2p yamux does not implement PartialEq for ConnectionError.
// The manual implementation below compares the yamux error structurally, falling
// back to comparing `io::ErrorKind`s for the wrapped I/O errors.
impl PartialEq for SubstreamError {
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (Self::ConnectionClosed, Self::ConnectionClosed) => true,
            (Self::ChannelClogged, Self::ChannelClogged) => true,
            (Self::PeerDoesNotExist(lhs), Self::PeerDoesNotExist(rhs)) => lhs == rhs,
            (Self::IoError(lhs), Self::IoError(rhs)) => lhs == rhs,
            (Self::YamuxError(lhs, lhs_1), Self::YamuxError(rhs, rhs_1)) => {
                // the substream directions must match before inspecting the yamux error
                if lhs_1 != rhs_1 {
                    return false;
                }
                match (lhs, rhs) {
                    (
                        crate::yamux::ConnectionError::Io(lhs),
                        crate::yamux::ConnectionError::Io(rhs),
                    ) => lhs.kind() == rhs.kind(),
                    (
                        crate::yamux::ConnectionError::Decode(lhs),
                        crate::yamux::ConnectionError::Decode(rhs),
                    ) => match (lhs, rhs) {
                        (
                            crate::yamux::FrameDecodeError::Io(lhs),
                            crate::yamux::FrameDecodeError::Io(rhs),
                        ) => lhs.kind() == rhs.kind(),
                        (
                            crate::yamux::FrameDecodeError::FrameTooLarge(lhs),
                            crate::yamux::FrameDecodeError::FrameTooLarge(rhs),
                        ) => lhs == rhs,
                        (
                            crate::yamux::FrameDecodeError::Header(lhs),
                            crate::yamux::FrameDecodeError::Header(rhs),
                        ) => match (lhs, rhs) {
                            (
                                crate::yamux::HeaderDecodeError::Version(lhs),
                                crate::yamux::HeaderDecodeError::Version(rhs),
                            ) => lhs == rhs,
                            (
                                crate::yamux::HeaderDecodeError::Type(lhs),
                                crate::yamux::HeaderDecodeError::Type(rhs),
                            ) => lhs == rhs,
                            // mismatched header error variants are unequal
                            _ => false,
                        },
                        // mismatched frame decode error variants are unequal
                        _ => false,
                    },
                    (
                        crate::yamux::ConnectionError::NoMoreStreamIds,
                        crate::yamux::ConnectionError::NoMoreStreamIds,
                    ) => true,
                    (
                        crate::yamux::ConnectionError::Closed,
                        crate::yamux::ConnectionError::Closed,
                    ) => true,
                    (
                        crate::yamux::ConnectionError::TooManyStreams,
                        crate::yamux::ConnectionError::TooManyStreams,
                    ) => true,
                    // mismatched connection error variants are unequal
                    _ => false,
                }
            }
            (Self::ReadFailure(lhs), Self::ReadFailure(rhs)) => lhs == rhs,
            (Self::WriteFailure(lhs), Self::WriteFailure(rhs)) => lhs == rhs,
            (Self::NegotiationError(lhs), Self::NegotiationError(rhs)) => lhs == rhs,
            // mismatched variants are never equal
            _ => false,
        }
    }
}
/// Error during the negotiation phase.
#[derive(Debug, thiserror::Error)]
pub enum NegotiationError {
/// Error occurred during the multistream-select phase of the negotiation.
#[error("multistream-select error: `{0:?}`")]
MultistreamSelectError(#[from] crate::multistream_select::NegotiationError),
/// Error occurred during the Noise handshake negotiation.
#[error("multistream-select error: `{0:?}`")]
SnowError(#[from] snow::Error),
/// The peer ID was not provided by the noise handshake.
#[error("`PeerId` missing from Noise handshake")]
PeerIdMissing,
/// The remote peer ID is not the same as the one expected.
#[error("The signature of the remote identity's public key does not verify")]
BadSignature,
/// The negotiation operation timed out.
#[error("Operation timed out")]
Timeout,
/// The message provided over the wire has an invalid format or is unsupported.
#[error("Parse error: `{0}`")]
ParseError(#[from] ParseError),
/// An I/O error occurred during the negotiation process.
#[error("I/O error: `{0}`")]
IoError(ErrorKind),
/// Expected a different state during the negotiation process.
#[error("Expected a different state")]
StateMismatch,
/// The noise handshake provided a different peer ID than the one expected in the dialing
/// address.
#[error("Peer ID mismatch: expected `{0}`, got `{1}`")]
PeerIdMismatch(PeerId, PeerId),
/// Error specific to the QUIC transport.
#[cfg(feature = "quic")]
#[error("QUIC error: `{0}`")]
Quic(#[from] QuicError),
/// Error specific to the WebSocket transport.
#[cfg(feature = "websocket")]
#[error("WebSocket error: `{0}`")]
WebSocket(#[from] tokio_tungstenite::tungstenite::error::Error),
}
// Manual implementation: some wrapped error types do not implement `PartialEq`,
// so they are compared by discriminant or by `io::ErrorKind` where possible.
impl PartialEq for NegotiationError {
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (Self::MultistreamSelectError(lhs), Self::MultistreamSelectError(rhs)) => lhs == rhs,
            (Self::SnowError(lhs), Self::SnowError(rhs)) => lhs == rhs,
            (Self::ParseError(lhs), Self::ParseError(rhs)) => lhs == rhs,
            (Self::IoError(lhs), Self::IoError(rhs)) => lhs == rhs,
            (Self::PeerIdMismatch(lhs, lhs_1), Self::PeerIdMismatch(rhs, rhs_1)) =>
                lhs == rhs && lhs_1 == rhs_1,
            #[cfg(feature = "quic")]
            (Self::Quic(lhs), Self::Quic(rhs)) => lhs == rhs,
            // tungstenite errors carry payloads without `PartialEq`; compare variants only
            #[cfg(feature = "websocket")]
            (Self::WebSocket(lhs), Self::WebSocket(rhs)) =>
                core::mem::discriminant(lhs) == core::mem::discriminant(rhs),
            // remaining variants are data-less, so discriminant equality suffices;
            // mismatched variants compare unequal here as well
            _ => core::mem::discriminant(self) == core::mem::discriminant(other),
        }
    }
}
/// Error type for the notification protocol.
#[derive(Debug, thiserror::Error)]
pub enum NotificationError {
    /// The peer already exists.
    #[error("Peer already exists")]
    PeerAlreadyExists,
    /// The peer is in an invalid state for the attempted operation.
    #[error("Peer is in invalid state")]
    InvalidState,
    /// The notification channel is full.
    #[error("Notifications clogged")]
    NotificationsClogged,
    /// The notification stream to the peer was closed.
    #[error("Notification stream closed")]
    NotificationStreamClosed(PeerId),
}
/// The error type for dialing a peer.
///
/// This error is reported via the litep2p events after performing
/// a network dialing operation.
#[derive(Debug, thiserror::Error)]
pub enum DialError {
    /// The dialing operation timed out.
    ///
    /// This error indicates that the `connection_open_timeout` from the protocol configuration
    /// was exceeded.
    #[error("Dial timed out")]
    Timeout,
    /// The provided address for dialing is invalid.
    #[error("Address error: `{0}`")]
    AddressError(#[from] AddressError),
    /// An error occurred during DNS lookup operation.
    ///
    /// The address provided may be valid, however it failed to resolve to a concrete IP address.
    /// This error may be recoverable.
    #[error("DNS lookup error for `{0}`")]
    DnsError(#[from] DnsError),
    /// An error occurred during the negotiation process (handshake or protocol selection).
    #[error("Negotiation error: `{0}`")]
    NegotiationError(#[from] NegotiationError),
}
/// Dialing resulted in an immediate error before performing any network operations.
///
/// These errors are produced synchronously, before any network I/O is attempted.
#[derive(Debug, thiserror::Error, Copy, Clone, Eq, PartialEq)]
pub enum ImmediateDialError {
    /// The provided address does not include a peer ID.
    #[error("`PeerId` missing from the address")]
    PeerIdMissing,
    /// The peer ID provided in the address is the same as the local peer ID.
    #[error("Tried to dial self")]
    TriedToDialSelf,
    /// Cannot dial an already connected peer.
    #[error("Already connected to peer")]
    AlreadyConnected,
    /// Cannot dial a peer that does not have any address available.
    #[error("No address available for peer")]
    NoAddressAvailable,
    /// The essential task was closed.
    #[error("TaskClosed")]
    TaskClosed,
    /// The channel is clogged.
    #[error("Connection channel clogged")]
    ChannelClogged,
}
/// Error during the QUIC transport negotiation.
#[cfg(feature = "quic")]
#[derive(Debug, thiserror::Error, PartialEq)]
pub enum QuicError {
    /// The provided certificate is invalid.
    #[error("Invalid certificate")]
    InvalidCertificate,
    /// The connection was lost while negotiating.
    #[error("Failed to negotiate QUIC: `{0}`")]
    ConnectionError(#[from] quinn::ConnectionError),
    /// The connection could not be established.
    #[error("Failed to connect to peer: `{0}`")]
    ConnectError(#[from] quinn::ConnectError),
}
/// Error during DNS resolution.
#[derive(Debug, thiserror::Error, PartialEq)]
pub enum DnsError {
    /// The DNS resolution failed to resolve the provided URL.
    #[error("DNS failed to resolve url `{0}`")]
    ResolveError(String),
    /// The DNS expected a different IP address version.
    ///
    /// For example, DNSv4 was expected but DNSv6 was provided.
    #[error("DNS type is different from the provided IP address")]
    IpVersionMismatch,
}
impl From<MultihashGeneric<64>> for Error {
fn from(hash: MultihashGeneric<64>) -> Self {
Error::AddressError(AddressError::InvalidPeerId(hash))
}
}
impl From<io::Error> for Error {
fn from(error: io::Error) -> Error {
Error::IoError(error.kind())
}
}
impl From<io::Error> for SubstreamError {
fn from(error: io::Error) -> SubstreamError {
SubstreamError::IoError(error.kind())
}
}
impl From<io::Error> for DialError {
fn from(error: io::Error) -> Self {
DialError::NegotiationError(NegotiationError::IoError(error.kind()))
}
}
// Multistream-select failures are funneled through `NegotiationError`.
impl From<crate::multistream_select::NegotiationError> for Error {
    fn from(err: crate::multistream_select::NegotiationError) -> Self {
        Self::NegotiationError(NegotiationError::MultistreamSelectError(err))
    }
}
impl From<snow::Error> for Error {
fn from(error: snow::Error) -> Self {
Error::NegotiationError(NegotiationError::SnowError(error))
}
}
// A failed channel send means the receiving task is gone; the payload itself
// is not interesting.
impl<T> From<tokio::sync::mpsc::error::SendError<T>> for Error {
    fn from(_: tokio::sync::mpsc::error::SendError<T>) -> Self {
        Self::EssentialTaskClosed
    }
}
// A dropped oneshot sender also indicates the counterpart task is gone.
impl From<tokio::sync::oneshot::error::RecvError> for Error {
    fn from(_: tokio::sync::oneshot::error::RecvError) -> Self {
        Self::EssentialTaskClosed
    }
}
// Protobuf decoding failures surface as parse errors.
impl From<prost::DecodeError> for Error {
    fn from(err: prost::DecodeError) -> Self {
        Self::ParseError(ParseError::ProstDecodeError(err))
    }
}
// Protobuf encoding failures surface as parse errors as well.
impl From<prost::EncodeError> for Error {
    fn from(err: prost::EncodeError) -> Self {
        Self::ParseError(ParseError::ProstEncodeError(err))
    }
}
impl From<io::Error> for NegotiationError {
fn from(error: io::Error) -> Self {
NegotiationError::IoError(error.kind())
}
}
impl From<ParseError> for Error {
fn from(error: ParseError) -> Self {
Error::ParseError(error)
}
}
impl From<MultihashGeneric<64>> for AddressError {
fn from(hash: MultihashGeneric<64>) -> Self {
AddressError::InvalidPeerId(hash)
}
}
#[cfg(feature = "quic")]
impl From<quinn::ConnectionError> for Error {
    fn from(error: quinn::ConnectionError) -> Self {
        // A QUIC timeout maps onto the generic timeout error; everything else
        // is preserved as-is.
        if let quinn::ConnectionError::TimedOut = error {
            Error::Timeout
        } else {
            Error::Quinn(error)
        }
    }
}
#[cfg(feature = "quic")]
impl From<quinn::ConnectionError> for DialError {
    fn from(error: quinn::ConnectionError) -> Self {
        // Timeouts get their own variant; other failures count as negotiation
        // errors.
        if let quinn::ConnectionError::TimedOut = error {
            DialError::Timeout
        } else {
            DialError::NegotiationError(NegotiationError::Quic(error.into()))
        }
    }
}
#[cfg(feature = "quic")]
impl From<quinn::ConnectError> for DialError {
    fn from(err: quinn::ConnectError) -> Self {
        // Connect failures always map to a negotiation error.
        Self::NegotiationError(NegotiationError::Quic(err.into()))
    }
}
impl From<ConnectionLimitsError> for Error {
fn from(error: ConnectionLimitsError) -> Self {
Error::ConnectionLimit(error)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use tokio::sync::mpsc::{channel, Sender};
    #[tokio::test]
    async fn try_from_errors() {
        // Dropping the receiver makes the send below fail; the `From` impl for
        // `SendError` must map that failure to `Error::EssentialTaskClosed`.
        let (tx, rx) = channel(1);
        drop(rx);
        async fn test(tx: Sender<()>) -> crate::Result<()> {
            tx.send(()).await.map_err(From::from)
        }
        match test(tx).await.unwrap_err() {
            Error::EssentialTaskClosed => {}
            _ => panic!("invalid error"),
        }
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/peer_id.rs | src/peer_id.rs | // Copyright 2018 Parity Technologies (UK) Ltd.
// Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
#![allow(clippy::wrong_self_convention)]
use crate::crypto::PublicKey;
use multiaddr::{Multiaddr, Protocol};
use multihash::{Code, Error, Multihash, MultihashDigest};
use rand::Rng;
use serde::{Deserialize, Serialize};
use thiserror::Error;
use std::{convert::TryFrom, fmt, str::FromStr};
/// Public keys with byte-lengths smaller than `MAX_INLINE_KEY_LENGTH` will be
/// automatically used as the peer id using an identity multihash.
// NOTE(review): 42 presumably matches the inlining threshold from the libp2p
// peer-ids spec linked on `PeerId` — confirm against the spec before changing.
const MAX_INLINE_KEY_LENGTH: usize = 42;
/// Identifier of a peer of the network.
///
/// The data is a CIDv0 compatible multihash of the protobuf encoded public key of the peer
/// as specified in [specs/peer-ids](https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md).
#[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct PeerId {
    // Either an identity multihash of the protobuf-encoded key (small keys) or
    // its SHA2-256 digest; see `PeerId::from_public_key_protobuf`.
    multihash: Multihash,
}
impl fmt::Debug for PeerId {
    /// Formats as `PeerId("<base58>")`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_tuple("PeerId");
        builder.field(&self.to_base58());
        builder.finish()
    }
}
impl fmt::Display for PeerId {
    /// Formats as the bare base-58 string.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.to_base58())
    }
}
impl PeerId {
    /// Builds a `PeerId` from a public key.
    pub fn from_public_key(key: &PublicKey) -> PeerId {
        Self::from_public_key_protobuf(&key.to_protobuf_encoding())
    }
    /// Builds a `PeerId` from a public key in protobuf encoding.
    pub fn from_public_key_protobuf(key_enc: &[u8]) -> PeerId {
        // Small keys are inlined verbatim via an identity multihash; larger
        // keys are hashed with SHA2-256.
        let hash_algorithm = if key_enc.len() <= MAX_INLINE_KEY_LENGTH {
            Code::Identity
        } else {
            Code::Sha2_256
        };
        let multihash = hash_algorithm.digest(key_enc);
        PeerId { multihash }
    }
    /// Parses a `PeerId` from bytes.
    pub fn from_bytes(data: &[u8]) -> Result<PeerId, Error> {
        PeerId::from_multihash(Multihash::from_bytes(data)?)
            .map_err(|mh| Error::UnsupportedCode(mh.code()))
    }
    /// Tries to turn a `Multihash` into a `PeerId`.
    ///
    /// If the multihash does not use a valid hashing algorithm for peer IDs,
    /// or the hash value does not satisfy the constraints for a hashed
    /// peer ID, it is returned as an `Err`.
    pub fn from_multihash(multihash: Multihash) -> Result<PeerId, Multihash> {
        match Code::try_from(multihash.code()) {
            Ok(Code::Sha2_256) => Ok(PeerId { multihash }),
            // Identity multihashes are only accepted when the inlined payload
            // is small enough to have been produced by
            // `from_public_key_protobuf`.
            Ok(Code::Identity) if multihash.digest().len() <= MAX_INLINE_KEY_LENGTH =>
                Ok(PeerId { multihash }),
            _ => Err(multihash),
        }
    }
    /// Tries to extract a [`PeerId`] from the given [`Multiaddr`].
    ///
    /// In case the given [`Multiaddr`] ends with `/p2p/<peer-id>`, this function
    /// will return the encapsulated [`PeerId`], otherwise it will return `None`.
    pub fn try_from_multiaddr(address: &Multiaddr) -> Option<PeerId> {
        // Only the final protocol component is inspected.
        address.iter().last().and_then(|p| match p {
            Protocol::P2p(hash) => PeerId::from_multihash(hash).ok(),
            _ => None,
        })
    }
    /// Generates a random peer ID from a cryptographically secure PRNG.
    ///
    /// This is useful for randomly walking on a DHT, or for testing purposes.
    pub fn random() -> PeerId {
        let peer_id = rand::thread_rng().gen::<[u8; 32]>();
        PeerId {
            // 32 random bytes are within the identity-inlining limit, so the
            // wrap below cannot fail.
            multihash: Multihash::wrap(Code::Identity.into(), &peer_id)
                .expect("The digest size is never too large"),
        }
    }
    /// Returns a raw bytes representation of this `PeerId`.
    pub fn to_bytes(&self) -> Vec<u8> {
        self.multihash.to_bytes()
    }
    /// Returns a base-58 encoded string of this `PeerId`.
    pub fn to_base58(&self) -> String {
        bs58::encode(self.to_bytes()).into_string()
    }
    /// Checks whether the public key passed as parameter matches the public key of this `PeerId`.
    ///
    /// Returns `None` if this `PeerId`s hash algorithm is not supported when encoding the
    /// given public key, otherwise `Some` boolean as the result of an equality check.
    // NOTE(review): as written this never actually returns `None` — an
    // unsupported code panics via the `expect` instead. Confirm whether the
    // `Option` return is still required by callers.
    pub fn is_public_key(&self, public_key: &PublicKey) -> Option<bool> {
        let alg = Code::try_from(self.multihash.code())
            .expect("Internal multihash is always a valid `Code`");
        let enc = public_key.to_protobuf_encoding();
        Some(alg.digest(&enc) == self.multihash)
    }
}
impl From<PublicKey> for PeerId {
fn from(key: PublicKey) -> PeerId {
PeerId::from_public_key(&key)
}
}
impl From<&PublicKey> for PeerId {
fn from(key: &PublicKey) -> PeerId {
PeerId::from_public_key(key)
}
}
impl TryFrom<Vec<u8>> for PeerId {
    type Error = Vec<u8>;
    /// Parses the bytes, handing the buffer back to the caller on failure.
    fn try_from(value: Vec<u8>) -> Result<Self, Self::Error> {
        match PeerId::from_bytes(&value) {
            Ok(peer) => Ok(peer),
            Err(_) => Err(value),
        }
    }
}
impl TryFrom<Multihash> for PeerId {
type Error = Multihash;
fn try_from(value: Multihash) -> Result<Self, Self::Error> {
PeerId::from_multihash(value)
}
}
// Borrow the underlying multihash without copying.
impl AsRef<Multihash> for PeerId {
    fn as_ref(&self) -> &Multihash {
        &self.multihash
    }
}
impl From<PeerId> for Multihash {
fn from(peer_id: PeerId) -> Self {
peer_id.multihash
}
}
impl From<PeerId> for Vec<u8> {
fn from(peer_id: PeerId) -> Self {
peer_id.to_bytes()
}
}
impl Serialize for PeerId {
    /// Human-readable formats get base-58 text; binary formats get raw bytes.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match serializer.is_human_readable() {
            true => serializer.serialize_str(&self.to_base58()),
            false => serializer.serialize_bytes(&self.to_bytes()),
        }
    }
}
impl<'de> Deserialize<'de> for PeerId {
    /// Mirrors [`Serialize`]: base-58 text for human-readable formats, raw
    /// multihash bytes otherwise.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // Brings `serde::de::Error` and `Unexpected` into scope; the `Error`
        // bound on the visitor methods below is `serde::de::Error`.
        use serde::de::*;
        struct PeerIdVisitor;
        impl Visitor<'_> for PeerIdVisitor {
            type Value = PeerId;
            fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {
                write!(f, "valid peer id")
            }
            fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
            where
                E: Error,
            {
                PeerId::from_bytes(v).map_err(|_| Error::invalid_value(Unexpected::Bytes(v), &self))
            }
            fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
            where
                E: Error,
            {
                PeerId::from_str(v).map_err(|_| Error::invalid_value(Unexpected::Str(v), &self))
            }
        }
        if deserializer.is_human_readable() {
            deserializer.deserialize_str(PeerIdVisitor)
        } else {
            deserializer.deserialize_bytes(PeerIdVisitor)
        }
    }
}
/// Error returned when parsing a [`PeerId`] from a base-58 string.
#[derive(Debug, Error)]
pub enum ParseError {
    #[error("base-58 decode error: {0}")]
    B58(#[from] bs58::decode::Error),
    #[error("decoding multihash failed")]
    MultiHash,
}
impl FromStr for PeerId {
    type Err = ParseError;

    /// Parses a base-58 encoded peer ID string.
    #[inline]
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let decoded = bs58::decode(s).into_vec()?;
        Self::from_bytes(&decoded).map_err(|_| ParseError::MultiHash)
    }
}
#[cfg(test)]
mod tests {
    use crate::{crypto::ed25519::Keypair, PeerId};
    use multiaddr::{Multiaddr, Protocol};
    use multihash::Multihash;
    #[test]
    fn peer_id_is_public_key() {
        let key = Keypair::generate().public();
        let peer_id = key.to_peer_id();
        assert_eq!(peer_id.is_public_key(&key.into()), Some(true));
    }
    #[test]
    fn peer_id_into_bytes_then_from_bytes() {
        let peer_id = Keypair::generate().public().to_peer_id();
        let second = PeerId::from_bytes(&peer_id.to_bytes()).unwrap();
        assert_eq!(peer_id, second);
    }
    #[test]
    fn peer_id_to_base58_then_back() {
        let peer_id = Keypair::generate().public().to_peer_id();
        let second: PeerId = peer_id.to_base58().parse().unwrap();
        assert_eq!(peer_id, second);
    }
    #[test]
    fn random_peer_id_is_valid() {
        // `random()` must always yield a multihash that survives a bytes
        // round-trip through `from_bytes`.
        for _ in 0..5000 {
            let peer_id = PeerId::random();
            assert_eq!(peer_id, PeerId::from_bytes(&peer_id.to_bytes()).unwrap());
        }
    }
    #[test]
    fn peer_id_from_multiaddr() {
        let address = "[::1]:1337".parse::<std::net::SocketAddr>().unwrap();
        let peer = PeerId::random();
        let address = Multiaddr::empty()
            .with(Protocol::from(address.ip()))
            .with(Protocol::Tcp(address.port()))
            .with(Protocol::P2p(Multihash::from(peer)));
        assert_eq!(peer, PeerId::try_from_multiaddr(&address).unwrap());
    }
    #[test]
    fn peer_id_from_multiaddr_no_peer_id() {
        // No trailing `/p2p/<peer>` component: extraction must fail.
        let address = "[::1]:1337".parse::<std::net::SocketAddr>().unwrap();
        let address = Multiaddr::empty()
            .with(Protocol::from(address.ip()))
            .with(Protocol::Tcp(address.port()));
        assert!(PeerId::try_from_multiaddr(&address).is_none());
    }
    #[test]
    fn peer_id_from_bytes() {
        let peer = PeerId::random();
        let bytes = peer.to_bytes();
        assert_eq!(PeerId::try_from(bytes).unwrap(), peer);
    }
    #[test]
    fn peer_id_as_multihash() {
        let peer = PeerId::random();
        let multihash = Multihash::from(peer);
        assert_eq!(&multihash, peer.as_ref());
        assert_eq!(PeerId::try_from(multihash).unwrap(), peer);
    }
    #[test]
    fn serialize_deserialize() {
        let peer = PeerId::random();
        let serialized = serde_json::to_string(&peer).unwrap();
        let deserialized = serde_json::from_str(&serialized).unwrap();
        assert_eq!(peer, deserialized);
    }
    #[test]
    fn invalid_multihash() {
        // A structurally valid multihash whose code is neither Identity nor
        // SHA2-256 must be rejected by `from_multihash`.
        fn test() -> crate::Result<PeerId> {
            let bytes = [
                0x16, 0x20, 0x64, 0x4b, 0xcc, 0x7e, 0x56, 0x43, 0x73, 0x04, 0x09, 0x99, 0xaa, 0xc8,
                0x9e, 0x76, 0x22, 0xf3, 0xca, 0x71, 0xfb, 0xa1, 0xd9, 0x72, 0xfd, 0x94, 0xa3, 0x1c,
                0x3b, 0xfb, 0xf2, 0x4e, 0x39, 0x38,
            ];
            PeerId::from_multihash(Multihash::from_bytes(&bytes).unwrap()).map_err(From::from)
        }
        let _error = test().unwrap_err();
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/types.rs | src/types.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Types used by [`Litep2p`](`crate::Litep2p`) protocols/transport.
use rand::Rng;
// Re-export the types used in public interfaces.
pub mod multiaddr {
pub use multiaddr::{Error, Iter, Multiaddr, Onion3Addr, Protocol};
}
pub mod multihash {
pub use multihash::{Code, Error, Multihash, MultihashDigest};
}
pub mod cid {
pub use cid::{multihash::Multihash, Cid, CidGeneric, Error, Result, Version};
}
pub mod protocol;
/// Substream ID.
// Newtype wrapper over a plain `usize` counter.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct SubstreamId(usize);
impl Default for SubstreamId {
fn default() -> Self {
Self::new()
}
}
impl SubstreamId {
/// Create new [`SubstreamId`].
pub fn new() -> Self {
SubstreamId(0usize)
}
/// Get [`SubstreamId`] from a number that can be converted into a `usize`.
pub fn from<T: Into<usize>>(value: T) -> Self {
SubstreamId(value.into())
}
}
/// Request ID.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
// Serde derives are only required under the `fuzz` feature.
#[cfg_attr(feature = "fuzz", derive(serde::Serialize, serde::Deserialize))]
pub struct RequestId(usize);
impl RequestId {
/// Get [`RequestId`] from a number that can be converted into a `usize`.
pub fn from<T: Into<usize>>(value: T) -> Self {
RequestId(value.into())
}
}
/// Connection ID.
// Newtype wrapper over a `usize`; random values come from `ConnectionId::random`.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct ConnectionId(usize);
impl ConnectionId {
/// Create new [`ConnectionId`].
pub fn new() -> Self {
ConnectionId(0usize)
}
/// Generate random `ConnectionId`.
pub fn random() -> Self {
ConnectionId(rand::thread_rng().gen::<usize>())
}
}
impl Default for ConnectionId {
fn default() -> Self {
Self::new()
}
}
impl From<usize> for ConnectionId {
fn from(value: usize) -> Self {
ConnectionId(value)
}
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/executor.rs | src/executor.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Behavior defining how futures running in the background should be executed.
use std::{future::Future, pin::Pin};
/// Trait which defines the interface the executor must implement.
pub trait Executor: Send + Sync {
    /// Start executing a future in the background.
    fn run(&self, future: Pin<Box<dyn Future<Output = ()> + Send>>);
    /// Start executing a future in the background and give the future a name.
    fn run_with_name(&self, name: &'static str, future: Pin<Box<dyn Future<Output = ()> + Send>>);
}
/// Default executor, defaults to calling `tokio::spawn()`.
// Zero-sized: all spawning goes straight to the ambient tokio runtime.
pub(crate) struct DefaultExecutor;
impl Executor for DefaultExecutor {
    fn run(&self, future: Pin<Box<dyn Future<Output = ()> + Send>>) {
        tokio::spawn(future);
    }
    // The task name is ignored by the default executor; behavior is identical
    // to `run`.
    fn run_with_name(&self, _: &'static str, future: Pin<Box<dyn Future<Output = ()> + Send>>) {
        tokio::spawn(future);
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use tokio::sync::mpsc::channel;
    #[tokio::test]
    async fn run_with_name() {
        let executor = DefaultExecutor;
        let (tx, mut rx) = channel(1);
        let sender = tx.clone();
        executor.run(Box::pin(async move {
            sender.send(1337usize).await.unwrap();
        }));
        // `run_with_name` must behave like `run`; the name is discarded.
        executor.run_with_name(
            "test",
            Box::pin(async move {
                tx.send(1337usize).await.unwrap();
            }),
        );
        assert_eq!(rx.recv().await.unwrap(), 1337usize);
        assert_eq!(rx.recv().await.unwrap(), 1337usize);
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/addresses.rs | src/addresses.rs | // Copyright 2024 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use std::{collections::HashSet, sync::Arc};
use multiaddr::{Multiaddr, Protocol};
use parking_lot::RwLock;
use crate::PeerId;
/// Set of the public addresses of the local node.
///
/// The format of the addresses stored in the set contain the local peer ID.
/// This requirement is enforced by the [`PublicAddresses::add_address`] method,
/// that will add the local peer ID to the address if it is missing.
///
/// # Note
///
/// - The addresses are reported to the identify protocol and are used by other nodes to establish a
/// connection with the local node.
///
/// - Users must ensure that the addresses are reachable from the network.
#[derive(Debug, Clone)]
pub struct PublicAddresses {
    // Shared set: clones of `PublicAddresses` all observe the same addresses.
    pub(crate) inner: Arc<RwLock<HashSet<Multiaddr>>>,
    // Used to validate/append the `/p2p/<peer>` suffix on insertion.
    local_peer_id: PeerId,
}
impl PublicAddresses {
    /// Creates new [`PublicAddresses`] from the given peer ID.
    pub(crate) fn new(local_peer_id: PeerId) -> Self {
        Self {
            inner: Arc::new(RwLock::new(HashSet::new())),
            local_peer_id,
        }
    }
    /// Add a public address to the list of addresses.
    ///
    /// The address must contain the local peer ID, otherwise an error is returned.
    /// In case the address does not contain any peer ID, it will be added.
    ///
    /// Returns true if the address was added, false if it was already present.
    pub fn add_address(&self, address: Multiaddr) -> Result<bool, InsertionError> {
        // Normalize first so the set only ever stores addresses carrying the
        // local `/p2p/<peer>` suffix.
        let address = ensure_local_peer(address, self.local_peer_id)?;
        Ok(self.inner.write().insert(address))
    }
    /// Remove the exact public address.
    ///
    /// The provided address must contain the local peer ID.
    pub fn remove_address(&self, address: &Multiaddr) -> bool {
        self.inner.write().remove(address)
    }
    /// Returns a vector of the available listen addresses.
    pub fn get_addresses(&self) -> Vec<Multiaddr> {
        self.inner.read().iter().cloned().collect()
    }
}
/// Validates that `address` carries the local peer ID, appending it when the
/// address has no peer ID component at all.
///
/// Errors on empty addresses and on addresses carrying a foreign peer ID.
fn ensure_local_peer(
    mut address: Multiaddr,
    local_peer_id: PeerId,
) -> Result<Multiaddr, InsertionError> {
    if address.is_empty() {
        return Err(InsertionError::EmptyAddress);
    }
    match PeerId::try_from_multiaddr(&address) {
        // A peer ID is present but belongs to someone else.
        Some(peer_id) if peer_id != local_peer_id => Err(InsertionError::DifferentPeerId),
        // Already carries the local peer ID.
        Some(_) => Ok(address),
        // No peer ID component: append ours.
        None => {
            address.push(Protocol::P2p(local_peer_id.into()));
            Ok(address)
        }
    }
}
/// The error returned when an address cannot be inserted.
///
/// See [`PublicAddresses::add_address`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum InsertionError {
    /// The address is empty.
    EmptyAddress,
    /// The address contains a different peer ID than the local peer ID.
    DifferentPeerId,
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::str::FromStr;
    #[test]
    fn add_remove_contains() {
        let peer_id = PeerId::random();
        let addresses = PublicAddresses::new(peer_id);
        let address = Multiaddr::from_str("/dns/domain1.com/tcp/30333").unwrap();
        let peer_address = Multiaddr::from_str("/dns/domain1.com/tcp/30333")
            .unwrap()
            .with(Protocol::P2p(peer_id.into()));
        assert!(!addresses.get_addresses().contains(&address));
        assert!(addresses.add_address(address.clone()).unwrap());
        // Adding the address a second time returns Ok(false).
        assert!(!addresses.add_address(address.clone()).unwrap());
        // The stored form carries the `/p2p` suffix, so the bare address is
        // never present in the set.
        assert!(!addresses.get_addresses().contains(&address));
        assert!(addresses.get_addresses().contains(&peer_address));
        addresses.remove_address(&peer_address);
        assert!(!addresses.get_addresses().contains(&peer_address));
    }
    #[test]
    fn get_addresses() {
        let peer_id = PeerId::random();
        let addresses = PublicAddresses::new(peer_id);
        let address1 = Multiaddr::from_str("/dns/domain1.com/tcp/30333").unwrap();
        let address2 = Multiaddr::from_str("/dns/domain2.com/tcp/30333").unwrap();
        // Addresses different than the local peer ID are ignored.
        let address3 = Multiaddr::from_str(
            "/dns/domain2.com/tcp/30333/p2p/12D3KooWSueCPH3puP2PcvqPJdNaDNF3jMZjtJtDiSy35pWrbt5h",
        )
        .unwrap();
        assert!(addresses.add_address(address1.clone()).unwrap());
        assert!(addresses.add_address(address2.clone()).unwrap());
        addresses.add_address(address3.clone()).unwrap_err();
        let addresses = addresses.get_addresses();
        assert_eq!(addresses.len(), 2);
        assert!(addresses.contains(&address1.with(Protocol::P2p(peer_id.into()))));
        assert!(addresses.contains(&address2.with(Protocol::P2p(peer_id.into()))));
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/codec/unsigned_varint.rs | src/codec/unsigned_varint.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! [`unsigned-varint`](https://github.com/multiformats/unsigned-varint) codec.
use crate::error::Error;
use bytes::{Bytes, BytesMut};
use tokio_util::codec::{Decoder, Encoder};
use unsigned_varint::codec::UviBytes;
/// Unsigned varint codec.
pub struct UnsignedVarint {
    // Length-prefixed framing with an optional maximum frame size.
    codec: UviBytes<bytes::Bytes>,
}
impl UnsignedVarint {
    /// Create new [`UnsignedVarint`] codec.
    ///
    /// `max_size` bounds the length of an encoded/decoded frame; `None` keeps
    /// the `unsigned-varint` crate's default limit.
    pub fn new(max_size: Option<usize>) -> Self {
        let mut codec = UviBytes::<Bytes>::default();
        if let Some(max_size) = max_size {
            codec.set_max_len(max_size);
        }
        Self { codec }
    }
    /// Set maximum size for encoded/decoded values.
    pub fn with_max_size(max_size: usize) -> Self {
        // Delegate to `new()` so the size limit is configured in exactly one
        // place instead of duplicating the `set_max_len` logic.
        Self::new(Some(max_size))
    }
    /// Encode `payload` using `unsigned-varint`.
    ///
    /// # Panics
    ///
    /// Panics if the payload is longer than `u32::MAX` bytes.
    pub fn encode<T: Into<Bytes>>(payload: T) -> crate::Result<Vec<u8>> {
        let payload: Bytes = payload.into();
        assert!(payload.len() <= u32::MAX as usize);
        // `+ 4` reserves room for the varint length prefix; `BytesMut` grows
        // on its own if the prefix needs an extra byte.
        let mut bytes = BytesMut::with_capacity(payload.len() + 4);
        let mut codec = Self::new(None);
        codec.encode(payload, &mut bytes)?;
        Ok(bytes.into())
    }
    /// Decode `payload` into `BytesMut`.
    ///
    /// Returns [`Error::InvalidData`] if `payload` does not contain a complete
    /// length-prefixed frame.
    pub fn decode(payload: &mut BytesMut) -> crate::Result<BytesMut> {
        UviBytes::<Bytes>::default().decode(payload)?.ok_or(Error::InvalidData)
    }
}
impl Decoder for UnsignedVarint {
    type Item = BytesMut;
    type Error = Error;

    /// Delegates framing to the inner `UviBytes`, converting the I/O error.
    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        Ok(self.codec.decode(src)?)
    }
}
impl Encoder<Bytes> for UnsignedVarint {
    type Error = Error;

    /// Length-prefixes `item` into `dst`, converting the I/O error.
    fn encode(&mut self, item: Bytes, dst: &mut bytes::BytesMut) -> Result<(), Self::Error> {
        self.codec.encode(item, dst)?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::{Bytes, BytesMut, UnsignedVarint};
    #[test]
    fn max_size_respected() {
        let mut codec = UnsignedVarint::with_max_size(1024);
        // Exactly at the limit: accepted.
        {
            use tokio_util::codec::Encoder;
            let bytes_to_encode: Bytes = vec![0u8; 1024].into();
            let mut out_bytes = BytesMut::with_capacity(2048);
            assert!(codec.encode(bytes_to_encode, &mut out_bytes).is_ok());
        }
        // One byte over the limit: rejected.
        {
            use tokio_util::codec::Encoder;
            let bytes_to_encode: Bytes = vec![1u8; 1025].into();
            let mut out_bytes = BytesMut::with_capacity(2048);
            assert!(codec.encode(bytes_to_encode, &mut out_bytes).is_err());
        }
    }
    #[test]
    fn encode_decode_works() {
        // The static helpers and the `Encoder`/`Decoder` impls must agree on
        // the wire format.
        let encoded1 = UnsignedVarint::encode(vec![0u8; 512]).unwrap();
        let mut encoded2 = {
            use tokio_util::codec::Encoder;
            let mut codec = UnsignedVarint::with_max_size(512);
            let bytes_to_encode: Bytes = vec![0u8; 512].into();
            let mut out_bytes = BytesMut::with_capacity(2048);
            codec.encode(bytes_to_encode, &mut out_bytes).unwrap();
            out_bytes
        };
        assert_eq!(encoded1, encoded2);
        let decoded1 = UnsignedVarint::decode(&mut encoded2).unwrap();
        let decoded2 = {
            use tokio_util::codec::Decoder;
            let mut codec = UnsignedVarint::with_max_size(512);
            let mut encoded1 = BytesMut::from(&encoded1[..]);
            codec.decode(&mut encoded1).unwrap().unwrap()
        };
        assert_eq!(decoded1, decoded2);
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/codec/mod.rs | src/codec/mod.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Protocol codecs.
pub mod identity;
pub mod unsigned_varint;
/// Supported protocol codecs.
#[derive(Debug, Copy, Clone)]
pub enum ProtocolCodec {
    /// Identity codec where the argument denotes the payload size.
    Identity(usize),
    /// Unsigned varint where the argument denotes the maximum message size, if specified.
    UnsignedVarint(Option<usize>),
    /// Protocol doesn't need framing for its messages or is using a custom codec.
    Unspecified,
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/codec/identity.rs | src/codec/identity.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Identity codec that reads/writes `N` bytes from/to source/sink.
use crate::error::Error;
use bytes::{BufMut, Bytes, BytesMut};
use tokio_util::codec::{Decoder, Encoder};
/// Identity codec.
pub struct Identity {
    // Fixed frame size in bytes; invariant: non-zero (enforced by `Identity::new`).
    payload_len: usize,
}
impl Identity {
    /// Create new [`Identity`] codec.
    ///
    /// # Panics
    ///
    /// Panics if `payload_len` is zero.
    pub fn new(payload_len: usize) -> Self {
        assert!(payload_len != 0);
        Self { payload_len }
    }
    /// Encode `payload` using identity codec.
    ///
    /// This is a pure pass-through: no framing or length check is applied here.
    pub fn encode<T: Into<Bytes>>(payload: T) -> crate::Result<Vec<u8>> {
        let payload: Bytes = payload.into();
        Ok(payload.into())
    }
}
impl Decoder for Identity {
    type Item = BytesMut;
    type Error = Error;

    /// Yields exactly `payload_len` bytes once enough data has accumulated.
    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        // `new()` guarantees `payload_len > 0`, so an empty buffer also takes
        // the "need more data" path.
        if src.len() >= self.payload_len && !src.is_empty() {
            Ok(Some(src.split_to(self.payload_len)))
        } else {
            Ok(None)
        }
    }
}
impl Encoder<Bytes> for Identity {
    type Error = Error;

    /// Append `item` to `dst` unmodified.
    ///
    /// Rejects empty frames and frames larger than the configured payload size.
    fn encode(&mut self, item: Bytes, dst: &mut bytes::BytesMut) -> Result<(), Self::Error> {
        if item.is_empty() || item.len() > self.payload_len {
            return Err(Error::InvalidData);
        }
        dst.extend_from_slice(&item);
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Encoding a full-size payload copies it verbatim into the output buffer.
    #[test]
    fn encoding_works() {
        let mut codec = Identity::new(48);
        let mut out_buf = BytesMut::with_capacity(32);
        let bytes = Bytes::from(vec![0u8; 48]);
        assert!(codec.encode(bytes.clone(), &mut out_buf).is_ok());
        assert_eq!(out_buf.freeze(), bytes);
    }
    // Decoding consumes exactly `payload_len` bytes and returns them unchanged.
    #[test]
    fn decoding_works() {
        let mut codec = Identity::new(64);
        let bytes = vec![3u8; 64];
        let copy = bytes.clone();
        let mut bytes = BytesMut::from(&bytes[..]);
        let decoded = codec.decode(&mut bytes).unwrap().unwrap();
        assert_eq!(decoded, copy);
    }
    // A buffer shorter than `payload_len` yields `None` (wait for more data).
    #[test]
    fn decoding_smaller_payloads() {
        let mut codec = Identity::new(100);
        let bytes = [3u8; 64];
        let mut bytes = BytesMut::from(&bytes[..]);
        assert!(codec.decode(&mut bytes).unwrap().is_none());
    }
    // Empty frames are rejected by the encoder.
    #[test]
    fn empty_encode() {
        let mut codec = Identity::new(32);
        let mut out_buf = BytesMut::with_capacity(32);
        assert!(codec.encode(Bytes::new(), &mut out_buf).is_err());
    }
    // Decoding from an empty buffer yields `None`, not an error.
    #[test]
    fn decode_encode() {
        let mut codec = Identity::new(32);
        assert!(codec.decode(&mut BytesMut::new()).unwrap().is_none());
    }
    // The static `Identity::encode` helper returns the input bytes as-is.
    #[test]
    fn direct_encoding_works() {
        assert_eq!(
            Identity::encode(vec![1, 3, 3, 7]).unwrap(),
            vec![1, 3, 3, 7]
        );
    }
    // NOTE(review): `Identity::new` uses `assert!`, which also fires in release
    // builds, so the `cfg(debug_assertions)` gate here looks stricter than
    // necessary — confirm intent.
    #[test]
    #[should_panic]
    #[cfg(debug_assertions)]
    fn empty_identity_codec() {
        let _codec = Identity::new(0usize);
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/yamux/control.rs | src/yamux/control.rs | // Copyright (c) 2018-2019 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0 or MIT license, at your option.
//
// A copy of the Apache License, Version 2.0 is included in the software as
// LICENSE-APACHE and a copy of the MIT license is included in the software
// as LICENSE-MIT. You may also obtain a copy of the Apache License, Version 2.0
// at https://www.apache.org/licenses/LICENSE-2.0 and a copy of the MIT license
// at https://opensource.org/licenses/MIT.
use crate::yamux::{Connection, ConnectionError, Result, Stream, MAX_ACK_BACKLOG};
use futures::{
channel::{mpsc, oneshot},
prelude::*,
};
use std::{
pin::Pin,
task::{Context, Poll},
};
/// Logging target for the file.
const LOG_TARGET: &str = "litep2p::yamux::control";
/// A Yamux [`Connection`] controller.
///
/// This presents an alternative API for using a yamux [`Connection`].
///
/// A [`Control`] communicates with a [`ControlledConnection`] via a channel. This allows
/// a [`Control`] to be cloned and shared between tasks and threads.
#[derive(Clone, Debug)]
pub struct Control {
    /// Command channel to [`ControlledConnection`]; bounded by `MAX_ACK_BACKLOG`.
    sender: mpsc::Sender<ControlCommand>,
}
impl Control {
    /// Wrap `connection`, returning a cloneable control handle together with
    /// the [`ControlledConnection`] driver that must be polled.
    pub fn new<T>(connection: Connection<T>) -> (Self, ControlledConnection<T>) {
        let (sender, receiver) = mpsc::channel(MAX_ACK_BACKLOG);
        (
            Control { sender },
            ControlledConnection {
                state: State::Idle(connection),
                commands: receiver,
            },
        )
    }

    /// Open a new stream to the remote.
    pub async fn open_stream(&mut self) -> Result<Stream> {
        let (tx, rx) = oneshot::channel();
        self.sender.send(ControlCommand::OpenStream(tx)).await?;
        rx.await?
    }

    /// Close the connection.
    pub async fn close(&mut self) -> Result<()> {
        let (tx, rx) = oneshot::channel();
        let sent = self.sender.send(ControlCommand::CloseConnection(tx)).await;
        if sent.is_err() {
            // The receiver is closed which means the connection is already closed.
            return Ok(());
        }
        // A dropped `oneshot::Sender` means the `Connection` is gone,
        // so we do not treat receive errors differently here.
        let _ = rx.await;
        Ok(())
    }
}
/// Wraps a [`Connection`] which can be controlled with a [`Control`].
pub struct ControlledConnection<T> {
    /// Current state of the connection state machine (see [`State`]).
    state: State<T>,
    /// Incoming commands from all cloned [`Control`] handles.
    commands: mpsc::Receiver<ControlCommand>,
}
impl<T> ControlledConnection<T>
where
    T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
    /// Drive the connection state machine: surface inbound streams and service
    /// [`ControlCommand`]s until the connection is fully closed.
    ///
    /// The state is temporarily swapped for [`State::Poisoned`] while it is
    /// inspected; every branch restores a valid state before returning.
    fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Stream>>> {
        loop {
            match std::mem::replace(&mut self.state, State::Poisoned) {
                State::Idle(mut connection) => {
                    // First surface any inbound stream (or error) from the remote.
                    match connection.poll_next_inbound(cx) {
                        Poll::Ready(maybe_stream) => {
                            // Transport layers will close the connection on the first
                            // substream error. The `connection.poll_next_inbound` should
                            // not be called again after returning an error. Instead, we
                            // must close the connection gracefully.
                            match maybe_stream.as_ref() {
                                Some(Err(error)) => {
                                    tracing::debug!(target: LOG_TARGET, ?error, "Inbound stream error, closing connection");
                                    self.state = State::Closing {
                                        reply: None,
                                        inner: Closing::DrainingControlCommands { connection },
                                    };
                                }
                                other => {
                                    tracing::debug!(target: LOG_TARGET, ?other, "Inbound stream reset state to idle");
                                    self.state = State::Idle(connection)
                                }
                            }
                            return Poll::Ready(maybe_stream);
                        }
                        Poll::Pending => {}
                    }
                    // No inbound activity; service pending control commands.
                    match self.commands.poll_next_unpin(cx) {
                        Poll::Ready(Some(ControlCommand::OpenStream(reply))) => {
                            self.state = State::OpeningNewStream { reply, connection };
                            continue;
                        }
                        Poll::Ready(Some(ControlCommand::CloseConnection(reply))) => {
                            // Stop accepting further commands while shutting down.
                            self.commands.close();
                            self.state = State::Closing {
                                reply: Some(reply),
                                inner: Closing::DrainingControlCommands { connection },
                            };
                            continue;
                        }
                        Poll::Ready(None) => {
                            // Last `Control` sender was dropped, close the connection.
                            self.state = State::Closing {
                                reply: None,
                                inner: Closing::ClosingConnection { connection },
                            };
                            continue;
                        }
                        Poll::Pending => {}
                    }
                    self.state = State::Idle(connection);
                    return Poll::Pending;
                }
                State::OpeningNewStream {
                    reply,
                    mut connection,
                } => match connection.poll_new_outbound(cx) {
                    Poll::Ready(stream) => {
                        // The requester may have gone away; ignore send errors.
                        let _ = reply.send(stream);
                        self.state = State::Idle(connection);
                        continue;
                    }
                    Poll::Pending => {
                        self.state = State::OpeningNewStream { reply, connection };
                        return Poll::Pending;
                    }
                },
                State::Closing {
                    reply,
                    inner: Closing::DrainingControlCommands { connection },
                } => match self.commands.poll_next_unpin(cx) {
                    Poll::Ready(Some(ControlCommand::OpenStream(new_reply))) => {
                        // While draining, queued open requests are answered with `Closed`.
                        let _ = new_reply.send(Err(ConnectionError::Closed));
                        self.state = State::Closing {
                            reply,
                            inner: Closing::DrainingControlCommands { connection },
                        };
                        continue;
                    }
                    Poll::Ready(Some(ControlCommand::CloseConnection(new_reply))) => {
                        let _ = new_reply.send(());
                        self.state = State::Closing {
                            reply,
                            inner: Closing::DrainingControlCommands { connection },
                        };
                        continue;
                    }
                    Poll::Ready(None) => {
                        // All commands drained; now close the connection itself.
                        self.state = State::Closing {
                            reply,
                            inner: Closing::ClosingConnection { connection },
                        };
                        continue;
                    }
                    Poll::Pending => {
                        self.state = State::Closing {
                            reply,
                            inner: Closing::DrainingControlCommands { connection },
                        };
                        return Poll::Pending;
                    }
                },
                State::Closing {
                    reply,
                    inner: Closing::ClosingConnection { mut connection },
                } => match connection.poll_close(cx) {
                    // `Closed` while closing counts as success.
                    Poll::Ready(Ok(())) | Poll::Ready(Err(ConnectionError::Closed)) => {
                        if let Some(reply) = reply {
                            let _ = reply.send(());
                        }
                        return Poll::Ready(None);
                    }
                    Poll::Ready(Err(other)) => {
                        if let Some(reply) = reply {
                            let _ = reply.send(());
                        }
                        return Poll::Ready(Some(Err(other)));
                    }
                    Poll::Pending => {
                        self.state = State::Closing {
                            reply,
                            inner: Closing::ClosingConnection { connection },
                        };
                        return Poll::Pending;
                    }
                },
                // NOTE(review): `Poisoned` should be unreachable since every
                // branch restores the state; returning `Pending` here avoids a
                // panic but the task would never be woken — confirm.
                State::Poisoned => return Poll::Pending,
            }
        }
    }
}
/// Commands sent from a [`Control`] handle to the [`ControlledConnection`].
#[derive(Debug)]
enum ControlCommand {
    /// Open a new stream to the remote end.
    OpenStream(oneshot::Sender<Result<Stream>>),
    /// Close the whole connection.
    CloseConnection(oneshot::Sender<()>),
}
/// The state of a [`ControlledConnection`].
enum State<T> {
    /// Waiting for inbound streams or control commands.
    Idle(Connection<T>),
    /// An outbound stream open is in flight.
    OpeningNewStream {
        /// Channel used to hand the opened stream back to the requester.
        reply: oneshot::Sender<Result<Stream>>,
        connection: Connection<T>,
    },
    Closing {
        /// A channel to the [`Control`] in case the close was requested. `None` if we are closing
        /// because the last [`Control`] was dropped.
        reply: Option<oneshot::Sender<()>>,
        inner: Closing<T>,
    },
    /// Transient placeholder used while the state is being inspected; never
    /// observed across `poll_next` returns.
    Poisoned,
}
/// A sub-state of our larger state machine for a [`ControlledConnection`].
///
/// Closing a connection involves two steps:
///
/// 1. Draining and answering all remaining control commands
///    ([`Closing::DrainingControlCommands`]).
/// 2. Closing the underlying [`Connection`].
enum Closing<T> {
    DrainingControlCommands { connection: Connection<T> },
    ClosingConnection { connection: Connection<T> },
}
impl<T> futures::Stream for ControlledConnection<T>
where
    T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
    type Item = Result<Stream>;
    // Delegates to the inherent `poll_next`; the type is `Unpin`, so
    // `get_mut` is safe here.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        self.get_mut().poll_next(cx)
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/yamux/mod.rs | src/yamux/mod.rs | // Copyright (c) 2018-2019 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0 or MIT license, at your option.
//
// A copy of the Apache License, Version 2.0 is included in the software as
// LICENSE-APACHE and a copy of the MIT license is included in the software
// as LICENSE-MIT. You may also obtain a copy of the Apache License, Version 2.0
// at https://www.apache.org/licenses/LICENSE-2.0 and a copy of the MIT license
// at https://opensource.org/licenses/MIT.
//! This crate implements the [Yamux specification][1].
//!
//! It multiplexes independent I/O streams over reliable, ordered connections,
//! such as TCP/IP.
//!
//! The three primary objects, clients of this crate interact with, are:
//!
//! - [`Connection`], which wraps the underlying I/O resource, e.g. a socket,
//! - [`Stream`], which implements [`futures::io::AsyncRead`] and [`futures::io::AsyncWrite`], and
//! - [`Control`], to asynchronously control the [`Connection`].
//!
//! [1]: https://github.com/hashicorp/yamux/blob/master/spec.md
#![forbid(unsafe_code)]
mod control;
pub use yamux::{
Config, Connection, ConnectionError, FrameDecodeError, HeaderDecodeError, Mode, Packet, Result,
Stream, StreamId,
};
// Switching to the "poll" based yamux API is a massive breaking change for litep2p.
// Instead, we rely on the upstream yamux and keep the old controller API.
pub use crate::yamux::control::{Control, ControlledConnection};
/// Default credit (receive window size) in bytes, as per the yamux specification.
pub const DEFAULT_CREDIT: u32 = 256 * 1024;
/// The maximum number of streams we will open without an acknowledgement from the other peer.
///
/// This enables a very basic form of backpressure on the creation of streams.
const MAX_ACK_BACKLOG: usize = 256;
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/mock/mod.rs | src/mock/mod.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
pub mod substream;
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/mock/substream.rs | src/mock/substream.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::error::SubstreamError;
use bytes::{Bytes, BytesMut};
use futures::{Sink, Stream};
use std::{
fmt::Debug,
pin::Pin,
task::{Context, Poll},
};
/// Trait which describes the behavior of a mock substream.
///
/// A substream is anything that can stream inbound frames (`BytesMut`) and
/// sink outbound frames (`Bytes`), failing with [`SubstreamError`].
pub trait Substream:
    Debug
    + Stream<Item = Result<BytesMut, SubstreamError>>
    + Sink<Bytes, Error = SubstreamError>
    + Send
    + Unpin
    + 'static
{
}
/// Blanket implementation for [`Substream`]: any type satisfying the
/// supertrait bounds automatically implements the marker trait.
impl<
        T: Debug
            + Stream<Item = Result<BytesMut, SubstreamError>>
            + Sink<Bytes, Error = SubstreamError>
            + Send
            + Unpin
            + 'static,
    > Substream for T
{
}
// `MockSubstream`: mock generated by `mockall` that implements both
// `Sink<Bytes>` and `Stream`, and therefore (via the blanket impl above)
// the `Substream` marker trait, for use in unit tests.
mockall::mock! {
    #[derive(Debug)]
    pub Substream {}
    impl Sink<bytes::Bytes> for Substream {
        type Error = SubstreamError;
        fn poll_ready<'a>(
            self: Pin<&mut Self>,
            cx: &mut Context<'a>
        ) -> Poll<Result<(), SubstreamError>>;
        fn start_send(self: Pin<&mut Self>, item: bytes::Bytes) -> Result<(), SubstreamError>;
        fn poll_flush<'a>(
            self: Pin<&mut Self>,
            cx: &mut Context<'a>
        ) -> Poll<Result<(), SubstreamError>>;
        fn poll_close<'a>(
            self: Pin<&mut Self>,
            cx: &mut Context<'a>
        ) -> Poll<Result<(), SubstreamError>>;
    }
    impl Stream for Substream {
        type Item = Result<BytesMut, SubstreamError>;
        fn poll_next<'a>(
            self: Pin<&mut Self>,
            cx: &mut Context<'a>
        ) -> Poll<Option<Result<BytesMut, SubstreamError>>>;
    }
}
/// Dummy substream which just implements `Stream + Sink` and returns `Poll::Pending`/`Ok(())`
#[derive(Debug)]
pub struct DummySubstream {}
impl DummySubstream {
    /// Create new [`DummySubstream`].
    // Only needed by tests, hence the cfg gate.
    #[cfg(test)]
    pub fn new() -> Self {
        Self {}
    }
}
impl Sink<bytes::Bytes> for DummySubstream {
    type Error = SubstreamError;
    // Never becomes ready; the dummy accepts nothing through the usual
    // ready/send protocol.
    fn poll_ready(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), SubstreamError>> {
        Poll::Pending
    }
    // Accepts (and discards) any item.
    fn start_send(self: Pin<&mut Self>, _item: bytes::Bytes) -> Result<(), SubstreamError> {
        Ok(())
    }
    // Never flushes.
    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), SubstreamError>> {
        Poll::Pending
    }
    // Closing always succeeds immediately.
    fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), SubstreamError>> {
        Poll::Ready(Ok(()))
    }
}
impl Stream for DummySubstream {
    type Item = Result<BytesMut, SubstreamError>;
    // Never yields any item; note that no waker is registered, so the task
    // is not woken again by this stream (fine for test usage).
    fn poll_next(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
    ) -> Poll<Option<Result<BytesMut, SubstreamError>>> {
        Poll::Pending
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use futures::SinkExt;
    // Exercises the full `Sink` surface of `DummySubstream`: ready/flush stay
    // pending, `start_send` succeeds, and close completes immediately.
    #[tokio::test]
    async fn dummy_substream_sink() {
        let mut substream = DummySubstream::new();
        futures::future::poll_fn(|cx| match substream.poll_ready_unpin(cx) {
            Poll::Pending => Poll::Ready(()),
            _ => panic!("invalid event"),
        })
        .await;
        assert!(Pin::new(&mut substream).start_send(bytes::Bytes::new()).is_ok());
        futures::future::poll_fn(|cx| match substream.poll_flush_unpin(cx) {
            Poll::Pending => Poll::Ready(()),
            _ => panic!("invalid event"),
        })
        .await;
        futures::future::poll_fn(|cx| match substream.poll_close_unpin(cx) {
            Poll::Ready(Ok(())) => Poll::Ready(()),
            _ => panic!("invalid event"),
        })
        .await;
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/utils/mod.rs | src/utils/mod.rs | // Copyright 2024 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
pub mod futures_stream;
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/utils/futures_stream.rs | src/utils/futures_stream.rs | // Copyright 2024 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use futures::{stream::FuturesUnordered, Stream, StreamExt};
use std::{
future::Future,
pin::Pin,
task::{Context, Poll, Waker},
};
/// Wrapper around [`FuturesUnordered`] that wakes a task up automatically.
/// The [`Stream`] implemented by [`FuturesStream`] never terminates and can be
/// polled even when it contains no futures.
#[derive(Default)]
pub struct FuturesStream<F> {
    /// Underlying set of futures, completed in any order.
    futures: FuturesUnordered<F>,
    /// Waker of the task that last polled without a ready future; woken on `push`.
    waker: Option<Waker>,
}
impl<F> FuturesStream<F> {
    /// Create new [`FuturesStream`].
    pub fn new() -> Self {
        Self {
            futures: FuturesUnordered::new(),
            waker: None,
        }
    }
    /// Number of futures in the stream.
    pub fn len(&self) -> usize {
        self.futures.len()
    }
    /// Check if the stream is empty.
    pub fn is_empty(&self) -> bool {
        self.futures.is_empty()
    }
    /// Push a future for processing.
    pub fn push(&mut self, future: F) {
        self.futures.push(future);
        // Wake the task that last observed an empty/pending stream so the
        // newly-pushed future gets polled.
        if let Some(waker) = self.waker.take() {
            waker.wake();
        }
    }
}
impl<F: Future> Stream for FuturesStream<F> {
    type Item = <F as Future>::Output;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match self.futures.poll_next_unpin(cx) {
            Poll::Ready(Some(output)) => Poll::Ready(Some(output)),
            // Either the set is empty (`Ready(None)`) or no future is ready
            // yet — in both cases the stream stays pending, so it never
            // terminates. We must save the current waker so that `push` can
            // wake the task when new futures are inserted; otherwise, simply
            // returning `Poll::Pending` would cause the task to never be
            // woken up again. (We were previously relying on some other task
            // from the `loop tokio::select!` to finish.)
            Poll::Ready(None) | Poll::Pending => {
                self.waker = Some(cx.waker().clone());
                Poll::Pending
            }
        }
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/types/protocol.rs | src/types/protocol.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Protocol name.
use std::{
fmt::Display,
hash::{Hash, Hasher},
sync::Arc,
};
/// Protocol name.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "fuzz", derive(serde::Serialize, serde::Deserialize))]
pub enum ProtocolName {
    /// Borrowed from a `'static` string; only available outside fuzz builds.
    #[cfg(not(feature = "fuzz"))]
    Static(&'static str),
    /// Heap-allocated, reference-counted name.
    Allocated(Arc<str>),
}
// Outside fuzz builds a `'static` string is stored without allocating.
#[cfg(not(feature = "fuzz"))]
impl From<&'static str> for ProtocolName {
    fn from(protocol: &'static str) -> Self {
        ProtocolName::Static(protocol)
    }
}
// Fuzz builds only have the `Allocated` variant (the enum derives serde
// there), so the string is copied into an `Arc<str>` instead.
#[cfg(feature = "fuzz")]
impl From<&'static str> for ProtocolName {
    fn from(protocol: &'static str) -> Self {
        ProtocolName::Allocated(Arc::from(protocol.to_string()))
    }
}
impl Display for ProtocolName {
    // Both variants display as the underlying string.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            #[cfg(not(feature = "fuzz"))]
            Self::Static(protocol) => protocol.fmt(f),
            Self::Allocated(protocol) => protocol.fmt(f),
        }
    }
}
// Owned strings always become the `Allocated` variant.
impl From<String> for ProtocolName {
    fn from(protocol: String) -> Self {
        ProtocolName::Allocated(Arc::from(protocol))
    }
}
// An existing `Arc<str>` is stored as-is, without copying.
impl From<Arc<str>> for ProtocolName {
    fn from(protocol: Arc<str>) -> Self {
        Self::Allocated(protocol)
    }
}
// Both variants dereference to the underlying `str`, which makes
// `&ProtocolName` usable anywhere a `&str` is expected.
impl std::ops::Deref for ProtocolName {
    type Target = str;
    fn deref(&self) -> &Self::Target {
        match self {
            #[cfg(not(feature = "fuzz"))]
            Self::Static(protocol) => protocol,
            Self::Allocated(protocol) => protocol,
        }
    }
}
impl Hash for ProtocolName {
    // `self as &str` is a deref-coercion cast (via the `Deref` impl above),
    // so both variants hash by string content — consistent with `PartialEq`.
    fn hash<H: Hasher>(&self, state: &mut H) {
        (self as &str).hash(state)
    }
}
impl PartialEq for ProtocolName {
    // Equality compares string content, regardless of variant.
    fn eq(&self, other: &Self) -> bool {
        (self as &str) == (other as &str)
    }
}
impl Eq for ProtocolName {}
#[cfg(test)]
mod tests {
    use super::*;

    /// Names built from different source types compare equal by content.
    #[test]
    fn make_protocol() {
        let from_arc = ProtocolName::from(Arc::from(String::from("/protocol/1")));
        let from_static = ProtocolName::from("/protocol/1");
        assert_eq!(from_arc, from_static);
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/transport/mod.rs | src/transport/mod.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Transport protocol implementations provided by [`Litep2p`](`crate::Litep2p`).
use crate::{error::DialError, transport::manager::TransportHandle, types::ConnectionId, PeerId};
use futures::Stream;
use hickory_resolver::TokioResolver;
use multiaddr::Multiaddr;
use std::{fmt::Debug, sync::Arc, time::Duration};
pub(crate) mod common;
#[cfg(feature = "quic")]
pub mod quic;
pub mod tcp;
#[cfg(feature = "webrtc")]
pub mod webrtc;
#[cfg(feature = "websocket")]
pub mod websocket;
#[cfg(test)]
pub(crate) mod dummy;
pub(crate) mod manager;
pub use manager::limits::{ConnectionLimitsConfig, ConnectionLimitsError};
/// Timeout for opening a connection.
pub(crate) const CONNECTION_OPEN_TIMEOUT: Duration = Duration::from_secs(10);
/// Timeout for opening a substream.
pub(crate) const SUBSTREAM_OPEN_TIMEOUT: Duration = Duration::from_secs(5);
/// Timeout for a connection waiting for new substreams.
pub(crate) const KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(5);
/// Maximum number of parallel dial attempts.
pub(crate) const MAX_PARALLEL_DIALS: usize = 8;
/// Connection endpoint: records which side initiated the connection and on
/// which address.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Endpoint {
    /// Successfully established outbound connection.
    Dialer {
        /// Address that was dialed.
        address: Multiaddr,
        /// Connection ID.
        connection_id: ConnectionId,
    },
    /// Successfully established inbound connection.
    Listener {
        /// Local connection address.
        address: Multiaddr,
        /// Connection ID.
        connection_id: ConnectionId,
    },
}
impl Endpoint {
    /// Get `Multiaddr` of the [`Endpoint`].
    pub fn address(&self) -> &Multiaddr {
        match self {
            Self::Dialer { address, .. } => address,
            Self::Listener { address, .. } => address,
        }
    }
    /// Create dialer.
    pub(crate) fn dialer(address: Multiaddr, connection_id: ConnectionId) -> Self {
        Endpoint::Dialer {
            address,
            connection_id,
        }
    }
    /// Create listener.
    pub(crate) fn listener(address: Multiaddr, connection_id: ConnectionId) -> Self {
        Endpoint::Listener {
            address,
            connection_id,
        }
    }
    /// Get `ConnectionId` of the `Endpoint`.
    pub fn connection_id(&self) -> ConnectionId {
        match self {
            Self::Dialer { connection_id, .. } => *connection_id,
            Self::Listener { connection_id, .. } => *connection_id,
        }
    }
    /// Is this a listener endpoint?
    pub fn is_listener(&self) -> bool {
        std::matches!(self, Self::Listener { .. })
    }
}
/// Transport event.
#[derive(Debug)]
pub(crate) enum TransportEvent {
    /// Fully negotiated connection established to remote peer.
    ConnectionEstablished {
        /// Peer ID.
        peer: PeerId,
        /// Endpoint.
        endpoint: Endpoint,
    },
    /// Inbound connection received but not yet accepted/rejected
    /// (see [`Transport::accept_pending`]/[`Transport::reject_pending`]).
    PendingInboundConnection {
        /// Connection ID.
        connection_id: ConnectionId,
    },
    /// Connection opened to remote but not yet negotiated.
    ConnectionOpened {
        /// Connection ID.
        connection_id: ConnectionId,
        /// Address that was dialed.
        address: Multiaddr,
    },
    /// Connection closed to remote peer.
    #[allow(unused)]
    ConnectionClosed {
        /// Peer ID.
        peer: PeerId,
        /// Connection ID.
        connection_id: ConnectionId,
    },
    /// Failed to dial remote peer.
    DialFailure {
        /// Connection ID.
        connection_id: ConnectionId,
        /// Dialed address.
        address: Multiaddr,
        /// Error.
        error: DialError,
    },
    /// Open failure for an unnegotiated set of connections.
    OpenFailure {
        /// Connection ID.
        connection_id: ConnectionId,
        /// Errors, one per attempted address.
        errors: Vec<(Multiaddr, DialError)>,
    },
}
/// Factory trait for constructing a concrete [`Transport`].
pub(crate) trait TransportBuilder {
    /// Transport-specific configuration.
    type Config: Debug;
    /// Transport type produced by this builder.
    type Transport: Transport;
    /// Create new [`Transport`] object.
    ///
    /// Also returns a list of `Multiaddr`s associated with the transport
    /// (presumably its listen addresses — confirm against implementations).
    fn new(
        context: TransportHandle,
        config: Self::Config,
        resolver: Arc<TokioResolver>,
    ) -> crate::Result<(Self, Vec<Multiaddr>)>
    where
        Self: Sized;
}
/// Common interface of all transport implementations; events are delivered
/// through the [`Stream`] supertrait.
pub(crate) trait Transport: Stream + Unpin + Send {
    /// Dial `address` and negotiate connection.
    fn dial(&mut self, connection_id: ConnectionId, address: Multiaddr) -> crate::Result<()>;
    /// Accept negotiated connection.
    fn accept(&mut self, connection_id: ConnectionId) -> crate::Result<()>;
    /// Accept pending connection.
    fn accept_pending(&mut self, connection_id: ConnectionId) -> crate::Result<()>;
    /// Reject pending connection.
    fn reject_pending(&mut self, connection_id: ConnectionId) -> crate::Result<()>;
    /// Reject negotiated connection.
    fn reject(&mut self, connection_id: ConnectionId) -> crate::Result<()>;
    /// Attempt to open connection to remote peer over one or more addresses.
    fn open(&mut self, connection_id: ConnectionId, addresses: Vec<Multiaddr>)
        -> crate::Result<()>;
    /// Negotiate opened connection.
    fn negotiate(&mut self, connection_id: ConnectionId) -> crate::Result<()>;
    /// Cancel opening connections.
    ///
    /// This is a no-op for connections that have already succeeded/canceled.
    fn cancel(&mut self, connection_id: ConnectionId);
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/transport/dummy.rs | src/transport/dummy.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Dummy transport.
use crate::{
transport::{Transport, TransportEvent},
types::ConnectionId,
};
use futures::Stream;
use multiaddr::Multiaddr;
use std::{
collections::VecDeque,
pin::Pin,
task::{Context, Poll},
};
/// Dummy transport used in tests; produces only manually injected events.
pub(crate) struct DummyTransport {
    /// Queued events, yielded by the `Stream` implementation in FIFO order.
    events: VecDeque<TransportEvent>,
}
impl DummyTransport {
    /// Create new [`DummyTransport`].
    pub(crate) fn new() -> Self {
        Self {
            events: VecDeque::new(),
        }
    }
    /// Inject event into `DummyTransport`.
    ///
    /// The event will be yielded by a subsequent `Stream` poll.
    pub(crate) fn inject_event(&mut self, event: TransportEvent) {
        self.events.push_back(event);
    }
}
impl Stream for DummyTransport {
    type Item = TransportEvent;

    /// Yield injected events in FIFO order; `Pending` once the queue is empty.
    fn poll_next(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match self.events.pop_front() {
            Some(event) => Poll::Ready(Some(event)),
            // No waker is registered: the dummy is only polled directly in tests.
            None => Poll::Pending,
        }
    }
}
// Every operation succeeds unconditionally; the dummy transport is driven
// purely through `inject_event`.
impl Transport for DummyTransport {
    fn dial(&mut self, _: ConnectionId, _: Multiaddr) -> crate::Result<()> {
        Ok(())
    }
    fn accept(&mut self, _: ConnectionId) -> crate::Result<()> {
        Ok(())
    }
    fn accept_pending(&mut self, _connection_id: ConnectionId) -> crate::Result<()> {
        Ok(())
    }
    fn reject_pending(&mut self, _connection_id: ConnectionId) -> crate::Result<()> {
        Ok(())
    }
    fn reject(&mut self, _: ConnectionId) -> crate::Result<()> {
        Ok(())
    }
    fn open(&mut self, _: ConnectionId, _: Vec<Multiaddr>) -> crate::Result<()> {
        Ok(())
    }
    fn negotiate(&mut self, _: ConnectionId) -> crate::Result<()> {
        Ok(())
    }
    /// Cancel opening connections.
    fn cancel(&mut self, _: ConnectionId) {}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{error::DialError, transport::Endpoint, PeerId};
    use futures::StreamExt;
    // Injected events must come back out in FIFO order, and polling a drained
    // transport must stay `Pending` rather than ending the stream.
    #[tokio::test]
    async fn pending_event() {
        let mut transport = DummyTransport::new();
        transport.inject_event(TransportEvent::DialFailure {
            connection_id: ConnectionId::from(1338usize),
            address: Multiaddr::empty(),
            error: DialError::Timeout,
        });
        let peer = PeerId::random();
        let endpoint = Endpoint::listener(Multiaddr::empty(), ConnectionId::from(1337usize));
        transport.inject_event(TransportEvent::ConnectionEstablished {
            peer,
            endpoint: endpoint.clone(),
        });
        // First event out is the dial failure injected first.
        match transport.next().await.unwrap() {
            TransportEvent::DialFailure {
                connection_id,
                address,
                ..
            } => {
                assert_eq!(connection_id, ConnectionId::from(1338usize));
                assert_eq!(address, Multiaddr::empty());
            }
            _ => panic!("invalid event"),
        }
        // Second event out is the established connection.
        match transport.next().await.unwrap() {
            TransportEvent::ConnectionEstablished {
                peer: event_peer,
                endpoint: event_endpoint,
            } => {
                assert_eq!(peer, event_peer);
                assert_eq!(endpoint, event_endpoint);
            }
            _ => panic!("invalid event"),
        }
        // Queue drained: the stream reports `Pending`, not termination.
        futures::future::poll_fn(|cx| match transport.poll_next_unpin(cx) {
            Poll::Pending => Poll::Ready(()),
            _ => panic!("invalid event"),
        })
        .await;
    }
    // All `Transport` trait methods are no-ops that report success.
    #[test]
    fn dummy_handle_connection_states() {
        let mut transport = DummyTransport::new();
        assert!(transport.reject(ConnectionId::new()).is_ok());
        assert!(transport.open(ConnectionId::new(), Vec::new()).is_ok());
        assert!(transport.negotiate(ConnectionId::new()).is_ok());
        transport.cancel(ConnectionId::new());
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/transport/quic/config.rs | src/transport/quic/config.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! QUIC transport configuration.
use crate::transport::{CONNECTION_OPEN_TIMEOUT, SUBSTREAM_OPEN_TIMEOUT};
use multiaddr::Multiaddr;
use std::time::Duration;
/// QUIC transport configuration.
#[derive(Debug)]
pub struct Config {
    /// Listen address for the transport.
    ///
    /// Default listen address is `/ip4/127.0.0.1/udp/0/quic-v1`.
    pub listen_addresses: Vec<Multiaddr>,
    /// Connection open timeout.
    ///
    /// How long should litep2p wait for a connection to be opened before the host
    /// is deemed unreachable.
    pub connection_open_timeout: Duration,
    /// Substream open timeout.
    ///
    /// How long should litep2p wait for a substream to be opened before considering
    /// the substream rejected.
    pub substream_open_timeout: Duration,
}
impl Default for Config {
fn default() -> Self {
Self {
listen_addresses: vec!["/ip4/127.0.0.1/udp/0/quic-v1".parse().expect("valid address")],
connection_open_timeout: CONNECTION_OPEN_TIMEOUT,
substream_open_timeout: SUBSTREAM_OPEN_TIMEOUT,
}
}
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/transport/quic/listener.rs | src/transport/quic/listener.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::{
crypto::{ed25519::Keypair, tls::make_server_config},
error::AddressError,
PeerId,
};
use futures::{future::BoxFuture, stream::FuturesUnordered, FutureExt, Stream, StreamExt};
use multiaddr::{Multiaddr, Protocol};
use quinn::{Connecting, Endpoint, ServerConfig};
use std::{
net::{IpAddr, SocketAddr},
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
/// Logging target for the file.
const LOG_TARGET: &str = "litep2p::quic::listener";
/// QUIC listener.
///
/// Accepts inbound QUIC connections on one or more UDP endpoints and yields
/// them through a single `Stream` of pending connections.
pub struct QuicListener {
    /// Listen addresses.
    _listen_addresses: Vec<SocketAddr>,
    /// Listeners.
    listeners: Vec<Endpoint>,
    /// Incoming connections.
    ///
    /// One in-flight `accept()` future per listener; each resolves to the
    /// listener's index plus the pending connection so it can be re-armed.
    incoming: FuturesUnordered<BoxFuture<'static, Option<(usize, Connecting)>>>,
}
impl QuicListener {
/// Create new [`QuicListener`].
pub fn new(
keypair: &Keypair,
addresses: Vec<Multiaddr>,
) -> crate::Result<(Self, Vec<Multiaddr>)> {
let mut listeners: Vec<Endpoint> = Vec::new();
let mut listen_addresses = Vec::new();
for address in addresses.into_iter() {
let (listen_address, _) = Self::get_socket_address(&address)?;
let crypto_config = Arc::new(make_server_config(keypair).expect("to succeed"));
let server_config = ServerConfig::with_crypto(crypto_config);
let listener = Endpoint::server(server_config, listen_address).unwrap();
let listen_address = listener.local_addr()?;
listen_addresses.push(listen_address);
listeners.push(listener);
// );
}
let listen_multi_addresses = listen_addresses
.iter()
.cloned()
.map(|address| {
Multiaddr::empty()
.with(Protocol::from(address.ip()))
.with(Protocol::Udp(address.port()))
.with(Protocol::QuicV1)
})
.collect();
Ok((
Self {
incoming: listeners
.iter_mut()
.enumerate()
.map(|(i, listener)| {
let inner = listener.clone();
async move { inner.accept().await.map(|connecting| (i, connecting)) }
.boxed()
})
.collect(),
listeners,
_listen_addresses: listen_addresses,
},
listen_multi_addresses,
))
}
/// Extract socket address and `PeerId`, if found, from `address`.
pub fn get_socket_address(
address: &Multiaddr,
) -> Result<(SocketAddr, Option<PeerId>), AddressError> {
tracing::trace!(target: LOG_TARGET, ?address, "parse multi address");
let mut iter = address.iter();
let socket_address = match iter.next() {
Some(Protocol::Ip6(address)) => match iter.next() {
Some(Protocol::Udp(port)) => SocketAddr::new(IpAddr::V6(address), port),
protocol => {
tracing::error!(
target: LOG_TARGET,
?protocol,
"invalid transport protocol, expected `QuicV1`",
);
return Err(AddressError::InvalidProtocol);
}
},
Some(Protocol::Ip4(address)) => match iter.next() {
Some(Protocol::Udp(port)) => SocketAddr::new(IpAddr::V4(address), port),
protocol => {
tracing::error!(
target: LOG_TARGET,
?protocol,
"invalid transport protocol, expected `QuicV1`",
);
return Err(AddressError::InvalidProtocol);
}
},
protocol => {
tracing::error!(target: LOG_TARGET, ?protocol, "invalid transport protocol");
return Err(AddressError::InvalidProtocol);
}
};
// verify that quic exists
match iter.next() {
Some(Protocol::QuicV1) => {}
_ => return Err(AddressError::InvalidProtocol),
}
let maybe_peer = match iter.next() {
Some(Protocol::P2p(multihash)) => Some(PeerId::from_multihash(multihash)?),
None => None,
protocol => {
tracing::error!(
target: LOG_TARGET,
?protocol,
"invalid protocol, expected `P2p` or `None`"
);
return Err(AddressError::PeerIdMissing);
}
};
Ok((socket_address, maybe_peer))
}
}
impl Stream for QuicListener {
    type Item = Connecting;
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // With no listeners configured there is nothing to poll.
        if self.incoming.is_empty() {
            return Poll::Pending;
        }
        let polled = futures::ready!(self.incoming.poll_next_unpin(cx));
        if let Some(Some((index, connecting))) = polled {
            // Re-arm the listener that just produced a connection so it keeps
            // accepting, then hand the pending connection to the caller.
            let endpoint = self.listeners[index].clone();
            self.incoming.push(
                async move { endpoint.accept().await.map(|connecting| (index, connecting)) }
                    .boxed(),
            );
            Poll::Ready(Some(connecting))
        } else {
            // Either the accept-future set is exhausted or an endpoint shut
            // down; in both cases the stream terminates.
            Poll::Ready(None)
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::crypto::tls::make_client_config;
    use super::*;
    use quinn::ClientConfig;
    use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
    // `get_socket_address()` must accept exactly the
    // `/ip{4,6}/<addr>/udp/<port>/quic-v1[/p2p/<peer>]` shapes and reject
    // everything else (wrong transport, missing `quic-v1`, trailing junk).
    #[test]
    fn parse_multiaddresses() {
        assert!(QuicListener::get_socket_address(
            &"/ip6/::1/udp/8888/quic-v1".parse().expect("valid multiaddress")
        )
        .is_ok());
        assert!(QuicListener::get_socket_address(
            &"/ip4/127.0.0.1/udp/8888/quic-v1".parse().expect("valid multiaddress")
        )
        .is_ok());
        assert!(QuicListener::get_socket_address(
            &"/ip6/::1/udp/8888/quic-v1/p2p/12D3KooWT2ouvz5uMmCvHJGzAGRHiqDts5hzXR7NdoQ27pGdzp9Q"
                .parse()
                .expect("valid multiaddress")
        )
        .is_ok());
        assert!(QuicListener::get_socket_address(
            &"/ip4/127.0.0.1/udp/8888/quic-v1/p2p/12D3KooWT2ouvz5uMmCvHJGzAGRHiqDts5hzXR7NdoQ27pGdzp9Q"
                .parse()
                .expect("valid multiaddress")
        )
        .is_ok());
        assert!(QuicListener::get_socket_address(
            &"/ip6/::1/tcp/8888/quic-v1/p2p/12D3KooWT2ouvz5uMmCvHJGzAGRHiqDts5hzXR7NdoQ27pGdzp9Q"
                .parse()
                .expect("valid multiaddress")
        )
        .is_err());
        assert!(QuicListener::get_socket_address(
            &"/ip4/127.0.0.1/udp/8888/p2p/12D3KooWT2ouvz5uMmCvHJGzAGRHiqDts5hzXR7NdoQ27pGdzp9Q"
                .parse()
                .expect("valid multiaddress")
        )
        .is_err());
        assert!(QuicListener::get_socket_address(
            &"/ip4/127.0.0.1/tcp/8888/p2p/12D3KooWT2ouvz5uMmCvHJGzAGRHiqDts5hzXR7NdoQ27pGdzp9Q"
                .parse()
                .expect("valid multiaddress")
        )
        .is_err());
        assert!(QuicListener::get_socket_address(
            &"/dns/google.com/tcp/8888/p2p/12D3KooWT2ouvz5uMmCvHJGzAGRHiqDts5hzXR7NdoQ27pGdzp9Q"
                .parse()
                .expect("valid multiaddress")
        )
        .is_err());
        assert!(QuicListener::get_socket_address(
            &"/ip6/::1/udp/8888/quic-v1/utp".parse().expect("valid multiaddress")
        )
        .is_err());
    }
    // A listener with no configured addresses must stay `Pending` forever.
    #[tokio::test]
    async fn no_listeners() {
        let (mut listener, _) = QuicListener::new(&Keypair::generate(), Vec::new()).unwrap();
        futures::future::poll_fn(|cx| match listener.poll_next_unpin(cx) {
            Poll::Pending => Poll::Ready(()),
            event => panic!("unexpected event: {event:?}"),
        })
        .await;
    }
    // A real client dialing the single bound endpoint must surface as an
    // accepted connection on the listener side.
    #[tokio::test]
    async fn one_listener() {
        let address: Multiaddr = "/ip6/::1/udp/0/quic-v1".parse().unwrap();
        let keypair = Keypair::generate();
        let peer = PeerId::from_public_key(&keypair.public().into());
        let (mut listener, listen_addresses) =
            QuicListener::new(&keypair, vec![address.clone()]).unwrap();
        // Second protocol component of the multiaddress is the resolved port.
        let Some(Protocol::Udp(port)) = listen_addresses.first().unwrap().clone().iter().nth(1)
        else {
            panic!("invalid address");
        };
        let crypto_config =
            Arc::new(make_client_config(&Keypair::generate(), Some(peer)).expect("to succeed"));
        let client_config = ClientConfig::new(crypto_config);
        let client =
            Endpoint::client(SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0)).unwrap();
        let connection = client
            .connect_with(client_config, format!("[::1]:{port}").parse().unwrap(), "l")
            .unwrap();
        let (res1, res2) = tokio::join!(
            listener.next(),
            Box::pin(async move {
                match connection.await {
                    Ok(connection) => Ok(connection),
                    Err(error) => Err(error),
                }
            })
        );
        assert!(res1.is_some() && res2.is_ok());
    }
    // With two bound endpoints (v6 + v4), one dial to each must produce two
    // accepted connections on the merged stream.
    #[tokio::test]
    async fn two_listeners() {
        let _ = tracing_subscriber::fmt()
            .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
            .try_init();
        let address1: Multiaddr = "/ip6/::1/udp/0/quic-v1".parse().unwrap();
        let address2: Multiaddr = "/ip4/127.0.0.1/udp/0/quic-v1".parse().unwrap();
        let keypair = Keypair::generate();
        let peer = PeerId::from_public_key(&keypair.public().into());
        let (mut listener, listen_addresses) =
            QuicListener::new(&keypair, vec![address1, address2]).unwrap();
        let Some(Protocol::Udp(port1)) = listen_addresses.first().unwrap().clone().iter().nth(1)
        else {
            panic!("invalid address");
        };
        let Some(Protocol::Udp(port2)) =
            listen_addresses.iter().nth(1).unwrap().clone().iter().nth(1)
        else {
            panic!("invalid address");
        };
        let crypto_config1 =
            Arc::new(make_client_config(&Keypair::generate(), Some(peer)).expect("to succeed"));
        let client_config1 = ClientConfig::new(crypto_config1);
        let client1 =
            Endpoint::client(SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0)).unwrap();
        let connection1 = client1
            .connect_with(
                client_config1,
                format!("[::1]:{port1}").parse().unwrap(),
                "l",
            )
            .unwrap();
        let crypto_config2 =
            Arc::new(make_client_config(&Keypair::generate(), Some(peer)).expect("to succeed"));
        let client_config2 = ClientConfig::new(crypto_config2);
        let client2 =
            Endpoint::client(SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)).unwrap();
        let connection2 = client2
            .connect_with(
                client_config2,
                format!("127.0.0.1:{port2}").parse().unwrap(),
                "l",
            )
            .unwrap();
        tokio::spawn(async move {
            match connection1.await {
                Ok(connection) => Ok(connection),
                Err(error) => Err(error),
            }
        });
        tokio::spawn(async move {
            match connection2.await {
                Ok(connection) => Ok(connection),
                Err(error) => Err(error),
            }
        });
        for _ in 0..2 {
            let _ = listener.next().await;
        }
    }
    // Two independent clients dialing the same endpoint must both be accepted.
    #[tokio::test]
    async fn two_clients_dialing_same_address() {
        let _ = tracing_subscriber::fmt()
            .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
            .try_init();
        let keypair = Keypair::generate();
        let peer = PeerId::from_public_key(&keypair.public().into());
        let (mut listener, listen_addresses) = QuicListener::new(
            &keypair,
            vec![
                "/ip6/::1/udp/0/quic-v1".parse().unwrap(),
                "/ip4/127.0.0.1/udp/0/quic-v1".parse().unwrap(),
            ],
        )
        .unwrap();
        let Some(Protocol::Udp(port)) = listen_addresses.first().unwrap().clone().iter().nth(1)
        else {
            panic!("invalid address");
        };
        let crypto_config1 =
            Arc::new(make_client_config(&Keypair::generate(), Some(peer)).expect("to succeed"));
        let client_config1 = ClientConfig::new(crypto_config1);
        let client1 =
            Endpoint::client(SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0)).unwrap();
        let connection1 = client1
            .connect_with(
                client_config1,
                format!("[::1]:{port}").parse().unwrap(),
                "l",
            )
            .unwrap();
        let crypto_config2 =
            Arc::new(make_client_config(&Keypair::generate(), Some(peer)).expect("to succeed"));
        let client_config2 = ClientConfig::new(crypto_config2);
        let client2 =
            Endpoint::client(SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0)).unwrap();
        let connection2 = client2
            .connect_with(
                client_config2,
                format!("[::1]:{port}").parse().unwrap(),
                "l",
            )
            .unwrap();
        tokio::spawn(async move {
            match connection1.await {
                Ok(connection) => Ok(connection),
                Err(error) => Err(error),
            }
        });
        tokio::spawn(async move {
            match connection2.await {
                Ok(connection) => Ok(connection),
                Err(error) => Err(error),
            }
        });
        for _ in 0..2 {
            let _ = listener.next().await;
        }
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/transport/quic/connection.rs | src/transport/quic/connection.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! QUIC connection.
use std::time::Duration;
use crate::{
config::Role,
error::{Error, NegotiationError, SubstreamError},
multistream_select::{dialer_select_proto, listener_select_proto, Negotiated, Version},
protocol::{Direction, Permit, ProtocolCommand, ProtocolSet},
substream,
transport::{
quic::substream::{NegotiatingSubstream, Substream},
Endpoint,
},
types::{protocol::ProtocolName, SubstreamId},
BandwidthSink, PeerId,
};
use futures::{future::BoxFuture, stream::FuturesUnordered, AsyncRead, AsyncWrite, StreamExt};
use quinn::{Connection as QuinnConnection, RecvStream, SendStream};
/// Logging target for the file.
const LOG_TARGET: &str = "litep2p::quic::connection";
/// QUIC connection error.
///
/// Internal error type for substream negotiation failures. The optional
/// protocol/substream-ID fields carry enough context to report the failure
/// back to the protocol that requested the substream, when that is known.
#[derive(Debug)]
enum ConnectionError {
    /// Timeout
    Timeout {
        /// Protocol.
        protocol: Option<ProtocolName>,
        /// Substream ID.
        substream_id: Option<SubstreamId>,
    },
    /// Failed to negotiate connection/substream.
    FailedToNegotiate {
        /// Protocol.
        protocol: Option<ProtocolName>,
        /// Substream ID.
        substream_id: Option<SubstreamId>,
        /// Error.
        error: SubstreamError,
    },
}
/// Result of a successful substream negotiation: the raw QUIC send/receive
/// halves bundled with the metadata needed to hand the substream to a protocol.
struct NegotiatedSubstream {
    /// Substream direction.
    direction: Direction,
    /// Substream ID.
    substream_id: SubstreamId,
    /// Protocol name.
    protocol: ProtocolName,
    /// Substream used to send data.
    sender: SendStream,
    /// Substream used to receive data.
    receiver: RecvStream,
    /// Permit.
    // Held for the lifetime of the substream; presumably bounds the number of
    // concurrently open substreams — confirm against `ProtocolSet::try_get_permit`.
    permit: Permit,
}
/// QUIC connection.
///
/// Owns one established QUIC connection and drives substream accept/open
/// plus protocol-set commands in its `start()` event loop.
pub struct QuicConnection {
    /// Remote peer ID.
    peer: PeerId,
    /// Endpoint.
    endpoint: Endpoint,
    /// Substream open timeout.
    substream_open_timeout: Duration,
    /// QUIC connection.
    connection: QuinnConnection,
    /// Protocol set.
    protocol_set: ProtocolSet,
    /// Bandwidth sink.
    bandwidth_sink: BandwidthSink,
    /// Pending substreams.
    ///
    /// In-flight accept/open negotiations, each bounded by
    /// `substream_open_timeout`.
    pending_substreams:
        FuturesUnordered<BoxFuture<'static, Result<NegotiatedSubstream, ConnectionError>>>,
}
impl QuicConnection {
/// Creates a new [`QuicConnection`].
pub fn new(
peer: PeerId,
endpoint: Endpoint,
connection: QuinnConnection,
protocol_set: ProtocolSet,
bandwidth_sink: BandwidthSink,
substream_open_timeout: Duration,
) -> Self {
Self {
peer,
endpoint,
connection,
protocol_set,
bandwidth_sink,
substream_open_timeout,
pending_substreams: FuturesUnordered::new(),
}
}
/// Negotiate protocol.
async fn negotiate_protocol<S: AsyncRead + AsyncWrite + Unpin>(
stream: S,
role: &Role,
protocols: Vec<&str>,
) -> Result<(Negotiated<S>, ProtocolName), NegotiationError> {
tracing::trace!(target: LOG_TARGET, ?protocols, "negotiating protocols");
let (protocol, socket) = match role {
Role::Dialer => dialer_select_proto(stream, protocols, Version::V1).await,
Role::Listener => listener_select_proto(stream, protocols).await,
}
.map_err(NegotiationError::MultistreamSelectError)?;
tracing::trace!(target: LOG_TARGET, ?protocol, "protocol negotiated");
Ok((socket, ProtocolName::from(protocol.to_string())))
}
/// Open substream for `protocol`.
async fn open_substream(
handle: QuinnConnection,
permit: Permit,
substream_id: SubstreamId,
protocol: ProtocolName,
fallback_names: Vec<ProtocolName>,
) -> Result<NegotiatedSubstream, SubstreamError> {
tracing::debug!(target: LOG_TARGET, ?protocol, ?substream_id, "open substream");
let stream = match handle.open_bi().await {
Ok((send_stream, recv_stream)) => NegotiatingSubstream::new(send_stream, recv_stream),
Err(error) => return Err(NegotiationError::Quic(error.into()).into()),
};
// TODO: https://github.com/paritytech/litep2p/issues/346 protocols don't change after
// they've been initialized so this should be done only once
let protocols = std::iter::once(&*protocol)
.chain(fallback_names.iter().map(|protocol| &**protocol))
.collect();
let (io, protocol) = Self::negotiate_protocol(stream, &Role::Dialer, protocols).await?;
tracing::trace!(
target: LOG_TARGET,
?protocol,
?substream_id,
"substream accepted and negotiated"
);
let stream = io.inner();
let (sender, receiver) = stream.into_parts();
Ok(NegotiatedSubstream {
sender,
receiver,
substream_id,
direction: Direction::Outbound(substream_id),
permit,
protocol,
})
}
/// Accept bidirectional substream from rmeote peer.
async fn accept_substream(
stream: NegotiatingSubstream,
protocols: Vec<ProtocolName>,
substream_id: SubstreamId,
permit: Permit,
) -> Result<NegotiatedSubstream, NegotiationError> {
tracing::trace!(
target: LOG_TARGET,
?substream_id,
"accept inbound substream"
);
let protocols = protocols.iter().map(|protocol| &**protocol).collect::<Vec<&str>>();
let (io, protocol) = Self::negotiate_protocol(stream, &Role::Listener, protocols).await?;
tracing::trace!(
target: LOG_TARGET,
?substream_id,
?protocol,
"substream accepted and negotiated"
);
let stream = io.inner();
let (sender, receiver) = stream.into_parts();
Ok(NegotiatedSubstream {
permit,
sender,
receiver,
protocol,
substream_id,
direction: Direction::Inbound,
})
}
/// Start event loop for [`QuicConnection`].
pub async fn start(mut self) -> crate::Result<()> {
self.protocol_set
.report_connection_established(self.peer, self.endpoint.clone())
.await?;
loop {
tokio::select! {
event = self.connection.accept_bi() => match event {
Ok((send_stream, receive_stream)) => {
let substream = self.protocol_set.next_substream_id();
let protocols = self.protocol_set.protocols();
let permit = self.protocol_set.try_get_permit().ok_or(Error::ConnectionClosed)?;
let stream = NegotiatingSubstream::new(send_stream, receive_stream);
let substream_open_timeout = self.substream_open_timeout;
self.pending_substreams.push(Box::pin(async move {
match tokio::time::timeout(
substream_open_timeout,
Self::accept_substream(stream, protocols, substream, permit),
)
.await
{
Ok(Ok(substream)) => Ok(substream),
Ok(Err(error)) => Err(ConnectionError::FailedToNegotiate {
protocol: None,
substream_id: None,
error: SubstreamError::NegotiationError(error),
}),
Err(_) => Err(ConnectionError::Timeout {
protocol: None,
substream_id: None
}),
}
}));
}
Err(error) => {
tracing::debug!(target: LOG_TARGET, peer = ?self.peer, ?error, "failed to accept substream");
return self.protocol_set.report_connection_closed(self.peer, self.endpoint.connection_id()).await;
}
},
substream = self.pending_substreams.select_next_some(), if !self.pending_substreams.is_empty() => {
match substream {
Err(error) => {
tracing::debug!(
target: LOG_TARGET,
?error,
"failed to accept/open substream",
);
let (protocol, substream_id, error) = match error {
ConnectionError::Timeout { protocol, substream_id } => {
(protocol, substream_id, SubstreamError::NegotiationError(NegotiationError::Timeout))
}
ConnectionError::FailedToNegotiate { protocol, substream_id, error } => {
(protocol, substream_id, error)
}
};
if let (Some(protocol), Some(substream_id)) = (protocol, substream_id) {
self.protocol_set
.report_substream_open_failure(protocol, substream_id, error)
.await?;
}
}
Ok(substream) => {
let protocol = substream.protocol.clone();
let substream_id = substream.substream_id;
let direction = substream.direction;
let bandwidth_sink = self.bandwidth_sink.clone();
let substream = substream::Substream::new_quic(
self.peer,
substream_id,
Substream::new(
substream.permit,
substream.sender,
substream.receiver,
bandwidth_sink
),
self.protocol_set.protocol_codec(&protocol)
);
self.protocol_set
.report_substream_open(self.peer, protocol, direction, substream)
.await?;
}
}
}
command = self.protocol_set.next() => match command {
None => {
tracing::debug!(
target: LOG_TARGET,
peer = ?self.peer,
connection_id = ?self.endpoint.connection_id(),
"protocols have dropped connection"
);
return self.protocol_set.report_connection_closed(self.peer, self.endpoint.connection_id()).await;
}
Some(ProtocolCommand::OpenSubstream { protocol, fallback_names, substream_id, permit, .. }) => {
let connection = self.connection.clone();
let substream_open_timeout = self.substream_open_timeout;
tracing::trace!(
target: LOG_TARGET,
?protocol,
?fallback_names,
?substream_id,
"open substream"
);
self.pending_substreams.push(Box::pin(async move {
match tokio::time::timeout(
substream_open_timeout,
Self::open_substream(
connection,
permit,
substream_id,
protocol.clone(),
fallback_names,
),
)
.await
{
Ok(Ok(substream)) => Ok(substream),
Ok(Err(error)) => Err(ConnectionError::FailedToNegotiate {
protocol: Some(protocol),
substream_id: Some(substream_id),
error,
}),
Err(_) => Err(ConnectionError::Timeout {
protocol: None,
substream_id: None
}),
}
}));
}
Some(ProtocolCommand::ForceClose) => {
tracing::debug!(
target: LOG_TARGET,
peer = ?self.peer,
connection_id = ?self.endpoint.connection_id(),
"force closing connection",
);
return self.protocol_set.report_connection_closed(self.peer, self.endpoint.connection_id()).await;
}
}
}
}
}
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/transport/quic/mod.rs | src/transport/quic/mod.rs | // Copyright 2021 Parity Technologies (UK) Ltd.
// Copyright 2022 Protocol Labs.
// Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! QUIC transport.
use crate::{
crypto::tls::make_client_config,
error::{AddressError, DialError, Error, QuicError},
transport::{
manager::TransportHandle,
quic::{config::Config as QuicConfig, connection::QuicConnection, listener::QuicListener},
Endpoint as Litep2pEndpoint, Transport, TransportBuilder, TransportEvent,
},
types::ConnectionId,
PeerId,
};
use futures::{
future::BoxFuture,
stream::{AbortHandle, FuturesUnordered},
Stream, StreamExt, TryFutureExt,
};
use hickory_resolver::TokioResolver;
use multiaddr::{Multiaddr, Protocol};
use quinn::{ClientConfig, Connecting, Connection, Endpoint, IdleTimeout};
use std::{
collections::HashMap,
net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
pub(crate) use substream::Substream;
mod connection;
mod listener;
mod substream;
pub mod config;
/// Logging target for the file.
const LOG_TARGET: &str = "litep2p::quic";
/// Established QUIC connection whose remote peer ID has been extracted from
/// the connection's TLS certificate (see `QuicTransport::extract_peer_id`).
#[derive(Debug)]
struct NegotiatedConnection {
    /// Remote peer ID.
    peer: PeerId,
    /// QUIC connection.
    connection: Connection,
}
/// Outcome of a raw (unnegotiated) connection attempt tracked in
/// `QuicTransport::pending_raw_connections`.
#[derive(Debug)]
enum RawConnectionResult {
    /// The first successful connection.
    Connected {
        connection_id: ConnectionId,
        address: Multiaddr,
        stream: NegotiatedConnection,
    },
    /// All connection attempts failed.
    Failed {
        connection_id: ConnectionId,
        // One error per attempted address.
        errors: Vec<(Multiaddr, DialError)>,
    },
    /// Future was canceled.
    Canceled { connection_id: ConnectionId },
}
/// QUIC transport object.
pub(crate) struct QuicTransport {
    /// Transport handle.
    context: TransportHandle,
    /// Transport config.
    config: QuicConfig,
    /// QUIC listener.
    listener: QuicListener,
    /// Pending dials.
    ///
    /// Maps the connection ID of an outbound dial to the address being dialed,
    /// so failures can be reported with the offending address.
    pending_dials: HashMap<ConnectionId, Multiaddr>,
    /// Pending inbound connections.
    pending_inbound_connections: HashMap<ConnectionId, Connecting>,
    /// Pending connections.
    pending_connections: FuturesUnordered<
        BoxFuture<'static, (ConnectionId, Result<NegotiatedConnection, DialError>)>,
    >,
    /// Negotiated connections waiting for validation.
    pending_open: HashMap<ConnectionId, (NegotiatedConnection, Litep2pEndpoint)>,
    /// Pending raw, unnegotiated connections.
    pending_raw_connections: FuturesUnordered<BoxFuture<'static, RawConnectionResult>>,
    /// Opened raw connection, waiting for approval/rejection from `TransportManager`.
    opened_raw: HashMap<ConnectionId, (NegotiatedConnection, Multiaddr)>,
    /// Cancel raw connections futures.
    ///
    /// This is cancelling `Self::pending_raw_connections`.
    cancel_futures: HashMap<ConnectionId, AbortHandle>,
}
impl QuicTransport {
    /// Attempt to extract `PeerId` from connection certificates.
    fn extract_peer_id(connection: &Connection) -> Option<PeerId> {
        let certificates: Box<Vec<rustls::Certificate>> =
            connection.peer_identity()?.downcast().ok()?;
        let p2p_cert = crate::crypto::tls::certificate::parse(certificates.first()?)
            .expect("the certificate was validated during TLS handshake; qed");
        Some(p2p_cert.peer_id())
    }
    /// Handle inbound accepted connection.
    ///
    /// Drives the QUIC handshake to completion in the background and extracts
    /// the remote peer ID from the TLS certificate.
    fn on_inbound_connection(&mut self, connection_id: ConnectionId, connection: Connecting) {
        self.pending_connections.push(Box::pin(async move {
            let connection = match connection.await {
                Ok(connection) => connection,
                Err(error) => return (connection_id, Err(DialError::from(error))),
            };
            // A connection without a verifiable peer identity is rejected.
            let Some(peer) = Self::extract_peer_id(&connection) else {
                return (
                    connection_id,
                    Err(crate::error::NegotiationError::Quic(QuicError::InvalidCertificate).into()),
                );
            };
            (connection_id, Ok(NegotiatedConnection { peer, connection }))
        }));
    }
    /// Handle established connection.
    ///
    /// Returns an event for the `TransportManager` when there is something to
    /// report: an established connection or a dial failure.
    fn on_connection_established(
        &mut self,
        connection_id: ConnectionId,
        result: Result<NegotiatedConnection, DialError>,
    ) -> Option<TransportEvent> {
        tracing::debug!(target: LOG_TARGET, ?connection_id, success = result.is_ok(), "connection established");
        // `on_connection_established()` is called for both inbound and outbound connections
        // but `pending_dials` will only contain entries for outbound connections.
        let maybe_address = self.pending_dials.remove(&connection_id);
        match result {
            Ok(connection) => {
                let peer = connection.peer;
                // `map_or_else` (not `map_or`): the listener-endpoint fallback
                // must only be built when there was no pending dial; `map_or`
                // would evaluate it eagerly on every call.
                let endpoint = maybe_address.map_or_else(
                    || {
                        let address = connection.connection.remote_address();
                        Litep2pEndpoint::listener(
                            Multiaddr::empty()
                                .with(Protocol::from(address.ip()))
                                .with(Protocol::Udp(address.port()))
                                .with(Protocol::QuicV1),
                            connection_id,
                        )
                    },
                    |address| Litep2pEndpoint::dialer(address, connection_id),
                );
                self.pending_open.insert(connection_id, (connection, endpoint.clone()));
                return Some(TransportEvent::ConnectionEstablished { peer, endpoint });
            }
            Err(error) => {
                tracing::debug!(target: LOG_TARGET, ?connection_id, ?error, "failed to establish connection");
                // since the address was found from `pending_dials`,
                // report the error to protocols and `TransportManager`
                if let Some(address) = maybe_address {
                    return Some(TransportEvent::DialFailure {
                        connection_id,
                        address,
                        error,
                    });
                }
            }
        }
        None
    }
}
impl TransportBuilder for QuicTransport {
    type Config = QuicConfig;
    type Transport = QuicTransport;

    /// Create new [`QuicTransport`] object.
    ///
    /// Binds a [`QuicListener`] to the configured listen addresses (taken out
    /// of `config`) and returns the transport together with the resolved
    /// listen addresses. The DNS resolver argument is unused by this transport.
    fn new(
        context: TransportHandle,
        mut config: Self::Config,
        _resolver: Arc<TokioResolver>,
    ) -> crate::Result<(Self, Vec<Multiaddr>)>
    where
        Self: Sized,
    {
        tracing::info!(
            target: LOG_TARGET,
            ?config,
            "start quic transport",
        );

        // `take` empties `config.listen_addresses`; the listener owns them
        // from here on.
        let (listener, listen_addresses) = QuicListener::new(
            &context.keypair,
            std::mem::take(&mut config.listen_addresses),
        )?;

        Ok((
            Self {
                context,
                config,
                listener,
                opened_raw: HashMap::new(),
                pending_open: HashMap::new(),
                pending_dials: HashMap::new(),
                pending_inbound_connections: HashMap::new(),
                pending_raw_connections: FuturesUnordered::new(),
                pending_connections: FuturesUnordered::new(),
                cancel_futures: HashMap::new(),
            },
            listen_addresses,
        ))
    }
}
impl Transport for QuicTransport {
    /// Dial a remote peer over QUIC.
    ///
    /// The address must contain an IP protocol and a peer id; the peer id is
    /// pinned into the TLS client configuration so the remote certificate can
    /// be verified. The connection is established in the background and its
    /// outcome surfaces through `poll_next()`.
    fn dial(&mut self, connection_id: ConnectionId, address: Multiaddr) -> crate::Result<()> {
        let Ok((socket_address, Some(peer))) = QuicListener::get_socket_address(&address) else {
            return Err(Error::AddressError(AddressError::PeerIdMissing));
        };
        let crypto_config =
            Arc::new(make_client_config(&self.context.keypair, Some(peer)).expect("to succeed"));
        let mut transport_config = quinn::TransportConfig::default();
        // The connection-open timeout doubles as the QUIC idle timeout here.
        let timeout =
            IdleTimeout::try_from(self.config.connection_open_timeout).expect("to succeed");
        transport_config.max_idle_timeout(Some(timeout));
        let mut client_config = ClientConfig::new(crypto_config);
        client_config.transport_config(Arc::new(transport_config));

        // Bind an ephemeral local socket of the same IP family as the remote.
        let client_listen_address = match address.iter().next() {
            Some(Protocol::Ip6(_)) => SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0),
            Some(Protocol::Ip4(_)) => SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
            _ => return Err(Error::AddressError(AddressError::InvalidProtocol)),
        };
        let client = Endpoint::client(client_listen_address)
            .map_err(|error| Error::Other(error.to_string()))?;
        // NOTE(review): the server name "l" looks like a placeholder; identity
        // appears to be checked via the libp2p TLS certificate, not the name —
        // confirm against `make_client_config`.
        let connection = client
            .connect_with(client_config, socket_address, "l")
            .map_err(|error| Error::Other(error.to_string()))?;

        tracing::trace!(
            target: LOG_TARGET,
            ?address,
            ?peer,
            ?client_listen_address,
            "dial peer",
        );

        // Remember the dialed address so a failure can be reported as
        // `DialFailure` from `on_connection_established()`.
        self.pending_dials.insert(connection_id, address);
        self.pending_connections.push(Box::pin(async move {
            let connection = match connection.await {
                Ok(connection) => connection,
                Err(error) => return (connection_id, Err(DialError::from(error))),
            };
            // Reject connections whose certificate has no valid peer identity.
            let Some(peer) = Self::extract_peer_id(&connection) else {
                return (
                    connection_id,
                    Err(crate::error::NegotiationError::Quic(QuicError::InvalidCertificate).into()),
                );
            };
            (connection_id, Ok(NegotiatedConnection { peer, connection }))
        }));

        Ok(())
    }

    /// Accept a negotiated connection and spawn its event loop on the executor.
    fn accept(&mut self, connection_id: ConnectionId) -> crate::Result<()> {
        let (connection, endpoint) = self
            .pending_open
            .remove(&connection_id)
            .ok_or(Error::ConnectionDoesntExist(connection_id))?;
        let bandwidth_sink = self.context.bandwidth_sink.clone();
        let protocol_set = self.context.protocol_set(connection_id);
        let substream_open_timeout = self.config.substream_open_timeout;

        tracing::trace!(
            target: LOG_TARGET,
            ?connection_id,
            "start connection",
        );

        // The connection's exit value is intentionally ignored; errors are
        // handled inside the connection event loop itself.
        self.context.executor.run(Box::pin(async move {
            let _ = QuicConnection::new(
                connection.peer,
                endpoint,
                connection.connection,
                protocol_set,
                bandwidth_sink,
                substream_open_timeout,
            )
            .start()
            .await;
        }));

        Ok(())
    }

    /// Reject a negotiated connection by dropping it from `pending_open`.
    fn reject(&mut self, connection_id: ConnectionId) -> crate::Result<()> {
        self.pending_open
            .remove(&connection_id)
            .map_or(Err(Error::ConnectionDoesntExist(connection_id)), |_| Ok(()))
    }

    /// Accept a pending inbound connection and start negotiating it.
    fn accept_pending(&mut self, connection_id: ConnectionId) -> crate::Result<()> {
        let connection = self
            .pending_inbound_connections
            .remove(&connection_id)
            .ok_or(Error::ConnectionDoesntExist(connection_id))?;

        self.on_inbound_connection(connection_id, connection);

        Ok(())
    }

    /// Reject a pending inbound connection by dropping it.
    fn reject_pending(&mut self, connection_id: ConnectionId) -> crate::Result<()> {
        self.pending_inbound_connections
            .remove(&connection_id)
            .map_or(Err(Error::ConnectionDoesntExist(connection_id)), |_| Ok(()))
    }

    /// Open a raw connection by racing dials against all `addresses`.
    ///
    /// The first successful dial wins; if every address fails, the collected
    /// errors are reported. The race is abortable via `cancel()`.
    fn open(
        &mut self,
        connection_id: ConnectionId,
        addresses: Vec<Multiaddr>,
    ) -> crate::Result<()> {
        let num_addresses = addresses.len();
        let mut futures: FuturesUnordered<_> = addresses
            .into_iter()
            .map(|address| {
                let keypair = self.context.keypair.clone();
                let connection_open_timeout = self.config.connection_open_timeout;
                let addr = address.clone();
                let future = async move {
                    // Same per-address dial sequence as `dial()`: resolve the
                    // socket address + peer id, build the TLS client config,
                    // bind a matching-family socket and connect.
                    let (socket_address, peer) = QuicListener::get_socket_address(&address)
                        .map_err(DialError::AddressError)?;
                    let peer =
                        peer.ok_or_else(|| DialError::AddressError(AddressError::PeerIdMissing))?;

                    let crypto_config =
                        Arc::new(make_client_config(&keypair, Some(peer)).expect("to succeed"));
                    let mut transport_config = quinn::TransportConfig::default();
                    let timeout =
                        IdleTimeout::try_from(connection_open_timeout).expect("to succeed");
                    transport_config.max_idle_timeout(Some(timeout));
                    let mut client_config = ClientConfig::new(crypto_config);
                    client_config.transport_config(Arc::new(transport_config));

                    let client_listen_address = match address.iter().next() {
                        Some(Protocol::Ip6(_)) =>
                            SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0),
                        Some(Protocol::Ip4(_)) =>
                            SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
                        _ => return Err(AddressError::InvalidProtocol.into()),
                    };

                    let client = match Endpoint::client(client_listen_address) {
                        Ok(client) => client,
                        Err(error) => {
                            return Err(DialError::from(error));
                        }
                    };
                    let connection = match client.connect_with(client_config, socket_address, "l") {
                        Ok(connection) => connection,
                        Err(error) => return Err(DialError::from(error)),
                    };

                    let connection = match connection.await {
                        Ok(connection) => connection,
                        Err(error) => return Err(DialError::from(error)),
                    };

                    let Some(peer) = Self::extract_peer_id(&connection) else {
                        return Err(crate::error::NegotiationError::Quic(
                            QuicError::InvalidCertificate,
                        )
                        .into());
                    };

                    Ok(NegotiatedConnection { peer, connection })
                };

                // Tag both success and failure with the attempted address.
                async move { future.await.map(|ok| (addr.clone(), ok)).map_err(|err| (addr, err)) }
            })
            .collect();

        // Future that will resolve to the first successful connection.
        let future = async move {
            let mut errors = Vec::with_capacity(num_addresses);

            while let Some(result) = futures.next().await {
                match result {
                    Ok((address, stream)) =>
                        return RawConnectionResult::Connected {
                            connection_id,
                            address,
                            stream,
                        },
                    Err(error) => {
                        tracing::debug!(
                            target: LOG_TARGET,
                            ?connection_id,
                            ?error,
                            "failed to open connection",
                        );
                        errors.push(error)
                    }
                }
            }

            RawConnectionResult::Failed {
                connection_id,
                errors,
            }
        };

        // Make the race abortable; `cancel()` keeps the abort handle.
        let (fut, handle) = futures::future::abortable(future);
        let fut = fut.unwrap_or_else(move |_| RawConnectionResult::Canceled { connection_id });
        self.pending_raw_connections.push(Box::pin(fut));
        self.cancel_futures.insert(connection_id, handle);

        Ok(())
    }

    /// Promote a raw (opened) connection to negotiation.
    fn negotiate(&mut self, connection_id: ConnectionId) -> crate::Result<()> {
        let (connection, _address) = self
            .opened_raw
            .remove(&connection_id)
            .ok_or(Error::ConnectionDoesntExist(connection_id))?;

        // The connection was already negotiated while opening, so just feed it
        // back through the pending-connections pipeline as an immediate `Ok`.
        self.pending_connections
            .push(Box::pin(async move { (connection_id, Ok(connection)) }));

        Ok(())
    }

    /// Cancel opening connections.
    fn cancel(&mut self, connection_id: ConnectionId) {
        // Cancel the future if it exists.
        // State clean-up happens inside the `poll_next`.
        if let Some(handle) = self.cancel_futures.get(&connection_id) {
            handle.abort();
        }
    }
}
impl Stream for QuicTransport {
    type Item = TransportEvent;

    /// Drive the transport: listener first, then raw-connection races, then
    /// pending negotiations. Returns at most one event per call.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // New inbound connections are parked in `pending_inbound_connections`
        // until `accept_pending()`/`reject_pending()` is called.
        if let Poll::Ready(Some(connection)) = self.listener.poll_next_unpin(cx) {
            let connection_id = self.context.next_connection_id();
            tracing::trace!(
                target: LOG_TARGET,
                ?connection_id,
                "pending inbound connection",
            );
            self.pending_inbound_connections.insert(connection_id, connection);

            return Poll::Ready(Some(TransportEvent::PendingInboundConnection {
                connection_id,
            }));
        }

        // Drain results from the `open()` address races. A result whose cancel
        // handle was already aborted is discarded (the caller cancelled it).
        while let Poll::Ready(Some(result)) = self.pending_raw_connections.poll_next_unpin(cx) {
            tracing::trace!(target: LOG_TARGET, ?result, "raw connection result");

            match result {
                RawConnectionResult::Connected {
                    connection_id,
                    address,
                    stream,
                } => {
                    let Some(handle) = self.cancel_futures.remove(&connection_id) else {
                        tracing::warn!(
                            target: LOG_TARGET,
                            ?connection_id,
                            ?address,
                            "raw connection without a cancel handle",
                        );
                        continue;
                    };

                    if !handle.is_aborted() {
                        self.opened_raw.insert(connection_id, (stream, address.clone()));

                        return Poll::Ready(Some(TransportEvent::ConnectionOpened {
                            connection_id,
                            address,
                        }));
                    }
                }

                RawConnectionResult::Failed {
                    connection_id,
                    errors,
                } => {
                    let Some(handle) = self.cancel_futures.remove(&connection_id) else {
                        tracing::warn!(
                            target: LOG_TARGET,
                            ?connection_id,
                            ?errors,
                            "raw connection without a cancel handle",
                        );
                        continue;
                    };

                    if !handle.is_aborted() {
                        return Poll::Ready(Some(TransportEvent::OpenFailure {
                            connection_id,
                            errors,
                        }));
                    }
                }

                // Cancelled races only need their bookkeeping removed.
                RawConnectionResult::Canceled { connection_id } => {
                    if self.cancel_futures.remove(&connection_id).is_none() {
                        tracing::warn!(
                            target: LOG_TARGET,
                            ?connection_id,
                            "raw cancelled connection without a cancel handle",
                        );
                    }
                }
            }
        }

        // Completed negotiations; only successes/outbound failures yield events.
        while let Poll::Ready(Some(connection)) = self.pending_connections.poll_next_unpin(cx) {
            let (connection_id, result) = connection;

            match self.on_connection_established(connection_id, result) {
                Some(event) => return Poll::Ready(Some(event)),
                None => {}
            }
        }

        Poll::Pending
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        codec::ProtocolCodec,
        crypto::ed25519::Keypair,
        executor::DefaultExecutor,
        transport::manager::{ProtocolContext, TransportHandle},
        types::protocol::ProtocolName,
        BandwidthSink,
    };
    use multihash::Multihash;
    use tokio::sync::mpsc::channel;

    /// End-to-end smoke test: two in-process QUIC transports, one dialing the
    /// other, both sides reaching `ConnectionEstablished`.
    #[tokio::test]
    async fn test_quinn() {
        let _ = tracing_subscriber::fmt()
            .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
            .try_init();

        // Transport 1: the listening side.
        let keypair1 = Keypair::generate();
        let (tx1, _rx1) = channel(64);
        let (event_tx1, _event_rx1) = channel(64);

        let handle1 = TransportHandle {
            executor: Arc::new(DefaultExecutor {}),
            next_substream_id: Default::default(),
            next_connection_id: Default::default(),
            keypair: keypair1.clone(),
            tx: event_tx1,
            bandwidth_sink: BandwidthSink::new(),

            protocols: HashMap::from_iter([(
                ProtocolName::from("/notif/1"),
                ProtocolContext {
                    tx: tx1,
                    codec: ProtocolCodec::Identity(32),
                    fallback_names: Vec::new(),
                },
            )]),
        };
        let resolver = Arc::new(TokioResolver::builder_tokio().unwrap().build());

        let (mut transport1, listen_addresses) =
            QuicTransport::new(handle1, Default::default(), resolver.clone()).unwrap();
        let listen_address = listen_addresses[0].clone();

        // Transport 2: the dialing side.
        let keypair2 = Keypair::generate();
        let (tx2, _rx2) = channel(64);
        let (event_tx2, _event_rx2) = channel(64);

        let handle2 = TransportHandle {
            executor: Arc::new(DefaultExecutor {}),
            next_substream_id: Default::default(),
            next_connection_id: Default::default(),
            keypair: keypair2.clone(),
            tx: event_tx2,
            bandwidth_sink: BandwidthSink::new(),

            protocols: HashMap::from_iter([(
                ProtocolName::from("/notif/1"),
                ProtocolContext {
                    tx: tx2,
                    codec: ProtocolCodec::Identity(32),
                    fallback_names: Vec::new(),
                },
            )]),
        };

        let (mut transport2, _) =
            QuicTransport::new(handle2, Default::default(), resolver).unwrap();

        let peer1: PeerId = PeerId::from_public_key(&keypair1.public().into());
        let _peer2: PeerId = PeerId::from_public_key(&keypair2.public().into());

        // The dial address must carry the listener's peer id for TLS pinning.
        let listen_address = listen_address.with(Protocol::P2p(
            Multihash::from_bytes(&peer1.to_bytes()).unwrap(),
        ));
        transport2.dial(ConnectionId::new(), listen_address).unwrap();

        // The listener first reports the connection as pending; accept it.
        let event = transport1.next().await.unwrap();
        match event {
            TransportEvent::PendingInboundConnection { connection_id } => {
                transport1.accept_pending(connection_id).unwrap();
            }
            _ => panic!("unexpected event"),
        }

        // Both sides must then report an established connection.
        let (res1, res2) = tokio::join!(transport1.next(), transport2.next());

        assert!(std::matches!(
            res1,
            Some(TransportEvent::ConnectionEstablished { .. })
        ));
        assert!(std::matches!(
            res2,
            Some(TransportEvent::ConnectionEstablished { .. })
        ));
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/transport/quic/substream.rs | src/transport/quic/substream.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::{error::SubstreamError, BandwidthSink};
use bytes::Bytes;
use futures::{AsyncRead, AsyncWrite};
use quinn::{RecvStream, SendStream};
use tokio::io::{AsyncRead as TokioAsyncRead, AsyncWrite as TokioAsyncWrite};
use tokio_util::compat::{Compat, TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
use std::{
io,
pin::Pin,
task::{Context, Poll},
};
use crate::protocol::Permit;
/// QUIC substream.
#[derive(Debug)]
pub struct Substream {
    /// Held for its lifetime only — presumably releases a protocol-set slot
    /// when dropped; TODO confirm against `crate::protocol::Permit`.
    _permit: Permit,
    /// Sink used to account inbound/outbound bandwidth.
    bandwidth_sink: BandwidthSink,
    /// QUIC send half of the substream.
    send_stream: SendStream,
    /// QUIC receive half of the substream.
    recv_stream: RecvStream,
}
impl Substream {
    /// Create new [`Substream`].
    pub fn new(
        _permit: Permit,
        send_stream: SendStream,
        recv_stream: RecvStream,
        bandwidth_sink: BandwidthSink,
    ) -> Self {
        Self {
            _permit,
            send_stream,
            recv_stream,
            bandwidth_sink,
        }
    }

    /// Write `buffers` to the underlying socket.
    ///
    /// On success the combined length of all buffers is credited to the
    /// outbound bandwidth sink; any write failure is reported as
    /// [`SubstreamError::ConnectionClosed`].
    pub async fn write_all_chunks(&mut self, buffers: &mut [Bytes]) -> Result<(), SubstreamError> {
        // Compute the payload size up front: the write consumes the buffers.
        let nwritten: usize = buffers.iter().map(|buffer| buffer.len()).sum();

        self.send_stream
            .write_all_chunks(buffers)
            .await
            .map_err(|_| SubstreamError::ConnectionClosed)?;

        self.bandwidth_sink.increase_outbound(nwritten);
        Ok(())
    }
}
impl TokioAsyncRead for Substream {
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut tokio::io::ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // Record how much of `buf` was already filled so only the bytes read by
        // THIS call are credited to the bandwidth sink. Crediting
        // `buf.filled().len()` directly over-counts whenever the caller passes
        // a partially filled `ReadBuf`.
        let filled_before = buf.filled().len();

        match futures::ready!(Pin::new(&mut self.recv_stream).poll_read(cx, buf)) {
            Err(error) => Poll::Ready(Err(error)),
            Ok(res) => {
                // `filled()` only grows across a successful read, so the
                // subtraction cannot underflow.
                self.bandwidth_sink.increase_inbound(buf.filled().len() - filled_before);
                Poll::Ready(Ok(res))
            }
        }
    }
}
impl TokioAsyncWrite for Substream {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, io::Error>> {
        // Delegate to the QUIC send half, crediting outbound bandwidth on
        // success before propagating the result unchanged.
        let result = futures::ready!(Pin::new(&mut self.send_stream).poll_write(cx, buf));
        if let Ok(nwritten) = &result {
            self.bandwidth_sink.increase_outbound(*nwritten);
        }
        Poll::Ready(result)
    }

    /// Flushing is delegated directly to the send half.
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        Pin::new(&mut self.send_stream).poll_flush(cx)
    }

    /// Shutdown is delegated directly to the send half.
    fn poll_shutdown(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), io::Error>> {
        Pin::new(&mut self.send_stream).poll_shutdown(cx)
    }
}
/// Substream pair used to negotiate a protocol for the connection.
pub struct NegotiatingSubstream {
    /// Receive half, adapted from tokio I/O traits to `futures` I/O traits.
    recv_stream: Compat<RecvStream>,
    /// Send half, adapted from tokio I/O traits to `futures` I/O traits.
    send_stream: Compat<SendStream>,
}
impl NegotiatingSubstream {
    /// Create new [`NegotiatingSubstream`].
    ///
    /// Both halves are wrapped in `futures` compatibility adapters so the
    /// negotiation code (which uses `futures::AsyncRead`/`AsyncWrite`) can
    /// drive them.
    pub fn new(send_stream: SendStream, recv_stream: RecvStream) -> Self {
        let recv_stream = TokioAsyncReadCompatExt::compat(recv_stream);
        let send_stream = TokioAsyncWriteCompatExt::compat_write(send_stream);

        Self {
            recv_stream,
            send_stream,
        }
    }

    /// Deconstruct [`NegotiatingSubstream`] into parts.
    pub fn into_parts(self) -> (SendStream, RecvStream) {
        (self.send_stream.into_inner(), self.recv_stream.into_inner())
    }
}
impl AsyncRead for NegotiatingSubstream {
    // Pure delegation to the adapted receive half.
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<io::Result<usize>> {
        Pin::new(&mut self.recv_stream).poll_read(cx, buf)
    }
}
impl AsyncWrite for NegotiatingSubstream {
    // All three operations delegate to the adapted send half.
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Pin::new(&mut self.send_stream).poll_write(cx, buf)
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Pin::new(&mut self.send_stream).poll_flush(cx)
    }

    fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Pin::new(&mut self.send_stream).poll_close(cx)
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/transport/tcp/config.rs | src/transport/tcp/config.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! TCP transport configuration.
use crate::{
crypto::noise::{MAX_READ_AHEAD_FACTOR, MAX_WRITE_BUFFER_SIZE},
transport::{CONNECTION_OPEN_TIMEOUT, SUBSTREAM_OPEN_TIMEOUT},
};
/// TCP transport configuration.
#[derive(Debug, Clone)]
pub struct Config {
    /// Listen address for the transport.
    ///
    /// Default listen addresses are ["/ip4/0.0.0.0/tcp/0", "/ip6/::/tcp/0"].
    pub listen_addresses: Vec<multiaddr::Multiaddr>,

    /// Whether to set `SO_REUSEPORT` and bind a socket to the listen address port for outbound
    /// connections.
    ///
    /// Note that `SO_REUSEADDR` is always set on listening sockets.
    ///
    /// Defaults to `true`.
    pub reuse_port: bool,

    /// Enable `TCP_NODELAY`.
    ///
    /// Defaults to `false`.
    pub nodelay: bool,

    /// Yamux configuration.
    pub yamux_config: crate::yamux::Config,

    /// Noise read-ahead frame count.
    ///
    /// Specifies how many Noise frames are read per call to the underlying socket.
    ///
    /// By default this is configured to `5` so each call to the underlying socket can read up
    /// to `5` Noise frame per call. Fewer frames may be read if there isn't enough data in the
    /// socket. Each Noise frame is `65 KB` so the default setting allocates `65 KB * 5 = 325 KB`
    /// per connection.
    pub noise_read_ahead_frame_count: usize,

    /// Noise write buffer size.
    ///
    /// Specifies how many Noise frames are tried to be coalesced into a single system call.
    /// By default the value is set to `2` which means that the `NoiseSocket` will allocate
    /// `130 KB` for each outgoing connection.
    ///
    /// The write buffer size is separate from the read-ahead frame count so by default
    /// the Noise code will allocate `2 * 65 KB + 5 * 65 KB = 455 KB` per connection.
    pub noise_write_buffer_size: usize,

    /// Connection open timeout.
    ///
    /// How long should litep2p wait for a connection to be opened before the host
    /// is deemed unreachable.
    pub connection_open_timeout: std::time::Duration,

    /// Substream open timeout.
    ///
    /// How long should litep2p wait for a substream to be opened before considering
    /// the substream rejected.
    pub substream_open_timeout: std::time::Duration,
}
impl Default for Config {
    /// Default configuration: wildcard IPv4/IPv6 listeners on ephemeral ports,
    /// port reuse enabled, `TCP_NODELAY` disabled, and the crate-wide Noise
    /// buffer sizes and timeout constants.
    fn default() -> Self {
        let listen_addresses = vec![
            "/ip4/0.0.0.0/tcp/0".parse().expect("valid address"),
            "/ip6/::/tcp/0".parse().expect("valid address"),
        ];

        Self {
            listen_addresses,
            reuse_port: true,
            nodelay: false,
            yamux_config: Default::default(),
            noise_read_ahead_frame_count: MAX_READ_AHEAD_FACTOR,
            noise_write_buffer_size: MAX_WRITE_BUFFER_SIZE,
            connection_open_timeout: CONNECTION_OPEN_TIMEOUT,
            substream_open_timeout: SUBSTREAM_OPEN_TIMEOUT,
        }
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/transport/tcp/connection.rs | src/transport/tcp/connection.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::{
config::Role,
crypto::{
ed25519::Keypair,
noise::{self, NoiseSocket},
},
error::{Error, NegotiationError, SubstreamError},
multistream_select::{dialer_select_proto, listener_select_proto, Negotiated, Version},
protocol::{Direction, Permit, ProtocolCommand, ProtocolSet},
substream,
transport::{
common::listener::{AddressType, DnsType},
tcp::substream::Substream,
Endpoint,
},
types::{protocol::ProtocolName, ConnectionId, SubstreamId},
BandwidthSink, PeerId,
};
use futures::{
future::BoxFuture,
stream::{FuturesUnordered, StreamExt},
AsyncRead, AsyncWrite,
};
use multiaddr::{Multiaddr, Protocol};
use tokio::net::TcpStream;
use tokio_util::compat::{
Compat, FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt,
};
use std::{
borrow::Cow,
fmt,
net::SocketAddr,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
time::Duration,
};
/// Logging target for the file.
const LOG_TARGET: &str = "litep2p::tcp::connection";
/// A substream whose protocol has been negotiated, ready to be handed to the
/// owning protocol.
#[derive(Debug)]
pub struct NegotiatedSubstream {
    /// Substream direction.
    direction: Direction,

    /// Substream ID.
    substream_id: SubstreamId,

    /// Protocol name.
    protocol: ProtocolName,

    /// Yamux substream.
    io: crate::yamux::Stream,

    /// Permit.
    permit: Permit,
}
/// TCP connection error.
///
/// `protocol`/`substream_id` are `None` when the failure occurred before the
/// protocol or substream was known.
#[derive(Debug)]
enum ConnectionError {
    /// Timeout
    Timeout {
        /// Protocol.
        protocol: Option<ProtocolName>,

        /// Substream ID.
        substream_id: Option<SubstreamId>,
    },

    /// Failed to negotiate connection/substream.
    FailedToNegotiate {
        /// Protocol.
        protocol: Option<ProtocolName>,

        /// Substream ID.
        substream_id: Option<SubstreamId>,

        /// Error.
        error: SubstreamError,
    },
}
/// Connection context for an opened connection that hasn't yet started its event loop.
pub struct NegotiatedConnection {
    /// Yamux connection.
    connection: crate::yamux::ControlledConnection<NoiseSocket<Compat<TcpStream>>>,

    /// Yamux control.
    control: crate::yamux::Control,

    /// Remote peer ID.
    peer: PeerId,

    /// Endpoint.
    endpoint: Endpoint,

    /// Substream open timeout.
    substream_open_timeout: Duration,
}
impl std::fmt::Debug for NegotiatedConnection {
    // Manual impl: only the identifying fields are printed; the yamux
    // connection/control handles have no `Debug` representation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("NegotiatedConnection")
            .field("peer", &self.peer)
            .field("endpoint", &self.endpoint)
            .finish()
    }
}
impl NegotiatedConnection {
    /// Get `ConnectionId` of the negotiated connection.
    pub fn connection_id(&self) -> ConnectionId {
        self.endpoint.connection_id()
    }

    /// Get `PeerId` of the negotiated connection.
    pub fn peer(&self) -> PeerId {
        self.peer
    }

    /// Get `Endpoint` of the negotiated connection.
    pub fn endpoint(&self) -> Endpoint {
        self.endpoint.clone()
    }
}
/// TCP connection.
pub struct TcpConnection {
    /// Protocol context.
    protocol_set: ProtocolSet,

    /// Yamux connection.
    connection: crate::yamux::ControlledConnection<NoiseSocket<Compat<TcpStream>>>,

    /// Yamux control.
    control: crate::yamux::Control,

    /// Remote peer ID.
    peer: PeerId,

    /// Endpoint.
    endpoint: Endpoint,

    /// Substream open timeout.
    substream_open_timeout: Duration,

    /// Next substream ID.
    next_substream_id: Arc<AtomicUsize>,

    /// Bandwidth sink.
    bandwidth_sink: BandwidthSink,

    /// Pending substreams.
    pending_substreams:
        FuturesUnordered<BoxFuture<'static, Result<NegotiatedSubstream, ConnectionError>>>,
}
impl fmt::Debug for TcpConnection {
    // Manual impl: most fields (yamux handles, protocol set, futures) have no
    // useful `Debug` output, so only identity and counters are shown.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("TcpConnection")
            .field("peer", &self.peer)
            .field("next_substream_id", &self.next_substream_id)
            .finish()
    }
}
impl TcpConnection {
/// Create new [`TcpConnection`] from [`NegotiatedConnection`].
pub(super) fn new(
context: NegotiatedConnection,
protocol_set: ProtocolSet,
bandwidth_sink: BandwidthSink,
next_substream_id: Arc<AtomicUsize>,
) -> Self {
let NegotiatedConnection {
connection,
control,
peer,
endpoint,
substream_open_timeout,
} = context;
Self {
protocol_set,
connection,
control,
peer,
endpoint,
bandwidth_sink,
next_substream_id,
pending_substreams: FuturesUnordered::new(),
substream_open_timeout,
}
}
    /// Open connection to remote peer at `address`.
    ///
    /// Runs the full negotiation (`noise` + `yamux`) as dialer, bounded by
    /// `connection_open_timeout`; on timeout returns
    /// `NegotiationError::Timeout`.
    // TODO: https://github.com/paritytech/litep2p/issues/347 this function can be removed
    pub(super) async fn open_connection(
        connection_id: ConnectionId,
        keypair: Keypair,
        stream: TcpStream,
        address: AddressType,
        peer: Option<PeerId>,
        yamux_config: crate::yamux::Config,
        max_read_ahead_factor: usize,
        max_write_buffer_size: usize,
        connection_open_timeout: Duration,
        substream_open_timeout: Duration,
    ) -> Result<NegotiatedConnection, NegotiationError> {
        tracing::debug!(
            target: LOG_TARGET,
            ?address,
            ?peer,
            "open connection to remote peer",
        );

        match tokio::time::timeout(connection_open_timeout, async move {
            Self::negotiate_connection(
                stream,
                peer,
                connection_id,
                keypair,
                Role::Dialer,
                address,
                yamux_config,
                max_read_ahead_factor,
                max_write_buffer_size,
                substream_open_timeout,
            )
            .await
        })
        .await
        {
            Err(_) => {
                tracing::trace!(target: LOG_TARGET, ?connection_id, "connection timed out during negotiation");
                Err(NegotiationError::Timeout)
            }
            Ok(result) => result,
        }
    }
    /// Open substream for `protocol`.
    ///
    /// Opens a new outbound yamux stream and negotiates `protocol` (or one of
    /// the `fallback_names`) on it as dialer.
    pub(super) async fn open_substream(
        mut control: crate::yamux::Control,
        substream_id: SubstreamId,
        permit: Permit,
        protocol: ProtocolName,
        fallback_names: Vec<ProtocolName>,
        open_timeout: Duration,
    ) -> Result<NegotiatedSubstream, SubstreamError> {
        tracing::debug!(target: LOG_TARGET, ?protocol, ?substream_id, "open substream");

        let stream = match control.open_stream().await {
            Ok(stream) => {
                tracing::trace!(target: LOG_TARGET, ?substream_id, "substream opened");
                stream
            }
            Err(error) => {
                tracing::debug!(
                    target: LOG_TARGET,
                    ?substream_id,
                    ?error,
                    "failed to open substream"
                );
                return Err(SubstreamError::YamuxError(
                    error,
                    Direction::Outbound(substream_id),
                ));
            }
        };

        // TODO: https://github.com/paritytech/litep2p/issues/346 protocols don't change after
        // they've been initialized so this should be done only once
        // Primary protocol first, then fallbacks, in preference order.
        let protocols = std::iter::once(&*protocol)
            .chain(fallback_names.iter().map(|protocol| &**protocol))
            .collect();

        let (io, protocol) =
            Self::negotiate_protocol(stream, &Role::Dialer, protocols, open_timeout).await?;

        Ok(NegotiatedSubstream {
            io: io.inner(),
            substream_id,
            direction: Direction::Outbound(substream_id),
            protocol,
            permit,
        })
    }
    /// Accept a new connection.
    ///
    /// Runs the full negotiation (`noise` + `yamux`) as listener, bounded by
    /// `connection_open_timeout`. No dialed peer id is supplied — the remote
    /// identity is learned from the noise handshake.
    pub(super) async fn accept_connection(
        stream: TcpStream,
        connection_id: ConnectionId,
        keypair: Keypair,
        address: SocketAddr,
        yamux_config: crate::yamux::Config,
        max_read_ahead_factor: usize,
        max_write_buffer_size: usize,
        connection_open_timeout: Duration,
        substream_open_timeout: Duration,
    ) -> Result<NegotiatedConnection, NegotiationError> {
        tracing::debug!(target: LOG_TARGET, ?address, "accept connection");

        match tokio::time::timeout(connection_open_timeout, async move {
            Self::negotiate_connection(
                stream,
                None,
                connection_id,
                keypair,
                Role::Listener,
                AddressType::Socket(address),
                yamux_config,
                max_read_ahead_factor,
                max_write_buffer_size,
                substream_open_timeout,
            )
            .await
        })
        .await
        {
            Err(_) => Err(NegotiationError::Timeout),
            Ok(result) => result,
        }
    }
    /// Accept substream.
    ///
    /// Negotiates one of `protocols` on an inbound yamux stream as listener.
    pub(super) async fn accept_substream(
        stream: crate::yamux::Stream,
        permit: Permit,
        substream_id: SubstreamId,
        protocols: Vec<ProtocolName>,
        open_timeout: Duration,
    ) -> Result<NegotiatedSubstream, NegotiationError> {
        tracing::trace!(
            target: LOG_TARGET,
            ?substream_id,
            "accept inbound substream",
        );

        let protocols = protocols.iter().map(|protocol| &**protocol).collect::<Vec<&str>>();
        let (io, protocol) =
            Self::negotiate_protocol(stream, &Role::Listener, protocols, open_timeout).await?;

        tracing::trace!(
            target: LOG_TARGET,
            ?substream_id,
            "substream accepted and negotiated",
        );

        Ok(NegotiatedSubstream {
            io: io.inner(),
            substream_id,
            direction: Direction::Inbound,
            protocol,
            permit,
        })
    }
    /// Negotiate protocol.
    ///
    /// Runs multistream-select over `stream` in the given `role`, bounded by
    /// `substream_open_timeout`. Returns the negotiated socket and the chosen
    /// protocol name.
    async fn negotiate_protocol<S: AsyncRead + AsyncWrite + Unpin>(
        stream: S,
        role: &Role,
        protocols: Vec<&str>,
        substream_open_timeout: Duration,
    ) -> Result<(Negotiated<S>, ProtocolName), NegotiationError> {
        tracing::trace!(target: LOG_TARGET, ?protocols, "negotiating protocols");

        match tokio::time::timeout(substream_open_timeout, async move {
            match role {
                Role::Dialer => dialer_select_proto(stream, protocols, Version::V1).await,
                Role::Listener => listener_select_proto(stream, protocols).await,
            }
        })
        .await
        {
            Err(_) => Err(NegotiationError::Timeout),
            Ok(Err(error)) => Err(NegotiationError::MultistreamSelectError(error)),
            Ok(Ok((protocol, socket))) => {
                tracing::trace!(target: LOG_TARGET, ?protocol, "protocol negotiated");
                Ok((socket, ProtocolName::from(protocol.to_string())))
            }
        }
    }
    /// Negotiate noise + yamux for the connection.
    ///
    /// Sequence: multistream-select `/noise` → noise handshake (verifying the
    /// dialed peer id, if any) → multistream-select `/yamux/1.0.0` → yamux
    /// session. Returns a [`NegotiatedConnection`] with the endpoint derived
    /// from `role` and `address`.
    pub(super) async fn negotiate_connection(
        stream: TcpStream,
        dialed_peer: Option<PeerId>,
        connection_id: ConnectionId,
        keypair: Keypair,
        role: Role,
        address: AddressType,
        yamux_config: crate::yamux::Config,
        max_read_ahead_factor: usize,
        max_write_buffer_size: usize,
        substream_open_timeout: Duration,
    ) -> Result<NegotiatedConnection, NegotiationError> {
        tracing::trace!(
            target: LOG_TARGET,
            ?role,
            "negotiate connection",
        );

        // Adapt the tokio stream to `futures` I/O for multistream-select.
        let stream = TokioAsyncReadCompatExt::compat(stream).into_inner();
        let stream = TokioAsyncWriteCompatExt::compat_write(stream);

        // negotiate `noise`
        let (stream, _) =
            Self::negotiate_protocol(stream, &role, vec!["/noise"], substream_open_timeout).await?;

        tracing::trace!(
            target: LOG_TARGET,
            "`multistream-select` and `noise` negotiated",
        );

        // perform noise handshake
        let (stream, peer) = noise::handshake(
            stream.inner(),
            &keypair,
            role,
            max_read_ahead_factor,
            max_write_buffer_size,
            substream_open_timeout,
            noise::HandshakeTransport::Tcp,
        )
        .await?;

        // When dialing, the authenticated peer must match the dialed peer id.
        if let Some(dialed_peer) = dialed_peer {
            if dialed_peer != peer {
                tracing::debug!(target: LOG_TARGET, ?dialed_peer, ?peer, "peer id mismatch");
                return Err(NegotiationError::PeerIdMismatch(dialed_peer, peer));
            }
        }

        tracing::trace!(target: LOG_TARGET, "noise handshake done");
        let stream: NoiseSocket<Compat<TcpStream>> = stream;

        // negotiate `yamux`
        let (stream, _) =
            Self::negotiate_protocol(stream, &role, vec!["/yamux/1.0.0"], substream_open_timeout)
                .await?;
        tracing::trace!(target: LOG_TARGET, "`yamux` negotiated");

        let connection = crate::yamux::Connection::new(stream.inner(), yamux_config, role.into());
        let (control, connection) = crate::yamux::Control::new(connection);

        // Rebuild a multiaddress for the endpoint from the socket or DNS name.
        let address = match address {
            AddressType::Socket(address) => Multiaddr::empty()
                .with(Protocol::from(address.ip()))
                .with(Protocol::Tcp(address.port())),
            AddressType::Dns {
                address,
                port,
                dns_type,
            } => match dns_type {
                DnsType::Dns => Multiaddr::empty()
                    .with(Protocol::Dns(Cow::Owned(address)))
                    .with(Protocol::Tcp(port)),
                DnsType::Dns4 => Multiaddr::empty()
                    .with(Protocol::Dns4(Cow::Owned(address)))
                    .with(Protocol::Tcp(port)),
                DnsType::Dns6 => Multiaddr::empty()
                    .with(Protocol::Dns6(Cow::Owned(address)))
                    .with(Protocol::Tcp(port)),
            },
        };
        let endpoint = match role {
            Role::Dialer => Endpoint::dialer(address, connection_id),
            Role::Listener => Endpoint::listener(address, connection_id),
        };

        Ok(NegotiatedConnection {
            peer,
            control,
            connection,
            endpoint,
            substream_open_timeout,
        })
    }
    /// Handles the yamux substream.
    ///
    /// Returns `true` if the connection handler should exit.
    async fn handle_yamux_substream(
        &mut self,
        substream: Option<Result<crate::yamux::Stream, crate::yamux::ConnectionError>>,
    ) -> crate::Result<bool> {
        match substream {
            // Inbound substream opened by the remote: allocate an id, reserve
            // a permit and negotiate the protocol in the background.
            Some(Ok(stream)) => {
                let substream_id = {
                    let substream_id = self.next_substream_id.fetch_add(1usize, Ordering::Relaxed);
                    SubstreamId::from(substream_id)
                };
                let protocols = self.protocol_set.protocols();
                let permit = self.protocol_set.try_get_permit().ok_or(Error::ConnectionClosed)?;
                let open_timeout = self.substream_open_timeout;
                self.pending_substreams.push(Box::pin(async move {
                    match tokio::time::timeout(
                        open_timeout,
                        Self::accept_substream(
                            stream,
                            permit,
                            substream_id,
                            protocols,
                            open_timeout,
                        ),
                    )
                    .await
                    {
                        Ok(Ok(substream)) => Ok(substream),
                        // `protocol`/`substream_id` are `None` because the
                        // protocol is unknown until negotiation succeeds.
                        Ok(Err(error)) => Err(ConnectionError::FailedToNegotiate {
                            protocol: None,
                            substream_id: None,
                            error: SubstreamError::NegotiationError(error),
                        }),
                        Err(_) => Err(ConnectionError::Timeout {
                            protocol: None,
                            substream_id: None,
                        }),
                    }
                }));
                Ok(false)
            }
            // Connection-level yamux error: report closure and exit the loop.
            Some(Err(error)) => {
                tracing::debug!(
                    target: LOG_TARGET,
                    peer = ?self.peer,
                    ?error,
                    "connection closed with error",
                );
                self.protocol_set
                    .report_connection_closed(self.peer, self.endpoint.connection_id())
                    .await?;
                Ok(true)
            }
            // Substream source exhausted: connection was closed cleanly.
            None => {
                tracing::debug!(target: LOG_TARGET, peer = ?self.peer, "connection closed");
                self.protocol_set
                    .report_connection_closed(self.peer, self.endpoint.connection_id())
                    .await?;
                Ok(true)
            }
        }
    }
/// Handles negotiated substream results.
async fn handle_negotiated_substream(
&mut self,
result: Result<NegotiatedSubstream, ConnectionError>,
) -> crate::Result<()> {
match result {
Err(error) => {
tracing::debug!(
target: LOG_TARGET,
?error,
"failed to accept/open substream",
);
let (protocol, substream_id, error) = match error {
ConnectionError::Timeout {
protocol,
substream_id,
} => (
protocol,
substream_id,
SubstreamError::NegotiationError(NegotiationError::Timeout),
),
ConnectionError::FailedToNegotiate {
protocol,
substream_id,
error,
} => (protocol, substream_id, error),
};
match (protocol, substream_id) {
(Some(protocol), Some(substream_id)) => {
self.protocol_set
.report_substream_open_failure(protocol.clone(), substream_id, error)
.await
.inspect_err(|error| {
tracing::error!(
target: LOG_TARGET,
?protocol,
endpoint = ?self.endpoint,
?error,
"failed to register substream open failure to protocol"
);
})?;
}
_ => {}
}
}
Ok(substream) => {
let protocol = substream.protocol.clone();
let direction = substream.direction;
let substream_id = substream.substream_id;
let socket = FuturesAsyncReadCompatExt::compat(substream.io);
let bandwidth_sink = self.bandwidth_sink.clone();
let substream = substream::Substream::new_tcp(
self.peer,
substream_id,
Substream::new(socket, bandwidth_sink, substream.permit),
self.protocol_set.protocol_codec(&protocol),
);
self.protocol_set
.report_substream_open(self.peer, protocol.clone(), direction, substream)
.await
.inspect_err(|error| {
tracing::error!(
target: LOG_TARGET,
?protocol,
peer = ?self.peer,
endpoint = ?self.endpoint,
?error,
"failed to register opened substream to protocol",
);
})?;
}
}
Ok(())
}
    /// Handles protocol command.
    ///
    /// Returns `true` if the connection handler should exit.
    async fn handle_protocol_command(
        &mut self,
        command: Option<ProtocolCommand>,
    ) -> crate::Result<bool> {
        match command {
            // A local protocol requested an outbound substream: negotiate it
            // in the background, bounded by the substream open timeout.
            Some(ProtocolCommand::OpenSubstream {
                protocol,
                fallback_names,
                substream_id,
                connection_id,
                permit,
            }) => {
                let control = self.control.clone();
                let open_timeout = self.substream_open_timeout;
                tracing::trace!(
                    target: LOG_TARGET,
                    ?protocol,
                    ?substream_id,
                    ?connection_id,
                    "open substream",
                );
                self.pending_substreams.push(Box::pin(async move {
                    match tokio::time::timeout(
                        open_timeout,
                        Self::open_substream(
                            control,
                            substream_id,
                            permit,
                            protocol.clone(),
                            fallback_names,
                            open_timeout,
                        ),
                    )
                    .await
                    {
                        Ok(Ok(substream)) => Ok(substream),
                        // Outbound failures keep both identifiers so the
                        // requesting protocol can be notified.
                        Ok(Err(error)) => Err(ConnectionError::FailedToNegotiate {
                            protocol: Some(protocol),
                            substream_id: Some(substream_id),
                            error,
                        }),
                        Err(_) => Err(ConnectionError::Timeout {
                            protocol: Some(protocol),
                            substream_id: Some(substream_id),
                        }),
                    }
                }));
                Ok(false)
            }
            Some(ProtocolCommand::ForceClose) => {
                tracing::debug!(
                    target: LOG_TARGET,
                    peer = ?self.peer,
                    connection_id = ?self.endpoint.connection_id(),
                    "force closing connection",
                );
                self.protocol_set
                    .report_connection_closed(self.peer, self.endpoint.connection_id())
                    .await?;
                Ok(true)
            }
            // All protocol handles were dropped: nothing can use this
            // connection anymore, so close it.
            None => {
                tracing::debug!(target: LOG_TARGET, "protocols have disconnected, closing connection");
                self.protocol_set
                    .report_connection_closed(self.peer, self.endpoint.connection_id())
                    .await?;
                Ok(true)
            }
        }
    }
    /// Start connection event loop.
    ///
    /// Announces the established connection to the protocols, then
    /// multiplexes three event sources until a handler signals exit:
    /// inbound yamux substreams, completed substream negotiations, and
    /// commands coming from local protocols.
    pub(crate) async fn start(mut self) -> crate::Result<()> {
        self.protocol_set
            .report_connection_established(self.peer, self.endpoint.clone())
            .await?;
        loop {
            tokio::select! {
                substream = self.connection.next() => {
                    if self.handle_yamux_substream(substream).await? {
                        return Ok(());
                    }
                },
                // `select_next_some` must not be polled on an empty set,
                // hence the `if` guard.
                substream = self.pending_substreams.select_next_some(), if !self.pending_substreams.is_empty() => {
                    self.handle_negotiated_substream(substream).await?;
                }
                protocol = self.protocol_set.next() => {
                    if self.handle_protocol_command(protocol).await? {
                        return Ok(())
                    }
                }
            }
        }
    }
}
#[cfg(test)]
mod tests {
use crate::transport::tcp::TcpTransport;
use super::*;
use hickory_resolver::{name_server::TokioConnectionProvider, TokioResolver};
use tokio::{io::AsyncWriteExt, net::TcpListener};
    // Dialer side: the remote answers with garbage instead of a
    // multistream-select header, so negotiation must fail with
    // `InvalidMessage`.
    #[tokio::test]
    async fn multistream_select_not_supported_dialer() {
        let _ = tracing_subscriber::fmt()
            .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
            .try_init();
        let listener = TcpListener::bind("[::1]:0").await.unwrap();
        let address = listener.local_addr().unwrap();
        // Fake remote: accept and write 256 bytes of junk.
        tokio::spawn(async move {
            let (mut stream, _) = listener.accept().await.unwrap();
            let _ = stream.write_all(&vec![0x12u8; 256]).await;
        });
        let (_, stream) = TcpTransport::dial_peer(
            Multiaddr::empty()
                .with(Protocol::from(address.ip()))
                .with(Protocol::Tcp(address.port())),
            Default::default(),
            Duration::from_secs(10),
            false,
            Arc::new(
                TokioResolver::builder_with_config(
                    Default::default(),
                    TokioConnectionProvider::default(),
                )
                .build(),
            ),
        )
        .await
        .unwrap();
        match TcpConnection::open_connection(
            ConnectionId::from(0usize),
            Keypair::generate(),
            stream,
            AddressType::Socket(address),
            None,
            Default::default(),
            5,
            2,
            Duration::from_secs(10),
            Duration::from_secs(10),
        )
        .await
        {
            Ok(_) => panic!("connection was supposed to fail"),
            Err(NegotiationError::MultistreamSelectError(
                crate::multistream_select::NegotiationError::ProtocolError(
                    crate::multistream_select::ProtocolError::InvalidMessage,
                ),
            )) => {}
            Err(error) => panic!("invalid error: {error:?}"),
        }
    }
    // Listener side: the dialer sends garbage instead of a multistream-select
    // header, so negotiation must fail with `InvalidMessage`.
    #[tokio::test]
    async fn multistream_select_not_supported_listener() {
        let _ = tracing_subscriber::fmt()
            .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
            .try_init();
        let listener = TcpListener::bind("[::1]:0").await.unwrap();
        let address = listener.local_addr().unwrap();
        let (Ok(mut dialer), Ok((stream, dialer_address))) =
            tokio::join!(TcpStream::connect(address), listener.accept(),)
        else {
            panic!("failed to establish connection");
        };
        // Fake dialer: write 256 bytes of junk.
        tokio::spawn(async move {
            let _ = dialer.write_all(&vec![0x12u8; 256]).await;
        });
        match TcpConnection::accept_connection(
            stream,
            ConnectionId::from(0usize),
            Keypair::generate(),
            dialer_address,
            Default::default(),
            5,
            2,
            Duration::from_secs(10),
            Duration::from_secs(10),
        )
        .await
        {
            Ok(_) => panic!("connection was supposed to fail"),
            Err(NegotiationError::MultistreamSelectError(
                crate::multistream_select::NegotiationError::ProtocolError(
                    crate::multistream_select::ProtocolError::InvalidMessage,
                ),
            )) => {}
            Err(error) => panic!("invalid error: {error:?}"),
        }
    }
    // Dialer side: the remote speaks multistream-select but offers only
    // yamux (no `/noise`), so negotiation must fail with `Failed`.
    #[tokio::test]
    async fn noise_not_supported_dialer() {
        let _ = tracing_subscriber::fmt()
            .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
            .try_init();
        let listener = TcpListener::bind("[::1]:0").await.unwrap();
        let address = listener.local_addr().unwrap();
        tokio::spawn(async move {
            let (stream, _) = listener.accept().await.unwrap();
            let stream = TokioAsyncReadCompatExt::compat(stream).into_inner();
            let stream = TokioAsyncWriteCompatExt::compat_write(stream);
            // attempt to negotiate yamux, skipping noise entirely
            assert!(listener_select_proto(stream, vec!["/yamux/1.0.0"]).await.is_err());
        });
        let (_, stream) = TcpTransport::dial_peer(
            Multiaddr::empty()
                .with(Protocol::from(address.ip()))
                .with(Protocol::Tcp(address.port())),
            Default::default(),
            Duration::from_secs(10),
            false,
            Arc::new(
                TokioResolver::builder_with_config(
                    Default::default(),
                    TokioConnectionProvider::default(),
                )
                .build(),
            ),
        )
        .await
        .unwrap();
        match TcpConnection::open_connection(
            ConnectionId::from(0usize),
            Keypair::generate(),
            stream,
            AddressType::Socket(address),
            None,
            Default::default(),
            5,
            2,
            Duration::from_secs(10),
            Duration::from_secs(10),
        )
        .await
        {
            Ok(_) => panic!("connection was supposed to fail"),
            Err(NegotiationError::MultistreamSelectError(
                crate::multistream_select::NegotiationError::Failed,
            )) => {}
            Err(error) => panic!("invalid error: {error:?}"),
        }
    }
    // Listener side: the dialer speaks multistream-select but proposes only
    // yamux (no `/noise`), so negotiation must fail with `Failed`.
    #[tokio::test]
    async fn noise_not_supported_listener() {
        let _ = tracing_subscriber::fmt()
            .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
            .try_init();
        let listener = TcpListener::bind("[::1]:0").await.unwrap();
        let address = listener.local_addr().unwrap();
        let (Ok(dialer), Ok((listener, dialer_address))) =
            tokio::join!(TcpStream::connect(address), listener.accept(),)
        else {
            panic!("failed to establish connection");
        };
        tokio::spawn(async move {
            let dialer = TokioAsyncReadCompatExt::compat(dialer).into_inner();
            let dialer = TokioAsyncWriteCompatExt::compat_write(dialer);
            // attempt to negotiate yamux, skipping noise entirely
            assert!(dialer_select_proto(dialer, vec!["/yamux/1.0.0"], Version::V1).await.is_err());
        });
        match TcpConnection::accept_connection(
            listener,
            ConnectionId::from(0usize),
            Keypair::generate(),
            dialer_address,
            Default::default(),
            5,
            2,
            Duration::from_secs(10),
            Duration::from_secs(10),
        )
        .await
        {
            Ok(_) => panic!("connection was supposed to fail"),
            Err(NegotiationError::MultistreamSelectError(
                crate::multistream_select::NegotiationError::Failed,
            )) => {}
            Err(error) => panic!("invalid error: {error:?}"),
        }
    }
#[tokio::test]
async fn noise_timeout_listener() {
let _ = tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.try_init();
let listener = TcpListener::bind("[::1]:0").await.unwrap();
let address = listener.local_addr().unwrap();
let (Ok(dialer), Ok((listener, dialer_address))) =
tokio::join!(TcpStream::connect(address), listener.accept(),)
else {
// Copyright 2020 Parity Technologies (UK) Ltd.
// Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! TCP transport.
use crate::{
error::{DialError, Error},
transport::{
common::listener::{DialAddresses, GetSocketAddr, SocketListener, TcpAddress},
manager::TransportHandle,
tcp::{
config::Config,
connection::{NegotiatedConnection, TcpConnection},
},
Transport, TransportBuilder, TransportEvent,
},
types::ConnectionId,
utils::futures_stream::FuturesStream,
};
use futures::{
future::BoxFuture,
stream::{AbortHandle, FuturesUnordered, Stream, StreamExt},
TryFutureExt,
};
use hickory_resolver::TokioResolver;
use multiaddr::Multiaddr;
use socket2::{Domain, Socket, Type};
use tokio::net::TcpStream;
use std::{
collections::HashMap,
net::SocketAddr,
pin::Pin,
sync::Arc,
task::{Context, Poll},
time::Duration,
};
// Re-export the TCP substream wrapper for sibling transport modules.
pub(crate) use substream::Substream;
mod connection;
mod substream;
pub mod config;
/// Logging target for the file.
const LOG_TARGET: &str = "litep2p::tcp";
/// Pending inbound connection.
///
/// An accepted TCP connection that has been reported to the
/// `TransportManager` and is awaiting an accept/reject decision.
struct PendingInboundConnection {
    /// The accepted TCP stream of the remote peer.
    // (The previous comment said "socket address"; the field is the stream.)
    connection: TcpStream,
    /// Address of the remote peer.
    address: SocketAddr,
}
/// Outcome of a parallel `open` attempt over one or more addresses.
#[derive(Debug)]
enum RawConnectionResult {
    /// The first successful connection.
    Connected {
        /// The negotiated connection.
        negotiated: NegotiatedConnection,
        /// Dial errors collected from the addresses tried before the
        /// successful one.
        errors: Vec<(Multiaddr, DialError)>,
    },
    /// All connection attempts failed.
    Failed {
        /// Connection ID of the failed attempt.
        connection_id: ConnectionId,
        /// One dial error for every attempted address.
        errors: Vec<(Multiaddr, DialError)>,
    },
    /// Future was canceled.
    Canceled {
        /// Connection ID of the canceled attempt.
        connection_id: ConnectionId,
    },
}
/// TCP transport.
pub(crate) struct TcpTransport {
    /// Transport context.
    context: TransportHandle,
    /// Transport configuration.
    config: Config,
    /// TCP listener.
    listener: SocketListener,
    /// Pending dials.
    ///
    /// Maps the connection ID of an in-flight `dial` to the dialed address so
    /// failures can be reported with the address attached.
    pending_dials: HashMap<ConnectionId, Multiaddr>,
    /// Dial addresses.
    dial_addresses: DialAddresses,
    /// Pending inbound connections.
    pending_inbound_connections: HashMap<ConnectionId, PendingInboundConnection>,
    /// Pending opening connections.
    pending_connections:
        FuturesStream<BoxFuture<'static, Result<NegotiatedConnection, (ConnectionId, DialError)>>>,
    /// Pending raw, unnegotiated connections.
    pending_raw_connections: FuturesStream<BoxFuture<'static, RawConnectionResult>>,
    /// Opened raw connection, waiting for approval/rejection from `TransportManager`.
    opened: HashMap<ConnectionId, NegotiatedConnection>,
    /// Cancel raw connections futures.
    ///
    /// This is cancelling `Self::pending_raw_connections`.
    cancel_futures: HashMap<ConnectionId, AbortHandle>,
    /// Connections which have been opened and negotiated but are being validated by the
    /// `TransportManager`.
    pending_open: HashMap<ConnectionId, NegotiatedConnection>,
    /// DNS resolver.
    resolver: Arc<TokioResolver>,
}
impl TcpTransport {
    /// Handle inbound TCP connection.
    ///
    /// Spawns the negotiation for an accepted connection into
    /// `pending_connections`; the outcome surfaces through `poll_next`.
    fn on_inbound_connection(
        &mut self,
        connection_id: ConnectionId,
        connection: TcpStream,
        address: SocketAddr,
    ) {
        let yamux_config = self.config.yamux_config.clone();
        let max_read_ahead_factor = self.config.noise_read_ahead_frame_count;
        let max_write_buffer_size = self.config.noise_write_buffer_size;
        let connection_open_timeout = self.config.connection_open_timeout;
        let substream_open_timeout = self.config.substream_open_timeout;
        let keypair = self.context.keypair.clone();
        tracing::trace!(
            target: LOG_TARGET,
            ?connection_id,
            ?address,
            "accept connection",
        );
        self.pending_connections.push(Box::pin(async move {
            TcpConnection::accept_connection(
                connection,
                connection_id,
                keypair,
                address,
                yamux_config,
                max_read_ahead_factor,
                max_write_buffer_size,
                connection_open_timeout,
                substream_open_timeout,
            )
            .await
            .map_err(|error| (connection_id, error.into()))
        }));
    }

    /// Dial remote peer
    ///
    /// Resolves `address` (bounded by `connection_open_timeout`), configures
    /// a non-blocking socket — optionally bound to a local listening address
    /// for port reuse — and connects, again bounded by the same timeout.
    async fn dial_peer(
        address: Multiaddr,
        dial_addresses: DialAddresses,
        connection_open_timeout: Duration,
        nodelay: bool,
        resolver: Arc<TokioResolver>,
    ) -> Result<(Multiaddr, TcpStream), DialError> {
        let (socket_address, _) = TcpAddress::multiaddr_to_socket_address(&address)?;
        // DNS resolution shares the connect timeout.
        let remote_address =
            match tokio::time::timeout(connection_open_timeout, socket_address.lookup_ip(resolver))
                .await
            {
                Err(_) => {
                    tracing::debug!(
                        target: LOG_TARGET,
                        ?address,
                        ?connection_open_timeout,
                        "failed to resolve address within timeout",
                    );
                    return Err(DialError::Timeout);
                }
                Ok(Err(error)) => return Err(error.into()),
                Ok(Ok(address)) => address,
            };
        let domain = match remote_address.is_ipv4() {
            true => Domain::IPV4,
            false => Domain::IPV6,
        };
        let socket = Socket::new(domain, Type::STREAM, Some(socket2::Protocol::TCP))?;
        if remote_address.is_ipv6() {
            socket.set_only_v6(true)?;
        }
        socket.set_nonblocking(true)?;
        socket.set_nodelay(nodelay)?;
        // When a local listening address exists for this IP family, bind the
        // dialing socket to it (with address/port reuse) so the remote
        // observes our listening port as the source.
        match dial_addresses.local_dial_address(&remote_address.ip()) {
            Ok(Some(dial_address)) => {
                socket.set_reuse_address(true)?;
                #[cfg(unix)]
                socket.set_reuse_port(true)?;
                socket.bind(&dial_address.into())?;
            }
            Ok(None) => {}
            Err(()) => {
                tracing::debug!(
                    target: LOG_TARGET,
                    ?remote_address,
                    "tcp listener not enabled for remote address, using ephemeral port",
                );
            }
        }
        let future = async move {
            // Non-blocking connect: EINPROGRESS/WouldBlock are expected;
            // completion is signaled by the socket becoming writable.
            match socket.connect(&remote_address.into()) {
                Ok(()) => {}
                Err(err) if err.raw_os_error() == Some(libc::EINPROGRESS) => {}
                Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => {}
                Err(err) => return Err(err),
            }
            let stream = TcpStream::try_from(Into::<std::net::TcpStream>::into(socket))?;
            stream.writable().await?;
            // Writability alone does not imply success; check SO_ERROR.
            if let Some(e) = stream.take_error()? {
                return Err(e);
            }
            Ok((address, stream))
        };
        match tokio::time::timeout(connection_open_timeout, future).await {
            Err(_) => {
                tracing::debug!(
                    target: LOG_TARGET,
                    ?connection_open_timeout,
                    "failed to connect within timeout",
                );
                Err(DialError::Timeout)
            }
            Ok(Err(error)) => Err(error.into()),
            Ok(Ok((address, stream))) => {
                tracing::debug!(
                    target: LOG_TARGET,
                    ?address,
                    "connected",
                );
                Ok((address, stream))
            }
        }
    }
}
impl TransportBuilder for TcpTransport {
    type Config = Config;
    type Transport = TcpTransport;

    /// Create new [`TcpTransport`].
    ///
    /// Binds listeners for all configured listen addresses and returns the
    /// transport together with the concrete (post-bind) listen addresses.
    fn new(
        context: TransportHandle,
        mut config: Self::Config,
        resolver: Arc<TokioResolver>,
    ) -> crate::Result<(Self, Vec<Multiaddr>)> {
        tracing::debug!(
            target: LOG_TARGET,
            listen_addresses = ?config.listen_addresses,
            "start tcp transport",
        );
        // start tcp listeners for all listen addresses
        let (listener, listen_addresses, dial_addresses) = SocketListener::new::<TcpAddress>(
            std::mem::take(&mut config.listen_addresses),
            config.reuse_port,
            config.nodelay,
        );
        Ok((
            Self {
                listener,
                config,
                context,
                dial_addresses,
                opened: HashMap::new(),
                pending_open: HashMap::new(),
                pending_dials: HashMap::new(),
                pending_inbound_connections: HashMap::new(),
                pending_connections: FuturesStream::new(),
                pending_raw_connections: FuturesStream::new(),
                cancel_futures: HashMap::new(),
                resolver,
            },
            listen_addresses,
        ))
    }
}
impl Transport for TcpTransport {
    /// Dial `address` for `connection_id`.
    ///
    /// The dial + negotiation future is pushed into `pending_connections`;
    /// the outcome is reported from `poll_next`.
    fn dial(&mut self, connection_id: ConnectionId, address: Multiaddr) -> crate::Result<()> {
        tracing::debug!(target: LOG_TARGET, ?connection_id, ?address, "open connection");
        let (socket_address, peer) = TcpAddress::multiaddr_to_socket_address(&address)?;
        let yamux_config = self.config.yamux_config.clone();
        let max_read_ahead_factor = self.config.noise_read_ahead_frame_count;
        let max_write_buffer_size = self.config.noise_write_buffer_size;
        let connection_open_timeout = self.config.connection_open_timeout;
        let substream_open_timeout = self.config.substream_open_timeout;
        let dial_addresses = self.dial_addresses.clone();
        let keypair = self.context.keypair.clone();
        let nodelay = self.config.nodelay;
        let resolver = self.resolver.clone();
        // Remember the address so a failure can surface as `DialFailure`.
        self.pending_dials.insert(connection_id, address.clone());
        self.pending_connections.push(Box::pin(async move {
            let (_, stream) = TcpTransport::dial_peer(
                address,
                dial_addresses,
                connection_open_timeout,
                nodelay,
                resolver,
            )
            .await
            .map_err(|error| (connection_id, error))?;
            TcpConnection::open_connection(
                connection_id,
                keypair,
                stream,
                socket_address,
                peer,
                yamux_config,
                max_read_ahead_factor,
                max_write_buffer_size,
                connection_open_timeout,
                substream_open_timeout,
            )
            .await
            .map_err(|error| (connection_id, error.into()))
        }));
        Ok(())
    }

    /// Accept a pending inbound connection and start negotiating it.
    fn accept_pending(&mut self, connection_id: ConnectionId) -> crate::Result<()> {
        let pending = self.pending_inbound_connections.remove(&connection_id).ok_or_else(|| {
            tracing::error!(
                target: LOG_TARGET,
                ?connection_id,
                "Cannot accept non existent pending connection",
            );
            Error::ConnectionDoesntExist(connection_id)
        })?;
        self.on_inbound_connection(connection_id, pending.connection, pending.address);
        Ok(())
    }

    /// Reject a pending inbound connection by dropping its TCP stream.
    fn reject_pending(&mut self, connection_id: ConnectionId) -> crate::Result<()> {
        self.pending_inbound_connections.remove(&connection_id).map_or_else(
            || {
                tracing::error!(
                    target: LOG_TARGET,
                    ?connection_id,
                    "Cannot reject non existent pending connection",
                );
                Err(Error::ConnectionDoesntExist(connection_id))
            },
            |_| Ok(()),
        )
    }

    /// Accept a negotiated connection and spawn its event loop on the executor.
    fn accept(&mut self, connection_id: ConnectionId) -> crate::Result<()> {
        let context = self
            .pending_open
            .remove(&connection_id)
            .ok_or(Error::ConnectionDoesntExist(connection_id))?;
        let protocol_set = self.context.protocol_set(connection_id);
        let bandwidth_sink = self.context.bandwidth_sink.clone();
        let next_substream_id = self.context.next_substream_id.clone();
        tracing::trace!(
            target: LOG_TARGET,
            ?connection_id,
            "start connection",
        );
        self.context.executor.run(Box::pin(async move {
            if let Err(error) =
                TcpConnection::new(context, protocol_set, bandwidth_sink, next_substream_id)
                    .start()
                    .await
            {
                tracing::debug!(
                    target: LOG_TARGET,
                    ?connection_id,
                    ?error,
                    "connection exited with error",
                );
            }
        }));
        Ok(())
    }

    /// Reject a negotiated connection by dropping it.
    fn reject(&mut self, connection_id: ConnectionId) -> crate::Result<()> {
        self.pending_open
            .remove(&connection_id)
            .map_or(Err(Error::ConnectionDoesntExist(connection_id)), |_| Ok(()))
    }

    /// Open connections to all `addresses` in parallel and keep the first one
    /// that negotiates successfully. The whole attempt is abortable via
    /// [`Transport::cancel`].
    fn open(
        &mut self,
        connection_id: ConnectionId,
        addresses: Vec<Multiaddr>,
    ) -> crate::Result<()> {
        let num_addresses = addresses.len();
        let mut futures: FuturesUnordered<_> = addresses
            .into_iter()
            .map(|address| {
                let yamux_config = self.config.yamux_config.clone();
                let max_read_ahead_factor = self.config.noise_read_ahead_frame_count;
                let max_write_buffer_size = self.config.noise_write_buffer_size;
                let connection_open_timeout = self.config.connection_open_timeout;
                let substream_open_timeout = self.config.substream_open_timeout;
                let dial_addresses = self.dial_addresses.clone();
                let keypair = self.context.keypair.clone();
                let nodelay = self.config.nodelay;
                let resolver = self.resolver.clone();
                async move {
                    let (address, stream) = TcpTransport::dial_peer(
                        address.clone(),
                        dial_addresses,
                        connection_open_timeout,
                        nodelay,
                        resolver,
                    )
                    .await
                    .map_err(|error| (address, error))?;
                    let open_address = address.clone();
                    let (socket_address, peer) = TcpAddress::multiaddr_to_socket_address(&address)
                        .map_err(|error| (address, error.into()))?;
                    TcpConnection::open_connection(
                        connection_id,
                        keypair,
                        stream,
                        socket_address,
                        peer,
                        yamux_config,
                        max_read_ahead_factor,
                        max_write_buffer_size,
                        connection_open_timeout,
                        substream_open_timeout,
                    )
                    .await
                    .map_err(|error| (open_address, error.into()))
                }
            })
            .collect();
        // Future that will resolve to the first successful connection.
        let future = async move {
            let mut errors = Vec::with_capacity(num_addresses);
            while let Some(result) = futures.next().await {
                match result {
                    Ok(negotiated) => return RawConnectionResult::Connected { negotiated, errors },
                    Err(error) => {
                        tracing::debug!(
                            target: LOG_TARGET,
                            ?connection_id,
                            ?error,
                            "failed to open connection",
                        );
                        errors.push(error)
                    }
                }
            }
            RawConnectionResult::Failed {
                connection_id,
                errors,
            }
        };
        // Wrap in an abortable future so `cancel` can stop it mid-flight.
        let (fut, handle) = futures::future::abortable(future);
        let fut = fut.unwrap_or_else(move |_| RawConnectionResult::Canceled { connection_id });
        self.pending_raw_connections.push(Box::pin(fut));
        self.cancel_futures.insert(connection_id, handle);
        Ok(())
    }

    /// Move a previously opened raw connection into the negotiated set.
    fn negotiate(&mut self, connection_id: ConnectionId) -> crate::Result<()> {
        let negotiated = self
            .opened
            .remove(&connection_id)
            .ok_or(Error::ConnectionDoesntExist(connection_id))?;
        // Already negotiated; resolve immediately via `pending_connections`.
        self.pending_connections.push(Box::pin(async move { Ok(negotiated) }));
        Ok(())
    }

    /// Cancel an in-flight `open` attempt.
    fn cancel(&mut self, connection_id: ConnectionId) {
        // Cancel the future if it exists.
        // State clean-up happens inside the `poll_next`.
        if let Some(handle) = self.cancel_futures.get(&connection_id) {
            handle.abort();
        }
    }
}
impl Stream for TcpTransport {
    type Item = TransportEvent;

    /// Polls, in order: the TCP listener for inbound connections, the raw
    /// (multi-address) `open` attempts, and finally the dial/negotiation
    /// futures. Returns `Poll::Ready(None)` only when the listener dies.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        if let Poll::Ready(event) = self.listener.poll_next_unpin(cx) {
            return match event {
                None => {
                    tracing::error!(
                        target: LOG_TARGET,
                        "TCP listener terminated, ignore if the node is stopping",
                    );
                    Poll::Ready(None)
                }
                Some(Err(error)) => {
                    tracing::error!(
                        target: LOG_TARGET,
                        ?error,
                        "TCP listener terminated with error",
                    );
                    Poll::Ready(None)
                }
                Some(Ok((connection, address))) => {
                    let connection_id = self.context.next_connection_id();
                    tracing::trace!(
                        target: LOG_TARGET,
                        ?connection_id,
                        ?address,
                        "pending inbound TCP connection",
                    );
                    // Park the stream until the manager accepts/rejects it.
                    self.pending_inbound_connections.insert(
                        connection_id,
                        PendingInboundConnection {
                            connection,
                            address,
                        },
                    );
                    Poll::Ready(Some(TransportEvent::PendingInboundConnection {
                        connection_id,
                    }))
                }
            };
        }
        while let Poll::Ready(Some(result)) = self.pending_raw_connections.poll_next_unpin(cx) {
            tracing::trace!(target: LOG_TARGET, ?result, "raw connection result");
            match result {
                RawConnectionResult::Connected { negotiated, errors } => {
                    let Some(handle) = self.cancel_futures.remove(&negotiated.connection_id())
                    else {
                        tracing::warn!(
                            target: LOG_TARGET,
                            connection_id = ?negotiated.connection_id(),
                            address = ?negotiated.endpoint().address(),
                            ?errors,
                            "raw connection without a cancel handle",
                        );
                        continue;
                    };
                    // A connection whose attempt was aborted after completion
                    // is dropped silently instead of being surfaced.
                    if !handle.is_aborted() {
                        let connection_id = negotiated.connection_id();
                        let address = negotiated.endpoint().address().clone();
                        self.opened.insert(connection_id, negotiated);
                        return Poll::Ready(Some(TransportEvent::ConnectionOpened {
                            connection_id,
                            address,
                        }));
                    }
                }
                RawConnectionResult::Failed {
                    connection_id,
                    errors,
                } => {
                    let Some(handle) = self.cancel_futures.remove(&connection_id) else {
                        tracing::warn!(
                            target: LOG_TARGET,
                            ?connection_id,
                            ?errors,
                            "raw connection without a cancel handle",
                        );
                        continue;
                    };
                    if !handle.is_aborted() {
                        return Poll::Ready(Some(TransportEvent::OpenFailure {
                            connection_id,
                            errors,
                        }));
                    }
                }
                RawConnectionResult::Canceled { connection_id } => {
                    // Cancellation removed interest in the result; only the
                    // cancel-handle book-keeping is cleaned up here.
                    if self.cancel_futures.remove(&connection_id).is_none() {
                        tracing::warn!(
                            target: LOG_TARGET,
                            ?connection_id,
                            "raw cancelled connection without a cancel handle",
                        );
                    }
                }
            }
        }
        while let Poll::Ready(Some(connection)) = self.pending_connections.poll_next_unpin(cx) {
            match connection {
                Ok(connection) => {
                    let peer = connection.peer();
                    let endpoint = connection.endpoint();
                    self.pending_dials.remove(&connection.connection_id());
                    self.pending_open.insert(connection.connection_id(), connection);
                    return Poll::Ready(Some(TransportEvent::ConnectionEstablished {
                        peer,
                        endpoint,
                    }));
                }
                Err((connection_id, error)) => {
                    // Only outbound dials tracked in `pending_dials` yield a
                    // `DialFailure`; failed inbound negotiations are logged.
                    if let Some(address) = self.pending_dials.remove(&connection_id) {
                        return Poll::Ready(Some(TransportEvent::DialFailure {
                            connection_id,
                            address,
                            error,
                        }));
                    } else {
                        tracing::debug!(target: LOG_TARGET, ?error, ?connection_id, "Pending inbound connection failed");
                    }
                }
            }
        }
        Poll::Pending
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
codec::ProtocolCodec,
crypto::ed25519::Keypair,
executor::DefaultExecutor,
transport::manager::{ProtocolContext, SupportedTransport, TransportManagerBuilder},
types::protocol::ProtocolName,
BandwidthSink, PeerId,
};
use multiaddr::Protocol;
use multihash::Multihash;
use std::sync::Arc;
use tokio::sync::mpsc::channel;
    // Two transports: transport2 dials transport1, transport1 accepts the
    // pending inbound connection, and both sides must observe
    // `ConnectionEstablished`.
    #[tokio::test]
    async fn connect_and_accept_works() {
        let _ = tracing_subscriber::fmt()
            .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
            .try_init();
        // First (listening) transport.
        let keypair1 = Keypair::generate();
        let (tx1, _rx1) = channel(64);
        let (event_tx1, _event_rx1) = channel(64);
        let bandwidth_sink = BandwidthSink::new();
        let handle1 = crate::transport::manager::TransportHandle {
            executor: Arc::new(DefaultExecutor {}),
            next_substream_id: Default::default(),
            next_connection_id: Default::default(),
            keypair: keypair1.clone(),
            tx: event_tx1,
            bandwidth_sink: bandwidth_sink.clone(),
            protocols: HashMap::from_iter([(
                ProtocolName::from("/notif/1"),
                ProtocolContext {
                    tx: tx1,
                    codec: ProtocolCodec::Identity(32),
                    fallback_names: Vec::new(),
                },
            )]),
        };
        let transport_config1 = Config {
            listen_addresses: vec!["/ip6/::1/tcp/0".parse().unwrap()],
            ..Default::default()
        };
        let resolver = Arc::new(TokioResolver::builder_tokio().unwrap().build());
        let (mut transport1, listen_addresses) =
            TcpTransport::new(handle1, transport_config1, resolver.clone()).unwrap();
        let listen_address = listen_addresses[0].clone();
        // Second (dialing) transport.
        let keypair2 = Keypair::generate();
        let (tx2, _rx2) = channel(64);
        let (event_tx2, _event_rx2) = channel(64);
        let handle2 = crate::transport::manager::TransportHandle {
            executor: Arc::new(DefaultExecutor {}),
            next_substream_id: Default::default(),
            next_connection_id: Default::default(),
            keypair: keypair2.clone(),
            tx: event_tx2,
            bandwidth_sink: bandwidth_sink.clone(),
            protocols: HashMap::from_iter([(
                ProtocolName::from("/notif/1"),
                ProtocolContext {
                    tx: tx2,
                    codec: ProtocolCodec::Identity(32),
                    fallback_names: Vec::new(),
                },
            )]),
        };
        let transport_config2 = Config {
            listen_addresses: vec!["/ip6/::1/tcp/0".parse().unwrap()],
            ..Default::default()
        };
        let (mut transport2, _) = TcpTransport::new(handle2, transport_config2, resolver).unwrap();
        transport2.dial(ConnectionId::new(), listen_address).unwrap();
        let (tx, mut from_transport2) = channel(64);
        // Drive the dialer on a separate task and forward its first event.
        tokio::spawn(async move {
            let event = transport2.next().await;
            tx.send(event).await.unwrap();
        });
        // Accept the inbound connection on the listener side.
        let event = transport1.next().await.unwrap();
        match event {
            TransportEvent::PendingInboundConnection { connection_id } => {
                transport1.accept_pending(connection_id).unwrap();
            }
            _ => panic!("unexpected event"),
        }
        let event = transport1.next().await;
        assert!(std::matches!(
            event,
            Some(TransportEvent::ConnectionEstablished { .. })
        ));
        let event = from_transport2.recv().await.unwrap();
        assert!(std::matches!(
            event,
            Some(TransportEvent::ConnectionEstablished { .. })
        ));
    }
    // Two transports: transport2 dials transport1, transport1 rejects the
    // pending inbound connection, and the dialer must observe `DialFailure`.
    #[tokio::test]
    async fn connect_and_reject_works() {
        let _ = tracing_subscriber::fmt()
            .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
            .try_init();
        // First (listening) transport.
        let keypair1 = Keypair::generate();
        let (tx1, _rx1) = channel(64);
        let (event_tx1, _event_rx1) = channel(64);
        let bandwidth_sink = BandwidthSink::new();
        let handle1 = crate::transport::manager::TransportHandle {
            executor: Arc::new(DefaultExecutor {}),
            next_substream_id: Default::default(),
            next_connection_id: Default::default(),
            keypair: keypair1.clone(),
            tx: event_tx1,
            bandwidth_sink: bandwidth_sink.clone(),
            protocols: HashMap::from_iter([(
                ProtocolName::from("/notif/1"),
                ProtocolContext {
                    tx: tx1,
                    codec: ProtocolCodec::Identity(32),
                    fallback_names: Vec::new(),
                },
            )]),
        };
        let transport_config1 = Config {
            listen_addresses: vec!["/ip6/::1/tcp/0".parse().unwrap()],
            ..Default::default()
        };
        let resolver = Arc::new(TokioResolver::builder_tokio().unwrap().build());
        let (mut transport1, listen_addresses) =
            TcpTransport::new(handle1, transport_config1, resolver.clone()).unwrap();
        let listen_address = listen_addresses[0].clone();
        // Second (dialing) transport.
        let keypair2 = Keypair::generate();
        let (tx2, _rx2) = channel(64);
        let (event_tx2, _event_rx2) = channel(64);
        let handle2 = crate::transport::manager::TransportHandle {
            executor: Arc::new(DefaultExecutor {}),
            next_substream_id: Default::default(),
            next_connection_id: Default::default(),
            keypair: keypair2.clone(),
            tx: event_tx2,
            bandwidth_sink: bandwidth_sink.clone(),
            protocols: HashMap::from_iter([(
                ProtocolName::from("/notif/1"),
                ProtocolContext {
                    tx: tx2,
                    codec: ProtocolCodec::Identity(32),
                    fallback_names: Vec::new(),
                },
            )]),
        };
        let transport_config2 = Config {
            listen_addresses: vec!["/ip6/::1/tcp/0".parse().unwrap()],
            ..Default::default()
        };
        let (mut transport2, _) = TcpTransport::new(handle2, transport_config2, resolver).unwrap();
        transport2.dial(ConnectionId::new(), listen_address).unwrap();
        let (tx, mut from_transport2) = channel(64);
        // Drive the dialer on a separate task and forward its first event.
        tokio::spawn(async move {
            let event = transport2.next().await;
            tx.send(event).await.unwrap();
        });
        // Reject connection.
        let event = transport1.next().await.unwrap();
        match event {
            TransportEvent::PendingInboundConnection { connection_id } => {
                transport1.reject_pending(connection_id).unwrap();
            }
            _ => panic!("unexpected event"),
        }
        let event = from_transport2.recv().await.unwrap();
        assert!(std::matches!(
            event,
            Some(TransportEvent::DialFailure { .. })
        ));
    }
#[tokio::test]
async fn dial_failure() {
let _ = tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.try_init();
let keypair1 = Keypair::generate();
let (tx1, _rx1) = channel(64);
let (event_tx1, mut event_rx1) = channel(64);
let bandwidth_sink = BandwidthSink::new();
let handle1 = crate::transport::manager::TransportHandle {
executor: Arc::new(DefaultExecutor {}),
next_substream_id: Default::default(),
next_connection_id: Default::default(),
keypair: keypair1.clone(),
tx: event_tx1,
bandwidth_sink: bandwidth_sink.clone(),
protocols: HashMap::from_iter([(
ProtocolName::from("/notif/1"),
ProtocolContext {
tx: tx1,
codec: ProtocolCodec::Identity(32),
fallback_names: Vec::new(),
},
)]),
};
let resolver = Arc::new(TokioResolver::builder_tokio().unwrap().build());
let (mut transport1, _) =
TcpTransport::new(handle1, Default::default(), resolver.clone()).unwrap();
tokio::spawn(async move {
while let Some(event) = transport1.next().await {
match event {
TransportEvent::ConnectionEstablished { .. } => {}
TransportEvent::ConnectionClosed { .. } => {}
TransportEvent::DialFailure { .. } => {}
TransportEvent::ConnectionOpened { .. } => {}
TransportEvent::OpenFailure { .. } => {}
TransportEvent::PendingInboundConnection { .. } => {}
}
}
});
let keypair2 = Keypair::generate();
let (tx2, _rx2) = channel(64);
let (event_tx2, _event_rx2) = channel(64);
let handle2 = crate::transport::manager::TransportHandle {
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | true |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/transport/tcp/substream.rs | src/transport/tcp/substream.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::{protocol::Permit, BandwidthSink};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::Compat;
use std::{
io,
pin::Pin,
task::{Context, Poll},
};
/// Substream that holds the inner substream provided by the transport
/// and a permit which keeps the connection open.
///
/// `BandwidthSink` is used to meter inbound/outbound bytes.
#[derive(Debug)]
pub struct Substream {
/// Underlying socket.
io: Compat<crate::yamux::Stream>,
/// Bandwidth sink.
bandwidth_sink: BandwidthSink,
/// Connection permit.
_permit: Permit,
}
impl Substream {
/// Create new [`Substream`].
pub fn new(
io: Compat<crate::yamux::Stream>,
bandwidth_sink: BandwidthSink,
_permit: Permit,
) -> Self {
Self {
io,
bandwidth_sink,
_permit,
}
}
}
impl AsyncRead for Substream {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut tokio::io::ReadBuf<'_>,
) -> Poll<io::Result<()>> {
let len = buf.filled().len();
match futures::ready!(Pin::new(&mut self.io).poll_read(cx, buf)) {
Err(error) => Poll::Ready(Err(error)),
Ok(res) => {
let inbound_size = buf.filled().len().saturating_sub(len);
self.bandwidth_sink.increase_inbound(inbound_size);
Poll::Ready(Ok(res))
}
}
}
}
impl AsyncWrite for Substream {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
match futures::ready!(Pin::new(&mut self.io).poll_write(cx, buf)) {
Err(error) => Poll::Ready(Err(error)),
Ok(nwritten) => {
self.bandwidth_sink.increase_outbound(nwritten);
Poll::Ready(Ok(nwritten))
}
}
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
Pin::new(&mut self.io).poll_flush(cx)
}
fn poll_shutdown(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), io::Error>> {
Pin::new(&mut self.io).poll_shutdown(cx)
}
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[io::IoSlice<'_>],
) -> Poll<Result<usize, io::Error>> {
match futures::ready!(Pin::new(&mut self.io).poll_write_vectored(cx, bufs)) {
Err(error) => Poll::Ready(Err(error)),
Ok(nwritten) => {
self.bandwidth_sink.increase_outbound(nwritten);
Poll::Ready(Ok(nwritten))
}
}
}
fn is_write_vectored(&self) -> bool {
self.io.is_write_vectored()
}
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/transport/manager/address.rs | src/transport/manager/address.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::{error::DialError, PeerId};
use multiaddr::{Multiaddr, Protocol};
use multihash::Multihash;
use std::collections::{hash_map::Entry, HashMap};
/// Maximum number of addresses tracked for a peer.
const MAX_ADDRESSES: usize = 64;
/// Scores for address records.
pub mod scores {
/// Score indicating that the connection was successfully established.
pub const CONNECTION_ESTABLISHED: i32 = 100i32;
/// Score for failing to connect due to an invalid or unreachable address.
pub const CONNECTION_FAILURE: i32 = -100i32;
/// Score for providing an invalid address.
///
/// This address can never be reached.
pub const ADDRESS_FAILURE: i32 = i32::MIN;
}
#[allow(clippy::derived_hash_with_manual_eq)]
#[derive(Debug, Clone, Hash)]
pub struct AddressRecord {
/// Address score.
score: i32,
/// Address.
address: Multiaddr,
}
impl AsRef<Multiaddr> for AddressRecord {
fn as_ref(&self) -> &Multiaddr {
&self.address
}
}
impl AddressRecord {
/// Create new `AddressRecord` and if `address` doesn't contain `P2p`,
/// append the provided `PeerId` to the address.
pub fn new(peer: &PeerId, address: Multiaddr, score: i32) -> Self {
let address = if !std::matches!(address.iter().last(), Some(Protocol::P2p(_))) {
address.with(Protocol::P2p(
Multihash::from_bytes(&peer.to_bytes()).expect("valid peer id"),
))
} else {
address
};
Self { address, score }
}
/// Create `AddressRecord` from `Multiaddr`.
///
/// If `address` doesn't contain `PeerId`, return `None` to indicate that this
/// an invalid `Multiaddr` from the perspective of the `TransportManager`.
pub fn from_multiaddr(address: Multiaddr) -> Option<AddressRecord> {
if !std::matches!(address.iter().last(), Some(Protocol::P2p(_))) {
return None;
}
Some(AddressRecord {
address,
score: 0i32,
})
}
/// Create `AddressRecord` from `Multiaddr`.
///
/// This method does not check if the address contains `PeerId`.
///
/// Please consider using [`Self::from_multiaddr`] from the transport manager code.
pub fn from_raw_multiaddr(address: Multiaddr) -> AddressRecord {
AddressRecord {
address,
score: 0i32,
}
}
/// Create `AddressRecord` from `Multiaddr`.
///
/// This method does not check if the address contains `PeerId`.
///
/// Please consider using [`Self::from_multiaddr`] from the transport manager code.
pub fn from_raw_multiaddr_with_score(address: Multiaddr, score: i32) -> AddressRecord {
AddressRecord { address, score }
}
/// Get address score.
#[cfg(test)]
pub fn score(&self) -> i32 {
self.score
}
/// Get address.
pub fn address(&self) -> &Multiaddr {
&self.address
}
/// Update score of an address.
pub fn update_score(&mut self, score: i32) {
self.score = self.score.saturating_add(score);
}
}
impl PartialEq for AddressRecord {
fn eq(&self, other: &Self) -> bool {
self.score.eq(&other.score)
}
}
impl Eq for AddressRecord {}
impl PartialOrd for AddressRecord {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for AddressRecord {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.score.cmp(&other.score)
}
}
/// Store for peer addresses.
#[derive(Debug, Clone, Default)]
pub struct AddressStore {
/// Addresses available.
pub addresses: HashMap<Multiaddr, AddressRecord>,
/// Maximum capacity of the address store.
max_capacity: usize,
}
impl FromIterator<Multiaddr> for AddressStore {
fn from_iter<T: IntoIterator<Item = Multiaddr>>(iter: T) -> Self {
let mut store = AddressStore::new();
for address in iter {
if let Some(record) = AddressRecord::from_multiaddr(address) {
store.insert(record);
}
}
store
}
}
impl FromIterator<AddressRecord> for AddressStore {
fn from_iter<T: IntoIterator<Item = AddressRecord>>(iter: T) -> Self {
let mut store = AddressStore::new();
for record in iter {
store.insert(record);
}
store
}
}
impl Extend<AddressRecord> for AddressStore {
fn extend<T: IntoIterator<Item = AddressRecord>>(&mut self, iter: T) {
for record in iter {
self.insert(record)
}
}
}
impl<'a> Extend<&'a AddressRecord> for AddressStore {
fn extend<T: IntoIterator<Item = &'a AddressRecord>>(&mut self, iter: T) {
for record in iter {
self.insert(record.clone())
}
}
}
impl AddressStore {
/// Create new [`AddressStore`].
pub fn new() -> Self {
Self {
addresses: HashMap::with_capacity(MAX_ADDRESSES),
max_capacity: MAX_ADDRESSES,
}
}
/// Get the score for a given error.
pub fn error_score(error: &DialError) -> i32 {
match error {
DialError::AddressError(_) => scores::ADDRESS_FAILURE,
_ => scores::CONNECTION_FAILURE,
}
}
/// Check if [`AddressStore`] is empty.
pub fn is_empty(&self) -> bool {
self.addresses.is_empty()
}
/// Insert the address record into [`AddressStore`] with the provided score.
///
/// If the address is not in the store, it will be inserted.
/// Otherwise, the score and connection ID will be updated.
pub fn insert(&mut self, record: AddressRecord) {
if let Entry::Occupied(mut occupied) = self.addresses.entry(record.address.clone()) {
occupied.get_mut().update_score(record.score);
return;
}
// The eviction algorithm favours addresses with higher scores.
//
// This algorithm has the following implications:
// - it keeps the best addresses in the store.
// - if the store is at capacity, the worst address will be evicted.
// - an address that is not dialed yet (with score zero) will be preferred over an address
// that already failed (with negative score).
if self.addresses.len() >= self.max_capacity {
let min_record = self
.addresses
.values()
.min()
.cloned()
.expect("There is at least one element checked above; qed");
// The lowest score is better than the new record.
if record.score < min_record.score {
return;
}
self.addresses.remove(min_record.address());
}
// Insert the record.
self.addresses.insert(record.address.clone(), record);
}
/// Return the available addresses sorted by score.
pub fn addresses(&self, limit: usize) -> Vec<Multiaddr> {
let mut records = self.addresses.values().cloned().collect::<Vec<_>>();
records.sort_by(|lhs, rhs| rhs.score.cmp(&lhs.score));
records.into_iter().take(limit).map(|record| record.address).collect()
}
}
#[cfg(test)]
mod tests {
use std::{
collections::HashMap,
net::{Ipv4Addr, SocketAddrV4},
};
use super::*;
use rand::{rngs::ThreadRng, Rng};
fn tcp_address_record(rng: &mut ThreadRng) -> AddressRecord {
let peer = PeerId::random();
let address = std::net::SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(
rng.gen_range(1..=255),
rng.gen_range(0..=255),
rng.gen_range(0..=255),
rng.gen_range(0..=255),
),
rng.gen_range(1..=65535),
));
let score: i32 = rng.gen_range(10..=200);
AddressRecord::new(
&peer,
Multiaddr::empty()
.with(Protocol::from(address.ip()))
.with(Protocol::Tcp(address.port())),
score,
)
}
fn ws_address_record(rng: &mut ThreadRng) -> AddressRecord {
let peer = PeerId::random();
let address = std::net::SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(
rng.gen_range(1..=255),
rng.gen_range(0..=255),
rng.gen_range(0..=255),
rng.gen_range(0..=255),
),
rng.gen_range(1..=65535),
));
let score: i32 = rng.gen_range(10..=200);
AddressRecord::new(
&peer,
Multiaddr::empty()
.with(Protocol::from(address.ip()))
.with(Protocol::Tcp(address.port()))
.with(Protocol::Ws(std::borrow::Cow::Owned("/".to_string()))),
score,
)
}
fn quic_address_record(rng: &mut ThreadRng) -> AddressRecord {
let peer = PeerId::random();
let address = std::net::SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(
rng.gen_range(1..=255),
rng.gen_range(0..=255),
rng.gen_range(0..=255),
rng.gen_range(0..=255),
),
rng.gen_range(1..=65535),
));
let score: i32 = rng.gen_range(10..=200);
AddressRecord::new(
&peer,
Multiaddr::empty()
.with(Protocol::from(address.ip()))
.with(Protocol::Udp(address.port()))
.with(Protocol::QuicV1),
score,
)
}
#[test]
fn take_multiple_records() {
let mut store = AddressStore::new();
let mut rng = rand::thread_rng();
for _ in 0..rng.gen_range(1..5) {
store.insert(tcp_address_record(&mut rng));
}
for _ in 0..rng.gen_range(1..5) {
store.insert(ws_address_record(&mut rng));
}
for _ in 0..rng.gen_range(1..5) {
store.insert(quic_address_record(&mut rng));
}
let known_addresses = store.addresses.len();
assert!(known_addresses >= 3);
let taken = store.addresses(known_addresses - 2);
assert_eq!(known_addresses - 2, taken.len());
assert!(!store.is_empty());
let mut prev: Option<AddressRecord> = None;
for address in taken {
// Addresses are still in the store.
assert!(store.addresses.contains_key(&address));
let record = store.addresses.get(&address).unwrap().clone();
if let Some(previous) = prev {
assert!(previous.score >= record.score);
}
prev = Some(record);
}
}
#[test]
fn attempt_to_take_excess_records() {
let mut store = AddressStore::new();
let mut rng = rand::thread_rng();
store.insert(tcp_address_record(&mut rng));
store.insert(ws_address_record(&mut rng));
store.insert(quic_address_record(&mut rng));
assert_eq!(store.addresses.len(), 3);
let taken = store.addresses(8usize);
assert_eq!(taken.len(), 3);
let mut prev: Option<AddressRecord> = None;
for record in taken {
let record = store.addresses.get(&record).unwrap().clone();
if prev.is_none() {
prev = Some(record);
} else {
assert!(prev.unwrap().score >= record.score);
prev = Some(record);
}
}
}
#[test]
fn extend_from_iterator() {
let mut store = AddressStore::new();
let mut rng = rand::thread_rng();
let records = (0..10)
.map(|i| {
if i % 2 == 0 {
tcp_address_record(&mut rng)
} else if i % 3 == 0 {
quic_address_record(&mut rng)
} else {
ws_address_record(&mut rng)
}
})
.collect::<Vec<_>>();
assert!(store.is_empty());
let cloned = records
.iter()
.cloned()
.map(|record| (record.address().clone(), record))
.collect::<HashMap<_, _>>();
store.extend(records);
for record in store.addresses.values() {
let stored = cloned.get(record.address()).unwrap();
assert_eq!(stored.score(), record.score());
assert_eq!(stored.address(), record.address());
}
}
#[test]
fn extend_from_iterator_ref() {
let mut store = AddressStore::new();
let mut rng = rand::thread_rng();
let records = (0..10)
.map(|i| {
if i % 2 == 0 {
let record = tcp_address_record(&mut rng);
(record.address().clone(), record)
} else if i % 3 == 0 {
let record = quic_address_record(&mut rng);
(record.address().clone(), record)
} else {
let record = ws_address_record(&mut rng);
(record.address().clone(), record)
}
})
.collect::<Vec<_>>();
assert!(store.is_empty());
let cloned = records.iter().cloned().collect::<HashMap<_, _>>();
store.extend(records.iter().map(|(_, record)| record));
for record in store.addresses.values() {
let stored = cloned.get(record.address()).unwrap();
assert_eq!(stored.score(), record.score());
assert_eq!(stored.address(), record.address());
}
}
#[test]
fn insert_record() {
let mut store = AddressStore::new();
let mut rng = rand::thread_rng();
let mut record = tcp_address_record(&mut rng);
record.score = 10;
store.insert(record.clone());
assert_eq!(store.addresses.len(), 1);
assert_eq!(store.addresses.get(record.address()).unwrap(), &record);
// This time the record is updated.
store.insert(record.clone());
assert_eq!(store.addresses.len(), 1);
let store_record = store.addresses.get(record.address()).unwrap();
assert_eq!(store_record.score, record.score * 2);
}
#[test]
fn evict_on_capacity() {
let mut store = AddressStore {
addresses: HashMap::new(),
max_capacity: 2,
};
let mut rng = rand::thread_rng();
let mut first_record = tcp_address_record(&mut rng);
first_record.score = scores::CONNECTION_ESTABLISHED;
let mut second_record = ws_address_record(&mut rng);
second_record.score = 0;
store.insert(first_record.clone());
store.insert(second_record.clone());
assert_eq!(store.addresses.len(), 2);
// We have better addresses, ignore this one.
let mut third_record = quic_address_record(&mut rng);
third_record.score = scores::CONNECTION_FAILURE;
store.insert(third_record.clone());
assert_eq!(store.addresses.len(), 2);
assert!(store.addresses.contains_key(first_record.address()));
assert!(store.addresses.contains_key(second_record.address()));
// Evict the address with the lowest score.
// Store contains scores: [100, 0].
let mut fourth_record = quic_address_record(&mut rng);
fourth_record.score = 1;
store.insert(fourth_record.clone());
assert_eq!(store.addresses.len(), 2);
assert!(store.addresses.contains_key(first_record.address()));
assert!(store.addresses.contains_key(fourth_record.address()));
}
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/transport/manager/limits.rs | src/transport/manager/limits.rs | // Copyright 2024 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Limits for the transport manager.
use crate::types::ConnectionId;
use std::collections::HashSet;
/// Configuration for the connection limits.
#[derive(Debug, Clone, Default)]
pub struct ConnectionLimitsConfig {
/// Maximum number of incoming connections that can be established.
max_incoming_connections: Option<usize>,
/// Maximum number of outgoing connections that can be established.
max_outgoing_connections: Option<usize>,
}
impl ConnectionLimitsConfig {
/// Configures the maximum number of incoming connections that can be established.
pub fn max_incoming_connections(mut self, limit: Option<usize>) -> Self {
self.max_incoming_connections = limit;
self
}
/// Configures the maximum number of outgoing connections that can be established.
pub fn max_outgoing_connections(mut self, limit: Option<usize>) -> Self {
self.max_outgoing_connections = limit;
self
}
}
/// Error type for connection limits.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ConnectionLimitsError {
/// Maximum number of incoming connections exceeded.
MaxIncomingConnectionsExceeded,
/// Maximum number of outgoing connections exceeded.
MaxOutgoingConnectionsExceeded,
}
/// Connection limits.
#[derive(Debug, Clone)]
pub struct ConnectionLimits {
/// Configuration for the connection limits.
config: ConnectionLimitsConfig,
/// Established incoming connections.
incoming_connections: HashSet<ConnectionId>,
/// Established outgoing connections.
outgoing_connections: HashSet<ConnectionId>,
}
impl ConnectionLimits {
/// Creates a new connection limits instance.
pub fn new(config: ConnectionLimitsConfig) -> Self {
let max_incoming_connections = config.max_incoming_connections.unwrap_or(0);
let max_outgoing_connections = config.max_outgoing_connections.unwrap_or(0);
Self {
config,
incoming_connections: HashSet::with_capacity(max_incoming_connections),
outgoing_connections: HashSet::with_capacity(max_outgoing_connections),
}
}
/// Called when dialing an address.
///
/// Returns the number of outgoing connections permitted to be established.
/// It is guaranteed that at least one connection can be established if the method returns `Ok`.
/// The number of available outgoing connections can influence the maximum parallel dials to a
/// single address.
///
/// If the maximum number of outgoing connections is not set, `Ok(usize::MAX)` is returned.
pub fn on_dial_address(&mut self) -> Result<usize, ConnectionLimitsError> {
if let Some(max_outgoing_connections) = self.config.max_outgoing_connections {
if self.outgoing_connections.len() >= max_outgoing_connections {
return Err(ConnectionLimitsError::MaxOutgoingConnectionsExceeded);
}
return Ok(max_outgoing_connections - self.outgoing_connections.len());
}
Ok(usize::MAX)
}
/// Called before accepting a new incoming connection.
pub fn on_incoming(&mut self) -> Result<(), ConnectionLimitsError> {
if let Some(max_incoming_connections) = self.config.max_incoming_connections {
if self.incoming_connections.len() >= max_incoming_connections {
return Err(ConnectionLimitsError::MaxIncomingConnectionsExceeded);
}
}
Ok(())
}
/// Called when a new connection is established.
///
/// Returns an error if the connection cannot be accepted due to connection limits.
pub fn can_accept_connection(
&mut self,
is_listener: bool,
) -> Result<(), ConnectionLimitsError> {
// Check connection limits.
if is_listener {
if let Some(max_incoming_connections) = self.config.max_incoming_connections {
if self.incoming_connections.len() >= max_incoming_connections {
return Err(ConnectionLimitsError::MaxIncomingConnectionsExceeded);
}
}
} else if let Some(max_outgoing_connections) = self.config.max_outgoing_connections {
if self.outgoing_connections.len() >= max_outgoing_connections {
return Err(ConnectionLimitsError::MaxOutgoingConnectionsExceeded);
}
}
Ok(())
}
/// Accept an established connection.
///
/// # Note
///
/// This method should be called after the `Self::can_accept_connection` method
/// to ensure that the connection can be accepted.
pub fn accept_established_connection(
&mut self,
connection_id: ConnectionId,
is_listener: bool,
) {
if is_listener {
if self.config.max_incoming_connections.is_some() {
self.incoming_connections.insert(connection_id);
}
} else if self.config.max_outgoing_connections.is_some() {
self.outgoing_connections.insert(connection_id);
}
}
/// Called when a connection is closed.
pub fn on_connection_closed(&mut self, connection_id: ConnectionId) {
self.incoming_connections.remove(&connection_id);
self.outgoing_connections.remove(&connection_id);
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::types::ConnectionId;
#[test]
fn connection_limits() {
let config = ConnectionLimitsConfig::default()
.max_incoming_connections(Some(3))
.max_outgoing_connections(Some(2));
let mut limits = ConnectionLimits::new(config);
let connection_id_in_1 = ConnectionId::random();
let connection_id_in_2 = ConnectionId::random();
let connection_id_out_1 = ConnectionId::random();
let connection_id_out_2 = ConnectionId::random();
let connection_id_in_3 = ConnectionId::random();
// Establish incoming connection.
assert!(limits.can_accept_connection(true).is_ok());
limits.accept_established_connection(connection_id_in_1, true);
assert_eq!(limits.incoming_connections.len(), 1);
assert!(limits.can_accept_connection(true).is_ok());
limits.accept_established_connection(connection_id_in_2, true);
assert_eq!(limits.incoming_connections.len(), 2);
assert!(limits.can_accept_connection(true).is_ok());
limits.accept_established_connection(connection_id_in_3, true);
assert_eq!(limits.incoming_connections.len(), 3);
assert_eq!(
limits.can_accept_connection(true).unwrap_err(),
ConnectionLimitsError::MaxIncomingConnectionsExceeded
);
assert_eq!(limits.incoming_connections.len(), 3);
// Establish outgoing connection.
assert!(limits.can_accept_connection(false).is_ok());
limits.accept_established_connection(connection_id_out_1, false);
assert_eq!(limits.incoming_connections.len(), 3);
assert_eq!(limits.outgoing_connections.len(), 1);
assert!(limits.can_accept_connection(false).is_ok());
limits.accept_established_connection(connection_id_out_2, false);
assert_eq!(limits.incoming_connections.len(), 3);
assert_eq!(limits.outgoing_connections.len(), 2);
assert_eq!(
limits.can_accept_connection(false).unwrap_err(),
ConnectionLimitsError::MaxOutgoingConnectionsExceeded
);
// Close connections with peer a.
limits.on_connection_closed(connection_id_in_1);
assert_eq!(limits.incoming_connections.len(), 2);
assert_eq!(limits.outgoing_connections.len(), 2);
limits.on_connection_closed(connection_id_out_1);
assert_eq!(limits.incoming_connections.len(), 2);
assert_eq!(limits.outgoing_connections.len(), 1);
}
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/transport/manager/types.rs | src/transport/manager/types.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::transport::manager::{address::AddressStore, peer_state::PeerState};
/// Supported protocols.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum SupportedTransport {
/// TCP.
Tcp,
/// QUIC.
#[cfg(feature = "quic")]
Quic,
/// WebRTC
#[cfg(feature = "webrtc")]
WebRtc,
/// WebSocket
#[cfg(feature = "websocket")]
WebSocket,
}
/// Peer context.
#[derive(Debug)]
pub struct PeerContext {
/// Peer state.
pub state: PeerState,
/// Known addresses of peer.
pub addresses: AddressStore,
}
impl Default for PeerContext {
fn default() -> Self {
Self {
state: PeerState::Disconnected { dial_record: None },
addresses: AddressStore::new(),
}
}
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/src/transport/manager/mod.rs | src/transport/manager/mod.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::{
addresses::PublicAddresses,
codec::ProtocolCodec,
crypto::ed25519::Keypair,
error::{AddressError, DialError, Error},
executor::Executor,
protocol::{InnerTransportEvent, TransportService},
transport::{
manager::{
address::AddressRecord,
handle::InnerTransportManagerCommand,
peer_state::{ConnectionRecord, PeerState, StateDialResult},
types::PeerContext,
},
Endpoint, Transport, TransportEvent, MAX_PARALLEL_DIALS,
},
types::{protocol::ProtocolName, ConnectionId},
BandwidthSink, PeerId,
};
use address::{scores, AddressStore};
use futures::{Stream, StreamExt};
use indexmap::IndexMap;
use multiaddr::{Multiaddr, Protocol};
use multihash::Multihash;
use parking_lot::RwLock;
use tokio::sync::mpsc::{channel, Receiver, Sender};
use std::{
collections::{HashMap, HashSet},
pin::Pin,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
task::{Context, Poll},
time::Duration,
};
pub use handle::{TransportHandle, TransportManagerHandle};
pub use types::SupportedTransport;
pub(crate) mod address;
pub mod limits;
mod peer_state;
mod types;
pub(crate) mod handle;
// TODO: https://github.com/paritytech/litep2p/issues/268 Periodically clean up idle peers.
// TODO: https://github.com/paritytech/litep2p/issues/344 add lots of documentation
/// Logging target for the file.
const LOG_TARGET: &str = "litep2p::transport-manager";
/// The connection established result.
///
/// Outcome of `TransportManager::on_connection_established`.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum ConnectionEstablishedResult {
    /// Accept connection and inform `Litep2p` about the connection.
    Accept,
    /// Reject connection.
    Reject,
}
/// [`crate::transport::manager::TransportManager`] events.
///
/// `Debug` is derived for consistency with the other types in this module
/// (`PeerId` and `ConnectionId` are printed with `?field` by the tracing
/// calls in this file, so both implement `Debug`).
#[derive(Debug)]
pub enum TransportManagerEvent {
    /// Connection closed to remote peer.
    ConnectionClosed {
        /// Peer ID.
        peer: PeerId,
        /// Connection ID.
        connection: ConnectionId,
    },
}
/// Protocol context.
///
/// Per-protocol bookkeeping kept by the transport manager and cloned into
/// transport handles.
#[derive(Debug, Clone)]
pub struct ProtocolContext {
    /// Codec used by the protocol.
    pub codec: ProtocolCodec,
    /// TX channel for sending events to protocol.
    pub tx: Sender<InnerTransportEvent>,
    /// Fallback names for the protocol.
    pub fallback_names: Vec<ProtocolName>,
}
impl ProtocolContext {
/// Create new [`ProtocolContext`].
fn new(
codec: ProtocolCodec,
tx: Sender<InnerTransportEvent>,
fallback_names: Vec<ProtocolName>,
) -> Self {
Self {
tx,
codec,
fallback_names,
}
}
}
/// Transport context for enabled transports.
struct TransportContext {
    /// Polling index: round-robin cursor into `transports`, advanced by the
    /// `Stream` implementation so no transport is starved.
    index: usize,
    /// Registered transports.
    transports: IndexMap<SupportedTransport, Box<dyn Transport<Item = TransportEvent>>>,
}
impl TransportContext {
/// Create new [`TransportContext`].
pub fn new() -> Self {
Self {
index: 0usize,
transports: IndexMap::new(),
}
}
/// Get an iterator of supported transports.
pub fn keys(&self) -> impl Iterator<Item = &SupportedTransport> {
self.transports.keys()
}
/// Get mutable access to transport.
pub fn get_mut(
&mut self,
key: &SupportedTransport,
) -> Option<&mut Box<dyn Transport<Item = TransportEvent>>> {
self.transports.get_mut(key)
}
/// Register `transport` to `TransportContext`.
pub fn register_transport(
&mut self,
name: SupportedTransport,
transport: Box<dyn Transport<Item = TransportEvent>>,
) {
assert!(self.transports.insert(name, transport).is_none());
}
}
impl Stream for TransportContext {
    type Item = (SupportedTransport, TransportEvent);

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        if self.transports.is_empty() {
            // Terminate if we don't have any transports installed.
            return Poll::Ready(None);
        }
        let len = self.transports.len();
        // Round-robin: poll each transport at most once per call, resuming
        // from where the previous call left off so no transport is starved.
        for _ in 0..len {
            let current = self.index;
            // Advance the cursor before polling so the next call starts at
            // the following transport even if this one yields an event.
            self.index = (current + 1) % len;
            // `current` is always `< len`: it was produced by the modulo above
            // (or is the initial 0) and transports are never removed.
            let (key, stream) = self.transports.get_index_mut(current).expect("transport to exist");
            match stream.poll_next_unpin(cx) {
                Poll::Pending => {}
                Poll::Ready(None) => {
                    // Any single transport ending terminates the combined stream.
                    return Poll::Ready(None);
                }
                Poll::Ready(Some(event)) => {
                    // Tag the event with the transport it came from.
                    let event = Some((*key, event));
                    return Poll::Ready(event);
                }
            }
        }
        // Every transport returned `Pending` this round.
        Poll::Pending
    }
}
/// Litep2p connection manager.
pub struct TransportManager {
    /// Local peer ID.
    local_peer_id: PeerId,
    /// Keypair.
    keypair: Keypair,
    /// Bandwidth sink.
    bandwidth_sink: BandwidthSink,
    /// Maximum parallel dial attempts per peer.
    max_parallel_dials: usize,
    /// Installed protocols.
    protocols: HashMap<ProtocolName, ProtocolContext>,
    /// All names (main and fallback(s)) of the installed protocols.
    protocol_names: HashSet<ProtocolName>,
    /// Listen addresses.
    listen_addresses: Arc<RwLock<HashSet<Multiaddr>>>,
    /// Public addresses of the node.
    public_addresses: PublicAddresses,
    /// Next connection ID.
    next_connection_id: Arc<AtomicUsize>,
    /// Next substream ID.
    next_substream_id: Arc<AtomicUsize>,
    /// Installed transports.
    transports: TransportContext,
    /// Peer contexts, shared with the transport manager handle.
    peers: Arc<RwLock<HashMap<PeerId, PeerContext>>>,
    /// Handle to [`crate::transport::manager::TransportManager`].
    transport_manager_handle: TransportManagerHandle,
    /// RX channel for receiving events from installed transports.
    event_rx: Receiver<TransportManagerEvent>,
    /// RX channel for receiving commands from installed protocols.
    cmd_rx: Receiver<InnerTransportManagerCommand>,
    /// TX channel for transport events that is given to installed transports.
    event_tx: Sender<TransportManagerEvent>,
    /// Pending connections: in-flight dials keyed by connection ID, mapping to
    /// the peer being dialed.
    pending_connections: HashMap<ConnectionId, PeerId>,
    /// Connection limits.
    connection_limits: limits::ConnectionLimits,
    /// Opening connections errors.
    opening_errors: HashMap<ConnectionId, Vec<(Multiaddr, DialError)>>,
}
/// Builder for [`crate::transport::manager::TransportManager`].
pub struct TransportManagerBuilder {
    /// Keypair; generated on `build()` when not provided.
    keypair: Option<Keypair>,
    /// Supported transports.
    supported_transports: HashSet<SupportedTransport>,
    /// Bandwidth sink; a fresh sink is created on `build()` when not provided.
    bandwidth_sink: Option<BandwidthSink>,
    /// Maximum parallel dial attempts per peer.
    max_parallel_dials: usize,
    /// Connection limits config.
    connection_limits_config: limits::ConnectionLimitsConfig,
}
impl Default for TransportManagerBuilder {
fn default() -> Self {
Self::new()
}
}
impl TransportManagerBuilder {
    /// Create new [`crate::transport::manager::TransportManagerBuilder`].
    pub fn new() -> Self {
        Self {
            keypair: None,
            supported_transports: HashSet::new(),
            bandwidth_sink: None,
            max_parallel_dials: MAX_PARALLEL_DIALS,
            connection_limits_config: limits::ConnectionLimitsConfig::default(),
        }
    }

    /// Set the keypair used for the local node identity.
    pub fn with_keypair(mut self, keypair: Keypair) -> Self {
        self.keypair = Some(keypair);
        self
    }

    /// Set the supported transports.
    pub fn with_supported_transports(
        mut self,
        supported_transports: HashSet<SupportedTransport>,
    ) -> Self {
        self.supported_transports = supported_transports;
        self
    }

    /// Set the bandwidth sink.
    pub fn with_bandwidth_sink(mut self, bandwidth_sink: BandwidthSink) -> Self {
        self.bandwidth_sink = Some(bandwidth_sink);
        self
    }

    /// Set the maximum number of parallel dial attempts per peer.
    //
    // NOTE: the parameter was previously misspelled `max_parrallel_dials`;
    // Rust parameter names are not part of the call interface, so the rename
    // is backward-compatible.
    pub fn with_max_parallel_dials(mut self, max_parallel_dials: usize) -> Self {
        self.max_parallel_dials = max_parallel_dials;
        self
    }

    /// Set connection limits configuration.
    pub fn with_connection_limits_config(
        mut self,
        connection_limits_config: limits::ConnectionLimitsConfig,
    ) -> Self {
        self.connection_limits_config = connection_limits_config;
        self
    }

    /// Build [`TransportManager`].
    pub fn build(self) -> TransportManager {
        // Generate an ephemeral identity when the caller did not provide one.
        let keypair = self.keypair.unwrap_or_else(Keypair::generate);
        let local_peer_id = PeerId::from_public_key(&keypair.public().into());

        let peers = Arc::new(RwLock::new(HashMap::new()));
        let (cmd_tx, cmd_rx) = channel(256);
        let (event_tx, event_rx) = channel(256);
        let listen_addresses = Arc::new(RwLock::new(HashSet::new()));
        let public_addresses = PublicAddresses::new(local_peer_id);

        // The handle shares the peer table, listen addresses and public
        // addresses with the manager itself.
        let handle = TransportManagerHandle::new(
            local_peer_id,
            peers.clone(),
            cmd_tx,
            self.supported_transports,
            listen_addresses.clone(),
            public_addresses.clone(),
        );

        TransportManager {
            local_peer_id,
            keypair,
            bandwidth_sink: self.bandwidth_sink.unwrap_or_else(BandwidthSink::new),
            max_parallel_dials: self.max_parallel_dials,
            protocols: HashMap::new(),
            protocol_names: HashSet::new(),
            listen_addresses,
            public_addresses,
            next_connection_id: Arc::new(AtomicUsize::new(0usize)),
            next_substream_id: Arc::new(AtomicUsize::new(0usize)),
            transports: TransportContext::new(),
            peers,
            transport_manager_handle: handle,
            event_rx,
            cmd_rx,
            event_tx,
            pending_connections: HashMap::new(),
            connection_limits: limits::ConnectionLimits::new(self.connection_limits_config),
            opening_errors: HashMap::new(),
        }
    }
}
impl TransportManager {
/// Get iterator to installed protocols.
pub fn protocols(&self) -> impl Iterator<Item = &ProtocolName> {
self.protocols.keys()
}
/// Get iterator to installed transports
pub fn installed_transports(&self) -> impl Iterator<Item = &SupportedTransport> {
self.transports.keys()
}
/// Get next connection ID.
fn next_connection_id(&self) -> ConnectionId {
let connection_id = self.next_connection_id.fetch_add(1usize, Ordering::Relaxed);
ConnectionId::from(connection_id)
}
/// Get the transport manager handle
pub fn transport_manager_handle(&self) -> TransportManagerHandle {
self.transport_manager_handle.clone()
}
/// Register protocol to the [`crate::transport::manager::TransportManager`].
///
/// This allocates new context for the protocol and returns a handle
/// which the protocol can use the interact with the transport subsystem.
pub fn register_protocol(
&mut self,
protocol: ProtocolName,
fallback_names: Vec<ProtocolName>,
codec: ProtocolCodec,
keep_alive_timeout: Duration,
) -> TransportService {
assert!(!self.protocol_names.contains(&protocol));
for fallback in &fallback_names {
if self.protocol_names.contains(fallback) {
panic!("duplicate fallback protocol given: {fallback:?}");
}
}
let (service, sender) = TransportService::new(
self.local_peer_id,
protocol.clone(),
fallback_names.clone(),
self.next_substream_id.clone(),
self.transport_manager_handle(),
keep_alive_timeout,
);
self.protocols.insert(
protocol.clone(),
ProtocolContext::new(codec, sender, fallback_names.clone()),
);
self.protocol_names.insert(protocol);
self.protocol_names.extend(fallback_names);
service
}
/// Unregister a protocol in response of the user dropping the protocol handle.
fn unregister_protocol(&mut self, protocol: ProtocolName) {
let Some(context) = self.protocols.remove(&protocol) else {
tracing::error!(target: LOG_TARGET, ?protocol, "Cannot unregister protocol, not registered");
return;
};
for fallback in &context.fallback_names {
if !self.protocol_names.remove(fallback) {
tracing::error!(target: LOG_TARGET, ?fallback, ?protocol, "Cannot unregister fallback protocol, not registered");
}
}
tracing::info!(
target: LOG_TARGET,
?protocol,
"Protocol fully unregistered"
);
}
/// Acquire `TransportHandle`.
pub fn transport_handle(&self, executor: Arc<dyn Executor>) -> TransportHandle {
TransportHandle {
tx: self.event_tx.clone(),
executor,
keypair: self.keypair.clone(),
protocols: self.protocols.clone(),
bandwidth_sink: self.bandwidth_sink.clone(),
next_substream_id: self.next_substream_id.clone(),
next_connection_id: self.next_connection_id.clone(),
}
}
    /// Register transport to `TransportManager`.
    ///
    /// The transport is added to the polling set and also advertised through
    /// the transport manager handle.
    pub(crate) fn register_transport(
        &mut self,
        name: SupportedTransport,
        transport: Box<dyn Transport<Item = TransportEvent>>,
    ) {
        tracing::debug!(target: LOG_TARGET, transport = ?name, "register transport");
        self.transports.register_transport(name, transport);
        self.transport_manager_handle.register_transport(name);
    }

    /// Get the list of public addresses of the node.
    pub(crate) fn public_addresses(&self) -> PublicAddresses {
        self.public_addresses.clone()
    }
/// Register local listen address.
pub fn register_listen_address(&mut self, address: Multiaddr) {
assert!(!address.iter().any(|protocol| std::matches!(protocol, Protocol::P2p(_))));
let mut listen_addresses = self.listen_addresses.write();
listen_addresses.insert(address.clone());
listen_addresses.insert(address.with(Protocol::P2p(
Multihash::from_bytes(&self.local_peer_id.to_bytes()).unwrap(),
)));
}
    /// Add one or more known addresses for `peer`.
    ///
    /// Delegates to the shared transport manager handle; see
    /// `TransportManagerHandle::add_known_address` for the meaning of the
    /// returned count.
    pub fn add_known_address(
        &mut self,
        peer: PeerId,
        address: impl Iterator<Item = Multiaddr>,
    ) -> usize {
        self.transport_manager_handle.add_known_address(&peer, address)
    }
/// Return multiple addresses to dial on supported protocols.
fn supported_transports_addresses(
addresses: &[Multiaddr],
) -> HashMap<SupportedTransport, Vec<Multiaddr>> {
let mut transports = HashMap::<SupportedTransport, Vec<Multiaddr>>::new();
for address in addresses.iter().cloned() {
#[cfg(feature = "quic")]
if address.iter().any(|p| std::matches!(&p, Protocol::QuicV1)) {
transports.entry(SupportedTransport::Quic).or_default().push(address);
continue;
}
#[cfg(feature = "websocket")]
if address.iter().any(|p| std::matches!(&p, Protocol::Ws(_) | Protocol::Wss(_))) {
transports.entry(SupportedTransport::WebSocket).or_default().push(address);
continue;
}
transports.entry(SupportedTransport::Tcp).or_default().push(address);
}
transports
}
    /// Dial peer using `PeerId`.
    ///
    /// Returns an error if the peer is unknown or the peer is already connected.
    pub async fn dial(&mut self, peer: PeerId) -> crate::Result<()> {
        // Don't alter the peer state if there's no capacity to dial.
        let available_capacity = self.connection_limits.on_dial_address()?;
        // The available capacity is the maximum number of connections that can be established,
        // so we limit the number of parallel dials to the minimum of these values.
        let limit = available_capacity.min(self.max_parallel_dials);
        if peer == self.local_peer_id {
            return Err(Error::TriedToDialSelf);
        }
        let mut peers = self.peers.write();
        let context = peers.entry(peer).or_default();
        // Check if dialing is possible before allocating addresses.
        match context.state.can_dial() {
            StateDialResult::AlreadyConnected => return Err(Error::AlreadyConnected),
            // An earlier dial for this peer is still in flight; nothing to do.
            StateDialResult::DialingInProgress => return Ok(()),
            StateDialResult::Ok => {}
        };
        // The addresses are sorted by score and contain the remote peer ID.
        // We double checked above that the remote peer is not the local peer.
        let dial_addresses = context.addresses.addresses(limit);
        if dial_addresses.is_empty() {
            return Err(Error::NoAddressAvailable(peer));
        }
        // A single connection ID covers the dials issued on all transports below.
        let connection_id = self.next_connection_id();
        tracing::debug!(
            target: LOG_TARGET,
            ?connection_id,
            addresses = ?dial_addresses,
            "dial remote peer",
        );
        // Partition the candidate addresses per transport.
        let transports = Self::supported_transports_addresses(&dial_addresses);
        // Dialing addresses will succeed because the `context.state.can_dial()` returned `Ok`.
        let result = context.state.dial_addresses(
            connection_id,
            dial_addresses.iter().cloned().collect(),
            transports.keys().cloned().collect(),
        );
        if result != StateDialResult::Ok {
            tracing::warn!(
                target: LOG_TARGET,
                ?peer,
                ?connection_id,
                state = ?context.state,
                "invalid state for dialing",
            );
        }
        // Hand each transport its share of the addresses; transports that are
        // not installed are skipped silently.
        for (transport, addresses) in transports {
            if addresses.is_empty() {
                continue;
            }
            let Some(installed_transport) = self.transports.get_mut(&transport) else {
                continue;
            };
            installed_transport.open(connection_id, addresses)?;
        }
        // Map the in-flight dial back to the peer so later transport events
        // can be routed to the correct peer context.
        self.pending_connections.insert(connection_id, peer);
        Ok(())
    }
    /// Dial peer using `Multiaddr`.
    ///
    /// Returns an error if the address is not valid.
    pub async fn dial_address(&mut self, address: Multiaddr) -> crate::Result<()> {
        self.connection_limits.on_dial_address()?;
        // The address must carry a `/p2p/<peer id>` component.
        let address_record = AddressRecord::from_multiaddr(address)
            .ok_or(Error::AddressError(AddressError::PeerIdMissing))?;
        if self.listen_addresses.read().contains(address_record.as_ref()) {
            return Err(Error::TriedToDialSelf);
        }
        tracing::debug!(target: LOG_TARGET, address = ?address_record.address(), "dial address");
        // Walk the protocol stack to find the transport responsible for the
        // address: the first component must be an IP or DNS protocol.
        let mut protocol_stack = address_record.as_ref().iter();
        match protocol_stack
            .next()
            .ok_or_else(|| Error::TransportNotSupported(address_record.address().clone()))?
        {
            Protocol::Ip4(_) | Protocol::Ip6(_) => {}
            Protocol::Dns(_) | Protocol::Dns4(_) | Protocol::Dns6(_) => {}
            transport => {
                tracing::error!(
                    target: LOG_TARGET,
                    ?transport,
                    "invalid transport, expected `ip4`/`ip6`"
                );
                return Err(Error::TransportNotSupported(
                    address_record.address().clone(),
                ));
            }
        };
        // The second component selects the transport: TCP (optionally upgraded
        // to WebSocket) or UDP followed by `quic-v1` when those features are on.
        let supported_transport = match protocol_stack
            .next()
            .ok_or_else(|| Error::TransportNotSupported(address_record.address().clone()))?
        {
            Protocol::Tcp(_) => match protocol_stack.next() {
                #[cfg(feature = "websocket")]
                Some(Protocol::Ws(_)) | Some(Protocol::Wss(_)) => SupportedTransport::WebSocket,
                Some(Protocol::P2p(_)) => SupportedTransport::Tcp,
                _ =>
                    return Err(Error::TransportNotSupported(
                        address_record.address().clone(),
                    )),
            },
            #[cfg(feature = "quic")]
            Protocol::Udp(_) => match protocol_stack
                .next()
                .ok_or_else(|| Error::TransportNotSupported(address_record.address().clone()))?
            {
                Protocol::QuicV1 => SupportedTransport::Quic,
                _ => {
                    tracing::debug!(target: LOG_TARGET, address = ?address_record.address(), "expected `quic-v1`");
                    return Err(Error::TransportNotSupported(
                        address_record.address().clone(),
                    ));
                }
            },
            protocol => {
                tracing::error!(
                    target: LOG_TARGET,
                    ?protocol,
                    "invalid protocol"
                );
                return Err(Error::TransportNotSupported(
                    address_record.address().clone(),
                ));
            }
        };
        // when constructing `AddressRecord`, `PeerId` was verified to be part of the address
        let remote_peer_id =
            PeerId::try_from_multiaddr(address_record.address()).expect("`PeerId` to exist");
        // set connection id for the address record and put peer into `Dialing` state
        let connection_id = self.next_connection_id();
        let dial_record = ConnectionRecord {
            address: address_record.address().clone(),
            connection_id,
        };
        // Scope the peer-table write lock so it is released before the
        // transport dial below.
        {
            let mut peers = self.peers.write();
            let context = peers.entry(remote_peer_id).or_default();
            // Keep the provided record around for possible future dials.
            context.addresses.insert(address_record.clone());
            match context.state.dial_single_address(dial_record) {
                StateDialResult::AlreadyConnected => return Err(Error::AlreadyConnected),
                StateDialResult::DialingInProgress => return Ok(()),
                StateDialResult::Ok => {}
            };
        }
        self.transports
            .get_mut(&supported_transport)
            .ok_or(Error::TransportNotSupported(
                address_record.address().clone(),
            ))?
            .dial(connection_id, address_record.address().clone())?;
        // Map the in-flight dial back to the peer so later transport events
        // can be routed to the correct peer context.
        self.pending_connections.insert(connection_id, remote_peer_id);
        Ok(())
    }
// Update the address on a dial failure.
fn update_address_on_dial_failure(&mut self, address: Multiaddr, error: &DialError) {
let mut peers = self.peers.write();
let score = AddressStore::error_score(error);
// Extract the peer ID at this point to give `NegotiationError::PeerIdMismatch` a chance to
// propagate.
let peer_id = match address.iter().last() {
Some(Protocol::P2p(hash)) => PeerId::from_multihash(hash).ok(),
_ => None,
};
let Some(peer_id) = peer_id else {
return;
};
// We need a valid context for this peer to keep track of failed addresses.
let context = peers.entry(peer_id).or_default();
context.addresses.insert(AddressRecord::new(&peer_id, address.clone(), score));
}
/// Handle dial failure.
///
/// The main purpose of this function is to advance the internal `PeerState`.
fn on_dial_failure(&mut self, connection_id: ConnectionId) -> crate::Result<()> {
tracing::trace!(target: LOG_TARGET, ?connection_id, "on dial failure");
let peer = self.pending_connections.remove(&connection_id).ok_or_else(|| {
tracing::error!(
target: LOG_TARGET,
?connection_id,
"dial failed for a connection that doesn't exist",
);
Error::InvalidState
})?;
let mut peers = self.peers.write();
let context = peers.entry(peer).or_default();
let previous_state = context.state.clone();
if !context.state.on_dial_failure(connection_id) {
tracing::warn!(
target: LOG_TARGET,
?peer,
?connection_id,
state = ?context.state,
"invalid state for dial failure",
);
} else {
tracing::trace!(
target: LOG_TARGET,
?peer,
?connection_id,
?previous_state,
state = ?context.state,
"on dial failure completed"
);
}
Ok(())
}
    /// Account for a new pending inbound connection against the connection
    /// limits, propagating an error when `connection_limits` rejects it.
    fn on_pending_incoming_connection(&mut self) -> crate::Result<()> {
        self.connection_limits.on_incoming()?;
        Ok(())
    }
/// Handle closed connection.
fn on_connection_closed(
&mut self,
peer: PeerId,
connection_id: ConnectionId,
) -> Option<TransportEvent> {
tracing::trace!(target: LOG_TARGET, ?peer, ?connection_id, "connection closed");
self.connection_limits.on_connection_closed(connection_id);
let mut peers = self.peers.write();
let context = peers.entry(peer).or_default();
let previous_state = context.state.clone();
let connection_closed = context.state.on_connection_closed(connection_id);
if context.state == previous_state {
tracing::warn!(
target: LOG_TARGET,
?peer,
?connection_id,
state = ?context.state,
"invalid state for a closed connection",
);
} else {
tracing::trace!(
target: LOG_TARGET,
?peer,
?connection_id,
?previous_state,
state = ?context.state,
"on connection closed completed"
);
}
connection_closed.then_some(TransportEvent::ConnectionClosed {
peer,
connection_id,
})
}
/// Update the address on a connection established.
fn update_address_on_connection_established(&mut self, peer: PeerId, endpoint: &Endpoint) {
// The connection can be inbound or outbound.
// For the inbound connection type, in most cases, the remote peer dialed
// with an ephemeral port which it might not be listening on.
// Therefore, we only insert the address into the store if we're the dialer.
if endpoint.is_listener() {
return;
}
let mut peers = self.peers.write();
let record = AddressRecord::new(
&peer,
endpoint.address().clone(),
scores::CONNECTION_ESTABLISHED,
);
let context = peers.entry(peer).or_default();
context.addresses.insert(record);
}
    /// Handle an established connection.
    ///
    /// Decides whether the connection is accepted or rejected based on the
    /// connection limits and the peer's current state, cancelling any
    /// still-pending dials for the peer when a connection is accepted.
    fn on_connection_established(
        &mut self,
        peer: PeerId,
        endpoint: &Endpoint,
    ) -> crate::Result<ConnectionEstablishedResult> {
        self.update_address_on_connection_established(peer, endpoint);
        // If this connection completes one of our own dials, the peer we
        // dialed must be the peer the connection was established to.
        if let Some(dialed_peer) = self.pending_connections.remove(&endpoint.connection_id()) {
            if dialed_peer != peer {
                tracing::warn!(
                    target: LOG_TARGET,
                    ?dialed_peer,
                    ?peer,
                    ?endpoint,
                    "peer ids do not match but transport was supposed to reject connection"
                );
                debug_assert!(false);
                return Err(Error::InvalidState);
            }
        };
        // Reject the connection if exceeded limits.
        if let Err(error) = self.connection_limits.can_accept_connection(endpoint.is_listener()) {
            tracing::debug!(
                target: LOG_TARGET,
                ?peer,
                ?endpoint,
                ?error,
                "connection limit exceeded, rejecting connection",
            );
            return Ok(ConnectionEstablishedResult::Reject);
        }
        let mut peers = self.peers.write();
        let context = peers.entry(peer).or_default();
        let previous_state = context.state.clone();
        // The peer state machine decides whether this connection is kept.
        let connection_accepted = context
            .state
            .on_connection_established(ConnectionRecord::from_endpoint(peer, endpoint));
        tracing::trace!(
            target: LOG_TARGET,
            ?peer,
            ?endpoint,
            ?previous_state,
            state = ?context.state,
            "on connection established completed"
        );
        if connection_accepted {
            // Track the accepted connection against the limits.
            self.connection_limits
                .accept_established_connection(endpoint.connection_id(), endpoint.is_listener());
            // Cancel all pending dials if the connection was established.
            if let PeerState::Opening {
                connection_id,
                transports,
                ..
            } = previous_state
            {
                // cancel all pending dials
                transports.iter().for_each(|transport| {
                    self.transports
                        .get_mut(transport)
                        .expect("transport to exist")
                        .cancel(connection_id);
                });
                // since an inbound connection was removed, the outbound connection can be
                // removed from pending dials
                //
                // This may race in the following scenario:
                //
                // T0: we open address X on protocol TCP
                // T1: remote peer opens a connection with us
                // T2: address X is dialed and event is propagated from TCP to transport manager
                // T3: `on_connection_established` is called for T1 and pending connections cleared
                // T4: event from T2 is delivered.
                //
                // TODO: see https://github.com/paritytech/litep2p/issues/276 for more details.
                self.pending_connections.remove(&connection_id);
            }
            return Ok(ConnectionEstablishedResult::Accept);
        }
        Ok(ConnectionEstablishedResult::Reject)
    }
fn on_connection_opened(
&mut self,
transport: SupportedTransport,
connection_id: ConnectionId,
address: Multiaddr,
) -> crate::Result<()> {
let Some(peer) = self.pending_connections.remove(&connection_id) else {
tracing::warn!(
target: LOG_TARGET,
?connection_id,
?transport,
?address,
"connection opened but dial record doesn't exist",
);
debug_assert!(false);
return Err(Error::InvalidState);
};
let mut peers = self.peers.write();
let context = peers.entry(peer).or_default();
// Keep track of the address.
context.addresses.insert(AddressRecord::new(
&peer,
address.clone(),
scores::CONNECTION_ESTABLISHED,
));
let previous_state = context.state.clone();
let record = ConnectionRecord::new(peer, address.clone(), connection_id);
let state_advanced = context.state.on_connection_opened(record);
if !state_advanced {
tracing::warn!(
target: LOG_TARGET,
?peer,
?connection_id,
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.